This is page 4 of 39. Use http://codebase.md/wshobson/maverick-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── question.md
│   │   └── security_report.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude-code-review.yml
│       └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│   ├── launch.json
│   └── settings.json
├── alembic
│   ├── env.py
│   ├── script.py.mako
│   └── versions
│       ├── 001_initial_schema.py
│       ├── 003_add_performance_indexes.py
│       ├── 006_rename_metadata_columns.py
│       ├── 008_performance_optimization_indexes.py
│       ├── 009_rename_to_supply_demand.py
│       ├── 010_self_contained_schema.py
│       ├── 011_remove_proprietary_terms.py
│       ├── 013_add_backtest_persistence_models.py
│       ├── 014_add_portfolio_models.py
│       ├── 08e3945a0c93_merge_heads.py
│       ├── 9374a5c9b679_merge_heads_for_testing.py
│       ├── abf9b9afb134_merge_multiple_heads.py
│       ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│       ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│       ├── f0696e2cac15_add_essential_performance_indexes.py
│       └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api
│   │   └── backtesting.md
│   ├── BACKTESTING.md
│   ├── COST_BASIS_SPECIFICATION.md
│   ├── deep_research_agent.md
│   ├── exa_research_testing_strategy.md
│   ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│   ├── PORTFOLIO.md
│   ├── SETUP_SELF_CONTAINED.md
│   └── speed_testing_framework.md
├── examples
│   ├── complete_speed_validation.py
│   ├── deep_research_integration.py
│   ├── llm_optimization_example.py
│   ├── llm_speed_demo.py
│   ├── monitoring_example.py
│   ├── parallel_research_example.py
│   ├── speed_optimization_demo.py
│   └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── circuit_breaker.py
│   │   ├── deep_research.py
│   │   ├── market_analysis.py
│   │   ├── optimized_research.py
│   │   ├── supervisor.py
│   │   └── technical_analysis.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── api_server.py
│   │   ├── connection_manager.py
│   │   ├── dependencies
│   │   │   ├── __init__.py
│   │   │   ├── stock_analysis.py
│   │   │   └── technical_analysis.py
│   │   ├── error_handling.py
│   │   ├── inspector_compatible_sse.py
│   │   ├── inspector_sse.py
│   │   ├── middleware
│   │   │   ├── error_handling.py
│   │   │   ├── mcp_logging.py
│   │   │   ├── rate_limiting_enhanced.py
│   │   │   └── security.py
│   │   ├── openapi_config.py
│   │   ├── routers
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── backtesting.py
│   │   │   ├── data_enhanced.py
│   │   │   ├── data.py
│   │   │   ├── health_enhanced.py
│   │   │   ├── health_tools.py
│   │   │   ├── health.py
│   │   │   ├── intelligent_backtesting.py
│   │   │   ├── introspection.py
│   │   │   ├── mcp_prompts.py
│   │   │   ├── monitoring.py
│   │   │   ├── news_sentiment_enhanced.py
│   │   │   ├── performance.py
│   │   │   ├── portfolio.py
│   │   │   ├── research.py
│   │   │   ├── screening_ddd.py
│   │   │   ├── screening_parallel.py
│   │   │   ├── screening.py
│   │   │   ├── technical_ddd.py
│   │   │   ├── technical_enhanced.py
│   │   │   ├── technical.py
│   │   │   └── tool_registry.py
│   │   ├── server.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   ├── base_service.py
│   │   │   ├── market_service.py
│   │   │   ├── portfolio_service.py
│   │   │   ├── prompt_service.py
│   │   │   └── resource_service.py
│   │   ├── simple_sse.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── insomnia_export.py
│   │       └── postman_export.py
│   ├── application
│   │   ├── __init__.py
│   │   ├── commands
│   │   │   └── __init__.py
│   │   ├── dto
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_dto.py
│   │   ├── queries
│   │   │   ├── __init__.py
│   │   │   └── get_technical_analysis.py
│   │   └── screening
│   │       ├── __init__.py
│   │       ├── dtos.py
│   │       └── queries.py
│   ├── backtesting
│   │   ├── __init__.py
│   │   ├── ab_testing.py
│   │   ├── analysis.py
│   │   ├── batch_processing_stub.py
│   │   ├── batch_processing.py
│   │   ├── model_manager.py
│   │   ├── optimization.py
│   │   ├── persistence.py
│   │   ├── retraining_pipeline.py
│   │   ├── strategies
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── ml
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adaptive.py
│   │   │   │   ├── ensemble.py
│   │   │   │   ├── feature_engineering.py
│   │   │   │   └── regime_aware.py
│   │   │   ├── ml_strategies.py
│   │   │   ├── parser.py
│   │   │   └── templates.py
│   │   ├── strategy_executor.py
│   │   ├── vectorbt_engine.py
│   │   └── visualization.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── constants.py
│   │   ├── database_self_contained.py
│   │   ├── database.py
│   │   ├── llm_optimization_config.py
│   │   ├── logging_settings.py
│   │   ├── plotly_config.py
│   │   ├── security_utils.py
│   │   ├── security.py
│   │   ├── settings.py
│   │   ├── technical_constants.py
│   │   ├── tool_estimation.py
│   │   └── validation.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── technical_analysis.py
│   │   └── visualization.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── cache_manager.py
│   │   ├── cache.py
│   │   ├── django_adapter.py
│   │   ├── health.py
│   │   ├── models.py
│   │   ├── performance.py
│   │   ├── session_management.py
│   │   └── validation.py
│   ├── database
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── optimization.py
│   ├── dependencies.py
│   ├── domain
│   │   ├── __init__.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis.py
│   │   ├── events
│   │   │   └── __init__.py
│   │   ├── portfolio.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   ├── entities.py
│   │   │   ├── services.py
│   │   │   └── value_objects.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_service.py
│   │   ├── stock_analysis
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis_service.py
│   │   └── value_objects
│   │       ├── __init__.py
│   │       └── technical_indicators.py
│   ├── exceptions.py
│   ├── infrastructure
│   │   ├── __init__.py
│   │   ├── cache
│   │   │   └── __init__.py
│   │   ├── caching
│   │   │   ├── __init__.py
│   │   │   └── cache_management_service.py
│   │   ├── connection_manager.py
│   │   ├── data_fetching
│   │   │   ├── __init__.py
│   │   │   └── stock_data_service.py
│   │   ├── health
│   │   │   ├── __init__.py
│   │   │   └── health_checker.py
│   │   ├── persistence
│   │   │   ├── __init__.py
│   │   │   └── stock_repository.py
│   │   ├── providers
│   │   │   └── __init__.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   └── repositories.py
│   │   └── sse_optimizer.py
│   ├── langchain_tools
│   │   ├── __init__.py
│   │   ├── adapters.py
│   │   └── registry.py
│   ├── logging_config.py
│   ├── memory
│   │   ├── __init__.py
│   │   └── stores.py
│   ├── monitoring
│   │   ├── __init__.py
│   │   ├── health_check.py
│   │   ├── health_monitor.py
│   │   ├── integration_example.py
│   │   ├── metrics.py
│   │   ├── middleware.py
│   │   └── status_dashboard.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── dependencies.py
│   │   ├── factories
│   │   │   ├── __init__.py
│   │   │   ├── config_factory.py
│   │   │   └── provider_factory.py
│   │   ├── implementations
│   │   │   ├── __init__.py
│   │   │   ├── cache_adapter.py
│   │   │   ├── macro_data_adapter.py
│   │   │   ├── market_data_adapter.py
│   │   │   ├── persistence_adapter.py
│   │   │   └── stock_data_adapter.py
│   │   ├── interfaces
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   ├── config.py
│   │   │   ├── macro_data.py
│   │   │   ├── market_data.py
│   │   │   ├── persistence.py
│   │   │   └── stock_data.py
│   │   ├── llm_factory.py
│   │   ├── macro_data.py
│   │   ├── market_data.py
│   │   ├── mocks
│   │   │   ├── __init__.py
│   │   │   ├── mock_cache.py
│   │   │   ├── mock_config.py
│   │   │   ├── mock_macro_data.py
│   │   │   ├── mock_market_data.py
│   │   │   ├── mock_persistence.py
│   │   │   └── mock_stock_data.py
│   │   ├── openrouter_provider.py
│   │   ├── optimized_screening.py
│   │   ├── optimized_stock_data.py
│   │   └── stock_data.py
│   ├── README.md
│   ├── tests
│   │   ├── __init__.py
│   │   ├── README_INMEMORY_TESTS.md
│   │   ├── test_cache_debug.py
│   │   ├── test_fixes_validation.py
│   │   ├── test_in_memory_routers.py
│   │   ├── test_in_memory_server.py
│   │   ├── test_macro_data_provider.py
│   │   ├── test_mailgun_email.py
│   │   ├── test_market_calendar_caching.py
│   │   ├── test_mcp_tool_fixes_pytest.py
│   │   ├── test_mcp_tool_fixes.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_models_functional.py
│   │   ├── test_server.py
│   │   ├── test_stock_data_enhanced.py
│   │   ├── test_stock_data_provider.py
│   │   └── test_technical_analysis.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── performance_monitoring.py
│   │   ├── portfolio_manager.py
│   │   ├── risk_management.py
│   │   └── sentiment_analysis.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── agent_errors.py
│   │   ├── batch_processing.py
│   │   ├── cache_warmer.py
│   │   ├── circuit_breaker_decorators.py
│   │   ├── circuit_breaker_services.py
│   │   ├── circuit_breaker.py
│   │   ├── data_chunking.py
│   │   ├── database_monitoring.py
│   │   ├── debug_utils.py
│   │   ├── fallback_strategies.py
│   │   ├── llm_optimization.py
│   │   ├── logging_example.py
│   │   ├── logging_init.py
│   │   ├── logging.py
│   │   ├── mcp_logging.py
│   │   ├── memory_profiler.py
│   │   ├── monitoring_middleware.py
│   │   ├── monitoring.py
│   │   ├── orchestration_logging.py
│   │   ├── parallel_research.py
│   │   ├── parallel_screening.py
│   │   ├── quick_cache.py
│   │   ├── resource_manager.py
│   │   ├── shutdown.py
│   │   ├── stock_helpers.py
│   │   ├── structured_logger.py
│   │   ├── tool_monitoring.py
│   │   ├── tracing.py
│   │   └── yfinance_pool.py
│   ├── validation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── data.py
│   │   ├── middleware.py
│   │   ├── portfolio.py
│   │   ├── responses.py
│   │   ├── screening.py
│   │   └── technical.py
│   └── workflows
│       ├── __init__.py
│       ├── agents
│       │   ├── __init__.py
│       │   ├── market_analyzer.py
│       │   ├── optimizer_agent.py
│       │   ├── strategy_selector.py
│       │   └── validator_agent.py
│       ├── backtesting_workflow.py
│       └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── dev.sh
│   ├── INSTALLATION_GUIDE.md
│   ├── load_example.py
│   ├── load_market_data.py
│   ├── load_tiingo_data.py
│   ├── migrate_db.py
│   ├── README_TIINGO_LOADER.md
│   ├── requirements_tiingo.txt
│   ├── run_stock_screening.py
│   ├── run-migrations.sh
│   ├── seed_db.py
│   ├── seed_sp500.py
│   ├── setup_database.sh
│   ├── setup_self_contained.py
│   ├── setup_sp500_database.sh
│   ├── test_seeded_data.py
│   ├── test_tiingo_loader.py
│   ├── tiingo_config.py
│   └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── core
│   │   └── test_technical_analysis.py
│   ├── data
│   │   └── test_portfolio_models.py
│   ├── domain
│   │   ├── conftest.py
│   │   ├── test_portfolio_entities.py
│   │   └── test_technical_analysis_service.py
│   ├── fixtures
│   │   └── orchestration_fixtures.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── README.md
│   │   ├── run_integration_tests.sh
│   │   ├── test_api_technical.py
│   │   ├── test_chaos_engineering.py
│   │   ├── test_config_management.py
│   │   ├── test_full_backtest_workflow_advanced.py
│   │   ├── test_full_backtest_workflow.py
│   │   ├── test_high_volume.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_orchestration_complete.py
│   │   ├── test_portfolio_persistence.py
│   │   ├── test_redis_cache.py
│   │   ├── test_security_integration.py.disabled
│   │   └── vcr_setup.py
│   ├── performance
│   │   ├── __init__.py
│   │   ├── test_benchmarks.py
│   │   ├── test_load.py
│   │   ├── test_profiling.py
│   │   └── test_stress.py
│   ├── providers
│   │   └── test_stock_data_simple.py
│   ├── README.md
│   ├── test_agents_router_mcp.py
│   ├── test_backtest_persistence.py
│   ├── test_cache_management_service.py
│   ├── test_cache_serialization.py
│   ├── test_circuit_breaker.py
│   ├── test_database_pool_config_simple.py
│   ├── test_database_pool_config.py
│   ├── test_deep_research_functional.py
│   ├── test_deep_research_integration.py
│   ├── test_deep_research_parallel_execution.py
│   ├── test_error_handling.py
│   ├── test_event_loop_integrity.py
│   ├── test_exa_research_integration.py
│   ├── test_exception_hierarchy.py
│   ├── test_financial_search.py
│   ├── test_graceful_shutdown.py
│   ├── test_integration_simple.py
│   ├── test_langgraph_workflow.py
│   ├── test_market_data_async.py
│   ├── test_market_data_simple.py
│   ├── test_mcp_orchestration_functional.py
│   ├── test_ml_strategies.py
│   ├── test_optimized_research_agent.py
│   ├── test_orchestration_integration.py
│   ├── test_orchestration_logging.py
│   ├── test_orchestration_tools_simple.py
│   ├── test_parallel_research_integration.py
│   ├── test_parallel_research_orchestrator.py
│   ├── test_parallel_research_performance.py
│   ├── test_performance_optimizations.py
│   ├── test_production_validation.py
│   ├── test_provider_architecture.py
│   ├── test_rate_limiting_enhanced.py
│   ├── test_runner_validation.py
│   ├── test_security_comprehensive.py.disabled
│   ├── test_security_cors.py
│   ├── test_security_enhancements.py.disabled
│   ├── test_security_headers.py
│   ├── test_security_penetration.py
│   ├── test_session_management.py
│   ├── test_speed_optimization_validation.py
│   ├── test_stock_analysis_dependencies.py
│   ├── test_stock_analysis_service.py
│   ├── test_stock_data_fetching_service.py
│   ├── test_supervisor_agent.py
│   ├── test_supervisor_functional.py
│   ├── test_tool_estimation_config.py
│   ├── test_visualization.py
│   └── utils
│       ├── test_agent_errors.py
│       ├── test_logging.py
│       ├── test_parallel_screening.py
│       └── test_quick_cache.py
├── tools
│   ├── check_orchestration_config.py
│   ├── experiments
│   │   ├── validation_examples.py
│   │   └── validation_fixed.py
│   ├── fast_dev.sh
│   ├── hot_reload.py
│   ├── quick_test.py
│   └── templates
│       ├── new_router_template.py
│       ├── new_tool_template.py
│       ├── screening_strategy_template.py
│       └── test_template.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------

```markdown
  1 | ## 📋 Pull Request Summary
  2 | 
  3 | **Brief description of changes:**
  4 | A concise description of what this PR accomplishes.
  5 | 
  6 | **Related issue(s):**
  7 | - Fixes #(issue)
  8 | - Closes #(issue)
  9 | - Addresses #(issue)
 10 | 
 11 | ## 💰 Financial Disclaimer Acknowledgment
 12 | 
 13 | - [ ] I understand this is educational software and not financial advice
 14 | - [ ] Any financial analysis features include appropriate disclaimers
 15 | - [ ] This PR maintains the educational/personal-use focus of the project
 16 | 
 17 | ## 🔄 Type of Change
 18 | 
 19 | - [ ] 🐛 Bug fix (non-breaking change that fixes an issue)
 20 | - [ ] ✨ New feature (non-breaking change that adds functionality)
 21 | - [ ] 💥 Breaking change (fix or feature that would cause existing functionality to not work as expected)
 22 | - [ ] 📚 Documentation update (improvements to documentation)
 23 | - [ ] 🔧 Refactor (code changes that neither fix bugs nor add features)
 24 | - [ ] ⚡ Performance improvement
 25 | - [ ] 🧹 Chore (dependencies, build tools, etc.)
 26 | 
 27 | ## 🎯 Component Areas
 28 | 
 29 | **Primary areas affected:**
 30 | - [ ] Data fetching (Tiingo, Yahoo Finance, FRED)
 31 | - [ ] Technical analysis calculations
 32 | - [ ] Stock screening strategies  
 33 | - [ ] Portfolio analysis and optimization
 34 | - [ ] MCP server/tools implementation
 35 | - [ ] Database operations and models
 36 | - [ ] Caching (Redis/in-memory)
 37 | - [ ] Claude Desktop integration
 38 | - [ ] Development tools and setup
 39 | - [ ] Documentation and examples
 40 | 
 41 | ## 🔧 Implementation Details
 42 | 
 43 | **Technical approach:**
 44 | Describe the technical approach and any architectural decisions.
 45 | 
 46 | **Key changes:**
 47 | - Changed X to improve Y
 48 | - Added new function Z for feature A
 49 | - Refactored B to better handle C
 50 | 
 51 | **Dependencies:**
 52 | - [ ] No new dependencies added
 53 | - [ ] New dependencies added (list below)
 54 | - [ ] Dependencies removed (list below)
 55 | 
 56 | **New dependencies added:**
 57 | - package-name==version (reason for adding)
 58 | 
 59 | ## 🧪 Testing
 60 | 
 61 | **Testing performed:**
 62 | - [ ] Unit tests added/updated
 63 | - [ ] Integration tests added/updated  
 64 | - [ ] Manual testing completed
 65 | - [ ] Tested with Claude Desktop
 66 | - [ ] Tested with different data sources
 67 | - [ ] Performance testing completed
 68 | 
 69 | **Test scenarios covered:**
 70 | - [ ] Happy path functionality
 71 | - [ ] Error handling and edge cases
 72 | - [ ] Data validation and sanitization
 73 | - [ ] API rate limiting compliance
 74 | - [ ] Database operations
 75 | - [ ] Cache behavior
 76 | 
 77 | **Manual testing:**
 78 | ```bash
 79 | # Commands used for testing
 80 | make test
 81 | make test-integration
 82 | # etc.
 83 | ```
 84 | 
 85 | ## 📊 Financial Analysis Impact
 86 | 
 87 | **Financial calculations:**
 88 | - [ ] No financial calculations affected
 89 | - [ ] New financial calculations added (validated for accuracy)
 90 | - [ ] Existing calculations modified (thoroughly tested)
 91 | - [ ] All calculations include appropriate disclaimers
 92 | 
 93 | **Data providers:**
 94 | - [ ] No data provider changes
 95 | - [ ] New data provider integration
 96 | - [ ] Existing provider modifications
 97 | - [ ] Rate limiting compliance verified
 98 | 
 99 | **Market data handling:**
100 | - [ ] Historical data processing
101 | - [ ] Real-time data integration
102 | - [ ] Technical indicator calculations
103 | - [ ] Screening algorithm changes
104 | 
105 | ## 🔒 Security Considerations
106 | 
107 | **Security checklist:**
108 | - [ ] No hardcoded secrets or credentials
109 | - [ ] Input validation implemented
110 | - [ ] Error handling doesn't leak sensitive information
111 | - [ ] API keys handled securely via environment variables
112 | - [ ] SQL injection prevention verified
113 | - [ ] Rate limiting respected for external APIs
114 | 
115 | ## 📚 Documentation
116 | 
117 | **Documentation updates:**
118 | - [ ] Code comments added/updated
119 | - [ ] README.md updated (if needed)
120 | - [ ] API documentation updated
121 | - [ ] Examples/tutorials added
122 | - [ ] Financial disclaimers included where appropriate
123 | 
124 | **Breaking changes documentation:**
125 | - [ ] No breaking changes
126 | - [ ] Breaking changes documented in PR description
127 | - [ ] Migration guide provided
128 | - [ ] CHANGELOG.md updated
129 | 
130 | ## ✅ Pre-submission Checklist
131 | 
132 | **Code quality:**
133 | - [ ] Code follows the project style guide
134 | - [ ] Self-review of code completed
135 | - [ ] Tests added/updated and passing
136 | - [ ] No linting errors (`make lint`)
137 | - [ ] Type checking passes (`make typecheck`)
138 | - [ ] All tests pass (`make test`)
139 | 
140 | **Financial software standards:**
141 | - [ ] Financial disclaimers included where appropriate
142 | - [ ] No investment advice or guarantees provided
143 | - [ ] Educational purpose maintained
144 | - [ ] Data accuracy considerations documented
145 | - [ ] Risk warnings included for relevant features
146 | 
147 | **Community standards:**
148 | - [ ] PR title is descriptive and follows convention
149 | - [ ] Description clearly explains the changes
150 | - [ ] Related issues are linked
151 | - [ ] Screenshots/examples included (if applicable)
152 | - [ ] Ready for review
153 | 
154 | ## 📸 Screenshots/Examples
155 | 
156 | **Before and after (if applicable):**
157 | <!-- Add screenshots, CLI output, or code examples -->
158 | 
159 | **New functionality examples:**
160 | ```python
161 | # Example of new feature usage
162 | result = new_function(symbol="AAPL", period=20)
163 | print(result)
164 | ```
165 | 
166 | ## 🤝 Review Guidance
167 | 
168 | **Areas needing special attention:**
169 | - Focus on X because of Y
170 | - Pay attention to Z implementation
171 | - Verify A works correctly with B
172 | 
173 | **Questions for reviewers:**
174 | - Does the implementation approach make sense?
175 | - Are there any security concerns?
176 | - Is the documentation clear and complete?
177 | - Any suggestions for improvement?
178 | 
179 | ## 🚀 Deployment Notes
180 | 
181 | **Environment considerations:**
182 | - [ ] No environment changes required
183 | - [ ] New environment variables needed (documented)
184 | - [ ] Database migrations required
185 | - [ ] Cache invalidation needed
186 | 
187 | **Rollback plan:**
188 | - [ ] Changes are fully backward compatible
189 | - [ ] Database migrations are reversible
190 | - [ ] Rollback steps documented below
191 | 
192 | **Rollback steps (if needed):**
193 | 1. Step 1
194 | 2. Step 2
195 | 
196 | ## 🎓 Educational Impact
197 | 
198 | **Learning value:**
199 | - What financial concepts does this help teach?
200 | - How does this improve the developer experience?
201 | - What new capabilities does this enable for users?
202 | 
203 | **Community benefit:**
204 | - Who will benefit from these changes?
205 | - How does this advance the project's educational mission?
206 | - Any potential for broader community impact?
207 | 
208 | ---
209 | 
210 | **Additional Notes:**
211 | Any other information that would be helpful for reviewers.
```

--------------------------------------------------------------------------------
/tools/templates/new_router_template.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Template for creating new FastAPI routers.
  3 | 
  4 | Copy this file and modify it to create new routers quickly.
  5 | """
  6 | 
  7 | from typing import Any
  8 | 
  9 | from fastapi import APIRouter, Depends, HTTPException, Query
 10 | from pydantic import BaseModel, Field
 11 | from sqlalchemy.orm import Session
 12 | 
 13 | from maverick_mcp.data.models import get_db
 14 | from maverick_mcp.utils.logging import get_logger
 15 | 
 16 | logger = get_logger(__name__)
 17 | 
 18 | # Create router instance
 19 | router = APIRouter(
 20 |     prefix="/your_domain",  # Change this to your domain
 21 |     tags=["your_domain"],  # Change this to your domain
 22 | )
 23 | 
 24 | 
 25 | # Request/Response models
 26 | class YourRequest(BaseModel):
 27 |     """Request model for your endpoint."""
 28 | 
 29 |     field1: str = Field(..., description="Description of field1")
 30 |     field2: int = Field(10, ge=0, description="Description of field2")
 31 |     field3: bool = Field(True, description="Description of field3")
 32 | 
 33 | 
 34 | class YourResponse(BaseModel):
 35 |     """Response model for your endpoint."""
 36 | 
 37 |     status: str = Field(..., description="Operation status")
 38 |     result: dict[str, Any] = Field(..., description="Operation result")
 39 |     message: str | None = Field(None, description="Optional message")
 40 | 
 41 | 
 42 | # Endpoints
 43 | @router.get("/", response_model=list[YourResponse])
 44 | async def list_items(
 45 |     limit: int = Query(10, ge=1, le=100, description="Number of items to return"),
 46 |     offset: int = Query(0, ge=0, description="Number of items to skip"),
 47 |     db: Session = Depends(get_db),
 48 | ):
 49 |     """
 50 |     List items with pagination.
 51 | 
 52 |     Args:
 53 |         limit: Maximum number of items to return
 54 |         offset: Number of items to skip
 55 |         db: Database session
 56 | 
 57 |     Returns:
 58 |         List of items
 59 |     """
 60 |     try:
 61 |         # Your business logic here
 62 |         items = []  # Fetch from database
 63 | 
 64 |         logger.info(
 65 |             f"Listed {len(items)} items",
 66 |             extra={"limit": limit, "offset": offset},
 67 |         )
 68 | 
 69 |         return items
 70 | 
 71 |     except Exception as e:
 72 |         logger.error(f"Error listing items: {e}", exc_info=True)
 73 |         raise HTTPException(status_code=500, detail="Internal server error")
 74 | 
 75 | 
 76 | @router.get("/{item_id}", response_model=YourResponse)
 77 | async def get_item(
 78 |     item_id: int,
 79 |     db: Session = Depends(get_db),
 80 | ):
 81 |     """
 82 |     Get a specific item by ID.
 83 | 
 84 |     Args:
 85 |         item_id: The item ID
 86 |         db: Database session
 87 | 
 88 |     Returns:
 89 |         The requested item
 90 |     """
 91 |     try:
 92 |         # Your business logic here
 93 |         item = None  # Fetch from database
 94 | 
 95 |         if not item:
 96 |             raise HTTPException(status_code=404, detail="Item not found")
 97 | 
 98 |         return YourResponse(
 99 |             status="success",
100 |             result={"id": item_id, "data": "example"},
101 |         )
102 | 
103 |     except HTTPException:
104 |         raise
105 |     except Exception as e:
106 |         logger.error(f"Error getting item {item_id}: {e}", exc_info=True)
107 |         raise HTTPException(status_code=500, detail="Internal server error")
108 | 
109 | 
110 | @router.post("/", response_model=YourResponse)
111 | async def create_item(
112 |     request: YourRequest,
113 |     db: Session = Depends(get_db),
114 | ):
115 |     """
116 |     Create a new item.
117 | 
118 |     Args:
119 |         request: The item data
120 |         db: Database session
121 | 
122 |     Returns:
123 |         The created item
124 |     """
125 |     try:
126 |         logger.info(
127 |             "Creating new item",
128 |             extra={"request": request.model_dump()},
129 |         )
130 | 
131 |         # Your business logic here
132 |         # Example: Create in database
133 |         # new_item = YourModel(**request.model_dump())
134 |         # db.add(new_item)
135 |         # db.commit()
136 | 
137 |         return YourResponse(
138 |             status="success",
139 |             result={"id": 1, "created": True},
140 |             message="Item created successfully",
141 |         )
142 | 
143 |     except Exception as e:
144 |         logger.error(f"Error creating item: {e}", exc_info=True)
145 |         db.rollback()
146 |         raise HTTPException(status_code=500, detail="Internal server error")
147 | 
148 | 
149 | @router.put("/{item_id}", response_model=YourResponse)
150 | async def update_item(
151 |     item_id: int,
152 |     request: YourRequest,
153 |     db: Session = Depends(get_db),
154 | ):
155 |     """
156 |     Update an existing item.
157 | 
158 |     Args:
159 |         item_id: The item ID
160 |         request: The updated data
161 |         db: Database session
162 | 
163 |     Returns:
164 |         The updated item
165 |     """
166 |     try:
167 |         # Your business logic here
168 |         # Example: Update in database
169 |         # item = db.query(YourModel).filter(YourModel.id == item_id).first()
170 |         # if not item:
171 |         #     raise HTTPException(status_code=404, detail="Item not found")
172 | 
173 |         # Update fields
174 |         # for key, value in request.model_dump().items():
175 |         #     setattr(item, key, value)
176 |         # db.commit()
177 | 
178 |         return YourResponse(
179 |             status="success",
180 |             result={"id": item_id, "updated": True},
181 |             message="Item updated successfully",
182 |         )
183 | 
184 |     except HTTPException:
185 |         raise
186 |     except Exception as e:
187 |         logger.error(f"Error updating item {item_id}: {e}", exc_info=True)
188 |         db.rollback()
189 |         raise HTTPException(status_code=500, detail="Internal server error")
190 | 
191 | 
192 | @router.delete("/{item_id}")
193 | async def delete_item(
194 |     item_id: int,
195 |     db: Session = Depends(get_db),
196 | ):
197 |     """
198 |     Delete an item.
199 | 
200 |     Args:
201 |         item_id: The item ID
202 |         db: Database session
203 | 
204 |     Returns:
205 |         Deletion confirmation
206 |     """
207 |     try:
208 |         # Your business logic here
209 |         # Example: Delete from database
210 |         # item = db.query(YourModel).filter(YourModel.id == item_id).first()
211 |         # if not item:
212 |         #     raise HTTPException(status_code=404, detail="Item not found")
213 | 
214 |         # db.delete(item)
215 |         # db.commit()
216 | 
217 |         return {"status": "success", "message": f"Item {item_id} deleted"}
218 | 
219 |     except HTTPException:
220 |         raise
221 |     except Exception as e:
222 |         logger.error(f"Error deleting item {item_id}: {e}", exc_info=True)
223 |         db.rollback()
224 |         raise HTTPException(status_code=500, detail="Internal server error")
225 | 
226 | 
227 | # Health check endpoint (note: the dynamic "/{item_id}" route above will shadow this path; register static routes first in real routers)
228 | @router.get("/health")
229 | async def health_check():
230 |     """Check if the router is healthy."""
231 |     return {
232 |         "status": "healthy",
233 |         "router": "your_domain",
234 |         "timestamp": "2024-01-01T00:00:00Z",
235 |     }
236 | 
```
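
Wiring a router built from this template into an application follows the standard FastAPI pattern. A minimal sketch, assuming a hypothetical `my_domain` module created from the template:

```python
# Hypothetical app wiring for a router copied from the template above.
from fastapi import FastAPI

# "my_domain" is a placeholder for whatever module you create from the template.
from maverick_mcp.api.routers.my_domain import router as my_domain_router

app = FastAPI()
# The router already carries its own prefix and tags, so no extra prefix is needed.
app.include_router(my_domain_router)
```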

--------------------------------------------------------------------------------
/tools/quick_test.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python
  2 | """
  3 | Quick test runner for Maverick-MCP.
  4 | 
  5 | This script allows rapid testing of individual functions or modules
  6 | without running the full test suite.
  7 | """
  8 | 
  9 | import asyncio
 10 | import os
 11 | import sys
 12 | import time
 13 | from pathlib import Path
 14 | 
 15 | # Add project root to Python path
 16 | project_root = Path(__file__).parent.parent
 17 | sys.path.insert(0, str(project_root))
 18 | 
 19 | # Set up minimal environment
 20 | os.environ.setdefault("AUTH_ENABLED", "false")
 21 | os.environ.setdefault("REDIS_HOST", "localhost")
 22 | os.environ.setdefault("DATABASE_URL", "sqlite:///:memory:")
 23 | 
 24 | 
 25 | def setup_test_environment():
 26 |     """Set up a minimal test environment."""
 27 |     # Configure logging
 28 |     import logging
 29 | 
 30 |     logging.basicConfig(
 31 |         level=logging.INFO,
 32 |         format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
 33 |     )
 34 | 
 35 |     # Disable noisy loggers
 36 |     for logger_name in ["httpx", "httpcore", "urllib3", "asyncio"]:
 37 |         logging.getLogger(logger_name).setLevel(logging.WARNING)
 38 | 
 39 | 
 40 | async def test_stock_data():
 41 |     """Quick test for stock data provider."""
 42 |     from maverick_mcp.providers.stock_data import StockDataProvider
 43 | 
 44 |     print("\n🧪 Testing StockDataProvider...")
 45 |     provider = StockDataProvider(use_cache=False)  # Skip cache for testing
 46 | 
 47 |     # Test getting stock data
 48 |     df = provider.get_stock_data("AAPL", "2024-01-01", "2024-01-10")
 49 |     print(f"✅ Got {len(df)} days of data for AAPL")
 50 |     print(f"   Columns: {list(df.columns)}")
 51 |     print(f"   Latest close: ${df['Close'].iloc[-1]:.2f}")
 52 | 
 53 | 
 54 | async def test_technical_analysis():
 55 |     """Quick test for technical analysis."""
 56 |     from maverick_mcp.core.technical_analysis import calculate_rsi, calculate_sma
 57 | 
 58 |     print("\n🧪 Testing Technical Analysis...")
 59 | 
 60 |     # Create sample data
 61 |     import pandas as pd
 62 | 
 63 |     prices = [100, 102, 101, 103, 105, 104, 106, 108, 107, 109] * 3
 64 |     df = pd.DataFrame({"Close": prices})
 65 | 
 66 |     # Test SMA
 67 |     sma = calculate_sma(df, period=5)
 68 |     print(f"✅ SMA calculated: {sma.iloc[-1]:.2f}")
 69 | 
 70 |     # Test RSI
 71 |     rsi = calculate_rsi(df, period=14)
 72 |     print(f"✅ RSI calculated: {rsi.iloc[-1]:.2f}")
 73 | 
 74 | 
 75 | async def test_auth_token():
 76 |     """Quick test for authentication token generation (disabled for personal use)."""
 77 |     print(
 78 |         "\n⚠️  Auth Token Test - Skipped (Authentication system removed for personal use)"
 79 |     )
 80 | 
 81 | 
 82 | async def run_custom_test():
 83 |     """
 84 |     Custom test function - modify this to test specific functionality.
 85 | 
 86 |     This is where you can quickly test any function or module.
 87 |     """
 88 |     print("\n🧪 Running custom test...")
 89 | 
 90 |     # Example: Test a specific function
 91 |     # from maverick_mcp.some_module import some_function
 92 |     # result = await some_function()
 93 |     # print(f"Result: {result}")
 94 | 
 95 |     print("✅ Custom test completed")
 96 | 
 97 | 
 98 | async def test_parallel_screening():
 99 |     """Test parallel screening performance improvement."""
100 |     print("\n🧪 Testing Parallel Screening Performance...")
101 | 
102 |     # Test symbols
103 |     test_symbols = ["AAPL", "MSFT", "GOOGL", "AMZN", "TSLA", "META", "NVDA", "JPM"]
104 | 
105 |     import time
106 | 
107 |     from maverick_mcp.utils.parallel_screening import (
108 |         ParallelScreener,
109 |         example_momentum_screen,
110 |     )
111 | 
112 |     # Sequential baseline
113 |     print("\n📊 Sequential screening (baseline):")
114 |     sequential_start = time.time()
115 |     sequential_results = []
116 |     for symbol in test_symbols:
117 |         result = example_momentum_screen(symbol)
118 |         sequential_results.append(result)
119 |         print(f"   {symbol}: {'✅' if result.get('passed') else '❌'}")
120 |     sequential_time = time.time() - sequential_start
121 | 
122 |     # Parallel screening
123 |     print("\n⚡ Parallel screening (4 workers):")
124 |     with ParallelScreener(max_workers=4) as screener:
125 |         parallel_start = time.time()
126 |         parallel_results = screener.screen_batch(
127 |             test_symbols, example_momentum_screen, batch_size=2
128 |         )
129 |         parallel_time = time.time() - parallel_start
130 | 
131 |     # Results
132 |     speedup = sequential_time / parallel_time
133 |     print("\n📈 Performance Results:")
134 |     print(f"   Sequential: {sequential_time:.2f}s")
135 |     print(f"   Parallel:   {parallel_time:.2f}s")
136 |     print(f"   Speedup:    {speedup:.1f}x")
137 |     print(
138 |         f"   Passed:     {len([r for r in parallel_results if r.get('passed')])} stocks"
139 |     )
140 | 
141 |     if speedup > 2:
142 |         print(f"\n🎉 Excellent! Achieved {speedup:.1f}x speedup!")
143 |     else:
144 |         print(f"\n✅ Good! Achieved {speedup:.1f}x speedup")
145 | 
146 | 
147 | async def main():
148 |     """Main test runner."""
149 |     import argparse
150 | 
151 |     parser = argparse.ArgumentParser(description="Quick test runner for Maverick-MCP")
152 |     parser.add_argument(
153 |         "--test",
154 |         choices=["stock", "technical", "auth", "custom", "parallel", "all"],
155 |         default="custom",
156 |         help="Which test to run",
157 |     )
158 |     parser.add_argument(
159 |         "--loop",
160 |         action="store_true",
161 |         help="Run test in a loop for performance testing",
162 |     )
163 |     parser.add_argument(
164 |         "--times",
165 |         type=int,
166 |         default=1,
167 |         help="Number of times to run the test",
168 |     )
169 |     args = parser.parse_args()
170 | 
171 |     setup_test_environment()
172 | 
173 |     print("🚀 Maverick-MCP Quick Test Runner")
174 |     print("=" * 50)
175 | 
176 |     # Map test names to functions
177 |     tests = {
178 |         "stock": test_stock_data,
179 |         "technical": test_technical_analysis,
180 |         "auth": test_auth_token,
181 |         "custom": run_custom_test,
182 |         "parallel": test_parallel_screening,
183 |     }
184 | 
185 |     # Run selected tests
186 |     start_time = time.time()
187 | 
188 |     for i in range(args.times):
189 |         if args.times > 1:
190 |             print(f"\n🔄 Run {i + 1}/{args.times}")
191 | 
192 |         if args.test == "all":
193 |             for test_name, test_func in tests.items():
194 |                 if test_name != "custom":  # Skip custom in "all" mode
195 |                     await test_func()
196 |         else:
197 |             await tests[args.test]()
198 | 
199 |         if args.loop and i < args.times - 1:
200 |             await asyncio.sleep(0.1)  # Small delay between runs
201 | 
202 |     elapsed = time.time() - start_time
203 |     print(f"\n⏱️  Total time: {elapsed:.2f} seconds")
204 | 
205 |     if args.times > 1:
206 |         print(f"📊 Average time per run: {elapsed / args.times:.2f} seconds")
207 | 
208 | 
209 | if __name__ == "__main__":
210 |     asyncio.run(main())
211 | 
```
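
The quickest way to use this runner is to fill in `run_custom_test()` and invoke `python tools/quick_test.py --test custom` (add `--times 3` to average timings over several runs). A minimal sketch of a filled-in custom test, reusing the same `StockDataProvider` API exercised elsewhere in this script:

```python
async def run_custom_test():
    """Custom test: fetch a different symbol on the cache-less path."""
    from maverick_mcp.providers.stock_data import StockDataProvider

    print("\n🧪 Running custom test...")
    provider = StockDataProvider(use_cache=False)  # skip cache, as in test_stock_data
    df = provider.get_stock_data("MSFT", "2024-01-01", "2024-01-31")
    print(f"✅ Got {len(df)} rows; latest close: ${df['Close'].iloc[-1]:.2f}")
```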

--------------------------------------------------------------------------------
/tools/experiments/validation_examples.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Validation examples for testing the agentic workflow improvements.
  3 | 
  4 | This file demonstrates the 4 validation tasks:
  5 | 1. Add a new technical indicator tool
  6 | 2. Debug authentication issues
  7 | 3. Run tests for stock data provider
  8 | 4. Create a new screening strategy
  9 | """
 10 | 
 11 | import os
 12 | import subprocess
 13 | from typing import Any
 14 | 
 15 | import pandas as pd
 16 | 
 17 | from maverick_mcp.core.technical_analysis import calculate_sma
 18 | from maverick_mcp.providers.stock_data import StockDataProvider
 19 | from maverick_mcp.utils.agent_errors import agent_friendly_errors
 20 | from maverick_mcp.utils.quick_cache import get_cache_stats, quick_cache
 21 | 
 22 | print("🎯 Maverick-MCP Validation Examples")
 23 | print("=" * 60)
 24 | 
 25 | 
 26 | # Validation 1: Add a new technical indicator tool
 27 | print("\n📊 1. Adding a new technical indicator (Stochastic Oscillator)...")
 28 | 
 29 | 
 30 | def calculate_stochastic(
 31 |     df: pd.DataFrame, k_period: int = 14, d_period: int = 3
 32 | ) -> pd.DataFrame:
 33 |     """Calculate Stochastic Oscillator (%K and %D)."""
 34 |     high_roll = df["High"].rolling(k_period)
 35 |     low_roll = df["Low"].rolling(k_period)
 36 | 
 37 |     # %K = (Current Close - Lowest Low) / (Highest High - Lowest Low) * 100
 38 |     k_percent = 100 * (
 39 |         (df["Close"] - low_roll.min()) / (high_roll.max() - low_roll.min())
 40 |     )
 41 | 
 42 |     # %D = 3-day SMA of %K
 43 |     d_percent = k_percent.rolling(d_period).mean()
 44 | 
 45 |     result = pd.DataFrame({"%K": k_percent, "%D": d_percent})
 46 |     return result
 47 | 
 48 | 
 49 | # Mock tool registration (would normally use @mcp.tool())
 50 | @agent_friendly_errors
 51 | def get_stochastic_analysis(symbol: str, period: int = 14) -> dict[str, Any]:
 52 |     """
 53 |     Get Stochastic Oscillator analysis for a stock.
 54 | 
 55 |     This demonstrates adding a new technical indicator tool.
 56 |     """
 57 |     # Fetch price history to feed the indicator
 58 |     provider = StockDataProvider(use_cache=False)
 59 |     data = provider.get_stock_data(symbol, "2023-10-01", "2024-01-01")
 60 | 
 61 |     stoch = calculate_stochastic(data, k_period=period)
 62 | 
 63 |     current_k = stoch["%K"].iloc[-1]
 64 |     current_d = stoch["%D"].iloc[-1]
 65 | 
 66 |     # Determine signal
 67 |     signal = "neutral"
 68 |     if current_k > 80:
 69 |         signal = "overbought"
 70 |     elif current_k < 20:
 71 |         signal = "oversold"
 72 |     elif current_k > current_d:
 73 |         signal = "bullish_crossover"
 74 |     elif current_k < current_d:
 75 |         signal = "bearish_crossover"
 76 | 
 77 |     result = {
 78 |         "symbol": symbol,
 79 |         "stochastic_k": round(current_k, 2),
 80 |         "stochastic_d": round(current_d, 2),
 81 |         "signal": signal,
 82 |         "period": period,
 83 |     }
 84 | 
 85 |     print(
 86 |         f"✅ Stochastic indicator added: {symbol} - %K={result['stochastic_k']}, Signal={signal}"
 87 |     )
 88 |     return result
 89 | 
 90 | 
 91 | # Test the new indicator
 92 | try:
 93 |     stoch_result = get_stochastic_analysis("AAPL", period=14)
 94 | except Exception as e:
 95 |     print(f"❌ Error testing stochastic: {e}")
 96 | 
 97 | 
 98 | # Validation 2: Debug authentication
 99 | print("\n🔐 2. Debugging authentication...")
100 | 
101 | os.environ["AUTH_ENABLED"] = "false"  # Disable for testing
102 | 
103 | 
104 | # Test with agent error handler
105 | @agent_friendly_errors(reraise=False)
106 | def test_auth_error():
107 |     """Simulate an auth error to test error handling."""
108 |     # This would normally raise 401 Unauthorized
109 |     raise ValueError("401 Unauthorized")
110 | 
111 | 
112 | auth_result = test_auth_error()
113 | if isinstance(auth_result, dict) and "fix_suggestion" in auth_result:
114 |     print(f"✅ Auth error caught with fix: {auth_result['fix_suggestion']['fix']}")
115 | else:
116 |     print("✅ Auth disabled for development")
117 | 
118 | 
119 | # Validation 3: Run tests for stock data provider
120 | print("\n🧪 3. Running stock data provider tests...")
121 | 
122 | # Quick test using our test runner
123 | result = subprocess.run(
124 |     ["python", "tools/quick_test.py", "--test", "stock"],
125 |     capture_output=True,
126 |     text=True,
127 |     timeout=10,
128 | )
129 | 
130 | if result.returncode == 0:
131 |     print("✅ Stock data tests passed")
132 |     # Show last few lines of output
133 |     lines = result.stdout.strip().split("\n")
134 |     for line in lines[-3:]:
135 |         print(f"   {line}")
136 | else:
137 |     print(f"❌ Stock data tests failed: {result.stderr}")
138 | 
139 | 
140 | # Validation 4: Create a new screening strategy
141 | print("\n🔍 4. Creating a new screening strategy (Golden Cross)...")
142 | 
143 | 
144 | @quick_cache(ttl_seconds=300)  # Cache for 5 minutes
145 | def screen_golden_cross(symbol: str) -> dict[str, Any]:
146 |     """
147 |     Screen for Golden Cross pattern (50 SMA crosses above 200 SMA).
148 |     """
149 |     provider = StockDataProvider(use_cache=False)
150 |     data = provider.get_stock_data(symbol, "2023-01-01", "2024-01-01")
151 | 
152 |     if len(data) < 200:
153 |         return {"symbol": symbol, "passed": False, "reason": "Insufficient data"}
154 | 
155 |     # Calculate SMAs
156 |     sma_50 = calculate_sma(data, 50)
157 |     sma_200 = calculate_sma(data, 200)
158 | 
159 |     # Check for golden cross in last 10 days
160 |     golden_cross = False
161 |     for i in range(-10, 0):
162 |         if (
163 |             sma_50.iloc[i - 1] <= sma_200.iloc[i - 1]
164 |             and sma_50.iloc[i] > sma_200.iloc[i]
165 |         ):
166 |             golden_cross = True
167 |             break
168 | 
169 |     return {
170 |         "symbol": symbol,
171 |         "passed": golden_cross,
172 |         "current_price": round(data["Close"].iloc[-1], 2),
173 |         "sma_50": round(sma_50.iloc[-1], 2),
174 |         "sma_200": round(sma_200.iloc[-1], 2),
175 |         "above_50": data["Close"].iloc[-1] > sma_50.iloc[-1],
176 |         "above_200": data["Close"].iloc[-1] > sma_200.iloc[-1],
177 |     }
178 | 
179 | 
180 | # Test the new screening strategy
181 | test_symbols = ["AAPL", "MSFT", "GOOGL"]
182 | golden_cross_results = []
183 | 
184 | for symbol in test_symbols:
185 |     try:
186 |         result = screen_golden_cross(symbol)
187 |         golden_cross_results.append(result)
188 |         status = "✅ Golden Cross" if result["passed"] else "❌ No Golden Cross"
189 |         print(f"   {symbol}: {status} (Price=${result['current_price']})")
190 |     except Exception as e:
191 |         print(f"   {symbol}: ❌ Error - {e}")
192 | 
193 | 
194 | # Summary
195 | print("\n" + "=" * 60)
196 | print("🎉 Validation Summary:")
197 | print("   1. New Indicator Tool: ✅ Stochastic Oscillator added")
198 | print("   2. Auth Debugging: ✅ Error handler provides helpful fixes")
199 | print("   3. Test Running: ✅ Stock data tests executed")
200 | print("   4. New Screening: ✅ Golden Cross strategy created")
201 | print("\n✨ All validations completed successfully!")
202 | 
203 | # Cache stats
204 | cache_stats = get_cache_stats()
205 | if cache_stats["total"] > 0:
206 |     print(
207 |         f"\n📊 Cache Performance: {cache_stats['hit_rate']}% hit rate ({cache_stats['hits']} hits)"
208 |     )
209 | 
```
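
The `calculate_stochastic` helper above can be sanity-checked on synthetic bars without hitting a data provider. A minimal sketch, assuming it runs in the same module (or with the function imported); on a steady uptrend the close sits near the top of its 14-bar range, so %K should read overbought:

```python
import pandas as pd

# Synthetic uptrending OHLC bars: close climbs one point per bar.
df = pd.DataFrame({
    "High": [i + 1.0 for i in range(30)],
    "Low": [i - 1.0 for i in range(30)],
    "Close": [float(i) for i in range(30)],
})

stoch = calculate_stochastic(df, k_period=14, d_period=3)
assert stoch["%K"].iloc[-1] > 80  # close near the top of the rolling range -> overbought
```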

--------------------------------------------------------------------------------
/maverick_mcp/langchain_tools/registry.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tool registry for managing MCP and LangChain tools.
  3 | """
  4 | 
  5 | import logging
  6 | from collections.abc import Callable
  7 | from typing import Any
  8 | 
  9 | from langchain_core.tools import BaseTool
 10 | 
 11 | from .adapters import mcp_to_langchain_adapter
 12 | 
 13 | logger = logging.getLogger(__name__)
 14 | 
 15 | 
 16 | class ToolRegistry:
 17 |     """Registry for managing tools from different sources."""
 18 | 
 19 |     def __init__(self):
 20 |         self.mcp_tools: dict[str, Callable] = {}
 21 |         self.langchain_tools: dict[str, BaseTool] = {}
 22 |         self.tool_metadata: dict[str, dict[str, Any]] = {}
 23 | 
 24 |     def register_mcp_tool(
 25 |         self,
 26 |         func: Callable,
 27 |         name: str | None = None,
 28 |         description: str | None = None,
 29 |         persona_aware: bool = False,
 30 |         categories: list[str] | None = None,
 31 |     ) -> None:
 32 |         """
 33 |         Register an MCP tool function.
 34 | 
 35 |         Args:
 36 |             func: The MCP tool function
 37 |             name: Optional custom name
 38 |             description: Optional description
 39 |             persona_aware: Whether tool should adapt to personas
 40 |             categories: Tool categories for organization
 41 |         """
 42 |         tool_name = name or func.__name__
 43 |         self.mcp_tools[tool_name] = func
 44 | 
 45 |         # Store metadata
 46 |         self.tool_metadata[tool_name] = {
 47 |             "source": "mcp",
 48 |             "description": description or func.__doc__,
 49 |             "persona_aware": persona_aware,
 50 |             "categories": categories or [],
 51 |             "original_func": func,
 52 |         }
 53 | 
 54 |         logger.info(f"Registered MCP tool: {tool_name}")
 55 | 
 56 |     def register_langchain_tool(
 57 |         self, tool: BaseTool, categories: list[str] | None = None
 58 |     ) -> None:
 59 |         """
 60 |         Register a LangChain tool.
 61 | 
 62 |         Args:
 63 |             tool: The LangChain tool
 64 |             categories: Tool categories for organization
 65 |         """
 66 |         self.langchain_tools[tool.name] = tool
 67 | 
 68 |         # Store metadata
 69 |         self.tool_metadata[tool.name] = {
 70 |             "source": "langchain",
 71 |             "description": tool.description,
 72 |             "persona_aware": tool.metadata.get("persona_aware", False)
 73 |             if hasattr(tool, "metadata") and tool.metadata
 74 |             else False,
 75 |             "categories": categories or [],
 76 |             "tool_instance": tool,
 77 |         }
 78 | 
 79 |         logger.info(f"Registered LangChain tool: {tool.name}")
 80 | 
 81 |     def get_tool(self, name: str, as_langchain: bool = True) -> Any | None:
 82 |         """
 83 |         Get a tool by name.
 84 | 
 85 |         Args:
 86 |             name: Tool name
 87 |             as_langchain: Whether to return as LangChain tool
 88 | 
 89 |         Returns:
 90 |             Tool instance or function
 91 |         """
 92 |         # Check if it's already a LangChain tool
 93 |         if name in self.langchain_tools:
 94 |             return self.langchain_tools[name]
 95 | 
 96 |         # Check if it's an MCP tool
 97 |         if name in self.mcp_tools:
 98 |             if as_langchain:
 99 |                 # Convert to LangChain tool on demand
100 |                 metadata = self.tool_metadata[name]
101 |                 return mcp_to_langchain_adapter(
102 |                     self.mcp_tools[name],
103 |                     name=name,
104 |                     description=metadata["description"],
105 |                     persona_aware=metadata["persona_aware"],
106 |                 )
107 |             else:
108 |                 return self.mcp_tools[name]
109 | 
110 |         return None
111 | 
112 |     def get_tools_by_category(
113 |         self, category: str, as_langchain: bool = True
114 |     ) -> list[Any]:
115 |         """
116 |         Get all tools in a category.
117 | 
118 |         Args:
119 |             category: Category name
120 |             as_langchain: Whether to return as LangChain tools
121 | 
122 |         Returns:
123 |             List of tools
124 |         """
125 |         tools = []
126 | 
127 |         for name, metadata in self.tool_metadata.items():
128 |             if category in metadata.get("categories", []):
129 |                 tool = self.get_tool(name, as_langchain=as_langchain)
130 |                 if tool:
131 |                     tools.append(tool)
132 | 
133 |         return tools
134 | 
135 |     def get_all_tools(self, as_langchain: bool = True) -> list[Any]:
136 |         """
137 |         Get all registered tools.
138 | 
139 |         Args:
140 |             as_langchain: Whether to return as LangChain tools
141 | 
142 |         Returns:
143 |             List of all tools
144 |         """
145 |         tools: list[Any] = []
146 | 
147 |         # Add all LangChain tools
148 |         if as_langchain:
149 |             tools.extend(self.langchain_tools.values())
150 | 
151 |         # Add all MCP tools
152 |         for name in self.mcp_tools:
153 |             if name not in self.langchain_tools:  # Avoid duplicates
154 |                 tool = self.get_tool(name, as_langchain=as_langchain)
155 |                 if tool:
156 |                     tools.append(tool)
157 | 
158 |         return tools
159 | 
160 |     def get_persona_aware_tools(self, as_langchain: bool = True) -> list[Any]:
161 |         """Get all persona-aware tools."""
162 |         tools = []
163 | 
164 |         for name, metadata in self.tool_metadata.items():
165 |             if metadata.get("persona_aware", False):
166 |                 tool = self.get_tool(name, as_langchain=as_langchain)
167 |                 if tool:
168 |                     tools.append(tool)
169 | 
170 |         return tools
171 | 
172 |     def list_tools(self) -> dict[str, dict[str, Any]]:
173 |         """List all tools with their metadata."""
174 |         return {
175 |             name: {
176 |                 "description": meta["description"],
177 |                 "source": meta["source"],
178 |                 "persona_aware": meta["persona_aware"],
179 |                 "categories": meta["categories"],
180 |             }
181 |             for name, meta in self.tool_metadata.items()
182 |         }
183 | 
184 | 
185 | # Global registry instance
186 | _tool_registry = None
187 | 
188 | 
189 | def get_tool_registry() -> ToolRegistry:
190 |     """Get the global tool registry instance."""
191 |     global _tool_registry
192 |     if _tool_registry is None:
193 |         _tool_registry = ToolRegistry()
194 |         _initialize_default_tools()
195 |     return _tool_registry
196 | 
197 | 
198 | def _initialize_default_tools():
199 |     """Initialize registry with default MCP tools."""
200 |     get_tool_registry()  # ensure the registry exists; actual tool registration is pending (see TODO)
201 | 
202 |     try:
203 |         # TODO: Fix router tool registration
204 |         # The router tools are FastMCP FunctionTool instances, not plain Callables
205 |         # Need to extract the underlying function or adapt the registration approach
206 | 
207 |         logger.info("Tool registry initialized (router tools registration pending)")
208 | 
209 |     except ImportError as e:
210 |         logger.warning(f"Could not import default MCP tools: {e}")
211 | 
```
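
Typical use of the registry is to register a plain function and get it back as a LangChain tool on demand. A minimal sketch against the API shown above; `fetch_rsi` is a hypothetical tool function:

```python
from maverick_mcp.langchain_tools.registry import get_tool_registry


def fetch_rsi(symbol: str, period: int = 14) -> dict:
    """Hypothetical MCP tool: return an RSI reading for a symbol."""
    return {"symbol": symbol, "period": period, "rsi": 51.2}


registry = get_tool_registry()
registry.register_mcp_tool(fetch_rsi, categories=["technical"])

# Converted to a LangChain BaseTool via mcp_to_langchain_adapter on demand.
rsi_tool = registry.get_tool("fetch_rsi", as_langchain=True)
technical_tools = registry.get_tools_by_category("technical")
```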

--------------------------------------------------------------------------------
/maverick_mcp/utils/tool_monitoring.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tool execution monitoring utilities.
  3 | 
  4 | This module provides functions for monitoring tool execution,
  5 | including timing, error handling, and performance analysis.
  6 | """
  7 | 
  8 | from __future__ import annotations
  9 | 
 10 | import time
 11 | from collections.abc import Awaitable, Callable
 12 | from dataclasses import dataclass
 13 | from typing import Any
 14 | 
 15 | from maverick_mcp.utils.logging import get_logger
 16 | 
 17 | logger = get_logger("maverick_mcp.utils.tool_monitoring")
 18 | 
 19 | 
 20 | @dataclass
 21 | class ExecutionResult:
 22 |     """Result of tool execution monitoring."""
 23 | 
 24 |     result: Any
 25 |     execution_time: float
 26 |     success: bool
 27 |     error: Exception | None = None
 28 | 
 29 | 
 30 | class ToolMonitor:
 31 |     """Monitors tool execution for performance and errors."""
 32 | 
 33 |     def __init__(self, tool_name: str, user_id: int | None = None):
 34 |         """
 35 |         Initialize tool monitor.
 36 | 
 37 |         Args:
 38 |             tool_name: Name of the tool being monitored
 39 |             user_id: ID of the user executing the tool
 40 |         """
 41 |         self.tool_name = tool_name
 42 |         self.user_id = user_id
 43 | 
 44 |     async def execute_with_monitoring(
 45 |         self,
 46 |         func: Callable[..., Awaitable[Any]],
 47 |         args: tuple[Any, ...],
 48 |         kwargs: dict[str, Any],
 49 |         estimation: dict[str, Any] | None = None,
 50 |     ) -> ExecutionResult:
 51 |         """
 52 |         Execute a tool function with comprehensive monitoring.
 53 | 
 54 |         Args:
 55 |             func: The async function to execute
 56 |             args: Positional arguments for the function
 57 |             kwargs: Keyword arguments for the function
 58 |             estimation: Optional estimation data for comparison
 59 | 
 60 |         Returns:
 61 |             ExecutionResult: Contains result, timing, and error information
 62 |         """
 63 |         start_time = time.time()
 64 | 
 65 |         try:
 66 |             result = await func(*args, **kwargs)
 67 |             execution_time = time.time() - start_time
 68 | 
 69 |             # Log successful execution
 70 |             self._log_successful_execution(execution_time, result, estimation)
 71 | 
 72 |             # Check for potential underestimation
 73 |             self._check_for_underestimation(execution_time, estimation)
 74 | 
 75 |             return ExecutionResult(
 76 |                 result=result,
 77 |                 execution_time=execution_time,
 78 |                 success=True,
 79 |             )
 80 | 
 81 |         except Exception as e:
 82 |             execution_time = time.time() - start_time
 83 | 
 84 |             # Log failed execution
 85 |             self._log_failed_execution(execution_time, e)
 86 | 
 87 |             return ExecutionResult(
 88 |                 result=None,
 89 |                 execution_time=execution_time,
 90 |                 success=False,
 91 |                 error=e,
 92 |             )
 93 | 
 94 |     def _log_successful_execution(
 95 |         self,
 96 |         execution_time: float,
 97 |         result: Any,
 98 |         estimation: dict[str, Any] | None,
 99 |     ) -> None:
100 |         """Log successful tool execution."""
101 |         log_data = {
102 |             "tool_name": self.tool_name,
103 |             "user_id": self.user_id,
104 |             "execution_time_seconds": round(execution_time, 3),
105 |             "has_result": result is not None,
106 |         }
107 | 
108 |         if estimation:
109 |             log_data.update(
110 |                 {
111 |                     "estimated_llm_calls": estimation.get("llm_calls", 0),
112 |                     "estimated_tokens": estimation.get("total_tokens", 0),
113 |                 }
114 |             )
115 | 
116 |         logger.info(f"Tool executed successfully: {self.tool_name}", extra=log_data)
117 | 
118 |     def _log_failed_execution(self, execution_time: float, error: Exception) -> None:
119 |         """Log failed tool execution."""
120 |         logger.error(
121 |             f"Tool execution failed: {self.tool_name}",
122 |             extra={
123 |                 "tool_name": self.tool_name,
124 |                 "user_id": self.user_id,
125 |                 "execution_time_seconds": round(execution_time, 3),
126 |                 "error": str(error),
127 |                 "error_type": type(error).__name__,
128 |             },
129 |         )
130 | 
131 |     def _check_for_underestimation(
132 |         self,
133 |         execution_time: float,
134 |         estimation: dict[str, Any] | None,
135 |     ) -> None:
136 |         """Check if execution time indicates potential underestimation."""
137 |         # Long execution time may indicate underestimation
138 |         if execution_time > 30:
139 |             log_data = {
140 |                 "tool_name": self.tool_name,
141 |                 "execution_time_seconds": round(execution_time, 3),
142 |                 "note": "Consider reviewing estimate if this persists",
143 |             }
144 | 
145 |             if estimation:
146 |                 log_data.update(
147 |                     {
148 |                         "estimated_llm_calls": estimation.get("llm_calls", 0),
149 |                         "estimated_tokens": estimation.get("total_tokens", 0),
150 |                         "complexity": estimation.get("complexity", "unknown"),
151 |                         "confidence": estimation.get("confidence", 0.5),
152 |                     }
153 |                 )
154 | 
155 |             logger.warning(
156 |                 f"Long execution time for tool: {self.tool_name}", extra=log_data
157 |             )
158 | 
159 |     def add_usage_info_to_result(
160 |         self, result: Any, usage_info: dict[str, Any] | None
161 |     ) -> Any:
162 |         """
163 |         Add usage information to the tool result.
164 | 
165 |         Args:
166 |             result: The tool execution result
167 |             usage_info: Usage information to add
168 | 
169 |         Returns:
170 |             The result with usage info added (if applicable)
171 |         """
172 |         if usage_info and isinstance(result, dict):
173 |             result["usage"] = usage_info
174 | 
175 |         return result
176 | 
177 | 
178 | def create_tool_monitor(tool_name: str, user_id: int | None = None) -> ToolMonitor:
179 |     """
180 |     Create a tool monitor instance.
181 | 
182 |     Args:
183 |         tool_name: Name of the tool being monitored
184 |         user_id: ID of the user executing the tool
185 | 
186 |     Returns:
187 |         ToolMonitor: Configured tool monitor
188 |     """
189 |     return ToolMonitor(tool_name, user_id)
190 | 
191 | 
192 | def should_alert_for_performance(
193 |     execution_time: float, threshold: float = 30.0
194 | ) -> tuple[bool, str]:
195 |     """
196 |     Check if an alert should be raised for performance issues.
197 | 
198 |     Args:
199 |         execution_time: Execution time in seconds
200 |         threshold: Performance threshold in seconds
201 | 
202 |     Returns:
203 |         tuple: (should_alert, alert_message)
204 |     """
205 |     if execution_time > threshold:
206 |         return (
207 |             True,
208 |             f"Tool execution took {execution_time:.2f}s (threshold: {threshold}s)",
209 |         )
210 | 
211 |     return False, ""
212 | 
```
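
A minimal usage sketch for `ToolMonitor` (the `fetch_data` coroutine below is a hypothetical stand-in for a real tool function, not part of the module above):

```python
import asyncio

from maverick_mcp.utils.tool_monitoring import create_tool_monitor


async def fetch_data(ticker: str) -> dict:
    # Hypothetical stand-in; replace with an actual async tool function.
    await asyncio.sleep(0.1)
    return {"ticker": ticker, "price": 123.45}


async def main() -> None:
    monitor = create_tool_monitor("fetch_data", user_id=1)
    outcome = await monitor.execute_with_monitoring(
        fetch_data, args=("AAPL",), kwargs={}
    )
    if outcome.success:
        # Attach usage metadata to dict results before returning them.
        result = monitor.add_usage_info_to_result(outcome.result, {"llm_calls": 0})
        print(result, f"{outcome.execution_time:.3f}s")
    else:
        print("failed:", outcome.error)


asyncio.run(main())
```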

--------------------------------------------------------------------------------
/tools/check_orchestration_config.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Configuration checker for orchestrated agent setup.
  4 | Verifies that all required dependencies and configurations are available.
  5 | """
  6 | 
  7 | import os
  8 | import sys
  9 | from typing import Any
 10 | 
 11 | 
 12 | def check_dependencies() -> dict[str, Any]:
 13 |     """Check if all required dependencies are available."""
 14 |     results = {"dependencies": {}, "status": "success", "missing": []}
 15 | 
 16 |     # Check core dependencies
 17 |     deps_to_check = [
 18 |         ("langchain_core", "LangChain core"),
 19 |         ("langgraph", "LangGraph"),
 20 |         ("fastmcp", "FastMCP"),
 21 |         ("exa_py", "Exa AI search (optional)"),
 22 |         ("tavily", "Tavily search (optional)"),
 23 |     ]
 24 | 
 25 |     for module, description in deps_to_check:
 26 |         try:
 27 |             __import__(module)
 28 |             results["dependencies"][module] = {
 29 |                 "status": "available",
 30 |                 "description": description,
 31 |             }
 32 |         except ImportError as e:
 33 |             results["dependencies"][module] = {
 34 |                 "status": "missing",
 35 |                 "description": description,
 36 |                 "error": str(e),
 37 |             }
 38 |             if module not in ["exa_py", "tavily"]:  # Optional dependencies
 39 |                 results["missing"].append(module)
 40 |                 results["status"] = "error"
 41 | 
 42 |     return results
 43 | 
 44 | 
 45 | def check_environment_variables() -> dict[str, Any]:
 46 |     """Check environment variables for API keys."""
 47 |     results = {"environment": {}, "status": "success", "warnings": []}
 48 | 
 49 |     # Required variables
 50 |     required_vars = [
 51 |         ("TIINGO_API_KEY", "Stock data provider", True),
 52 |     ]
 53 | 
 54 |     # Optional variables
 55 |     optional_vars = [
 56 |         ("OPENAI_API_KEY", "OpenAI LLM provider", False),
 57 |         ("ANTHROPIC_API_KEY", "Anthropic LLM provider", False),
 58 |         ("EXA_API_KEY", "Exa search provider", False),
 59 |         ("TAVILY_API_KEY", "Tavily search provider", False),
 60 |         ("FRED_API_KEY", "Economic data provider", False),
 61 |     ]
 62 | 
 63 |     all_vars = required_vars + optional_vars
 64 | 
 65 |     for var_name, description, required in all_vars:
 66 |         value = os.getenv(var_name)
 67 |         if value:
 68 |             results["environment"][var_name] = {
 69 |                 "status": "configured",
 70 |                 "description": description,
 71 |                 "has_value": bool(value and value.strip()),
 72 |             }
 73 |         else:
 74 |             results["environment"][var_name] = {
 75 |                 "status": "not_configured",
 76 |                 "description": description,
 77 |                 "required": required,
 78 |             }
 79 | 
 80 |             if required:
 81 |                 results["status"] = "error"
 82 |             else:
 83 |                 results["warnings"].append(
 84 |                     f"{var_name} not configured - {description} will not be available"
 85 |                 )
 86 | 
 87 |     return results
 88 | 
 89 | 
 90 | def check_agent_imports() -> dict[str, Any]:
 91 |     """Check if agent classes can be imported successfully."""
 92 |     results = {"agents": {}, "status": "success", "errors": []}
 93 | 
 94 |     agents_to_check = [
 95 |         ("maverick_mcp.agents.market_analysis", "MarketAnalysisAgent"),
 96 |         ("maverick_mcp.agents.supervisor", "SupervisorAgent"),
 97 |         ("maverick_mcp.agents.deep_research", "DeepResearchAgent"),
 98 |     ]
 99 | 
100 |     for module_path, class_name in agents_to_check:
101 |         try:
102 |             module = __import__(module_path, fromlist=[class_name])
103 |             getattr(module, class_name)
104 |             results["agents"][class_name] = {
105 |                 "status": "importable",
106 |                 "module": module_path,
107 |             }
108 |         except Exception as e:
109 |             results["agents"][class_name] = {
110 |                 "status": "error",
111 |                 "module": module_path,
112 |                 "error": str(e),
113 |             }
114 |             results["errors"].append(f"{class_name}: {str(e)}")
115 |             results["status"] = "error"
116 | 
117 |     return results
118 | 
119 | 
120 | def main():
121 |     """Run configuration checks."""
122 |     print("🔍 Checking MaverickMCP Orchestration Configuration...")
123 |     print("=" * 60)
124 | 
125 |     # Check dependencies
126 |     dep_results = check_dependencies()
127 |     print("\n📦 Dependencies:")
128 |     for dep, info in dep_results["dependencies"].items():
129 |         status_icon = "✅" if info["status"] == "available" else "❌"
130 |         print(f"  {status_icon} {dep}: {info['description']}")
131 |         if info["status"] == "missing":
132 |             print(f"    Error: {info['error']}")
133 | 
134 |     # Check environment variables
135 |     env_results = check_environment_variables()
136 |     print("\n🔧 Environment Variables:")
137 |     for var, info in env_results["environment"].items():
138 |         if info["status"] == "configured":
139 |             print(f"  ✅ {var}: {info['description']}")
140 |         else:
141 |             icon = "❌" if info.get("required") else "⚠️ "
142 |             print(f"  {icon} {var}: {info['description']} (not configured)")
143 | 
144 |     # Check agent imports
145 |     agent_results = check_agent_imports()
146 |     print("\n🤖 Agent Classes:")
147 |     for agent, info in agent_results["agents"].items():
148 |         status_icon = "✅" if info["status"] == "importable" else "❌"
149 |         print(f"  {status_icon} {agent}: {info['module']}")
150 |         if info["status"] == "error":
151 |             print(f"    Error: {info['error']}")
152 | 
153 |     # Summary
154 |     print("\n" + "=" * 60)
155 | 
156 |     all_status = [dep_results["status"], env_results["status"], agent_results["status"]]
157 |     overall_status = "error" if "error" in all_status else "success"
158 | 
159 |     if overall_status == "success":
160 |         print("✅ Configuration check PASSED!")
161 |         print("\nOrchestrated agents are ready to use.")
162 | 
163 |         if env_results["warnings"]:
164 |             print("\n⚠️  Warnings:")
165 |             for warning in env_results["warnings"]:
166 |                 print(f"  • {warning}")
167 |     else:
168 |         print("❌ Configuration check FAILED!")
169 |         print("\nPlease fix the errors above before using orchestrated agents.")
170 | 
171 |         if dep_results["missing"]:
172 |             print(f"\nMissing dependencies: {', '.join(dep_results['missing'])}")
173 |             print("Run: uv sync")
174 | 
175 |         if env_results["status"] == "error":
176 |             print("\nMissing required environment variables.")
177 |             print("Copy .env.example to .env and configure required API keys.")
178 | 
179 |         if agent_results["errors"]:
180 |             print("\nAgent import errors:")
181 |             for error in agent_results["errors"]:
182 |                 print(f"  • {error}")
183 | 
184 |     return 0 if overall_status == "success" else 1
185 | 
186 | 
187 | if __name__ == "__main__":
188 |     sys.exit(main())
189 | 
```
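
The checker is designed to run as a script, but each check returns a plain dict, so the functions can also be reused programmatically. A sketch, assuming the `tools` directory is importable (e.g., the repo root is on `sys.path`):

```python
from tools.check_orchestration_config import (
    check_dependencies,
    check_environment_variables,
)

deps = check_dependencies()
env = check_environment_variables()

# Fail fast (e.g., in CI) when a required dependency or variable is missing.
if "error" in (deps["status"], env["status"]):
    raise SystemExit(f"Configuration invalid; missing deps: {deps['missing']}")

for warning in env["warnings"]:
    print(f"warning: {warning}")
```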

--------------------------------------------------------------------------------
/maverick_mcp/providers/interfaces/macro_data.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Macroeconomic data provider interface.
  3 | 
  4 | This module defines the abstract interface for macroeconomic data operations,
  5 | including GDP, inflation, unemployment, and market sentiment indicators.
  6 | """
  7 | 
  8 | from typing import Any, Protocol, runtime_checkable
  9 | 
 10 | 
 11 | @runtime_checkable
 12 | class IMacroDataProvider(Protocol):
 13 |     """
 14 |     Interface for macroeconomic data operations.
 15 | 
 16 |     This interface defines the contract for retrieving economic indicators,
 17 |     market sentiment data, and related macroeconomic statistics.
 18 |     """
 19 | 
 20 |     async def get_gdp_growth_rate(self) -> dict[str, Any]:
 21 |         """
 22 |         Get GDP growth rate data.
 23 | 
 24 |         Returns:
 25 |             Dictionary with current and previous GDP growth rates
 26 |         """
 27 |         ...
 28 | 
 29 |     async def get_unemployment_rate(self) -> dict[str, Any]:
 30 |         """
 31 |         Get unemployment rate data.
 32 | 
 33 |         Returns:
 34 |             Dictionary with current and previous unemployment rates
 35 |         """
 36 |         ...
 37 | 
 38 |     async def get_inflation_rate(self) -> dict[str, Any]:
 39 |         """
 40 |         Get inflation rate data based on CPI.
 41 | 
 42 |         Returns:
 43 |             Dictionary with current and previous inflation rates and bounds
 44 |         """
 45 |         ...
 46 | 
 47 |     async def get_vix(self) -> float | None:
 48 |         """
 49 |         Get VIX (volatility index) data.
 50 | 
 51 |         Returns:
 52 |             Current VIX value or None if unavailable
 53 |         """
 54 |         ...
 55 | 
 56 |     async def get_sp500_performance(self) -> float:
 57 |         """
 58 |         Get S&P 500 performance over multiple timeframes.
 59 | 
 60 |         Returns:
 61 |             Weighted performance percentage
 62 |         """
 63 |         ...
 64 | 
 65 |     async def get_nasdaq_performance(self) -> float:
 66 |         """
 67 |         Get NASDAQ performance over multiple timeframes.
 68 | 
 69 |         Returns:
 70 |             Weighted performance percentage
 71 |         """
 72 |         ...
 73 | 
 74 |     async def get_sp500_momentum(self) -> float:
 75 |         """
 76 |         Get short-term S&P 500 momentum.
 77 | 
 78 |         Returns:
 79 |             Momentum percentage over short timeframes
 80 |         """
 81 |         ...
 82 | 
 83 |     async def get_nasdaq_momentum(self) -> float:
 84 |         """
 85 |         Get short-term NASDAQ momentum.
 86 | 
 87 |         Returns:
 88 |             Momentum percentage over short timeframes
 89 |         """
 90 |         ...
 91 | 
 92 |     async def get_usd_momentum(self) -> float:
 93 |         """
 94 |         Get USD momentum using broad dollar index.
 95 | 
 96 |         Returns:
 97 |             USD momentum percentage over short timeframes
 98 |         """
 99 |         ...
100 | 
101 |     async def get_macro_statistics(self) -> dict[str, Any]:
102 |         """
103 |         Get comprehensive macroeconomic statistics.
104 | 
105 |         Returns:
106 |             Dictionary with all macro indicators including:
107 |             - gdp_growth_rate: Current and previous GDP growth
108 |             - unemployment_rate: Current and previous unemployment
109 |             - inflation_rate: Current and previous inflation
110 |             - sp500_performance: S&P 500 performance
111 |             - nasdaq_performance: NASDAQ performance
112 |             - vix: Volatility index
113 |             - sentiment_score: Computed sentiment score
114 |             - historical_data: Time series data
115 |         """
116 |         ...
117 | 
118 |     async def get_historical_data(self) -> dict[str, Any]:
119 |         """
120 |         Get historical data for all indicators.
121 | 
122 |         Returns:
123 |             Dictionary with time series data for various indicators
124 |         """
125 |         ...
126 | 
127 | 
128 | class MacroDataConfig:
129 |     """
130 |     Configuration class for macroeconomic data providers.
131 | 
132 |     This class encapsulates macro data-related configuration parameters
133 |     to reduce coupling between providers and configuration sources.
134 |     """
135 | 
136 |     def __init__(
137 |         self,
138 |         fred_api_key: str = "",
139 |         window_days: int = 365,
140 |         lookback_days: int = 30,
141 |         request_timeout: int = 30,
142 |         max_retries: int = 3,
143 |         cache_ttl: int = 3600,
144 |         sentiment_weights: dict[str, float] | None = None,
145 |         smoothing_factor: float = 0.8,
146 |     ):
147 |         """
148 |         Initialize macro data configuration.
149 | 
150 |         Args:
151 |             fred_api_key: API key for FRED (Federal Reserve Economic Data)
152 |             window_days: Window for historical data bounds calculation
153 |             lookback_days: Lookback period for momentum calculations
154 |             request_timeout: Request timeout in seconds
155 |             max_retries: Maximum number of retry attempts
156 |             cache_ttl: Cache time-to-live in seconds
157 |             sentiment_weights: Weights for sentiment score calculation
158 |             smoothing_factor: Smoothing factor for sentiment score
159 |         """
160 |         self.fred_api_key = fred_api_key
161 |         self.window_days = window_days
162 |         self.lookback_days = lookback_days
163 |         self.request_timeout = request_timeout
164 |         self.max_retries = max_retries
165 |         self.cache_ttl = cache_ttl
166 |         self.smoothing_factor = smoothing_factor
167 | 
168 |         # Default sentiment weights
169 |         self.sentiment_weights = sentiment_weights or {
170 |             # Short-term signals (60% total)
171 |             "vix": 0.20,
172 |             "sp500_momentum": 0.20,
173 |             "nasdaq_momentum": 0.15,
174 |             "usd_momentum": 0.05,
175 |             # Macro signals (40% total)
176 |             "inflation_rate": 0.15,
177 |             "gdp_growth_rate": 0.15,
178 |             "unemployment_rate": 0.10,
179 |         }
180 | 
181 |     @property
182 |     def has_fred_key(self) -> bool:
183 |         """Check if FRED API key is configured."""
184 |         return bool(self.fred_api_key.strip())
185 | 
186 |     def get_sentiment_weight(self, indicator: str) -> float:
187 |         """Get sentiment weight for a specific indicator."""
188 |         return self.sentiment_weights.get(indicator, 0.0)
189 | 
190 |     def get_total_sentiment_weight(self) -> float:
191 |         """Get total sentiment weight (should sum to 1.0)."""
192 |         return sum(self.sentiment_weights.values())
193 | 
194 | 
195 | # FRED series IDs for common economic indicators
196 | FRED_SERIES_IDS = {
197 |     "gdp_growth_rate": "A191RL1Q225SBEA",
198 |     "unemployment_rate": "UNRATE",
199 |     "core_inflation": "CPILFESL",
200 |     "sp500": "SP500",
201 |     "nasdaq": "NASDAQ100",
202 |     "vix": "VIXCLS",
203 |     "usd_index": "DTWEXBGS",
204 | }
205 | 
206 | # Default bounds for normalization
207 | DEFAULT_INDICATOR_BOUNDS = {
208 |     "vix": {"min": 10.0, "max": 50.0},
209 |     "sp500_momentum": {"min": -15.0, "max": 15.0},
210 |     "nasdaq_momentum": {"min": -20.0, "max": 20.0},
211 |     "usd_momentum": {"min": -5.0, "max": 5.0},
212 |     "inflation_rate": {"min": 0.0, "max": 10.0},
213 |     "gdp_growth_rate": {"min": -2.0, "max": 6.0},
214 |     "unemployment_rate": {"min": 2.0, "max": 10.0},
215 | }
216 | 
```
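
A short sketch of the configuration class and module constants in use (the API key is a placeholder, not a real credential):

```python
from maverick_mcp.providers.interfaces.macro_data import (
    FRED_SERIES_IDS,
    MacroDataConfig,
)

config = MacroDataConfig(fred_api_key="demo-key", lookback_days=21)

assert config.has_fred_key
# The default weights match the 60/40 split documented above and sum to 1.0.
assert abs(config.get_total_sentiment_weight() - 1.0) < 1e-9

print(config.get_sentiment_weight("vix"))    # 0.2
print(FRED_SERIES_IDS["unemployment_rate"])  # UNRATE
```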

--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/health/health_checker.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Health Checker Service
  3 | 
  4 | Extracts health checking logic from the main server to follow
  5 | the Single Responsibility Principle.
  6 | """
  7 | 
  8 | import logging
  9 | from datetime import UTC, datetime
 10 | from typing import Any
 11 | from sqlalchemy import text
 12 | from maverick_mcp.config.settings import settings
 13 | from maverick_mcp.data.session_management import get_db_session
 14 | 
 15 | logger = logging.getLogger(__name__)
 16 | 
 17 | 
 18 | class HealthStatus:
 19 |     """Health status string constants."""
 20 | 
 21 |     HEALTHY = "healthy"
 22 |     UNHEALTHY = "unhealthy"
 23 |     DEGRADED = "degraded"
 24 |     UNKNOWN = "unknown"
 25 | 
 26 | 
 27 | class HealthChecker:
 28 |     """Service for checking system health status."""
 29 | 
 30 |     def __init__(self):
 31 |         self.logger = logger
 32 | 
 33 |     def check_all(self) -> dict[str, Any]:
 34 |         """
 35 |         Perform comprehensive health check.
 36 | 
 37 |         Returns:
 38 |             Dict containing overall status and component health
 39 |         """
 40 |         components = {
 41 |             "database": self._check_database(),
 42 |             "configuration": self._check_configuration(),
 43 |             "redis": self._check_redis(),
 44 |         }
 45 | 
 46 |         overall_status = self._determine_overall_status(components)
 47 | 
 48 |         return {
 49 |             "status": overall_status,
 50 |             "timestamp": datetime.now(UTC).isoformat(),
 51 |             "components": components,
 52 |             "system": self._get_system_info(),
 53 |         }
 54 | 
 55 |     def _check_database(self) -> dict[str, Any]:
 56 |         """Check database connectivity and status."""
 57 |         try:
 58 |             with get_db_session() as session:
 59 |                 # Simple query to test connection
 60 |                 result = session.execute(text("SELECT 1 as test")).fetchone()
 61 |                 if result and result[0] == 1:
 62 |                     return {
 63 |                         "status": HealthStatus.HEALTHY,
 64 |                         "message": "Database connection successful",
 65 |                         "response_time_ms": 0,  # Could add timing if needed
 66 |                     }
 67 |                 else:
 68 |                     return {
 69 |                         "status": HealthStatus.UNHEALTHY,
 70 |                         "message": "Database query failed",
 71 |                         "error": "Unexpected query result",
 72 |                     }
 73 |         except Exception as e:
 74 |             self.logger.error(f"Database health check failed: {e}")
 75 |             return {
 76 |                 "status": HealthStatus.UNHEALTHY,
 77 |                 "message": "Database connection failed",
 78 |                 "error": str(e),
 79 |             }
 80 | 
 81 |     def _check_redis(self) -> dict[str, Any]:
 82 |         """Check Redis connectivity if configured."""
 83 |         if not hasattr(settings, "redis") or not settings.redis.host:
 84 |             return {
 85 |                 "status": HealthStatus.HEALTHY,
 86 |                 "message": "Redis not configured (using in-memory cache)",
 87 |                 "note": "This is normal for personal use setup",
 88 |             }
 89 | 
 90 |         try:
 91 |             import redis
 92 | 
 93 |             redis_client = redis.Redis(
 94 |                 host=settings.redis.host,
 95 |                 port=settings.redis.port,
 96 |                 db=settings.redis.db,
 97 |                 decode_responses=True,
 98 |                 socket_timeout=5.0,
 99 |             )
100 | 
101 |             # Test Redis connection
102 |             redis_client.ping()
103 | 
104 |             return {
105 |                 "status": HealthStatus.HEALTHY,
106 |                 "message": "Redis connection successful",
107 |                 "host": settings.redis.host,
108 |                 "port": settings.redis.port,
109 |             }
110 | 
111 |         except ImportError:
112 |             return {
113 |                 "status": HealthStatus.DEGRADED,
114 |                 "message": "Redis library not available",
115 |                 "note": "Falling back to in-memory cache",
116 |             }
117 |         except Exception as e:
118 |             self.logger.warning(f"Redis health check failed: {e}")
119 |             return {
120 |                 "status": HealthStatus.DEGRADED,
121 |                 "message": "Redis connection failed, using in-memory cache",
122 |                 "error": str(e),
123 |             }
124 | 
125 |     def _check_configuration(self) -> dict[str, Any]:
126 |         """Check application configuration status."""
127 |         warnings = []
128 |         errors = []
129 | 
130 |         # Check required API keys
131 |         if not getattr(settings, "tiingo_api_key", None):
132 |             warnings.append("TIINGO_API_KEY not configured")
133 | 
134 |         # Check optional API keys
135 |         optional_keys = ["fred_api_key", "openai_api_key", "anthropic_api_key"]
136 |         missing_optional = []
137 | 
138 |         for key in optional_keys:
139 |             if not getattr(settings, key, None):
140 |                 missing_optional.append(key.upper())
141 | 
142 |         if missing_optional:
143 |             warnings.append(
144 |                 f"Optional API keys not configured: {', '.join(missing_optional)}"
145 |             )
146 | 
147 |         # Check database configuration
148 |         if not settings.database_url:
149 |             errors.append("DATABASE_URL not configured")
150 | 
151 |         # Determine status
152 |         if errors:
153 |             status = HealthStatus.UNHEALTHY
154 |             message = f"Configuration errors: {'; '.join(errors)}"
155 |         elif warnings:
156 |             status = HealthStatus.DEGRADED
157 |             message = f"Configuration warnings: {'; '.join(warnings)}"
158 |         else:
159 |             status = HealthStatus.HEALTHY
160 |             message = "Configuration is valid"
161 | 
162 |         result = {
163 |             "status": status,
164 |             "message": message,
165 |         }
166 | 
167 |         if warnings:
168 |             result["warnings"] = warnings
169 |         if errors:
170 |             result["errors"] = errors
171 | 
172 |         return result
173 | 
174 |     def _determine_overall_status(self, components: dict[str, dict[str, Any]]) -> str:
175 |         """Determine overall system status from component statuses."""
176 |         statuses = [comp["status"] for comp in components.values()]
177 | 
178 |         if HealthStatus.UNHEALTHY in statuses:
179 |             return HealthStatus.UNHEALTHY
180 |         elif HealthStatus.DEGRADED in statuses:
181 |             return HealthStatus.DEGRADED
182 |         elif all(status == HealthStatus.HEALTHY for status in statuses):
183 |             return HealthStatus.HEALTHY
184 |         else:
185 |             return HealthStatus.UNKNOWN
186 | 
187 |     def _get_system_info(self) -> dict[str, Any]:
188 |         """Get basic system information."""
189 |         return {
190 |             "app_name": settings.app_name,
191 |             "version": getattr(settings, "version", "0.1.0"),
192 |             "environment": getattr(settings, "environment", "development"),
193 |             "python_version": f"{__import__('sys').version_info.major}.{__import__('sys').version_info.minor}",
194 |         }
195 | 
```
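
Usage sketch for the health checker; a real run needs working settings and a reachable database, otherwise components report unhealthy or degraded:

```python
import json

from maverick_mcp.infrastructure.health.health_checker import HealthChecker

report = HealthChecker().check_all()

# Overall status is the worst component status: unhealthy > degraded > healthy.
print(report["status"])
print(json.dumps(report["components"], indent=2, default=str))
```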

--------------------------------------------------------------------------------
/maverick_mcp/validation/responses.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Base response models for API standardization.
  3 | 
  4 | This module provides standard response formats for all API endpoints
  5 | to ensure consistency across the application.
  6 | """
  7 | 
  8 | from datetime import UTC, datetime
  9 | from typing import Any, TypeVar
 10 | 
 11 | from pydantic import BaseModel, Field
 12 | 
 13 | T = TypeVar("T")
 14 | 
 15 | 
 16 | class BaseResponse(BaseModel):
 17 |     """Base response model with standard fields."""
 18 | 
 19 |     success: bool = Field(..., description="Whether the request was successful")
 20 |     message: str | None = Field(None, description="Human-readable message")
 21 |     timestamp: datetime = Field(
 22 |         default_factory=lambda: datetime.now(UTC),
 23 |         description="Response timestamp",
 24 |     )
 25 |     request_id: str | None = Field(None, description="Request tracking ID")
 26 | 
 27 | 
 28 | class DataResponse[T](BaseResponse):
 29 |     """Response model with data payload."""
 30 | 
 31 |     data: T = Field(..., description="Response data")
 32 | 
 33 | 
 34 | class ListResponse[T](BaseResponse):
 35 |     """Response model for paginated lists."""
 36 | 
 37 |     data: list[T] = Field(..., description="List of items")
 38 |     total: int = Field(..., description="Total number of items")
 39 |     limit: int = Field(..., description="Number of items per page")
 40 |     offset: int = Field(..., description="Number of items skipped")
 41 |     has_more: bool = Field(..., description="Whether more items are available")
 42 | 
 43 | 
 44 | class ErrorDetail(BaseModel):
 45 |     """Detailed error information."""
 46 | 
 47 |     code: str = Field(..., description="Error code")
 48 |     field: str | None = Field(None, description="Field that caused the error")
 49 |     message: str = Field(..., description="Error message")
 50 |     context: dict[str, Any] | None = Field(None, description="Additional context")
 51 | 
 52 | 
 53 | class ErrorResponse(BaseResponse):
 54 |     """Standard error response model."""
 55 | 
 56 |     success: bool = Field(default=False, description="Always false for errors")
 57 |     error: ErrorDetail = Field(..., description="Error details")
 58 |     status_code: int = Field(..., description="HTTP status code")
 59 |     trace_id: str | None = Field(None, description="Error trace ID for debugging")
 60 | 
 61 | 
 62 | class ValidationErrorResponse(ErrorResponse):
 63 |     """Response for validation errors."""
 64 | 
 65 |     errors: list[ErrorDetail] = Field(..., description="List of validation errors")
 66 | 
 67 | 
 68 | class BatchOperationResult(BaseModel):
 69 |     """Result of a batch operation on a single item."""
 70 | 
 71 |     id: str = Field(..., description="Item identifier")
 72 |     success: bool = Field(..., description="Whether the operation succeeded")
 73 |     error: ErrorDetail | None = Field(None, description="Error if operation failed")
 74 |     data: dict[str, Any] | None = Field(None, description="Operation result data")
 75 | 
 76 | 
 77 | class BatchResponse(BaseResponse):
 78 |     """Response for batch operations."""
 79 | 
 80 |     results: list[BatchOperationResult] = Field(
 81 |         ..., description="Results for each item"
 82 |     )
 83 |     successful: int = Field(..., description="Number of successful operations")
 84 |     failed: int = Field(..., description="Number of failed operations")
 85 |     partial: bool = Field(..., description="Whether some operations failed")
 86 | 
 87 | 
 88 | class HealthStatus(BaseModel):
 89 |     """Health check status for a component."""
 90 | 
 91 |     name: str = Field(..., description="Component name")
 92 |     status: str = Field(..., description="Status (healthy, unhealthy, degraded)")
 93 |     latency_ms: float | None = Field(None, description="Response time in milliseconds")
 94 |     details: dict[str, Any] | None = Field(None, description="Additional details")
 95 | 
 96 | 
 97 | class HealthResponse(BaseResponse):
 98 |     """Health check response."""
 99 | 
100 |     status: str = Field(..., description="Overall status")
101 |     components: list[HealthStatus] = Field(
102 |         ..., description="Status of individual components"
103 |     )
104 |     version: str | None = Field(None, description="Application version")
105 |     uptime_seconds: int | None = Field(None, description="Uptime in seconds")
106 | 
107 | 
108 | class RateLimitInfo(BaseModel):
109 |     """Rate limit information."""
110 | 
111 |     limit: int = Field(..., description="Request limit")
112 |     remaining: int = Field(..., description="Remaining requests")
113 |     reset: datetime = Field(..., description="When the limit resets")
114 |     retry_after: int | None = Field(None, description="Seconds to wait before retrying")
115 | 
116 | 
117 | class RateLimitResponse(ErrorResponse):
118 |     """Response when rate limit is exceeded."""
119 | 
120 |     rate_limit: RateLimitInfo = Field(..., description="Rate limit details")
121 | 
122 | 
123 | class WebhookEvent(BaseModel):
124 |     """Webhook event payload."""
125 | 
126 |     event_id: str = Field(..., description="Unique event identifier")
127 |     event_type: str = Field(..., description="Type of event")
128 |     timestamp: datetime = Field(..., description="When the event occurred")
129 |     data: dict[str, Any] = Field(..., description="Event data")
130 |     signature: str | None = Field(None, description="Event signature for verification")
131 | 
132 | 
133 | class WebhookResponse(BaseResponse):
134 |     """Response for webhook endpoints."""
135 | 
136 |     event_id: str = Field(..., description="Processed event ID")
137 |     status: str = Field(..., description="Processing status")
138 | 
139 | 
140 | # Helper functions for creating responses
141 | def success_response(
142 |     data: Any = None,
143 |     message: str | None = None,
144 |     request_id: str | None = None,
145 | ) -> dict[str, Any]:
146 |     """Create a successful response."""
147 |     response = {"success": True, "timestamp": datetime.now(UTC).isoformat()}
148 | 
149 |     if message:
150 |         response["message"] = message
151 |     if request_id:
152 |         response["request_id"] = request_id
153 |     if data is not None:
154 |         response["data"] = data
155 | 
156 |     return response
157 | 
158 | 
159 | def error_response(
160 |     code: str,
161 |     message: str,
162 |     status_code: int,
163 |     field: str | None = None,
164 |     context: dict[str, Any] | None = None,
165 |     trace_id: str | None = None,
166 | ) -> dict[str, Any]:
167 |     """Create an error response."""
168 |     return {
169 |         "success": False,
170 |         "timestamp": datetime.now(UTC).isoformat(),
171 |         "error": {
172 |             "code": code,
173 |             "message": message,
174 |             "field": field,
175 |             "context": context,
176 |         },
177 |         "status_code": status_code,
178 |         "trace_id": trace_id,
179 |     }
180 | 
181 | 
182 | def validation_error_response(
183 |     errors: list[dict[str, Any]], trace_id: str | None = None
184 | ) -> dict[str, Any]:
185 |     """Create a validation error response."""
186 |     return {
187 |         "success": False,
188 |         "timestamp": datetime.now(UTC).isoformat(),
189 |         "error": {
190 |             "code": "VALIDATION_ERROR",
191 |             "message": "Validation failed",
192 |         },
193 |         "errors": errors,
194 |         "status_code": 422,
195 |         "trace_id": trace_id,
196 |     }
197 | 
```
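
A sketch of both response styles: the plain-dict helpers and the generic Pydantic model (the `DataResponse[T]` parametrization assumes Python 3.12+ with Pydantic v2):

```python
from maverick_mcp.validation.responses import (
    DataResponse,
    error_response,
    success_response,
)

# Dict helpers for handlers that return JSON directly.
ok = success_response(data={"ticker": "AAPL"}, message="fetched", request_id="req-1")
err = error_response(
    code="NOT_FOUND",
    message="Stock not found",
    status_code=404,
    field="ticker",
)

# Typed variant via the generic model.
typed = DataResponse[dict](success=True, data={"ticker": "AAPL"})
print(ok["success"], err["status_code"], typed.data)
```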

--------------------------------------------------------------------------------
/maverick_mcp/domain/value_objects/technical_indicators.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Value objects for technical indicators.
  3 | 
  4 | These are immutable objects representing technical analysis concepts
  5 | in the domain layer. They contain no infrastructure dependencies.
  6 | """
  7 | 
  8 | from dataclasses import dataclass
  9 | from enum import Enum
 10 | 
 11 | 
 12 | class Signal(Enum):
 13 |     """Trading signal types."""
 14 | 
 15 |     STRONG_BUY = "strong_buy"
 16 |     BUY = "buy"
 17 |     NEUTRAL = "neutral"
 18 |     SELL = "sell"
 19 |     STRONG_SELL = "strong_sell"
 20 | 
 21 | 
 22 | class TrendDirection(Enum):
 23 |     """Market trend directions."""
 24 | 
 25 |     STRONG_UPTREND = "strong_uptrend"
 26 |     UPTREND = "uptrend"
 27 |     SIDEWAYS = "sideways"
 28 |     DOWNTREND = "downtrend"
 29 |     STRONG_DOWNTREND = "strong_downtrend"
 30 | 
 31 | 
 32 | @dataclass(frozen=True)
 33 | class RSIIndicator:
 34 |     """Relative Strength Index value object."""
 35 | 
 36 |     value: float
 37 |     period: int = 14
 38 | 
 39 |     def __post_init__(self):
 40 |         if not 0 <= self.value <= 100:
 41 |             raise ValueError("RSI must be between 0 and 100")
 42 |         if self.period <= 0:
 43 |             raise ValueError("Period must be positive")
 44 | 
 45 |     @property
 46 |     def is_overbought(self) -> bool:
 47 |         """Check if RSI indicates overbought condition."""
 48 |         return self.value >= 70
 49 | 
 50 |     @property
 51 |     def is_oversold(self) -> bool:
 52 |         """Check if RSI indicates oversold condition."""
 53 |         return self.value <= 30
 54 | 
 55 |     @property
 56 |     def signal(self) -> Signal:
 57 |         """Get trading signal based on RSI value."""
 58 |         if self.value >= 80:
 59 |             return Signal.STRONG_SELL
 60 |         elif self.value >= 70:
 61 |             return Signal.SELL
 62 |         elif self.value <= 20:
 63 |             return Signal.STRONG_BUY
 64 |         elif self.value <= 30:
 65 |             return Signal.BUY
 66 |         else:
 67 |             return Signal.NEUTRAL
 68 | 
 69 | 
 70 | @dataclass(frozen=True)
 71 | class MACDIndicator:
 72 |     """MACD (Moving Average Convergence Divergence) value object."""
 73 | 
 74 |     macd_line: float
 75 |     signal_line: float
 76 |     histogram: float
 77 |     fast_period: int = 12
 78 |     slow_period: int = 26
 79 |     signal_period: int = 9
 80 | 
 81 |     @property
 82 |     def is_bullish_crossover(self) -> bool:
 83 |         """Check if MACD crossed above signal line."""
 84 |         return self.macd_line > self.signal_line and self.histogram > 0
 85 | 
 86 |     @property
 87 |     def is_bearish_crossover(self) -> bool:
 88 |         """Check if MACD crossed below signal line."""
 89 |         return self.macd_line < self.signal_line and self.histogram < 0
 90 | 
 91 |     @property
 92 |     def signal(self) -> Signal:
 93 |         """Get trading signal based on MACD."""
 94 |         if self.is_bullish_crossover and self.histogram > 0.5:
 95 |             return Signal.STRONG_BUY
 96 |         elif self.is_bullish_crossover:
 97 |             return Signal.BUY
 98 |         elif self.is_bearish_crossover and self.histogram < -0.5:
 99 |             return Signal.STRONG_SELL
100 |         elif self.is_bearish_crossover:
101 |             return Signal.SELL
102 |         else:
103 |             return Signal.NEUTRAL
104 | 
105 | 
106 | @dataclass(frozen=True)
107 | class BollingerBands:
108 |     """Bollinger Bands value object."""
109 | 
110 |     upper_band: float
111 |     middle_band: float
112 |     lower_band: float
113 |     current_price: float
114 |     period: int = 20
115 |     std_dev: int = 2
116 | 
117 |     @property
118 |     def bandwidth(self) -> float:
119 |         """Calculate bandwidth (volatility indicator)."""
120 |         return (self.upper_band - self.lower_band) / self.middle_band
121 | 
122 |     @property
123 |     def percent_b(self) -> float:
124 |         """Calculate %B (position within bands)."""
125 |         denominator = self.upper_band - self.lower_band
126 |         if denominator == 0:
127 |             return 0.5  # Return middle if bands are flat
128 |         return (self.current_price - self.lower_band) / denominator
129 | 
130 |     @property
131 |     def is_squeeze(self) -> bool:
132 |         """Check if bands are in a squeeze (low volatility)."""
133 |         return self.bandwidth < 0.1
134 | 
135 |     @property
136 |     def signal(self) -> Signal:
137 |         """Get trading signal based on Bollinger Bands."""
138 |         if self.current_price > self.upper_band:
139 |             return Signal.SELL
140 |         elif self.current_price < self.lower_band:
141 |             return Signal.BUY
142 |         elif self.percent_b > 0.8:
143 |             return Signal.SELL
144 |         elif self.percent_b < 0.2:
145 |             return Signal.BUY
146 |         else:
147 |             return Signal.NEUTRAL
148 | 
149 | 
150 | @dataclass(frozen=True)
151 | class StochasticOscillator:
152 |     """Stochastic Oscillator value object."""
153 | 
154 |     k_value: float
155 |     d_value: float
156 |     period: int = 14
157 | 
158 |     def __post_init__(self):
159 |         if not 0 <= self.k_value <= 100:
160 |             raise ValueError("%K must be between 0 and 100")
161 |         if not 0 <= self.d_value <= 100:
162 |             raise ValueError("%D must be between 0 and 100")
163 | 
164 |     @property
165 |     def is_overbought(self) -> bool:
166 |         """Check if stochastic indicates overbought."""
167 |         return self.k_value >= 80
168 | 
169 |     @property
170 |     def is_oversold(self) -> bool:
171 |         """Check if stochastic indicates oversold."""
172 |         return self.k_value <= 20
173 | 
174 |     @property
175 |     def signal(self) -> Signal:
176 |         """Get trading signal based on stochastic."""
177 |         if self.k_value > self.d_value and self.k_value < 20:
178 |             return Signal.BUY
179 |         elif self.k_value < self.d_value and self.k_value > 80:
180 |             return Signal.SELL
181 |         elif self.is_oversold:
182 |             return Signal.BUY
183 |         elif self.is_overbought:
184 |             return Signal.SELL
185 |         else:
186 |             return Signal.NEUTRAL
187 | 
188 | 
189 | @dataclass(frozen=True)
190 | class PriceLevel:
191 |     """Support or resistance price level."""
192 | 
193 |     price: float
194 |     strength: int  # 1-5, with 5 being strongest
195 |     touches: int  # Number of times price touched this level
196 | 
197 |     def __post_init__(self):
198 |         if self.price <= 0:
199 |             raise ValueError("Price must be positive")
200 |         if not 1 <= self.strength <= 5:
201 |             raise ValueError("Strength must be between 1 and 5")
202 |         if self.touches < 0:
203 |             raise ValueError("Touches must be non-negative")
204 | 
205 | 
206 | @dataclass(frozen=True)
207 | class VolumeProfile:
208 |     """Volume analysis value object."""
209 | 
210 |     current_volume: int
211 |     average_volume: float
212 |     volume_trend: TrendDirection
213 |     unusual_activity: bool
214 | 
215 |     @property
216 |     def relative_volume(self) -> float:
217 |         """Calculate volume relative to average."""
218 |         return (
219 |             self.current_volume / self.average_volume if self.average_volume > 0 else 0
220 |         )
221 | 
222 |     @property
223 |     def is_high_volume(self) -> bool:
224 |         """Check if volume is significantly above average."""
225 |         return self.relative_volume > 1.5
226 | 
227 |     @property
228 |     def is_low_volume(self) -> bool:
229 |         """Check if volume is significantly below average."""
230 |         return self.relative_volume < 0.5
231 | 
```
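
A quick sketch exercising the value objects (the sample prices are illustrative only):

```python
from maverick_mcp.domain.value_objects.technical_indicators import (
    BollingerBands,
    RSIIndicator,
    Signal,
)

rsi = RSIIndicator(value=25.0)  # period defaults to 14
assert rsi.is_oversold
assert rsi.signal is Signal.BUY

bands = BollingerBands(
    upper_band=110.0,
    middle_band=100.0,
    lower_band=90.0,
    current_price=92.0,
)
print(round(bands.percent_b, 2))  # 0.1 -> lower band zone
print(bands.signal)               # Signal.BUY
```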

--------------------------------------------------------------------------------
/maverick_mcp/utils/logging_example.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Example usage of structured logging in Maverick-MCP.
  3 | 
  4 | This file demonstrates how to use the structured logging system
  5 | in different parts of the application.
  6 | """
  7 | 
  8 | import asyncio
  9 | 
 10 | from maverick_mcp.utils.logging import (
 11 |     PerformanceMonitor,
 12 |     get_logger,
 13 |     log_cache_operation,
 14 |     log_database_query,
 15 |     log_external_api_call,
 16 |     setup_structured_logging,
 17 | )
 18 | from maverick_mcp.utils.mcp_logging import LoggingStockDataProvider, with_logging
 19 | 
 20 | 
 21 | async def example_basic_logging():
 22 |     """Example of basic structured logging."""
 23 |     # Get a logger for your module
 24 |     logger = get_logger("maverick_mcp.example")
 25 | 
 26 |     # Log with structured data
 27 |     logger.info(
 28 |         "Processing stock request",
 29 |         extra={"ticker": "AAPL", "action": "fetch_data", "user_id": "user123"},
 30 |     )
 31 | 
 32 |     # Log warnings with context
 33 |     logger.warning(
 34 |         "Rate limit approaching",
 35 |         extra={
 36 |             "current_requests": 95,
 37 |             "limit": 100,
 38 |             "reset_time": "2024-01-15T10:00:00Z",
 39 |         },
 40 |     )
 41 | 
 42 |     # Log errors with full context
 43 |     try:
 44 |         # Some operation that might fail
 45 |         raise ValueError("Invalid ticker symbol")
 46 |     except Exception:
 47 |         logger.error(
 48 |             "Failed to process request",
 49 |             exc_info=True,  # Includes full traceback
 50 |             extra={"ticker": "INVALID", "error_code": "INVALID_TICKER"},
 51 |         )
 52 | 
 53 | 
 54 | async def example_performance_monitoring():
 55 |     """Example of performance monitoring."""
 56 |     # Monitor a code block
 57 |     with PerformanceMonitor("data_processing"):
 58 |         # Simulate some work
 59 |         await asyncio.sleep(0.1)
 60 |         _ = [i**2 for i in range(10000)]  # Creating data for performance test
 61 | 
 62 |     # Monitor nested operations
 63 |     with PerformanceMonitor("full_analysis"):
 64 |         with PerformanceMonitor("fetch_data"):
 65 |             await asyncio.sleep(0.05)
 66 | 
 67 |         with PerformanceMonitor("calculate_indicators"):
 68 |             await asyncio.sleep(0.03)
 69 | 
 70 |         with PerformanceMonitor("generate_report"):
 71 |             await asyncio.sleep(0.02)
 72 | 
 73 | 
 74 | async def example_specialized_logging():
 75 |     """Example of specialized logging functions."""
 76 | 
 77 |     # Log cache operations
 78 |     cache_key = "stock:AAPL:2024-01-01:2024-01-31"
 79 | 
 80 |     # Cache miss
 81 |     log_cache_operation("get", cache_key, hit=False, duration_ms=5)
 82 | 
 83 |     # Cache hit
 84 |     log_cache_operation("get", cache_key, hit=True, duration_ms=2)
 85 | 
 86 |     # Cache set
 87 |     log_cache_operation("set", cache_key, duration_ms=10)
 88 | 
 89 |     # Log database queries
 90 |     query = "SELECT * FROM stocks WHERE ticker = :ticker"
 91 |     params = {"ticker": "AAPL"}
 92 | 
 93 |     log_database_query(query, params, duration_ms=45)
 94 | 
 95 |     # Log external API calls
 96 |     log_external_api_call(
 97 |         service="yfinance",
 98 |         endpoint="/quote/AAPL",
 99 |         method="GET",
100 |         status_code=200,
101 |         duration_ms=150,
102 |     )
103 | 
104 |     # Log API error
105 |     log_external_api_call(
106 |         service="alphavantage",
107 |         endpoint="/time_series",
108 |         method="GET",
109 |         error="Rate limit exceeded",
110 |     )
111 | 
112 | 
113 | # Example FastMCP tool with logging
114 | @with_logging("example_tool")
115 | async def example_mcp_tool(context, ticker: str, period: int = 20):
116 |     """
117 |     Example MCP tool with automatic logging.
118 | 
119 |     The @with_logging decorator automatically logs:
120 |     - Tool invocation with parameters
121 |     - Execution time
122 |     - Success/failure status
123 |     - Context information
124 |     """
125 |     logger = get_logger("maverick_mcp.tools.example")
126 | 
127 |     # Tool-specific logging
128 |     logger.info(
129 |         "Processing advanced analysis",
130 |         extra={"ticker": ticker, "period": period, "analysis_type": "comprehensive"},
131 |     )
132 | 
133 |     # Simulate work with progress reporting
134 |     if hasattr(context, "report_progress"):
135 |         await context.report_progress(50, 100, "Analyzing data...")
136 | 
137 |     # Return results
138 |     return {"ticker": ticker, "period": period, "result": "analysis_complete"}
139 | 
140 | 
141 | # Example of wrapping existing providers with logging
142 | async def example_provider_logging():
143 |     """Example of adding logging to data providers."""
144 |     from maverick_mcp.providers.stock_data import StockDataProvider
145 | 
146 |     # Wrap provider with logging
147 |     base_provider = StockDataProvider()
148 |     logging_provider = LoggingStockDataProvider(base_provider)
149 | 
150 |     # All calls are now automatically logged
151 |     _ = await logging_provider.get_stock_data(
152 |         ticker="AAPL", start_date="2024-01-01", end_date="2024-01-31"
153 |     )
154 | 
155 | 
156 | # Example configuration for different environments
157 | def setup_logging_for_environment(environment: str):
158 |     """Configure logging based on environment."""
159 | 
160 |     if environment == "development":
161 |         setup_structured_logging(
162 |             log_level="DEBUG",
163 |             log_format="text",  # Human-readable
164 |             log_file="dev.log",
165 |         )
166 |     elif environment == "production":
167 |         setup_structured_logging(
168 |             log_level="INFO",
169 |             log_format="json",  # Machine-readable for log aggregation
170 |             log_file="/var/log/maverick_mcp/app.log",
171 |         )
172 |     elif environment == "testing":
173 |         setup_structured_logging(
174 |             log_level="WARNING",
175 |             log_format="json",
176 |             log_file=None,  # Console only
177 |         )
178 | 
179 | 
180 | # Example of custom log analysis
181 | def analyze_logs_example():
182 |     """Example of analyzing structured logs."""
183 |     import json
184 | 
185 |     # Parse JSON logs
186 |     with open("app.log") as f:
187 |         for line in f:
188 |             try:
189 |                 log_entry = json.loads(line)
190 | 
191 |                 # Analyze slow queries
192 |                 if log_entry.get("duration_ms", 0) > 1000:
193 |                     print(
194 |                         f"Slow operation: {log_entry['operation']} - {log_entry['duration_ms']}ms"
195 |                     )
196 | 
197 |                 # Find errors
198 |                 if log_entry.get("level") == "ERROR":
199 |                     print(f"Error: {log_entry['message']} at {log_entry['timestamp']}")
200 | 
201 |                 # Track API usage
202 |                 if log_entry.get("tool_name"):
203 |                     print(
204 |                         f"Tool used: {log_entry['tool_name']} by {log_entry.get('user_id', 'unknown')}"
205 |                     )
206 | 
207 |             except json.JSONDecodeError:
208 |                 continue
209 | 
210 | 
211 | if __name__ == "__main__":
212 |     # Set up logging
213 |     setup_structured_logging(log_level="DEBUG", log_format="json")
214 | 
215 |     # Run examples
216 |     asyncio.run(example_basic_logging())
217 |     asyncio.run(example_performance_monitoring())
218 |     asyncio.run(example_specialized_logging())
219 | 
220 |     print("\nLogging examples completed. Check the console output for structured logs.")
221 | 
```

--------------------------------------------------------------------------------
/tests/test_exception_hierarchy.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Test the consolidated exception hierarchy.
  3 | """
  4 | 
  5 | from maverick_mcp.exceptions import (
  6 |     AuthenticationError,
  7 |     AuthorizationError,
  8 |     CacheConnectionError,
  9 |     CircuitBreakerError,
 10 |     ConfigurationError,
 11 |     ConflictError,
 12 |     DataIntegrityError,
 13 |     DataNotFoundError,
 14 |     DataProviderError,
 15 |     ExternalServiceError,
 16 |     MaverickException,
 17 |     MaverickMCPError,
 18 |     NotFoundError,
 19 |     RateLimitError,
 20 |     ValidationError,
 21 |     WebhookError,
 22 |     get_error_message,
 23 | )
 24 | 
 25 | 
 26 | class TestExceptionHierarchy:
 27 |     """Test the consolidated exception hierarchy."""
 28 | 
 29 |     def test_base_exception(self):
 30 |         """Test base MaverickException."""
 31 |         exc = MaverickException("Test error")
 32 |         assert exc.message == "Test error"
 33 |         assert exc.error_code == "INTERNAL_ERROR"
 34 |         assert exc.status_code == 500
 35 |         assert exc.field is None
 36 |         assert exc.context == {}
 37 |         assert exc.recoverable is True
 38 | 
 39 |     def test_base_exception_with_params(self):
 40 |         """Test base exception with custom parameters."""
 41 |         exc = MaverickException(
 42 |             "Custom error",
 43 |             error_code="CUSTOM_ERROR",
 44 |             status_code=400,
 45 |             field="test_field",
 46 |             context={"key": "value"},
 47 |             recoverable=False,
 48 |         )
 49 |         assert exc.message == "Custom error"
 50 |         assert exc.error_code == "CUSTOM_ERROR"
 51 |         assert exc.status_code == 400
 52 |         assert exc.field == "test_field"
 53 |         assert exc.context == {"key": "value"}
 54 |         assert exc.recoverable is False
 55 | 
 56 |     def test_validation_error(self):
 57 |         """Test ValidationError."""
 58 |         exc = ValidationError("Invalid email format", field="email")
 59 |         assert exc.message == "Invalid email format"
 60 |         assert exc.error_code == "VALIDATION_ERROR"
 61 |         assert exc.status_code == 422
 62 |         assert exc.field == "email"
 63 | 
 64 |     def test_authentication_error(self):
 65 |         """Test AuthenticationError."""
 66 |         exc = AuthenticationError()
 67 |         assert exc.message == "Authentication failed"
 68 |         assert exc.error_code == "AUTHENTICATION_ERROR"
 69 |         assert exc.status_code == 401
 70 | 
 71 |     def test_authorization_error(self):
 72 |         """Test AuthorizationError."""
 73 |         exc = AuthorizationError(resource="portfolio", action="rebalance")
 74 |         assert "Unauthorized access to portfolio for action 'rebalance'" in exc.message
 75 |         assert exc.error_code == "AUTHORIZATION_ERROR"
 76 |         assert exc.status_code == 403
 77 |         assert exc.context["resource"] == "portfolio"
 78 |         assert exc.context["action"] == "rebalance"
 79 | 
 80 |     def test_not_found_error(self):
 81 |         """Test NotFoundError."""
 82 |         exc = NotFoundError("Stock", identifier="AAPL")
 83 |         assert exc.message == "Stock not found: AAPL"
 84 |         assert exc.error_code == "NOT_FOUND"
 85 |         assert exc.status_code == 404
 86 |         assert exc.context["resource"] == "Stock"
 87 |         assert exc.context["identifier"] == "AAPL"
 88 | 
 89 |     def test_rate_limit_error(self):
 90 |         """Test RateLimitError."""
 91 |         exc = RateLimitError(retry_after=60)
 92 |         assert exc.message == "Rate limit exceeded"
 93 |         assert exc.error_code == "RATE_LIMIT_EXCEEDED"
 94 |         assert exc.status_code == 429
 95 |         assert exc.context["retry_after"] == 60
 96 | 
 97 |     def test_external_service_error(self):
 98 |         """Test ExternalServiceError."""
 99 |         exc = ExternalServiceError(
100 |             "MarketDataAPI",
101 |             "Service request failed",
102 |             original_error="Connection timeout",
103 |         )
104 |         assert exc.message == "Service request failed"
105 |         assert exc.error_code == "EXTERNAL_SERVICE_ERROR"
106 |         assert exc.status_code == 503
107 |         assert exc.context["service"] == "MarketDataAPI"
108 |         assert exc.context["original_error"] == "Connection timeout"
109 | 
110 |     def test_data_provider_error(self):
111 |         """Test DataProviderError."""
112 |         exc = DataProviderError("yfinance", "API request failed")
113 |         assert exc.message == "API request failed"
114 |         assert exc.error_code == "DATA_PROVIDER_ERROR"
115 |         assert exc.status_code == 503
116 |         assert exc.context["provider"] == "yfinance"
117 | 
118 |     def test_data_not_found_error(self):
119 |         """Test DataNotFoundError."""
120 |         exc = DataNotFoundError("AAPL", date_range=("2024-01-01", "2024-01-31"))
121 |         assert "Data not found for symbol 'AAPL'" in exc.message
122 |         assert "in range 2024-01-01 to 2024-01-31" in exc.message
123 |         assert exc.error_code == "DATA_NOT_FOUND"
124 |         assert exc.status_code == 404
125 |         assert exc.context["symbol"] == "AAPL"
126 |         assert exc.context["date_range"] == ("2024-01-01", "2024-01-31")
127 | 
128 |     def test_exception_to_dict(self):
129 |         """Test exception to_dict method."""
130 |         exc = ValidationError("Invalid value", field="age")
131 |         exc.context["min_value"] = 0
132 |         exc.context["max_value"] = 120
133 | 
134 |         result = exc.to_dict()
135 |         assert result == {
136 |             "code": "VALIDATION_ERROR",
137 |             "message": "Invalid value",
138 |             "field": "age",
139 |             "context": {"min_value": 0, "max_value": 120},
140 |         }
141 | 
142 |     def test_backward_compatibility(self):
143 |         """Test backward compatibility alias."""
144 |         assert MaverickMCPError is MaverickException
145 | 
146 |         # Old code should still work
147 |         exc = MaverickMCPError("Legacy error")
148 |         assert isinstance(exc, MaverickException)
149 |         assert exc.message == "Legacy error"
150 | 
151 |     def test_get_error_message(self):
152 |         """Test error message lookup."""
153 |         assert get_error_message("VALIDATION_ERROR") == "Request validation failed"
154 |         assert get_error_message("NOT_FOUND") == "Resource not found"
155 |         assert get_error_message("UNKNOWN_CODE") == "Unknown error"
156 | 
157 |     def test_inheritance_chain(self):
158 |         """Test that all custom exceptions inherit from MaverickException."""
159 |         exceptions = [
160 |             ValidationError("test"),
161 |             AuthenticationError(),
162 |             AuthorizationError(),
163 |             NotFoundError("test"),
164 |             ConflictError("test"),
165 |             RateLimitError(),
166 |             ExternalServiceError("test", "test"),
167 |             DataProviderError("test", "test"),
168 |             DataNotFoundError("test"),
169 |             DataIntegrityError("test"),
170 |             CacheConnectionError("test", "test"),
171 |             ConfigurationError("test"),
172 |             WebhookError("test"),
173 |             CircuitBreakerError("test", 5, 10),
174 |         ]
175 | 
176 |         for exc in exceptions:
177 |             assert isinstance(exc, MaverickException)
178 |             assert hasattr(exc, "error_code")
179 |             assert hasattr(exc, "status_code")
180 |             assert hasattr(exc, "message")
181 |             assert hasattr(exc, "context")
182 |             assert hasattr(exc, "to_dict")
183 | 
```
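
For orientation, the contract these tests pin down is easiest to see at an API boundary. The sketch below is illustrative only: the import path `maverick_mcp.exceptions` is an assumption (the module path is not shown on this page), while the constructor arguments, status codes, and the `to_dict()` shape all come from the assertions above.

```python
# Hedged sketch: turning a MaverickException into an HTTP-style error payload.
# The import path is assumed; the rest mirrors the tested contract.
from maverick_mcp.exceptions import MaverickException, ValidationError


def validate_age(age: int) -> int:
    if not 0 <= age <= 120:
        raise ValidationError("Invalid value", field="age")
    return age


def to_http_error(exc: MaverickException) -> tuple[int, dict]:
    """Build a (status_code, body) pair from the exception's to_dict()."""
    return exc.status_code, {"error": exc.to_dict()}


try:
    validate_age(150)
except MaverickException as exc:
    status, body = to_http_error(exc)
    print(status)  # 422
    print(body["error"]["code"])  # "VALIDATION_ERROR"
```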

--------------------------------------------------------------------------------
/maverick_mcp/langchain_tools/adapters.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Adapters to convert MCP tools to LangChain tools.
  3 | """
  4 | 
  5 | import inspect
  6 | import logging
  7 | from collections.abc import Callable
  8 | from typing import Any
  9 | 
 10 | from langchain_core.tools import BaseTool, StructuredTool
 11 | from pydantic import BaseModel, Field, create_model
 12 | 
 13 | logger = logging.getLogger(__name__)
 14 | 
 15 | 
 16 | def extract_tool_schema(func: Callable) -> type[BaseModel]:
 17 |     """
 18 |     Extract parameter schema from a function's signature and annotations.
 19 | 
 20 |     Args:
 21 |         func: Function to extract schema from
 22 | 
 23 |     Returns:
 24 |         Pydantic model representing the function's parameters
 25 |     """
 26 |     sig = inspect.signature(func)
 27 |     fields = {}
 28 | 
 29 |     for param_name, param in sig.parameters.items():
 30 |         if param_name in ["self", "cls"]:
 31 |             continue
 32 | 
 33 |         # Get type annotation
 34 |         param_type = (
 35 |             param.annotation if param.annotation != inspect.Parameter.empty else Any
 36 |         )
 37 | 
 38 |         # Get default value
 39 |         default = ... if param.default == inspect.Parameter.empty else param.default
 40 | 
 41 |         # Extract description from docstring if available
 42 |         description = f"Parameter {param_name}"
 43 |         if func.__doc__:
 44 |             # Simple extraction - could be improved with proper docstring parsing
 45 |             lines = func.__doc__.split("\n")
 46 |             for line in lines:
 47 |                 if param_name in line and ":" in line:
 48 |                     description = line.split(":", 1)[1].strip()
 49 |                     break
 50 | 
 51 |         fields[param_name] = (
 52 |             param_type,
 53 |             Field(default=default, description=description),
 54 |         )
 55 | 
 56 |     # Create dynamic model
 57 |     model_name = f"{func.__name__.title()}Schema"
 58 |     return create_model(model_name, **fields)
 59 | 
 60 | 
 61 | def mcp_to_langchain_adapter(
 62 |     mcp_tool: Callable,
 63 |     name: str | None = None,
 64 |     description: str | None = None,
 65 |     args_schema: type[BaseModel] | None = None,
 66 |     return_direct: bool = False,
 67 |     persona_aware: bool = False,
 68 | ) -> StructuredTool:
 69 |     """
 70 |     Convert an MCP tool function to a LangChain StructuredTool.
 71 | 
 72 |     Args:
 73 |         mcp_tool: The MCP tool function to convert
 74 |         name: Optional custom name for the tool
 75 |         description: Optional custom description
 76 |         args_schema: Optional Pydantic model for arguments
 77 |         return_direct: Whether to return tool output directly
 78 |         persona_aware: Whether this tool should be persona-aware
 79 | 
 80 |     Returns:
 81 |         LangChain StructuredTool
 82 |     """
 83 |     # Extract metadata
 84 |     tool_name = name or mcp_tool.__name__
 85 |     tool_description = description or mcp_tool.__doc__ or f"Tool: {tool_name}"
 86 | 
 87 |     # Extract or use provided schema
 88 |     if args_schema is None:
 89 |         args_schema = extract_tool_schema(mcp_tool)
 90 | 
 91 |     # Create wrapper function to handle any necessary conversions
 92 |     async def async_wrapper(**kwargs):
 93 |         """Async wrapper for MCP tool."""
 94 |         try:
 95 |             result = await mcp_tool(**kwargs)
 96 |             return _format_tool_result(result)
 97 |         except Exception as e:
 98 |             logger.error(f"Error in tool {tool_name}: {str(e)}")
 99 |             return {"error": str(e), "status": "error"}
100 | 
101 |     def sync_wrapper(**kwargs):
102 |         """Sync wrapper for MCP tool."""
103 |         try:
104 |             result = mcp_tool(**kwargs)
105 |             return _format_tool_result(result)
106 |         except Exception as e:
107 |             logger.error(f"Error in tool {tool_name}: {str(e)}")
108 |             return {"error": str(e), "status": "error"}
109 | 
110 |     # Determine if tool is async
111 |     is_async = inspect.iscoroutinefunction(mcp_tool)
112 | 
113 |     # Create the structured tool
114 |     if is_async:
115 |         tool = StructuredTool(
116 |             name=tool_name,
117 |             description=tool_description,
118 |             coroutine=async_wrapper,
119 |             args_schema=args_schema,
120 |             return_direct=return_direct,
121 |         )
122 |     else:
123 |         tool = StructuredTool(
124 |             name=tool_name,
125 |             description=tool_description,
126 |             func=sync_wrapper,
127 |             args_schema=args_schema,
128 |             return_direct=return_direct,
129 |         )
130 | 
131 |     # Mark if persona-aware
132 |     if persona_aware:
133 |         tool.metadata = {"persona_aware": True}
134 | 
135 |     return tool
136 | 
137 | 
138 | def _format_tool_result(result: Any) -> str | dict[str, Any]:
139 |     """
140 |     Format tool result for LangChain compatibility.
141 | 
142 |     Args:
143 |         result: Raw tool result
144 | 
145 |     Returns:
146 |         Formatted result
147 |     """
148 |     if isinstance(result, dict):
149 |         return result
150 |     elif isinstance(result, str):
151 |         return result
152 |     elif hasattr(result, "dict"):
153 |         # Pydantic model
154 |         return result.dict()
155 |     else:
156 |         # Convert to string as fallback
157 |         return str(result)
158 | 
159 | 
160 | def create_langchain_tool(
161 |     func: Callable | None = None,
162 |     *,
163 |     name: str | None = None,
164 |     description: str | None = None,
165 |     args_schema: type[BaseModel] | None = None,
166 |     return_direct: bool = False,
167 |     persona_aware: bool = False,
168 | ):
169 |     """
170 |     Decorator to create a LangChain tool from a function.
171 | 
172 |     Usage:
173 |         @create_langchain_tool(name="stock_screener", persona_aware=True)
174 |         def screen_stocks(strategy: str, limit: int = 20) -> dict:
175 |             ...
176 |     """
177 | 
178 |     def decorator(f: Callable) -> StructuredTool:
179 |         return mcp_to_langchain_adapter(
180 |             f,
181 |             name=name,
182 |             description=description,
183 |             args_schema=args_schema,
184 |             return_direct=return_direct,
185 |             persona_aware=persona_aware,
186 |         )
187 | 
188 |     if func is None:
189 |         return decorator
190 |     else:
191 |         return decorator(func)
192 | 
193 | 
194 | # Example persona-aware tool wrapper
195 | class PersonaAwareToolWrapper(BaseTool):
196 |     """Wrapper to make any tool persona-aware."""
197 | 
198 |     wrapped_tool: BaseTool
199 |     persona_adjuster: Callable | None = None
200 |     persona: str | None = None
201 | 
202 |     def __init__(
203 |         self,
204 |         tool: BaseTool,
205 |         adjuster: Callable | None = None,
206 |         persona: str | None = None,
207 |     ):
208 |         # Declared pydantic fields must be passed through __init__;
209 |         # assigning them after construction would fail validation for
210 |         # the required wrapped_tool field.
211 |         super().__init__(
212 |             name=f"persona_aware_{tool.name}",
213 |             description=tool.description,
214 |             wrapped_tool=tool,
215 |             persona_adjuster=adjuster,
216 |             persona=persona,
217 |         )
218 | 
219 |     def _run(self, *args, **kwargs):
220 |         """Run tool with persona adjustments."""
221 |         # Apply adjustments only when both an adjuster and a persona are set
222 |         if self.persona_adjuster and self.persona:
223 |             kwargs = self.persona_adjuster(kwargs, self.persona)
224 | 
225 |         return self.wrapped_tool._run(*args, **kwargs)
226 | 
227 |     async def _arun(self, *args, **kwargs):
228 |         """Async run tool with persona adjustments."""
229 |         # Apply adjustments only when both an adjuster and a persona are set
230 |         if self.persona_adjuster and self.persona:
231 |             kwargs = self.persona_adjuster(kwargs, self.persona)
232 | 
233 |         return await self.wrapped_tool._arun(*args, **kwargs)
234 | 
```
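
A short usage sketch for `mcp_to_langchain_adapter`. The `get_rsi` function is hypothetical (not part of this repository); the adapter behavior shown (name override, metadata marking, and the auto-generated args schema supplying defaults) follows the code above.

```python
from maverick_mcp.langchain_tools.adapters import mcp_to_langchain_adapter


def get_rsi(symbol: str, period: int = 14) -> dict:
    """Return an RSI snapshot (hypothetical example tool).

    Args:
        symbol: Stock ticker symbol
        period: Lookback window in trading days
    """
    return {"symbol": symbol, "period": period, "rsi": 52.3}  # placeholder value


rsi_tool = mcp_to_langchain_adapter(get_rsi, name="rsi", persona_aware=True)

print(rsi_tool.name)  # "rsi"
print(rsi_tool.metadata)  # {"persona_aware": True}
# StructuredTool is a Runnable, so it can be invoked directly; the schema
# extracted from get_rsi's signature fills in period=14 by default.
print(rsi_tool.invoke({"symbol": "AAPL"}))
```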

--------------------------------------------------------------------------------
/maverick_mcp/logging_config.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Structured logging configuration with correlation IDs and error tracking.
  3 | """
  4 | 
  5 | import json
  6 | import logging
  7 | import sys
  8 | import traceback
  9 | import uuid
 10 | from contextvars import ContextVar
 11 | from datetime import UTC, datetime
 12 | from functools import wraps
 13 | from typing import Any
 14 | 
 15 | # Context variable for correlation ID
 16 | correlation_id_var: ContextVar[str | None] = ContextVar("correlation_id", default=None)  # type: ignore[assignment]
 17 | 
 18 | 
 19 | class StructuredFormatter(logging.Formatter):
 20 |     """JSON formatter for structured logging."""
 21 | 
 22 |     def format(self, record: logging.LogRecord) -> str:
 23 |         """Format log record as JSON with additional metadata."""
 24 |         # Get correlation ID from context
 25 |         correlation_id = correlation_id_var.get()
 26 | 
 27 |         # Build structured log entry
 28 |         log_entry = {
 29 |             "timestamp": datetime.now().isoformat(),
 30 |             "level": record.levelname,
 31 |             "logger": record.name,
 32 |             "message": record.getMessage(),
 33 |             "correlation_id": correlation_id,
 34 |             "module": record.module,
 35 |             "function": record.funcName,
 36 |             "line": record.lineno,
 37 |         }
 38 | 
 39 |         # Add exception info if present
 40 |         if record.exc_info:
 41 |             log_entry["exception"] = {
 42 |                 "type": record.exc_info[0].__name__,
 43 |                 "message": str(record.exc_info[1]),
 44 |                 "traceback": traceback.format_exception(*record.exc_info),
 45 |             }
 46 | 
 47 |         # Add extra fields
 48 |         for key, value in record.__dict__.items():
 49 |             if key not in [
 50 |                 "name",
 51 |                 "msg",
 52 |                 "args",
 53 |                 "created",
 54 |                 "filename",
 55 |                 "funcName",
 56 |                 "levelname",
 57 |                 "levelno",
 58 |                 "lineno",
 59 |                 "module",
 60 |                 "msecs",
 61 |                 "message",
 62 |                 "pathname",
 63 |                 "process",
 64 |                 "processName",
 65 |                 "relativeCreated",
 66 |                 "thread",
 67 |                 "threadName",
 68 |                 "exc_info",
 69 |                 "exc_text",
 70 |                 "stack_info",
 71 |             ]:
 72 |                 log_entry[key] = value
 73 | 
 74 |         return json.dumps(log_entry)
 75 | 
 76 | 
 77 | class CorrelationIDMiddleware:
 78 |     """Middleware to inject correlation IDs into requests."""
 79 | 
 80 |     @staticmethod
 81 |     def generate_correlation_id() -> str:
 82 |         """Generate a unique correlation ID."""
 83 |         return f"mcp-{uuid.uuid4().hex[:8]}"
 84 | 
 85 |     @staticmethod
 86 |     def set_correlation_id(correlation_id: str | None = None) -> str:
 87 |         """Set correlation ID in context."""
 88 |         if not correlation_id:
 89 |             correlation_id = CorrelationIDMiddleware.generate_correlation_id()
 90 |         correlation_id_var.set(correlation_id)
 91 |         return correlation_id
 92 | 
 93 |     @staticmethod
 94 |     def get_correlation_id() -> str | None:
 95 |         """Get current correlation ID from context."""
 96 |         return correlation_id_var.get()
 97 | 
 98 | 
 99 | def with_correlation_id(func):
100 |     """Decorator to ensure correlation ID exists for function execution."""
101 | 
102 |     @wraps(func)
103 |     def wrapper(*args, **kwargs):
104 |         if not correlation_id_var.get():
105 |             CorrelationIDMiddleware.set_correlation_id()
106 |         return func(*args, **kwargs)
107 | 
108 |     @wraps(func)
109 |     async def async_wrapper(*args, **kwargs):
110 |         if not correlation_id_var.get():
111 |             CorrelationIDMiddleware.set_correlation_id()
112 |         return await func(*args, **kwargs)
113 | 
114 |     return async_wrapper if asyncio.iscoroutinefunction(func) else wrapper
115 | 
116 | 
117 | class ErrorLogger:
118 |     """Enhanced error logging with context and metrics."""
119 | 
120 |     def __init__(self, logger: logging.Logger):
121 |         self.logger = logger
122 |         self._error_counts: dict[str, int] = {}
123 | 
124 |     def log_error(
125 |         self,
126 |         error: Exception,
127 |         context: dict[str, Any],
128 |         level: int = logging.ERROR,
129 |         mask_sensitive: bool = True,
130 |     ):
131 |         """Log error with full context and tracking."""
132 |         error_type = type(error).__name__
133 |         self._error_counts[error_type] = self._error_counts.get(error_type, 0) + 1
134 | 
135 |         # Mask sensitive data if requested
136 |         if mask_sensitive:
137 |             context = self._mask_sensitive_data(context)
138 | 
139 |         # Create structured error log
140 |         self.logger.log(
141 |             level,
142 |             f"{error_type}: {str(error)}",
143 |             extra={
144 |                 "error_type": error_type,
145 |                 "error_message": str(error),
146 |                 "error_count": self._error_counts[error_type],
147 |                 "context": context,
148 |                 "stack_trace": traceback.format_exc() if sys.exc_info()[0] else None,
149 |             },
150 |         )
151 | 
152 |     def _mask_sensitive_data(self, data: dict[str, Any]) -> dict[str, Any]:
153 |         """Mask sensitive fields in logging data."""
154 |         sensitive_fields = {
155 |             "password",
156 |             "token",
157 |             "api_key",
158 |             "secret",
159 |             "card_number",
160 |             "ssn",
161 |             "email",
162 |             "phone",
163 |             "address",
164 |             "bearer",
165 |             "authorization",
166 |             "x-api-key",
167 |         }
168 | 
169 |         masked_data = {}
170 |         for key, value in data.items():
171 |             if any(sensitive in key.lower() for sensitive in sensitive_fields):
172 |                 masked_data[key] = "***MASKED***"
173 |             elif isinstance(value, dict):
174 |                 masked_data[key] = self._mask_sensitive_data(value)
175 |             else:
176 |                 masked_data[key] = value
177 | 
178 |         return masked_data
179 | 
180 |     def get_error_stats(self) -> dict[str, int]:
181 |         """Get error count statistics."""
182 |         return self._error_counts.copy()
183 | 
184 | 
185 | def setup_logging(
186 |     level: int = logging.INFO, use_json: bool = True, log_file: str | None = None
187 | ):
188 |     """Configure application logging with structured output."""
189 |     root_logger = logging.getLogger()
190 |     root_logger.setLevel(level)
191 | 
192 |     # Remove existing handlers
193 |     for handler in root_logger.handlers[:]:
194 |         root_logger.removeHandler(handler)
195 | 
196 |     # Console handler
197 |     console_handler = logging.StreamHandler(sys.stdout)
198 |     if use_json:
199 |         console_handler.setFormatter(StructuredFormatter())
200 |     else:
201 |         console_handler.setFormatter(
202 |             logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
203 |         )
204 |     root_logger.addHandler(console_handler)
205 | 
206 |     # File handler if specified
207 |     if log_file:
208 |         file_handler = logging.FileHandler(log_file)
209 |         file_handler.setFormatter(StructuredFormatter())
210 |         root_logger.addHandler(file_handler)
211 | 
212 |     # Set specific logger levels
213 |     logging.getLogger("urllib3").setLevel(logging.WARNING)
214 |     logging.getLogger("requests").setLevel(logging.WARNING)
215 |     logging.getLogger("httpx").setLevel(logging.WARNING)
216 | 
217 |     return root_logger
218 | 
219 | 
220 | # asyncio is imported at module bottom; this is safe because
221 | # with_correlation_id only dereferences it at call time, after the
222 | # module has fully loaded. asyncio is stdlib, so the import cannot
223 | # fail and needs no ImportError guard.
224 | import asyncio  # noqa: E402
225 | 
```
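
A minimal end-to-end sketch of this module: configure JSON output, pin a correlation ID, and route an exception through `ErrorLogger` so that sensitive context keys are masked. All names come from the module above.

```python
import logging

from maverick_mcp.logging_config import (
    CorrelationIDMiddleware,
    ErrorLogger,
    setup_logging,
)

setup_logging(level=logging.INFO, use_json=True)
logger = logging.getLogger("maverick_mcp.example")

# Every log line emitted in this context now carries the same correlation_id.
CorrelationIDMiddleware.set_correlation_id()  # e.g. "mcp-1a2b3c4d"
logger.info("fetching quotes", extra={"symbol": "AAPL"})

error_logger = ErrorLogger(logger)
try:
    raise ValueError("bad ticker")
except ValueError as exc:
    # "api_key" matches the sensitive-field list, so it is logged as ***MASKED***
    error_logger.log_error(exc, context={"symbol": "AAPL", "api_key": "secret"})

print(error_logger.get_error_stats())  # {"ValueError": 1}
```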

--------------------------------------------------------------------------------
/maverick_mcp/api/services/market_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Market data service for MaverickMCP API.
  3 | 
  4 | Handles market overview, economic calendar, and market-related data operations.
  5 | Extracted from server.py to improve code organization and maintainability.
  6 | """
  7 | 
  8 | from typing import Any
  9 | 
 10 | from .base_service import BaseService
 11 | 
 12 | 
 13 | class MarketService(BaseService):
 14 |     """
 15 |     Service class for market data operations.
 16 | 
 17 |     Provides market overview, economic calendar, and related market data functionality.
 18 |     """
 19 | 
 20 |     def register_tools(self):
 21 |         """Register market data tools with MCP."""
 22 | 
 23 |         @self.mcp.tool()
 24 |         async def get_market_overview() -> dict[str, Any]:
 25 |             """
 26 |             Get comprehensive market overview including major indices, sectors, and market statistics.
 27 | 
 28 |             Provides real-time market data for major indices (S&P 500, NASDAQ, Dow Jones),
 29 |             sector performance, market breadth indicators, and key market statistics.
 30 |             Enhanced features available for authenticated users.
 31 | 
 32 |             Returns:
 33 |                 Dictionary containing comprehensive market overview data
 34 |             """
 35 |             return await self._get_market_overview()
 36 | 
 37 |         @self.mcp.tool()
 38 |         async def get_economic_calendar(days_ahead: int = 7) -> dict[str, Any]:
 39 |             """
 40 |             Get upcoming economic events and earnings announcements.
 41 | 
 42 |             Args:
 43 |                 days_ahead: Number of days ahead to fetch events (1-30, default: 7)
 44 | 
 45 |             Returns:
 46 |                 Dictionary containing economic calendar data with upcoming events
 47 |             """
 48 |             return await self._get_economic_calendar(days_ahead)
 49 | 
 50 |     async def _get_market_overview(self) -> dict[str, Any]:
 51 |         """Get market overview implementation."""
 52 |         try:
 53 |             from maverick_mcp.providers.market_data import MarketDataProvider
 54 | 
 55 |             market_provider = MarketDataProvider()
 56 | 
 57 |             # Get major indices
 58 |             indices_data = await market_provider.get_major_indices_async()
 59 | 
 60 |             # Get sector performance
 61 |             sector_data = await market_provider.get_sector_performance_async()
 62 | 
 63 |             # Get market breadth indicators
 64 |             breadth_data = await market_provider.get_market_breadth_async()
 65 | 
 66 |             # Get top movers
 67 |             movers_data = await market_provider.get_top_movers_async()
 68 | 
 69 |             overview = {
 70 |                 "timestamp": self._get_current_timestamp(),
 71 |                 "market_status": "open",  # This would be determined by market hours
 72 |                 "indices": indices_data,
 73 |                 "sectors": sector_data,
 74 |                 "market_breadth": breadth_data,
 75 |                 "top_movers": movers_data,
 76 |                 "market_sentiment": {
 77 |                     "fear_greed_index": 45,  # Placeholder - would integrate with actual data
 78 |                     "vix": 18.5,
 79 |                     "put_call_ratio": 0.85,
 80 |                 },
 81 |                 "economic_highlights": [
 82 |                     "Fed meeting next week",
 83 |                     "Earnings season continues",
 84 |                     "GDP data released",
 85 |                 ],
 86 |             }
 87 | 
 88 |             self.log_tool_usage("get_market_overview")
 89 | 
 90 |             return overview
 91 | 
 92 |         except Exception as e:
 93 |             self.logger.error(f"Failed to get market overview: {e}")
 94 |             return {
 95 |                 "error": f"Failed to fetch market overview: {str(e)}",
 96 |                 "timestamp": self._get_current_timestamp(),
 97 |             }
 98 | 
 99 |     async def _get_economic_calendar(self, days_ahead: int = 7) -> dict[str, Any]:
100 |         """Get economic calendar implementation."""
101 |         # Validate input
102 |         if not isinstance(days_ahead, int) or days_ahead < 1 or days_ahead > 30:
103 |             return {
104 |                 "error": "days_ahead must be an integer between 1 and 30",
105 |                 "provided_value": days_ahead,
106 |             }
107 | 
108 |         try:
109 |             from datetime import UTC, datetime, timedelta
110 | 
111 |             from maverick_mcp.providers.market_data import MarketDataProvider
112 | 
113 |             market_provider = MarketDataProvider()
114 | 
115 |             # Calculate date range
116 |             start_date = datetime.now(UTC)
117 |             end_date = start_date + timedelta(days=days_ahead)
118 | 
119 |             # Get economic events
120 |             economic_events = await market_provider.get_economic_events_async(
121 |                 start_date=start_date.strftime("%Y-%m-%d"),
122 |                 end_date=end_date.strftime("%Y-%m-%d"),
123 |             )
124 | 
125 |             # Get earnings calendar
126 |             earnings_events = await market_provider.get_earnings_calendar_async(
127 |                 start_date=start_date.strftime("%Y-%m-%d"),
128 |                 end_date=end_date.strftime("%Y-%m-%d"),
129 |             )
130 | 
131 |             calendar_data = {
132 |                 "period": {
133 |                     "start_date": start_date.strftime("%Y-%m-%d"),
134 |                     "end_date": end_date.strftime("%Y-%m-%d"),
135 |                     "days_ahead": days_ahead,
136 |                 },
137 |                 "economic_events": economic_events,
138 |                 "earnings_events": earnings_events,
139 |                 "key_highlights": self._extract_key_highlights(
140 |                     economic_events, earnings_events
141 |                 ),
142 |                 "timestamp": self._get_current_timestamp(),
143 |             }
144 | 
145 |             self.log_tool_usage("get_economic_calendar", days_ahead=days_ahead)
146 | 
147 |             return calendar_data
148 | 
149 |         except Exception as e:
150 |             self.logger.error(f"Failed to get economic calendar: {e}")
151 |             return {
152 |                 "error": f"Failed to fetch economic calendar: {str(e)}",
153 |                 "days_ahead": days_ahead,
154 |                 "timestamp": self._get_current_timestamp(),
155 |             }
156 | 
157 |     def _extract_key_highlights(
158 |         self, economic_events: list, earnings_events: list
159 |     ) -> list[str]:
160 |         """
161 |         Extract key highlights from economic and earnings events.
162 | 
163 |         Args:
164 |             economic_events: List of economic events
165 |             earnings_events: List of earnings events
166 | 
167 |         Returns:
168 |             List of key highlight strings
169 |         """
170 |         highlights = []
171 | 
172 |         # Extract high-impact economic events
173 |         high_impact_events = [
174 |             event
175 |             for event in economic_events
176 |             if event.get("impact", "").lower() in ["high", "critical"]
177 |         ]
178 | 
179 |         for event in high_impact_events[:3]:  # Top 3 high-impact events
180 |             highlights.append(
181 |                 f"{event.get('name', 'Economic event')} - {event.get('date', 'TBD')}"
182 |             )
183 | 
184 |         # Extract major earnings announcements
185 |         major_earnings = [
186 |             event
187 |             for event in earnings_events
188 |             if event.get("market_cap", 0) > 100_000_000_000  # $100B+ companies
189 |         ]
190 | 
191 |         for event in major_earnings[:2]:  # Top 2 major earnings
192 |             highlights.append(
193 |                 f"{event.get('symbol', 'Unknown')} earnings - {event.get('date', 'TBD')}"
194 |             )
195 | 
196 |         return highlights
197 | 
198 |     def _get_current_timestamp(self) -> str:
199 |         """Get current timestamp in ISO format."""
200 |         from datetime import UTC, datetime
201 | 
202 |         return datetime.now(UTC).isoformat()
203 | 
```
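
`BaseService` itself is defined in `base_service.py`, which is not on this page, so the construction below assumes it accepts the FastMCP instance (consistent with the `self.mcp.tool()` usage above). The sketch exercises the input-validation path of the calendar helper, which returns before any provider call is made.

```python
import asyncio

from fastmcp import FastMCP

from maverick_mcp.api.services.market_service import MarketService

# Assumption: BaseService(mcp) constructor signature.
service = MarketService(FastMCP("maverick-demo"))

# Out-of-range values are rejected before any market data is fetched.
result = asyncio.run(service._get_economic_calendar(days_ahead=45))
print(result)
# {'error': 'days_ahead must be an integer between 1 and 30', 'provided_value': 45}
```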

--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/connection_manager.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Enhanced connection management for FastMCP server stability.
  3 | 
  4 | Provides session persistence, connection monitoring, and tool registration consistency
  5 | to prevent tools from disappearing in Claude Desktop.
  6 | """
  7 | 
  8 | import asyncio
  9 | import logging
 10 | import time
 11 | import uuid
 12 | from typing import Any
 13 | 
 14 | from fastmcp import FastMCP
 15 | 
 16 | logger = logging.getLogger(__name__)
 17 | 
 18 | 
 19 | class ConnectionSession:
 20 |     """Represents a single MCP connection session."""
 21 | 
 22 |     def __init__(self, session_id: str):
 23 |         self.session_id = session_id
 24 |         self.created_at = time.time()
 25 |         self.last_activity = time.time()
 26 |         self.tools_registered = False
 27 |         self.is_active = True
 28 | 
 29 |     def update_activity(self):
 30 |         """Update last activity timestamp."""
 31 |         self.last_activity = time.time()
 32 | 
 33 |     def is_expired(self, timeout: float = 300.0) -> bool:
 34 |         """Check if session is expired (default 5 minutes)."""
 35 |         return time.time() - self.last_activity > timeout
 36 | 
 37 | 
 38 | class MCPConnectionManager:
 39 |     """
 40 |     Enhanced connection manager for FastMCP server stability.
 41 | 
 42 |     Features:
 43 |     - Single connection initialization pattern
 44 |     - Session persistence across reconnections
 45 |     - Tool registration consistency
 46 |     - Connection monitoring and debugging
 47 |     - Automatic cleanup of stale sessions
 48 |     """
 49 | 
 50 |     def __init__(self, mcp_server: FastMCP):
 51 |         self.mcp_server = mcp_server
 52 |         self.active_sessions: dict[str, ConnectionSession] = {}
 53 |         self.tools_registered = False
 54 |         self.initialization_lock = asyncio.Lock()
 55 |         self._cleanup_task: asyncio.Task | None = None
 56 | 
 57 |         # Connection monitoring
 58 |         self.connection_count = 0
 59 |         self.total_connections = 0
 60 |         self.failed_connections = 0
 61 | 
 62 |         logger.info("MCP Connection Manager initialized")
 63 | 
 64 |     async def handle_new_connection(self, session_id: str | None = None) -> str:
 65 |         """
 66 |         Handle new MCP connection with single initialization pattern.
 67 | 
 68 |         Args:
 69 |             session_id: Optional session ID, generates new one if not provided
 70 | 
 71 |         Returns:
 72 |             Session ID for the connection
 73 |         """
 74 |         if session_id is None:
 75 |             session_id = str(uuid.uuid4())
 76 | 
 77 |         async with self.initialization_lock:
 78 |             # Create new session
 79 |             session = ConnectionSession(session_id)
 80 |             self.active_sessions[session_id] = session
 81 |             self.connection_count += 1
 82 |             self.total_connections += 1
 83 | 
 84 |             logger.info(
 85 |                 f"New MCP connection: {session_id[:8]} "
 86 |                 f"(active: {self.connection_count}, total: {self.total_connections})"
 87 |             )
 88 | 
 89 |             # Ensure tools are registered only once
 90 |             if not self.tools_registered:
 91 |                 await self._register_tools_once()
 92 |                 self.tools_registered = True
 93 |                 session.tools_registered = True
 94 |                 logger.info("Tools registered for first connection")
 95 |             else:
 96 |                 logger.info("Tools already registered, skipping registration")
 97 | 
 98 |             # Start cleanup task if not already running
 99 |             if self._cleanup_task is None or self._cleanup_task.done():
100 |                 self._cleanup_task = asyncio.create_task(self._cleanup_loop())
101 | 
102 |         return session_id
103 | 
104 |     async def handle_connection_close(self, session_id: str):
105 |         """Handle connection close event."""
106 |         if session_id in self.active_sessions:
107 |             session = self.active_sessions[session_id]
108 |             session.is_active = False
109 |             self.connection_count = max(0, self.connection_count - 1)
110 | 
111 |             logger.info(
112 |                 f"Connection closed: {session_id[:8]} (active: {self.connection_count})"
113 |             )
114 | 
115 |             # Remove session after delay to handle quick reconnections
116 |             await asyncio.sleep(5.0)
117 |             self.active_sessions.pop(session_id, None)
118 | 
119 |     async def update_session_activity(self, session_id: str):
120 |         """Update session activity timestamp."""
121 |         if session_id in self.active_sessions:
122 |             self.active_sessions[session_id].update_activity()
123 | 
124 |     async def _register_tools_once(self):
125 |         """Register tools only once to prevent conflicts."""
126 |         try:
127 |             from maverick_mcp.api.routers.tool_registry import register_all_router_tools
128 | 
129 |             register_all_router_tools(self.mcp_server)
130 |             logger.info("Successfully registered all MCP tools")
131 |         except Exception as e:
132 |             logger.error(f"Failed to register tools: {e}")
133 |             self.failed_connections += 1
134 |             raise
135 | 
136 |     async def _cleanup_loop(self):
137 |         """Background cleanup of expired sessions."""
138 |         while True:
139 |             try:
140 |                 await asyncio.sleep(60)  # Check every minute
141 |                 # Gather sessions whose inactivity exceeded the timeout
142 |                 expired_sessions = [
143 |                     sid
144 |                     for sid, session in self.active_sessions.items()
145 |                     if session.is_expired()
146 |                 ]
147 | 
148 |                 for session_id in expired_sessions:
149 |                     logger.info(f"Cleaning up expired session: {session_id[:8]}")
150 |                     self.active_sessions.pop(session_id, None)
151 | 
152 |             except asyncio.CancelledError:
153 |                 break
154 |             except Exception as e:
155 |                 logger.error(f"Error in cleanup loop: {e}")
156 | 
157 |     def get_connection_status(self) -> dict[str, Any]:
158 |         """Get current connection status for monitoring."""
159 |         active_count = sum(1 for s in self.active_sessions.values() if s.is_active)
160 | 
161 |         return {
162 |             "active_connections": active_count,
163 |             "total_sessions": len(self.active_sessions),
164 |             "total_connections": self.total_connections,
165 |             "failed_connections": self.failed_connections,
166 |             "tools_registered": self.tools_registered,
167 |             "sessions": [
168 |                 {
169 |                     "id": sid[:8],
170 |                     "active": session.is_active,
171 |                     "age_seconds": time.time() - session.created_at,
172 |                     "last_activity": time.time() - session.last_activity,
173 |                 }
174 |                 for sid, session in self.active_sessions.items()
175 |             ],
176 |         }
177 | 
178 |     async def shutdown(self):
179 |         """Cleanup on server shutdown."""
180 |         if self._cleanup_task:
181 |             self._cleanup_task.cancel()
182 |             try:
183 |                 await self._cleanup_task
184 |             except asyncio.CancelledError:
185 |                 pass
186 | 
187 |         logger.info("MCP Connection Manager shutdown complete")
188 | 
189 | 
190 | # Global connection manager instance
191 | _connection_manager: MCPConnectionManager | None = None
192 | 
193 | 
194 | def get_connection_manager(mcp_server: FastMCP) -> MCPConnectionManager:
195 |     """Get or create the global connection manager."""
196 |     global _connection_manager
197 |     if _connection_manager is None:
198 |         _connection_manager = MCPConnectionManager(mcp_server)
199 |     return _connection_manager
200 | 
201 | 
202 | async def initialize_connection_management(mcp_server: FastMCP) -> MCPConnectionManager:
203 |     """Initialize enhanced connection management."""
204 |     manager = get_connection_manager(mcp_server)
205 |     logger.info("Enhanced MCP connection management initialized")
206 |     return manager
207 | 
```
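
A lifecycle sketch for the connection manager. Note that the first `handle_new_connection` call runs `_register_tools_once`, which imports the tool registry, so this snippet only completes in a fully installed environment.

```python
import asyncio

from fastmcp import FastMCP

from maverick_mcp.infrastructure.connection_manager import (
    initialize_connection_management,
)


async def main() -> None:
    mcp = FastMCP("maverick-demo")
    manager = await initialize_connection_management(mcp)

    # The first connection registers all tools exactly once.
    session_id = await manager.handle_new_connection()
    status = manager.get_connection_status()
    print(status["active_connections"], status["tools_registered"])  # 1 True

    # Close waits ~5s internally to tolerate quick reconnections.
    await manager.handle_connection_close(session_id)
    await manager.shutdown()


asyncio.run(main())
```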

--------------------------------------------------------------------------------
/scripts/migrate_db.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Database migration script for MaverickMCP.
  4 | 
  5 | This script initializes the SQLite database with all necessary tables
  6 | and ensures the schema is properly set up for the application.
  7 | """
  8 | 
  9 | import logging
 10 | import os
 11 | import sys
 12 | from pathlib import Path
 13 | 
 14 | # Add the project root to the Python path
 15 | project_root = Path(__file__).parent.parent
 16 | sys.path.insert(0, str(project_root))
 17 | 
 18 | # noqa: E402 - imports must come after sys.path modification
 19 | from sqlalchemy import create_engine, text  # noqa: E402
 20 | 
 21 | from maverick_mcp.data.models import Base  # noqa: E402
 22 | 
 23 | # Set up logging
 24 | logging.basicConfig(
 25 |     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 26 | )
 27 | logger = logging.getLogger("maverick_mcp.migrate")
 28 | 
 29 | 
 30 | def get_database_url() -> str:
 31 |     """Get the database URL from environment or settings."""
 32 |     # Use environment variable if set, otherwise default to SQLite
 33 |     database_url = os.getenv("DATABASE_URL") or "sqlite:///maverick_mcp.db"
 34 |     logger.info(f"Using database URL: {database_url}")
 35 |     return database_url
 36 | 
 37 | 
 38 | def create_database_if_not_exists(database_url: str) -> None:
 39 |     """Create database file if it doesn't exist (for SQLite)."""
 40 |     if database_url.startswith("sqlite:///"):
 41 |         # Extract the file path from the URL
 42 |         db_path = database_url.replace("sqlite:///", "")
 43 |         if db_path == ":memory:":
 44 |             # In-memory databases need no file on disk
 45 |             return
 46 |         # removeprefix strips only an exact leading "./", leaving relative
 47 |         # paths like "../data.db" intact
 48 |         db_file = Path(db_path.removeprefix("./"))
 49 | 
 50 |         # Create directory if it doesn't exist
 51 |         db_file.parent.mkdir(parents=True, exist_ok=True)
 52 | 
 53 |         if not db_file.exists():
 54 |             logger.info(f"Creating SQLite database file: {db_file}")
 55 |             # Create empty file
 56 |             db_file.touch()
 57 |         else:
 58 |             logger.info(f"SQLite database already exists: {db_file}")
 59 | 
 60 | 
 61 | def test_database_connection(database_url: str) -> bool:
 62 |     """Test database connection."""
 63 |     try:
 64 |         logger.info("Testing database connection...")
 65 |         engine = create_engine(database_url, echo=False)
 66 | 
 67 |         with engine.connect() as conn:
 68 |             if database_url.startswith("sqlite"):
 69 |                 result = conn.execute(text("SELECT sqlite_version()"))
 70 |                 version = result.scalar()
 71 |                 logger.info(f"Connected to SQLite version: {version}")
 72 |             elif database_url.startswith("postgresql"):
 73 |                 result = conn.execute(text("SELECT version()"))
 74 |                 version = result.scalar()
 75 |                 logger.info(f"Connected to PostgreSQL: {version[:50]}...")
 76 |             else:
 77 |                 result = conn.execute(text("SELECT 1"))
 78 |                 logger.info("Database connection successful")
 79 | 
 80 |         engine.dispose()
 81 |         return True
 82 |     except Exception as e:
 83 |         logger.error(f"Database connection failed: {e}")
 84 |         return False
 85 | 
 86 | 
 87 | def create_tables(database_url: str) -> bool:
 88 |     """Create all tables using SQLAlchemy."""
 89 |     try:
 90 |         logger.info("Creating database tables...")
 91 |         engine = create_engine(database_url, echo=False)
 92 | 
 93 |         # Create all tables
 94 |         Base.metadata.create_all(bind=engine)
 95 | 
 96 |         # Verify tables were created
 97 |         with engine.connect() as conn:
 98 |             if database_url.startswith("sqlite"):
 99 |                 result = conn.execute(
100 |                     text("""
101 |                     SELECT name FROM sqlite_master
102 |                     WHERE type='table' AND name LIKE 'mcp_%'
103 |                     ORDER BY name
104 |                 """)
105 |                 )
106 |             else:
107 |                 result = conn.execute(
108 |                     text("""
109 |                     SELECT table_name FROM information_schema.tables
110 |                     WHERE table_schema='public' AND table_name LIKE 'mcp_%'
111 |                     ORDER BY table_name
112 |                 """)
113 |                 )
114 | 
115 |             tables = [row[0] for row in result.fetchall()]
116 |             logger.info(f"Created {len(tables)} tables: {', '.join(tables)}")
117 | 
118 |             # Verify expected tables exist
119 |             expected_tables = {
120 |                 "mcp_stocks",
121 |                 "mcp_price_cache",
122 |                 "mcp_maverick_stocks",
123 |                 "mcp_maverick_bear_stocks",
124 |                 "mcp_supply_demand_breakouts",
125 |                 "mcp_technical_cache",
126 |             }
127 | 
128 |             missing_tables = expected_tables - set(tables)
129 |             if missing_tables:
130 |                 logger.warning(f"Missing expected tables: {missing_tables}")
131 |             else:
132 |                 logger.info("All expected tables created successfully")
133 | 
134 |         engine.dispose()
135 |         return True
136 | 
137 |     except Exception as e:
138 |         logger.error(f"Table creation failed: {e}")
139 |         return False
140 | 
141 | 
142 | def verify_schema(database_url: str) -> bool:
143 |     """Verify the database schema is correct."""
144 |     try:
145 |         logger.info("Verifying database schema...")
146 |         engine = create_engine(database_url, echo=False)
147 | 
148 |         with engine.connect() as conn:
149 |             # Check that we can query each main table
150 |             test_queries = [
151 |                 ("mcp_stocks", "SELECT COUNT(*) FROM mcp_stocks"),
152 |                 ("mcp_price_cache", "SELECT COUNT(*) FROM mcp_price_cache"),
153 |                 ("mcp_maverick_stocks", "SELECT COUNT(*) FROM mcp_maverick_stocks"),
154 |                 (
155 |                     "mcp_maverick_bear_stocks",
156 |                     "SELECT COUNT(*) FROM mcp_maverick_bear_stocks",
157 |                 ),
158 |                 (
159 |                     "mcp_supply_demand_breakouts",
160 |                     "SELECT COUNT(*) FROM mcp_supply_demand_breakouts",
161 |                 ),
162 |                 ("mcp_technical_cache", "SELECT COUNT(*) FROM mcp_technical_cache"),
163 |             ]
164 | 
165 |             for table_name, query in test_queries:
166 |                 try:
167 |                     result = conn.execute(text(query))
168 |                     count = result.scalar()
169 |                     logger.info(f"✓ {table_name}: {count} records")
170 |                 except Exception as e:
171 |                     logger.error(f"✗ {table_name}: {e}")
172 |                     return False
173 | 
174 |         engine.dispose()
175 |         logger.info("Schema verification completed successfully")
176 |         return True
177 | 
178 |     except Exception as e:
179 |         logger.error(f"Schema verification failed: {e}")
180 |         return False
181 | 
182 | 
183 | def main():
184 |     """Main migration function."""
185 |     logger.info("Starting MaverickMCP database migration...")
186 | 
187 |     # Get database URL
188 |     database_url = get_database_url()
189 | 
190 |     # Create database file if needed (SQLite)
191 |     create_database_if_not_exists(database_url)
192 | 
193 |     # Test connection
194 |     if not test_database_connection(database_url):
195 |         logger.error("Database connection failed. Exiting.")
196 |         return False
197 | 
198 |     # Create tables
199 |     if not create_tables(database_url):
200 |         logger.error("Table creation failed. Exiting.")
201 |         return False
202 | 
203 |     # Verify schema
204 |     if not verify_schema(database_url):
205 |         logger.error("Schema verification failed. Exiting.")
206 |         return False
207 | 
208 |     logger.info("✅ Database migration completed successfully!")
209 |     logger.info(f"Database ready at: {database_url}")
210 | 
211 |     return True
212 | 
213 | 
214 | if __name__ == "__main__":
215 |     success = main()
216 |     if not success:
217 |         sys.exit(1)
218 | 
```
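
The script is written as a CLI entry point, so the most faithful programmatic driver is a subprocess. A sketch, assuming it is launched from the repository root; the throwaway database path is illustrative.

```python
import os
import subprocess
import sys

# Run the migration against a throwaway SQLite file; the __main__ guard above
# maps success/failure onto exit codes 0/1.
env = {**os.environ, "DATABASE_URL": "sqlite:///./test_maverick.db"}
proc = subprocess.run(
    [sys.executable, "scripts/migrate_db.py"],
    env=env,
    capture_output=True,
    text=True,
)
print(proc.returncode)  # 0 on success, 1 if any step failed
# logging.basicConfig writes to stderr, so progress lines land there:
if proc.stderr:
    print(proc.stderr.splitlines()[-1])
```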

--------------------------------------------------------------------------------
/scripts/validate_setup.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Validate setup for the Tiingo data loader.
  4 | 
  5 | This script checks that all required dependencies are installed
  6 | and provides setup instructions if anything is missing.
  7 | """
  8 | 
  9 | import os
 10 | import sys
 11 | from pathlib import Path
 12 | 
 13 | 
 14 | def check_dependency(module_name, package_name=None, description=""):
 15 |     """Check if a dependency is available."""
 16 |     if package_name is None:
 17 |         package_name = module_name
 18 | 
 19 |     try:
 20 |         __import__(module_name)
 21 |         print(f"✅ {module_name}: Available")
 22 |         return True
 23 |     except ImportError:
 24 |         print(f"❌ {module_name}: Missing - {description}")
 25 |         print(f"   Install with: pip install {package_name}")
 26 |         return False
 27 | 
 28 | 
 29 | def check_environment():
 30 |     """Check Python environment and dependencies."""
 31 |     print("🐍 Python Environment Check")
 32 |     print("=" * 40)
 33 |     print(f"Python version: {sys.version}")
 34 |     print(f"Python executable: {sys.executable}")
 35 |     print()
 36 | 
 37 |     # Core dependencies
 38 |     print("📦 Core Dependencies")
 39 |     print("-" * 20)
 40 | 
 41 |     deps_ok = True
 42 | 
 43 |     # Required for async operations
 44 |     deps_ok &= check_dependency(
 45 |         "aiohttp", "aiohttp>=3.8.0", "HTTP client for async operations"
 46 |     )
 47 | 
 48 |     # Data processing
 49 |     deps_ok &= check_dependency(
 50 |         "pandas", "pandas>=2.0.0", "Data manipulation and analysis"
 51 |     )
 52 |     deps_ok &= check_dependency("numpy", "numpy>=1.24.0", "Numerical computing")
 53 | 
 54 |     # Technical indicators
 55 |     deps_ok &= check_dependency(
 56 |         "pandas_ta", "pandas-ta>=0.3.14b0", "Technical analysis indicators"
 57 |     )
 58 | 
 59 |     # Database
 60 |     deps_ok &= check_dependency(
 61 |         "sqlalchemy", "sqlalchemy>=2.0.0", "SQL toolkit and ORM"
 62 |     )
 63 |     deps_ok &= check_dependency(
 64 |         "psycopg2", "psycopg2-binary>=2.9.0", "PostgreSQL adapter"
 65 |     )
 66 | 
 67 |     print()
 68 | 
 69 |     # Optional dependencies
 70 |     print("🔧 Optional Dependencies")
 71 |     print("-" * 25)
 72 | 
 73 |     optional_deps = [
 74 |         ("requests", "requests>=2.28.0", "HTTP library for fallback operations"),
 75 |         ("pytest", "pytest>=7.0.0", "Testing framework"),
 76 |         ("asynctest", "asynctest>=0.13.0", "Async testing utilities"),
 77 |     ]
 78 | 
 79 |     for module, package, desc in optional_deps:
 80 |         check_dependency(module, package, desc)
 81 | 
 82 |     print()
 83 | 
 84 |     return deps_ok
 85 | 
 86 | 
 87 | def check_api_token():
 88 |     """Check if Tiingo API token is configured."""
 89 |     print("🔑 API Configuration")
 90 |     print("-" * 20)
 91 | 
 92 |     token = os.getenv("TIINGO_API_TOKEN")
 93 |     if token:
 94 |         print(f"✅ TIINGO_API_TOKEN: Set (length: {len(token)})")
 95 |         return True
 96 |     else:
 97 |         print("❌ TIINGO_API_TOKEN: Not set")
 98 |         print("   Get your free API token at: https://www.tiingo.com")
 99 |         print("   Set with: export TIINGO_API_TOKEN=your_token_here")
100 |         return False
101 | 
102 | 
103 | def check_database():
104 |     """Check database connection."""
105 |     print("\n🗄️  Database Configuration")
106 |     print("-" * 26)
107 | 
108 |     db_url = os.getenv("DATABASE_URL") or os.getenv("POSTGRES_URL")
109 |     if db_url:
110 |         # Mask password in URL for display
111 |         masked_url = db_url
112 |         if "@" in db_url and "://" in db_url:
113 |             parts = db_url.split("://", 1)
114 |             if len(parts) == 2 and "@" in parts[1]:
115 |                 user_pass, host_db = parts[1].split("@", 1)
116 |                 if ":" in user_pass:
117 |                     user, _ = user_pass.split(":", 1)
118 |                     masked_url = f"{parts[0]}://{user}:****@{host_db}"
119 | 
120 |         print("✅ DATABASE_URL: Set")
121 |         print(f"   URL: {masked_url}")
122 | 
123 |         # Try to connect if SQLAlchemy is available
124 |         try:
125 |             import sqlalchemy
126 | 
127 |             engine = sqlalchemy.create_engine(db_url)
128 |             with engine.connect() as conn:
129 |                 result = conn.execute(sqlalchemy.text("SELECT 1"))
130 |                 result.fetchone()
131 |             print("✅ Database connection: Success")
132 |             return True
133 |         except ImportError:
134 |             print("⚠️  Database connection: Cannot test (SQLAlchemy not installed)")
135 |             return True
136 |         except Exception as e:
137 |             print(f"❌ Database connection: Failed - {e}")
138 |             return False
139 |     else:
140 |         print("❌ DATABASE_URL: Not set")
141 |         print(
142 |             "   Set with: export DATABASE_URL=postgresql://user:pass@localhost/maverick_mcp"
143 |         )
144 |         return False
145 | 
146 | 
147 | def check_project_structure():
148 |     """Check that we're in the right directory structure."""
149 |     print("\n📁 Project Structure")
150 |     print("-" * 20)
151 | 
152 |     current_dir = Path.cwd()
153 |     script_dir = Path(__file__).parent
154 | 
155 |     print(f"Current directory: {current_dir}")
156 |     print(f"Script directory: {script_dir}")
157 | 
158 |     # Check for expected files
159 |     expected_files = [
160 |         "load_tiingo_data.py",
161 |         "tiingo_config.py",
162 |         "load_example.py",
163 |         "requirements_tiingo.txt",
164 |     ]
165 | 
166 |     all_present = True
167 |     for file in expected_files:
168 |         file_path = script_dir / file
169 |         if file_path.exists():
170 |             print(f"✅ {file}: Found")
171 |         else:
172 |             print(f"❌ {file}: Missing")
173 |             all_present = False
174 | 
175 |     # Check for parent project structure
176 |     parent_files = [
177 |         "../maverick_mcp/__init__.py",
178 |         "../maverick_mcp/data/models.py",
179 |         "../maverick_mcp/core/technical_analysis.py",
180 |     ]
181 | 
182 |     print("\nParent project files:")
183 |     for file in parent_files:
184 |         file_path = script_dir / file
185 |         if file_path.exists():
186 |             print(f"✅ {file}: Found")
187 |         else:
188 |             print(f"❌ {file}: Missing")
189 |             all_present = False
190 | 
191 |     return all_present
192 | 
193 | 
194 | def provide_setup_instructions():
195 |     """Provide setup instructions."""
196 |     print("\n🚀 Setup Instructions")
197 |     print("=" * 21)
198 | 
199 |     print("1. Install Python dependencies:")
200 |     print("   pip install -r scripts/requirements_tiingo.txt")
201 |     print()
202 | 
203 |     print("2. Get Tiingo API token:")
204 |     print("   - Sign up at https://www.tiingo.com")
205 |     print("   - Get your free API token from the dashboard")
206 |     print("   - export TIINGO_API_TOKEN=your_token_here")
207 |     print()
208 | 
209 |     print("3. Configure database:")
210 |     print("   - Ensure PostgreSQL is running")
211 |     print("   - Create maverick_mcp database")
212 |     print("   - export DATABASE_URL=postgresql://user:pass@localhost/maverick_mcp")
213 |     print()
214 | 
215 |     print("4. Test the setup:")
216 |     print("   python3 scripts/validate_setup.py")
217 |     print()
218 | 
219 |     print("5. Run a sample load:")
220 |     print("   python3 scripts/load_tiingo_data.py --symbols AAPL,MSFT --years 1")
221 | 
222 | 
223 | def main():
224 |     """Main validation function."""
225 |     print("Tiingo Data Loader Setup Validation")
226 |     print("=" * 38)
227 | 
228 |     # Check all components
229 |     deps_ok = check_environment()
230 |     api_ok = check_api_token()
231 |     db_ok = check_database()
232 |     structure_ok = check_project_structure()
233 | 
234 |     print("\n" + "=" * 40)
235 | 
236 |     if deps_ok and api_ok and db_ok and structure_ok:
237 |         print("🎉 Setup validation PASSED!")
238 |         print("You can now use the Tiingo data loader.")
239 |         print()
240 |         print("Quick start:")
241 |         print("  python3 scripts/load_tiingo_data.py --help")
242 |         print("  python3 scripts/load_example.py")
243 |         return 0
244 |     else:
245 |         print("❌ Setup validation FAILED!")
246 |         print("Please fix the issues above before proceeding.")
247 |         print()
248 |         provide_setup_instructions()
249 |         return 1
250 | 
251 | 
252 | if __name__ == "__main__":
253 |     sys.exit(main())
254 | 
```
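
The inline URL-masking logic in `check_database` is handy on its own; below is the same algorithm extracted into a standalone function (the name `mask_db_url` is mine, not the script's).

```python
def mask_db_url(db_url: str) -> str:
    """Mask the password in scheme://user:pass@host/db style URLs."""
    if "://" not in db_url:
        return db_url
    scheme, rest = db_url.split("://", 1)
    if "@" not in rest:
        return db_url
    user_pass, host_db = rest.split("@", 1)
    if ":" not in user_pass:
        return db_url  # no password component to hide
    user, _ = user_pass.split(":", 1)
    return f"{scheme}://{user}:****@{host_db}"


assert mask_db_url("postgresql://u:secret@localhost/maverick_mcp") == (
    "postgresql://u:****@localhost/maverick_mcp"
)
assert mask_db_url("sqlite:///maverick_mcp.db") == "sqlite:///maverick_mcp.db"
```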

--------------------------------------------------------------------------------
/maverick_mcp/providers/interfaces/persistence.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Data persistence interface.
  3 | 
  4 | This module defines the abstract interface for database operations,
  5 | enabling different persistence implementations to be used interchangeably.
  6 | """
  7 | 
  8 | from typing import Any, Protocol, runtime_checkable
  9 | 
 10 | import pandas as pd
 11 | from sqlalchemy.orm import Session
 12 | 
 13 | 
 14 | @runtime_checkable
 15 | class IDataPersistence(Protocol):
 16 |     """
 17 |     Interface for data persistence operations.
 18 | 
 19 |     This interface abstracts database operations to enable different
 20 |     implementations (SQLAlchemy, MongoDB, etc.) to be used interchangeably.
 21 |     """
 22 | 
 23 |     async def get_session(self) -> Session:
 24 |         """
 25 |         Get a database session.
 26 | 
 27 |         Returns:
 28 |             Database session for operations
 29 |         """
 30 |         ...
 31 | 
 32 |     async def get_read_only_session(self) -> Session:
 33 |         """
 34 |         Get a read-only database session.
 35 | 
 36 |         Returns:
 37 |             Read-only database session for queries
 38 |         """
 39 |         ...
 40 | 
 41 |     async def save_price_data(
 42 |         self, session: Session, symbol: str, data: pd.DataFrame
 43 |     ) -> int:
 44 |         """
 45 |         Save stock price data to persistence layer.
 46 | 
 47 |         Args:
 48 |             session: Database session
 49 |             symbol: Stock ticker symbol
 50 |             data: DataFrame with OHLCV data
 51 | 
 52 |         Returns:
 53 |             Number of records saved
 54 |         """
 55 |         ...
 56 | 
 57 |     async def get_price_data(
 58 |         self,
 59 |         session: Session,
 60 |         symbol: str,
 61 |         start_date: str,
 62 |         end_date: str,
 63 |     ) -> pd.DataFrame:
 64 |         """
 65 |         Retrieve stock price data from persistence layer.
 66 | 
 67 |         Args:
 68 |             session: Database session
 69 |             symbol: Stock ticker symbol
 70 |             start_date: Start date in YYYY-MM-DD format
 71 |             end_date: End date in YYYY-MM-DD format
 72 | 
 73 |         Returns:
 74 |             DataFrame with historical price data
 75 |         """
 76 |         ...
 77 | 
 78 |     async def get_or_create_stock(self, session: Session, symbol: str) -> Any:
 79 |         """
 80 |         Get or create a stock record.
 81 | 
 82 |         Args:
 83 |             session: Database session
 84 |             symbol: Stock ticker symbol
 85 | 
 86 |         Returns:
 87 |             Stock entity/record
 88 |         """
 89 |         ...
 90 | 
 91 |     async def save_screening_results(
 92 |         self,
 93 |         session: Session,
 94 |         screening_type: str,
 95 |         results: list[dict[str, Any]],
 96 |     ) -> int:
 97 |         """
 98 |         Save stock screening results.
 99 | 
100 |         Args:
101 |             session: Database session
102 |             screening_type: Type of screening (e.g., 'maverick', 'bearish', 'trending')
103 |             results: List of screening results
104 | 
105 |         Returns:
106 |             Number of records saved
107 |         """
108 |         ...
109 | 
110 |     async def get_screening_results(
111 |         self,
112 |         session: Session,
113 |         screening_type: str,
114 |         limit: int | None = None,
115 |         min_score: float | None = None,
116 |     ) -> list[dict[str, Any]]:
117 |         """
118 |         Retrieve stock screening results.
119 | 
120 |         Args:
121 |             session: Database session
122 |             screening_type: Type of screening
123 |             limit: Maximum number of results
124 |             min_score: Minimum score filter
125 | 
126 |         Returns:
127 |             List of screening results
128 |         """
129 |         ...
130 | 
131 |     async def get_latest_screening_data(self) -> dict[str, list[dict[str, Any]]]:
132 |         """
133 |         Get the latest screening data for all types.
134 | 
135 |         Returns:
136 |             Dictionary with all screening types and their latest results
137 |         """
138 |         ...
139 | 
140 |     async def check_data_freshness(self, symbol: str, max_age_hours: int = 24) -> bool:
141 |         """
142 |         Check if cached data for a symbol is fresh enough.
143 | 
144 |         Args:
145 |             symbol: Stock ticker symbol
146 |             max_age_hours: Maximum age in hours before data is considered stale
147 | 
148 |         Returns:
149 |             True if data is fresh, False if stale or missing
150 |         """
151 |         ...
152 | 
153 |     async def bulk_save_price_data(
154 |         self, session: Session, symbol: str, data: pd.DataFrame
155 |     ) -> int:
156 |         """
157 |         Bulk save price data for better performance.
158 | 
159 |         Args:
160 |             session: Database session
161 |             symbol: Stock ticker symbol
162 |             data: DataFrame with OHLCV data
163 | 
164 |         Returns:
165 |             Number of records saved
166 |         """
167 |         ...
168 | 
169 |     async def get_symbols_with_data(
170 |         self, session: Session, limit: int | None = None
171 |     ) -> list[str]:
172 |         """
173 |         Get list of symbols that have price data.
174 | 
175 |         Args:
176 |             session: Database session
177 |             limit: Maximum number of symbols to return
178 | 
179 |         Returns:
180 |             List of stock symbols
181 |         """
182 |         ...
183 | 
184 |     async def cleanup_old_data(self, session: Session, days_to_keep: int = 365) -> int:
185 |         """
186 |         Clean up old data beyond retention period.
187 | 
188 |         Args:
189 |             session: Database session
190 |             days_to_keep: Number of days of data to retain
191 | 
192 |         Returns:
193 |             Number of records deleted
194 |         """
195 |         ...
196 | 
197 | 
198 | class DatabaseConfig:
199 |     """
200 |     Configuration class for database connections.
201 | 
202 |     This class encapsulates database-related configuration parameters
203 |     to reduce coupling between persistence implementations and configuration sources.
204 |     """
205 | 
206 |     def __init__(
207 |         self,
208 |         database_url: str = "sqlite:///maverick_mcp.db",
209 |         pool_size: int = 5,
210 |         max_overflow: int = 10,
211 |         pool_timeout: int = 30,
212 |         pool_recycle: int = 3600,
213 |         echo: bool = False,
214 |         autocommit: bool = False,
215 |         autoflush: bool = True,
216 |         expire_on_commit: bool = True,
217 |     ):
218 |         """
219 |         Initialize database configuration.
220 | 
221 |         Args:
222 |             database_url: Database connection URL
223 |             pool_size: Connection pool size
224 |             max_overflow: Maximum connection overflow
225 |             pool_timeout: Pool checkout timeout in seconds
226 |             pool_recycle: Connection recycle time in seconds
227 |             echo: Whether to echo SQL statements
228 |             autocommit: Whether to autocommit transactions
229 |             autoflush: Whether to autoflush sessions
230 |             expire_on_commit: Whether to expire objects on commit
231 |         """
232 |         self.database_url = database_url
233 |         self.pool_size = pool_size
234 |         self.max_overflow = max_overflow
235 |         self.pool_timeout = pool_timeout
236 |         self.pool_recycle = pool_recycle
237 |         self.echo = echo
238 |         self.autocommit = autocommit
239 |         self.autoflush = autoflush
240 |         self.expire_on_commit = expire_on_commit
241 | 
242 |     @property
243 |     def is_sqlite(self) -> bool:
244 |         """Check if database is SQLite."""
245 |         return self.database_url.startswith("sqlite")
246 | 
247 |     @property
248 |     def is_postgresql(self) -> bool:
249 |         """Check if database is PostgreSQL."""
250 |         return self.database_url.startswith("postgresql")
251 | 
252 |     @property
253 |     def supports_pooling(self) -> bool:
254 |         """Check if database supports connection pooling."""
255 |         return not self.is_sqlite  # SQLite doesn't benefit from pooling
256 | 
257 | 
258 | class PersistenceError(Exception):
259 |     """Base exception for persistence operations."""
260 | 
261 |     pass
262 | 
263 | 
264 | class DataNotFoundError(PersistenceError):
265 |     """Raised when requested data is not found."""
266 | 
267 |     pass
268 | 
269 | 
270 | class DataValidationError(PersistenceError):
271 |     """Raised when data validation fails."""
272 | 
273 |     pass
274 | 
275 | 
276 | class ConnectionError(PersistenceError):
277 |     """Raised when database connection fails (note: shadows the builtin ConnectionError)."""
278 | 
279 |     pass
280 | 
```
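
The `DatabaseConfig` properties are meant to drive engine construction. A minimal sketch, assuming SQLAlchemy as the backing implementation (the engine/session wiring below is illustrative, not the project's actual factory):

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool

from maverick_mcp.providers.interfaces.persistence import DatabaseConfig

config = DatabaseConfig(
    database_url="postgresql://user:pass@localhost/maverick_mcp",
    pool_size=10,
)

# SQLite gains nothing from a connection pool (supports_pooling is False),
# so pooling kwargs are applied only for server-backed databases.
if config.supports_pooling:
    engine = create_engine(
        config.database_url,
        echo=config.echo,
        pool_size=config.pool_size,
        max_overflow=config.max_overflow,
        pool_timeout=config.pool_timeout,
        pool_recycle=config.pool_recycle,
    )
else:
    engine = create_engine(config.database_url, echo=config.echo, poolclass=NullPool)

SessionLocal = sessionmaker(
    bind=engine,
    autoflush=config.autoflush,
    expire_on_commit=config.expire_on_commit,
)
```

Note also that because `IDataPersistence` is decorated with `@runtime_checkable`, `isinstance(obj, IDataPersistence)` verifies only that the method names exist on `obj`; signatures and return types are not checked at runtime.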

--------------------------------------------------------------------------------
/maverick_mcp/providers/mocks/mock_market_data.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Mock market data provider implementation for testing.
  3 | """
  4 | 
  5 | from datetime import UTC, datetime, timedelta
  6 | from typing import Any
  7 | 
  8 | 
  9 | class MockMarketDataProvider:
 10 |     """
 11 |     Mock implementation of IMarketDataProvider for testing.
 12 |     """
 13 | 
 14 |     def __init__(self, test_data: dict[str, Any] | None = None):
 15 |         """
 16 |         Initialize the mock market data provider.
 17 | 
 18 |         Args:
 19 |             test_data: Optional test data to return
 20 |         """
 21 |         self._test_data = test_data or {}
 22 |         self._call_log: list[dict[str, Any]] = []
 23 | 
 24 |     async def get_market_summary(self) -> dict[str, Any]:
 25 |         """Get mock market summary."""
 26 |         self._log_call("get_market_summary", {})
 27 | 
 28 |         if "market_summary" in self._test_data:
 29 |             return self._test_data["market_summary"]
 30 | 
 31 |         return {
 32 |             "^GSPC": {
 33 |                 "name": "S&P 500",
 34 |                 "symbol": "^GSPC",
 35 |                 "price": 4500.25,
 36 |                 "change": 15.75,
 37 |                 "change_percent": 0.35,
 38 |             },
 39 |             "^DJI": {
 40 |                 "name": "Dow Jones",
 41 |                 "symbol": "^DJI",
 42 |                 "price": 35000.50,
 43 |                 "change": -50.25,
 44 |                 "change_percent": -0.14,
 45 |             },
 46 |             "^IXIC": {
 47 |                 "name": "NASDAQ",
 48 |                 "symbol": "^IXIC",
 49 |                 "price": 14000.75,
 50 |                 "change": 25.30,
 51 |                 "change_percent": 0.18,
 52 |             },
 53 |         }
 54 | 
 55 |     async def get_top_gainers(self, limit: int = 10) -> list[dict[str, Any]]:
 56 |         """Get mock top gainers."""
 57 |         self._log_call("get_top_gainers", {"limit": limit})
 58 | 
 59 |         if "top_gainers" in self._test_data:
 60 |             return self._test_data["top_gainers"][:limit]
 61 | 
 62 |         gainers = [
 63 |             {
 64 |                 "symbol": "GAINER1",
 65 |                 "price": 150.25,
 66 |                 "change": 15.50,
 67 |                 "change_percent": 11.50,
 68 |                 "volume": 2500000,
 69 |             },
 70 |             {
 71 |                 "symbol": "GAINER2",
 72 |                 "price": 85.75,
 73 |                 "change": 8.25,
 74 |                 "change_percent": 10.65,
 75 |                 "volume": 1800000,
 76 |             },
 77 |             {
 78 |                 "symbol": "GAINER3",
 79 |                 "price": 45.30,
 80 |                 "change": 4.15,
 81 |                 "change_percent": 10.08,
 82 |                 "volume": 3200000,
 83 |             },
 84 |         ]
 85 | 
 86 |         return gainers[:limit]
 87 | 
 88 |     async def get_top_losers(self, limit: int = 10) -> list[dict[str, Any]]:
 89 |         """Get mock top losers."""
 90 |         self._log_call("get_top_losers", {"limit": limit})
 91 | 
 92 |         if "top_losers" in self._test_data:
 93 |             return self._test_data["top_losers"][:limit]
 94 | 
 95 |         losers = [
 96 |             {
 97 |                 "symbol": "LOSER1",
 98 |                 "price": 25.50,
 99 |                 "change": -5.75,
100 |                 "change_percent": -18.38,
101 |                 "volume": 4500000,
102 |             },
103 |             {
104 |                 "symbol": "LOSER2",
105 |                 "price": 67.20,
106 |                 "change": -12.80,
107 |                 "change_percent": -16.00,
108 |                 "volume": 2100000,
109 |             },
110 |             {
111 |                 "symbol": "LOSER3",
112 |                 "price": 120.45,
113 |                 "change": -18.55,
114 |                 "change_percent": -13.35,
115 |                 "volume": 1600000,
116 |             },
117 |         ]
118 | 
119 |         return losers[:limit]
120 | 
121 |     async def get_most_active(self, limit: int = 10) -> list[dict[str, Any]]:
122 |         """Get mock most active stocks."""
123 |         self._log_call("get_most_active", {"limit": limit})
124 | 
125 |         if "most_active" in self._test_data:
126 |             return self._test_data["most_active"][:limit]
127 | 
128 |         active = [
129 |             {
130 |                 "symbol": "ACTIVE1",
131 |                 "price": 200.75,
132 |                 "change": 5.25,
133 |                 "change_percent": 2.68,
134 |                 "volume": 15000000,
135 |             },
136 |             {
137 |                 "symbol": "ACTIVE2",
138 |                 "price": 95.30,
139 |                 "change": -2.15,
140 |                 "change_percent": -2.21,
141 |                 "volume": 12500000,
142 |             },
143 |             {
144 |                 "symbol": "ACTIVE3",
145 |                 "price": 155.80,
146 |                 "change": 1.85,
147 |                 "change_percent": 1.20,
148 |                 "volume": 11200000,
149 |             },
150 |         ]
151 | 
152 |         return active[:limit]
153 | 
154 |     async def get_sector_performance(self) -> dict[str, float]:
155 |         """Get mock sector performance."""
156 |         self._log_call("get_sector_performance", {})
157 | 
158 |         if "sector_performance" in self._test_data:
159 |             return self._test_data["sector_performance"]
160 | 
161 |         return {
162 |             "Technology": 1.25,
163 |             "Healthcare": 0.85,
164 |             "Financials": -0.45,
165 |             "Consumer Discretionary": 0.65,
166 |             "Industrials": 0.35,
167 |             "Energy": -1.15,
168 |             "Utilities": 0.15,
169 |             "Materials": -0.25,
170 |             "Consumer Staples": 0.55,
171 |             "Real Estate": -0.75,
172 |             "Communication Services": 0.95,
173 |         }
174 | 
175 |     async def get_earnings_calendar(self, days: int = 7) -> list[dict[str, Any]]:
176 |         """Get mock earnings calendar."""
177 |         self._log_call("get_earnings_calendar", {"days": days})
178 | 
179 |         if "earnings_calendar" in self._test_data:
180 |             return self._test_data["earnings_calendar"]
181 | 
182 |         base_date = datetime.now(UTC).date()
183 | 
184 |         return [
185 |             {
186 |                 "ticker": "EARN1",
187 |                 "name": "Earnings Corp 1",
188 |                 "earnings_date": (base_date + timedelta(days=1)).strftime("%Y-%m-%d"),
189 |                 "eps_estimate": 1.25,
190 |             },
191 |             {
192 |                 "ticker": "EARN2",
193 |                 "name": "Earnings Corp 2",
194 |                 "earnings_date": (base_date + timedelta(days=3)).strftime("%Y-%m-%d"),
195 |                 "eps_estimate": 0.85,
196 |             },
197 |             {
198 |                 "ticker": "EARN3",
199 |                 "name": "Earnings Corp 3",
200 |                 "earnings_date": (base_date + timedelta(days=5)).strftime("%Y-%m-%d"),
201 |                 "eps_estimate": 2.15,
202 |             },
203 |         ]
204 | 
205 |     async def get_market_overview(self) -> dict[str, Any]:
206 |         """Get mock comprehensive market overview."""
207 |         self._log_call("get_market_overview", {})
208 | 
209 |         return {
210 |             "timestamp": datetime.now(UTC).isoformat(),
211 |             "market_summary": await self.get_market_summary(),
212 |             "top_gainers": await self.get_top_gainers(5),
213 |             "top_losers": await self.get_top_losers(5),
214 |             "sector_performance": await self.get_sector_performance(),
215 |         }
216 | 
217 |     # Testing utilities
218 | 
219 |     def _log_call(self, method: str, args: dict[str, Any]) -> None:
220 |         """Log method calls for testing verification."""
221 |         self._call_log.append(
222 |             {
223 |                 "method": method,
224 |                 "args": args,
225 |                 "timestamp": datetime.now(UTC),
226 |             }
227 |         )
228 | 
229 |     def get_call_log(self) -> list[dict[str, Any]]:
230 |         """Get the log of method calls."""
231 |         return self._call_log.copy()
232 | 
233 |     def clear_call_log(self) -> None:
234 |         """Clear the method call log."""
235 |         self._call_log.clear()
236 | 
237 |     def set_test_data(self, key: str, data: Any) -> None:
238 |         """Set test data for a specific key."""
239 |         self._test_data[key] = data
240 | 
241 |     def clear_test_data(self) -> None:
242 |         """Clear all test data."""
243 |         self._test_data.clear()
244 | 
```
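
In a test, the mock is typically constructed with canned data and then inspected through its call log. A minimal pytest-style sketch (the test name and the injected rows are illustrative):

```python
import asyncio

from maverick_mcp.providers.mocks.mock_market_data import MockMarketDataProvider


def test_top_gainers_uses_injected_data():
    provider = MockMarketDataProvider(
        test_data={
            "top_gainers": [
                {"symbol": "AAPL", "price": 190.0, "change": 3.2,
                 "change_percent": 1.71, "volume": 50_000_000},
                {"symbol": "MSFT", "price": 410.0, "change": 5.0,
                 "change_percent": 1.23, "volume": 30_000_000},
            ]
        }
    )

    gainers = asyncio.run(provider.get_top_gainers(limit=1))

    # Injected test data wins over the built-in defaults and is truncated to `limit`.
    assert [g["symbol"] for g in gainers] == ["AAPL"]

    # The call log records each method name and its arguments for verification.
    (call,) = provider.get_call_log()
    assert call["method"] == "get_top_gainers"
    assert call["args"] == {"limit": 1}
```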