This is page 3 of 39. To view other pages, fetch http://codebase.md/wshobson/maverick-mcp?lines=true&page={x}, replacing {x} with the desired page number (1–39).
# Directory Structure
```
├── .dockerignore
├── .env.example
├── .github
│ ├── dependabot.yml
│ ├── FUNDING.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── config.yml
│ │ ├── feature_request.md
│ │ ├── question.md
│ │ └── security_report.md
│ ├── pull_request_template.md
│ └── workflows
│ ├── claude-code-review.yml
│ └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│ ├── launch.json
│ └── settings.json
├── alembic
│ ├── env.py
│ ├── script.py.mako
│ └── versions
│ ├── 001_initial_schema.py
│ ├── 003_add_performance_indexes.py
│ ├── 006_rename_metadata_columns.py
│ ├── 008_performance_optimization_indexes.py
│ ├── 009_rename_to_supply_demand.py
│ ├── 010_self_contained_schema.py
│ ├── 011_remove_proprietary_terms.py
│ ├── 013_add_backtest_persistence_models.py
│ ├── 014_add_portfolio_models.py
│ ├── 08e3945a0c93_merge_heads.py
│ ├── 9374a5c9b679_merge_heads_for_testing.py
│ ├── abf9b9afb134_merge_multiple_heads.py
│ ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│ ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│ ├── f0696e2cac15_add_essential_performance_indexes.py
│ └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── api
│ │ └── backtesting.md
│ ├── BACKTESTING.md
│ ├── COST_BASIS_SPECIFICATION.md
│ ├── deep_research_agent.md
│ ├── exa_research_testing_strategy.md
│ ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│ ├── PORTFOLIO.md
│ ├── SETUP_SELF_CONTAINED.md
│ └── speed_testing_framework.md
├── examples
│ ├── complete_speed_validation.py
│ ├── deep_research_integration.py
│ ├── llm_optimization_example.py
│ ├── llm_speed_demo.py
│ ├── monitoring_example.py
│ ├── parallel_research_example.py
│ ├── speed_optimization_demo.py
│ └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│ ├── __init__.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── circuit_breaker.py
│ │ ├── deep_research.py
│ │ ├── market_analysis.py
│ │ ├── optimized_research.py
│ │ ├── supervisor.py
│ │ └── technical_analysis.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── api_server.py
│ │ ├── connection_manager.py
│ │ ├── dependencies
│ │ │ ├── __init__.py
│ │ │ ├── stock_analysis.py
│ │ │ └── technical_analysis.py
│ │ ├── error_handling.py
│ │ ├── inspector_compatible_sse.py
│ │ ├── inspector_sse.py
│ │ ├── middleware
│ │ │ ├── error_handling.py
│ │ │ ├── mcp_logging.py
│ │ │ ├── rate_limiting_enhanced.py
│ │ │ └── security.py
│ │ ├── openapi_config.py
│ │ ├── routers
│ │ │ ├── __init__.py
│ │ │ ├── agents.py
│ │ │ ├── backtesting.py
│ │ │ ├── data_enhanced.py
│ │ │ ├── data.py
│ │ │ ├── health_enhanced.py
│ │ │ ├── health_tools.py
│ │ │ ├── health.py
│ │ │ ├── intelligent_backtesting.py
│ │ │ ├── introspection.py
│ │ │ ├── mcp_prompts.py
│ │ │ ├── monitoring.py
│ │ │ ├── news_sentiment_enhanced.py
│ │ │ ├── performance.py
│ │ │ ├── portfolio.py
│ │ │ ├── research.py
│ │ │ ├── screening_ddd.py
│ │ │ ├── screening_parallel.py
│ │ │ ├── screening.py
│ │ │ ├── technical_ddd.py
│ │ │ ├── technical_enhanced.py
│ │ │ ├── technical.py
│ │ │ └── tool_registry.py
│ │ ├── server.py
│ │ ├── services
│ │ │ ├── __init__.py
│ │ │ ├── base_service.py
│ │ │ ├── market_service.py
│ │ │ ├── portfolio_service.py
│ │ │ ├── prompt_service.py
│ │ │ └── resource_service.py
│ │ ├── simple_sse.py
│ │ └── utils
│ │ ├── __init__.py
│ │ ├── insomnia_export.py
│ │ └── postman_export.py
│ ├── application
│ │ ├── __init__.py
│ │ ├── commands
│ │ │ └── __init__.py
│ │ ├── dto
│ │ │ ├── __init__.py
│ │ │ └── technical_analysis_dto.py
│ │ ├── queries
│ │ │ ├── __init__.py
│ │ │ └── get_technical_analysis.py
│ │ └── screening
│ │ ├── __init__.py
│ │ ├── dtos.py
│ │ └── queries.py
│ ├── backtesting
│ │ ├── __init__.py
│ │ ├── ab_testing.py
│ │ ├── analysis.py
│ │ ├── batch_processing_stub.py
│ │ ├── batch_processing.py
│ │ ├── model_manager.py
│ │ ├── optimization.py
│ │ ├── persistence.py
│ │ ├── retraining_pipeline.py
│ │ ├── strategies
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── ml
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adaptive.py
│ │ │ │ ├── ensemble.py
│ │ │ │ ├── feature_engineering.py
│ │ │ │ └── regime_aware.py
│ │ │ ├── ml_strategies.py
│ │ │ ├── parser.py
│ │ │ └── templates.py
│ │ ├── strategy_executor.py
│ │ ├── vectorbt_engine.py
│ │ └── visualization.py
│ ├── config
│ │ ├── __init__.py
│ │ ├── constants.py
│ │ ├── database_self_contained.py
│ │ ├── database.py
│ │ ├── llm_optimization_config.py
│ │ ├── logging_settings.py
│ │ ├── plotly_config.py
│ │ ├── security_utils.py
│ │ ├── security.py
│ │ ├── settings.py
│ │ ├── technical_constants.py
│ │ ├── tool_estimation.py
│ │ └── validation.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── technical_analysis.py
│ │ └── visualization.py
│ ├── data
│ │ ├── __init__.py
│ │ ├── cache_manager.py
│ │ ├── cache.py
│ │ ├── django_adapter.py
│ │ ├── health.py
│ │ ├── models.py
│ │ ├── performance.py
│ │ ├── session_management.py
│ │ └── validation.py
│ ├── database
│ │ ├── __init__.py
│ │ ├── base.py
│ │ └── optimization.py
│ ├── dependencies.py
│ ├── domain
│ │ ├── __init__.py
│ │ ├── entities
│ │ │ ├── __init__.py
│ │ │ └── stock_analysis.py
│ │ ├── events
│ │ │ └── __init__.py
│ │ ├── portfolio.py
│ │ ├── screening
│ │ │ ├── __init__.py
│ │ │ ├── entities.py
│ │ │ ├── services.py
│ │ │ └── value_objects.py
│ │ ├── services
│ │ │ ├── __init__.py
│ │ │ └── technical_analysis_service.py
│ │ ├── stock_analysis
│ │ │ ├── __init__.py
│ │ │ └── stock_analysis_service.py
│ │ └── value_objects
│ │ ├── __init__.py
│ │ └── technical_indicators.py
│ ├── exceptions.py
│ ├── infrastructure
│ │ ├── __init__.py
│ │ ├── cache
│ │ │ └── __init__.py
│ │ ├── caching
│ │ │ ├── __init__.py
│ │ │ └── cache_management_service.py
│ │ ├── connection_manager.py
│ │ ├── data_fetching
│ │ │ ├── __init__.py
│ │ │ └── stock_data_service.py
│ │ ├── health
│ │ │ ├── __init__.py
│ │ │ └── health_checker.py
│ │ ├── persistence
│ │ │ ├── __init__.py
│ │ │ └── stock_repository.py
│ │ ├── providers
│ │ │ └── __init__.py
│ │ ├── screening
│ │ │ ├── __init__.py
│ │ │ └── repositories.py
│ │ └── sse_optimizer.py
│ ├── langchain_tools
│ │ ├── __init__.py
│ │ ├── adapters.py
│ │ └── registry.py
│ ├── logging_config.py
│ ├── memory
│ │ ├── __init__.py
│ │ └── stores.py
│ ├── monitoring
│ │ ├── __init__.py
│ │ ├── health_check.py
│ │ ├── health_monitor.py
│ │ ├── integration_example.py
│ │ ├── metrics.py
│ │ ├── middleware.py
│ │ └── status_dashboard.py
│ ├── providers
│ │ ├── __init__.py
│ │ ├── dependencies.py
│ │ ├── factories
│ │ │ ├── __init__.py
│ │ │ ├── config_factory.py
│ │ │ └── provider_factory.py
│ │ ├── implementations
│ │ │ ├── __init__.py
│ │ │ ├── cache_adapter.py
│ │ │ ├── macro_data_adapter.py
│ │ │ ├── market_data_adapter.py
│ │ │ ├── persistence_adapter.py
│ │ │ └── stock_data_adapter.py
│ │ ├── interfaces
│ │ │ ├── __init__.py
│ │ │ ├── cache.py
│ │ │ ├── config.py
│ │ │ ├── macro_data.py
│ │ │ ├── market_data.py
│ │ │ ├── persistence.py
│ │ │ └── stock_data.py
│ │ ├── llm_factory.py
│ │ ├── macro_data.py
│ │ ├── market_data.py
│ │ ├── mocks
│ │ │ ├── __init__.py
│ │ │ ├── mock_cache.py
│ │ │ ├── mock_config.py
│ │ │ ├── mock_macro_data.py
│ │ │ ├── mock_market_data.py
│ │ │ ├── mock_persistence.py
│ │ │ └── mock_stock_data.py
│ │ ├── openrouter_provider.py
│ │ ├── optimized_screening.py
│ │ ├── optimized_stock_data.py
│ │ └── stock_data.py
│ ├── README.md
│ ├── tests
│ │ ├── __init__.py
│ │ ├── README_INMEMORY_TESTS.md
│ │ ├── test_cache_debug.py
│ │ ├── test_fixes_validation.py
│ │ ├── test_in_memory_routers.py
│ │ ├── test_in_memory_server.py
│ │ ├── test_macro_data_provider.py
│ │ ├── test_mailgun_email.py
│ │ ├── test_market_calendar_caching.py
│ │ ├── test_mcp_tool_fixes_pytest.py
│ │ ├── test_mcp_tool_fixes.py
│ │ ├── test_mcp_tools.py
│ │ ├── test_models_functional.py
│ │ ├── test_server.py
│ │ ├── test_stock_data_enhanced.py
│ │ ├── test_stock_data_provider.py
│ │ └── test_technical_analysis.py
│ ├── tools
│ │ ├── __init__.py
│ │ ├── performance_monitoring.py
│ │ ├── portfolio_manager.py
│ │ ├── risk_management.py
│ │ └── sentiment_analysis.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── agent_errors.py
│ │ ├── batch_processing.py
│ │ ├── cache_warmer.py
│ │ ├── circuit_breaker_decorators.py
│ │ ├── circuit_breaker_services.py
│ │ ├── circuit_breaker.py
│ │ ├── data_chunking.py
│ │ ├── database_monitoring.py
│ │ ├── debug_utils.py
│ │ ├── fallback_strategies.py
│ │ ├── llm_optimization.py
│ │ ├── logging_example.py
│ │ ├── logging_init.py
│ │ ├── logging.py
│ │ ├── mcp_logging.py
│ │ ├── memory_profiler.py
│ │ ├── monitoring_middleware.py
│ │ ├── monitoring.py
│ │ ├── orchestration_logging.py
│ │ ├── parallel_research.py
│ │ ├── parallel_screening.py
│ │ ├── quick_cache.py
│ │ ├── resource_manager.py
│ │ ├── shutdown.py
│ │ ├── stock_helpers.py
│ │ ├── structured_logger.py
│ │ ├── tool_monitoring.py
│ │ ├── tracing.py
│ │ └── yfinance_pool.py
│ ├── validation
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── data.py
│ │ ├── middleware.py
│ │ ├── portfolio.py
│ │ ├── responses.py
│ │ ├── screening.py
│ │ └── technical.py
│ └── workflows
│ ├── __init__.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── market_analyzer.py
│ │ ├── optimizer_agent.py
│ │ ├── strategy_selector.py
│ │ └── validator_agent.py
│ ├── backtesting_workflow.py
│ └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│ ├── dev.sh
│ ├── INSTALLATION_GUIDE.md
│ ├── load_example.py
│ ├── load_market_data.py
│ ├── load_tiingo_data.py
│ ├── migrate_db.py
│ ├── README_TIINGO_LOADER.md
│ ├── requirements_tiingo.txt
│ ├── run_stock_screening.py
│ ├── run-migrations.sh
│ ├── seed_db.py
│ ├── seed_sp500.py
│ ├── setup_database.sh
│ ├── setup_self_contained.py
│ ├── setup_sp500_database.sh
│ ├── test_seeded_data.py
│ ├── test_tiingo_loader.py
│ ├── tiingo_config.py
│ └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│ ├── __init__.py
│ ├── conftest.py
│ ├── core
│ │ └── test_technical_analysis.py
│ ├── data
│ │ └── test_portfolio_models.py
│ ├── domain
│ │ ├── conftest.py
│ │ ├── test_portfolio_entities.py
│ │ └── test_technical_analysis_service.py
│ ├── fixtures
│ │ └── orchestration_fixtures.py
│ ├── integration
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── README.md
│ │ ├── run_integration_tests.sh
│ │ ├── test_api_technical.py
│ │ ├── test_chaos_engineering.py
│ │ ├── test_config_management.py
│ │ ├── test_full_backtest_workflow_advanced.py
│ │ ├── test_full_backtest_workflow.py
│ │ ├── test_high_volume.py
│ │ ├── test_mcp_tools.py
│ │ ├── test_orchestration_complete.py
│ │ ├── test_portfolio_persistence.py
│ │ ├── test_redis_cache.py
│ │ ├── test_security_integration.py.disabled
│ │ └── vcr_setup.py
│ ├── performance
│ │ ├── __init__.py
│ │ ├── test_benchmarks.py
│ │ ├── test_load.py
│ │ ├── test_profiling.py
│ │ └── test_stress.py
│ ├── providers
│ │ └── test_stock_data_simple.py
│ ├── README.md
│ ├── test_agents_router_mcp.py
│ ├── test_backtest_persistence.py
│ ├── test_cache_management_service.py
│ ├── test_cache_serialization.py
│ ├── test_circuit_breaker.py
│ ├── test_database_pool_config_simple.py
│ ├── test_database_pool_config.py
│ ├── test_deep_research_functional.py
│ ├── test_deep_research_integration.py
│ ├── test_deep_research_parallel_execution.py
│ ├── test_error_handling.py
│ ├── test_event_loop_integrity.py
│ ├── test_exa_research_integration.py
│ ├── test_exception_hierarchy.py
│ ├── test_financial_search.py
│ ├── test_graceful_shutdown.py
│ ├── test_integration_simple.py
│ ├── test_langgraph_workflow.py
│ ├── test_market_data_async.py
│ ├── test_market_data_simple.py
│ ├── test_mcp_orchestration_functional.py
│ ├── test_ml_strategies.py
│ ├── test_optimized_research_agent.py
│ ├── test_orchestration_integration.py
│ ├── test_orchestration_logging.py
│ ├── test_orchestration_tools_simple.py
│ ├── test_parallel_research_integration.py
│ ├── test_parallel_research_orchestrator.py
│ ├── test_parallel_research_performance.py
│ ├── test_performance_optimizations.py
│ ├── test_production_validation.py
│ ├── test_provider_architecture.py
│ ├── test_rate_limiting_enhanced.py
│ ├── test_runner_validation.py
│ ├── test_security_comprehensive.py.disabled
│ ├── test_security_cors.py
│ ├── test_security_enhancements.py.disabled
│ ├── test_security_headers.py
│ ├── test_security_penetration.py
│ ├── test_session_management.py
│ ├── test_speed_optimization_validation.py
│ ├── test_stock_analysis_dependencies.py
│ ├── test_stock_analysis_service.py
│ ├── test_stock_data_fetching_service.py
│ ├── test_supervisor_agent.py
│ ├── test_supervisor_functional.py
│ ├── test_tool_estimation_config.py
│ ├── test_visualization.py
│ └── utils
│ ├── test_agent_errors.py
│ ├── test_logging.py
│ ├── test_parallel_screening.py
│ └── test_quick_cache.py
├── tools
│ ├── check_orchestration_config.py
│ ├── experiments
│ │ ├── validation_examples.py
│ │ └── validation_fixed.py
│ ├── fast_dev.sh
│ ├── hot_reload.py
│ ├── quick_test.py
│ └── templates
│ ├── new_router_template.py
│ ├── new_tool_template.py
│ ├── screening_strategy_template.py
│ └── test_template.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/maverick_mcp/validation/technical.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Validation models for technical analysis tools.
3 |
4 | This module provides Pydantic models for validating inputs
5 | to all technical analysis tools.
6 | """
7 |
8 | from pydantic import Field, field_validator
9 |
10 | from .base import (
11 | PositiveInt,
12 | StrictBaseModel,
13 | TickerSymbol,
14 | TickerValidator,
15 | )
16 |
17 |
18 | class RSIAnalysisRequest(StrictBaseModel):
19 | """Validation for get_rsi_analysis tool."""
20 |
21 | ticker: TickerSymbol = Field(..., description="Stock ticker symbol")
22 | period: PositiveInt = Field(
23 | default=14, le=100, description="RSI period (typically 14)"
24 | )
25 | days: PositiveInt = Field(
26 | default=365,
27 | le=3650, # Max 10 years
28 | description="Number of days of historical data",
29 | )
30 |
31 | @field_validator("ticker")
32 | @classmethod
33 | def normalize_ticker(cls, v: str) -> str:
34 | """Normalize ticker to uppercase."""
35 | return TickerValidator.validate_ticker(v)
36 |
37 | model_config = {
38 | "json_schema_extra": {
39 | "examples": [
40 | {"ticker": "AAPL", "period": 14, "days": 365},
41 | {"ticker": "MSFT", "period": 21, "days": 90},
42 | ]
43 | }
44 | }
45 |
46 |
47 | class MACDAnalysisRequest(StrictBaseModel):
48 | """Validation for get_macd_analysis tool."""
49 |
50 | ticker: TickerSymbol = Field(..., description="Stock ticker symbol")
51 | fast_period: PositiveInt = Field(default=12, le=50, description="Fast EMA period")
52 | slow_period: PositiveInt = Field(default=26, le=100, description="Slow EMA period")
53 | signal_period: PositiveInt = Field(
54 | default=9, le=50, description="Signal line period"
55 | )
56 | days: PositiveInt = Field(
57 | default=365, le=3650, description="Number of days of historical data"
58 | )
59 |
60 | @field_validator("ticker")
61 | @classmethod
62 | def normalize_ticker(cls, v: str) -> str:
63 | """Normalize ticker to uppercase."""
64 | return TickerValidator.validate_ticker(v)
65 |
66 | @field_validator("slow_period")
67 | @classmethod
68 | def validate_slow_greater_than_fast(cls, v: int, info) -> int:
69 | """Ensure slow period is greater than fast period."""
70 | fast = info.data.get("fast_period", 12)
71 | if v <= fast:
72 | raise ValueError(
73 | f"Slow period ({v}) must be greater than fast period ({fast})"
74 | )
75 | return v
76 |
77 | model_config = {
78 | "json_schema_extra": {
79 | "examples": [
80 | {
81 | "ticker": "AAPL",
82 | "fast_period": 12,
83 | "slow_period": 26,
84 | "signal_period": 9,
85 | },
86 | {
87 | "ticker": "GOOGL",
88 | "fast_period": 10,
89 | "slow_period": 20,
90 | "signal_period": 5,
91 | "days": 180,
92 | },
93 | ]
94 | }
95 | }
96 |
97 |
98 | class SupportResistanceRequest(StrictBaseModel):
99 | """Validation for get_support_resistance tool."""
100 |
101 | ticker: TickerSymbol = Field(..., description="Stock ticker symbol")
102 | days: PositiveInt = Field(
103 | default=365, le=3650, description="Number of days of historical data"
104 | )
105 |
106 | @field_validator("ticker")
107 | @classmethod
108 | def normalize_ticker(cls, v: str) -> str:
109 | """Normalize ticker to uppercase."""
110 | return TickerValidator.validate_ticker(v)
111 |
112 |
113 | class TechnicalAnalysisRequest(StrictBaseModel):
114 | """Validation for get_full_technical_analysis tool."""
115 |
116 | ticker: TickerSymbol = Field(..., description="Stock ticker symbol")
117 | days: PositiveInt = Field(
118 | default=365, le=3650, description="Number of days of historical data"
119 | )
120 |
121 | @field_validator("ticker")
122 | @classmethod
123 | def normalize_ticker(cls, v: str) -> str:
124 | """Normalize ticker to uppercase."""
125 | return TickerValidator.validate_ticker(v)
126 |
127 | model_config = {
128 | "json_schema_extra": {
129 | "examples": [
130 | {"ticker": "AAPL", "days": 365},
131 | {"ticker": "TSLA", "days": 90},
132 | ]
133 | }
134 | }
135 |
136 |
137 | class StockChartRequest(StrictBaseModel):
138 | """Validation for get_stock_chart_analysis tool."""
139 |
140 | ticker: TickerSymbol = Field(..., description="Stock ticker symbol")
141 |
142 | @field_validator("ticker")
143 | @classmethod
144 | def normalize_ticker(cls, v: str) -> str:
145 | """Normalize ticker to uppercase."""
146 | return TickerValidator.validate_ticker(v)
147 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Feature Request
3 | about: Suggest a new feature or improvement for MaverickMCP
4 | title: '[FEATURE] '
5 | labels: ['enhancement', 'needs-triage']
6 | assignees: ''
7 | ---
8 |
9 | ## 🚀 Feature Request Summary
10 |
11 | A clear and concise description of the feature you'd like to see added.
12 |
13 | ## 💰 Financial Disclaimer Acknowledgment
14 |
15 | - [ ] I understand this is educational software and not financial advice
16 | - [ ] This feature request is for educational/technical purposes, not investment recommendations
17 | - [ ] I understand that any financial analysis features will include appropriate disclaimers
18 |
19 | ## 🎯 Problem/Use Case
20 |
21 | **What problem does this feature solve?**
22 | A clear description of the problem or limitation you're experiencing.
23 |
24 | **Who would benefit from this feature?**
25 | - [ ] Individual traders learning technical analysis
26 | - [ ] MCP developers building financial tools
27 | - [ ] Educational institutions teaching finance
28 | - [ ] Open source contributors
29 | - [ ] Other: ___________
30 |
31 | ## 💡 Proposed Solution
32 |
33 | **Describe your ideal solution:**
34 | A clear and concise description of what you want to happen.
35 |
36 | **Alternative approaches you've considered:**
37 | Any alternative solutions or features you've thought about.
38 |
39 | ## 🔧 Technical Details
40 |
41 | **Component Area:**
42 | - [ ] Data fetching (new data sources, APIs)
43 | - [ ] Technical analysis (new indicators, calculations)
44 | - [ ] Stock screening (new strategies, filters)
45 | - [ ] Portfolio analysis (risk metrics, optimization)
46 | - [ ] MCP tools (new tools, tool improvements)
47 | - [ ] Database/Caching (performance, storage)
48 | - [ ] Claude Desktop integration
49 | - [ ] Developer experience (setup, debugging)
50 | - [ ] Documentation and examples
51 |
52 | **Implementation Complexity:**
53 | - [ ] Simple (few lines of code, existing patterns)
54 | - [ ] Medium (new functionality, moderate effort)
55 | - [ ] Complex (major architectural changes, significant effort)
56 | - [ ] I'm not sure
57 |
58 | **Dependencies:**
59 | - Does this require new external APIs or libraries?
60 | - Are there any known technical constraints?
61 |
62 | ## 📊 Examples/Mockups
63 |
64 | **Code examples, mockups, or references:**
65 | ```python
66 | # Example of how you envision using this feature
67 | result = new_feature_function(symbol="AAPL", period=20)
68 | ```
69 |
70 | **Reference implementations:**
71 | - Links to similar features in other projects
72 | - Academic papers or financial resources
73 | - Industry standards or best practices
74 |
75 | ## 🎓 Educational Value
76 |
77 | **Learning objectives:**
78 | - What financial concepts would this help users learn?
79 | - How does this contribute to financial education?
80 | - What technical skills would developers gain?
81 |
82 | **Documentation needs:**
83 | - [ ] Code examples needed
84 | - [ ] Tutorial/guide needed
85 | - [ ] Financial concept explanation needed
86 | - [ ] API documentation needed
87 |
88 | ## 🤝 Contribution
89 |
90 | **Are you willing to contribute to this feature?**
91 | - [ ] Yes, I can implement this myself
92 | - [ ] Yes, I can help with testing/documentation
93 | - [ ] Yes, I can provide domain expertise
94 | - [ ] I can help but need guidance
95 | - [ ] I cannot contribute but would love to use it
96 |
97 | **Your relevant experience:**
98 | - Financial analysis background?
99 | - Python development experience?
100 | - MCP protocol familiarity?
101 | - Other relevant skills?
102 |
103 | ## ✅ Pre-submission Checklist
104 |
105 | - [ ] I have searched existing issues to avoid duplicates
106 | - [ ] This feature aligns with educational/personal-use goals
107 | - [ ] I have considered the implementation complexity
108 | - [ ] I understand this won't provide financial advice or guarantees
109 | - [ ] I have provided clear examples and use cases
110 |
111 | ## 🏷️ Feature Classification
112 |
113 | **Priority:**
114 | - [ ] Critical (blocking important use cases)
115 | - [ ] High (significant improvement to user experience)
116 | - [ ] Medium (nice to have, moderate impact)
117 | - [ ] Low (minor enhancement)
118 |
119 | **Effort Estimate:**
120 | - [ ] Small (1-3 days)
121 | - [ ] Medium (1-2 weeks)
122 | - [ ] Large (1+ months)
123 | - [ ] Unknown
124 |
125 | **Release Timeline:**
126 | - [ ] Next minor version
127 | - [ ] Next major version
128 | - [ ] Future consideration
129 | - [ ] No preference
130 |
131 | ## 🌟 Additional Context
132 |
133 | **Related issues or discussions:**
134 | - Link to any related GitHub issues or discussions
135 | - References to community conversations
136 |
137 | **Financial domain considerations:**
138 | - Any regulatory or compliance aspects?
139 | - Specific financial methodologies or standards?
140 | - Data provider requirements or limitations?
141 |
142 | **Community impact:**
143 | - How many users would benefit?
144 | - Educational institutions that might use this?
145 | - Open source projects that could leverage this?
```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [project]
2 | name = "maverick_mcp"
3 | version = "0.1.0"
4 | description = "Personal-use MCP server for Claude Desktop providing professional-grade stock analysis and technical indicators"
5 | readme = "README.md"
6 | requires-python = ">=3.12,<3.13"
7 | dependencies = [
8 | # Core MCP and server dependencies
9 | "fastmcp>=2.7.0",
10 | "mcp>=1.9.3",
11 | "fastapi>=0.115.12",
12 | "uvicorn>=0.35.0",
13 | "gunicorn>=23.0.0",
14 | "python-multipart>=0.0.20",
15 | "aiofiles>=24.1.0",
16 | "httpx>=0.28.1",
17 | "python-dotenv>=1.0.1",
18 | # LangChain and AI dependencies
19 | "langchain>=0.3.25",
20 | "langchain-anthropic>=0.3.15",
21 | "langchain-community>=0.3.24",
22 | "langchain-openai>=0.3.19",
23 | "langchain-mcp-adapters>=0.1.6",
24 | "langgraph>=0.4.8",
25 | "langgraph-supervisor>=0.0.18",
26 | "anthropic>=0.52.2",
27 | "openai>=1.84.0",
28 | "tiktoken>=0.6.0",
29 | # Deep research dependencies
30 | "exa-py>=1.0.19",
31 | # Database and caching
32 | "sqlalchemy>=2.0.40",
33 | "alembic>=1.16.1",
34 | "psycopg2-binary>=2.9.10",
35 | "aiosqlite>=0.20.0",
36 | "asyncpg>=0.30.0",
37 | "greenlet>=3.0.0",
38 | "redis>=6.2.0",
39 | "hiredis>=3.2.1",
40 | "msgpack>=1.0.7",
41 | "certifi>=2024.2.2",
42 | # Financial data and analysis (core)
43 | "numpy>=1.26.4",
44 | "pandas>=2.2.3",
45 | "yfinance>=0.2.63",
46 | "finvizfinance>=1.1.0",
47 | "pandas-ta>=0.3.14b0",
48 | "ta-lib>=0.6.3",
49 | # Backtesting
50 | "vectorbt>=0.26.0",
51 | "numba>=0.60.0",
52 | "scikit-learn>=1.6.1",
53 | "scipy>=1.15.3",
54 | "pytz>=2024.1",
55 | # Security (basic cryptography for data security)
56 | "cryptography>=42.0.0",
57 | # System monitoring (basic)
58 | "psutil>=6.0.0",
59 | "sentry-sdk[fastapi]>=2.22.0",
60 | # Prometheus metrics
61 | "prometheus-client>=0.21.1",
62 | # Trading
63 | "fredapi>=0.5.2",
64 | "pandas-datareader>=0.10.0",
65 | "pandas-market-calendars>=5.1.0",
66 | "tiingo>=0.16.1",
67 | # Visualization (essential only)
68 | "matplotlib>=3.10.3",
69 | "plotly>=5.0.0",
70 | "seaborn>=0.13.2",
71 | "kaleido>=0.2.1", # Required for Plotly image export
72 | # Development tools
73 | "watchdog>=6.0.0",
74 | "ty>=0.0.1a19",
75 | "pytest>=8.4.0",
76 | "pytest-asyncio>=1.1.0",
77 | "pytest-cov>=6.2.1",
78 | "vcrpy>=7.0.0",
79 | ]
80 |
81 | [project.optional-dependencies]
82 | dev = [
83 | "pytest>=8.3.5",
84 | "pytest-asyncio>=0.24.0",
85 | "pytest-cov>=4.1.0",
86 | "pytest-xdist>=3.6.0",
87 | "testcontainers[postgres,redis]>=4.5.0",
88 | "vcrpy>=6.0.1",
89 | "aiosqlite>=0.20.0",
90 | "greenlet>=3.0.0",
91 | "asyncpg>=0.30.0",
92 | "ruff>=0.11.10",
93 | "bandit>=1.7.5",
94 | "safety>=3.0.0",
95 | "types-requests>=2.31.0",
96 | "types-pytz>=2024.1.0",
97 | "ty>=0.0.1a19",
98 | ]
99 |
100 | [build-system]
101 | requires = ["hatchling"]
102 | build-backend = "hatchling.build"
103 |
104 | [tool.hatch.build.targets.wheel]
105 | include = ["*.py"]
106 |
107 | [tool.pytest.ini_options]
108 | minversion = "8.0"
109 | testpaths = ["tests"]
110 | python_files = ["test_*.py", "*_test.py"]
111 | python_classes = ["Test*"]
112 | python_functions = ["test_*"]
113 | # Markers for test categories
114 | markers = [
115 | "unit: marks tests as unit tests (deselect with '-m \"not unit\"')",
116 | "integration: marks tests as integration tests (deselect with '-m \"not integration\"')",
117 | "slow: marks tests as slow (deselect with '-m \"not slow\"')",
118 | "external: marks tests that require external APIs",
119 | "database: marks tests that require database access",
120 | "redis: marks tests that require Redis access",
121 | ]
122 | # Default to running only unit tests
123 | addopts = [
124 | "-v",
125 | "--strict-markers",
126 | "--tb=short",
127 | "-m", "not integration and not slow and not external",
128 | "--durations=10", # Show 10 slowest tests
129 | ]
130 | # Async configuration
131 | asyncio_mode = "auto"
132 | asyncio_default_fixture_loop_scope = "function"
133 |
134 | [tool.ruff]
135 | line-length = 88
136 | target-version = "py312"
137 |
138 | [tool.ruff.lint]
139 | select = [
140 | "E", # pycodestyle errors
141 | "W", # pycodestyle warnings
142 | "F", # pyflakes
143 | "I", # isort
144 | "B", # flake8-bugbear
145 | "C4", # flake8-comprehensions
146 | "UP", # pyupgrade
147 | ]
148 | ignore = [
149 | "E501", # line too long (handled by formatter)
150 | "B008", # do not perform function calls in argument defaults
151 | "B904", # raise without from inside except
152 | "W191", # indentation contains tabs
153 | ]
154 |
155 | [tool.ruff.lint.per-file-ignores]
156 | "tests/*" = ["F403", "F405"] # star imports allowed in tests
157 |
158 | [tool.ruff.format]
159 | quote-style = "double"
160 | indent-style = "space"
161 | skip-magic-trailing-comma = false
162 | line-ending = "auto"
163 |
164 | [dependency-groups]
165 | dev = [
166 | "testcontainers[postgres]>=4.10.0",
167 | ]
168 |
169 |
```
--------------------------------------------------------------------------------
/scripts/test_seeded_data.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Test script to verify seeded data works with MCP tools.
4 |
5 | This script tests the key MCP screening tools to ensure they return
6 | results from the seeded database.
7 | """
8 |
9 | import logging
10 | import os
11 | import sys
12 | from pathlib import Path
13 |
14 | # Add the project root to the Python path
15 | project_root = Path(__file__).parent.parent
16 | sys.path.insert(0, str(project_root))
17 |
18 | # noqa: E402 - imports must come after sys.path modification
19 | from sqlalchemy import create_engine # noqa: E402
20 | from sqlalchemy.orm import sessionmaker # noqa: E402
21 |
22 | from maverick_mcp.providers.stock_data import EnhancedStockDataProvider # noqa: E402
23 |
24 | # Set up logging
25 | logging.basicConfig(
26 | level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
27 | )
28 | logger = logging.getLogger("test_seeded_data")
29 |
30 |
31 | def test_screening_tools():
32 | """Test the main screening tools with seeded data."""
33 | logger.info("Testing MCP screening tools with seeded data...")
34 |
35 | # Set up database connection
36 | database_url = os.getenv("DATABASE_URL") or "sqlite:///maverick_mcp.db"
37 | engine = create_engine(database_url, echo=False)
38 | SessionLocal = sessionmaker(bind=engine)
39 |
40 | with SessionLocal() as session:
41 | # Create provider
42 | provider = EnhancedStockDataProvider(db_session=session)
43 |
44 | # Test 1: Maverick recommendations (bullish)
45 | logger.info("=== Testing Maverick Recommendations (Bullish) ===")
46 | try:
47 | maverick_results = provider.get_maverick_recommendations(limit=5)
48 | logger.info(f"✅ Found {len(maverick_results)} Maverick recommendations")
49 | for i, stock in enumerate(maverick_results[:3]):
50 | logger.info(
51 | f" {i + 1}. {stock['ticker']} - Score: {stock.get('combined_score', 'N/A')}"
52 | )
53 | except Exception as e:
54 | logger.error(f"❌ Maverick recommendations failed: {e}")
55 |
56 | # Test 2: Bear recommendations
57 | logger.info("\n=== Testing Bear Recommendations ===")
58 | try:
59 | bear_results = provider.get_maverick_bear_recommendations(limit=5)
60 | logger.info(f"✅ Found {len(bear_results)} Bear recommendations")
61 | for i, stock in enumerate(bear_results[:3]):
62 | logger.info(
63 | f" {i + 1}. {stock['ticker']} - Score: {stock.get('score', 'N/A')}"
64 | )
65 | except Exception as e:
66 | logger.error(f"❌ Bear recommendations failed: {e}")
67 |
68 | # Test 3: Supply/Demand breakouts
69 | logger.info("\n=== Testing Supply/Demand Breakouts ===")
70 | try:
71 | breakout_results = provider.get_supply_demand_breakout_recommendations(
72 | limit=5
73 | )
74 | logger.info(f"✅ Found {len(breakout_results)} Supply/Demand breakouts")
75 | for i, stock in enumerate(breakout_results[:3]):
76 | logger.info(
77 | f" {i + 1}. {stock['ticker']} - Score: {stock.get('momentum_score', 'N/A')}"
78 | )
79 | except Exception as e:
80 | logger.error(f"❌ Supply/Demand breakouts failed: {e}")
81 |
82 | # Test 4: Individual stock data
83 | logger.info("\n=== Testing Individual Stock Data ===")
84 | try:
85 | # Test with AAPL (should have price data)
86 | stock_data = provider.get_stock_data(
87 | "AAPL", start_date="2025-08-01", end_date="2025-08-23"
88 | )
89 | logger.info(f"✅ AAPL price data: {len(stock_data)} records")
90 | if not stock_data.empty:
91 | latest = stock_data.iloc[-1]
92 | logger.info(f" Latest: {latest.name} - Close: ${latest['close']:.2f}")
93 | except Exception as e:
94 | logger.error(f"❌ Individual stock data failed: {e}")
95 |
96 | # Test 5: All screening recommendations
97 | logger.info("\n=== Testing All Screening Recommendations ===")
98 | try:
99 | all_results = provider.get_all_screening_recommendations()
100 | total = sum(len(stocks) for stocks in all_results.values())
101 | logger.info(f"✅ Total screening results across all categories: {total}")
102 | for category, stocks in all_results.items():
103 | logger.info(f" {category}: {len(stocks)} stocks")
104 | except Exception as e:
105 | logger.error(f"❌ All screening recommendations failed: {e}")
106 |
107 | logger.info("\n🎉 MCP screening tools test completed!")
108 |
109 |
110 | if __name__ == "__main__":
111 | test_screening_tools()
112 |
```
--------------------------------------------------------------------------------
/DATABASE_SETUP.md:
--------------------------------------------------------------------------------
```markdown
1 | # MaverickMCP Database Setup
2 |
3 | This guide explains how to set up and seed the SQLite database for MaverickMCP with sample stock data.
4 |
5 | ## Quick Start
6 |
7 | ### 1. Run Complete Setup (Recommended)
8 |
9 | ```bash
10 | # Set your database URL (optional - defaults to SQLite)
11 | export DATABASE_URL=sqlite:///maverick_mcp.db
12 |
13 | # Run the complete setup script
14 | ./scripts/setup_database.sh
15 | ```
16 |
17 | This will:
18 |
19 | - ✅ Create SQLite database with all tables
20 | - ✅ Seed with 40 sample stocks (AAPL, MSFT, GOOGL, etc.)
21 | - ✅ Populate with 1,370+ price records
22 | - ✅ Generate sample screening results (Maverick, Bear, Supply/Demand)
23 | - ✅ Add technical indicators cache
24 |
25 | ### 2. Manual Step-by-Step Setup
26 |
27 | ```bash
28 | # Step 1: Create database tables
29 | python scripts/migrate_db.py
30 |
31 | # Step 2: Seed with sample data (no API key required)
32 | python scripts/seed_db.py
33 |
34 | # Step 3: Test the setup
35 | python scripts/test_seeded_data.py
36 | ```
37 |
38 | ## Database Configuration
39 |
40 | ### Default Configuration (SQLite)
41 |
42 | - **Database**: `sqlite:///maverick_mcp.db`
43 | - **Location**: Project root directory
44 | - **No setup required**: Works out of the box
45 |
46 | ### PostgreSQL (Optional)
47 |
48 | ```bash
49 | # Set environment variable
50 | export DATABASE_URL=postgresql://localhost/maverick_mcp
51 |
52 | # Create PostgreSQL database
53 | createdb maverick_mcp
54 |
55 | # Run migration
56 | python scripts/migrate_db.py
57 | ```
58 |
59 | ## Sample Data Overview
60 |
61 | ### Stocks Included (40 total)
62 |
63 | - **Large Cap**: AAPL, MSFT, GOOGL, AMZN, TSLA, NVDA, META, BRK-B, JNJ, V
64 | - **Growth**: AMD, CRM, SHOP, ROKU, ZM, DOCU, SNOW, PLTR, RBLX, U
65 | - **Value**: KO, PFE, XOM, CVX, JPM, BAC, WMT, PG, T, VZ
66 | - **Small Cap**: UPST, SOFI, OPEN, WISH, CLOV, SPCE, LCID, RIVN, BYND, PTON
67 |
68 | ### Generated Data
69 |
70 | - **1,370+ Price Records**: 200 days of historical data for 10 stocks
71 | - **24 Maverick Stocks**: Bullish momentum recommendations
72 | - **16 Bear Stocks**: Bearish setups with technical indicators
73 | - **16 Supply/Demand Breakouts**: Accumulation breakout candidates
74 | - **600 Technical Indicators**: RSI, SMA cache for analysis
75 |
76 | ## Testing MCP Tools
77 |
78 | After seeding, test that the screening tools work:
79 |
80 | ```bash
81 | python scripts/test_seeded_data.py
82 | ```
83 |
84 | Expected output:
85 |
86 | ```
87 | ✅ Found 5 Maverick recommendations
88 | 1. PTON - Score: 100
89 | 2. BYND - Score: 100
90 | 3. RIVN - Score: 100
91 |
92 | ✅ Found 5 Bear recommendations
93 | 1. MSFT - Score: 37
94 | 2. JNJ - Score: 32
95 | 3. TSLA - Score: 32
96 |
97 | ✅ Total screening results across all categories: 56
98 | ```
99 |
100 | ## Using with Claude Desktop
101 |
102 | After database setup, start the MCP server:
103 |
104 | ```bash
105 | # Start the server
106 | make dev
107 |
108 | # Or manually
109 | uvicorn maverick_mcp.api.server:app --host 0.0.0.0 --port 8003
110 | ```
111 |
112 | Then connect with Claude Desktop using `mcp-remote`:
113 |
114 | ```json
115 | {
116 | "mcpServers": {
117 | "maverick-mcp": {
118 | "command": "npx",
119 | "args": ["-y", "mcp-remote", "http://localhost:8003/mcp"]
120 | }
121 | }
122 | }
123 | ```
124 |
125 | Test with prompts like:
126 |
127 | - "Show me the top maverick stock recommendations"
128 | - "Get technical analysis for AAPL"
129 | - "Find bearish stocks with high RSI"
130 |
131 | ## Database Schema
132 |
133 | ### Core Tables
134 |
135 | - **mcp_stocks**: Stock symbols and company information
136 | - **mcp_price_cache**: Historical OHLCV price data
137 | - **mcp_technical_cache**: Calculated technical indicators
138 |
139 | ### Screening Tables
140 |
141 | - **mcp_maverick_stocks**: Bullish momentum screening results
142 | - **mcp_maverick_bear_stocks**: Bearish setup screening results
143 | - **mcp_supply_demand_breakouts**: Breakout pattern screening results
144 |
145 | ## Troubleshooting
146 |
147 | ### Database Connection Issues
148 |
149 | ```bash
150 | # Check database exists
151 | ls -la maverick_mcp.db
152 |
153 | # Test SQLite connection
154 | sqlite3 maverick_mcp.db "SELECT COUNT(*) FROM mcp_stocks;"
155 | ```
156 |
157 | ### No Screening Results
158 |
159 | ```bash
160 | # Verify data was seeded
161 | sqlite3 maverick_mcp.db "
162 | SELECT
163 | (SELECT COUNT(*) FROM mcp_stocks) as stocks,
164 | (SELECT COUNT(*) FROM mcp_price_cache) as prices,
165 | (SELECT COUNT(*) FROM mcp_maverick_stocks) as maverick;
166 | "
167 | ```
168 |
169 | ### MCP Server Connection
170 |
171 | ```bash
172 | # Check server is running
173 | curl http://localhost:8003/health
174 |
175 | # Check MCP endpoint
176 | curl http://localhost:8003/mcp/capabilities
177 | ```
178 |
179 | ## Advanced Configuration
180 |
181 | ### Environment Variables
182 |
183 | ```bash
184 | # Database
185 | DATABASE_URL=sqlite:///maverick_mcp.db
186 |
187 | # Optional: Enable debug logging
188 | LOG_LEVEL=debug
189 |
190 | # Optional: Redis caching
191 | REDIS_HOST=localhost
192 | REDIS_PORT=6379
193 | ```
194 |
195 | ### Custom Stock Lists
196 |
197 | Edit `scripts/seed_db.py` and modify `SAMPLE_STOCKS` to include your preferred stock symbols.
198 |
199 | ### Production Setup
200 |
201 | - Use PostgreSQL for better performance
202 | - Enable Redis caching
203 | - Set up proper logging
204 | - Configure rate limiting
205 |
206 | ---
207 |
208 | ✅ **Database ready!** Your MaverickMCP instance now has a complete SQLite database with sample stock data and screening results.
209 |
```
--------------------------------------------------------------------------------
/alembic/env.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Alembic environment configuration for Maverick-MCP.
3 |
4 | This file configures Alembic to work with the existing Django database,
5 | managing only tables with the mcp_ prefix.
6 | """
7 |
8 | import os
9 | import sys
10 | from logging.config import fileConfig
11 | from pathlib import Path
12 |
13 | from sqlalchemy import engine_from_config, pool
14 |
15 | from alembic import context
16 |
# Add project root to Python path so `maverick_mcp` imports resolve when
# Alembic executes this file from the alembic/ directory.
sys.path.insert(0, str(Path(__file__).parent.parent))

# Import models (must happen after the sys.path tweak above)
from maverick_mcp.data.models import Base as DataBase

# Use data models metadata (auth removed for personal version)
combined_metadata = DataBase.metadata

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Get database URL from environment or use default.
# Precedence: DATABASE_URL, then POSTGRES_URL, then a local default.
DATABASE_URL = os.getenv(
    "DATABASE_URL",
    os.getenv("POSTGRES_URL", "postgresql://localhost/local_production_snapshot"),
)

# Override sqlalchemy.url in alembic.ini so environment variables win
# over whatever the ini file contains.
config.set_main_option("sqlalchemy.url", DATABASE_URL)

# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = combined_metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
53 |
54 |
def include_object(object, name, type_, reflected, compare_to):
    """
    Include only MCP-prefixed tables and stock-related tables.

    This ensures Alembic only manages tables that belong to Maverick-MCP,
    not Django tables.
    """
    managed_names = {
        "maverick_stocks",
        "maverick_bear_stocks",
        "supply_demand_breakouts",
    }

    def _is_managed_table(table_name: str) -> bool:
        # A table is ours if it carries one of our prefixes or is listed
        # explicitly in the managed-names set above.
        return table_name.startswith(("mcp_", "stocks_")) or table_name in managed_names

    if type_ == "table":
        return _is_managed_table(name)

    if type_ in (
        "index",
        "unique_constraint",
        "foreign_key_constraint",
        "check_constraint",
    ):
        # Constraints/indexes attached to a known table follow that table.
        table = getattr(object, "table", None)
        if table is not None:
            return _is_managed_table(table.name)
        # Reflected objects without a table reference: fall back to matching
        # the constraint/index name against our naming conventions.
        return name.startswith(
            (
                "idx_mcp_",
                "uq_mcp_",
                "fk_mcp_",
                "ck_mcp_",
                "idx_stocks_",
                "uq_stocks_",
                "fk_stocks_",
                "ck_stocks_",
                "ck_pricecache_",
                "ck_maverick_",
                "ck_supply_demand_",
            )
        )

    # Anything else (e.g. columns) is left to Alembic's default handling.
    return True
107 |
108 |
def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    Configures the context with just a database URL rather than an Engine,
    so no DBAPI needs to be available; calls to context.execute() emit the
    given SQL to the script output instead of a live connection.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        include_object=include_object,
    )

    with context.begin_transaction():
        context.run_migrations()
132 |
133 |
def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    Builds an Engine from the alembic config section and runs the
    migrations over a live database connection.
    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,  # migrations don't need pooled connections
    )

    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            include_object=include_object,
        )
        with context.begin_transaction():
            context.run_migrations()
156 |
157 |
# Entry point: Alembic imports this module and the mode is decided by how
# the `alembic` command was invoked (e.g. `--sql` selects offline mode).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
162 |
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/implementations/market_data_adapter.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Market data provider adapter.
3 |
4 | This module provides adapters that make the existing MarketDataProvider
5 | compatible with the new IMarketDataProvider interface.
6 | """
7 |
8 | import asyncio
9 | import logging
10 | from typing import Any
11 |
12 | from maverick_mcp.providers.interfaces.market_data import (
13 | IMarketDataProvider,
14 | MarketDataConfig,
15 | )
16 | from maverick_mcp.providers.market_data import MarketDataProvider
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
class MarketDataAdapter(IMarketDataProvider):
    """
    Adapter that makes the existing MarketDataProvider compatible with the
    IMarketDataProvider interface.

    This adapter wraps the existing provider and exposes it through the new
    interface contracts, enabling gradual migration to the new architecture.
    Blocking provider calls are off-loaded to the default thread-pool
    executor so they never block the event loop.
    """

    def __init__(self, config: MarketDataConfig | None = None):
        """
        Initialize the market data adapter.

        Args:
            config: Market data configuration (optional)
        """
        self._config = config
        self._provider = MarketDataProvider()

        logger.debug("MarketDataAdapter initialized")

    async def _run_sync(self, func, *args):
        """
        Execute a blocking provider method in the default executor.

        Uses asyncio.get_running_loop() rather than the deprecated
        asyncio.get_event_loop(), which emits a DeprecationWarning when
        called from within a coroutine on modern Python versions.

        Args:
            func: The synchronous callable to execute
            *args: Positional arguments forwarded to the callable

        Returns:
            Whatever the wrapped callable returns
        """
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, func, *args)

    async def get_market_summary(self) -> dict[str, Any]:
        """
        Get a summary of major market indices (async wrapper).

        Returns:
            Dictionary with market index data including prices and changes
        """
        return await self._run_sync(self._provider.get_market_summary)

    async def get_top_gainers(self, limit: int = 10) -> list[dict[str, Any]]:
        """
        Get top gaining stocks in the market (async wrapper).

        Args:
            limit: Maximum number of stocks to return

        Returns:
            List of dictionaries with stock data for top gainers
        """
        return await self._run_sync(self._provider.get_top_gainers, limit)

    async def get_top_losers(self, limit: int = 10) -> list[dict[str, Any]]:
        """
        Get top losing stocks in the market (async wrapper).

        Args:
            limit: Maximum number of stocks to return

        Returns:
            List of dictionaries with stock data for top losers
        """
        return await self._run_sync(self._provider.get_top_losers, limit)

    async def get_most_active(self, limit: int = 10) -> list[dict[str, Any]]:
        """
        Get most active stocks by volume (async wrapper).

        Args:
            limit: Maximum number of stocks to return

        Returns:
            List of dictionaries with stock data for most active stocks
        """
        return await self._run_sync(self._provider.get_most_active, limit)

    async def get_sector_performance(self) -> dict[str, float]:
        """
        Get sector performance data (async wrapper).

        Returns:
            Dictionary mapping sector names to performance percentages
        """
        return await self._run_sync(self._provider.get_sector_performance)

    async def get_earnings_calendar(self, days: int = 7) -> list[dict[str, Any]]:
        """
        Get upcoming earnings announcements (async wrapper).

        Args:
            days: Number of days to look ahead

        Returns:
            List of dictionaries with earnings announcement data
        """
        return await self._run_sync(self._provider.get_earnings_calendar, days)

    async def get_market_overview(self) -> dict[str, Any]:
        """
        Get comprehensive market overview (async wrapper).

        Returns:
            Dictionary with comprehensive market data including:
            - market_summary: Index data
            - top_gainers: Daily gainers
            - top_losers: Daily losers
            - sector_performance: Sector data
            - timestamp: Data timestamp
        """
        # Prefer the provider's native async method when it exists,
        # otherwise wrap the synchronous version in the executor.
        if hasattr(self._provider, "get_market_overview_async"):
            return await self._provider.get_market_overview_async()
        return await self._run_sync(self._provider.get_market_overview)

    def get_sync_provider(self) -> MarketDataProvider:
        """
        Get the underlying synchronous provider for backward compatibility.

        Returns:
            The wrapped MarketDataProvider instance
        """
        return self._provider
142 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/connection_manager.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | MCP Connection Manager for persistent tool registration and session management.
3 | """
4 |
5 | import asyncio
6 | import logging
7 | from dataclasses import dataclass, field
8 | from datetime import datetime
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
@dataclass
class ConnectionSession:
    """Represents an active MCP connection session.

    Tracks when the client connected, when it was last seen, and whether
    tool registration has been confirmed for this session.
    """

    session_id: str  # unique identifier for this connection
    client_info: str  # free-form client description (defaults to "unknown" upstream)
    connected_at: datetime = field(default_factory=datetime.now)  # set at creation time
    last_activity: datetime = field(default_factory=datetime.now)  # refreshed on activity
    tools_registered: bool = False  # True once tools are available to this session
    is_active: bool = True  # flipped to False during cleanup
23 |
24 |
25 | class MCPConnectionManager:
26 | """
27 | Manages MCP connection sessions and ensures persistent tool registration.
28 |
29 | Fixes:
30 | - Single connection initialization to prevent tool registration conflicts
31 | - Session persistence to maintain tool availability across connection cycles
32 | - Connection monitoring and cleanup
33 | """
34 |
35 | def __init__(self):
36 | self.sessions: dict[str, ConnectionSession] = {}
37 | self.tools_initialized = False
38 | self.startup_time = datetime.now()
39 | self._lock = asyncio.Lock()
40 |
41 | async def register_connection(
42 | self, session_id: str, client_info: str = "unknown"
43 | ) -> ConnectionSession:
44 | """Register a new connection session."""
45 | async with self._lock:
46 | logger.info(
47 | f"Registering new MCP connection: {session_id} from {client_info}"
48 | )
49 |
50 | # Clean up any existing session with same ID
51 | if session_id in self.sessions:
52 | await self.cleanup_session(session_id)
53 |
54 | # Create new session
55 | session = ConnectionSession(session_id=session_id, client_info=client_info)
56 | self.sessions[session_id] = session
57 |
58 | # Ensure tools are registered (only once globally)
59 | if not self.tools_initialized:
60 | logger.info("Initializing tools for first connection")
61 | self.tools_initialized = True
62 | session.tools_registered = True
63 | else:
64 | logger.info("Tools already initialized, reusing registration")
65 | session.tools_registered = True
66 |
67 | logger.info(
68 | f"Connection registered successfully. Active sessions: {len(self.sessions)}"
69 | )
70 | return session
71 |
72 | async def update_activity(self, session_id: str):
73 | """Update last activity timestamp for a session."""
74 | if session_id in self.sessions:
75 | self.sessions[session_id].last_activity = datetime.now()
76 |
77 | async def cleanup_session(self, session_id: str):
78 | """Clean up a specific session."""
79 | if session_id in self.sessions:
80 | session = self.sessions[session_id]
81 | session.is_active = False
82 | logger.info(
83 | f"Cleaning up session {session_id} (connected for {datetime.now() - session.connected_at})"
84 | )
85 | del self.sessions[session_id]
86 |
87 | async def cleanup_stale_sessions(self, timeout_seconds: int = 300):
88 | """Clean up sessions that haven't been active recently."""
89 | now = datetime.now()
90 | stale_sessions = []
91 |
92 | for session_id, session in self.sessions.items():
93 | if (now - session.last_activity).total_seconds() > timeout_seconds:
94 | stale_sessions.append(session_id)
95 |
96 | for session_id in stale_sessions:
97 | await self.cleanup_session(session_id)
98 |
99 | def get_connection_status(self) -> dict:
100 | """Get current connection status for debugging."""
101 | now = datetime.now()
102 | return {
103 | "active_sessions": len(self.sessions),
104 | "tools_initialized": self.tools_initialized,
105 | "server_uptime": str(now - self.startup_time),
106 | "sessions": [
107 | {
108 | "session_id": session.session_id,
109 | "client_info": session.client_info,
110 | "connected_duration": str(now - session.connected_at),
111 | "last_activity": str(now - session.last_activity),
112 | "tools_registered": session.tools_registered,
113 | "is_active": session.is_active,
114 | }
115 | for session in self.sessions.values()
116 | ],
117 | }
118 |
119 | async def ensure_tools_available(self) -> bool:
120 | """Ensure tools are available for connections."""
121 | return self.tools_initialized and len(self.sessions) > 0
122 |
123 |
# Global connection manager instance shared by the whole process; all
# handlers operate on this single manager.
connection_manager = MCPConnectionManager()


async def get_connection_manager() -> MCPConnectionManager:
    """Get the global connection manager instance."""
    return connection_manager
131 |
```
--------------------------------------------------------------------------------
/maverick_mcp/config/plotly_config.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Plotly configuration module for Maverick MCP.
3 |
4 | This module configures Plotly defaults using the modern plotly.io.defaults API
5 | to avoid deprecation warnings from the legacy kaleido.scope API.
6 | """
7 |
8 | import logging
9 | import warnings
10 | from typing import Any
11 |
# Plotly is treated as an optional dependency: every public function in
# this module degrades to a logged no-op when it is missing.
try:
    import plotly.io as pio

    PLOTLY_AVAILABLE = True
except ImportError:
    PLOTLY_AVAILABLE = False

logger = logging.getLogger(__name__)
20 |
21 |
def configure_plotly_defaults() -> None:
    """
    Configure Plotly defaults using the modern plotly.io.defaults API.

    This replaces the deprecated plotly.io.kaleido.scope configuration
    and helps reduce deprecation warnings.
    """
    if not PLOTLY_AVAILABLE:
        logger.warning("Plotly not available, skipping configuration")
        return

    try:
        # Static-image export defaults (modern replacement for the legacy
        # kaleido.scope configuration).
        defaults = pio.defaults
        defaults.default_format = "png"
        defaults.default_width = 800
        defaults.default_height = 600
        defaults.default_scale = 1.0

        # Optional attributes: only present on some plotly versions, so
        # guard each assignment with hasattr.
        for attr, value in (("mathjax", None), ("plotlyjs", "auto")):
            if hasattr(defaults, attr):
                setattr(defaults, attr, value)

        # kaleido.scope properties are deliberately left untouched; writing
        # them would trigger the very deprecation warnings we avoid.
        logger.info("✓ Plotly defaults configured successfully")

    except Exception as e:
        logger.error(f"Error configuring Plotly defaults: {e}")
53 |
54 |
def suppress_plotly_warnings() -> None:
    """
    Suppress specific Plotly/Kaleido deprecation warnings.

    These warnings come from the library internals and can't be fixed
    at the user code level until the libraries are updated.
    """
    try:
        # Message-based filters covering every kaleido-related deprecation text.
        message_patterns = (
            r".*plotly\.io\.kaleido\.scope\..*is deprecated.*",
            r".*Use of plotly\.io\.kaleido\.scope\..*is deprecated.*",
            r".*default_format.*deprecated.*",
            r".*default_width.*deprecated.*",
            r".*default_height.*deprecated.*",
            r".*default_scale.*deprecated.*",
            r".*mathjax.*deprecated.*",
            r".*plotlyjs.*deprecated.*",
        )
        for pattern in message_patterns:
            warnings.filterwarnings(
                "ignore",
                category=DeprecationWarning,
                message=pattern,
            )

        # Module-based filters catch anything the message patterns miss.
        for module_pattern in (r".*kaleido.*", r"plotly\.io\._kaleido"):
            warnings.filterwarnings(
                "ignore",
                category=DeprecationWarning,
                module=module_pattern,
            )

        logger.debug("✓ Plotly deprecation warnings suppressed")

    except Exception as e:
        logger.error(f"Error suppressing Plotly warnings: {e}")
99 |
100 |
def setup_plotly() -> None:
    """
    Complete Plotly setup with modern configuration and warning suppression.

    This function should be called once during application initialization.
    """
    if not PLOTLY_AVAILABLE:
        logger.warning("Plotly not available, skipping setup")
        return

    # Silence known library-internal warnings first so that configuring
    # the defaults below does not emit noise.
    suppress_plotly_warnings()
    configure_plotly_defaults()

    logger.info("✓ Plotly setup completed")
118 |
119 |
def get_plotly_config() -> dict[str, Any]:
    """
    Get current Plotly configuration for debugging.

    Returns:
        Dictionary with current Plotly configuration settings
    """
    if not PLOTLY_AVAILABLE:
        return {"error": "Plotly not available"}

    config: dict[str, Any] = {}
    _missing = object()  # sentinel so a literal None attribute still counts as present

    try:
        # Modern defaults
        defaults = pio.defaults
        config["defaults"] = {
            key: getattr(defaults, key, "unknown")
            for key in (
                "default_format",
                "default_width",
                "default_height",
                "default_scale",
            )
        }

        # Legacy kaleido scope, reported only when the attribute chain exists.
        scope = getattr(getattr(pio, "kaleido", _missing), "scope", _missing)
        if scope is not _missing:
            config["kaleido_scope"] = {
                "mathjax": getattr(scope, "mathjax", "unknown"),
                "plotlyjs": getattr(scope, "plotlyjs", "unknown"),
                "configured": getattr(scope, "_configured", False),
            }

    except Exception as e:
        config["error"] = f"Error getting config: {e}"

    return config
154 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/routers/technical_ddd.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Technical analysis router with Domain-Driven Design.
3 |
4 | This is the refactored version that delegates all business logic
5 | to the domain and application layers.
6 | """
7 |
8 | from typing import Any
9 |
10 | from fastmcp import FastMCP
11 |
12 | from maverick_mcp.api.dependencies.technical_analysis import (
13 | get_technical_analysis_query,
14 | )
15 | from maverick_mcp.utils.logging import get_logger
16 |
# Module-level logger for this router
logger = get_logger("maverick_mcp.routers.technical_ddd")

# Create the technical analysis router (FastMCP sub-application)
technical_ddd_router: FastMCP = FastMCP("Technical_Analysis_DDD")
21 |
22 |
async def get_technical_analysis_ddd(
    ticker: str,
    days: int = 365,
) -> dict[str, Any]:
    """
    Get comprehensive technical analysis for a stock using Domain-Driven Design.

    This is a thin controller that delegates all business logic to the
    application and domain layers, following DDD principles.

    Args:
        ticker: Stock ticker symbol
        days: Number of days of historical data (default: 365)

    Returns:
        Complete technical analysis with all indicators
    """
    try:
        # Resolve the query handler via dependency injection; all business
        # logic lives in the domain/application layers.
        dto = await get_technical_analysis_query().execute(symbol=ticker, days=days)

        # DTO -> plain dict for the MCP response
        return {"ticker": ticker, "analysis": dto.model_dump(), "status": "success"}

    except ValueError as e:
        logger.warning(f"Invalid input for {ticker}: {str(e)}")
        return {"ticker": ticker, "error": str(e), "status": "invalid_input"}
    except Exception as e:
        logger.error(f"Error analyzing {ticker}: {str(e)}", exc_info=True)
        return {"ticker": ticker, "error": "Technical analysis failed", "status": "error"}
68 |
69 |
async def get_rsi_analysis_ddd(
    ticker: str,
    period: int = 14,
    days: int = 365,
) -> dict[str, Any]:
    """
    Get RSI analysis using Domain-Driven Design approach.

    Args:
        ticker: Stock ticker symbol
        period: RSI period (default: 14)
        days: Number of days of historical data (default: 365)

    Returns:
        RSI analysis results
    """
    try:
        # Run the query restricted to the RSI indicator only.
        dto = await get_technical_analysis_query().execute(
            symbol=ticker,
            days=days,
            indicators=["rsi"],
            rsi_period=period,
        )

        if dto.rsi:
            return {
                "ticker": ticker,
                "period": period,
                "analysis": dto.rsi.model_dump(),
                "status": "success",
            }

        # The query succeeded but produced no RSI payload.
        return {
            "ticker": ticker,
            "error": "RSI calculation failed",
            "status": "error",
        }

    except Exception as e:
        logger.error(f"Error in RSI analysis for {ticker}: {str(e)}")
        return {
            "ticker": ticker,
            "error": str(e),
            "status": "error",
        }
119 |
120 |
async def get_support_resistance_ddd(
    ticker: str,
    days: int = 365,
) -> dict[str, Any]:
    """
    Get support and resistance levels using DDD approach.

    Args:
        ticker: Stock ticker symbol
        days: Number of days of historical data (default: 365)

    Returns:
        Support and resistance levels
    """

    def _level_to_dict(level) -> dict[str, Any]:
        # Flatten a domain price-level object into the wire format.
        return {
            "price": level.price,
            "strength": level.strength,
            "distance": level.distance_from_current,
        }

    try:
        dto = await get_technical_analysis_query().execute(
            symbol=ticker,
            days=days,
            indicators=[],  # levels only; no indicator computation needed
        )

        return {
            "ticker": ticker,
            "current_price": dto.current_price,
            "support_levels": [_level_to_dict(lv) for lv in dto.support_levels],
            "resistance_levels": [_level_to_dict(lv) for lv in dto.resistance_levels],
            "status": "success",
        }

    except Exception as e:
        logger.error(f"Error in support/resistance analysis for {ticker}: {str(e)}")
        return {
            "ticker": ticker,
            "error": str(e),
            "status": "error",
        }
175 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/middleware/error_handling.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Error handling middleware for FastAPI applications.
3 |
4 | This middleware provides centralized error handling, logging, and monitoring
5 | integration for all unhandled exceptions in the API.
6 | """
7 |
8 | import time
9 | import uuid
10 | from collections.abc import Callable
11 |
12 | from fastapi import Request, Response
13 | from starlette.middleware.base import BaseHTTPMiddleware
14 |
15 | from maverick_mcp.api.error_handling import handle_api_error
16 | from maverick_mcp.utils.logging import get_logger
17 | from maverick_mcp.utils.monitoring import get_monitoring_service
18 |
# Module-level logger and shared monitoring service handle (used for
# breadcrumbs and error reporting by the middleware below)
logger = get_logger(__name__)
monitoring = get_monitoring_service()
21 |
22 |
class ErrorHandlingMiddleware(BaseHTTPMiddleware):
    """
    Middleware to catch and handle all unhandled exceptions.

    This middleware:
    1. Catches any unhandled exceptions from route handlers
    2. Logs errors with full context
    3. Sends errors to monitoring (Sentry)
    4. Returns structured error responses to clients
    5. Adds request IDs for tracing
    """

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        """Process the request and handle any exceptions."""
        # Assign a unique ID so logs, monitoring events, and the response
        # headers can all be correlated.
        request_id = str(uuid.uuid4())
        request.state.request_id = request_id
        start_time = time.time()

        try:
            # Record a breadcrumb before the handler runs so monitoring has
            # request context if something fails downstream.
            monitoring.add_breadcrumb(
                message=f"{request.method} {request.url.path}",
                category="request",
                level="info",
                data={
                    "request_id": request_id,
                    "method": request.method,
                    "path": request.url.path,
                    "query": str(request.url.query),
                },
            )

            response = await call_next(request)

            # Success path: tag the response and log the outcome.
            response.headers["X-Request-ID"] = request_id
            logger.info(
                f"{request.method} {request.url.path} completed",
                extra={
                    "request_id": request_id,
                    "status_code": response.status_code,
                    "duration": time.time() - start_time,
                },
            )
            return response

        except Exception as exc:
            duration = time.time() - start_time

            logger.error(
                f"Unhandled exception in {request.method} {request.url.path}",
                exc_info=True,
                extra={
                    "request_id": request_id,
                    "duration": duration,
                    "error_type": type(exc).__name__,
                },
            )

            # Build the structured error response for the client, tagged
            # with the same request ID for tracing.
            error_response = handle_api_error(
                request,
                exc,
                context={"request_id": request_id, "duration": duration},
            )
            error_response.headers["X-Request-ID"] = request_id
            return error_response
106 |
107 |
class RequestTracingMiddleware(BaseHTTPMiddleware):
    """
    Middleware to add request tracing information.

    This middleware adds:
    1. Request IDs to all requests
    2. User context for authenticated requests
    3. Performance tracking
    """

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        """Add tracing context to requests.

        Args:
            request: Incoming HTTP request.
            call_next: Next handler in the middleware chain.

        Returns:
            The downstream response, executed inside a monitoring transaction.
        """
        # Reuse the request ID if ErrorHandlingMiddleware already set one.
        if not hasattr(request.state, "request_id"):
            request.state.request_id = str(uuid.uuid4())

        # Attach user context when an authenticated user is present on state.
        user_id = None
        if hasattr(request.state, "user"):
            user_id = getattr(request.state.user, "id", None)
            monitoring.set_user_context(user_id)

        monitoring.add_breadcrumb(
            message="Request context",
            category="request",
            data={
                "request_id": request.state.request_id,
                "user_id": user_id,
                "path": request.url.path,
            },
        )

        try:
            # Wrap the handler in a monitoring transaction for performance tracking.
            with monitoring.transaction(
                name=f"{request.method} {request.url.path}", op="http.server"
            ):
                response = await call_next(request)
            return response
        finally:
            # Always clear user context, even when the handler raises, so a
            # pooled worker never leaks one request's user into the next.
            # (Previously this only ran on success and skipped falsy IDs
            # such as 0.)
            if user_id is not None:
                monitoring.set_user_context(None)
152 |
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/interfaces/market_data.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Market data provider interface.
3 |
4 | This module defines the abstract interface for market-wide data operations,
5 | including market indices, gainers/losers, sector performance, and earnings calendar.
6 | """
7 |
8 | from typing import Any, Protocol, runtime_checkable
9 |
10 |
@runtime_checkable
class IMarketDataProvider(Protocol):
    """
    Structural interface for market-wide data operations.

    Implementations supply market overview data: index summaries, the day's
    top movers, sector performance, and the upcoming earnings calendar.
    Because the protocol is ``runtime_checkable``, implementations can be
    checked with ``isinstance`` (method presence only, not signatures).
    """

    async def get_market_summary(self) -> dict[str, Any]:
        """Return price and change data for the major market indices."""
        ...

    async def get_top_gainers(self, limit: int = 10) -> list[dict[str, Any]]:
        """Return up to ``limit`` of the day's top gaining stocks."""
        ...

    async def get_top_losers(self, limit: int = 10) -> list[dict[str, Any]]:
        """Return up to ``limit`` of the day's top losing stocks."""
        ...

    async def get_most_active(self, limit: int = 10) -> list[dict[str, Any]]:
        """Return up to ``limit`` stocks ranked by trading volume."""
        ...

    async def get_sector_performance(self) -> dict[str, float]:
        """Return a mapping of sector name to performance percentage."""
        ...

    async def get_earnings_calendar(self, days: int = 7) -> list[dict[str, Any]]:
        """Return earnings announcements scheduled within the next ``days`` days."""
        ...

    async def get_market_overview(self) -> dict[str, Any]:
        """
        Return a combined market overview.

        The result aggregates ``market_summary``, ``top_gainers``,
        ``top_losers``, ``sector_performance``, and a ``timestamp``.
        """
        ...
99 |
100 |
class MarketDataConfig:
    """
    Configuration holder for market data providers.

    Bundles the provider-related settings into one object so that provider
    implementations do not need to reach into global configuration sources.

    Attributes:
        external_api_key: API key for the External API service.
        tiingo_api_key: API key for the Tiingo service.
        request_timeout: Per-request timeout in seconds.
        max_retries: Maximum number of retry attempts.
        rate_limit_delay: Delay between requests in seconds.
        default_limit: Default number of results to return.
        use_fallback_providers: Whether to use fallback data sources.
    """

    def __init__(
        self,
        external_api_key: str = "",
        tiingo_api_key: str = "",
        request_timeout: int = 30,
        max_retries: int = 3,
        rate_limit_delay: float = 0.1,
        default_limit: int = 10,
        use_fallback_providers: bool = True,
    ):
        """Store the supplied settings verbatim on the instance."""
        self.external_api_key = external_api_key
        self.tiingo_api_key = tiingo_api_key
        self.request_timeout = request_timeout
        self.max_retries = max_retries
        self.rate_limit_delay = rate_limit_delay
        self.default_limit = default_limit
        self.use_fallback_providers = use_fallback_providers

    @property
    def has_external_api_key(self) -> bool:
        """True when a non-blank External API key is configured."""
        return self.external_api_key.strip() != ""

    @property
    def has_tiingo_key(self) -> bool:
        """True when a non-blank Tiingo API key is configured."""
        return self.tiingo_api_key.strip() != ""
148 |
149 |
# Market data constants that can be used by implementations

# Yahoo Finance symbols for the major US market indices (plus VIX and the
# 10-year Treasury yield), mapped to human-readable display names.
MARKET_INDICES = {
    "^GSPC": "S&P 500",
    "^DJI": "Dow Jones",
    "^IXIC": "NASDAQ",
    "^RUT": "Russell 2000",
    "^VIX": "VIX",
    "^TNX": "10Y Treasury",
}

# Sector name -> sector ETF ticker (XLK, XLV, ...). Implementations can use
# these ETFs as proxies when computing per-sector performance.
SECTOR_ETFS = {
    "Technology": "XLK",
    "Healthcare": "XLV",
    "Financials": "XLF",
    "Consumer Discretionary": "XLY",
    "Industrials": "XLI",
    "Energy": "XLE",
    "Utilities": "XLU",
    "Materials": "XLB",
    "Consumer Staples": "XLP",
    "Real Estate": "XLRE",
    "Communication Services": "XLC",
}
173 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/routers/health.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Comprehensive health check router for backtesting system.
3 |
4 | Provides detailed health monitoring including:
5 | - Component status (database, cache, external APIs)
6 | - Circuit breaker monitoring
7 | - Resource utilization
8 | - Readiness and liveness probes
9 | - Performance metrics
10 | """
11 |
12 | import logging
13 | from datetime import UTC, datetime
14 |
15 | from fastapi import APIRouter
16 | from pydantic import BaseModel, Field
17 |
18 | from maverick_mcp.config.settings import get_settings
19 | from maverick_mcp.utils.circuit_breaker import get_circuit_breaker_status
20 |
21 | logger = logging.getLogger(__name__)
22 | settings = get_settings()
23 |
24 | router = APIRouter(prefix="/health", tags=["Health"])
25 |
26 |
class CircuitBreakerStatus(BaseModel):
    """Circuit breaker status information.

    Response model mirroring one entry of get_circuit_breaker_status().
    """

    name: str = Field(description="Circuit breaker name")
    # States follow the standard pattern: closed = healthy, open = failing
    # (calls short-circuited), half_open = probing for recovery.
    state: str = Field(description="Current state (closed/open/half_open)")
    failure_count: int = Field(description="Current consecutive failure count")
    # None when the breaker is not open (no retry countdown in progress).
    time_until_retry: float | None = Field(description="Seconds until retry allowed")
    metrics: dict = Field(description="Performance metrics")
35 |
36 |
class HealthStatus(BaseModel):
    """Overall health status returned by GET /health/.

    ``status`` is "healthy" when all circuit breakers are closed and
    "degraded" when any breaker is open (see health_check below).
    """

    status: str = Field(description="Overall health status")
    timestamp: str = Field(description="Current timestamp")
    version: str = Field(description="Application version")
    # Keyed by breaker name, one entry per registered circuit breaker.
    circuit_breakers: dict[str, CircuitBreakerStatus] = Field(
        description="Circuit breaker statuses"
    )
    # Service name -> "up"/"down", derived from circuit breaker states.
    services: dict[str, str] = Field(description="External service statuses")
47 |
48 |
@router.get("/", response_model=HealthStatus)
async def health_check() -> HealthStatus:
    """
    Get comprehensive health status including circuit breakers.

    Overall status is "degraded" if any circuit breaker is open, otherwise
    "healthy". Each monitored external service is reported "down" when its
    circuit breaker is open and "up" otherwise (including when no breaker
    is registered for it).

    Returns:
        HealthStatus: Current health information
    """
    # Get circuit breaker status
    cb_status = get_circuit_breaker_status()

    # Convert raw status dicts to response models.
    circuit_breakers = {
        name: CircuitBreakerStatus(
            name=status["name"],
            state=status["state"],
            failure_count=status["consecutive_failures"],
            time_until_retry=status["time_until_retry"],
            metrics=status["metrics"],
        )
        for name, status in cb_status.items()
    }

    # Determine overall health
    any_open = any(cb["state"] == "open" for cb in cb_status.values())
    overall_status = "degraded" if any_open else "healthy"

    # Derive per-service availability from the matching circuit breaker.
    # (Replaces five copy-pasted conditionals with one data-driven loop so
    # adding a service cannot silently diverge from the pattern.)
    monitored_services = ("yfinance", "finviz", "fred_api", "external_api", "news_api")
    services = {
        service: "down" if cb_status.get(service, {}).get("state") == "open" else "up"
        for service in monitored_services
    }

    return HealthStatus(
        status=overall_status,
        timestamp=datetime.now(UTC).isoformat(),
        version=getattr(settings, "version", "0.1.0"),
        circuit_breakers=circuit_breakers,
        services=services,
    )
101 |
102 |
@router.get("/circuit-breakers", response_model=dict[str, CircuitBreakerStatus])
async def get_circuit_breakers() -> dict[str, CircuitBreakerStatus]:
    """
    Get detailed circuit breaker status.

    Returns:
        Dictionary of circuit breaker statuses
    """
    # Convert every raw status dict into its response model in one pass.
    return {
        name: CircuitBreakerStatus(
            name=status["name"],
            state=status["state"],
            failure_count=status["consecutive_failures"],
            time_until_retry=status["time_until_retry"],
            metrics=status["metrics"],
        )
        for name, status in get_circuit_breaker_status().items()
    }
124 |
125 |
@router.post("/circuit-breakers/{name}/reset")
async def reset_circuit_breaker(name: str) -> dict:
    """
    Reset a single named circuit breaker back to its closed state.

    Args:
        name: Circuit breaker name

    Returns:
        Success response
    """
    from maverick_mcp.utils.circuit_breaker import get_circuit_breaker

    target = get_circuit_breaker(name)
    if not target:
        # Unknown name: report the error rather than raising.
        return {"status": "error", "message": f"Circuit breaker '{name}' not found"}

    target.reset()
    logger.info(f"Circuit breaker '{name}' reset via API")
    return {"status": "success", "message": f"Circuit breaker '{name}' reset"}
147 |
148 |
@router.post("/circuit-breakers/reset-all")
async def reset_all_circuit_breakers() -> dict:
    """
    Reset every registered circuit breaker back to its closed state.

    Returns:
        Success response
    """
    # Alias the import so it does not shadow this endpoint's own name.
    from maverick_mcp.utils.circuit_breaker import (
        reset_all_circuit_breakers as _reset_all_breakers,
    )

    _reset_all_breakers()
    logger.info("All circuit breakers reset via API")
    return {"status": "success", "message": "All circuit breakers reset"}
163 |
```
--------------------------------------------------------------------------------
/maverick_mcp/tests/test_server.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Integration tests for the Maverick-MCP server.
3 | """
4 |
5 | import json
6 | import os
7 | import subprocess
8 | import time
9 | import unittest
10 | from typing import Any
11 |
12 | import pytest
13 | import requests
14 |
15 | # Constants
16 | SERVER_URL = "http://localhost:8000"
17 | SERVER_START_TIMEOUT = 10 # seconds
18 |
19 |
@pytest.mark.integration
class TestMaverickMCPServer(unittest.TestCase):
    """Integration tests for the Maverick-MCP server.

    Starts the server as a subprocess (unless USE_RUNNING_SERVER=1 points at
    an already-running instance) and exercises it over HTTP.
    """

    process: subprocess.Popen[bytes] | None = None

    # Per-request timeout in seconds. Previously no timeout was passed to
    # requests, so a stalled server could hang the whole suite forever.
    REQUEST_TIMEOUT = 10

    @classmethod
    def setUpClass(cls):
        """Start the server before running tests."""
        # Skip server startup if USE_RUNNING_SERVER environment variable is set
        cls.process = None
        if os.environ.get("USE_RUNNING_SERVER") != "1":
            print("Starting Maverick-MCP server...")
            # Start the server as a subprocess
            cls.process = subprocess.Popen(
                ["python", "-m", "maverick_mcp.api.server"],
                # Redirect stdout and stderr to prevent output in test results
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

            # Poll the health endpoint until the server responds or we time out.
            start_time = time.time()
            while time.time() - start_time < SERVER_START_TIMEOUT:
                try:
                    response = requests.get(
                        f"{SERVER_URL}/health", timeout=cls.REQUEST_TIMEOUT
                    )
                    if response.status_code == 200:
                        print("Server started successfully")
                        break
                except requests.exceptions.RequestException:
                    # Covers ConnectionError while booting and the new Timeout.
                    pass
                time.sleep(0.5)
            else:
                # If the server didn't start within the timeout, kill it and fail
                cls.tearDownClass()
                raise TimeoutError("Server did not start within the timeout period")

    @classmethod
    def tearDownClass(cls):
        """Stop the server after tests are done."""
        if cls.process:
            print("Stopping Maverick-MCP server...")
            # Send SIGTERM signal to the process
            cls.process.terminate()
            try:
                # Wait for the process to terminate
                cls.process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                # If the process doesn't terminate within 5 seconds, kill it
                cls.process.kill()
                cls.process.wait()

    def test_health_endpoint(self):
        """Test the health endpoint."""
        response = requests.get(f"{SERVER_URL}/health", timeout=self.REQUEST_TIMEOUT)
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data["status"], "ok")
        # Version should be present
        self.assertIn("version", data)

    def test_mcp_endpoint(self):
        """Test the MCP endpoint."""
        # This is a simple request to test if the MCP endpoint is responding
        sse_url = f"{SERVER_URL}/sse"
        response = requests.get(sse_url, timeout=self.REQUEST_TIMEOUT)
        # Just check that the endpoint exists and responds with success
        self.assertIn(
            response.status_code, [200, 405]
        )  # 200 OK or 405 Method Not Allowed

    def send_mcp_request(self, method: str, params: list[Any]) -> dict[str, Any]:
        """
        Send a request to the MCP server.

        Args:
            method: The method name
            params: The parameters for the method

        Returns:
            The response from the server
        """
        request_body = {"jsonrpc": "2.0", "id": 1, "method": method, "params": params}

        response = requests.post(
            f"{SERVER_URL}/messages/",
            json=request_body,
            headers={"Content-Type": "application/json"},
            timeout=self.REQUEST_TIMEOUT,
        )

        # Check that the request was successful
        self.assertEqual(response.status_code, 200)

        # Parse the response
        data = response.json()

        # Check that the response is valid JSON-RPC
        self.assertEqual(data["jsonrpc"], "2.0")
        self.assertEqual(data["id"], 1)

        return data  # type: ignore[no-any-return]

    def test_fetch_stock_data(self):
        """Test the fetch_stock_data tool."""
        # Send a request to fetch stock data for a known symbol (AAPL)
        response_data = self.send_mcp_request("fetch_stock_data", ["AAPL"])

        # Check that the result is present and contains stock data
        self.assertIn("result", response_data)
        result = response_data["result"]

        # Parse the result as JSON
        stock_data = json.loads(result)

        # Check that the stock data contains the expected fields
        self.assertIn("index", stock_data)
        self.assertIn("columns", stock_data)
        self.assertIn("data", stock_data)

        # Check that the columns include OHLCV
        for column in ["open", "high", "low", "close", "volume"]:
            self.assertIn(
                column.lower(), [col.lower() for col in stock_data["columns"]]
            )
144 |
145 |
# Allow running this test module directly (without a pytest/unittest runner).
if __name__ == "__main__":
    unittest.main()
149 |
```
--------------------------------------------------------------------------------
/examples/timeout_fix_demonstration.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Demonstration of Search Provider Timeout Fixes
4 |
5 | This script shows how the timeout issues identified by the debugger subagent have been resolved:
6 |
7 | BEFORE (Issues):
8 | - Complex queries failed at exactly 10 seconds
9 | - Circuit breakers were too aggressive (5 failures = disabled)
10 | - No distinction between timeout and other failure types
11 | - Budget allocation wasn't optimal
12 |
13 | AFTER (Fixed):
14 | - Complex queries get up to 25 seconds
15 | - Circuit breakers are more tolerant (8 failures, faster recovery)
16 | - Timeout failures have separate, higher threshold (12 vs 6)
17 | - Better budget allocation with minimum timeout protection
18 | """
19 |
import sys
from pathlib import Path

# Add project root to path BEFORE importing maverick_mcp: previously the
# insert ran after the imports below, so it had no effect on resolving them.
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from maverick_mcp.agents.deep_research import WebSearchProvider
from maverick_mcp.config.settings import get_settings
29 |
30 |
def demonstrate_timeout_improvements() -> None:
    """Show the specific improvements made to resolve timeout issues.

    Prints a fixed transcript demonstrating the new adaptive timeout
    calculation and the relaxed failure-tolerance settings.
    """

    print("🐛 SEARCH PROVIDER TIMEOUT FIXES")
    print("=" * 50)

    # Create test provider to demonstrate calculations.
    # Minimal concrete subclass: only the inherited _calculate_timeout
    # helper is exercised here, so search() can be a stub.
    class DemoProvider(WebSearchProvider):
        async def search(self, query, num_results=10, timeout_budget=None):
            return []

    provider = DemoProvider(api_key="demo")
    settings = get_settings()

    # The problematic query from the debugger report
    complex_query = "Google Microsoft OpenAI AI services competition revenue market share 2024 2025 growth forecast Claude Gemini GPT"

    print("🔍 COMPLEX QUERY EXAMPLE:")
    print(f"   Query: {complex_query}")
    print(f"   Words: {len(complex_query.split())}")

    # Show timeout calculation for the unconstrained case.
    timeout = provider._calculate_timeout(complex_query)
    print(f"   ✅ NEW Timeout: {timeout:.1f}s (was 10s → now 25s)")

    # Show budget scenarios: a tight budget vs. a generous one.
    tight_budget_timeout = provider._calculate_timeout(
        complex_query, timeout_budget=15.0
    )
    good_budget_timeout = provider._calculate_timeout(
        complex_query, timeout_budget=50.0
    )

    print(f"   ✅ With 15s budget: {tight_budget_timeout:.1f}s (min 8s protection)")
    print(f"   ✅ With 50s budget: {good_budget_timeout:.1f}s (full 25s)")

    print("\n📊 FAILURE TOLERANCE IMPROVEMENTS:")

    # Show tolerance thresholds; getattr defaults document the intended values
    # when the settings object lacks the attribute.
    timeout_threshold = getattr(
        settings.performance, "search_timeout_failure_threshold", 12
    )
    circuit_threshold = getattr(
        settings.performance, "search_circuit_breaker_failure_threshold", 8
    )
    circuit_recovery = getattr(
        settings.performance, "search_circuit_breaker_recovery_timeout", 30
    )

    print(f"   ✅ Timeout failures before disable: {timeout_threshold} (was 3)")
    print(f"   ✅ Circuit breaker threshold: {circuit_threshold} (was 5)")
    print(f"   ✅ Circuit breaker recovery: {circuit_recovery}s (was 60s)")

    print("\n🎯 KEY FIXES SUMMARY:")
    print("   ✅ Complex queries (9+ words): 25s timeout instead of 10s")
    print("   ✅ Medium queries (4-8 words): 17s timeout instead of 10s")
    print("   ✅ Minimum timeout protection: Never below 8s for complex queries")
    print("   ✅ Budget efficiency: 85% allocation (was 80%)")
    print("   ✅ Timeout-specific tolerance: 12 failures (was 3)")
    print("   ✅ Search circuit breakers: 8 failures, 30s recovery")

    print("\n🔬 TECHNICAL DETAILS:")
    print("   • Timeout calculation is adaptive based on query complexity")
    print("   • Budget constraints respect minimum timeout requirements")
    print("   • Separate failure tracking for timeout vs other errors")
    print("   • Circuit breakers tuned specifically for search operations")
    print("   • Enhanced debug logging for troubleshooting")
98 |
99 |
def show_before_after_comparison() -> None:
    """Show specific before/after comparisons for the identified issues.

    Prints the legacy flat 10s timeout next to the new adaptive timeout for
    queries of increasing complexity.
    """

    print("\n📋 BEFORE vs AFTER COMPARISON")
    print("=" * 50)

    test_cases = [
        ("AAPL", "Simple 1-word query"),
        ("Google Microsoft OpenAI competition", "Medium 4-word query"),
        (
            "Google Microsoft OpenAI AI services competition revenue market share 2024 2025 growth forecast",
            "Complex 13-word query",
        ),
    ]

    # Create the provider once: it was previously constructed inside the
    # loop although it is loop-invariant.
    # NOTE(review): demonstrate_timeout_improvements() subclasses
    # WebSearchProvider before instantiating; confirm direct construction
    # here is valid (i.e. search() is not abstract).
    provider = WebSearchProvider(api_key="demo")

    for query, description in test_cases:
        words = len(query.split())

        # OLD behavior: every query got a flat 10s timeout.
        old_timeout = 10.0

        # NEW behavior: timeout adapts to query complexity.
        new_timeout = provider._calculate_timeout(query)

        improvement = "🟰" if old_timeout == new_timeout else "📈"
        print(f"  {improvement} {description} ({words} words):")
        print(f"     BEFORE: {old_timeout:.1f}s | AFTER: {new_timeout:.1f}s")
128 |
129 |
# Script entry point: run both demonstrations and print a closing summary.
if __name__ == "__main__":
    demonstrate_timeout_improvements()
    show_before_after_comparison()

    print("\n✅ The search provider timeout issues have been fully resolved!")
    print(
        "   Complex queries like the 15-word example will now get 25s instead of failing at 10s."
    )
138 |
```
--------------------------------------------------------------------------------
/maverick_mcp/validation/data.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Validation models for data-related tools.
3 |
4 | This module provides Pydantic models for validating inputs
5 | to all data fetching and caching tools.
6 | """
7 |
8 | from pydantic import Field, field_validator, model_validator
9 |
10 | from .base import (
11 | DateRangeMixin,
12 | DateString,
13 | DateValidator,
14 | StrictBaseModel,
15 | TickerSymbol,
16 | TickerValidator,
17 | )
18 |
19 |
class FetchStockDataRequest(StrictBaseModel, DateRangeMixin):
    """Validation for fetch_stock_data tool.

    Inherits optional ``start_date``/``end_date`` fields from DateRangeMixin.
    """

    ticker: TickerSymbol = Field(
        ...,
        description="Stock ticker symbol (e.g., AAPL, MSFT)",
        json_schema_extra={"examples": ["AAPL", "MSFT", "GOOGL"]},
    )

    @field_validator("ticker")
    @classmethod
    def normalize_ticker(cls, v: str) -> str:
        """Normalize ticker to uppercase."""
        return TickerValidator.validate_ticker(v)

    # Whole-request examples surfaced in the generated JSON schema.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {
                    "ticker": "AAPL",
                    "start_date": "2024-01-01",
                    "end_date": "2024-12-31",
                },
                {"ticker": "MSFT"},
            ]
        }
    }
47 |
48 |
class StockDataBatchRequest(StrictBaseModel, DateRangeMixin):
    """Validation for fetch_stock_data_batch tool.

    Inherits optional ``start_date``/``end_date`` fields from DateRangeMixin.
    """

    ticker_list: list[TickerSymbol]  # (see field below)
    tickers: list[TickerSymbol] = Field(
        ...,
        min_length=1,
        max_length=50,
        description="List of ticker symbols (max 50)",
        json_schema_extra={"examples": [["AAPL", "MSFT", "GOOGL"]]},
    )

    @field_validator("tickers")
    @classmethod
    def validate_tickers(cls, v: list[str]) -> list[str]:
        """Validate and normalize ticker list (uppercases and de-duplicates)."""
        return TickerValidator.validate_ticker_list(v)

    # Whole-request examples surfaced in the generated JSON schema.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"tickers": ["AAPL", "MSFT", "GOOGL"], "start_date": "2024-01-01"},
                {
                    "tickers": ["SPY", "QQQ", "IWM"],
                    "start_date": "2024-06-01",
                    "end_date": "2024-12-31",
                },
            ]
        }
    }
78 |
79 |
class GetStockInfoRequest(StrictBaseModel):
    """Validation for get_stock_info tool: a single required ticker."""

    ticker: TickerSymbol = Field(
        ..., description="Stock ticker symbol", json_schema_extra={"examples": ["AAPL"]}
    )

    @field_validator("ticker")
    @classmethod
    def normalize_ticker(cls, v: str) -> str:
        """Normalize ticker to uppercase."""
        return TickerValidator.validate_ticker(v)
92 |
93 |
class GetNewsRequest(StrictBaseModel):
    """Validation for get_news_sentiment tool: ticker plus article limit."""

    ticker: TickerSymbol = Field(
        ..., description="Stock ticker symbol", json_schema_extra={"examples": ["AAPL"]}
    )

    # Bounded 1-100; defaults to 10 articles.
    limit: int = Field(
        default=10,
        ge=1,
        le=100,
        description="Maximum number of news articles to return",
        json_schema_extra={"examples": [10, 20, 50]},
    )

    @field_validator("ticker")
    @classmethod
    def normalize_ticker(cls, v: str) -> str:
        """Normalize ticker to uppercase."""
        return TickerValidator.validate_ticker(v)
114 |
115 |
class GetChartLinksRequest(StrictBaseModel):
    """Validation for get_chart_links tool: a single required ticker."""

    ticker: TickerSymbol = Field(
        ..., description="Stock ticker symbol", json_schema_extra={"examples": ["AAPL"]}
    )

    @field_validator("ticker")
    @classmethod
    def normalize_ticker(cls, v: str) -> str:
        """Normalize ticker to uppercase."""
        return TickerValidator.validate_ticker(v)
128 |
129 |
class CachedPriceDataRequest(StrictBaseModel):
    """Validation for get_cached_price_data tool.

    Unlike DateRangeMixin-based requests, ``start_date`` is required here;
    ``end_date`` remains optional (the tool defaults it to today).
    """

    ticker: TickerSymbol = Field(..., description="Stock ticker symbol")
    start_date: DateString = Field(..., description="Start date in YYYY-MM-DD format")
    end_date: DateString | None = Field(
        default=None, description="End date in YYYY-MM-DD format (defaults to today)"
    )

    @field_validator("ticker")
    @classmethod
    def normalize_ticker(cls, v: str) -> str:
        """Normalize ticker to uppercase."""
        return TickerValidator.validate_ticker(v)

    @field_validator("start_date", "end_date")
    @classmethod
    def validate_date(cls, v: str | None) -> str | None:
        """Validate date format (skips the optional end_date when omitted)."""
        if v is not None:
            DateValidator.validate_date_string(v)
        return v

    @model_validator(mode="after")
    def validate_date_range(self) -> "CachedPriceDataRequest":
        """Ensure end_date is after start_date (and not in the future)."""
        # Only enforced when an end_date was supplied.
        if self.end_date is not None:
            DateValidator.validate_date_range(self.start_date, self.end_date)
        return self
159 |
160 |
class ClearCacheRequest(StrictBaseModel):
    """Validation for clear_cache tool.

    A ``None`` ticker means "clear the entire cache".
    """

    ticker: TickerSymbol | None = Field(
        default=None, description="Specific ticker to clear (None to clear all)"
    )

    @field_validator("ticker")
    @classmethod
    def normalize_ticker(cls, v: str | None) -> str | None:
        """Normalize ticker to uppercase if provided."""
        if v is not None:
            return TickerValidator.validate_ticker(v)
        return v
175 |
```
--------------------------------------------------------------------------------
/maverick_mcp/validation/base.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Base validation models and common validators for Maverick-MCP.
3 |
4 | This module provides base classes and common validation functions
5 | used across all validation models.
6 | """
7 |
8 | import re
9 | from datetime import UTC, datetime
10 | from typing import Annotated
11 |
12 | from pydantic import BaseModel, ConfigDict, Field, field_validator
13 |
14 | from maverick_mcp.config.settings import get_settings
15 |
16 | settings = get_settings()
17 |
# Custom type annotations

# Ticker symbols: length bounds come from settings; the pattern allows
# uppercase alphanumerics plus "." and "-" (e.g. BRK.B, SPY).
# NOTE(review): the regex hard-codes a 1-10 length range independent of the
# configured min/max symbol lengths — verify they agree.
TickerSymbol = Annotated[
    str,
    Field(
        min_length=settings.validation.min_symbol_length,
        max_length=settings.validation.max_symbol_length,
        pattern=r"^[A-Z0-9\-\.]{1,10}$",
        description="Stock ticker symbol (e.g., AAPL, BRK.B, SPY)",
    ),
]

# Calendar date string; format-only check (no calendar validity here).
DateString = Annotated[
    str, Field(pattern=r"^\d{4}-\d{2}-\d{2}$", description="Date in YYYY-MM-DD format")
]

# Strictly positive integer (> 0).
PositiveInt = Annotated[int, Field(gt=0, description="Positive integer value")]

# Strictly positive float (> 0.0).
PositiveFloat = Annotated[float, Field(gt=0.0, description="Positive float value")]

# Percentage on a 0-100 scale, both ends inclusive.
Percentage = Annotated[
    float, Field(ge=0.0, le=100.0, description="Percentage value (0-100)")
]
40 |
41 |
class StrictBaseModel(BaseModel):
    """
    Base model with strict validation settings.

    - Forbids extra fields
    - Validates on assignment
    - Uses strict mode for type coercion
    """

    model_config = ConfigDict(
        extra="forbid",  # reject unknown fields instead of silently ignoring them
        validate_assignment=True,  # re-validate whenever a field is reassigned
        strict=True,  # no implicit type coercion (e.g. "1" -> 1)
        str_strip_whitespace=True,  # trim surrounding whitespace on str fields
        json_schema_extra={"examples": []},  # subclasses override with real examples
    )
58 |
59 |
class TickerValidator:
    """Common ticker validation methods."""

    @staticmethod
    def validate_ticker(value: str) -> str:
        """Validate and normalize a ticker symbol.

        Args:
            value: Raw ticker symbol (any case, may have surrounding spaces).

        Returns:
            The uppercased, trimmed ticker.

        Raises:
            ValueError: If the symbol is not alphanumeric (with optional
                "." or "-") within the configured length bounds.
        """
        # Convert to uppercase and trim whitespace.
        ticker = value.upper().strip()

        # Enforce BOTH configured length bounds. The previous pattern
        # hard-coded a minimum of 1, contradicting the error message below
        # whenever min_symbol_length > 1.
        pattern = (
            f"^[A-Z0-9\\-\\.]{{{settings.validation.min_symbol_length},"
            f"{settings.validation.max_symbol_length}}}$"
        )
        if not re.match(pattern, ticker):
            raise ValueError(
                f"Invalid ticker symbol: {value}. "
                f"Must be {settings.validation.min_symbol_length}-{settings.validation.max_symbol_length} characters, alphanumeric with optional . or -"
            )

        return ticker

    @staticmethod
    def validate_ticker_list(values: list[str]) -> list[str]:
        """Validate and normalize a list of tickers.

        Each entry is normalized via :meth:`validate_ticker`; duplicates are
        removed while preserving first-seen order.

        Raises:
            ValueError: If the list is empty or any entry is invalid.
        """
        if not values:
            raise ValueError("At least one ticker symbol is required")

        # Remove duplicates while preserving order
        seen = set()
        unique_tickers = []

        for ticker in values:
            normalized = TickerValidator.validate_ticker(ticker)
            if normalized not in seen:
                seen.add(normalized)
                unique_tickers.append(normalized)

        return unique_tickers
96 |
97 |
98 | class DateValidator:
99 | """Common date validation methods."""
100 |
101 | @staticmethod
102 | def validate_date_string(value: str) -> str:
103 | """Validate date string format."""
104 | try:
105 | datetime.strptime(value, "%Y-%m-%d")
106 | except ValueError:
107 | raise ValueError(f"Invalid date format: {value}. Must be YYYY-MM-DD")
108 | return value
109 |
110 | @staticmethod
111 | def validate_date_range(start_date: str, end_date: str) -> tuple[str, str]:
112 | """Validate that end_date is after start_date."""
113 | start = datetime.strptime(start_date, "%Y-%m-%d")
114 | end = datetime.strptime(end_date, "%Y-%m-%d")
115 |
116 | if end < start:
117 | raise ValueError(
118 | f"End date ({end_date}) must be after start date ({start_date})"
119 | )
120 |
121 | # Check dates aren't too far in the future
122 | today = datetime.now(UTC).date()
123 | if end.date() > today:
124 | raise ValueError(f"End date ({end_date}) cannot be in the future")
125 |
126 | return start_date, end_date
127 |
128 |
class PaginationMixin(BaseModel):
    """Mixin adding standard pagination parameters to request models."""

    # Page size: positive, capped at 100 results per request.
    limit: PositiveInt = Field(
        default=20, le=100, description="Maximum number of results to return"
    )
    # Zero-based offset into the full result set.
    offset: int = Field(default=0, ge=0, description="Number of results to skip")
136 |
137 |
class DateRangeMixin(BaseModel):
    """Mixin adding an optional start/end date window to request models."""

    start_date: DateString | None = Field(
        default=None, description="Start date in YYYY-MM-DD format"
    )
    end_date: DateString | None = Field(
        default=None, description="End date in YYYY-MM-DD format"
    )

    @field_validator("end_date")
    @classmethod
    def validate_date_range(cls, v: str | None, info) -> str | None:
        """Reject an end_date that falls before the provided start_date."""
        if v is None:
            return v

        start = info.data.get("start_date")
        if start is None:
            # Only the end was supplied; nothing to cross-check.
            return v

        DateValidator.validate_date_range(start, v)
        return v
160 |
161 |
class BaseRequest(BaseModel):
    """Common base for API request models: strict, whitespace-trimming input."""

    model_config = ConfigDict(
        extra="forbid",
        str_strip_whitespace=True,
        validate_assignment=True,
    )
170 |
171 |
class BaseResponse(BaseModel):
    """Common base for API response models; enum fields serialize to their values."""

    model_config = ConfigDict(
        use_enum_values=True,
        validate_assignment=True,
    )
179 |
```
--------------------------------------------------------------------------------
/alembic/versions/f0696e2cac15_add_essential_performance_indexes.py:
--------------------------------------------------------------------------------
```python
1 | """Add essential performance indexes
2 |
3 | Revision ID: f0696e2cac15
4 | Revises: 007_enhance_audit_logging
5 | Create Date: 2025-06-25 17:28:38.473307
6 |
7 | """
8 |
9 | import sqlalchemy as sa
10 |
11 | from alembic import op
12 |
# revision identifiers, used by Alembic.
revision = "f0696e2cac15"  # this migration's unique id
down_revision = "007_enhance_audit_logging"  # parent revision in the chain
branch_labels = None
depends_on = None
18 |
19 |
def _create_index_safely(
    index_name: str,
    table_name: str,
    columns: list,
    success_message: str,
    failure_label: str,
) -> None:
    """Create a btree index, printing success or a non-fatal warning on failure.

    Migrations must keep going even when a table is absent in a given
    deployment, so all exceptions are reported but swallowed.
    """
    try:
        op.create_index(
            index_name,
            table_name,
            columns,
            postgresql_using="btree",
            if_not_exists=True,
        )
        print(f"✓ {success_message}")
    except Exception as e:
        print(f"Warning: Could not create {failure_label}: {e}")


def upgrade() -> None:
    """Add essential performance indexes for existing tables only."""

    print("Creating essential performance indexes...")

    # 1. Stock data performance indexes (for large stocks_pricecache table)
    _create_index_safely(
        "idx_stocks_pricecache_stock_date",
        "stocks_pricecache",
        ["stock_id", "date"],
        "Created stock price cache index",
        "stock price cache index",
    )

    # 2. Stock lookup optimization (expression index requires raw SQL)
    try:
        op.execute(
            "CREATE INDEX IF NOT EXISTS idx_stocks_stock_ticker_lower "
            "ON stocks_stock (LOWER(ticker_symbol))"
        )
        print("✓ Created case-insensitive ticker lookup index")
    except Exception as e:
        print(f"Warning: Could not create ticker lookup index: {e}")

    # 3. MCP API keys performance (critical for authentication)
    _create_index_safely(
        "idx_mcp_api_keys_active_lookup",
        "mcp_api_keys",
        ["is_active", "expires_at"],
        "Created API keys performance index",
        "API keys index",
    )

    # 4. Requests tracking performance
    _create_index_safely(
        "idx_mcp_requests_user_time",
        "mcp_requests",
        ["user_id", sa.text("created_at DESC")],
        "Created requests tracking index",
        "requests index",
    )

    # 5. Auth audit log performance
    _create_index_safely(
        "idx_mcp_auth_audit_log_user_time",
        "mcp_auth_audit_log",
        ["user_id", sa.text("created_at DESC")],
        "Created auth audit log index",
        "auth audit index",
    )

    # 6. Screening tables performance (if they exist)
    _create_index_safely(
        "idx_maverick_stocks_combined_score",
        "maverick_stocks",
        [sa.text('"COMBINED_SCORE" DESC')],
        "Created maverick stocks performance index",
        "maverick stocks index",
    )

    _create_index_safely(
        "idx_maverick_bear_stocks_score",
        "maverick_bear_stocks",
        [sa.text('"SCORE" DESC')],
        "Created maverick bear stocks performance index",
        "maverick bear stocks index",
    )

    _create_index_safely(
        "idx_supply_demand_breakouts_rs_rating",
        "supply_demand_breakouts",
        [sa.text('"RS_RATING" DESC')],
        "Created supply/demand breakouts performance index",
        "supply/demand breakouts index",
    )

    print("Essential performance indexes creation completed!")
125 |
126 |
def downgrade() -> None:
    """Remove essential performance indexes."""

    print("Removing essential performance indexes...")

    # Indexes created via op.create_index; drop order is irrelevant.
    regular_indexes = (
        ("idx_stocks_pricecache_stock_date", "stocks_pricecache"),
        ("idx_mcp_api_keys_active_lookup", "mcp_api_keys"),
        ("idx_mcp_requests_user_time", "mcp_requests"),
        ("idx_mcp_auth_audit_log_user_time", "mcp_auth_audit_log"),
        ("idx_maverick_stocks_combined_score", "maverick_stocks"),
        ("idx_maverick_bear_stocks_score", "maverick_bear_stocks"),
        ("idx_supply_demand_breakouts_rs_rating", "supply_demand_breakouts"),
    )

    for name, table in regular_indexes:
        try:
            op.drop_index(name, table, if_exists=True)
            print(f"✓ Dropped {name}")
        except Exception as e:
            print(f"Warning: Could not drop {name}: {e}")

    # The expression index was created with raw SQL, so drop it the same way.
    try:
        op.execute("DROP INDEX IF EXISTS idx_stocks_stock_ticker_lower")
        print("✓ Dropped ticker lookup index")
    except Exception as e:
        print(f"Warning: Could not drop ticker lookup index: {e}")

    print("Essential performance indexes removal completed!")
158 |
```
--------------------------------------------------------------------------------
/maverick_mcp/memory/stores.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Memory stores for agent conversations and user data.
3 | """
4 |
5 | import logging
6 | from datetime import datetime, timedelta
7 | from typing import Any
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 |
12 | class MemoryStore:
13 | """Base class for memory storage."""
14 |
15 | def __init__(self, ttl_hours: float = 24.0):
16 | self.ttl_hours = ttl_hours
17 | self.store: dict[str, dict[str, Any]] = {}
18 |
19 | def set(self, key: str, value: Any, ttl_hours: float | None = None) -> None:
20 | """Store a value with optional custom TTL."""
21 | ttl = ttl_hours or self.ttl_hours
22 | expiry = datetime.now() + timedelta(hours=ttl)
23 |
24 | self.store[key] = {
25 | "value": value,
26 | "expiry": expiry.isoformat(),
27 | "created": datetime.now().isoformat(),
28 | }
29 |
30 | def get(self, key: str) -> Any | None:
31 | """Get a value if not expired."""
32 | if key not in self.store:
33 | return None
34 |
35 | entry = self.store[key]
36 | expiry = datetime.fromisoformat(entry["expiry"])
37 |
38 | if datetime.now() > expiry:
39 | del self.store[key]
40 | return None
41 |
42 | return entry["value"]
43 |
44 | def delete(self, key: str) -> None:
45 | """Delete a value."""
46 | if key in self.store:
47 | del self.store[key]
48 |
49 | def clear_expired(self) -> int:
50 | """Clear all expired entries."""
51 | now = datetime.now()
52 | expired_keys = []
53 |
54 | for key, entry in self.store.items():
55 | if now > datetime.fromisoformat(entry["expiry"]):
56 | expired_keys.append(key)
57 |
58 | for key in expired_keys:
59 | del self.store[key]
60 |
61 | return len(expired_keys)
62 |
63 |
class ConversationStore(MemoryStore):
    """Store for conversation-specific data (analyses and context).

    Keys are namespaced by session id so multiple conversations can share
    a single store instance.
    """

    def save_analysis(
        self, session_id: str, symbol: str, analysis_type: str, data: dict[str, Any]
    ) -> None:
        """Save analysis results for a conversation."""
        key = f"{session_id}:analysis:{symbol}:{analysis_type}"

        analysis_record = {
            "symbol": symbol,
            "type": analysis_type,
            "data": data,
            "timestamp": datetime.now().isoformat(),
        }

        self.set(key, analysis_record)

    def get_analysis(
        self, session_id: str, symbol: str, analysis_type: str
    ) -> dict[str, Any] | None:
        """Get cached analysis for a symbol (None if absent or expired)."""
        key = f"{session_id}:analysis:{symbol}:{analysis_type}"
        return self.get(key)

    def save_context(self, session_id: str, context_type: str, data: Any) -> None:
        """Save conversation context."""
        key = f"{session_id}:context:{context_type}"
        self.set(key, data)

    def get_context(self, session_id: str, context_type: str) -> Any | None:
        """Get conversation context (None if absent or expired)."""
        key = f"{session_id}:context:{context_type}"
        return self.get(key)

    def list_analyses(self, session_id: str) -> list[dict[str, Any]]:
        """List all non-expired analyses for a session.

        Previously this read raw store entries directly, which leaked
        expired records; going through ``get`` honors TTLs (and evicts
        expired entries as a side effect).
        """
        prefix = f"{session_id}:analysis:"

        analyses = []
        # Snapshot the keys: get() may delete expired entries mid-iteration.
        for key in list(self.store):
            if key.startswith(prefix):
                value = self.get(key)
                if value is not None:
                    analyses.append(value)

        return analyses
109 |
110 |
class UserMemoryStore(MemoryStore):
    """Store for user-specific long-term memory (preferences, trades, etc.)."""

    def __init__(self, ttl_hours: float = 168.0):  # 1 week default
        super().__init__(ttl_hours)

    @staticmethod
    def _key(user_id: str, suffix: str) -> str:
        """Build the namespaced storage key for a user."""
        return f"user:{user_id}:{suffix}"

    def save_preference(self, user_id: str, preference_type: str, value: Any) -> None:
        """Persist a user preference with an extended TTL."""
        self.set(
            self._key(user_id, f"pref:{preference_type}"),
            value,
            ttl_hours=self.ttl_hours * 4,  # Longer TTL for preferences
        )

    def get_preference(self, user_id: str, preference_type: str) -> Any | None:
        """Fetch a previously saved user preference."""
        return self.get(self._key(user_id, f"pref:{preference_type}"))

    def save_trade_history(self, user_id: str, trade: dict[str, Any]) -> None:
        """Append a trade (stamped with the current time) to the user's history."""
        key = self._key(user_id, "trades")

        history = self.get(key) or []
        history.append({**trade, "timestamp": datetime.now().isoformat()})

        # Cap the history at the 100 most recent trades.
        self.set(key, history[-100:])

    def get_trade_history(self, user_id: str, limit: int = 50) -> list[dict[str, Any]]:
        """Return up to *limit* of the user's most recent trades."""
        history = self.get(self._key(user_id, "trades")) or []
        return history[-limit:]

    def save_watchlist(self, user_id: str, symbols: list[str]) -> None:
        """Replace the user's watchlist."""
        self.set(self._key(user_id, "watchlist"), symbols)

    def get_watchlist(self, user_id: str) -> list[str]:
        """Return the user's watchlist (empty when unset or expired)."""
        return self.get(self._key(user_id, "watchlist")) or []

    def update_risk_profile(self, user_id: str, profile: dict[str, Any]) -> None:
        """Store the user's risk profile with an extended TTL."""
        self.set(
            self._key(user_id, "risk_profile"), profile, ttl_hours=self.ttl_hours * 4
        )

    def get_risk_profile(self, user_id: str) -> dict[str, Any] | None:
        """Fetch the user's risk profile."""
        return self.get(self._key(user_id, "risk_profile"))
163 |
```
--------------------------------------------------------------------------------
/tools/templates/test_template.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Template for creating new test files.
3 |
4 | Copy this file and modify it to create new tests quickly.
5 | """
6 |
7 | from datetime import UTC, datetime
8 | from unittest.mock import Mock, patch
9 |
10 | import pytest
11 |
12 | # Import what you're testing
13 | # from maverick_mcp.your_module import YourClass, your_function
14 |
15 |
class TestYourClass:
    """Test suite for YourClass.

    Copy this class, rename it after the class under test, and fill in the
    commented example code in each stub.
    """

    @pytest.fixture
    def mock_dependencies(self):
        """Set up common mocks for tests.

        Yields a dict so additional mocks can be added later without
        changing test signatures.
        """
        with patch("maverick_mcp.your_module.external_dependency") as mock_dep:
            mock_dep.return_value = Mock()
            yield {
                "dependency": mock_dep,
            }

    @pytest.fixture
    def sample_data(self):
        """Provide sample test data."""
        return {
            "id": 1,
            "name": "Test Item",
            "value": 42.0,
            "created_at": datetime.now(UTC),
        }

    def test_initialization(self):
        """Test class initialization."""
        # obj = YourClass(param1="value1", param2=42)
        # assert obj.param1 == "value1"
        # assert obj.param2 == 42
        pass

    def test_method_success(self, mock_dependencies, sample_data):
        """Test successful method execution (arrange / act / assert)."""
        # Arrange
        # obj = YourClass()
        # mock_dependencies["dependency"].some_method.return_value = "expected"

        # Act
        # result = obj.your_method(sample_data)

        # Assert
        # assert result == "expected"
        # mock_dependencies["dependency"].some_method.assert_called_once_with(sample_data)
        pass

    def test_method_validation_error(self):
        """Test method behavior with invalid input."""
        # obj = YourClass()

        # with pytest.raises(ValueError, match="Invalid input"):
        #     obj.your_method(None)
        pass

    @pytest.mark.asyncio
    async def test_async_method(self, mock_dependencies):
        """Test asynchronous method (requires pytest-asyncio)."""
        # Arrange
        # obj = YourClass()
        # mock_dependencies["dependency"].async_method = AsyncMock(return_value="async_result")

        # Act
        # result = await obj.async_method()

        # Assert
        # assert result == "async_result"
        pass
80 |
81 |
class TestYourFunction:
    """Test suite for standalone functions.

    Shows the basic, edge-case, and parametrized patterns used in this repo.
    """

    def test_function_basic(self):
        """Test basic function behavior."""
        # result = your_function("input")
        # assert result == "expected_output"
        pass

    def test_function_edge_cases(self):
        """Test edge cases: empty, None, and oversized input."""
        # Test empty input
        # assert your_function("") == ""

        # Test None input
        # with pytest.raises(TypeError):
        #     your_function(None)

        # Test large input
        # large_input = "x" * 10000
        # assert len(your_function(large_input)) <= 10000
        pass

    @pytest.mark.parametrize(
        "input_value,expected",
        [
            ("test1", "result1"),
            ("test2", "result2"),
            ("", ""),
            ("special!@#", "special"),
        ],
    )
    def test_function_parametrized(self, input_value, expected):
        """Test function with multiple input/expected pairs."""
        # result = your_function(input_value)
        # assert result == expected
        pass
119 |
120 |
class TestIntegration:
    """Integration tests (marked for optional execution).

    Run only when real external resources (database, network) are available;
    excluded from the default test selection via the ``integration`` marker.
    """

    @pytest.mark.integration
    def test_database_integration(self, db_session):
        """Test database operations against a real session."""
        # This test requires a real database connection
        # from maverick_mcp.your_module import create_item, get_item

        # # Create
        # item = create_item(db_session, name="Test", value=42)
        # assert item.id is not None

        # # Read
        # retrieved = get_item(db_session, item.id)
        # assert retrieved.name == "Test"
        # assert retrieved.value == 42
        pass

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_external_api_integration(self):
        """Test real external API calls (network required)."""
        # This test makes real API calls
        # from maverick_mcp.your_module import fetch_external_data

        # result = await fetch_external_data("AAPL")
        # assert result is not None
        # assert "price" in result
        pass
151 |
152 |
153 | # Fixtures that can be reused across tests
@pytest.fixture
def mock_redis():
    """Provide a stubbed Redis client: reads miss, writes succeed."""
    with patch("maverick_mcp.data.cache.get_redis_client") as get_client:
        stub = Mock()
        stub.get.return_value = None  # every read is a cache miss
        stub.set.return_value = True  # every write reports success
        get_client.return_value = stub
        yield stub
163 |
164 |
@pytest.fixture
def mock_settings():
    """Patch global settings with auth disabled and debug enabled."""
    with patch("maverick_mcp.config.settings.settings") as patched:
        patched.auth.enabled = False
        patched.api.debug = True
        yield patched
172 |
173 |
174 | # Performance tests (optional)
@pytest.mark.slow
class TestPerformance:
    """Performance tests (excluded by default via the ``slow`` marker)."""

    def test_large_dataset_processing(self):
        """Test processing time of a large dataset stays within budget."""
        # import time
        # from maverick_mcp.your_module import process_data

        # large_data = list(range(1_000_000))
        # start = time.time()
        # result = process_data(large_data)
        # duration = time.time() - start

        # assert len(result) == 1_000_000
        # assert duration < 1.0  # Should complete in under 1 second
        pass
192 |
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/mocks/mock_macro_data.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Mock macro data provider implementation for testing.
3 | """
4 |
5 | from datetime import datetime
6 | from typing import Any
7 |
8 |
9 | class MockMacroDataProvider:
10 | """
11 | Mock implementation of IMacroDataProvider for testing.
12 | """
13 |
14 | def __init__(self, test_data: dict[str, Any] | None = None):
15 | """
16 | Initialize the mock macro data provider.
17 |
18 | Args:
19 | test_data: Optional test data to return
20 | """
21 | self._test_data = test_data or {}
22 | self._call_log: list[dict[str, Any]] = []
23 |
24 | async def get_gdp_growth_rate(self) -> dict[str, Any]:
25 | """Get mock GDP growth rate."""
26 | self._log_call("get_gdp_growth_rate", {})
27 |
28 | if "gdp_growth_rate" in self._test_data:
29 | return self._test_data["gdp_growth_rate"]
30 |
31 | return {
32 | "current": 2.5,
33 | "previous": 2.3,
34 | }
35 |
36 | async def get_unemployment_rate(self) -> dict[str, Any]:
37 | """Get mock unemployment rate."""
38 | self._log_call("get_unemployment_rate", {})
39 |
40 | if "unemployment_rate" in self._test_data:
41 | return self._test_data["unemployment_rate"]
42 |
43 | return {
44 | "current": 3.8,
45 | "previous": 3.9,
46 | }
47 |
48 | async def get_inflation_rate(self) -> dict[str, Any]:
49 | """Get mock inflation rate."""
50 | self._log_call("get_inflation_rate", {})
51 |
52 | if "inflation_rate" in self._test_data:
53 | return self._test_data["inflation_rate"]
54 |
55 | return {
56 | "current": 3.2,
57 | "previous": 3.4,
58 | "bounds": (1.5, 6.8),
59 | }
60 |
61 | async def get_vix(self) -> float | None:
62 | """Get mock VIX data."""
63 | self._log_call("get_vix", {})
64 |
65 | if "vix" in self._test_data:
66 | return self._test_data["vix"]
67 |
68 | return 18.5
69 |
70 | async def get_sp500_performance(self) -> float:
71 | """Get mock S&P 500 performance."""
72 | self._log_call("get_sp500_performance", {})
73 |
74 | if "sp500_performance" in self._test_data:
75 | return self._test_data["sp500_performance"]
76 |
77 | return 1.25
78 |
79 | async def get_nasdaq_performance(self) -> float:
80 | """Get mock NASDAQ performance."""
81 | self._log_call("get_nasdaq_performance", {})
82 |
83 | if "nasdaq_performance" in self._test_data:
84 | return self._test_data["nasdaq_performance"]
85 |
86 | return 1.85
87 |
88 | async def get_sp500_momentum(self) -> float:
89 | """Get mock S&P 500 momentum."""
90 | self._log_call("get_sp500_momentum", {})
91 |
92 | if "sp500_momentum" in self._test_data:
93 | return self._test_data["sp500_momentum"]
94 |
95 | return 0.75
96 |
97 | async def get_nasdaq_momentum(self) -> float:
98 | """Get mock NASDAQ momentum."""
99 | self._log_call("get_nasdaq_momentum", {})
100 |
101 | if "nasdaq_momentum" in self._test_data:
102 | return self._test_data["nasdaq_momentum"]
103 |
104 | return 1.15
105 |
106 | async def get_usd_momentum(self) -> float:
107 | """Get mock USD momentum."""
108 | self._log_call("get_usd_momentum", {})
109 |
110 | if "usd_momentum" in self._test_data:
111 | return self._test_data["usd_momentum"]
112 |
113 | return -0.35
114 |
115 | async def get_macro_statistics(self) -> dict[str, Any]:
116 | """Get mock comprehensive macro statistics."""
117 | self._log_call("get_macro_statistics", {})
118 |
119 | if "macro_statistics" in self._test_data:
120 | return self._test_data["macro_statistics"]
121 |
122 | return {
123 | "gdp_growth_rate": 2.5,
124 | "gdp_growth_rate_previous": 2.3,
125 | "unemployment_rate": 3.8,
126 | "unemployment_rate_previous": 3.9,
127 | "inflation_rate": 3.2,
128 | "inflation_rate_previous": 3.4,
129 | "sp500_performance": 1.25,
130 | "nasdaq_performance": 1.85,
131 | "vix": 18.5,
132 | "sentiment_score": 65.5,
133 | "historical_data": self._generate_mock_historical_data(),
134 | }
135 |
136 | async def get_historical_data(self) -> dict[str, Any]:
137 | """Get mock historical data."""
138 | self._log_call("get_historical_data", {})
139 |
140 | if "historical_data" in self._test_data:
141 | return self._test_data["historical_data"]
142 |
143 | return self._generate_mock_historical_data()
144 |
145 | def _generate_mock_historical_data(self) -> dict[str, Any]:
146 | """Generate mock historical data for indicators."""
147 | return {
148 | "sp500_performance": [1.0, 1.1, 1.2, 1.25, 1.3],
149 | "nasdaq_performance": [1.5, 1.6, 1.7, 1.8, 1.85],
150 | "vix": [20.0, 19.5, 18.8, 18.2, 18.5],
151 | "gdp_growth_rate": [2.1, 2.2, 2.3, 2.4, 2.5],
152 | "unemployment_rate": [4.2, 4.1, 4.0, 3.9, 3.8],
153 | "inflation_rate": [3.8, 3.6, 3.5, 3.4, 3.2],
154 | }
155 |
156 | # Testing utilities
157 |
158 | def _log_call(self, method: str, args: dict[str, Any]) -> None:
159 | """Log method calls for testing verification."""
160 | self._call_log.append(
161 | {
162 | "method": method,
163 | "args": args,
164 | "timestamp": datetime.now(),
165 | }
166 | )
167 |
168 | def get_call_log(self) -> list[dict[str, Any]]:
169 | """Get the log of method calls."""
170 | return self._call_log.copy()
171 |
172 | def clear_call_log(self) -> None:
173 | """Clear the method call log."""
174 | self._call_log.clear()
175 |
176 | def set_test_data(self, key: str, data: Any) -> None:
177 | """Set test data for a specific key."""
178 | self._test_data[key] = data
179 |
180 | def clear_test_data(self) -> None:
181 | """Clear all test data."""
182 | self._test_data.clear()
183 |
```
--------------------------------------------------------------------------------
/maverick_mcp/domain/entities/stock_analysis.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Stock analysis entity.
3 |
4 | This entity represents a complete technical analysis of a stock.
5 | """
6 |
7 | from dataclasses import dataclass
8 | from datetime import datetime
9 |
10 | from maverick_mcp.domain.value_objects.technical_indicators import (
11 | BollingerBands,
12 | MACDIndicator,
13 | PriceLevel,
14 | RSIIndicator,
15 | Signal,
16 | StochasticOscillator,
17 | TrendDirection,
18 | VolumeProfile,
19 | )
20 |
21 |
@dataclass
class StockAnalysis:
    """
    Entity representing a comprehensive technical analysis of a stock.

    Aggregates trend information, individual technical indicators, key price
    levels, and a composite signal for a single symbol at a point in time.
    """

    # Basic information
    symbol: str
    analysis_date: datetime
    current_price: float

    # Trend analysis
    trend_direction: TrendDirection
    trend_strength: float  # 0-100

    # Technical indicators
    rsi: RSIIndicator | None = None
    macd: MACDIndicator | None = None
    bollinger_bands: BollingerBands | None = None
    stochastic: StochasticOscillator | None = None

    # Price levels
    support_levels: list[PriceLevel] | None = None
    resistance_levels: list[PriceLevel] | None = None

    # Volume analysis
    volume_profile: VolumeProfile | None = None

    # Composite analysis
    composite_signal: Signal = Signal.NEUTRAL
    confidence_score: float = 0.0  # 0-100

    # Analysis metadata
    analysis_period_days: int = 365
    indicators_used: list[str] | None = None

    def __post_init__(self):
        """Materialize empty lists for fields whose defaults must be mutable."""
        if self.support_levels is None:
            self.support_levels = []
        if self.resistance_levels is None:
            self.resistance_levels = []
        if self.indicators_used is None:
            self.indicators_used = []

    @property
    def has_bullish_setup(self) -> bool:
        """True when the composite signal is a buy or strong buy."""
        return self.composite_signal in (Signal.BUY, Signal.STRONG_BUY)

    @property
    def has_bearish_setup(self) -> bool:
        """True when the composite signal is a sell or strong sell."""
        return self.composite_signal in (Signal.SELL, Signal.STRONG_SELL)

    @property
    def nearest_support(self) -> PriceLevel | None:
        """The highest support level strictly below the current price, if any."""
        if not self.support_levels:
            return None
        below = (
            level for level in self.support_levels if level.price < self.current_price
        )
        return max(below, key=lambda level: level.price, default=None)

    @property
    def nearest_resistance(self) -> PriceLevel | None:
        """The lowest resistance level strictly above the current price, if any."""
        if not self.resistance_levels:
            return None
        above = (
            level
            for level in self.resistance_levels
            if level.price > self.current_price
        )
        return min(above, key=lambda level: level.price, default=None)

    @property
    def risk_reward_ratio(self) -> float | None:
        """Reward/risk ratio based on the nearest resistance and support."""
        support = self.nearest_support
        resistance = self.nearest_resistance

        if not support or not resistance:
            return None

        risk = self.current_price - support.price
        if risk <= 0:
            # A non-positive risk distance would make the ratio meaningless.
            return None

        reward = resistance.price - self.current_price
        return reward / risk

    def get_indicator_summary(self) -> dict[str, str]:
        """Summarize each populated indicator as a short display string."""
        summary: dict[str, str] = {}

        if self.rsi:
            summary["RSI"] = f"{self.rsi.value:.1f} ({self.rsi.signal.value})"
        if self.macd:
            summary["MACD"] = self.macd.signal.value
        if self.bollinger_bands:
            summary["Bollinger"] = self.bollinger_bands.signal.value
        if self.stochastic:
            summary["Stochastic"] = (
                f"{self.stochastic.k_value:.1f} ({self.stochastic.signal.value})"
            )
        if self.volume_profile:
            summary["Volume"] = f"{self.volume_profile.relative_volume:.1f}x average"

        return summary

    def get_key_levels(self) -> dict[str, float]:
        """Key price levels relevant for trading decisions."""
        levels: dict[str, float] = {"current_price": self.current_price}

        support = self.nearest_support
        if support:
            levels["nearest_support"] = support.price

        resistance = self.nearest_resistance
        if resistance:
            levels["nearest_resistance"] = resistance.price

        if self.bollinger_bands:
            levels["bollinger_upper"] = self.bollinger_bands.upper_band
            levels["bollinger_lower"] = self.bollinger_bands.lower_band

        return levels

    def to_dict(self) -> dict:
        """Serialize the analysis to a plain dictionary."""
        return {
            "symbol": self.symbol,
            "analysis_date": self.analysis_date.isoformat(),
            "current_price": self.current_price,
            "trend": {
                "direction": self.trend_direction.value,
                "strength": self.trend_strength,
            },
            "indicators": self.get_indicator_summary(),
            "levels": self.get_key_levels(),
            "signal": self.composite_signal.value,
            "confidence": self.confidence_score,
            "risk_reward_ratio": self.risk_reward_ratio,
        }
184 |
```
--------------------------------------------------------------------------------
/server.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "$schema": "https://static.modelcontextprotocol.io/schemas/2025-07-09/server.schema.json",
3 | "name": "io.github.wshobson/maverick-mcp",
4 | "description": "Stock analysis MCP server with S&P 500 data, technical indicators, and AI research tools.",
5 | "status": "active",
6 | "repository": {
7 | "url": "https://github.com/wshobson/maverick-mcp",
8 | "source": "github"
9 | },
10 | "version": "0.1.0",
11 | "remotes": [
12 | {
13 | "name": "sse",
14 | "description": "SSE transport for web-based clients and remote connections",
15 | "transport": {
16 | "type": "sse",
17 | "url": "http://localhost:8003/sse/"
18 | },
19 | "setup_instructions": [
20 | "Clone repository: git clone https://github.com/wshobson/maverick-mcp.git",
21 | "Install dependencies: uv sync (or pip install -e .)",
22 | "Copy .env.example to .env and add your TIINGO_API_KEY",
23 | "Start server: make dev (or uv run python -m maverick_mcp.api.server --transport sse --port 8003)"
24 | ],
25 | "environment_variables": [
26 | {
27 | "name": "TIINGO_API_KEY",
28 | "description": "Required API key for Tiingo stock data provider. Get free key at https://tiingo.com (500 requests/day free tier)",
29 | "is_required": true,
30 | "is_secret": true
31 | },
32 | {
33 | "name": "OPENROUTER_API_KEY",
34 | "description": "Optional API key for OpenRouter (400+ AI models with intelligent cost optimization). Get at https://openrouter.ai",
35 | "is_required": false,
36 | "is_secret": true
37 | },
38 | {
39 | "name": "EXA_API_KEY",
40 | "description": "Optional API key for Exa web search (advanced research features). Get at https://exa.ai",
41 | "is_required": false,
42 | "is_secret": true
43 | },
44 | {
45 | "name": "TAVILY_API_KEY",
46 | "description": "Optional API key for Tavily web search (research features). Get at https://tavily.com",
47 | "is_required": false,
48 | "is_secret": true
49 | },
50 | {
51 | "name": "FRED_API_KEY",
52 | "description": "Optional API key for Federal Reserve Economic Data (macroeconomic indicators). Get at https://fred.stlouisfed.org/docs/api/",
53 | "is_required": false,
54 | "is_secret": true
55 | },
56 | {
57 | "name": "DATABASE_URL",
58 | "description": "Optional database URL. Defaults to SQLite (sqlite:///maverick_mcp.db) if not provided. PostgreSQL supported for better performance.",
59 | "is_required": false,
60 | "is_secret": false
61 | },
62 | {
63 | "name": "REDIS_HOST",
64 | "description": "Optional Redis host for enhanced caching performance. Defaults to in-memory caching if not provided.",
65 | "is_required": false,
66 | "is_secret": false
67 | },
68 | {
69 | "name": "REDIS_PORT",
70 | "description": "Optional Redis port (default: 6379)",
71 | "is_required": false,
72 | "is_secret": false
73 | }
74 | ]
75 | },
76 | {
77 | "name": "streamable-http",
78 | "description": "Streamable HTTP transport for remote access via mcp-remote bridge",
79 | "transport": {
80 | "type": "streamable-http",
81 | "url": "http://localhost:8003/mcp/"
82 | },
83 | "setup_instructions": [
84 | "Clone repository: git clone https://github.com/wshobson/maverick-mcp.git",
85 | "Install dependencies: uv sync (or pip install -e .)",
86 | "Copy .env.example to .env and add your TIINGO_API_KEY",
87 | "Start server: make dev (or uv run python -m maverick_mcp.api.server --transport streamable-http --port 8003)",
88 | "Connect via mcp-remote: npx mcp-remote http://localhost:8003/mcp/"
89 | ],
90 | "environment_variables": [
91 | {
92 | "name": "TIINGO_API_KEY",
93 | "description": "Required API key for Tiingo stock data provider. Get free key at https://tiingo.com (500 requests/day free tier)",
94 | "is_required": true,
95 | "is_secret": true
96 | },
97 | {
98 | "name": "OPENROUTER_API_KEY",
99 | "description": "Optional API key for OpenRouter (400+ AI models with intelligent cost optimization). Get at https://openrouter.ai",
100 | "is_required": false,
101 | "is_secret": true
102 | },
103 | {
104 | "name": "EXA_API_KEY",
105 | "description": "Optional API key for Exa web search (advanced research features). Get at https://exa.ai",
106 | "is_required": false,
107 | "is_secret": true
108 | },
109 | {
110 | "name": "TAVILY_API_KEY",
111 | "description": "Optional API key for Tavily web search (research features). Get at https://tavily.com",
112 | "is_required": false,
113 | "is_secret": true
114 | },
115 | {
116 | "name": "FRED_API_KEY",
117 | "description": "Optional API key for Federal Reserve Economic Data (macroeconomic indicators). Get at https://fred.stlouisfed.org/docs/api/",
118 | "is_required": false,
119 | "is_secret": true
120 | },
121 | {
122 | "name": "DATABASE_URL",
123 | "description": "Optional database URL. Defaults to SQLite (sqlite:///maverick_mcp.db) if not provided. PostgreSQL supported for better performance.",
124 | "is_required": false,
125 | "is_secret": false
126 | },
127 | {
128 | "name": "REDIS_HOST",
129 | "description": "Optional Redis host for enhanced caching performance. Defaults to in-memory caching if not provided.",
130 | "is_required": false,
131 | "is_secret": false
132 | },
133 | {
134 | "name": "REDIS_PORT",
135 | "description": "Optional Redis port (default: 6379)",
136 | "is_required": false,
137 | "is_secret": false
138 | }
139 | ]
140 | }
141 | ]
142 | }
143 |
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/implementations/macro_data_adapter.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Macro data provider adapter.
3 |
4 | This module provides adapters that make the existing MacroDataProvider
5 | compatible with the new IMacroDataProvider interface.
6 | """
7 |
8 | import asyncio
9 | import logging
10 | from typing import Any
11 |
12 | from maverick_mcp.providers.interfaces.macro_data import (
13 | IMacroDataProvider,
14 | MacroDataConfig,
15 | )
16 | from maverick_mcp.providers.macro_data import MacroDataProvider
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
class MacroDataAdapter(IMacroDataProvider):
    """
    Adapter that makes the existing MacroDataProvider compatible with IMacroDataProvider interface.

    This adapter wraps the existing provider and exposes it through the new
    interface contracts, enabling gradual migration to the new architecture.

    Every async method delegates the matching synchronous provider call to
    the default thread-pool executor, so blocking data work never stalls the
    event loop.
    """

    def __init__(self, config: MacroDataConfig | None = None):
        """
        Initialize the macro data adapter.

        Args:
            config: Macro data configuration (optional)
        """
        self._config = config

        # Initialize the existing provider with configuration
        window_days = config.window_days if config else 365
        self._provider = MacroDataProvider(window_days=window_days)

        logger.debug("MacroDataAdapter initialized")

    async def _run_sync(self, func) -> Any:
        """
        Run a synchronous provider callable in the default executor.

        Uses asyncio.get_running_loop() instead of the deprecated
        asyncio.get_event_loop() pattern: these wrappers are only ever
        awaited from inside a running event loop, where get_event_loop()
        has emitted a DeprecationWarning since Python 3.10.

        Args:
            func: Zero-argument synchronous callable to execute.

        Returns:
            Whatever the callable returns.
        """
        return await asyncio.get_running_loop().run_in_executor(None, func)

    async def get_gdp_growth_rate(self) -> dict[str, Any]:
        """
        Get GDP growth rate data (async wrapper).

        Returns:
            Dictionary with current and previous GDP growth rates
        """
        return await self._run_sync(self._provider.get_gdp_growth_rate)

    async def get_unemployment_rate(self) -> dict[str, Any]:
        """
        Get unemployment rate data (async wrapper).

        Returns:
            Dictionary with current and previous unemployment rates
        """
        return await self._run_sync(self._provider.get_unemployment_rate)

    async def get_inflation_rate(self) -> dict[str, Any]:
        """
        Get inflation rate data based on CPI (async wrapper).

        Returns:
            Dictionary with current and previous inflation rates and bounds
        """
        return await self._run_sync(self._provider.get_inflation_rate)

    async def get_vix(self) -> float | None:
        """
        Get VIX (volatility index) data (async wrapper).

        Returns:
            Current VIX value or None if unavailable
        """
        return await self._run_sync(self._provider.get_vix)

    async def get_sp500_performance(self) -> float:
        """
        Get S&P 500 performance over multiple timeframes (async wrapper).

        Returns:
            Weighted performance percentage
        """
        return await self._run_sync(self._provider.get_sp500_performance)

    async def get_nasdaq_performance(self) -> float:
        """
        Get NASDAQ performance over multiple timeframes (async wrapper).

        Returns:
            Weighted performance percentage
        """
        return await self._run_sync(self._provider.get_nasdaq_performance)

    async def get_sp500_momentum(self) -> float:
        """
        Get short-term S&P 500 momentum (async wrapper).

        Returns:
            Momentum percentage over short timeframes
        """
        return await self._run_sync(self._provider.get_sp500_momentum)

    async def get_nasdaq_momentum(self) -> float:
        """
        Get short-term NASDAQ momentum (async wrapper).

        Returns:
            Momentum percentage over short timeframes
        """
        return await self._run_sync(self._provider.get_nasdaq_momentum)

    async def get_usd_momentum(self) -> float:
        """
        Get USD momentum using broad dollar index (async wrapper).

        Returns:
            USD momentum percentage over short timeframes
        """
        return await self._run_sync(self._provider.get_usd_momentum)

    async def get_macro_statistics(self) -> dict[str, Any]:
        """
        Get comprehensive macroeconomic statistics (async wrapper).

        Returns:
            Dictionary with all macro indicators including:
            - gdp_growth_rate: Current and previous GDP growth
            - unemployment_rate: Current and previous unemployment
            - inflation_rate: Current and previous inflation
            - sp500_performance: S&P 500 performance
            - nasdaq_performance: NASDAQ performance
            - vix: Volatility index
            - sentiment_score: Computed sentiment score
            - historical_data: Time series data
        """
        return await self._run_sync(self._provider.get_macro_statistics)

    async def get_historical_data(self) -> dict[str, Any]:
        """
        Get historical data for all indicators (async wrapper).

        Returns:
            Dictionary with time series data for various indicators
        """
        return await self._run_sync(self._provider.get_historical_data)

    def get_sync_provider(self) -> MacroDataProvider:
        """
        Get the underlying synchronous provider for backward compatibility.

        Returns:
            The wrapped MacroDataProvider instance
        """
        return self._provider
170 |
```
--------------------------------------------------------------------------------
/maverick_mcp/tests/README_INMEMORY_TESTS.md:
--------------------------------------------------------------------------------
```markdown
1 | # In-Memory Testing Guide for Maverick-MCP
2 |
3 | This guide explains the in-memory testing patterns implemented for Maverick-MCP using FastMCP's testing capabilities.
4 |
5 | ## Overview
6 |
7 | In-memory testing allows us to test the MCP server without:
8 | - Starting external processes
9 | - Making network calls
10 | - Managing server lifecycle
11 | - Dealing with port conflicts
12 |
13 | This results in faster, more reliable tests that can run in any environment.
14 |
15 | ## Test Files
16 |
17 | ### 1. `test_in_memory_server.py`
18 | Basic in-memory server tests covering:
19 | - Health endpoint validation
20 | - Stock data fetching
21 | - Technical analysis tools
22 | - Batch operations
23 | - Input validation
24 | - Error handling
25 | - Resource management
26 |
27 | ### 2. `test_in_memory_routers.py`
28 | Domain-specific router tests:
29 | - Technical analysis router (RSI, MACD, support/resistance)
30 | - Screening router (Maverick, Trending Breakout)
31 | - Portfolio router (risk analysis, correlation)
32 | - Data router (batch fetching, caching)
33 | - Concurrent router operations
34 |
35 | ### 3. `test_advanced_patterns.py`
36 | Advanced testing patterns:
37 | - External dependency mocking (yfinance, Redis)
38 | - Performance and load testing
39 | - Error recovery patterns
40 | - Integration scenarios
41 | - Monitoring and metrics
42 |
43 | ## Running the Tests
44 |
45 | ### Run all in-memory tests:
46 | ```bash
47 | pytest maverick_mcp/tests/test_in_memory*.py -v
48 | ```
49 |
50 | ### Run specific test file:
51 | ```bash
52 | pytest maverick_mcp/tests/test_in_memory_server.py -v
53 | ```
54 |
55 | ### Run with coverage:
56 | ```bash
57 | pytest maverick_mcp/tests/test_in_memory*.py --cov=maverick_mcp --cov-report=html
58 | ```
59 |
60 | ### Run specific test class:
61 | ```bash
62 | pytest maverick_mcp/tests/test_in_memory_routers.py::TestTechnicalRouter -v
63 | ```
64 |
65 | ## Key Testing Patterns
66 |
67 | ### 1. In-Memory Database
68 | ```python
69 | @pytest.fixture
70 | def test_db():
71 | """Create an in-memory SQLite database for testing."""
72 | engine = create_engine("sqlite:///:memory:")
73 | Base.metadata.create_all(engine)
74 | # Add test data...
75 | yield engine
76 | ```
77 |
78 | ### 2. Mock External Services
79 | ```python
80 | @pytest.fixture
81 | def mock_redis():
82 | """Mock Redis client for testing."""
83 | with patch('maverick_mcp.data.cache.RedisCache') as mock:
84 | cache_instance = Mock()
85 | # Configure mock behavior...
86 | yield cache_instance
87 | ```
88 |
89 | ### 3. FastMCP Client Testing
90 | ```python
91 | async with Client(mcp) as client:
92 | result = await client.call_tool("tool_name", {"param": "value"})
93 | assert result.text is not None
94 | ```
95 |
96 | ### 4. Router Isolation
97 | ```python
98 | test_mcp = FastMCP("TestServer")
99 | test_mcp.mount("/technical", technical_router)
100 | async with Client(test_mcp) as client:
101 | # Test only technical router
102 | ```
103 |
104 | ## Benefits
105 |
106 | ### 1. **Speed**
107 | - No process startup overhead
108 | - No network latency
109 | - Instant test execution
110 |
111 | ### 2. **Reliability**
112 | - No port conflicts
113 | - No external dependencies
114 | - Deterministic results
115 |
116 | ### 3. **Isolation**
117 | - Each test runs in isolation
118 | - No shared state between tests
119 | - Easy to debug failures
120 |
121 | ### 4. **Flexibility**
122 | - Easy to mock dependencies
123 | - Test specific scenarios
124 | - Control external service behavior
125 |
126 | ## Best Practices
127 |
128 | ### 1. Use Fixtures
129 | Create reusable fixtures for common test setup:
130 | ```python
131 | @pytest.fixture
132 | def populated_db(test_db):
133 | """Database with test data."""
134 | # Add stocks, prices, etc.
135 | return test_db
136 | ```
137 |
138 | ### 2. Mock External APIs
139 | Always mock external services:
140 | ```python
141 | with patch('yfinance.download') as mock_yf:
142 | mock_yf.return_value = test_data
143 | # Run tests
144 | ```
145 |
146 | ### 3. Test Error Scenarios
147 | Include tests for failure cases:
148 | ```python
149 | mock_yf.side_effect = Exception("API Error")
150 | # Verify graceful handling
151 | ```
152 |
153 | ### 4. Measure Performance
154 | Use timing to ensure performance:
155 | ```python
156 | start_time = time.time()
157 | await client.call_tool("tool_name", params)
158 | duration = time.time() - start_time
159 | assert duration < 1.0 # Should complete in under 1 second
160 | ```
161 |
162 | ## Debugging Tests
163 |
164 | ### Enable logging:
165 | ```python
166 | import logging
167 | logging.basicConfig(level=logging.DEBUG)
168 | ```
169 |
170 | ### Use pytest debugging:
171 | ```bash
172 | pytest -vv --pdb # Drop into debugger on failure
173 | ```
174 |
175 | ### Capture output:
176 | ```bash
177 | pytest -s # Don't capture stdout
178 | ```
179 |
180 | ## CI/CD Integration
181 |
182 | These tests are perfect for CI/CD pipelines:
183 |
184 | ```yaml
185 | # .github/workflows/test.yml
186 | - name: Run in-memory tests
187 | run: |
188 | pytest maverick_mcp/tests/test_in_memory*.py \
189 | --cov=maverick_mcp \
190 | --cov-report=xml \
191 | --junit-xml=test-results.xml
192 | ```
193 |
194 | ## Extending the Tests
195 |
196 | To add new test cases:
197 |
198 | 1. Choose the appropriate test file based on what you're testing
199 | 2. Use existing fixtures or create new ones
200 | 3. Follow the async pattern with `Client(mcp)`
201 | 4. Mock external dependencies
202 | 5. Assert both success and failure cases
203 |
204 | Example:
205 | ```python
206 | @pytest.mark.asyncio
207 | async def test_new_feature(test_db, mock_redis):
208 | """Test description."""
209 | async with Client(mcp) as client:
210 | result = await client.call_tool("new_tool", {
211 | "param": "value"
212 | })
213 |
214 | assert result.text is not None
        data = json.loads(result.text)  # never eval() tool output
216 | assert data["expected_key"] == "expected_value"
217 | ```
218 |
219 | ## Troubleshooting
220 |
221 | ### Common Issues:
222 |
223 | 1. **Import Errors**: Ensure maverick_mcp is installed: `pip install -e .`
224 | 2. **Async Warnings**: Use `pytest-asyncio` for async tests
225 | 3. **Mock Not Working**: Check patch path matches actual import
226 | 4. **Database Errors**: Ensure models are imported before `create_all()`
227 |
228 | ### Tips:
229 |
230 | - Run tests in isolation first to identify issues
231 | - Check fixture dependencies
232 | - Verify mock configurations
233 | - Use debugger to inspect test state
234 |
235 | ## Conclusion
236 |
237 | These in-memory tests provide comprehensive coverage of Maverick-MCP functionality while maintaining fast execution and reliability. They demonstrate best practices for testing MCP servers and can be easily extended for new features.
```
--------------------------------------------------------------------------------
/maverick_mcp/application/dto/technical_analysis_dto.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Data Transfer Objects for technical analysis.
3 |
4 | These DTOs are used to transfer data between the application layer
5 | and the API layer, providing a stable contract for API responses.
6 | """
7 |
8 | from datetime import datetime
9 |
10 | from pydantic import BaseModel, Field
11 |
12 |
class RSIAnalysisDTO(BaseModel):
    """RSI analysis response DTO.

    Carries a Relative Strength Index reading together with the derived
    trading signal and overbought/oversold flags.
    """

    current_value: float = Field(..., description="Current RSI value")
    period: int = Field(..., description="RSI calculation period")
    signal: str = Field(..., description="Trading signal")
    is_overbought: bool = Field(..., description="Whether RSI indicates overbought")
    is_oversold: bool = Field(..., description="Whether RSI indicates oversold")
    interpretation: str = Field(..., description="Human-readable interpretation")
22 |
23 |
class MACDAnalysisDTO(BaseModel):
    """MACD analysis response DTO.

    Reports the MACD line, signal line, and histogram values plus
    crossover flags and the derived trading signal.
    """

    macd_line: float = Field(..., description="MACD line value")
    signal_line: float = Field(..., description="Signal line value")
    histogram: float = Field(..., description="MACD histogram value")
    signal: str = Field(..., description="Trading signal")
    is_bullish_crossover: bool = Field(..., description="Bullish crossover detected")
    is_bearish_crossover: bool = Field(..., description="Bearish crossover detected")
    interpretation: str = Field(..., description="Human-readable interpretation")
34 |
35 |
class BollingerBandsDTO(BaseModel):
    """Bollinger Bands analysis response DTO.

    Describes the three band values, where the current price sits within
    them (percent_b), and the bandwidth volatility measure.
    """

    upper_band: float = Field(..., description="Upper band value")
    middle_band: float = Field(..., description="Middle band (SMA) value")
    lower_band: float = Field(..., description="Lower band value")
    current_price: float = Field(..., description="Current stock price")
    bandwidth: float = Field(..., description="Band width (volatility indicator)")
    percent_b: float = Field(..., description="Position within bands (0-1)")
    signal: str = Field(..., description="Trading signal")
    interpretation: str = Field(..., description="Human-readable interpretation")
47 |
48 |
class StochasticDTO(BaseModel):
    """Stochastic oscillator response DTO.

    Carries the %K/%D oscillator values with overbought/oversold flags
    and the derived trading signal.
    """

    k_value: float = Field(..., description="%K value")
    d_value: float = Field(..., description="%D value")
    signal: str = Field(..., description="Trading signal")
    is_overbought: bool = Field(..., description="Whether indicating overbought")
    is_oversold: bool = Field(..., description="Whether indicating oversold")
    interpretation: str = Field(..., description="Human-readable interpretation")
58 |
59 |
class PriceLevelDTO(BaseModel):
    """Price level (support/resistance) DTO.

    A single support or resistance level, its 1-5 strength rating, how
    often it was tested, and its distance from the current price.
    """

    price: float = Field(..., description="Price level")
    strength: int = Field(..., ge=1, le=5, description="Level strength (1-5)")
    touches: int = Field(..., description="Number of times tested")
    distance_from_current: float = Field(
        ..., description="Distance from current price (%)"
    )
69 |
70 |
class VolumeAnalysisDTO(BaseModel):
    """Volume analysis response DTO.

    Compares current trading volume to its average, reports the volume
    trend, and flags unusual activity.
    """

    current_volume: int = Field(..., description="Current trading volume")
    average_volume: float = Field(..., description="Average volume")
    relative_volume: float = Field(..., description="Volume relative to average")
    volume_trend: str = Field(..., description="Volume trend direction")
    unusual_activity: bool = Field(..., description="Unusual volume detected")
    interpretation: str = Field(..., description="Human-readable interpretation")
80 |
81 |
class TrendAnalysisDTO(BaseModel):
    """Trend analysis response DTO.

    Direction plus a 0-100 strength score and a plain-English summary.
    """

    direction: str = Field(..., description="Trend direction")
    strength: float = Field(..., ge=0, le=100, description="Trend strength (0-100)")
    interpretation: str = Field(..., description="Human-readable interpretation")
88 |
89 |
class TechnicalAnalysisRequestDTO(BaseModel):
    """Request DTO for technical analysis.

    The lookback window is bounded to 30-1825 days; when `indicators` is
    omitted, all indicators are calculated.
    """

    symbol: str = Field(..., description="Stock ticker symbol")
    days: int = Field(
        default=365, ge=30, le=1825, description="Days of historical data"
    )
    indicators: list[str] | None = Field(
        default=None, description="Specific indicators to calculate (default: all)"
    )
100 |
101 |
class CompleteTechnicalAnalysisDTO(BaseModel):
    """Complete technical analysis response DTO.

    Aggregates all per-indicator DTOs for one symbol into a single API
    response. Individual indicator sections are optional (None when not
    requested or not computable); trend, composite signal, confidence,
    summary, and key levels are always present.
    """

    symbol: str = Field(..., description="Stock ticker symbol")
    analysis_date: datetime = Field(..., description="Analysis timestamp")
    current_price: float = Field(..., description="Current stock price")

    # Trend
    trend: TrendAnalysisDTO = Field(..., description="Trend analysis")

    # Indicators
    rsi: RSIAnalysisDTO | None = Field(None, description="RSI analysis")
    macd: MACDAnalysisDTO | None = Field(None, description="MACD analysis")
    bollinger_bands: BollingerBandsDTO | None = Field(
        None, description="Bollinger Bands"
    )
    stochastic: StochasticDTO | None = Field(None, description="Stochastic oscillator")

    # Levels
    support_levels: list[PriceLevelDTO] = Field(
        default_factory=list, description="Support levels"
    )
    resistance_levels: list[PriceLevelDTO] = Field(
        default_factory=list, description="Resistance levels"
    )

    # Volume
    volume_analysis: VolumeAnalysisDTO | None = Field(
        None, description="Volume analysis"
    )

    # Overall analysis
    composite_signal: str = Field(..., description="Overall trading signal")
    confidence_score: float = Field(
        ..., ge=0, le=100, description="Analysis confidence (0-100)"
    )
    risk_reward_ratio: float | None = Field(None, description="Risk/reward ratio")

    # Summary
    summary: str = Field(..., description="Executive summary of analysis")
    key_levels: dict[str, float] = Field(
        ..., description="Key price levels for trading"
    )

    class Config:
        # NOTE(review): `json_encoders` is Pydantic v1 config style and is
        # deprecated in Pydantic v2 (where datetime already serializes to
        # ISO 8601 by default) — confirm the installed Pydantic major version.
        json_encoders = {datetime: lambda v: v.isoformat()}
148 |
```
--------------------------------------------------------------------------------
/scripts/load_example.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Example usage of the Tiingo data loader.
4 |
5 | This script demonstrates common usage patterns for loading market data
6 | from Tiingo API into the Maverick-MCP database.
7 | """
8 |
9 | import asyncio
10 | import logging
11 | import os
12 | import sys
13 | from pathlib import Path
14 |
15 | # Add parent directory to path
16 | sys.path.insert(0, str(Path(__file__).parent.parent))
17 |
18 | from maverick_mcp.data.models import SessionLocal
19 | from scripts.load_tiingo_data import ProgressTracker, TiingoDataLoader
20 |
21 | # Configure logging
22 | logging.basicConfig(level=logging.INFO)
23 | logger = logging.getLogger(__name__)
24 |
25 |
async def load_sample_stocks():
    """Load a small sample of stocks for testing."""
    sample_symbols = ["AAPL", "MSFT", "GOOGL", "AMZN", "TSLA"]
    print(f"Loading sample stocks: {', '.join(sample_symbols)}")

    # Track progress in a checkpoint file so interrupted runs can resume.
    tracker = ProgressTracker("sample_load_progress.json")
    tracker.total_symbols = len(sample_symbols)

    async with TiingoDataLoader(
        batch_size=10, max_concurrent=3, progress_tracker=tracker
    ) as loader:
        # One year of history, with technical indicators computed and stored.
        ok_count, err_count = await loader.load_batch_symbols(
            sample_symbols,
            "2023-01-01",
            calculate_indicators=True,
            store_indicators=True,
        )

        print(f"\nCompleted: {ok_count} successful, {err_count} failed")

        # Only run the screens if at least one symbol loaded successfully.
        if ok_count > 0:
            print("Running screening algorithms...")
            with SessionLocal() as session:
                results = loader.run_screening_algorithms(session)

                print("Screening results:")
                for screen_type, count in results.items():
                    print(f"  {screen_type}: {count} stocks")
57 |
58 |
async def load_sector_stocks():
    """Load stocks from a specific sector."""
    from scripts.tiingo_config import MARKET_SECTORS

    sector = "technology"
    # Keep the demo small: just the first ten symbols of the sector.
    sector_symbols = MARKET_SECTORS[sector][:10]

    print(f"Loading {sector} sector stocks: {len(sector_symbols)} symbols")

    tracker = ProgressTracker(f"{sector}_load_progress.json")
    tracker.total_symbols = len(sector_symbols)

    async with TiingoDataLoader(
        batch_size=5, max_concurrent=2, progress_tracker=tracker
    ) as loader:
        # Two years of history for the sector sample.
        ok_count, err_count = await loader.load_batch_symbols(
            sector_symbols,
            "2022-01-01",
            calculate_indicators=True,
            store_indicators=True,
        )

        print(f"\nSector loading completed: {ok_count} successful, {err_count} failed")
82 |
83 |
async def resume_interrupted_load():
    """Demonstrate resuming from a checkpoint."""
    checkpoint_file = "sample_load_progress.json"

    if not os.path.exists(checkpoint_file):
        print(f"No checkpoint file found: {checkpoint_file}")
        return

    print("Resuming from checkpoint...")

    # Restore the progress recorded by the previous (interrupted) run.
    tracker = ProgressTracker(checkpoint_file)
    tracker.load_checkpoint()

    # Get remaining symbols (this would normally come from your original symbol list)
    all_symbols = ["AAPL", "MSFT", "GOOGL", "AMZN", "TSLA", "NVDA", "META", "ADBE"]
    pending = [sym for sym in all_symbols if sym not in tracker.completed_symbols]

    if not pending:
        print("All symbols already completed!")
        return

    print(f"Resuming with {len(pending)} remaining symbols")

    async with TiingoDataLoader(
        batch_size=3, max_concurrent=2, progress_tracker=tracker
    ) as loader:
        ok_count, err_count = await loader.load_batch_symbols(
            pending,
            "2023-01-01",
            calculate_indicators=True,
            store_indicators=True,
        )

    print(f"Resume completed: {ok_count} successful, {err_count} failed")
119 |
120 |
def print_database_stats():
    """Print current database statistics."""
    from maverick_mcp.data.models import (
        MaverickStocks,
        PriceCache,
        Stock,
        TechnicalCache,
    )

    # Map display labels to their ORM models, then count each table once.
    tables = {
        "stocks": Stock,
        "price_records": PriceCache,
        "technical_indicators": TechnicalCache,
        "maverick_stocks": MaverickStocks,
    }

    with SessionLocal() as session:
        print("\n📊 Current Database Statistics:")
        for label, model in tables.items():
            print(f"  {label}: {session.query(model).count():,}")
141 |
142 |
async def main():
    """Run the interactive Tiingo loader demonstration.

    Shows database statistics, prompts the user to pick one of the
    example scenarios, runs it, and prints updated statistics. Requires
    the TIINGO_API_TOKEN environment variable to be set.
    """
    print("Tiingo Data Loader Examples")
    print("=" * 40)

    # Fail fast when the API token is missing — every loader needs it.
    if not os.getenv("TIINGO_API_TOKEN"):
        print("❌ TIINGO_API_TOKEN environment variable not set")
        print("Please set your Tiingo API token:")
        print("export TIINGO_API_TOKEN=your_token_here")
        return

    print("✅ Tiingo API token found")

    # Show current database stats
    print_database_stats()

    # Menu of examples
    print("\nSelect an example to run:")
    print("1. Load sample stocks (5 symbols)")
    print("2. Load technology sector stocks (10 symbols)")
    print("3. Resume interrupted load")
    print("4. Show database stats")
    print("0. Exit")

    try:
        choice = input("\nEnter your choice (0-4): ").strip()

        if choice == "1":
            await load_sample_stocks()
        elif choice == "2":
            await load_sector_stocks()
        elif choice == "3":
            await resume_interrupted_load()
        elif choice == "4":
            print_database_stats()
        elif choice == "0":
            print("Goodbye!")
            return
        else:
            print("Invalid choice")
            return

        # Show updated stats
        print_database_stats()

    except KeyboardInterrupt:
        print("\nOperation cancelled")
    except Exception as e:
        # logger.exception records the full traceback (logger.error with an
        # f-string did not); broad catch is acceptable at this top-level
        # script boundary. Lazy %-args avoid eager formatting.
        logger.exception("Error: %s", e)
193 |
194 |
if __name__ == "__main__":
    # Script entry point: run the interactive demo in a fresh event loop.
    asyncio.run(main())
197 |
```