This is page 2 of 39. Use http://codebase.md/wshobson/maverick-mcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .env.example
├── .github
│ ├── dependabot.yml
│ ├── FUNDING.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── config.yml
│ │ ├── feature_request.md
│ │ ├── question.md
│ │ └── security_report.md
│ ├── pull_request_template.md
│ └── workflows
│ ├── claude-code-review.yml
│ └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│ ├── launch.json
│ └── settings.json
├── alembic
│ ├── env.py
│ ├── script.py.mako
│ └── versions
│ ├── 001_initial_schema.py
│ ├── 003_add_performance_indexes.py
│ ├── 006_rename_metadata_columns.py
│ ├── 008_performance_optimization_indexes.py
│ ├── 009_rename_to_supply_demand.py
│ ├── 010_self_contained_schema.py
│ ├── 011_remove_proprietary_terms.py
│ ├── 013_add_backtest_persistence_models.py
│ ├── 014_add_portfolio_models.py
│ ├── 08e3945a0c93_merge_heads.py
│ ├── 9374a5c9b679_merge_heads_for_testing.py
│ ├── abf9b9afb134_merge_multiple_heads.py
│ ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│ ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│ ├── f0696e2cac15_add_essential_performance_indexes.py
│ └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── api
│ │ └── backtesting.md
│ ├── BACKTESTING.md
│ ├── COST_BASIS_SPECIFICATION.md
│ ├── deep_research_agent.md
│ ├── exa_research_testing_strategy.md
│ ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│ ├── PORTFOLIO.md
│ ├── SETUP_SELF_CONTAINED.md
│ └── speed_testing_framework.md
├── examples
│ ├── complete_speed_validation.py
│ ├── deep_research_integration.py
│ ├── llm_optimization_example.py
│ ├── llm_speed_demo.py
│ ├── monitoring_example.py
│ ├── parallel_research_example.py
│ ├── speed_optimization_demo.py
│ └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│ ├── __init__.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── circuit_breaker.py
│ │ ├── deep_research.py
│ │ ├── market_analysis.py
│ │ ├── optimized_research.py
│ │ ├── supervisor.py
│ │ └── technical_analysis.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── api_server.py
│ │ ├── connection_manager.py
│ │ ├── dependencies
│ │ │ ├── __init__.py
│ │ │ ├── stock_analysis.py
│ │ │ └── technical_analysis.py
│ │ ├── error_handling.py
│ │ ├── inspector_compatible_sse.py
│ │ ├── inspector_sse.py
│ │ ├── middleware
│ │ │ ├── error_handling.py
│ │ │ ├── mcp_logging.py
│ │ │ ├── rate_limiting_enhanced.py
│ │ │ └── security.py
│ │ ├── openapi_config.py
│ │ ├── routers
│ │ │ ├── __init__.py
│ │ │ ├── agents.py
│ │ │ ├── backtesting.py
│ │ │ ├── data_enhanced.py
│ │ │ ├── data.py
│ │ │ ├── health_enhanced.py
│ │ │ ├── health_tools.py
│ │ │ ├── health.py
│ │ │ ├── intelligent_backtesting.py
│ │ │ ├── introspection.py
│ │ │ ├── mcp_prompts.py
│ │ │ ├── monitoring.py
│ │ │ ├── news_sentiment_enhanced.py
│ │ │ ├── performance.py
│ │ │ ├── portfolio.py
│ │ │ ├── research.py
│ │ │ ├── screening_ddd.py
│ │ │ ├── screening_parallel.py
│ │ │ ├── screening.py
│ │ │ ├── technical_ddd.py
│ │ │ ├── technical_enhanced.py
│ │ │ ├── technical.py
│ │ │ └── tool_registry.py
│ │ ├── server.py
│ │ ├── services
│ │ │ ├── __init__.py
│ │ │ ├── base_service.py
│ │ │ ├── market_service.py
│ │ │ ├── portfolio_service.py
│ │ │ ├── prompt_service.py
│ │ │ └── resource_service.py
│ │ ├── simple_sse.py
│ │ └── utils
│ │ ├── __init__.py
│ │ ├── insomnia_export.py
│ │ └── postman_export.py
│ ├── application
│ │ ├── __init__.py
│ │ ├── commands
│ │ │ └── __init__.py
│ │ ├── dto
│ │ │ ├── __init__.py
│ │ │ └── technical_analysis_dto.py
│ │ ├── queries
│ │ │ ├── __init__.py
│ │ │ └── get_technical_analysis.py
│ │ └── screening
│ │ ├── __init__.py
│ │ ├── dtos.py
│ │ └── queries.py
│ ├── backtesting
│ │ ├── __init__.py
│ │ ├── ab_testing.py
│ │ ├── analysis.py
│ │ ├── batch_processing_stub.py
│ │ ├── batch_processing.py
│ │ ├── model_manager.py
│ │ ├── optimization.py
│ │ ├── persistence.py
│ │ ├── retraining_pipeline.py
│ │ ├── strategies
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── ml
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adaptive.py
│ │ │ │ ├── ensemble.py
│ │ │ │ ├── feature_engineering.py
│ │ │ │ └── regime_aware.py
│ │ │ ├── ml_strategies.py
│ │ │ ├── parser.py
│ │ │ └── templates.py
│ │ ├── strategy_executor.py
│ │ ├── vectorbt_engine.py
│ │ └── visualization.py
│ ├── config
│ │ ├── __init__.py
│ │ ├── constants.py
│ │ ├── database_self_contained.py
│ │ ├── database.py
│ │ ├── llm_optimization_config.py
│ │ ├── logging_settings.py
│ │ ├── plotly_config.py
│ │ ├── security_utils.py
│ │ ├── security.py
│ │ ├── settings.py
│ │ ├── technical_constants.py
│ │ ├── tool_estimation.py
│ │ └── validation.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── technical_analysis.py
│ │ └── visualization.py
│ ├── data
│ │ ├── __init__.py
│ │ ├── cache_manager.py
│ │ ├── cache.py
│ │ ├── django_adapter.py
│ │ ├── health.py
│ │ ├── models.py
│ │ ├── performance.py
│ │ ├── session_management.py
│ │ └── validation.py
│ ├── database
│ │ ├── __init__.py
│ │ ├── base.py
│ │ └── optimization.py
│ ├── dependencies.py
│ ├── domain
│ │ ├── __init__.py
│ │ ├── entities
│ │ │ ├── __init__.py
│ │ │ └── stock_analysis.py
│ │ ├── events
│ │ │ └── __init__.py
│ │ ├── portfolio.py
│ │ ├── screening
│ │ │ ├── __init__.py
│ │ │ ├── entities.py
│ │ │ ├── services.py
│ │ │ └── value_objects.py
│ │ ├── services
│ │ │ ├── __init__.py
│ │ │ └── technical_analysis_service.py
│ │ ├── stock_analysis
│ │ │ ├── __init__.py
│ │ │ └── stock_analysis_service.py
│ │ └── value_objects
│ │ ├── __init__.py
│ │ └── technical_indicators.py
│ ├── exceptions.py
│ ├── infrastructure
│ │ ├── __init__.py
│ │ ├── cache
│ │ │ └── __init__.py
│ │ ├── caching
│ │ │ ├── __init__.py
│ │ │ └── cache_management_service.py
│ │ ├── connection_manager.py
│ │ ├── data_fetching
│ │ │ ├── __init__.py
│ │ │ └── stock_data_service.py
│ │ ├── health
│ │ │ ├── __init__.py
│ │ │ └── health_checker.py
│ │ ├── persistence
│ │ │ ├── __init__.py
│ │ │ └── stock_repository.py
│ │ ├── providers
│ │ │ └── __init__.py
│ │ ├── screening
│ │ │ ├── __init__.py
│ │ │ └── repositories.py
│ │ └── sse_optimizer.py
│ ├── langchain_tools
│ │ ├── __init__.py
│ │ ├── adapters.py
│ │ └── registry.py
│ ├── logging_config.py
│ ├── memory
│ │ ├── __init__.py
│ │ └── stores.py
│ ├── monitoring
│ │ ├── __init__.py
│ │ ├── health_check.py
│ │ ├── health_monitor.py
│ │ ├── integration_example.py
│ │ ├── metrics.py
│ │ ├── middleware.py
│ │ └── status_dashboard.py
│ ├── providers
│ │ ├── __init__.py
│ │ ├── dependencies.py
│ │ ├── factories
│ │ │ ├── __init__.py
│ │ │ ├── config_factory.py
│ │ │ └── provider_factory.py
│ │ ├── implementations
│ │ │ ├── __init__.py
│ │ │ ├── cache_adapter.py
│ │ │ ├── macro_data_adapter.py
│ │ │ ├── market_data_adapter.py
│ │ │ ├── persistence_adapter.py
│ │ │ └── stock_data_adapter.py
│ │ ├── interfaces
│ │ │ ├── __init__.py
│ │ │ ├── cache.py
│ │ │ ├── config.py
│ │ │ ├── macro_data.py
│ │ │ ├── market_data.py
│ │ │ ├── persistence.py
│ │ │ └── stock_data.py
│ │ ├── llm_factory.py
│ │ ├── macro_data.py
│ │ ├── market_data.py
│ │ ├── mocks
│ │ │ ├── __init__.py
│ │ │ ├── mock_cache.py
│ │ │ ├── mock_config.py
│ │ │ ├── mock_macro_data.py
│ │ │ ├── mock_market_data.py
│ │ │ ├── mock_persistence.py
│ │ │ └── mock_stock_data.py
│ │ ├── openrouter_provider.py
│ │ ├── optimized_screening.py
│ │ ├── optimized_stock_data.py
│ │ └── stock_data.py
│ ├── README.md
│ ├── tests
│ │ ├── __init__.py
│ │ ├── README_INMEMORY_TESTS.md
│ │ ├── test_cache_debug.py
│ │ ├── test_fixes_validation.py
│ │ ├── test_in_memory_routers.py
│ │ ├── test_in_memory_server.py
│ │ ├── test_macro_data_provider.py
│ │ ├── test_mailgun_email.py
│ │ ├── test_market_calendar_caching.py
│ │ ├── test_mcp_tool_fixes_pytest.py
│ │ ├── test_mcp_tool_fixes.py
│ │ ├── test_mcp_tools.py
│ │ ├── test_models_functional.py
│ │ ├── test_server.py
│ │ ├── test_stock_data_enhanced.py
│ │ ├── test_stock_data_provider.py
│ │ └── test_technical_analysis.py
│ ├── tools
│ │ ├── __init__.py
│ │ ├── performance_monitoring.py
│ │ ├── portfolio_manager.py
│ │ ├── risk_management.py
│ │ └── sentiment_analysis.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── agent_errors.py
│ │ ├── batch_processing.py
│ │ ├── cache_warmer.py
│ │ ├── circuit_breaker_decorators.py
│ │ ├── circuit_breaker_services.py
│ │ ├── circuit_breaker.py
│ │ ├── data_chunking.py
│ │ ├── database_monitoring.py
│ │ ├── debug_utils.py
│ │ ├── fallback_strategies.py
│ │ ├── llm_optimization.py
│ │ ├── logging_example.py
│ │ ├── logging_init.py
│ │ ├── logging.py
│ │ ├── mcp_logging.py
│ │ ├── memory_profiler.py
│ │ ├── monitoring_middleware.py
│ │ ├── monitoring.py
│ │ ├── orchestration_logging.py
│ │ ├── parallel_research.py
│ │ ├── parallel_screening.py
│ │ ├── quick_cache.py
│ │ ├── resource_manager.py
│ │ ├── shutdown.py
│ │ ├── stock_helpers.py
│ │ ├── structured_logger.py
│ │ ├── tool_monitoring.py
│ │ ├── tracing.py
│ │ └── yfinance_pool.py
│ ├── validation
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── data.py
│ │ ├── middleware.py
│ │ ├── portfolio.py
│ │ ├── responses.py
│ │ ├── screening.py
│ │ └── technical.py
│ └── workflows
│ ├── __init__.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── market_analyzer.py
│ │ ├── optimizer_agent.py
│ │ ├── strategy_selector.py
│ │ └── validator_agent.py
│ ├── backtesting_workflow.py
│ └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│ ├── dev.sh
│ ├── INSTALLATION_GUIDE.md
│ ├── load_example.py
│ ├── load_market_data.py
│ ├── load_tiingo_data.py
│ ├── migrate_db.py
│ ├── README_TIINGO_LOADER.md
│ ├── requirements_tiingo.txt
│ ├── run_stock_screening.py
│ ├── run-migrations.sh
│ ├── seed_db.py
│ ├── seed_sp500.py
│ ├── setup_database.sh
│ ├── setup_self_contained.py
│ ├── setup_sp500_database.sh
│ ├── test_seeded_data.py
│ ├── test_tiingo_loader.py
│ ├── tiingo_config.py
│ └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│ ├── __init__.py
│ ├── conftest.py
│ ├── core
│ │ └── test_technical_analysis.py
│ ├── data
│ │ └── test_portfolio_models.py
│ ├── domain
│ │ ├── conftest.py
│ │ ├── test_portfolio_entities.py
│ │ └── test_technical_analysis_service.py
│ ├── fixtures
│ │ └── orchestration_fixtures.py
│ ├── integration
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── README.md
│ │ ├── run_integration_tests.sh
│ │ ├── test_api_technical.py
│ │ ├── test_chaos_engineering.py
│ │ ├── test_config_management.py
│ │ ├── test_full_backtest_workflow_advanced.py
│ │ ├── test_full_backtest_workflow.py
│ │ ├── test_high_volume.py
│ │ ├── test_mcp_tools.py
│ │ ├── test_orchestration_complete.py
│ │ ├── test_portfolio_persistence.py
│ │ ├── test_redis_cache.py
│ │ ├── test_security_integration.py.disabled
│ │ └── vcr_setup.py
│ ├── performance
│ │ ├── __init__.py
│ │ ├── test_benchmarks.py
│ │ ├── test_load.py
│ │ ├── test_profiling.py
│ │ └── test_stress.py
│ ├── providers
│ │ └── test_stock_data_simple.py
│ ├── README.md
│ ├── test_agents_router_mcp.py
│ ├── test_backtest_persistence.py
│ ├── test_cache_management_service.py
│ ├── test_cache_serialization.py
│ ├── test_circuit_breaker.py
│ ├── test_database_pool_config_simple.py
│ ├── test_database_pool_config.py
│ ├── test_deep_research_functional.py
│ ├── test_deep_research_integration.py
│ ├── test_deep_research_parallel_execution.py
│ ├── test_error_handling.py
│ ├── test_event_loop_integrity.py
│ ├── test_exa_research_integration.py
│ ├── test_exception_hierarchy.py
│ ├── test_financial_search.py
│ ├── test_graceful_shutdown.py
│ ├── test_integration_simple.py
│ ├── test_langgraph_workflow.py
│ ├── test_market_data_async.py
│ ├── test_market_data_simple.py
│ ├── test_mcp_orchestration_functional.py
│ ├── test_ml_strategies.py
│ ├── test_optimized_research_agent.py
│ ├── test_orchestration_integration.py
│ ├── test_orchestration_logging.py
│ ├── test_orchestration_tools_simple.py
│ ├── test_parallel_research_integration.py
│ ├── test_parallel_research_orchestrator.py
│ ├── test_parallel_research_performance.py
│ ├── test_performance_optimizations.py
│ ├── test_production_validation.py
│ ├── test_provider_architecture.py
│ ├── test_rate_limiting_enhanced.py
│ ├── test_runner_validation.py
│ ├── test_security_comprehensive.py.disabled
│ ├── test_security_cors.py
│ ├── test_security_enhancements.py.disabled
│ ├── test_security_headers.py
│ ├── test_security_penetration.py
│ ├── test_session_management.py
│ ├── test_speed_optimization_validation.py
│ ├── test_stock_analysis_dependencies.py
│ ├── test_stock_analysis_service.py
│ ├── test_stock_data_fetching_service.py
│ ├── test_supervisor_agent.py
│ ├── test_supervisor_functional.py
│ ├── test_tool_estimation_config.py
│ ├── test_visualization.py
│ └── utils
│ ├── test_agent_errors.py
│ ├── test_logging.py
│ ├── test_parallel_screening.py
│ └── test_quick_cache.py
├── tools
│ ├── check_orchestration_config.py
│ ├── experiments
│ │ ├── validation_examples.py
│ │ └── validation_fixed.py
│ ├── fast_dev.sh
│ ├── hot_reload.py
│ ├── quick_test.py
│ └── templates
│ ├── new_router_template.py
│ ├── new_tool_template.py
│ ├── screening_strategy_template.py
│ └── test_template.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/maverick_mcp/dependencies.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Dependency injection utilities for Maverick-MCP.
3 |
4 | This module provides factory functions and dependency injection helpers
5 | for creating instances of data providers and other services.
6 | """
7 |
8 | from typing import Annotated
9 |
10 | from fastapi import Depends
11 | from sqlalchemy.orm import Session
12 |
13 | from maverick_mcp.data.models import get_db
14 | from maverick_mcp.providers import (
15 | MacroDataProvider,
16 | MarketDataProvider,
17 | StockDataProvider,
18 | )
19 |
20 |
def get_stock_data_provider(db: Session = Depends(get_db)) -> StockDataProvider:
    """Build a StockDataProvider bound to the request's database session.

    Args:
        db: SQLAlchemy session supplied by FastAPI's dependency system.

    Returns:
        A StockDataProvider wired to the injected session.
    """
    provider = StockDataProvider(db_session=db)
    return provider
33 |
def get_market_data_provider() -> MarketDataProvider:
    """Create a fresh MarketDataProvider.

    Returns:
        A new MarketDataProvider with default configuration.
    """
    provider = MarketDataProvider()
    return provider
43 |
def get_macro_data_provider() -> MacroDataProvider:
    """Create a fresh MacroDataProvider.

    Returns:
        A new MacroDataProvider with default configuration.
    """
    provider = MacroDataProvider()
    return provider
53 |
# Type aliases for cleaner code in FastAPI routes.
# Use directly in endpoint signatures, e.g.:
#     def endpoint(provider: StockDataProviderDep) -> ...
StockDataProviderDep = Annotated[StockDataProvider, Depends(get_stock_data_provider)]
MarketDataProviderDep = Annotated[MarketDataProvider, Depends(get_market_data_provider)]
MacroDataProviderDep = Annotated[MacroDataProvider, Depends(get_macro_data_provider)]
58 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/dependencies/technical_analysis.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Dependency injection for technical analysis.
3 |
4 | This module provides FastAPI dependencies for the technical analysis
5 | domain services and application queries.
6 | """
7 |
8 | from functools import lru_cache
9 |
10 | from maverick_mcp.application.queries.get_technical_analysis import (
11 | GetTechnicalAnalysisQuery,
12 | )
13 | from maverick_mcp.domain.services.technical_analysis_service import (
14 | TechnicalAnalysisService,
15 | )
16 | from maverick_mcp.infrastructure.persistence.stock_repository import (
17 | StockDataProviderAdapter,
18 | )
19 | from maverick_mcp.providers.stock_data import StockDataProvider
20 |
21 |
@lru_cache
def get_technical_analysis_service() -> TechnicalAnalysisService:
    """Return the shared technical analysis domain service.

    The service is pure domain logic with no infrastructure
    dependencies, so a single cached instance is safe to reuse
    across requests (lru_cache keeps exactly one).
    """
    service = TechnicalAnalysisService()
    return service
31 |
32 |
@lru_cache
def get_stock_repository() -> StockDataProviderAdapter:
    """Return the cached stock repository adapter.

    Wraps the legacy StockDataProvider behind the repository
    interface so domain code stays decoupled from it; lru_cache
    reuses the same provider/adapter pair for compatibility.
    """
    provider = StockDataProvider()
    return StockDataProviderAdapter(provider)
43 |
44 |
def get_technical_analysis_query() -> GetTechnicalAnalysisQuery:
    """Assemble the application-layer technical analysis query handler.

    Orchestrates the (cached) domain service and repository; a new
    query object is built per call, but its collaborators are shared.
    """
    repository = get_stock_repository()
    service = get_technical_analysis_service()
    return GetTechnicalAnalysisQuery(
        stock_repository=repository,
        technical_service=service,
    )
56 |
```
--------------------------------------------------------------------------------
/maverick_mcp/backtesting/strategies/base.py:
--------------------------------------------------------------------------------
```python
1 | """Base strategy class for VectorBT."""
2 |
3 | from abc import ABC, abstractmethod
4 | from typing import Any
5 |
6 | from pandas import DataFrame, Series
7 |
8 |
9 | class Strategy(ABC):
10 | """Abstract base class for trading strategies."""
11 |
12 | def __init__(self, parameters: dict[str, Any] = None):
13 | """Initialize strategy with parameters.
14 |
15 | Args:
16 | parameters: Strategy parameters
17 | """
18 | self.parameters = parameters or {}
19 |
20 | @abstractmethod
21 | def generate_signals(self, data: DataFrame) -> tuple[Series, Series]:
22 | """Generate entry and exit signals.
23 |
24 | Args:
25 | data: Price data with OHLCV columns
26 |
27 | Returns:
28 | Tuple of (entry_signals, exit_signals) as boolean Series
29 | """
30 | pass
31 |
32 | @property
33 | @abstractmethod
34 | def name(self) -> str:
35 | """Get strategy name."""
36 | pass
37 |
38 | @property
39 | @abstractmethod
40 | def description(self) -> str:
41 | """Get strategy description."""
42 | pass
43 |
44 | def validate_parameters(self) -> bool:
45 | """Validate strategy parameters.
46 |
47 | Returns:
48 | True if parameters are valid
49 | """
50 | return True
51 |
52 | def get_default_parameters(self) -> dict[str, Any]:
53 | """Get default parameters for the strategy.
54 |
55 | Returns:
56 | Dictionary of default parameters
57 | """
58 | return {}
59 |
60 | def to_dict(self) -> dict[str, Any]:
61 | """Convert strategy to dictionary representation.
62 |
63 | Returns:
64 | Dictionary with strategy details
65 | """
66 | return {
67 | "name": self.name,
68 | "description": self.description,
69 | "parameters": self.parameters,
70 | "default_parameters": self.get_default_parameters(),
71 | }
72 |
```
--------------------------------------------------------------------------------
/maverick_mcp/tests/test_fixes_validation.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Simple test to validate MCP tool fixes are working.
3 |
4 | This test runs the comprehensive fix validation script
5 | and ensures it passes all checks.
6 | """
7 |
8 | import subprocess
9 | import sys
10 | from pathlib import Path
11 |
12 | import pytest
13 |
14 |
@pytest.mark.integration
def test_mcp_tool_fixes_validation():
    """
    Test that all MCP tool fixes are working by running the validation script.

    This test executes the comprehensive test script and verifies all fixes pass.
    """
    # The validation script lives next to this file.
    script_path = Path(__file__).parent / "test_mcp_tool_fixes.py"

    completed = subprocess.run(
        [sys.executable, str(script_path)],
        capture_output=True,
        text=True,
        timeout=120,  # 2 minute timeout
    )

    assert completed.returncode == 0, (
        f"MCP tool fixes validation failed:\nSTDOUT:\n{completed.stdout}\nSTDERR:\n{completed.stderr}"
    )

    # Each (needle, failure message) pair must appear in the script output:
    # overall success banner, pass/fail counts, then the four individual fixes.
    output = completed.stdout
    expectations = [
        ("🎉 All MCP tool fixes are working correctly!", "Expected success message not found"),
        ("✅ Passed: 4/4", "Expected 4/4 tests to pass"),
        ("❌ Failed: 0/4", "Expected 0/4 tests to fail"),
        ("✅ Research tools return actual content", "Research fix not validated"),
        ("✅ Portfolio risk analysis works", "Portfolio fix not validated"),
        ("✅ Stock info graceful fallback", "Stock info fix not validated"),
        ("✅ LLM configuration compatible", "LLM fix not validated"),
    ]
    for needle, message in expectations:
        assert needle in output, message


if __name__ == "__main__":
    # Allow running this test directly
    test_mcp_tool_fixes_validation()
58 |
```
--------------------------------------------------------------------------------
/tests/domain/conftest.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Minimal conftest for domain tests only.
3 |
4 | This conftest avoids importing heavy dependencies like testcontainers,
5 | httpx, or database connections since domain tests should be isolated
6 | from infrastructure concerns.
7 | """
8 |
9 | import os
10 |
11 | import pytest
12 |
13 | # Set test environment
14 | os.environ["MAVERICK_TEST_ENV"] = "true"
15 |
16 |
# Override session-scoped fixtures from parent conftest to prevent
# Docker containers from being started for domain tests.
# Each override shadows the parent fixture of the same name; any domain
# test that requests one (directly or transitively) is skipped instead
# of triggering container startup.
@pytest.fixture(scope="session")
def postgres_container():
    """Domain tests don't need PostgreSQL containers."""
    pytest.skip("Domain tests don't require database containers")


@pytest.fixture(scope="session")
def redis_container():
    """Domain tests don't need Redis containers."""
    pytest.skip("Domain tests don't require cache containers")


@pytest.fixture(scope="session")
def database_url():
    """Domain tests don't need database URLs."""
    pytest.skip("Domain tests don't require database connections")


@pytest.fixture(scope="session")
def redis_url():
    """Domain tests don't need Redis URLs."""
    pytest.skip("Domain tests don't require cache connections")


@pytest.fixture(scope="session")
def engine():
    """Domain tests don't need database engines."""
    pytest.skip("Domain tests don't require database engines")


@pytest.fixture(scope="function")
def db_session():
    """Domain tests don't need database sessions."""
    pytest.skip("Domain tests don't require database sessions")


@pytest.fixture(scope="session", autouse=True)
def setup_test_env():
    """Minimal test environment setup for domain tests."""
    # autouse=True: applied to every domain test without being requested.
    os.environ["ENVIRONMENT"] = "test"
    os.environ["LOG_LEVEL"] = "INFO"
    # Domain tests run without authentication or usage gating
    os.environ["AUTH_ENABLED"] = "false"
    yield
63 |
```
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python

import tomllib

from setuptools import find_packages, setup

# Read the contents of the README file so PyPI can render it as the
# long description.
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

# Read the runtime dependencies from pyproject.toml using the stdlib
# TOML parser (tomllib, Python 3.11+; this project requires >=3.12).
# Proper parsing avoids the failure modes of the previous naive string
# splitting: a ']' inside a version specifier, quoted commas, and
# comments could all corrupt the extracted dependency list.
with open("pyproject.toml", "rb") as f:
    _pyproject = tomllib.load(f)
dependencies = list(_pyproject.get("project", {}).get("dependencies", []))

setup(
    name="maverick_mcp",
    version="0.1.0",
    description="Maverick-MCP is a Python MCP server for financial market analysis and trading strategies.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="MaverickMCP Contributors",
    author_email="",
    url="https://github.com/wshobson/maverick-mcp",
    packages=find_packages(),
    include_package_data=True,
    python_requires=">=3.12",
    install_requires=dependencies,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Financial and Insurance Industry",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.12",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Office/Business :: Financial :: Investment",
    ],
    # No console scripts needed as we're running the API server directly
)
48 |
```
--------------------------------------------------------------------------------
/tests/test_market_data_simple.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Simple test to verify market data provider functionality.
4 | """
5 |
6 | import pytest
7 |
8 | from maverick_mcp.providers.market_data import MarketDataProvider
9 |
10 |
@pytest.mark.integration
@pytest.mark.external
def test_market_data():
    """Exercise each MarketDataProvider query and print a short report."""
    market = MarketDataProvider()

    print("Testing Market Data Provider")
    print("=" * 50)

    # 1. Market summary: mapping of index -> quote details.
    print("\n1. Testing market summary...")
    summary = market.get_market_summary()
    print(f"   Found {len(summary)} indices")
    if summary:
        first_indices = list(summary.items())[:3]
        for _, data in first_indices:
            print(f"   {data['name']}: ${data['price']} ({data['change_percent']}%)")

    # 2. Top gainers.
    print("\n2. Testing top gainers...")
    top_gainers = market.get_top_gainers(5)
    print(f"   Found {len(top_gainers)} gainers")
    for stock in top_gainers[:3]:
        print(f"   {stock['symbol']}: ${stock['price']} (+{stock['change_percent']}%)")

    # 3. Top losers.
    print("\n3. Testing top losers...")
    top_losers = market.get_top_losers(5)
    print(f"   Found {len(top_losers)} losers")
    for stock in top_losers[:3]:
        print(f"   {stock['symbol']}: ${stock['price']} ({stock['change_percent']}%)")

    # 4. Most active stocks by volume.
    print("\n4. Testing most active...")
    most_active = market.get_most_active(5)
    print(f"   Found {len(most_active)} active stocks")
    for stock in most_active[:3]:
        print(f"   {stock['symbol']}: ${stock['price']} (Vol: {stock['volume']:,})")

    # 5. Sector performance percentages.
    print("\n5. Testing sector performance...")
    sectors = market.get_sector_performance()
    print(f"   Found {len(sectors)} sectors")
    sector_sample = list(sectors.items())[:3]
    for sector, perf in sector_sample:
        print(f"   {sector}: {perf}%")

    print("\n✅ Test completed!")


if __name__ == "__main__":
    test_market_data()
61 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/dependencies/stock_analysis.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Dependency injection for stock analysis services.
3 | """
4 |
5 | from fastapi import Depends
6 | from sqlalchemy.orm import Session
7 |
8 | from maverick_mcp.data.session_management import get_db_session
9 | from maverick_mcp.domain.stock_analysis import StockAnalysisService
10 | from maverick_mcp.infrastructure.caching import CacheManagementService
11 | from maverick_mcp.infrastructure.data_fetching import StockDataFetchingService
12 |
13 |
def get_stock_data_fetching_service() -> StockDataFetchingService:
    """Create the stock data fetching service.

    Returns:
        A StockDataFetchingService configured with a 30 second timeout
        and up to 3 retries per request.
    """
    service = StockDataFetchingService(timeout=30, max_retries=3)
    return service
22 |
23 |
def get_cache_management_service(
    db_session: Session | None = Depends(get_db_session),
) -> CacheManagementService:
    """Create the cache management service bound to a database session.

    Args:
        db_session: Optional database session injected by FastAPI.

    Returns:
        A CacheManagementService that caches entries for one day.
    """
    service = CacheManagementService(db_session=db_session, cache_days=1)
    return service
37 |
38 |
def get_stock_analysis_service(
    data_fetching_service: StockDataFetchingService = Depends(
        get_stock_data_fetching_service
    ),
    cache_service: CacheManagementService = Depends(get_cache_management_service),
    db_session: Session | None = Depends(get_db_session),
) -> StockAnalysisService:
    """Create the stock analysis service with all collaborators injected.

    Args:
        data_fetching_service: Fetches data from external sources.
        cache_service: Manages the read-through cache.
        db_session: Optional database session injected by FastAPI.

    Returns:
        A fully wired StockAnalysisService.
    """
    service = StockAnalysisService(
        data_fetching_service=data_fetching_service,
        cache_service=cache_service,
        db_session=db_session,
    )
    return service
62 |
```
--------------------------------------------------------------------------------
/.github/workflows/claude.yml:
--------------------------------------------------------------------------------
```yaml
# Runs Claude Code when "@claude" is mentioned in an issue, issue
# comment, PR review comment, or PR review.
name: Claude Code

on:
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]
  issues:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]

jobs:
  claude:
    # Gate: only run when the triggering comment/review body — or, for
    # issues, the body or title — actually mentions @claude.
    if: |
      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
      issues: read
      id-token: write
      actions: read # Required for Claude to read CI results on PRs
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Run Claude Code
        id: claude
        uses: anthropics/claude-code-action@v1
        with:
          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}

          # This is an optional setting that allows Claude to read CI results on PRs
          additional_permissions: |
            actions: read

          # Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
          # prompt: 'Update the pull request description to include a summary of changes.'

          # Optional: Add claude_args to customize behavior and configuration
          # See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
          # or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
          # claude_args: '--allowed-tools Bash(gh pr:*)'
51 |
```
--------------------------------------------------------------------------------
/tests/test_cache_serialization.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for secure cache serialization helpers."""
2 |
3 | from __future__ import annotations
4 |
5 | import pandas as pd
6 | import pandas.testing as pdt
7 | import pytest
8 |
9 | from maverick_mcp.data import cache as cache_module
10 |
11 |
@pytest.fixture(autouse=True)
def _memory_cache_cleanup(monkeypatch: pytest.MonkeyPatch) -> None:
    """Force every test onto a clean, Redis-free in-memory cache."""

    # Pretend Redis is unavailable so all cache traffic hits the memory cache.
    monkeypatch.setattr(cache_module, "get_redis_client", lambda: None)
    # Begin each test from an empty in-process cache.
    cache_module._memory_cache.clear()
18 |
19 |
def test_dataframe_round_trip() -> None:
    """DataFrames should round-trip through the cache without pickle usage."""

    cache_key = "test:dataframe"
    expected = pd.DataFrame(
        {"open": [1.0, 2.0], "close": [1.5, 2.5]},
        index=pd.to_datetime(["2024-01-01", "2024-01-02"]),
    )

    # The write must succeed and the read must yield an equal DataFrame.
    assert cache_module.save_to_cache(cache_key, expected, ttl=60)
    restored = cache_module.get_from_cache(cache_key)
    assert isinstance(restored, pd.DataFrame)
    pdt.assert_frame_equal(restored, expected)
33 |
34 |
def test_dict_with_dataframe_round_trip() -> None:
    """Dictionaries containing DataFrames should round-trip safely."""

    cache_key = "test:dict"
    nested_frame = pd.DataFrame(
        {"volume": [100, 200]},
        index=pd.to_datetime(["2024-01-03", "2024-01-04"]),
    )
    payload = {
        "meta": {"status": "ok"},
        "frame": nested_frame,
        "values": [1, 2, 3],
    }

    assert cache_module.save_to_cache(cache_key, payload, ttl=60)
    restored = cache_module.get_from_cache(cache_key)
    assert isinstance(restored, dict)
    # Plain values compare directly; the DataFrame needs pandas' comparison.
    assert restored["meta"] == payload["meta"]
    assert restored["values"] == payload["values"]
    pdt.assert_frame_equal(restored["frame"], nested_frame)
55 |
56 |
def test_unsupported_type_not_cached() -> None:
    """Unsupported data types should not be cached silently."""

    class _Unsupported:
        pass

    cache_key = "test:unsupported"
    # The save must be rejected and no memory-cache entry may remain.
    assert not cache_module.save_to_cache(cache_key, _Unsupported(), ttl=60)
    assert cache_key not in cache_module._memory_cache
66 |
```
--------------------------------------------------------------------------------
/.github/workflows/claude-code-review.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Claude Code Review
2 |
3 | on:
4 | pull_request:
5 | types: [opened, synchronize]
6 | # Optional: Only run on specific file changes
7 | # paths:
8 | # - "src/**/*.ts"
9 | # - "src/**/*.tsx"
10 | # - "src/**/*.js"
11 | # - "src/**/*.jsx"
12 |
13 | jobs:
14 | claude-review:
15 | # Optional: Filter by PR author
16 | # if: |
17 | # github.event.pull_request.user.login == 'external-contributor' ||
18 | # github.event.pull_request.user.login == 'new-developer' ||
19 | # github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR'
20 |
21 | runs-on: ubuntu-latest
22 | permissions:
23 | contents: read
24 | pull-requests: read
25 | issues: read
26 | id-token: write
27 |
28 | steps:
29 | - name: Checkout repository
30 | uses: actions/checkout@v4
31 | with:
32 | fetch-depth: 1
33 |
34 | - name: Run Claude Code Review
35 | id: claude-review
36 | uses: anthropics/claude-code-action@v1
37 | with:
38 | claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
39 | prompt: |
40 | REPO: ${{ github.repository }}
41 | PR NUMBER: ${{ github.event.pull_request.number }}
42 |
43 | Please review this pull request and provide feedback on:
44 | - Code quality and best practices
45 | - Potential bugs or issues
46 | - Performance considerations
47 | - Security concerns
48 | - Test coverage
49 |
50 | Use the repository's CLAUDE.md for guidance on style and conventions. Be constructive and helpful in your feedback.
51 |
52 | Use `gh pr comment` with your Bash tool to leave your review as a comment on the PR.
53 |
54 | # See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
55 | # or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
56 | claude_args: '--allowed-tools "Bash(gh issue view:*),Bash(gh search:*),Bash(gh issue list:*),Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*),Bash(gh pr list:*)"'
57 |
58 |
```
--------------------------------------------------------------------------------
/tests/test_stock_analysis_dependencies.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Tests for stock analysis service dependencies.
3 | """
4 |
5 | from unittest.mock import Mock
6 |
7 | from maverick_mcp.api.dependencies.stock_analysis import (
8 | get_cache_management_service,
9 | get_stock_analysis_service,
10 | get_stock_data_fetching_service,
11 | )
12 | from maverick_mcp.domain.stock_analysis import StockAnalysisService
13 | from maverick_mcp.infrastructure.caching import CacheManagementService
14 | from maverick_mcp.infrastructure.data_fetching import StockDataFetchingService
15 |
16 |
class TestStockAnalysisDependencies:
    """Test cases for stock analysis service dependency injection."""

    def test_get_stock_data_fetching_service(self):
        """The factory should build a fetching service with default limits."""
        service = get_stock_data_fetching_service()

        assert isinstance(service, StockDataFetchingService)
        assert service.timeout == 30
        assert service.max_retries == 3

    def test_get_cache_management_service(self):
        """The factory should wire the provided session into the cache service."""
        session_stub = Mock()

        service = get_cache_management_service(db_session=session_stub)

        assert isinstance(service, CacheManagementService)
        assert service._db_session == session_stub
        assert service.cache_days == 1

    def test_get_stock_analysis_service(self):
        """The factory should pass every dependency through to the service."""
        fetching_stub = Mock(spec=StockDataFetchingService)
        cache_stub = Mock(spec=CacheManagementService)
        session_stub = Mock()

        service = get_stock_analysis_service(
            data_fetching_service=fetching_stub,
            cache_service=cache_stub,
            db_session=session_stub,
        )

        assert isinstance(service, StockAnalysisService)
        assert service.data_fetching_service == fetching_stub
        assert service.cache_service == cache_stub
        assert service.db_session == session_stub
57 |
```
--------------------------------------------------------------------------------
/tests/integration/vcr_setup.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | VCR.py setup for mocking external API calls.
3 | """
4 |
5 | from pathlib import Path
6 |
7 | import vcr
8 |
9 | # Base directory for cassettes
10 | CASSETTE_DIR = Path(__file__).parent.parent / "fixtures" / "vcr_cassettes"
11 | CASSETTE_DIR.mkdir(parents=True, exist_ok=True)
12 |
13 |
def get_vcr_config():
    """Return the default VCR configuration dictionary."""
    # Credentials must never be written into cassettes.
    sensitive_headers = [
        "authorization",
        "api-key",
        "x-api-key",
        "cookie",
        "set-cookie",
    ]
    return {
        "cassette_library_dir": str(CASSETTE_DIR),
        "record_mode": "once",  # Record once, then replay
        "match_on": ["method", "scheme", "host", "port", "path", "query"],
        "filter_headers": sensitive_headers,
        "filter_query_parameters": ["apikey", "token", "key"],
        "filter_post_data_parameters": ["api_key", "token", "password"],
        "decode_compressed_response": True,
        "allow_playback_repeats": True,
    }
32 |
33 |
34 | # Pre-configured VCR instance
35 | configured_vcr = vcr.VCR(**get_vcr_config())
36 |
37 |
def use_cassette(cassette_name: str):
    """Return a decorator that records/replays the named VCR cassette.

    Example:
        @use_cassette("test_external_api.yaml")
        async def test_something():
            # Make external API calls here
            pass
    """
    # Delegate to the shared, pre-configured VCR instance.
    return configured_vcr.use_cassette(cassette_name)
49 |
50 |
51 | # Specific VCR configurations for different APIs
def yfinance_vcr():
    """VCR configuration specific to yfinance API."""
    config = get_vcr_config()
    # yfinance query parameters vary per call, so match more loosely.
    config["match_on"] = ["method", "host", "path"]
    config["filter_query_parameters"] += ["period1", "period2", "interval"]
    return vcr.VCR(**config)
58 |
59 |
def external_api_vcr():
    """VCR configuration specific to External API."""
    config = get_vcr_config()
    # RapidAPI credentials travel in these headers; scrub them as well.
    config["filter_headers"] += ["x-rapidapi-key", "x-rapidapi-host"]
    return vcr.VCR(**config)
66 |
67 |
def finviz_vcr():
    """VCR configuration specific to finvizfinance."""
    config = get_vcr_config()
    # Match on the query string too, but ignore scheme/port differences.
    config["match_on"] = ["method", "host", "path", "query"]
    return vcr.VCR(**config)
73 |
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/persistence/stock_repository.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Stock repository implementation.
3 |
4 | This is the infrastructure layer implementation that adapts
5 | the existing StockDataProvider to the domain repository interface.
6 | """
7 |
8 | import pandas as pd
9 |
10 | from maverick_mcp.providers.stock_data import StockDataProvider
11 |
12 |
class StockDataProviderAdapter:
    """Adapter exposing StockDataProvider through the domain repository API.

    Wraps the legacy provider so it can be used from the domain-driven
    architecture while keeping backwards compatibility for existing callers.
    """

    def __init__(self, stock_provider: StockDataProvider | None = None):
        """Create the adapter.

        Args:
            stock_provider: Existing stock data provider; a fresh
                StockDataProvider is constructed when omitted.
        """
        self.stock_provider = stock_provider or StockDataProvider()

    def get_price_data(
        self, symbol: str, start_date: str, end_date: str
    ) -> pd.DataFrame:
        """Fetch historical price data for a stock.

        Adapts the legacy StockDataProvider interface to the domain
        repository interface.

        Args:
            symbol: Stock ticker symbol
            start_date: Start date in YYYY-MM-DD format
            end_date: End date in YYYY-MM-DD format

        Returns:
            DataFrame with lowercase price columns
            (open, high, low, close, volume)
        """
        # The legacy provider already handles caching and fallbacks.
        frame = self.stock_provider.get_stock_data(symbol, start_date, end_date)
        # Normalise column names to lowercase for downstream consistency.
        frame.columns = frame.columns.str.lower()
        return frame

    async def get_price_data_async(
        self, symbol: str, start_date: str, end_date: str
    ) -> pd.DataFrame:
        """Async wrapper around get_price_data.

        Currently delegates to the synchronous implementation; a true async
        path (e.g. async database queries) can replace this later.
        """
        return self.get_price_data(symbol, start_date, end_date)
67 |
```
--------------------------------------------------------------------------------
/scripts/setup_sp500_database.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# S&P 500 database setup: migrate the schema, then seed S&P 500 symbols.

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color

echo -e "${BLUE}🚀 MaverickMCP S&P 500 Database Setup${NC}"
echo "======================================"

# Check environment
echo -e "${YELLOW}📋 Environment Check:${NC}"

# Check for required environment variables
if [[ -z "${TIINGO_API_KEY}" ]]; then
    echo -e "  TIINGO_API_KEY: ${YELLOW}⚠️  Not set (optional for yfinance)${NC}"
else
    echo -e "  TIINGO_API_KEY: ${GREEN}✅ Set${NC}"
fi

# Show database URL (defaults to a local SQLite file)
DATABASE_URL=${DATABASE_URL:-"sqlite:///maverick.db"}
echo "  DATABASE_URL: $DATABASE_URL"

# Clear existing database for fresh S&P 500 start
if [[ "$DATABASE_URL" == "sqlite:///"* ]]; then
    # Strip the sqlite:/// scheme with parameter expansion instead of piping
    # an unquoted value through sed (safe for paths containing spaces).
    DB_FILE="${DATABASE_URL#sqlite:///}"
    if [[ -f "$DB_FILE" ]]; then
        echo -e "${YELLOW}🗑️  Removing existing database for fresh S&P 500 setup...${NC}"
        rm "$DB_FILE"
    fi
fi

# Run database migration
echo -e "${BLUE}1️⃣  Running database migration...${NC}"
echo "--------------------------------"
if uv run python scripts/migrate_db.py; then
    echo -e "${GREEN}✅ Migration completed successfully${NC}"
else
    echo -e "${RED}❌ Migration failed${NC}"
    exit 1
fi

# Run S&P 500 seeding
echo -e "${BLUE}2️⃣  Running S&P 500 database seeding...${NC}"
echo "-------------------------------------"
if uv run python scripts/seed_sp500.py; then
    echo -e "${GREEN}✅ S&P 500 seeding completed successfully${NC}"
else
    echo -e "${RED}❌ S&P 500 seeding failed${NC}"
    exit 1
fi

echo ""
echo -e "${GREEN}🎉 S&P 500 database setup completed successfully!${NC}"
echo ""
echo -e "${BLUE}Next steps:${NC}"
echo "1. Run the MCP server: ${YELLOW}make dev${NC}"
echo "2. Connect with Claude Desktop using mcp-remote"
echo "3. Test with: ${YELLOW}'Show me top S&P 500 momentum stocks'${NC}"
echo ""
echo -e "${BLUE}Available S&P 500 screening tools:${NC}"
echo "- get_maverick_recommendations (bullish momentum stocks)"
echo "- get_maverick_bear_recommendations (bearish setups)"
echo "- get_trending_breakout_recommendations (supply/demand breakouts)"
```
--------------------------------------------------------------------------------
/tools/fast_dev.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
#
# Fast Development Startup Script
# Skips all checks and uses in-memory database for < 3 second startup
#

set -e

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'

echo -e "${CYAN}⚡ Fast Dev Mode - Skipping all checks for speed${NC}"

# Set ultra-fast environment
export AUTH_ENABLED=false
export DATABASE_URL="sqlite:///:memory:"
export REDIS_HOST="none"  # Skip Redis
export SKIP_VALIDATION=true
export SKIP_MIGRATIONS=true
export LOG_LEVEL=WARNING  # Reduce log noise
export STARTUP_MODE=fast

# Change to project root
cd "$(dirname "$0")/.."

# Create minimal .env if not exists
if [ ! -f .env ]; then
    cat > .env << EOF
AUTH_ENABLED=false
DATABASE_URL=sqlite:///:memory:
REDIS_HOST=none
SKIP_VALIDATION=true
LOG_LEVEL=WARNING
EOF
    echo -e "${YELLOW}Created minimal .env for fast mode${NC}"
fi

# Start time tracking
START_TIME=$(date +%s)

# Launch server directly without checks
echo -e "${GREEN}Starting server in fast mode...${NC}"

# Create a minimal launcher that skips all initialization
python -c "
import os
os.environ['STARTUP_MODE'] = 'fast'
os.environ['AUTH_ENABLED'] = 'false'
os.environ['DATABASE_URL'] = 'sqlite:///:memory:'
os.environ['SKIP_VALIDATION'] = 'true'

# Minimal imports only
import asyncio
import uvicorn
from fastmcp import FastMCP

# Create minimal server
mcp = FastMCP(
    name='MaverickMCP-Fast',
    debug=True,
    log_level='WARNING'
)

# Add one test tool to verify it's working
@mcp.tool()
async def test_fast_mode():
    return {'status': 'Fast mode active!', 'startup_time': '< 3 seconds'}

# Direct startup without any checks
if __name__ == '__main__':
    print('🚀 Server starting on http://localhost:8000')
    mcp.run(transport='sse', port=8000, host='0.0.0.0')
" &

SERVER_PID=$!

# Ensure the background server is stopped when this script exits.
trap 'kill "$SERVER_PID" 2>/dev/null || true' EXIT INT TERM

# Wait (up to 15s) for the server to accept connections so the reported
# startup time reflects actual readiness, not just forking the child.
# Previously the time was sampled immediately after '&' and always read ~0s.
for _ in $(seq 1 150); do
    if (exec 3<>/dev/tcp/localhost/8000) 2>/dev/null; then
        break
    fi
    sleep 0.1
done

# Calculate startup time
END_TIME=$(date +%s)
STARTUP_TIME=$((END_TIME - START_TIME))

echo -e "${GREEN}✨ Server started in ${STARTUP_TIME} seconds!${NC}"
echo -e "${CYAN}Access at: http://localhost:8000/sse${NC}"
echo -e "${YELLOW}Note: This is a minimal server - add your tools to test${NC}"
echo -e "\nPress Ctrl+C to stop"

# Wait for server
wait $SERVER_PID
```
--------------------------------------------------------------------------------
/scripts/setup_database.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Complete database setup script for MaverickMCP.
#
# Runs the migration and seeding process to set up a complete working
# database for the MaverickMCP application.
#
# NOTE: the previous version opened with a Python-style triple-quoted
# docstring, which bash tries to execute as a command; plain '#' comments
# are the correct form for shell scripts.

set -e  # Exit on any error

echo "🚀 MaverickMCP Database Setup"
echo "=============================="

# Change to project root directory
cd "$(dirname "$0")/.."

# Check if virtual environment is activated
if [[ -z "${VIRTUAL_ENV}" ]] && [[ ! -d ".venv" ]]; then
    echo "⚠️  Warning: No virtual environment detected"
    echo "   Consider running: python -m venv .venv && source .venv/bin/activate"
    echo ""
fi

# Check for required environment variables
if [[ -z "${TIINGO_API_KEY}" ]]; then
    echo "❌ TIINGO_API_KEY environment variable is required!"
    echo ""
    echo "To get started:"
    echo "1. Sign up for a free account at https://tiingo.com"
    echo "2. Get your API key from the dashboard"
    echo "3. Add it to your .env file: TIINGO_API_KEY=your_api_key_here"
    echo "4. Or export it: export TIINGO_API_KEY=your_api_key_here"
    echo ""
    exit 1
fi

echo "📋 Environment Check:"
echo "   TIINGO_API_KEY: ✅ Set"
if [[ -n "${DATABASE_URL}" ]]; then
    echo "   DATABASE_URL: ${DATABASE_URL}"
else
    echo "   DATABASE_URL: sqlite:///./maverick_mcp.db (default)"
fi
echo ""

echo "1️⃣  Running database migration..."
echo "--------------------------------"
# Use the command itself as the condition: with 'set -e' in effect, a bare
# command followed by an '[ $? -eq 0 ]' check would never reach its else
# branch, because the script aborts on the failing command first.
if python scripts/migrate_db.py; then
    echo "✅ Migration completed successfully"
else
    echo "❌ Migration failed"
    exit 1
fi
echo ""

echo "2️⃣  Running database seeding..."
echo "------------------------------"
if python scripts/seed_db.py; then
    echo "✅ Seeding completed successfully"
else
    echo "❌ Seeding failed"
    exit 1
fi
echo ""

echo "🎉 Database setup completed successfully!"
echo ""
echo "Next steps:"
echo "1. Run the MCP server: make dev"
echo "2. Connect with Claude Desktop using mcp-remote"
echo "3. Test with: 'Show me technical analysis for AAPL'"
echo ""
echo "Available screening tools:"
echo "- get_maverick_recommendations (bullish momentum stocks)"
echo "- get_maverick_bear_recommendations (bearish setups)"
echo "- get_trending_breakout_recommendations (breakout candidates)"
--------------------------------------------------------------------------------
/maverick_mcp/api/simple_sse.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Simple SSE implementation for MCP Inspector compatibility.
3 |
4 | This implements a direct SSE handler that works with MCP Inspector's expectations.
5 | """
6 |
7 | import asyncio
8 | import logging
9 | from uuid import uuid4
10 |
11 | from mcp import types
12 | from mcp.server.session import ServerSession
13 | from starlette.requests import Request
14 | from starlette.responses import StreamingResponse
15 |
16 | logger = logging.getLogger(__name__)
17 |
18 |
class SimpleSSEHandler:
    """Minimal SSE handler compatible with MCP Inspector clients."""

    def __init__(self):
        # Active MCP sessions keyed by their generated session id.
        self.sessions: dict[str, ServerSession] = {}

    async def handle_sse(self, request: Request):
        """Handle SSE connection with bidirectional JSON-RPC over SSE."""
        session_id = str(uuid4())
        logger.info(f"New Simple SSE connection: {session_id}")

        # Register an MCP session for this connection.
        session = ServerSession(
            create_initialization_options=lambda: types.InitializationOptions(
                server_name="MaverickMCP", server_version="1.0.0"
            )
        )
        self.sessions[session_id] = session

        async def event_generator():
            """Yield periodic keepalive comments until the client disconnects."""
            try:
                # Inspector delivers its messages via POST; this stream only
                # needs to stay open, so emit a keepalive every 30 seconds.
                while True:
                    await asyncio.sleep(30)
                    yield ": keepalive\n\n"
            finally:
                # Drop the session once the client goes away.
                self.sessions.pop(session_id, None)
                logger.info(f"Simple SSE connection closed: {session_id}")

        # SSE response headers: disable buffering/caching and allow CORS so
        # Inspector can connect from another origin.
        sse_headers = {
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
            "Access-Control-Allow-Headers": "*",
            "Access-Control-Allow-Credentials": "true",
        }
        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers=sse_headers,
        )


# Create global handler instance
simple_sse = SimpleSSEHandler()
71 |
```
--------------------------------------------------------------------------------
/maverick_mcp/backtesting/batch_processing_stub.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Lightweight batch processing stub for import compatibility.
3 |
4 | This module provides basic batch processing method stubs that can be imported
5 | even when heavy dependencies like VectorBT, NumPy, etc. are not available.
6 | """
7 |
8 | import logging
9 | from typing import Any
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 |
class BatchProcessingStub:
    """Lightweight batch processing stub class.

    Every public coroutine raises ImportError to signal that the real
    implementation needs VectorBT and the other heavy dependencies.
    """

    def _missing_dependencies(self) -> ImportError:
        # Single place for the error all stubbed methods raise.
        return ImportError("Batch processing requires VectorBT and other dependencies")

    async def run_batch_backtest(
        self,
        batch_configs: list[dict[str, Any]],
        max_workers: int = 6,
        chunk_size: int = 10,
        validate_data: bool = True,
        fail_fast: bool = False,
    ) -> dict[str, Any]:
        """Stub for run_batch_backtest method."""
        raise self._missing_dependencies()

    async def batch_optimize_parameters(
        self,
        optimization_configs: list[dict[str, Any]],
        max_workers: int = 4,
        optimization_method: str = "grid_search",
        max_iterations: int = 100,
    ) -> dict[str, Any]:
        """Stub for batch_optimize_parameters method."""
        raise self._missing_dependencies()

    async def batch_validate_strategies(
        self,
        validation_configs: list[dict[str, Any]],
        validation_start_date: str,
        validation_end_date: str,
        max_workers: int = 6,
    ) -> dict[str, Any]:
        """Stub for batch_validate_strategies method."""
        raise self._missing_dependencies()

    async def get_batch_results(
        self, batch_id: str, include_detailed_results: bool = False
    ) -> dict[str, Any] | None:
        """Stub for get_batch_results method."""
        raise self._missing_dependencies()

    async def batch_optimize(self, *args, **kwargs):
        """Alias for batch_optimize_parameters for backward compatibility."""
        return await self.batch_optimize_parameters(*args, **kwargs)
58 |
59 |
class VectorBTEngineStub(BatchProcessingStub):
    """Stub VectorBT engine that provides batch processing methods."""

    def __init__(self, *args, **kwargs):
        """Initialize stub engine, warning that real deps are missing."""
        logger.warning(
            "VectorBT dependencies not available - using stub implementation"
        )

    def __getattr__(self, name):
        """Provide async stubs for any missing batch/backtest methods."""
        is_stubbed = name.startswith("batch") or name in [
            "run_backtest",
            "optimize_strategy",
        ]
        if not is_stubbed:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{name}'"
            )

        async def stub_method(*args, **kwargs):
            raise ImportError(
                f"Method {name} requires VectorBT and other dependencies"
            )

        return stub_method
82 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/services/base_service.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Base service class for MaverickMCP API services.
3 |
4 | Provides common functionality and dependency injection patterns
5 | for all service classes.
6 | """
7 |
8 | from abc import ABC, abstractmethod
9 | from typing import Any
10 |
11 | from fastmcp import FastMCP
12 | from sqlalchemy.ext.asyncio import AsyncSession
13 |
14 | # Auth imports removed in personal use version
15 | # from maverick_mcp.auth.jwt_enhanced import EnhancedJWTManager
16 | # from maverick_mcp.auth.key_manager_jwt import KeyManager
17 | from maverick_mcp.config.settings import settings
18 | from maverick_mcp.utils.logging import get_logger
19 |
20 |
class BaseService(ABC):
    """
    Base service class providing common functionality for all services.

    Implements dependency injection patterns and exposes shared helpers
    (settings access, database sessions, usage logging) that every
    service needs.
    """

    def __init__(
        self,
        mcp: FastMCP,
        db_session_factory: Any = None,
    ):
        """
        Initialize base service with dependencies.

        Args:
            mcp: FastMCP instance for tool/resource registration
            db_session_factory: Optional async database session factory
        """
        self.mcp = mcp
        self.db_session_factory = db_session_factory
        self.logger = get_logger(
            f"maverick_mcp.services.{self.__class__.__name__.lower()}"
        )

    @property
    def settings(self):
        """Application settings object."""
        return settings

    async def get_db_session(self) -> AsyncSession:
        """
        Create and return an async database session.

        Returns:
            AsyncSession instance

        Raises:
            RuntimeError: If database session factory not available
        """
        if not self.db_session_factory:
            raise RuntimeError("Database session factory not configured")
        return self.db_session_factory()

    def is_auth_enabled(self) -> bool:
        """Check if authentication is enabled."""
        # Auth disabled in personal use version
        return False

    def is_debug_mode(self) -> bool:
        """Check if debug mode is enabled."""
        return settings.api.debug

    def log_tool_usage(self, tool_name: str, user_id: int | None = None, **kwargs):
        """
        Log tool usage for monitoring purposes.

        Args:
            tool_name: Name of the tool being used
            user_id: Optional user ID if authenticated
            **kwargs: Additional context for logging
        """
        context = dict(
            tool_name=tool_name,
            user_id=user_id,
            auth_enabled=self.is_auth_enabled(),
        )
        # Caller-supplied context wins over the defaults, as with dict unpacking.
        context.update(kwargs)
        self.logger.info(f"Tool usage: {tool_name}", extra=context)

    @abstractmethod
    def register_tools(self):
        """
        Register service tools with the MCP instance.

        Subclasses implement this to register their specific tools and
        resources.
        """
        pass
100 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Bug Report
3 | about: Create a report to help us improve MaverickMCP
4 | title: '[BUG] '
5 | labels: ['bug', 'needs-triage']
6 | assignees: ''
7 | ---
8 |
9 | ## 🐛 Bug Description
10 |
11 | A clear and concise description of what the bug is.
12 |
13 | ## 💰 Financial Disclaimer Acknowledgment
14 |
15 | - [ ] I understand this is educational software and not financial advice
16 | - [ ] I am not expecting investment recommendations or guaranteed returns
17 | - [ ] This bug report is about technical functionality, not financial performance
18 |
19 | ## 📋 Reproduction Steps
20 |
21 | Steps to reproduce the behavior:
22 |
23 | 1. Go to '...'
24 | 2. Click on '....'
25 | 3. Scroll down to '....'
26 | 4. See error
27 |
28 | ## 🎯 Expected Behavior
29 |
30 | A clear and concise description of what you expected to happen.
31 |
32 | ## 📸 Screenshots
33 |
34 | If applicable, add screenshots to help explain your problem.
35 |
36 | ## 💻 Environment Information
37 |
38 | **Desktop/Server:**
39 | - OS: [e.g. macOS, Ubuntu, Windows]
40 | - Python Version: [e.g. 3.12.0]
41 | - MaverickMCP Version: [e.g. 0.1.0]
42 | - Installation Method: [e.g. pip, uv, git clone]
43 |
44 | **Claude Desktop (if applicable):**
45 | - Claude Desktop Version: [e.g. 1.0.0]
46 | - mcp-remote Version: [if using Claude Desktop]
47 |
48 | **Dependencies:**
49 | - FastMCP Version: [e.g. 2.7.0]
50 | - FastAPI Version: [e.g. 0.115.0]
51 | - Database: [SQLite, PostgreSQL]
52 | - Redis: [Yes/No, version if yes]
53 |
54 | ## 📋 Configuration
55 |
56 | **Environment Variables (remove sensitive data):**
57 | ```
58 | TIINGO_API_KEY=***
59 | DATABASE_URL=***
60 | REDIS_HOST=***
61 | # ... other relevant config
62 | ```
63 |
64 | **Relevant .env settings:**
65 | ```
66 | LOG_LEVEL=DEBUG
67 | CACHE_ENABLED=true
68 | # ... other settings
69 | ```
70 |
71 | ## 📊 Error Messages/Logs
72 |
73 | **Error message:**
74 | ```
75 | Paste the full error message here
76 | ```
77 |
78 | **Server logs (if available):**
79 | ```
80 | Paste relevant server logs here (remove API keys)
81 | ```
82 |
83 | **Console/Terminal output:**
84 | ```
85 | Paste terminal output here
86 | ```
87 |
88 | ## 🔧 Additional Context
89 |
90 | - Are you using any specific financial data providers?
91 | - What stock symbols were you analyzing when this occurred?
92 | - Any specific time ranges or parameters involved?
93 | - Any custom configuration or modifications?
94 |
95 | ## ✅ Pre-submission Checklist
96 |
97 | - [ ] I have searched existing issues to avoid duplicates
98 | - [ ] I have removed all sensitive data (API keys, personal info)
99 | - [ ] I can reproduce this bug consistently
100 | - [ ] I have included relevant error messages and logs
101 | - [ ] I understand this is educational software with no financial guarantees
102 |
103 | ## 🏷️ Bug Classification
104 |
105 | **Severity:**
106 | - [ ] Critical (crashes, data loss)
107 | - [ ] High (major feature broken)
108 | - [ ] Medium (feature partially working)
109 | - [ ] Low (minor issue, workaround available)
110 |
111 | **Component:**
112 | - [ ] Data fetching (Tiingo, Yahoo Finance)
113 | - [ ] Technical analysis calculations
114 | - [ ] Stock screening
115 | - [ ] Database operations
116 | - [ ] Caching (Redis)
117 | - [ ] MCP server/tools
118 | - [ ] Claude Desktop integration
119 | - [ ] Installation/Setup
120 |
121 | **Additional Labels:**
122 | - [ ] documentation (if docs need updating)
123 | - [ ] good first issue (if suitable for newcomers)
124 | - [ ] help wanted (if community help is needed)
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/llm_factory.py:
--------------------------------------------------------------------------------
```python
1 | """LLM factory for creating language model instances.
2 |
3 | This module provides a factory function to create LLM instances with intelligent model selection.
4 | """
5 |
6 | import logging
7 | import os
8 | from typing import Any
9 |
10 | from langchain_community.llms import FakeListLLM
11 |
12 | from maverick_mcp.providers.openrouter_provider import (
13 | TaskType,
14 | get_openrouter_llm,
15 | )
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 |
def get_llm(
    task_type: TaskType = TaskType.GENERAL,
    prefer_fast: bool = False,
    prefer_cheap: bool = True,  # Default to cost-effective
    prefer_quality: bool = False,
    model_override: str | None = None,
) -> Any:
    """Create and return an LLM instance with intelligent model selection.

    Args:
        task_type: Type of task to optimize model selection for
        prefer_fast: Prioritize speed over quality
        prefer_cheap: Prioritize cost over quality (default True)
        prefer_quality: Use premium models regardless of cost
        model_override: Override automatic model selection

    Returns:
        An LLM instance optimized for the task.

    Priority order:
        1. OpenRouter API if OPENROUTER_API_KEY is available (with smart model selection)
        2. OpenAI ChatOpenAI if OPENAI_API_KEY is available (fallback)
        3. Anthropic ChatAnthropic if ANTHROPIC_API_KEY is available (fallback)
        4. FakeListLLM as fallback for testing
    """
    # Check for OpenRouter first (preferred)
    openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
    if openrouter_api_key:
        logger.info(
            f"Using OpenRouter with intelligent model selection for task: {task_type}"
        )
        return get_openrouter_llm(
            api_key=openrouter_api_key,
            task_type=task_type,
            prefer_fast=prefer_fast,
            prefer_cheap=prefer_cheap,
            prefer_quality=prefer_quality,
            model_override=model_override,
        )

    # Fallback to OpenAI.  BUG FIX: previously we logged "Falling back to
    # OpenAI API" and then silently swallowed an ImportError, leaving a
    # misleading log trail.  Only log the fallback once the import succeeds,
    # and warn explicitly when the integration package is missing.
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if openai_api_key:
        try:
            from langchain_openai import ChatOpenAI
        except ImportError:
            logger.warning(
                "OPENAI_API_KEY is set but langchain_openai is not installed; "
                "skipping OpenAI fallback"
            )
        else:
            logger.info("Falling back to OpenAI API")
            return ChatOpenAI(model="gpt-4o-mini", temperature=0.3, streaming=False)

    # Fallback to Anthropic (same pattern as OpenAI above)
    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
    if anthropic_api_key:
        try:
            from langchain_anthropic import ChatAnthropic
        except ImportError:
            logger.warning(
                "ANTHROPIC_API_KEY is set but langchain_anthropic is not installed; "
                "skipping Anthropic fallback"
            )
        else:
            logger.info("Falling back to Anthropic API")
            return ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.3)

    # Final fallback to fake LLM for testing
    logger.warning("No LLM API keys found - using FakeListLLM for testing")
    return FakeListLLM(
        responses=[
            "Mock analysis response for testing purposes.",
            "This is a simulated LLM response.",
            "Market analysis: Moderate bullish sentiment detected.",
        ]
    )
91 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/utils/postman_export.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Postman Collection Export Utility
3 |
4 | Converts OpenAPI specifications to Postman collection format.
5 | """
6 |
7 | from typing import Any
8 |
9 |
def convert_to_postman(openapi_dict: dict[str, Any]) -> dict[str, Any]:
    """
    Convert OpenAPI specification to Postman collection format.

    Args:
        openapi_dict: OpenAPI specification dictionary

    Returns:
        Postman collection dictionary following the v2.1.0 collection schema.
    """
    import json  # stdlib; used to serialize example request bodies as valid JSON

    info = openapi_dict.get("info", {})

    collection: dict[str, Any] = {
        "info": {
            "name": info.get("title", "API Collection"),
            "description": info.get("description", "Exported from OpenAPI spec"),
            "version": info.get("version", "1.0.0"),
            "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json",
        },
        "item": [],
        "variable": [],
    }

    # Expose the first server URL as the {{baseUrl}} collection variable
    servers = openapi_dict.get("servers", [])
    if servers:
        collection["variable"].append(
            {
                "key": "baseUrl",
                "value": servers[0].get("url", "http://localhost:8000"),
                "type": "string",
            }
        )

    # Convert paths to Postman requests
    paths = openapi_dict.get("paths", {})
    for path, methods in paths.items():
        for method, operation in methods.items():
            # Skip path-level keys such as "parameters" and unsupported verbs
            if method.upper() not in ["GET", "POST", "PUT", "DELETE", "PATCH"]:
                continue

            item: dict[str, Any] = {
                "name": operation.get("summary", f"{method.upper()} {path}"),
                "request": {
                    "method": method.upper(),
                    "header": [],
                    "url": {
                        "raw": "{{baseUrl}}" + path,
                        "host": ["{{baseUrl}}"],
                        "path": path.split("/")[1:]
                        if path.startswith("/")
                        else path.split("/"),
                    },
                },
                "response": [],
            }

            # Add request body if present
            if "requestBody" in operation:
                content = operation["requestBody"].get("content", {})
                if "application/json" in content:
                    item["request"]["header"].append(
                        {
                            "key": "Content-Type",
                            "value": "application/json",
                            "type": "text",
                        }
                    )

                    # Add example body if available.  BUG FIX: use json.dumps()
                    # instead of str() so the raw body is valid JSON (double
                    # quotes, true/false/null) rather than a Python repr.
                    schema = content["application/json"].get("schema", {})
                    if "example" in schema:
                        item["request"]["body"] = {
                            "mode": "raw",
                            "raw": json.dumps(schema["example"]),
                        }

            collection["item"].append(item)

    return collection
88 |
```
--------------------------------------------------------------------------------
/maverick_mcp/validation/screening.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Validation models for stock screening tools.
3 |
4 | This module provides Pydantic models for validating inputs
5 | to all screening-related tools.
6 | """
7 |
8 | from typing import Literal
9 |
10 | from pydantic import Field, field_validator
11 |
12 | from .base import (
13 | BaseRequest,
14 | PaginationMixin,
15 | PositiveFloat,
16 | PositiveInt,
17 | StrictBaseModel,
18 | )
19 |
20 |
class MaverickScreeningRequest(StrictBaseModel, PaginationMixin):
    """Validation for get_maverick_stocks tool.

    Attributes:
        limit: Maximum number of stocks to return (default 20, capped at 100
            by the ``le`` constraint).
    """

    limit: PositiveInt = Field(
        default=20, le=100, description="Maximum number of stocks to return"
    )

    model_config = {"json_schema_extra": {"examples": [{"limit": 20}, {"limit": 50}]}}
29 |
30 |
class SupplyDemandBreakoutRequest(StrictBaseModel, PaginationMixin):
    """Validation for get_supply_demand_breakouts tool.

    Attributes:
        limit: Maximum number of stocks to return (default 20, capped at 100).
        filter_moving_averages: When True, restrict results to stocks in the
            demand expansion phase (trading above all moving averages).
    """

    limit: PositiveInt = Field(
        default=20, le=100, description="Maximum number of stocks to return"
    )
    filter_moving_averages: bool = Field(
        default=False,
        description="If True, only return stocks in demand expansion phase (above all moving averages)",
    )

    model_config = {
        "json_schema_extra": {
            "examples": [
                {"limit": 20, "filter_moving_averages": False},
                {"limit": 15, "filter_moving_averages": True},
            ]
        }
    }
50 |
51 |
class CustomScreeningRequest(BaseRequest, PaginationMixin):
    """Validation for get_screening_by_criteria tool.

    All filter fields are optional (default None).

    Attributes:
        min_momentum_score: Minimum momentum score, 0-100.
        min_volume: Minimum average daily volume.
        max_price: Maximum stock price.
        sector: Sector name filter; normalized to stripped title case.
        limit: Maximum number of results (default 20, capped at 100).
    """

    min_momentum_score: float | None = Field(
        default=None,
        ge=0.0,
        le=100.0,
        description="Minimum momentum score (0-100)",
    )
    min_volume: PositiveInt | None = Field(
        default=None, description="Minimum average daily volume"
    )
    max_price: PositiveFloat | None = Field(
        default=None, description="Maximum stock price"
    )
    sector: str | None = Field(
        default=None,
        max_length=100,
        description="Specific sector to filter (e.g., 'Technology')",
    )
    limit: PositiveInt = Field(
        default=20, le=100, description="Maximum number of results"
    )

    @field_validator("sector")
    @classmethod
    def normalize_sector(cls, v: str | None) -> str | None:
        """Normalize sector name to stripped title case."""
        if v is not None:
            # Title case for consistency
            # NOTE(review): str.title() lowercases acronyms ("IT" -> "It");
            # confirm the sector vocabulary contains no all-caps names.
            return v.strip().title()
        return v

    model_config = {
        "json_schema_extra": {
            "examples": [
                {"min_momentum_score": 85.0, "min_volume": 1000000, "limit": 20},
                {
                    "max_price": 50.0,
                    "sector": "Technology",
                    "min_momentum_score": 80.0,
                    "limit": 30,
                },
            ]
        }
    }
98 |
99 |
class ScreeningType(StrictBaseModel):
    """Screening-type selector.

    Despite the original "Enum" wording, this is a Pydantic model with a
    single Literal-constrained field, not a Python ``enum.Enum``.
    """

    screening_type: Literal[
        "maverick_bullish", "maverick_bearish", "supply_demand_breakout", "all"
    ] = Field(default="all", description="Type of screening to retrieve")
106 |
```
--------------------------------------------------------------------------------
/tests/test_event_loop_integrity.py:
--------------------------------------------------------------------------------
```python
1 | """Tests ensuring temporary event loops are restored correctly."""
2 |
3 | from __future__ import annotations
4 |
5 | import asyncio
6 | from typing import Any
7 |
8 | import pytest
9 |
10 | from maverick_mcp.api.server import health_resource, status_dashboard_resource
11 | from maverick_mcp.backtesting.strategy_executor import (
12 | ExecutionContext,
13 | StrategyExecutor,
14 | )
15 | from maverick_mcp.utils.quick_cache import quick_cache
16 |
17 |
def _assert_loop_clean() -> None:
    """Assert that no closed event loop remains configured, then detach it."""

    try:
        current = asyncio.get_event_loop_policy().get_event_loop()
    except RuntimeError:
        # No loop configured for this thread — nothing left over to check.
        current = None

    # A lingering *closed* loop is the failure mode we guard against.
    assert current is None or not current.is_closed()
    asyncio.set_event_loop(None)
30 |
31 |
def test_health_resource_restores_event_loop(monkeypatch: pytest.MonkeyPatch) -> None:
    """Calling the health resource twice should not leave a closed loop."""

    async def _fake_status() -> dict[str, Any]:
        return {"status": "healthy"}

    monkeypatch.setattr(
        "maverick_mcp.api.routers.health_enhanced._get_detailed_health_status",
        _fake_status,
    )

    # Two back-to-back calls: the second would fail if the first left a
    # closed loop installed.
    for _ in range(2):
        payload = health_resource()
        assert payload["status"] == "healthy"

    _assert_loop_clean()
49 |
50 |
def test_status_dashboard_restores_event_loop(monkeypatch: pytest.MonkeyPatch) -> None:
    """The status dashboard resource should restore the previous loop."""

    async def _fake_dashboard() -> dict[str, Any]:
        return {"status": "ok"}

    monkeypatch.setattr(
        "maverick_mcp.monitoring.status_dashboard.get_dashboard_data",
        _fake_dashboard,
    )

    # Invoke twice — a leaked closed loop would break the second call.
    for _ in range(2):
        payload = status_dashboard_resource()
        assert payload["status"] == "ok"

    _assert_loop_clean()
68 |
69 |
def test_quick_cache_sync_wrapper_restores_loop() -> None:
    """Synchronous quick_cache wrapper should not leave a closed loop behind."""

    invocations: list[int] = []

    @quick_cache(ttl_seconds=60)
    def _double(value: int) -> int:
        invocations.append(value)
        return value * 2

    assert _double(2) == 4
    # Second identical call must be served from the cache...
    assert _double(2) == 4
    # ...so the wrapped function body ran exactly once.
    assert len(invocations) == 1

    _assert_loop_clean()
85 |
86 |
def test_strategy_executor_sync_runner_restores_loop() -> None:
    """Running a backtest synchronously should restore the previous loop."""

    class _StubEngine:
        async def run_backtest(self, **_: Any) -> dict[str, Any]:
            return {"status": "ok"}

    executor = StrategyExecutor(max_concurrent_strategies=1)
    ctx = ExecutionContext(
        strategy_id="test",
        symbol="AAPL",
        strategy_type="demo",
        parameters={},
        start_date="2024-01-01",
        end_date="2024-01-02",
    )

    # Drive the private sync runner directly; it owns the temporary loop.
    outcome = executor._run_backtest_sync(_StubEngine(), ctx)
    assert outcome["status"] == "ok"

    _assert_loop_clean()

    executor._thread_pool.shutdown(wait=True)
112 |
```
--------------------------------------------------------------------------------
/maverick_mcp/config/technical_constants.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Technical Analysis Constants and Configuration
3 |
4 | This module centralizes all technical analysis parameters and thresholds
5 | to follow Open/Closed Principle and eliminate magic numbers.
6 | """
7 |
8 | from dataclasses import dataclass
9 | from typing import Final
10 |
11 |
@dataclass(frozen=True)
class TechnicalAnalysisConfig:
    """Configuration class for technical analysis parameters.

    Frozen (immutable) dataclass, so the shared TECHNICAL_CONFIG instance
    below can safely serve as a global read-only default.
    """

    # RSI Configuration
    RSI_PERIOD: int = 14
    RSI_OVERBOUGHT: float = 70.0
    RSI_OVERSOLD: float = 30.0

    # Moving Average Configuration
    SMA_SHORT_PERIOD: int = 50
    SMA_LONG_PERIOD: int = 200
    EMA_PERIOD: int = 21
    EMA_FAST_PERIOD: int = 12
    EMA_SLOW_PERIOD: int = 26

    # MACD Configuration (fast/slow periods plus signal line)
    MACD_FAST_PERIOD: int = 12
    MACD_SLOW_PERIOD: int = 26
    MACD_SIGNAL_PERIOD: int = 9

    # Bollinger Bands Configuration
    BOLLINGER_PERIOD: int = 20
    BOLLINGER_STD_DEV: float = 2.0

    # Stochastic Oscillator Configuration
    STOCH_K_PERIOD: int = 14
    STOCH_D_PERIOD: int = 3
    STOCH_OVERBOUGHT: float = 80.0
    STOCH_OVERSOLD: float = 20.0

    # Volume Analysis Configuration (thresholds are multiples of average volume)
    HIGH_VOLUME_THRESHOLD: float = 1.5  # 1.5x average volume
    LOW_VOLUME_THRESHOLD: float = 0.7  # 0.7x average volume
    VOLUME_SMA_PERIOD: int = 20

    # Chart Pattern Configuration
    PATTERN_SIMILARITY_THRESHOLD: float = 0.05
    PATTERN_MIN_SEPARATION: int = 5

    # Support and Resistance Configuration
    SUPPORT_RESISTANCE_LOOKBACK: int = 20
    SUPPORT_RESISTANCE_TOLERANCE: float = 0.02  # 2% tolerance

    # ATR Configuration
    ATR_PERIOD: int = 14

    # CCI Configuration
    CCI_PERIOD: int = 20
    CCI_OVERBOUGHT: float = 100.0
    CCI_OVERSOLD: float = -100.0

    # Williams %R Configuration (note: thresholds are negative by convention)
    WILLIAMS_R_PERIOD: int = 14
    WILLIAMS_R_OVERBOUGHT: float = -20.0
    WILLIAMS_R_OVERSOLD: float = -80.0


# Global configuration instance
TECHNICAL_CONFIG: Final[TechnicalAnalysisConfig] = TechnicalAnalysisConfig()
72 |
73 |
74 | # Screening Strategy Configuration
@dataclass(frozen=True)
class ScreeningConfig:
    """Configuration for stock screening strategies.

    Frozen (immutable) dataclass shared via the global SCREENING_CONFIG
    instance below.
    """

    # Maverick Bullish Strategy
    MIN_VOLUME: int = 1_000_000
    MIN_PRICE: float = 5.0
    MAX_PRICE: float = 500.0
    MIN_MARKET_CAP: float = 100_000_000  # $100M

    # RSI Requirements
    RSI_MIN_BULLISH: float = 30.0
    RSI_MAX_BULLISH: float = 70.0

    # Volume Requirements
    VOLUME_SPIKE_THRESHOLD: float = 1.5  # 1.5x average volume

    # Moving Average Requirements
    MA_CROSSOVER_PERIOD: int = 5  # Days to check for crossover

    # Bear Strategy Thresholds
    RSI_MAX_BEARISH: float = 30.0
    PRICE_DECLINE_THRESHOLD: float = -0.10  # 10% decline

    # Trending Breakout Strategy
    BREAKOUT_VOLUME_MULTIPLIER: float = 2.0
    BREAKOUT_PRICE_THRESHOLD: float = 0.05  # 5% price increase

    # General Filtering
    EXCLUDE_PENNY_STOCKS: bool = True
    EXCLUDE_ETFS: bool = False
    MAX_RESULTS_PER_STRATEGY: int = 50


# Global screening configuration instance
SCREENING_CONFIG: Final[ScreeningConfig] = ScreeningConfig()
111 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/security_report.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Security Vulnerability Report
3 | about: Report a security issue (use this template for public issues only)
4 | title: '[SECURITY] '
5 | labels: ['security', 'needs-triage']
6 | assignees: ''
7 | ---
8 |
9 | # ⚠️ Security Vulnerability Report
10 |
11 | ## ⚠️ IMPORTANT NOTICE
12 |
13 | **For serious security vulnerabilities, please DO NOT create a public issue.**
14 |
15 | Instead, report them privately via:
16 | - **GitHub Security Advisories**: [Security Tab](https://github.com/wshobson/maverick-mcp/security) (Recommended)
17 |
18 | ## Public Security Issues Only
19 |
20 | **Use this template only for:**
21 | - [ ] Minor security improvements
22 | - [ ] Documentation security issues
23 | - [ ] Public security discussions
24 | - [ ] Low-impact security suggestions
25 |
26 | ## Issue Description
27 |
28 | **Security concern:**
29 | Describe the security issue or improvement suggestion.
30 |
31 | **Impact level:**
32 | - [ ] Critical - Immediate attention required
33 | - [ ] High - Important security flaw
34 | - [ ] Medium - Security improvement needed
35 | - [ ] Low - Minor security suggestion
36 |
37 | ## Category
38 |
39 | **Type of security issue:**
40 | - [ ] Authentication/Authorization
41 | - [ ] Input validation
42 | - [ ] Data exposure
43 | - [ ] Configuration issue
44 | - [ ] Dependency vulnerability
45 | - [ ] Code injection
46 | - [ ] Cross-site scripting (XSS)
47 | - [ ] SQL injection
48 | - [ ] Path traversal
49 | - [ ] Information disclosure
50 | - [ ] Denial of service
51 | - [ ] Cryptographic issue
52 | - [ ] Other: ___
53 |
54 | ## Affected Components
55 |
56 | **Which parts of the system are affected?**
57 | - [ ] MCP server
58 | - [ ] Authentication system
59 | - [ ] Database layer
60 | - [ ] API endpoints
61 | - [ ] Configuration files
62 | - [ ] Dependencies
63 | - [ ] Documentation
64 | - [ ] Other: ___
65 |
66 | ## Environment
67 |
68 | **System information:**
69 | - MaverickMCP version: [e.g., 0.1.0]
70 | - Python version: [e.g., 3.12.0]
71 | - Operating system: [e.g., Ubuntu 22.04]
72 | - Database: [PostgreSQL/SQLite version]
73 |
74 | ## Reproduction Steps (if applicable)
75 |
76 | **For demonstrable issues only (no sensitive details):**
77 |
78 | 1. Step 1
79 | 2. Step 2
80 | 3. Step 3
81 |
82 | ## Expected Security Behavior
83 |
84 | **What should happen from a security perspective?**
85 |
86 | ## Actual Behavior
87 |
88 | **What actually happens?**
89 |
90 | ## Suggested Solution
91 |
92 | **How do you think this should be fixed?**
93 |
94 | ## References
95 |
96 | **Related security standards or best practices:**
97 | - [ ] OWASP Top 10
98 | - [ ] CWE (Common Weakness Enumeration)
99 | - [ ] NIST guidelines
100 | - [ ] Industry standards
101 | - [ ] Other: ___
102 |
103 | **Links to documentation or examples:**
104 | - [Link 1]
105 | - [Link 2]
106 |
107 | ## Additional Context
108 |
109 | **Additional information:**
110 | Add any other context about the security concern.
111 |
112 | **Risk assessment:**
113 | - [ ] Could lead to data breach
114 | - [ ] Could allow unauthorized access
115 | - [ ] Could cause service disruption
116 | - [ ] Could expose sensitive information
117 | - [ ] Low impact improvement
118 | - [ ] Other: ___
119 |
120 | ## Disclosure
121 |
122 | **For public issues:**
123 | - [ ] I confirm this is not a serious vulnerability
124 | - [ ] I understand serious issues should be reported privately
125 | - [ ] This is a general security improvement suggestion
126 | - [ ] This is a documentation or process improvement
127 |
128 | ---
129 |
130 | **Remember:** For any serious security vulnerabilities, please report privately through GitHub Security Advisories.
```
--------------------------------------------------------------------------------
/maverick_mcp/validation/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Comprehensive validation models for Maverick-MCP API.
3 |
4 | This module provides Pydantic models for validating all tool inputs,
5 | API requests, and responses, ensuring data integrity and providing
6 | clear error messages with standardized response formats.
7 | """
8 |
9 | # Auth validation removed for personal use
10 | from .base import (
11 | DateRangeMixin,
12 | DateString,
13 | DateValidator,
14 | PaginationMixin,
15 | Percentage,
16 | PositiveFloat,
17 | PositiveInt,
18 | StrictBaseModel,
19 | TickerSymbol,
20 | TickerValidator,
21 | )
22 |
23 | # Billing validation removed for personal use
24 | from .data import (
25 | ClearCacheRequest,
26 | FetchStockDataRequest,
27 | GetChartLinksRequest,
28 | GetNewsRequest,
29 | GetStockInfoRequest,
30 | StockDataBatchRequest,
31 | )
32 |
33 | # Error imports removed - use maverick_mcp.exceptions instead
34 | from .middleware import (
35 | RateLimitMiddleware,
36 | SecurityMiddleware,
37 | ValidationMiddleware,
38 | )
39 | from .portfolio import (
40 | CorrelationAnalysisRequest,
41 | PortfolioComparisonRequest,
42 | RiskAnalysisRequest,
43 | )
44 | from .responses import (
45 | BaseResponse,
46 | BatchOperationResult,
47 | BatchResponse,
48 | DataResponse,
49 | ErrorDetail,
50 | ErrorResponse,
51 | HealthResponse,
52 | HealthStatus,
53 | ListResponse,
54 | RateLimitInfo,
55 | RateLimitResponse,
56 | ValidationErrorResponse,
57 | WebhookEvent,
58 | WebhookResponse,
59 | error_response,
60 | success_response,
61 | validation_error_response,
62 | )
63 | from .screening import (
64 | CustomScreeningRequest,
65 | MaverickScreeningRequest,
66 | SupplyDemandBreakoutRequest,
67 | )
68 | from .technical import (
69 | MACDAnalysisRequest,
70 | RSIAnalysisRequest,
71 | StockChartRequest,
72 | SupportResistanceRequest,
73 | TechnicalAnalysisRequest,
74 | )
75 |
76 | # Webhook validation removed for personal use
77 |
78 | __all__ = [
79 | # Base validation
80 | "DateRangeMixin",
81 | "DateString",
82 | "DateValidator",
83 | "PaginationMixin",
84 | "Percentage",
85 | "PositiveFloat",
86 | "PositiveInt",
87 | "StrictBaseModel",
88 | "TickerSymbol",
89 | "TickerValidator",
90 | # Data validation
91 | "FetchStockDataRequest",
92 | "StockDataBatchRequest",
93 | "GetStockInfoRequest",
94 | "GetNewsRequest",
95 | "GetChartLinksRequest",
96 | "ClearCacheRequest",
97 | # Middleware
98 | "RateLimitMiddleware",
99 | "SecurityMiddleware",
100 | "ValidationMiddleware",
101 | # Portfolio validation
102 | "RiskAnalysisRequest",
103 | "PortfolioComparisonRequest",
104 | "CorrelationAnalysisRequest",
105 | # Response models
106 | "BaseResponse",
107 | "BatchOperationResult",
108 | "BatchResponse",
109 | "DataResponse",
110 | "ErrorDetail",
111 | "ErrorResponse",
112 | "HealthResponse",
113 | "HealthStatus",
114 | "ListResponse",
115 | "RateLimitInfo",
116 | "RateLimitResponse",
117 | "ValidationErrorResponse",
118 | "WebhookEvent",
119 | "WebhookResponse",
120 | "error_response",
121 | "success_response",
122 | "validation_error_response",
123 | # Screening validation
124 | "MaverickScreeningRequest",
125 | "SupplyDemandBreakoutRequest",
126 | "CustomScreeningRequest",
127 | # Technical validation
128 | "RSIAnalysisRequest",
129 | "MACDAnalysisRequest",
130 | "SupportResistanceRequest",
131 | "TechnicalAnalysisRequest",
132 | "StockChartRequest",
133 | ]
134 |
```
--------------------------------------------------------------------------------
/tests/test_market_data_async.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Async test to verify market data provider non-blocking functionality.
4 | """
5 |
6 | import asyncio
7 | import time
8 |
9 | import pytest
10 |
11 | from maverick_mcp.providers.market_data import MarketDataProvider
12 |
13 |
@pytest.mark.asyncio
@pytest.mark.integration
@pytest.mark.external
async def test_market_data_async():
    """Test market data provider async functions."""
    provider = MarketDataProvider()

    print("Testing Market Data Provider (Async/Non-blocking)")
    print("=" * 50)

    # Full overview in a single call (internally concurrent)
    print("\nTesting concurrent market overview fetch...")
    started = time.time()
    overview = await provider.get_market_overview_async()
    print(f"✅ Fetched complete market overview in {time.time() - started:.2f} seconds")

    print(f"\nMarket Summary: {len(overview['market_summary'])} indices")
    print(f"Top Gainers: {len(overview['top_gainers'])} stocks")
    print(f"Top Losers: {len(overview['top_losers'])} stocks")
    print(f"Sectors: {len(overview['sector_performance'])} sectors")

    # Fan out the individual endpoints ourselves via asyncio.gather
    print("\n\nTesting individual methods concurrently...")
    started = time.time()
    summary, gainers, losers, active, sectors = await asyncio.gather(
        provider.get_market_summary_async(),
        provider.get_top_gainers_async(10),
        provider.get_top_losers_async(10),
        provider.get_most_active_async(10),
        provider.get_sector_performance_async(),
    )
    print(f"✅ Fetched all data concurrently in {time.time() - started:.2f} seconds")

    print("\nResults:")
    print(f"  - Market indices: {len(summary)}")
    print(f"  - Top gainers: {len(gainers)}")
    print(f"  - Top losers: {len(losers)}")
    print(f"  - Most active: {len(active)}")
    print(f"  - Sectors: {len(sectors)}")

    if gainers and isinstance(gainers, list) and len(gainers) > 0:
        print("\nTop 3 Gainers:")
        for stock in gainers[:3]:
            if isinstance(stock, dict):
                print(
                    f"  {stock['symbol']}: ${stock['price']} (+{stock['change_percent']}%)"
                )

    print("\n✅ All async tests completed!")
77 |
78 |
@pytest.mark.asyncio
@pytest.mark.integration
@pytest.mark.external
async def test_with_timeout():
    """Test with timeout to demonstrate non-blocking behavior."""
    provider = MarketDataProvider()

    print("\nTesting with timeout (5 seconds)...")
    try:
        # A bounded wait proves the coroutine yields instead of blocking.
        await asyncio.wait_for(provider.get_market_overview_async(), timeout=5.0)
    except TimeoutError:
        print("❌ Operation timed out (data source may be slow)")
    else:
        print("✅ Data fetched within timeout")
93 |
94 |
async def main():
    """Run all async tests."""
    for test_coroutine in (test_market_data_async, test_with_timeout):
        await test_coroutine()


if __name__ == "__main__":
    # Entry point for direct execution (pytest invokes the tests itself)
    asyncio.run(main())
104 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/inspector_sse.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | MCP Inspector-compatible SSE handler.
3 |
4 | This module implements an SSE handler that's compatible with MCP Inspector's
5 | expectations, where JSON-RPC messages are exchanged directly over the SSE
6 | connection rather than via a separate POST endpoint.
7 | """
8 |
9 | import json
10 | import logging
11 | from uuid import uuid4
12 |
13 | from starlette.requests import Request
14 | from starlette.responses import StreamingResponse
15 |
16 | from maverick_mcp.api.server import mcp
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
class InspectorSSEHandler:
    """SSE handler compatible with MCP Inspector.

    Inspector exchanges JSON-RPC messages directly over the SSE connection
    rather than via a separate POST endpoint.
    """

    def __init__(self, mcp_instance):
        self.mcp = mcp_instance
        # session_id -> session state; currently a placeholder that is never
        # populated (sessions are not yet tracked or cleaned up).
        self.sessions = {}

    async def handle_sse(self, request: Request):
        """Handle SSE connection from MCP Inspector.

        Returns a StreamingResponse that emits one connection event and then
        periodic keepalive comments.
        """
        session_id = str(uuid4())
        logger.info(f"New SSE connection: {session_id}")

        async def event_generator():
            """Generate SSE events."""
            # FIX: import hoisted out of the keepalive loop — it previously
            # re-executed `import asyncio` on every iteration.
            import asyncio

            # Send initial connection event
            yield f"data: {json.dumps({'type': 'connection', 'sessionId': session_id})}\n\n"

            # Keep connection alive
            while True:
                # In a real implementation, we'd process incoming messages here
                # For now, just keep the connection alive
                await asyncio.sleep(30)
                yield ": keepalive\n\n"

        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no",  # disable reverse-proxy buffering for SSE
            },
        )

    async def handle_message(self, request: Request):
        """Handle JSON-RPC message from client.

        Returns a JSON-RPC response dict, or an ``{"error": ...}`` dict for
        malformed requests.
        """
        # Get session ID from query params or headers
        session_id = request.query_params.get("session_id")
        if not session_id:
            return {"error": "Missing session_id"}

        # Get JSON-RPC message
        try:
            message = await request.json()
        except Exception as e:
            logger.error(f"Failed to parse JSON: {e}")
            return {"error": "Invalid JSON"}

        logger.info(f"Received message for session {session_id}: {message}")

        # Process the message through MCP
        # This is where we'd integrate with the actual MCP server
        # For now, return a mock response
        if message.get("method") == "initialize":
            return {
                "jsonrpc": "2.0",
                "id": message.get("id"),
                "result": {
                    "protocolVersion": "2024-11-05",
                    "capabilities": {
                        "tools": {"listChanged": True},
                        "resources": {"listChanged": False},
                        "prompts": {"listChanged": False},
                    },
                    "serverInfo": {"name": "MaverickMCP", "version": "1.0.0"},
                },
            }

        return {"jsonrpc": "2.0", "id": message.get("id"), "result": {}}
92 |
93 |
94 | # Create global handler instance
95 | inspector_handler = InspectorSSEHandler(mcp)
96 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Question
3 | about: Ask a question about using or contributing to MaverickMCP
4 | title: '[QUESTION] '
5 | labels: ['question', 'documentation']
6 | assignees: ''
7 | ---
8 |
9 | ## Question Summary
10 |
11 | **What would you like to know?**
12 | Ask your question clearly and concisely.
13 |
14 | ## Context
15 |
16 | **What are you trying to accomplish?**
17 | Describe what you're working on or what you want to achieve.
18 |
19 | **Your experience level:**
20 | - [ ] New to MaverickMCP
21 | - [ ] Familiar with MCP but new to MaverickMCP
22 | - [ ] Experienced with MaverickMCP
23 | - [ ] Financial analysis background
24 | - [ ] Software development background
25 |
26 | ## Environment (if applicable)
27 |
28 | **System Information:**
29 | - OS: [e.g., macOS, Linux, Windows]
30 | - Python version: [if relevant]
31 | - Installation method: [uv, pip, docker]
32 |
33 | ## What You've Tried
34 |
35 | **Research done:**
36 | - [ ] Checked documentation
37 | - [ ] Searched existing issues
38 | - [ ] Looked at code examples
39 | - [ ] Tried the Claude Desktop setup guide
40 |
41 | **Attempted solutions:**
42 | Describe what you've already tried.
43 |
44 | ## Specific Questions
45 |
46 | **Please be specific about what you need help with:**
47 |
48 | 1. [Your first question]
49 | 2. [Your second question, if any]
50 | 3. [Additional questions]
51 |
52 | ## Question Category
53 |
54 | **What type of question is this?**
55 | - [ ] Installation and setup
56 | - [ ] Configuration and API keys
57 | - [ ] Using specific MCP tools
58 | - [ ] Financial analysis methodology
59 | - [ ] Technical indicators and calculations
60 | - [ ] Stock screening strategies
61 | - [ ] Portfolio analysis
62 | - [ ] Performance optimization
63 | - [ ] Integration with Claude Desktop
64 | - [ ] Contributing to the project
65 | - [ ] Architecture and design
66 | - [ ] Error troubleshooting
67 | - [ ] Other: ___
68 |
69 | ## Code Example (if applicable)
70 |
71 | **If your question involves code, please provide a minimal example:**
72 |
73 | ```python
74 | # Your code here
75 | ```
76 |
77 | **Expected behavior:**
78 | What did you expect to happen?
79 |
80 | **Actual behavior:**
81 | What actually happened?
82 |
83 | ## Financial Context (if applicable)
84 |
85 | **Market/Asset class:**
86 | - [ ] US Stocks (NYSE, NASDAQ)
87 | - [ ] International stocks
88 | - [ ] Crypto
89 | - [ ] Forex
90 | - [ ] Commodities
91 | - [ ] Other: ___
92 |
93 | **Analysis type:**
94 | - [ ] Technical analysis
95 | - [ ] Fundamental analysis
96 | - [ ] Portfolio optimization
97 | - [ ] Risk analysis
98 | - [ ] Backtesting
99 | - [ ] Real-time monitoring
100 | - [ ] Other: ___
101 |
102 | ## Documentation Improvement
103 |
104 | **Could this be better documented?**
105 | - [ ] Yes, this should be added to documentation
106 | - [ ] Yes, existing docs need clarification
107 | - [ ] No, it's clearly documented but I missed it
108 | - [ ] Not sure
109 |
110 | **Where would you expect to find this information?**
111 | - [ ] README
112 | - [ ] CLAUDE.md project guide
113 | - [ ] API documentation
114 | - [ ] Examples directory
115 | - [ ] Contributing guide
116 | - [ ] Other: ___
117 |
118 | ## Additional Context
119 |
120 | **Anything else that might be helpful:**
121 | Add any other context, screenshots, or information that might help answer your question.
122 |
123 | ## Urgency
124 |
125 | **How urgent is this question?**
126 | - [ ] Urgent - blocking my work
127 | - [ ] High - important for current task
128 | - [ ] Medium - helpful to know
129 | - [ ] Low - general curiosity
130 |
131 | ## Follow-up
132 |
133 | **Would you be willing to:**
134 | - [ ] Help improve documentation based on the answer
135 | - [ ] Submit a PR with examples
136 | - [ ] Help other users with similar questions
137 | - [ ] Test proposed solutions
```
--------------------------------------------------------------------------------
/tools/templates/new_tool_template.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Template for creating new MCP tools.
3 |
4 | Copy this file and modify it to create new tools quickly.
5 | """
6 |
7 | from typing import Any
8 |
9 | from maverick_mcp.api.server import mcp
10 | from maverick_mcp.utils.logging import get_logger
11 |
12 | logger = get_logger(__name__)
13 |
14 |
@mcp.tool()
async def tool_name(
    param1: str,
    param2: int = 10,
    param3: bool = True,
) -> dict[str, Any]:
    """
    Brief description of what this tool does.

    This tool performs [specific action] and returns [expected output].

    Args:
        param1: Description of first parameter
        param2: Description of second parameter (default: 10)
        param3: Description of third parameter (default: True)

    Returns:
        dict containing:
            - result: The main result of the operation
            - status: Success/failure status
            - details: Additional details about the operation

    Raises:
        ValueError: If parameters are invalid
        Exception: For other errors
    """
    # Local import keeps the template copy-paste self-contained.
    from datetime import datetime, timezone

    # Log tool execution
    logger.info(
        "Executing tool_name",
        extra={
            "param1": param1,
            "param2": param2,
            "param3": param3,
        },
    )

    try:
        # Validate inputs
        if not param1:
            raise ValueError("param1 cannot be empty")

        if param2 < 0:
            raise ValueError("param2 must be non-negative")

        # Main tool logic here
        # Example: Fetch data, process it, return results

        # For tools that need database access:
        # from maverick_mcp.data.models import get_db
        # db = next(get_db())
        # try:
        #     # Database operations
        # finally:
        #     db.close()

        # For tools that need async operations:
        # import asyncio
        # results = await asyncio.gather(
        #     async_operation1(),
        #     async_operation2(),
        # )

        # Prepare response.  Use the actual UTC time rather than a hard-coded
        # placeholder so tools copied from this template report accurate
        # timestamps out of the box.
        result = {
            "result": f"Processed {param1} with settings {param2}, {param3}",
            "status": "success",
            "details": {
                "processed_at": datetime.now(timezone.utc)
                .isoformat()
                .replace("+00:00", "Z"),
                "item_count": 42,
            },
        }

        logger.info(
            "Tool completed successfully",
            extra={"tool": "tool_name", "result_keys": list(result.keys())},
        )

        return result

    except ValueError as e:
        # Validation problems are reported as structured errors, not raised,
        # so MCP clients receive a well-formed payload.
        logger.error(f"Validation error in tool_name: {e}")
        return {
            "status": "error",
            "error": str(e),
            "error_type": "validation",
        }
    except Exception as e:
        logger.error(
            f"Unexpected error in tool_name: {e}",
            exc_info=True,
        )
        return {
            "status": "error",
            "error": str(e),
            "error_type": "unexpected",
        }
111 |
112 |
# Example of a tool that doesn't require authentication
@mcp.tool()
async def public_tool_name(query: str) -> dict[str, Any]:
    """
    A public tool that doesn't require authentication.

    Args:
        query: The query to process

    Returns:
        dict with query results
    """
    # Placeholder results; derive the count from the list so the two
    # fields can never drift apart.
    results = ["result1", "result2"]
    return {
        "query": query,
        "results": results,
        "count": len(results),
    }
130 |
```
--------------------------------------------------------------------------------
/maverick_mcp/data/django_adapter.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Django database adapter for Maverick-MCP.
3 |
4 | This module provides integration between Maverick-MCP and an existing
5 | Django database, allowing MCP to read Django-owned
6 | tables while maintaining separation of concerns.
7 | """
8 |
9 | import logging
10 | from typing import Any
11 |
12 | from sqlalchemy import BigInteger, Boolean, Column, String
13 | from sqlalchemy.ext.declarative import declarative_base
14 | from sqlalchemy.orm import Session
15 |
16 | logger = logging.getLogger(__name__)
17 |
18 | # Create a separate Base for Django table mappings
19 | DjangoBase: Any = declarative_base()
20 |
21 |
class DjangoUser(DjangoBase):
    """Read-only mapping to Django's users_customuser table.

    Mirrors a subset of the Django custom user model's columns; MCP only
    reads from this table and never writes to it.
    """

    __tablename__ = "users_customuser"
    # extend_existing allows this mapping to coexist if the table metadata
    # was already registered (e.g. repeated imports during tests).
    __table_args__ = {"extend_existing": True}

    id = Column(BigInteger, primary_key=True)
    username = Column(String(150), nullable=False)  # Django's max_length=150
    email = Column(String(254), nullable=False)  # 254 = Django's email max length
    first_name = Column(String(150))
    last_name = Column(String(150))
    is_active = Column(Boolean, default=True)
    is_staff = Column(Boolean, default=False)

    def __repr__(self):
        return (
            f"<DjangoUser(id={self.id}, username={self.username}, email={self.email})>"
        )
40 |
41 |
class DjangoStock(DjangoBase):
    """Read-only mapping to Django's stocks_stock table.

    Provides basic security metadata (symbol, name, sector, industry,
    market cap) owned by the Django application.
    """

    __tablename__ = "stocks_stock"
    # extend_existing allows re-declaration if the table is already mapped.
    __table_args__ = {"extend_existing": True}

    id = Column(BigInteger, primary_key=True)
    symbol = Column(String(20), nullable=False, unique=True)  # stored uppercase
    name = Column(String(255))
    sector = Column(String(100))
    industry = Column(String(100))
    market_cap = Column(BigInteger)  # absolute value in dollars, not millions

    def __repr__(self):
        return f"<DjangoStock(symbol={self.symbol}, name={self.name})>"
57 |
58 |
class DjangoAdapter:
    """
    Adapter for accessing Django-owned database tables.

    This adapter provides read-only access to Django tables,
    ensuring MCP doesn't modify Django-managed data.
    """

    def __init__(self, session: Session):
        # The session is borrowed, not owned; the caller manages its lifecycle.
        self.session = session

    def get_user_by_email(self, email: str) -> DjangoUser | None:
        """Get Django user by email address."""
        query = self.session.query(DjangoUser).filter_by(email=email)
        return query.first()

    def get_user_by_id(self, user_id: int) -> DjangoUser | None:
        """Get Django user by ID."""
        query = self.session.query(DjangoUser).filter_by(id=user_id)
        return query.first()

    def get_stock_by_symbol(self, symbol: str) -> DjangoStock | None:
        """Get stock by symbol from Django table."""
        # Symbols are stored uppercase, so normalize before querying.
        normalized = symbol.upper()
        query = self.session.query(DjangoStock).filter_by(symbol=normalized)
        return query.first()

    def link_mcp_user_to_django(self, email: str) -> dict | None:
        """
        Link MCP API key to Django user via email.

        Returns user info with placeholder subscription metadata.
        """
        django_user = self.get_user_by_email(email)
        if django_user is None:
            return None

        return {
            "user_id": django_user.id,
            "username": django_user.username,
            "email": django_user.email,
            "is_active": django_user.is_active,
            "has_subscription": False,
            "subscription_status": None,
            "external_customer_id": None,
        }
106 |
```
--------------------------------------------------------------------------------
/tests/integration/run_integration_tests.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Integration Test Runner for MaverickMCP Orchestration Tools
#
# Runs the comprehensive integration test suite with proper environment setup
# and provides clear output for validation of all orchestration capabilities.
#
# FIX: the previous header used Python-style triple-quoted strings, which are
# not bash syntax — `"""` executes as an empty command and the prose lines
# after it would run as commands. Shell scripts must use `#` comments.

set -e  # Exit on any error during setup

echo "🚀 MaverickMCP Orchestration Integration Test Runner"
echo "=================================================="

# Check if we're in the right directory
if [[ ! -f "test_orchestration_complete.py" ]]; then
    echo "❌ Error: Must run from tests/integration directory"
    exit 1
fi

# Navigate to project root for proper imports
cd "$(dirname "$0")/../.."

# Check Python environment
echo "🔍 Checking Python environment..."
if command -v uv >/dev/null 2>&1; then
    echo "✅ Using uv for Python environment"
    PYTHON_CMD="uv run python"
elif [[ -f ".venv/bin/activate" ]]; then
    echo "✅ Using virtual environment"
    source .venv/bin/activate
    PYTHON_CMD="python"
else
    echo "⚠️  No virtual environment detected, using system Python"
    PYTHON_CMD="python"
fi

# Check required dependencies
echo "🔍 Checking dependencies..."
$PYTHON_CMD -c "import maverick_mcp; print('✅ maverick_mcp package found')" || {
    echo "❌ maverick_mcp package not installed. Run 'make setup' first."
    exit 1
}

# Check if MCP server dependencies are available
$PYTHON_CMD -c "from maverick_mcp.api.routers.agents import orchestrated_analysis; print('✅ Orchestration tools available')" || {
    echo "❌ Orchestration tools not available. Check agent dependencies."
    exit 1
}

# Set up test environment
echo "🛠️  Setting up test environment..."

# Check for API keys (optional)
if [[ -z "$OPENAI_API_KEY" ]]; then
    echo "⚠️  OPENAI_API_KEY not set - tests will use mock responses"
else
    echo "✅ OPENAI_API_KEY found"
fi

if [[ -z "$EXA_API_KEY" ]]; then
    echo "⚠️  EXA_API_KEY not set - deep research may have limited functionality"
else
    echo "✅ EXA_API_KEY found"
fi

# Create logs directory if it doesn't exist
mkdir -p logs

echo ""
echo "🧪 Starting comprehensive integration tests..."
echo "   This will test all orchestration capabilities including:"
echo "   - agents_orchestrated_analysis with multiple personas/routing"
echo "   - agents_deep_research_financial with various depths/focus areas"
echo "   - agents_compare_multi_agent_analysis with different combinations"
echo "   - Error handling and edge cases"
echo "   - Concurrent execution performance"
echo "   - Memory usage monitoring"
echo ""

# Run the comprehensive test suite.
# FIX: `set -e` must be suspended around this command — otherwise a failing
# test run would terminate the script immediately and the exit code could
# never be captured or reported below.
set +e
$PYTHON_CMD tests/integration/test_orchestration_complete.py
TEST_EXIT_CODE=$?
set -e

echo ""
echo "=================================================="

if [[ $TEST_EXIT_CODE -eq 0 ]]; then
    echo "🎉 ALL INTEGRATION TESTS PASSED!"
    echo "   The orchestration tools are working correctly and ready for production use."
elif [[ $TEST_EXIT_CODE -eq 1 ]]; then
    echo "⚠️  SOME TESTS FAILED"
    echo "   Check the test output above and log files for details."
elif [[ $TEST_EXIT_CODE -eq 130 ]]; then
    echo "🛑 TESTS INTERRUPTED BY USER"
else
    echo "💥 TEST SUITE EXECUTION FAILED"
    echo "   Check the error output and ensure all dependencies are properly installed."
fi

echo ""
echo "📊 Test artifacts:"
echo "   - Detailed logs: integration_test_*.log"
echo "   - JSON results: integration_test_results_*.json"
echo ""

exit $TEST_EXIT_CODE
```
--------------------------------------------------------------------------------
/maverick_mcp/api/api_server.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Simplified FastAPI HTTP API Server for MaverickMCP Personal Use.
3 |
4 | This module provides a minimal FastAPI server for testing compatibility.
5 | Most functionality has been moved to the main MCP server for personal use.
6 | """
7 |
8 | from contextlib import asynccontextmanager
9 |
10 | from fastapi import FastAPI
11 | from fastapi.middleware.cors import CORSMiddleware
12 |
13 | from maverick_mcp.api.middleware.error_handling import (
14 | ErrorHandlingMiddleware,
15 | RequestTracingMiddleware,
16 | )
17 | from maverick_mcp.api.middleware.security import SecurityHeadersMiddleware
18 | from maverick_mcp.api.routers.health import router as health_router
19 | from maverick_mcp.config.settings import settings
20 | from maverick_mcp.utils.logging import get_logger
21 |
22 | logger = get_logger(__name__)
23 |
24 |
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Manage application lifecycle.

    Startup: initialize monitoring and performance subsystems; each failure
    is logged but deliberately does not abort startup.  Shutdown: clean up
    the performance subsystems.
    """
    logger.info("Starting simplified MaverickMCP API server")

    # Initialize monitoring systems
    try:
        # Imported lazily inside the try so an optional/broken monitoring
        # stack cannot prevent the server from starting.
        from maverick_mcp.utils.monitoring import initialize_monitoring

        logger.info("Initializing monitoring systems...")
        initialize_monitoring()
        logger.info("Monitoring systems initialized successfully")
    except Exception as e:
        logger.error(f"Failed to initialize monitoring systems: {e}")

    # Initialize performance systems
    try:
        from maverick_mcp.data.performance import initialize_performance_systems

        logger.info("Initializing performance optimization systems...")
        performance_status = await initialize_performance_systems()
        logger.info(f"Performance systems initialized: {performance_status}")
    except Exception as e:
        logger.error(f"Failed to initialize performance systems: {e}")

    # The application serves requests while suspended at this yield.
    yield

    # Cleanup performance systems
    try:
        from maverick_mcp.data.performance import cleanup_performance_systems

        logger.info("Cleaning up performance systems...")
        await cleanup_performance_systems()
        logger.info("Performance systems cleaned up")
    except Exception as e:
        logger.error(f"Error cleaning up performance systems: {e}")

    logger.info("Shutting down simplified MaverickMCP API server")
63 |
64 |
def create_api_app() -> FastAPI:
    """Create and configure a minimal FastAPI application for testing."""

    # Docs/schema endpoints are only exposed in debug mode.
    debug = settings.api.debug

    app = FastAPI(
        title=f"{settings.app_name} API (Personal Use)",
        description="Simplified HTTP API endpoints for MaverickMCP personal use",
        version="1.0.0",
        lifespan=lifespan,
        docs_url="/api/docs" if debug else None,
        redoc_url="/api/redoc" if debug else None,
        openapi_url="/api/openapi.json" if debug else None,
    )

    # Register the minimal middleware stack.  Order matters: Starlette wraps
    # the app so the last-added middleware runs first on each request.
    for middleware_cls in (
        ErrorHandlingMiddleware,
        RequestTracingMiddleware,
        SecurityHeadersMiddleware,
    ):
        app.add_middleware(middleware_cls)

    # CORS policy comes from settings so it can differ per environment.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=settings.api.cors_origins,
        allow_credentials=True,
        allow_methods=["GET", "POST", "PUT", "DELETE"],
        allow_headers=["*"],
    )

    # Only the health-check router is mounted in this simplified server.
    app.include_router(health_router, prefix="/api")

    logger.info("Simplified MaverickMCP API server configured for personal use")
    return app
98 |
99 |
# Create the app instance
api_app = create_api_app()

if __name__ == "__main__":
    import uvicorn

    # Local development entry point: bind to localhost only, with auto-reload.
    uvicorn.run(
        "maverick_mcp.api.api_server:api_app",
        host="127.0.0.1",
        port=8001,
        reload=True,
    )
112 |
```
--------------------------------------------------------------------------------
/maverick_mcp/tests/test_market_calendar_caching.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Test market calendar integration with stock data caching.
4 | """
5 |
6 | import logging
7 |
8 | from maverick_mcp.providers.stock_data import StockDataProvider
9 |
10 | # Set up logging
11 | logging.basicConfig(
12 | level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
13 | )
14 |
15 |
def test_weekend_handling():
    """Test that weekends are handled properly."""
    provider = StockDataProvider()
    symbol = "AAPL"

    def fetch_and_report(start_date, end_date):
        # Shared request/report sequence for the weekend scenarios.
        print(f"Requesting {symbol} from {start_date} to {end_date}")
        frame = provider.get_stock_data(symbol, start_date, end_date)
        print(f"Received {len(frame)} rows")
        if not frame.empty:
            print(f"Data range: {frame.index.min()} to {frame.index.max()}")
        return frame

    # Test 1: range ending on a Sunday (2025-05-25); Monday 2025-05-19 start.
    print("\nTest 1: Request data ending on Sunday (should adjust to Friday)")
    fetch_and_report("2025-05-19", "2025-05-25")

    # Test 2: range spanning the Memorial Day holiday weekend.
    print("\n\nTest 2: Request data including Memorial Day weekend 2024")
    fetch_and_report("2024-05-24", "2024-05-27")

    # Test 3: identical request again — should be served from the cache
    # without extra yfinance calls for non-trading days.
    print("\n\nTest 3: Second request for same data (should use cache)")
    df2 = provider.get_stock_data(symbol, "2024-05-24", "2024-05-27")
    print(f"Received {len(df2)} rows from cache")
47 |
48 |
def test_trading_day_detection():
    """Test trading day detection methods."""
    provider = StockDataProvider()

    print("\n\nTesting trading day detection:")

    # (date, human-readable expectation) pairs covering weekdays, weekends,
    # and US market holidays.
    cases = [
        ("2024-05-24", "Friday - should be trading day"),
        ("2024-05-25", "Saturday - should NOT be trading day"),
        ("2024-05-26", "Sunday - should NOT be trading day"),
        ("2024-05-27", "Memorial Day - should NOT be trading day"),
        ("2024-12-25", "Christmas - should NOT be trading day"),
        ("2024-07-04", "Independence Day - should NOT be trading day"),
    ]

    for date_str, description in cases:
        status = (
            "Trading"
            if provider._is_trading_day(date_str)  # type: ignore[attr-defined]
            else "Non-trading"
        )
        print(f"{date_str} ({description}): {status}")

    # Enumerate the actual trading days in a known range.
    print("\n\nTrading days in May 2024:")
    for day in provider._get_trading_days("2024-05-20", "2024-05-31"):  # type: ignore[attr-defined]
        print(f"  {day.strftime('%Y-%m-%d %A')}")
76 |
77 |
def test_year_boundary():
    """Test caching across year boundaries."""
    provider = StockDataProvider()

    print("\n\nTest 4: Year boundary request")
    # A late-December to early-January window exercises the cache's
    # handling of requests that straddle two calendar years.
    symbol, start_date, end_date = "MSFT", "2023-12-28", "2024-01-03"

    print(f"Requesting {symbol} from {start_date} to {end_date}")
    frame = provider.get_stock_data(symbol, start_date, end_date)
    print(f"Received {len(frame)} rows")
    if not frame.empty:
        print("Trading days found:")
        for trading_date in frame.index:
            print(f"  {trading_date.strftime('%Y-%m-%d %A')}")
94 |
95 |
if __name__ == "__main__":
    # Manual test battery — these functions hit the data provider (network
    # and cache), so they are run as a script rather than under pytest.
    print("=" * 60)
    print("Testing Market Calendar Integration")
    print("=" * 60)

    test_weekend_handling()
    test_trading_day_detection()
    test_year_boundary()

    print("\n" + "=" * 60)
    print("All tests completed!")
    print("=" * 60)
108 |
```
--------------------------------------------------------------------------------
/tests/test_orchestration_tools_simple.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Simple test script to verify the orchestration tools are working correctly.
4 | """
5 |
6 | import asyncio
7 | import os
8 | import sys
9 |
10 | # Add the project root to the Python path
11 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
12 |
13 | from maverick_mcp.api.routers.agents import (
14 | deep_research_financial,
15 | list_available_agents,
16 | orchestrated_analysis,
17 | )
18 |
19 |
async def test_list_available_agents():
    """Test the list_available_agents function."""
    print("🧪 Testing list_available_agents...")
    try:
        result = list_available_agents()
        print(f"✅ Success: {result['status']}")
        agent_names = list(result["agents"].keys())
        print(f"📊 Available agents: {agent_names}")
        print(f"🎭 Available personas: {result['personas']}")
        return True
    except Exception as exc:
        # Any failure (call or missing key) counts as a failed check.
        print(f"❌ Error: {exc}")
        return False
32 |
33 |
async def test_orchestrated_analysis():
    """Test the orchestrated_analysis function with a simple query."""
    print("\n🧪 Testing orchestrated_analysis...")
    try:
        result = await orchestrated_analysis(
            query="What's the technical outlook for Apple stock?",
            persona="moderate",
            routing_strategy="rule_based",  # Use rule_based to avoid LLM calls
            max_agents=2,
        )
        status = result["status"]
        print(f"✅ Success: {status}")
        if status == "success":
            print(f"📈 Agent Type: {result.get('agent_type', 'unknown')}")
            print(f"🎭 Persona: {result.get('persona', 'unknown')}")
            print(f"⏱️ Execution Time: {result.get('execution_time_ms', 0):.2f}ms")
        return status == "success"
    except Exception as exc:
        print(f"❌ Error: {exc}")
        import traceback

        traceback.print_exc()
        return False
56 |
57 |
async def test_deep_research_financial():
    """Test the deep_research_financial function."""
    print("\n🧪 Testing deep_research_financial...")
    try:
        result = await deep_research_financial(
            research_topic="Apple Inc",
            persona="moderate",
            research_depth="basic",  # Use basic depth to minimize processing
            timeframe="7d",
        )
        status = result["status"]
        print(f"✅ Success: {status}")
        if status == "success":
            print(f"🔍 Agent Type: {result.get('agent_type', 'unknown')}")
            print(f"📚 Research Topic: {result.get('research_topic', 'unknown')}")
        return status == "success"
    except Exception as exc:
        print(f"❌ Error: {exc}")
        import traceback

        traceback.print_exc()
        return False
79 |
80 |
async def main():
    """Run all tests."""
    print("🚀 Testing Orchestration Tools\n" + "=" * 50)

    # Run each tool check in sequence and collect pass/fail flags.
    agents_ok = await test_list_available_agents()
    analysis_ok = await test_orchestrated_analysis()
    research_ok = await test_deep_research_financial()

    # Summary
    print("\n" + "=" * 50)
    print("📊 Test Results Summary:")
    print(f"  List Available Agents: {'✅' if agents_ok else '❌'}")
    print(f"  Orchestrated Analysis: {'✅' if analysis_ok else '❌'}")
    print(f"  Deep Research: {'✅' if research_ok else '❌'}")

    passed = sum([agents_ok, analysis_ok, research_ok])
    print(f"\n🎯 Total: {passed}/3 tests passed")

    if passed == 3:
        print("🎉 All orchestration tools are working correctly!")
        return True
    print("⚠️ Some tests failed - check the errors above")
    return False
110 |
111 |
if __name__ == "__main__":
    # Exit code 0 only when every orchestration check passed.
    success = asyncio.run(main())
    sys.exit(0 if success else 1)
115 |
```
--------------------------------------------------------------------------------
/alembic.ini:
--------------------------------------------------------------------------------
```
1 | # A generic, single database configuration.
2 |
3 | [alembic]
4 | # path to migration scripts
5 | script_location = alembic
6 |
7 | # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
8 | # Uncomment the line below if you want the files to be prepended with date and time
9 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
10 | # for all available tokens
11 | # file_template = %%(year)d%%(month).2d%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
12 |
13 | # sys.path path, will be prepended to sys.path if present.
14 | # defaults to the current working directory.
15 | prepend_sys_path = .
16 |
17 | # timezone to use when rendering the date within the migration file
18 | # as well as the filename.
19 | # If specified, requires the python>=3.9 or backports.zoneinfo library.
20 | # Any required deps can installed by adding `alembic[tz]` to the pip requirements
21 | # string value is passed to ZoneInfo()
22 | # leave blank for localtime
23 | # timezone =
24 |
25 | # max length of characters to apply to the
26 | # "slug" field
27 | # truncate_slug_length = 40
28 |
29 | # set to 'true' to run the environment during
30 | # the 'revision' command, regardless of autogenerate
31 | # revision_environment = false
32 |
33 | # set to 'true' to allow .pyc and .pyo files without
34 | # a source .py file to be detected as revisions in the
35 | # versions/ directory
36 | # sourceless = false
37 |
38 | # version location specification; This defaults
39 | # to maverick_mcp/data/migrations/versions. When using multiple version
40 | # directories, initial revisions must be specified with --version-path.
41 | # The path separator used here should be the separator specified by "version_path_separator" below.
42 | # version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
43 |
44 | # version path separator; As mentioned above, this is the character used to split
45 | # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
46 | # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
47 | # Valid values for version_path_separator are:
48 | #
49 | # version_path_separator = :
50 | # version_path_separator = ;
51 | # version_path_separator = space
52 | version_path_separator = os # Use os.pathsep.
53 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
54 |
55 | # set to 'true' to search source files recursively
56 | # in each "version_locations" directory
57 | # new in Alembic version 1.10
58 | # recursive_version_locations = false
59 |
60 | # the output encoding used when revision files
61 | # are written from script.py.mako
62 | # output_encoding = utf-8
63 |
64 | # Set via environment variable or env.py
65 | # sqlalchemy.url = postgresql://user:pass@localhost/local_production_snapshot
66 |
67 |
68 | [post_write_hooks]
69 | # post_write_hooks defines scripts or Python functions that are run
70 | # on newly generated revision scripts. See the documentation for further
71 | # detail and examples
72 |
73 | # format using "black" - use the console_scripts runner, against the "black" entrypoint
74 | # hooks = black
75 | # black.type = console_scripts
76 | # black.entrypoint = black
77 | # black.options = -l 79 REVISION_SCRIPT_FILENAME
78 |
79 | # lint with attempts to fix using "ruff" - use the exec runner, execute a binary
80 | # hooks = ruff
81 | # ruff.type = exec
82 | # ruff.executable = %(here)s/.venv/bin/ruff
83 | # ruff.options = --fix REVISION_SCRIPT_FILENAME
84 |
85 | # Logging configuration
86 | [loggers]
87 | keys = root,sqlalchemy,alembic
88 |
89 | [handlers]
90 | keys = console
91 |
92 | [formatters]
93 | keys = generic
94 |
95 | [logger_root]
96 | level = WARN
97 | handlers = console
98 | qualname =
99 |
100 | [logger_sqlalchemy]
101 | level = WARN
102 | handlers =
103 | qualname = sqlalchemy.engine
104 |
105 | [logger_alembic]
106 | level = INFO
107 | handlers =
108 | qualname = alembic
109 |
110 | [handler_console]
111 | class = StreamHandler
112 | args = (sys.stderr,)
113 | level = NOTSET
114 | formatter = generic
115 |
116 | [formatter_generic]
117 | format = %(levelname)-5.5s [%(name)s] %(message)s
118 | datefmt = %H:%M:%S
```
--------------------------------------------------------------------------------
/maverick_mcp/validation/portfolio.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Validation models for portfolio analysis tools.
3 |
4 | This module provides Pydantic models for validating inputs
5 | to all portfolio-related tools.
6 | """
7 |
8 | from pydantic import Field, field_validator
9 |
10 | from .base import (
11 | Percentage,
12 | PositiveInt,
13 | StrictBaseModel,
14 | TickerSymbol,
15 | TickerValidator,
16 | )
17 |
18 |
class RiskAnalysisRequest(StrictBaseModel):
    """Validation for risk_adjusted_analysis tool.

    Accepts a single ticker and a 0-100 risk tolerance percentage.
    """

    ticker: TickerSymbol = Field(..., description="Stock ticker symbol")
    # Percentage constrains the value to [0, 100]; 50 is a balanced default.
    risk_level: Percentage = Field(
        default=50.0,
        description="Risk tolerance from 0 (conservative) to 100 (aggressive)",
    )

    @field_validator("ticker")
    @classmethod
    def normalize_ticker(cls, v: str) -> str:
        """Normalize ticker to uppercase."""
        return TickerValidator.validate_ticker(v)

    # Examples surfaced in the generated JSON schema / API docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"ticker": "AAPL", "risk_level": 50.0},
                {"ticker": "TSLA", "risk_level": 75.0},
                {"ticker": "JNJ", "risk_level": 25.0},
            ]
        }
    }
43 |
44 |
class PortfolioComparisonRequest(StrictBaseModel):
    """Validation for compare_tickers tool.

    Requires 2-20 tickers and a lookback window of up to 5 years.
    """

    tickers: list[TickerSymbol] = Field(
        ...,
        min_length=2,
        max_length=20,
        description="List of ticker symbols to compare (2-20 tickers)",
    )
    days: PositiveInt = Field(
        default=90,
        le=1825,  # Max 5 years
        description="Number of days of historical data for comparison",
    )

    @field_validator("tickers")
    @classmethod
    def validate_tickers(cls, v: list[str]) -> list[str]:
        """Validate and normalize ticker list.

        De-duplication inside validate_ticker_list can shrink the list below
        the field's min_length, so the uniqueness floor is re-checked here.
        """
        tickers = TickerValidator.validate_ticker_list(v)
        if len(tickers) < 2:
            raise ValueError("At least 2 unique tickers are required for comparison")
        return tickers

    # Examples surfaced in the generated JSON schema / API docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"tickers": ["AAPL", "MSFT", "GOOGL"], "days": 90},
                {"tickers": ["SPY", "QQQ", "IWM", "DIA"], "days": 180},
            ]
        }
    }
77 |
78 |
class CorrelationAnalysisRequest(StrictBaseModel):
    """Validation for portfolio_correlation_analysis tool.

    Requires 2-30 tickers and a lookback window between 30 days and
    10 years (2520 trading days).
    """

    tickers: list[TickerSymbol] = Field(
        ...,
        min_length=2,
        max_length=30,
        description="List of ticker symbols for correlation analysis",
    )
    days: PositiveInt = Field(
        default=252,  # 1 trading year
        ge=30,  # Need at least 30 days for meaningful correlation
        le=2520,  # Max 10 years
        description="Number of days for correlation calculation",
    )

    @field_validator("tickers")
    @classmethod
    def validate_tickers(cls, v: list[str]) -> list[str]:
        """Validate and normalize ticker list.

        De-duplication inside validate_ticker_list can shrink the list below
        the field's min_length, so the uniqueness floor is re-checked here.
        """
        tickers = TickerValidator.validate_ticker_list(v)
        if len(tickers) < 2:
            raise ValueError(
                "At least 2 unique tickers are required for correlation analysis"
            )
        return tickers

    # NOTE: a previous `validate_days_for_correlation` after-validator
    # re-checked `days >= 30`, but `Field(ge=30)` already rejects such values
    # before any after-mode validator runs, so that branch was unreachable
    # and has been removed.

    # Examples surfaced in the generated JSON schema / API docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"tickers": ["AAPL", "MSFT", "GOOGL", "AMZN"], "days": 252},
                {
                    "tickers": ["SPY", "TLT", "GLD", "DBC", "VNQ"],
                    "days": 504,  # 2 years
                },
            ]
        }
    }
127 |
```
--------------------------------------------------------------------------------
/tools/experiments/validation_fixed.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Fixed validation examples that work with the current codebase.
3 | """
4 |
5 | import os
6 | import random
7 | import subprocess
8 | import time
9 |
10 | import pandas as pd
11 |
12 | from maverick_mcp.utils.agent_errors import agent_friendly_errors
13 | from maverick_mcp.utils.parallel_screening import ParallelScreener
14 | from maverick_mcp.utils.quick_cache import get_cache_stats, quick_cache
15 |
16 | print("🎯 Maverick-MCP Validation - Fixed Version")
17 | print("=" * 60)
18 |
19 | # Validation 1: Using the agent error handler
20 | print("\n🔐 1. Testing Agent Error Handler...")
21 |
22 |
23 | @agent_friendly_errors(reraise=False)
24 | def test_column_error():
25 | """Test DataFrame column error handling."""
26 | df = pd.DataFrame({"Close": [100, 101, 102]})
27 | # This will raise KeyError
28 | return df["close"] # Wrong case!
29 |
30 |
31 | result = test_column_error()
32 | if isinstance(result, dict) and "fix_suggestion" in result:
33 | print(f"✅ Error caught with fix: {result['fix_suggestion']['fix']}")
34 | print(f" Example: {result['fix_suggestion']['example']}")
35 |
36 |
37 | # Validation 2: Testing the quick cache
38 | print("\n💾 2. Testing Quick Cache...")
39 |
40 |
41 | @quick_cache(ttl_seconds=5)
42 | def expensive_operation(value: int) -> int:
43 | """Simulate expensive operation."""
44 | time.sleep(0.5) # Simulate work
45 | return value * 2
46 |
47 |
48 | # First call - cache miss
49 | start = time.time()
50 | result1 = expensive_operation(42)
51 | time1 = time.time() - start
52 |
53 | # Second call - cache hit
54 | start = time.time()
55 | result2 = expensive_operation(42)
56 | time2 = time.time() - start
57 |
58 | stats = get_cache_stats()
59 | print(f"✅ Cache working: First call {time1:.3f}s, Second call {time2:.3f}s")
60 | print(
61 | f" Cache stats: {stats['hits']} hits, {stats['misses']} misses, {stats['hit_rate']}% hit rate"
62 | )
63 |
64 |
65 | # Validation 3: Testing parallel screening
66 | print("\n⚡ 3. Testing Parallel Screening...")
67 |
68 |
def simple_screen(symbol: str) -> dict:
    """Toy screening function used to benchmark the parallel runner.

    Sleeps briefly to mimic real screening work, then returns a
    randomized pass/fail verdict and score for *symbol*.
    """
    time.sleep(0.1)  # simulated I/O / computation latency
    passed = random.random() > 0.5
    score = random.randint(60, 95)
    return {"symbol": symbol, "passed": passed, "score": score}
77 |
78 |
test_symbols = ["AAPL", "MSFT", "GOOGL", "AMZN", "TSLA", "META"]

# Sequential baseline: run the screen one symbol at a time.
start = time.time()
seq_results = [simple_screen(s) for s in test_symbols]
seq_time = time.time() - start

# Parallel: same work fanned out across 3 workers in batches of 2.
with ParallelScreener(max_workers=3) as screener:
    start = time.time()
    par_results = screener.screen_batch(test_symbols, simple_screen, batch_size=2)
    par_time = time.time() - start

# Guard against division by zero on a (theoretical) instant parallel run.
speedup = seq_time / par_time if par_time > 0 else 1
print(f"✅ Parallel screening: {speedup:.1f}x speedup")
print(f"   Sequential: {seq_time:.2f}s, Parallel: {par_time:.2f}s")


# Validation 4: Testing experiment harness
print("\n🧪 4. Testing Experiment Harness...")

os.makedirs("tools/experiments", exist_ok=True)

# Check if experiment harness would work
# NOTE: relative paths, so this only succeeds when run from the repo root.
if os.path.exists("tools/experiment.py"):
    print("✅ Experiment harness is available")
    print("   Drop .py files in tools/experiments/ to auto-execute")
else:
    print("❌ Experiment harness not found")


# Validation 5: Testing fast commands
print("\n🚀 5. Testing Fast Commands...")

# Test make command
# NOTE(review): subprocess.run raises FileNotFoundError if `make` is not
# installed; that case is unhandled here — confirm this is acceptable
# for a validation script.
result = subprocess.run(["make", "help"], capture_output=True, text=True)
if result.returncode == 0:
    print("✅ Makefile commands working")
    # Show some key commands
    for line in result.stdout.split("\n")[2:6]:
        if line.strip():
            print(f"   {line}")


# Summary
# NOTE(review): this summary is printed unconditionally, even when an
# individual validation above failed or was skipped.
print("\n" + "=" * 60)
print("🎉 Validation Summary:")
print("   1. Agent Error Handler: ✅ Provides helpful fixes")
print("   2. Quick Cache: ✅ Speeds up repeated calls")
print("   3. Parallel Screening: ✅ Multi-core speedup")
print("   4. Experiment Harness: ✅ Auto-execution ready")
print("   5. Fast Commands: ✅ Makefile working")
print("\n✨ All core improvements validated successfully!")
132 |
```
--------------------------------------------------------------------------------
/maverick_mcp/tests/test_cache_debug.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Debug test for stock data caching issues.
4 | """
5 |
6 | import logging
7 | from datetime import datetime, timedelta
8 |
9 | from maverick_mcp.providers.stock_data import StockDataProvider
10 |
11 | # Set up detailed logging for debugging
12 | logging.basicConfig(
13 | level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
14 | )
15 |
16 |
def test_stock_data_caching_debug():
    """Test stock data caching functionality with detailed logging.

    Walks the provider through four fetch patterns — cold fetch, warm
    repeat, forced-fresh, and a widened date range — and prints row
    counts so the DEBUG log output can be cross-checked against the
    expected cache behavior.
    """
    # Initialize provider
    provider = StockDataProvider()

    # Test parameters: last 30 calendar days of MSFT.
    symbol = "MSFT"
    end_date = datetime.now().strftime("%Y-%m-%d")
    start_date = (datetime.now() - timedelta(days=30)).strftime("%Y-%m-%d")

    print("\nTest parameters:")
    print(f"  Symbol: {symbol}")
    print(f"  Start date: {start_date}")
    print(f"  End date: {end_date}")

    # Test 1: Fetch data (should check cache first, then fetch missing)
    print("\n1. Fetching data (should prioritize cache)...")
    df1 = provider.get_stock_data(symbol, start_date, end_date)
    assert not df1.empty, "First fetch returned empty DataFrame"
    print(f"   Fetched {len(df1)} rows")

    # Test 2: Fetch same data again (should use cache entirely)
    print("\n2. Fetching same data again (should use cache entirely)...")
    df2 = provider.get_stock_data(symbol, start_date, end_date)
    assert not df2.empty, "Second fetch returned empty DataFrame"
    print(f"   Fetched {len(df2)} rows")

    # Verify data consistency
    # NOTE(review): only row counts are compared; identical contents are
    # assumed rather than asserted.
    assert len(df1) == len(df2), "Data length mismatch between fetches"

    # Test 3: Force fresh data
    # use_cache=False should bypass the cache and hit the upstream source.
    print("\n3. Forcing fresh data (use_cache=False)...")
    df3 = provider.get_stock_data(symbol, start_date, end_date, use_cache=False)
    assert not df3.empty, "Fresh fetch returned empty DataFrame"
    print(f"   Fetched {len(df3)} rows")

    # Test 4: Test partial cache hit (request wider date range)
    # The most recent 30 days should already be cached from Test 1, so
    # only the older half of the range should need an upstream fetch.
    wider_start = (datetime.now() - timedelta(days=60)).strftime("%Y-%m-%d")
    print(
        f"\n4. Testing partial cache hit (wider range: {wider_start} to {end_date})..."
    )
    df4 = provider.get_stock_data(symbol, wider_start, end_date)
    assert not df4.empty, "Wider range fetch returned empty DataFrame"
    print(f"   Fetched {len(df4)} rows (should fetch only missing data)")

    # Display sample data
    if not df1.empty:
        print("\nSample data (first 5 rows):")
        print(df1.head())

    print("\nTest completed successfully!")
68 |
69 |
def test_smart_caching_behavior():
    """Exercise the cache-first read path of StockDataProvider.

    Issues three overlapping date-range requests and prints the row
    counts so cache hits can be inspected in the debug log output.
    """
    provider = StockDataProvider()

    def days_ago(n: int) -> str:
        # Format "now minus n days" the way get_stock_data expects.
        return (datetime.now() - timedelta(days=n)).strftime("%Y-%m-%d")

    # NOTE: the original comment said "less common stock", yet AAPL is
    # used; kept unchanged to preserve behavior.
    symbol = "AAPL"

    # Test 1: recent window — may be only partially cached.
    recent_start, recent_end = days_ago(10), days_ago(0)
    print(f"\nTest 1: Recent data request ({recent_start} to {recent_end})")
    frame = provider.get_stock_data(symbol, recent_start, recent_end)
    print(f"Fetched {len(frame)} rows")

    # Test 2: identical request — should be served from cache.
    print("\nTest 2: Same request again - should use cache entirely")
    frame = provider.get_stock_data(symbol, recent_start, recent_end)
    print(f"Fetched {len(frame)} rows")

    # Test 3: purely historical window — may already be fully cached.
    hist_start, hist_end = days_ago(60), days_ago(30)
    print(f"\nTest 3: Historical data ({hist_start} to {hist_end})")
    frame = provider.get_stock_data(symbol, hist_start, hist_end)
    print(f"Fetched {len(frame)} rows")

    print("\nSmart caching test completed!")
99 |
100 |
101 | if __name__ == "__main__":
102 | test_stock_data_caching_debug()
103 |
```
--------------------------------------------------------------------------------
/alembic/versions/014_add_portfolio_models.py:
--------------------------------------------------------------------------------
```python
1 | """Add portfolio management models
2 |
3 | Revision ID: 014_add_portfolio_models
4 | Revises: 013_add_backtest_persistence_models
5 | Create Date: 2025-11-01 12:00:00.000000
6 |
7 | This migration adds portfolio management models for tracking user investment holdings:
8 | 1. UserPortfolio - Portfolio metadata with user identification
9 | 2. PortfolioPosition - Individual position records with cost basis tracking
10 |
11 | Features:
12 | - Average cost basis tracking for educational simplicity
13 | - High-precision Decimal types for financial accuracy (Numeric(12,4) for prices, Numeric(20,8) for shares)
14 | - Support for fractional shares
15 | - Single-user design with user_id="default"
16 | - Cascade delete for data integrity
17 | - Comprehensive indexes for common query patterns
18 | """
19 |
20 | import sqlalchemy as sa
21 | from sqlalchemy.dialects import postgresql
22 |
23 | from alembic import op
24 |
25 | # revision identifiers, used by Alembic.
26 | revision = "014_add_portfolio_models"
27 | down_revision = "013_add_backtest_persistence_models"
28 | branch_labels = None
29 | depends_on = None
30 |
31 |
def upgrade() -> None:
    """Create portfolio management tables.

    Creates mcp_portfolios and mcp_portfolio_positions along with the
    indexes and unique constraints backing the common query patterns
    (lookup by user, by portfolio, and by portfolio+ticker).
    """

    # Create portfolios table
    # NOTE(review): postgresql.UUID ties this DDL to PostgreSQL; confirm
    # non-PostgreSQL deployments are handled elsewhere.
    op.create_table(
        "mcp_portfolios",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column(
            "user_id",
            sa.String(100),
            nullable=False,
            server_default="default",  # single-user design (see module docstring)
        ),
        sa.Column(
            "name",
            sa.String(200),
            nullable=False,
            server_default="My Portfolio",
        ),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.func.now(),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.func.now(),
            nullable=False,
        ),
    )

    # Create indexes on portfolios
    op.create_index("idx_portfolio_user", "mcp_portfolios", ["user_id"])
    # Each user may have at most one portfolio with a given name.
    op.create_unique_constraint(
        "uq_user_portfolio_name", "mcp_portfolios", ["user_id", "name"]
    )

    # Create positions table
    op.create_table(
        "mcp_portfolio_positions",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("portfolio_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("ticker", sa.String(20), nullable=False),
        # Numeric(20, 8) leaves room for fractional shares.
        sa.Column("shares", sa.Numeric(20, 8), nullable=False),
        # Numeric(12, 4) / Numeric(20, 4) give price-level precision.
        sa.Column("average_cost_basis", sa.Numeric(12, 4), nullable=False),
        sa.Column("total_cost", sa.Numeric(20, 4), nullable=False),
        sa.Column("purchase_date", sa.DateTime(timezone=True), nullable=False),
        sa.Column("notes", sa.Text, nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.func.now(),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.func.now(),
            nullable=False,
        ),
        # Deleting a portfolio cascades to its positions.
        sa.ForeignKeyConstraint(
            ["portfolio_id"], ["mcp_portfolios.id"], ondelete="CASCADE"
        ),
    )

    # Create indexes on positions
    op.create_index(
        "idx_position_portfolio", "mcp_portfolio_positions", ["portfolio_id"]
    )
    op.create_index("idx_position_ticker", "mcp_portfolio_positions", ["ticker"])
    op.create_index(
        "idx_position_portfolio_ticker",
        "mcp_portfolio_positions",
        ["portfolio_id", "ticker"],
    )
    # A ticker can appear only once per portfolio (average-cost model).
    op.create_unique_constraint(
        "uq_portfolio_position_ticker",
        "mcp_portfolio_positions",
        ["portfolio_id", "ticker"],
    )
114 |
115 |
def downgrade() -> None:
    """Drop portfolio management tables.

    Indexes and constraints are dropped implicitly with their tables.
    """
    # Drop positions table first (due to foreign key)
    op.drop_table("mcp_portfolio_positions")

    # Drop portfolios table
    op.drop_table("mcp_portfolios")
123 |
```
--------------------------------------------------------------------------------
/maverick_mcp/api/utils/insomnia_export.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Insomnia Collection Export Utility
3 |
4 | Converts OpenAPI specifications to Insomnia workspace format.
5 | """
6 |
import json
import uuid
from typing import Any
9 |
10 |
def convert_to_insomnia(openapi_dict: dict[str, Any]) -> dict[str, Any]:
    """
    Convert OpenAPI specification to Insomnia workspace format.

    Args:
        openapi_dict: OpenAPI specification dictionary

    Returns:
        Insomnia (export format 4) workspace dictionary containing a
        workspace resource, a base environment exposing the first server
        URL as ``base_url``, and one request resource per path/method.
    """
    info = openapi_dict.get("info", {})

    workspace: dict[str, Any] = {
        "_type": "export",
        "__export_format": 4,
        # Static placeholder; Insomnia only requires the field to exist.
        "__export_date": "2024-01-01T00:00:00.000Z",
        "__export_source": "maverick-mcp:openapi",
        "resources": [],
    }

    # Workspace resource that all requests hang off of.
    workspace_id = f"wrk_{uuid.uuid4().hex[:12]}"
    workspace["resources"].append(
        {
            "_id": workspace_id,
            "_type": "workspace",
            "name": info.get("title", "API Workspace"),
            "description": info.get("description", "Exported from OpenAPI spec"),
            "scope": "collection",
        }
    )

    # Base environment exposing {{ _.base_url }} from the first server entry.
    env_id = f"env_{uuid.uuid4().hex[:12]}"
    servers = openapi_dict.get("servers", [])
    base_url = (
        servers[0].get("url", "http://localhost:8000")
        if servers
        else "http://localhost:8000"
    )

    workspace["resources"].append(
        {
            "_id": env_id,
            "_type": "environment",
            "name": "Base Environment",
            "data": {"base_url": base_url},
            "dataPropertyOrder": {"&": ["base_url"]},
            "color": "#7d69cb",
            "isPrivate": False,
            "metaSortKey": 1,
            "parentId": workspace_id,
        }
    )

    # Convert each path/method pair into an Insomnia request resource.
    paths = openapi_dict.get("paths", {})
    for path, methods in paths.items():
        for method, operation in methods.items():
            # Skip non-operation keys (e.g. path-level "parameters") and
            # unsupported verbs.
            if method.upper() not in ["GET", "POST", "PUT", "DELETE", "PATCH"]:
                continue

            request_id = f"req_{uuid.uuid4().hex[:12]}"

            request: dict[str, Any] = {
                "_id": request_id,
                "_type": "request",
                "parentId": workspace_id,
                "name": operation.get("summary", f"{method.upper()} {path}"),
                "description": operation.get("description", ""),
                "url": "{{ _.base_url }}" + path,
                "method": method.upper(),
                "headers": [],
                "parameters": [],
                "body": {},
                "authentication": {},
            }

            # Add request body if present
            if "requestBody" in operation:
                content = operation["requestBody"].get("content", {})
                if "application/json" in content:
                    request["headers"].append(
                        {"name": "Content-Type", "value": "application/json"}
                    )
                    request["body"] = {"mimeType": "application/json", "text": "{}"}

                    # Pre-fill the body with the schema example when given.
                    # BUGFIX: serialize with json.dumps — str() on a dict
                    # yields Python repr (single quotes / True / None),
                    # which is not valid JSON for Insomnia to import.
                    schema = content["application/json"].get("schema", {})
                    if "example" in schema:
                        request["body"]["text"] = json.dumps(schema["example"])

            # Mirror query parameters; optional ones start disabled.
            if "parameters" in operation:
                for param in operation["parameters"]:
                    if param.get("in") == "query":
                        request["parameters"].append(
                            {
                                "name": param["name"],
                                "value": "",
                                "description": param.get("description", ""),
                                "disabled": not param.get("required", False),
                            }
                        )

            workspace["resources"].append(request)

    return workspace
118 |
```
--------------------------------------------------------------------------------
/tools/hot_reload.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python
2 | """
3 | Hot reload development tool for Maverick-MCP.
4 |
5 | This script watches for file changes and automatically restarts the server,
6 | providing instant feedback during development.
7 | """
8 |
9 | import os
10 | import subprocess
11 | import sys
12 | import time
13 | from pathlib import Path
14 | from typing import Any
15 |
16 | try:
17 | from watchdog.events import FileSystemEventHandler
18 | from watchdog.observers import Observer
19 | except ImportError:
20 | print("Installing watchdog for file watching...")
21 | subprocess.check_call([sys.executable, "-m", "pip", "install", "watchdog"])
22 | from watchdog.events import FileSystemEventHandler
23 | from watchdog.observers import Observer
24 |
25 |
class ReloadHandler(FileSystemEventHandler):
    """Watchdog handler that (re)launches the dev command whenever a
    relevant source or config file changes."""

    # Path fragments that never trigger a reload.
    _IGNORED_FRAGMENTS = (
        "__pycache__",
        ".pyc",
        ".git",
        ".pytest_cache",
        ".log",
        ".db",
        ".sqlite",
    )
    # Only changes to these file types are worth restarting for.
    _RELOAD_SUFFIXES = (".py", ".toml", ".yaml", ".yml", ".env")

    def __init__(self, command: list[str], debounce_seconds: float = 0.5):
        self.command = command
        self.debounce_seconds = debounce_seconds
        self.last_reload = 0
        self.process: subprocess.Popen[Any] | None = None
        self.start_process()

    def start_process(self):
        """(Re)start the managed process, terminating any previous one."""
        if self.process:
            print("🔄 Stopping previous process...")
            self.process.terminate()
            try:
                self.process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                # Graceful stop timed out; force-kill.
                self.process.kill()

        print(f"🚀 Starting: {' '.join(self.command)}")
        self.process = subprocess.Popen(self.command)
        self.last_reload = time.time()

    def on_modified(self, event):
        """Restart the process on a relevant file change, debounced."""
        if event.is_directory:
            return

        changed = Path(event.src_path)
        path_text = str(changed)

        # Ignore caches, VCS internals, logs and databases.
        if any(fragment in path_text for fragment in self._IGNORED_FRAGMENTS):
            return

        # Only reload for Python source and config files.
        if changed.suffix not in self._RELOAD_SUFFIXES:
            return

        # Debounce rapid successive changes.
        if time.time() - self.last_reload < self.debounce_seconds:
            return

        print(f"\n📝 File changed: {changed}")
        self.start_process()

    def cleanup(self):
        """Terminate the managed process and wait for it to exit."""
        if self.process:
            self.process.terminate()
            self.process.wait()
87 | self.process.wait()
88 |
89 |
def main():
    """Main entry point for hot reload.

    Parses CLI options, starts the watched command via ReloadHandler,
    and blocks until Ctrl+C.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Hot reload for Maverick-MCP")
    parser.add_argument(
        "--command",
        default="make backend",
        help="Command to run (default: make backend)",
    )
    # BUGFIX: with action="append", a non-None default is never replaced —
    # user-supplied --watch dirs were *added to* the default
    # ["maverick_mcp"], making it impossible to watch something instead of
    # it. Use default=None and fall back after parsing.
    parser.add_argument(
        "--watch",
        action="append",
        default=None,
        help="Directories to watch (can be specified multiple times)",
    )
    # NOTE(review): --exclude is parsed but currently unused below; kept
    # for CLI compatibility.
    parser.add_argument(
        "--exclude",
        action="append",
        default=[],
        help="Patterns to exclude from watching",
    )
    args = parser.parse_args()

    # Fall back to the default watch list only when none were given.
    watch_dirs = args.watch if args.watch else ["maverick_mcp"]

    # Parse command
    command = args.command.split() if isinstance(args.command, str) else args.command

    # Set up file watcher
    event_handler = ReloadHandler(command)
    observer = Observer()

    # Watch specified directories
    for watch_dir in watch_dirs:
        if os.path.exists(watch_dir):
            print(f"👀 Watching: {watch_dir}")
            observer.schedule(event_handler, watch_dir, recursive=True)
        else:
            print(f"⚠️  Directory not found: {watch_dir}")

    observer.start()

    print("\n✨ Hot reload active! Press Ctrl+C to stop.\n")

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("\n👋 Stopping hot reload...")
        observer.stop()
        event_handler.cleanup()

    observer.join()
142 |
143 |
144 | if __name__ == "__main__":
145 | main()
146 |
```
--------------------------------------------------------------------------------
/maverick_mcp/tests/test_mailgun_email.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Test script for Mailgun email integration.
3 |
4 | Run this script to test your Mailgun configuration:
5 | python maverick_mcp/tests/test_mailgun_email.py
6 | """
7 |
8 | import asyncio
9 | import os
10 | import sys
11 | from pathlib import Path
12 |
13 | import pytest
14 |
15 | # Add parent directory to path
16 | sys.path.insert(0, str(Path(__file__).parent.parent.parent))
17 |
18 | from maverick_mcp.config.settings import settings
19 | from maverick_mcp.utils.email_service import (
20 | MailgunService,
21 | send_api_key_email,
22 | send_welcome_email,
23 | )
24 |
25 |
26 | @pytest.mark.asyncio
27 | @pytest.mark.integration
28 | async def test_mailgun_config():
29 | """Test Mailgun configuration."""
30 | print("=" * 60)
31 | print("Testing Mailgun Configuration")
32 | print("=" * 60)
33 |
34 | print(f"Email Enabled: {settings.email.enabled}")
35 | print(f"Mailgun Domain: {settings.email.mailgun_domain}")
36 | print(f"From Address: {settings.email.from_address}")
37 | print(f"From Name: {settings.email.from_name}")
38 | print(f"API Key Set: {'Yes' if settings.email.mailgun_api_key else 'No'}")
39 |
40 | if not settings.email.mailgun_api_key:
41 | print("\n❌ Mailgun API key not configured!")
42 | print("Please set MAILGUN_API_KEY in your .env file")
43 | return False
44 |
45 | if not settings.email.mailgun_domain:
46 | print("\n❌ Mailgun domain not configured!")
47 | print("Please set MAILGUN_DOMAIN in your .env file")
48 | return False
49 |
50 | print("\n✅ Mailgun configuration looks good!")
51 | return True
52 |
53 |
54 | @pytest.mark.asyncio
55 | @pytest.mark.integration
56 | async def test_send_email():
57 | """Test sending a basic email."""
58 | print("\n" + "=" * 60)
59 | print("Testing Basic Email Send")
60 | print("=" * 60)
61 |
62 | # Get test email from environment or use default
63 | test_email = os.getenv("TEST_EMAIL", "[email protected]")
64 |
65 | service = MailgunService()
66 |
67 | success = await service.send_email(
68 | to=test_email,
69 | subject="Test Email from Maverick-MCP",
70 | text="This is a test email to verify Mailgun integration.",
71 | html="<h1>Test Email</h1><p>This is a test email to verify Mailgun integration.</p>",
72 | tags=["test", "integration"],
73 | metadata={"test": "true", "source": "test_script"},
74 | )
75 |
76 | if success:
77 | print(f"✅ Test email sent successfully to {test_email}")
78 | else:
79 | print(f"❌ Failed to send test email to {test_email}")
80 |
81 | return success
82 |
83 |
84 | @pytest.mark.asyncio
85 | @pytest.mark.integration
86 | async def test_email_templates():
87 | """Test all email templates."""
88 | print("\n" + "=" * 60)
89 | print("Testing Email Templates")
90 | print("=" * 60)
91 |
92 | test_email = os.getenv("TEST_EMAIL", "[email protected]")
93 | test_name = "Test User"
94 |
95 | # Test welcome email
96 | print("\n1. Testing Welcome Email...")
97 | success = await send_welcome_email(test_email, test_name)
98 | print("✅ Welcome email sent" if success else "❌ Welcome email failed")
99 |
100 | # Test API key email
101 | print("\n2. Testing API Key Email...")
102 | success = await send_api_key_email(test_email, test_name, "test_1234567890")
103 | print("✅ API key email sent" if success else "❌ API key email failed")
104 |
105 |
106 | async def main():
107 | """Run all tests."""
108 | print("\nMaverick-MCP Mailgun Email Test Suite")
109 | print("=====================================\n")
110 |
111 | # Test configuration
112 | if not await test_mailgun_config():
113 | print("\nPlease configure Mailgun before running tests.")
114 | print("See .env.mailgun.example for configuration details.")
115 | return
116 |
117 | # Ask if user wants to send test emails
118 | print("\nWould you like to send test emails? (y/n)")
119 | response = input().strip().lower()
120 |
121 | if response == "y":
122 | test_email = input(
123 | "Enter test email address (or press Enter for default): "
124 | ).strip()
125 | if test_email:
126 | os.environ["TEST_EMAIL"] = test_email
127 |
128 | # Send test emails
129 | await test_send_email()
130 |
131 | print("\nWould you like to test all email templates? (y/n)")
132 | if input().strip().lower() == "y":
133 | await test_email_templates()
134 |
135 | print("\n" + "=" * 60)
136 | print("Test Complete!")
137 | print("=" * 60)
138 |
139 |
140 | if __name__ == "__main__":
141 | asyncio.run(main())
142 |
```
--------------------------------------------------------------------------------
/maverick_mcp/data/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Data utilities for Maverick-MCP.
3 |
4 | This package contains data caching, processing and storage utilities.
5 | """
6 |
7 | # Core data functionality - conditional imports to handle missing dependencies
8 | __all__ = []
9 |
10 | # Try to import core cache and model functionality
11 | try:
12 | from .cache import get_from_cache as _get_from_cache
13 | from .cache import save_to_cache as _save_to_cache
14 |
15 | get_from_cache = _get_from_cache
16 | save_to_cache = _save_to_cache
17 | __all__.extend(["get_from_cache", "save_to_cache"])
18 | except ImportError:
19 | # Cache functionality not available (missing msgpack)
20 | pass
21 |
22 | try:
23 | from .models import (
24 | MaverickBearStocks as _MaverickBearStocks,
25 | )
26 | from .models import (
27 | MaverickStocks as _MaverickStocks,
28 | )
29 | from .models import (
30 | PriceCache as _PriceCache,
31 | )
32 | from .models import (
33 | SessionLocal as _SessionLocal,
34 | )
35 | from .models import (
36 | Stock as _Stock,
37 | )
38 | from .models import (
39 | SupplyDemandBreakoutStocks as _SupplyDemandBreakoutStocks,
40 | )
41 | from .models import (
42 | bulk_insert_price_data as _bulk_insert_price_data,
43 | )
44 | from .models import (
45 | ensure_database_schema as _ensure_database_schema,
46 | )
47 | from .models import (
48 | get_db as _get_db,
49 | )
50 | from .models import (
51 | get_latest_maverick_screening as _get_latest_maverick_screening,
52 | )
53 | from .models import (
54 | init_db as _init_db,
55 | )
56 |
57 | MaverickBearStocks = _MaverickBearStocks
58 | MaverickStocks = _MaverickStocks
59 | PriceCache = _PriceCache
60 | SessionLocal = _SessionLocal
61 | Stock = _Stock
62 | SupplyDemandBreakoutStocks = _SupplyDemandBreakoutStocks
63 | bulk_insert_price_data = _bulk_insert_price_data
64 | ensure_database_schema = _ensure_database_schema
65 | get_db = _get_db
66 | get_latest_maverick_screening = _get_latest_maverick_screening
67 | init_db = _init_db
68 |
69 | __all__.extend(
70 | [
71 | "Stock",
72 | "PriceCache",
73 | "MaverickStocks",
74 | "MaverickBearStocks",
75 | "SupplyDemandBreakoutStocks",
76 | "SessionLocal",
77 | "get_db",
78 | "init_db",
79 | "ensure_database_schema",
80 | "bulk_insert_price_data",
81 | "get_latest_maverick_screening",
82 | ]
83 | )
84 | except ImportError:
85 | # Model functionality not available (missing SQLAlchemy or other deps)
86 | pass
87 |
88 | # Always try to import validation - it's critical for production validation test
89 | try:
90 | from .validation import (
91 | DataValidator,
92 | validate_backtest_data,
93 | validate_stock_data,
94 | )
95 |
96 | # Create module-level validation instance for easy access
97 | validation = DataValidator()
98 |
99 | __all__.extend(
100 | [
101 | "DataValidator",
102 | "validate_stock_data",
103 | "validate_backtest_data",
104 | "validation",
105 | ]
106 | )
107 | except ImportError as import_error:
108 | # If validation can't be imported, create a minimal stub
109 | error_message = (
110 | f"Validation functionality requires additional dependencies: {import_error}"
111 | )
112 |
113 | def _raise_validation_import_error() -> None:
114 | raise ImportError(error_message)
115 |
116 | class ValidationStub:
117 | """Minimal validation stub when dependencies aren't available."""
118 |
119 | def __getattr__(self, name):
120 | _raise_validation_import_error()
121 |
122 | # Static method stubs
123 | @staticmethod
124 | def validate_date_range(*args, **kwargs):
125 | _raise_validation_import_error()
126 |
127 | @staticmethod
128 | def validate_data_quality(*args, **kwargs):
129 | _raise_validation_import_error()
130 |
131 | @staticmethod
132 | def validate_price_data(*args, **kwargs):
133 | _raise_validation_import_error()
134 |
135 | @staticmethod
136 | def validate_batch_data(*args, **kwargs):
137 | _raise_validation_import_error()
138 |
139 | validation = ValidationStub()
140 | DataValidator = ValidationStub
141 |
142 | def validate_stock_data(*args, **kwargs):
143 | return {"error": "Dependencies not available"}
144 |
145 | def validate_backtest_data(*args, **kwargs):
146 | return {"error": "Dependencies not available"}
147 |
148 | __all__.extend(
149 | [
150 | "DataValidator",
151 | "validate_stock_data",
152 | "validate_backtest_data",
153 | "validation",
154 | ]
155 | )
156 |
```
--------------------------------------------------------------------------------
/maverick_mcp/utils/stock_helpers.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Stock data helper utilities for routers.
3 |
4 | This module provides common stock data fetching and processing utilities
5 | that are shared across multiple routers to avoid code duplication.
6 | """
7 |
8 | import asyncio
9 | from concurrent.futures import ThreadPoolExecutor
10 | from datetime import UTC, datetime, timedelta
11 |
12 | import pandas as pd
13 |
14 | from maverick_mcp.providers.stock_data import EnhancedStockDataProvider
15 |
16 | # Thread pool for async operations
17 | executor = ThreadPoolExecutor(max_workers=4)
18 |
19 |
20 | def get_stock_dataframe(ticker: str, days: int = 365) -> pd.DataFrame:
21 | """
22 | Get stock data as a DataFrame with technical indicators.
23 |
24 | Args:
25 | ticker: Stock ticker symbol (e.g., "AAPL")
26 | days: Number of days of historical data to fetch (default: 365)
27 |
28 | Returns:
29 | DataFrame with stock price data and technical indicators
30 |
31 | Raises:
32 | ValueError: If ticker is invalid or data cannot be fetched
33 | """
34 | from maverick_mcp.core.technical_analysis import add_technical_indicators
35 |
36 | # Calculate date range
37 | end_date = datetime.now(UTC)
38 | start_date = end_date - timedelta(days=days)
39 |
40 | start_str = start_date.strftime("%Y-%m-%d")
41 | end_str = end_date.strftime("%Y-%m-%d")
42 |
43 | # Get stock data provider
44 | stock_provider = EnhancedStockDataProvider()
45 |
46 | # Fetch data and add technical indicators
47 | df = stock_provider.get_stock_data(ticker, start_str, end_str)
48 | df = add_technical_indicators(df)
49 |
50 | return df
51 |
52 |
async def get_stock_dataframe_async(ticker: str, days: int = 365) -> pd.DataFrame:
    """
    Async wrapper for get_stock_dataframe to avoid blocking the event loop.

    Args:
        ticker: Stock ticker symbol (e.g., "AAPL")
        days: Number of days of historical data to fetch (default: 365)

    Returns:
        DataFrame with stock price data and technical indicators

    Raises:
        ValueError: If ticker is invalid or data cannot be fetched
    """
    # get_running_loop() is the correct call inside a coroutine: we are
    # guaranteed a running loop here, and get_event_loop() is deprecated in
    # this context since Python 3.10 (it may warn or bind the wrong loop).
    loop = asyncio.get_running_loop()
    # Run the blocking fetch in the shared thread pool so the event loop
    # stays responsive while network/CPU work happens off-thread.
    return await loop.run_in_executor(executor, get_stock_dataframe, ticker, days)
69 |
70 |
async def get_multiple_stock_dataframes_async(
    tickers: list[str], days: int = 365
) -> dict[str, pd.DataFrame]:
    """
    Fetch multiple stock dataframes concurrently.

    Args:
        tickers: List of stock ticker symbols
        days: Number of days of historical data to fetch (default: 365)

    Returns:
        Dictionary mapping ticker symbols to their DataFrames

    Raises:
        ValueError: If any ticker is invalid or data cannot be fetched
    """
    # Kick off all fetches at once; gather preserves input order, so the
    # results line up positionally with `tickers`.
    frames = await asyncio.gather(
        *(get_stock_dataframe_async(symbol, days) for symbol in tickers)
    )
    return {symbol: frame for symbol, frame in zip(tickers, frames, strict=False)}
91 |
92 |
def validate_ticker(ticker: str) -> str:
    """
    Validate and normalize a stock ticker symbol.

    Args:
        ticker: Stock ticker symbol to validate

    Returns:
        Normalized ticker symbol (uppercase, stripped)

    Raises:
        ValueError: If ticker is invalid
    """
    # Reject non-strings and falsy values (None, "") up front.
    if not ticker or not isinstance(ticker, str):  # type: ignore[arg-type]
        raise ValueError("Ticker must be a non-empty string")

    normalized = ticker.strip().upper()
    if not normalized:
        raise ValueError("Ticker cannot be empty")

    # Allowed characters: letters/digits plus "." and "-" (class shares,
    # e.g. BRK.B or BF-B). Strip the separators and check what remains.
    bare = normalized.replace(".", "").replace("-", "")
    if not bare.isalnum():
        raise ValueError("Ticker contains invalid characters")

    if len(normalized) > 10:
        raise ValueError("Ticker is too long (max 10 characters)")

    return normalized
122 |
123 |
124 | def calculate_date_range(days: int) -> tuple[str, str]:
125 | """
126 | Calculate start and end date strings for stock data fetching.
127 |
128 | Args:
129 | days: Number of days of historical data
130 |
131 | Returns:
132 | Tuple of (start_date_str, end_date_str) in YYYY-MM-DD format
133 |
134 | Raises:
135 | ValueError: If days is not a positive integer
136 | """
137 | if not isinstance(days, int) or days <= 0: # type: ignore[arg-type]
138 | raise ValueError("Days must be a positive integer")
139 |
140 | if days > 3650: # ~10 years
141 | raise ValueError("Days cannot exceed 3650 (10 years)")
142 |
143 | end_date = datetime.now(UTC)
144 | start_date = end_date - timedelta(days=days)
145 |
146 | start_str = start_date.strftime("%Y-%m-%d")
147 | end_str = end_date.strftime("%Y-%m-%d")
148 |
149 | return start_str, end_str
150 |
```