This is page 1 of 29. Use http://codebase.md/wshobson/maverick-mcp?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .env.example
├── .github
│ ├── dependabot.yml
│ ├── FUNDING.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── config.yml
│ │ ├── feature_request.md
│ │ ├── question.md
│ │ └── security_report.md
│ ├── pull_request_template.md
│ └── workflows
│ ├── claude-code-review.yml
│ └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│ ├── launch.json
│ └── settings.json
├── alembic
│ ├── env.py
│ ├── script.py.mako
│ └── versions
│ ├── 001_initial_schema.py
│ ├── 003_add_performance_indexes.py
│ ├── 006_rename_metadata_columns.py
│ ├── 008_performance_optimization_indexes.py
│ ├── 009_rename_to_supply_demand.py
│ ├── 010_self_contained_schema.py
│ ├── 011_remove_proprietary_terms.py
│ ├── 013_add_backtest_persistence_models.py
│ ├── 014_add_portfolio_models.py
│ ├── 08e3945a0c93_merge_heads.py
│ ├── 9374a5c9b679_merge_heads_for_testing.py
│ ├── abf9b9afb134_merge_multiple_heads.py
│ ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│ ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│ ├── f0696e2cac15_add_essential_performance_indexes.py
│ └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── api
│ │ └── backtesting.md
│ ├── BACKTESTING.md
│ ├── COST_BASIS_SPECIFICATION.md
│ ├── deep_research_agent.md
│ ├── exa_research_testing_strategy.md
│ ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│ ├── PORTFOLIO.md
│ ├── SETUP_SELF_CONTAINED.md
│ └── speed_testing_framework.md
├── examples
│ ├── complete_speed_validation.py
│ ├── deep_research_integration.py
│ ├── llm_optimization_example.py
│ ├── llm_speed_demo.py
│ ├── monitoring_example.py
│ ├── parallel_research_example.py
│ ├── speed_optimization_demo.py
│ └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│ ├── __init__.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── circuit_breaker.py
│ │ ├── deep_research.py
│ │ ├── market_analysis.py
│ │ ├── optimized_research.py
│ │ ├── supervisor.py
│ │ └── technical_analysis.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── api_server.py
│ │ ├── connection_manager.py
│ │ ├── dependencies
│ │ │ ├── __init__.py
│ │ │ ├── stock_analysis.py
│ │ │ └── technical_analysis.py
│ │ ├── error_handling.py
│ │ ├── inspector_compatible_sse.py
│ │ ├── inspector_sse.py
│ │ ├── middleware
│ │ │ ├── error_handling.py
│ │ │ ├── mcp_logging.py
│ │ │ ├── rate_limiting_enhanced.py
│ │ │ └── security.py
│ │ ├── openapi_config.py
│ │ ├── routers
│ │ │ ├── __init__.py
│ │ │ ├── agents.py
│ │ │ ├── backtesting.py
│ │ │ ├── data_enhanced.py
│ │ │ ├── data.py
│ │ │ ├── health_enhanced.py
│ │ │ ├── health_tools.py
│ │ │ ├── health.py
│ │ │ ├── intelligent_backtesting.py
│ │ │ ├── introspection.py
│ │ │ ├── mcp_prompts.py
│ │ │ ├── monitoring.py
│ │ │ ├── news_sentiment_enhanced.py
│ │ │ ├── performance.py
│ │ │ ├── portfolio.py
│ │ │ ├── research.py
│ │ │ ├── screening_ddd.py
│ │ │ ├── screening_parallel.py
│ │ │ ├── screening.py
│ │ │ ├── technical_ddd.py
│ │ │ ├── technical_enhanced.py
│ │ │ ├── technical.py
│ │ │ └── tool_registry.py
│ │ ├── server.py
│ │ ├── services
│ │ │ ├── __init__.py
│ │ │ ├── base_service.py
│ │ │ ├── market_service.py
│ │ │ ├── portfolio_service.py
│ │ │ ├── prompt_service.py
│ │ │ └── resource_service.py
│ │ ├── simple_sse.py
│ │ └── utils
│ │ ├── __init__.py
│ │ ├── insomnia_export.py
│ │ └── postman_export.py
│ ├── application
│ │ ├── __init__.py
│ │ ├── commands
│ │ │ └── __init__.py
│ │ ├── dto
│ │ │ ├── __init__.py
│ │ │ └── technical_analysis_dto.py
│ │ ├── queries
│ │ │ ├── __init__.py
│ │ │ └── get_technical_analysis.py
│ │ └── screening
│ │ ├── __init__.py
│ │ ├── dtos.py
│ │ └── queries.py
│ ├── backtesting
│ │ ├── __init__.py
│ │ ├── ab_testing.py
│ │ ├── analysis.py
│ │ ├── batch_processing_stub.py
│ │ ├── batch_processing.py
│ │ ├── model_manager.py
│ │ ├── optimization.py
│ │ ├── persistence.py
│ │ ├── retraining_pipeline.py
│ │ ├── strategies
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── ml
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adaptive.py
│ │ │ │ ├── ensemble.py
│ │ │ │ ├── feature_engineering.py
│ │ │ │ └── regime_aware.py
│ │ │ ├── ml_strategies.py
│ │ │ ├── parser.py
│ │ │ └── templates.py
│ │ ├── strategy_executor.py
│ │ ├── vectorbt_engine.py
│ │ └── visualization.py
│ ├── config
│ │ ├── __init__.py
│ │ ├── constants.py
│ │ ├── database_self_contained.py
│ │ ├── database.py
│ │ ├── llm_optimization_config.py
│ │ ├── logging_settings.py
│ │ ├── plotly_config.py
│ │ ├── security_utils.py
│ │ ├── security.py
│ │ ├── settings.py
│ │ ├── technical_constants.py
│ │ ├── tool_estimation.py
│ │ └── validation.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── technical_analysis.py
│ │ └── visualization.py
│ ├── data
│ │ ├── __init__.py
│ │ ├── cache_manager.py
│ │ ├── cache.py
│ │ ├── django_adapter.py
│ │ ├── health.py
│ │ ├── models.py
│ │ ├── performance.py
│ │ ├── session_management.py
│ │ └── validation.py
│ ├── database
│ │ ├── __init__.py
│ │ ├── base.py
│ │ └── optimization.py
│ ├── dependencies.py
│ ├── domain
│ │ ├── __init__.py
│ │ ├── entities
│ │ │ ├── __init__.py
│ │ │ └── stock_analysis.py
│ │ ├── events
│ │ │ └── __init__.py
│ │ ├── portfolio.py
│ │ ├── screening
│ │ │ ├── __init__.py
│ │ │ ├── entities.py
│ │ │ ├── services.py
│ │ │ └── value_objects.py
│ │ ├── services
│ │ │ ├── __init__.py
│ │ │ └── technical_analysis_service.py
│ │ ├── stock_analysis
│ │ │ ├── __init__.py
│ │ │ └── stock_analysis_service.py
│ │ └── value_objects
│ │ ├── __init__.py
│ │ └── technical_indicators.py
│ ├── exceptions.py
│ ├── infrastructure
│ │ ├── __init__.py
│ │ ├── cache
│ │ │ └── __init__.py
│ │ ├── caching
│ │ │ ├── __init__.py
│ │ │ └── cache_management_service.py
│ │ ├── connection_manager.py
│ │ ├── data_fetching
│ │ │ ├── __init__.py
│ │ │ └── stock_data_service.py
│ │ ├── health
│ │ │ ├── __init__.py
│ │ │ └── health_checker.py
│ │ ├── persistence
│ │ │ ├── __init__.py
│ │ │ └── stock_repository.py
│ │ ├── providers
│ │ │ └── __init__.py
│ │ ├── screening
│ │ │ ├── __init__.py
│ │ │ └── repositories.py
│ │ └── sse_optimizer.py
│ ├── langchain_tools
│ │ ├── __init__.py
│ │ ├── adapters.py
│ │ └── registry.py
│ ├── logging_config.py
│ ├── memory
│ │ ├── __init__.py
│ │ └── stores.py
│ ├── monitoring
│ │ ├── __init__.py
│ │ ├── health_check.py
│ │ ├── health_monitor.py
│ │ ├── integration_example.py
│ │ ├── metrics.py
│ │ ├── middleware.py
│ │ └── status_dashboard.py
│ ├── providers
│ │ ├── __init__.py
│ │ ├── dependencies.py
│ │ ├── factories
│ │ │ ├── __init__.py
│ │ │ ├── config_factory.py
│ │ │ └── provider_factory.py
│ │ ├── implementations
│ │ │ ├── __init__.py
│ │ │ ├── cache_adapter.py
│ │ │ ├── macro_data_adapter.py
│ │ │ ├── market_data_adapter.py
│ │ │ ├── persistence_adapter.py
│ │ │ └── stock_data_adapter.py
│ │ ├── interfaces
│ │ │ ├── __init__.py
│ │ │ ├── cache.py
│ │ │ ├── config.py
│ │ │ ├── macro_data.py
│ │ │ ├── market_data.py
│ │ │ ├── persistence.py
│ │ │ └── stock_data.py
│ │ ├── llm_factory.py
│ │ ├── macro_data.py
│ │ ├── market_data.py
│ │ ├── mocks
│ │ │ ├── __init__.py
│ │ │ ├── mock_cache.py
│ │ │ ├── mock_config.py
│ │ │ ├── mock_macro_data.py
│ │ │ ├── mock_market_data.py
│ │ │ ├── mock_persistence.py
│ │ │ └── mock_stock_data.py
│ │ ├── openrouter_provider.py
│ │ ├── optimized_screening.py
│ │ ├── optimized_stock_data.py
│ │ └── stock_data.py
│ ├── README.md
│ ├── tests
│ │ ├── __init__.py
│ │ ├── README_INMEMORY_TESTS.md
│ │ ├── test_cache_debug.py
│ │ ├── test_fixes_validation.py
│ │ ├── test_in_memory_routers.py
│ │ ├── test_in_memory_server.py
│ │ ├── test_macro_data_provider.py
│ │ ├── test_mailgun_email.py
│ │ ├── test_market_calendar_caching.py
│ │ ├── test_mcp_tool_fixes_pytest.py
│ │ ├── test_mcp_tool_fixes.py
│ │ ├── test_mcp_tools.py
│ │ ├── test_models_functional.py
│ │ ├── test_server.py
│ │ ├── test_stock_data_enhanced.py
│ │ ├── test_stock_data_provider.py
│ │ └── test_technical_analysis.py
│ ├── tools
│ │ ├── __init__.py
│ │ ├── performance_monitoring.py
│ │ ├── portfolio_manager.py
│ │ ├── risk_management.py
│ │ └── sentiment_analysis.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── agent_errors.py
│ │ ├── batch_processing.py
│ │ ├── cache_warmer.py
│ │ ├── circuit_breaker_decorators.py
│ │ ├── circuit_breaker_services.py
│ │ ├── circuit_breaker.py
│ │ ├── data_chunking.py
│ │ ├── database_monitoring.py
│ │ ├── debug_utils.py
│ │ ├── fallback_strategies.py
│ │ ├── llm_optimization.py
│ │ ├── logging_example.py
│ │ ├── logging_init.py
│ │ ├── logging.py
│ │ ├── mcp_logging.py
│ │ ├── memory_profiler.py
│ │ ├── monitoring_middleware.py
│ │ ├── monitoring.py
│ │ ├── orchestration_logging.py
│ │ ├── parallel_research.py
│ │ ├── parallel_screening.py
│ │ ├── quick_cache.py
│ │ ├── resource_manager.py
│ │ ├── shutdown.py
│ │ ├── stock_helpers.py
│ │ ├── structured_logger.py
│ │ ├── tool_monitoring.py
│ │ ├── tracing.py
│ │ └── yfinance_pool.py
│ ├── validation
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── data.py
│ │ ├── middleware.py
│ │ ├── portfolio.py
│ │ ├── responses.py
│ │ ├── screening.py
│ │ └── technical.py
│ └── workflows
│ ├── __init__.py
│ ├── agents
│ │ ├── __init__.py
│ │ ├── market_analyzer.py
│ │ ├── optimizer_agent.py
│ │ ├── strategy_selector.py
│ │ └── validator_agent.py
│ ├── backtesting_workflow.py
│ └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│ ├── dev.sh
│ ├── INSTALLATION_GUIDE.md
│ ├── load_example.py
│ ├── load_market_data.py
│ ├── load_tiingo_data.py
│ ├── migrate_db.py
│ ├── README_TIINGO_LOADER.md
│ ├── requirements_tiingo.txt
│ ├── run_stock_screening.py
│ ├── run-migrations.sh
│ ├── seed_db.py
│ ├── seed_sp500.py
│ ├── setup_database.sh
│ ├── setup_self_contained.py
│ ├── setup_sp500_database.sh
│ ├── test_seeded_data.py
│ ├── test_tiingo_loader.py
│ ├── tiingo_config.py
│ └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│ ├── __init__.py
│ ├── conftest.py
│ ├── core
│ │ └── test_technical_analysis.py
│ ├── data
│ │ └── test_portfolio_models.py
│ ├── domain
│ │ ├── conftest.py
│ │ ├── test_portfolio_entities.py
│ │ └── test_technical_analysis_service.py
│ ├── fixtures
│ │ └── orchestration_fixtures.py
│ ├── integration
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── README.md
│ │ ├── run_integration_tests.sh
│ │ ├── test_api_technical.py
│ │ ├── test_chaos_engineering.py
│ │ ├── test_config_management.py
│ │ ├── test_full_backtest_workflow_advanced.py
│ │ ├── test_full_backtest_workflow.py
│ │ ├── test_high_volume.py
│ │ ├── test_mcp_tools.py
│ │ ├── test_orchestration_complete.py
│ │ ├── test_portfolio_persistence.py
│ │ ├── test_redis_cache.py
│ │ ├── test_security_integration.py.disabled
│ │ └── vcr_setup.py
│ ├── performance
│ │ ├── __init__.py
│ │ ├── test_benchmarks.py
│ │ ├── test_load.py
│ │ ├── test_profiling.py
│ │ └── test_stress.py
│ ├── providers
│ │ └── test_stock_data_simple.py
│ ├── README.md
│ ├── test_agents_router_mcp.py
│ ├── test_backtest_persistence.py
│ ├── test_cache_management_service.py
│ ├── test_cache_serialization.py
│ ├── test_circuit_breaker.py
│ ├── test_database_pool_config_simple.py
│ ├── test_database_pool_config.py
│ ├── test_deep_research_functional.py
│ ├── test_deep_research_integration.py
│ ├── test_deep_research_parallel_execution.py
│ ├── test_error_handling.py
│ ├── test_event_loop_integrity.py
│ ├── test_exa_research_integration.py
│ ├── test_exception_hierarchy.py
│ ├── test_financial_search.py
│ ├── test_graceful_shutdown.py
│ ├── test_integration_simple.py
│ ├── test_langgraph_workflow.py
│ ├── test_market_data_async.py
│ ├── test_market_data_simple.py
│ ├── test_mcp_orchestration_functional.py
│ ├── test_ml_strategies.py
│ ├── test_optimized_research_agent.py
│ ├── test_orchestration_integration.py
│ ├── test_orchestration_logging.py
│ ├── test_orchestration_tools_simple.py
│ ├── test_parallel_research_integration.py
│ ├── test_parallel_research_orchestrator.py
│ ├── test_parallel_research_performance.py
│ ├── test_performance_optimizations.py
│ ├── test_production_validation.py
│ ├── test_provider_architecture.py
│ ├── test_rate_limiting_enhanced.py
│ ├── test_runner_validation.py
│ ├── test_security_comprehensive.py.disabled
│ ├── test_security_cors.py
│ ├── test_security_enhancements.py.disabled
│ ├── test_security_headers.py
│ ├── test_security_penetration.py
│ ├── test_session_management.py
│ ├── test_speed_optimization_validation.py
│ ├── test_stock_analysis_dependencies.py
│ ├── test_stock_analysis_service.py
│ ├── test_stock_data_fetching_service.py
│ ├── test_supervisor_agent.py
│ ├── test_supervisor_functional.py
│ ├── test_tool_estimation_config.py
│ ├── test_visualization.py
│ └── utils
│ ├── test_agent_errors.py
│ ├── test_logging.py
│ ├── test_parallel_screening.py
│ └── test_quick_cache.py
├── tools
│ ├── check_orchestration_config.py
│ ├── experiments
│ │ ├── validation_examples.py
│ │ └── validation_fixed.py
│ ├── fast_dev.sh
│ ├── hot_reload.py
│ ├── quick_test.py
│ └── templates
│ ├── new_router_template.py
│ ├── new_tool_template.py
│ ├── screening_strategy_template.py
│ └── test_template.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
```
3.12
```
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
```
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
.venv/
venv/
ENV/
env/
*.egg-info/
dist/
build/
.pytest_cache/
.coverage
.mypy_cache/
.ruff_cache/
# Environment files
.env
.env.*
!.env.example
# Git
.git/
.gitignore
# Docker
Dockerfile
docker-compose.yml
.dockerignore
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db
# Project specific
*.log
*.db
*.sqlite
*.sqlite3
test_cache_debug.py
test_market_calendar_caching.py
# Documentation
*.md
!README.md
docs/
# CI/CD
.github/
.gitlab-ci.yml
.travis.yml
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
# Python bytecode
__pycache__/
*.py[cod]
*$py.class
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
tests/output/
.ruff_cache/
# Static analysis outputs
pyright_analysis.txt
# Jupyter Notebook
.ipynb_checkpoints
# Virtual environments
.env
.env.prod
.env.local
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# IDE files
.idea/
*.swp
*.swo
# OS specific files
.DS_Store
Thumbs.db
# Project specific
*.log
# Test files in root (should be in tests/)
/test_*.py
# Generated API clients
/generated/
/openapi-spec.json
# database
*.db
# Docker local overrides
docker-compose.override.yml
```
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
```
# MaverickMCP Environment Configuration Example
# Copy this file to .env and update with your values
# Personal stock analysis MCP server - no authentication or billing required
# Application Settings
APP_NAME=MaverickMCP
ENVIRONMENT=development
LOG_LEVEL=info
API_VERSION=v1
# API Settings
API_HOST=0.0.0.0
API_PORT=8000
API_DEBUG=false
# Database Settings (for stock data caching and storage)
# For simple setup, use SQLite: sqlite:///maverick_mcp.db
# For more performance, use PostgreSQL: postgresql://localhost/maverick_mcp_development
DATABASE_URL=sqlite:///maverick_mcp.db
# Redis Configuration (for caching market data)
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_DB=0
REDIS_PASSWORD=
REDIS_SSL=false
# Cache Configuration
CACHE_TTL_SECONDS=604800 # 7 days
CACHE_ENABLED=true
# CORS Configuration (for local development)
ALLOWED_ORIGINS=http://localhost:3000,http://localhost:3001
# Monitoring & Analytics (optional)
SENTRY_DSN=https://your_public_key@sentry.io/your_project_id
# Required API Keys
# Get free Tiingo key at https://tiingo.com (500 requests/day free)
TIINGO_API_KEY=your_tiingo_api_key_here
# Optional API Keys
# FRED_API_KEY=your_fred_api_key_here
# OPENAI_API_KEY=your_openai_api_key_here
# ANTHROPIC_API_KEY=your_anthropic_api_key_here
# Web Search API Keys (for DeepResearchAgent)
# Get Exa API key at https://exa.ai (free tier available)
EXA_API_KEY=your_exa_api_key_here
# Get Tavily API key at https://tavily.com (free tier available)
TAVILY_API_KEY=your_tavily_api_key_here
# OpenRouter API Key (for 400+ AI models with intelligent cost optimization)
# Get key at https://openrouter.ai (pay-as-you-go pricing)
OPENROUTER_API_KEY=your_openrouter_api_key_here
# Yahoo Finance Configuration
YFINANCE_TIMEOUT_SECONDS=30
# Data Provider Settings
DATA_PROVIDER_USE_CACHE=true
DATA_PROVIDER_CACHE_DIR=/tmp/maverick_mcp/cache
DATA_PROVIDER_CACHE_EXPIRY=86400
DATA_PROVIDER_RATE_LIMIT=5
# Rate Limiting (basic protection)
RATE_LIMIT_PER_IP=100 # requests per minute
# Feature Flags
MAINTENANCE_MODE=false
```
--------------------------------------------------------------------------------
/maverick_mcp/README.md:
--------------------------------------------------------------------------------
```markdown
# Maverick-MCP Directory Structure
## Overview
**⚠️ IMPORTANT FINANCIAL DISCLAIMER**: This software is for educational and informational purposes only. It is NOT financial advice. Always consult with a qualified financial advisor before making investment decisions.
The Maverick-MCP package is organized into the following modules:
- **core/**: Core client functionality and rate limiting
- **api/**: MCP API server and client
- **tools/**: Financial analysis tools
- **providers/**: Data providers for stocks, market, and macro data
- **data/**: Data handling utilities, including caching
- **config/**: Configuration constants and settings
- **cli/**: Command-line interface tools
- **examples/**: Example scripts and usage patterns
## Module Details
### core/
- `client.py` - Base Anthropic client implementation with rate limiting
- `rate_limiter.py` - Anthropic API rate limiter
### api/
- `mcp_client.py` - MCP protocol client implementation
- `server.py` - FastMCP server implementation
### tools/
- `portfolio_manager.py` - Portfolio management and optimization tools
### providers/
- `stock_data.py` - Stock data provider utilities
- `market_data.py` - Market data provider utilities
- `macro_data.py` - Macroeconomic data provider utilities
### data/
- `cache.py` - Cache implementation (Redis and in-memory)
### config/
- `constants.py` - Configuration constants and environment variable handling
### cli/
- `server.py` - Server CLI implementation
### examples/
- Various example scripts showing how to use the Maverick-MCP tools
## Usage
**Personal Use Only**: This server is designed for individual educational use with Claude Desktop.
To start the Maverick-MCP server:
```bash
# Recommended: Use the Makefile
make dev
# Alternative: Direct FastMCP server
python -m maverick_mcp.api.server --transport streamable-http --port 8003
# Development mode with hot reload
./scripts/dev.sh
```
Note: The server will start using streamable-http transport on port 8003. The streamable-http transport is compatible with mcp-remote, while SSE transport is not (SSE requires GET requests but mcp-remote sends POST requests).
When the server starts, you can access it at:
- http://localhost:8003
You can also start the server programmatically:
```python
from maverick_mcp.api.server import mcp
# Start the server with SSE transport
# (use transport="streamable-http" instead if connecting via mcp-remote — see note above)
# NOTE: All financial analysis tools include appropriate disclaimers
mcp.run(transport="sse")
```
## Financial Analysis Tools
MaverickMCP provides comprehensive financial analysis capabilities:
### Stock Data Tools
- Historical price data with intelligent caching
- Real-time quotes and market data
- Company information and fundamentals
### Technical Analysis Tools
- 20+ technical indicators (RSI, MACD, Bollinger Bands, etc.)
- Support and resistance level identification
- Trend analysis and pattern recognition
### Portfolio Tools
- Risk assessment and correlation analysis
- Portfolio optimization using Modern Portfolio Theory
- Position sizing and risk management
### Screening Tools
- Momentum-based stock screening
- Breakout pattern identification
- Custom filtering and ranking systems
**All tools include appropriate financial disclaimers and are for educational purposes only.**
```
--------------------------------------------------------------------------------
/tests/integration/README.md:
--------------------------------------------------------------------------------
```markdown
# Integration Tests for MaverickMCP Orchestration
This directory contains comprehensive integration tests for the multi-agent orchestration system implemented in MaverickMCP.
## Files
- **`test_orchestration_complete.py`** - Comprehensive integration test suite with 18+ test scenarios
- **`run_integration_tests.sh`** - Test runner script with environment setup
- **`README.md`** - This documentation file
## Test Coverage
### Orchestration Tools Tested
1. **`agents_orchestrated_analysis`** - Multi-agent financial analysis with intelligent routing
2. **`agents_deep_research_financial`** - Web search-powered research with AI analysis
3. **`agents_compare_multi_agent_analysis`** - Multi-perspective agent comparison
### Test Categories
- **Functional Tests** (12 scenarios) - Core functionality validation
- **Error Handling** (3 scenarios) - Graceful error response testing
- **Concurrent Execution** (3 scenarios) - Performance under concurrent load
- **Performance Benchmarks** - Execution time and memory usage monitoring
## Usage
### Quick Test Run
```bash
# From tests/integration directory
./run_integration_tests.sh
# Or run directly
python test_orchestration_complete.py
```
### From Project Root
```bash
# Run integration tests
cd tests/integration && ./run_integration_tests.sh
# Run specific test with uv
uv run python tests/integration/test_orchestration_complete.py
```
## Test Scenarios
### Orchestrated Analysis (4 tests)
- Conservative LLM-powered routing
- Aggressive rule-based routing
- Moderate hybrid routing
- Day trader fast execution
### Deep Research Financial (4 tests)
- Basic company research
- Standard sector research
- Comprehensive market research
- Exhaustive crypto research
### Multi-Agent Comparison (3 tests)
- Market vs supervisor stock analysis
- Conservative multi-agent portfolio
- Aggressive growth strategy
### Error Handling (3 tests)
- Invalid parameters for each tool
- Graceful error response validation
- Exception handling verification
### Performance Tests (3 tests)
- Concurrent execution stress test
- Memory usage monitoring
- Response time benchmarking
## Expected Results
**Successful Test Run:**
```
🎉 ALL TESTS PASSED! (18/18)
Total Execution Time: 45.67s
Average execution time: 2340ms
```
**Performance Benchmarks:**
- Orchestrated Analysis: ~1500ms average
- Deep Research: ~25000ms average (includes web search)
- Multi-Agent Comparison: ~3000ms average
## Test Configuration
Tests are configured in `test_orchestration_complete.py`:
```python
TEST_CONFIG = {
"timeout_seconds": 300, # 5 minutes max per test
"concurrent_limit": 3, # Maximum concurrent tests
"performance_monitoring": True,
"detailed_validation": True,
"save_results": True,
}
```
## Output Files
- **Log file**: `integration_test_YYYYMMDD_HHMMSS.log`
- **Results file**: `integration_test_results_YYYYMMDD_HHMMSS.json`
## Requirements
- Python 3.12+
- All MaverickMCP dependencies installed
- Optional: OPENAI_API_KEY for real LLM calls
- Optional: EXA_API_KEY, TAVILY_API_KEY for web search testing
## Troubleshooting
**Import Errors:**
- Ensure you're running from the correct directory
- Check that all dependencies are installed: `uv sync`
**API Errors:**
- Tests use mock responses if API keys aren't configured
- Add API keys to `.env` file for full functionality testing
**Timeout Errors:**
- Increase `timeout_seconds` in TEST_CONFIG for slower systems
- Some tests (especially deep research) take 20-30 seconds
## Integration with CI/CD
The test suite returns proper exit codes:
- `0` - All tests passed
- `1` - Some tests failed
- `2` - Test suite execution failed
- `130` - Interrupted by user (SIGINT)
Example CI usage:
```yaml
- name: Run Integration Tests
run: |
cd tests/integration
./run_integration_tests.sh
```
```
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
```markdown
# MaverickMCP Test Suite
This comprehensive test suite covers both **Phase 5.1 (End-to-End Integration Tests)** and **Phase 5.2 (Performance Testing Suite)** for the VectorBT backtesting system.
## Overview
The test suite includes:
### Phase 5.1: End-to-End Integration Tests
- **Complete workflow integration** from data fetch to results visualization
- **All 15 strategies testing** (9 traditional + 6 ML strategies)
- **Parallel execution capabilities** with concurrency testing
- **Cache behavior optimization** and performance validation
- **MCP tools integration** for Claude Desktop interaction
- **High-volume production scenarios** with 100+ symbols
- **Chaos engineering** for resilience testing
### Phase 5.2: Performance Testing Suite
- **Load testing** for 10, 50, and 100 concurrent users
- **Benchmark testing** against performance targets
- **Stress testing** for resource usage monitoring
- **Profiling** for bottleneck identification
## Test Structure
```
tests/
├── integration/ # Phase 5.1: Integration Tests
│ ├── test_full_backtest_workflow_advanced.py
│ ├── test_mcp_tools.py
│ ├── test_high_volume.py
│ └── test_chaos_engineering.py
├── performance/ # Phase 5.2: Performance Tests
│ ├── test_load.py
│ ├── test_benchmarks.py
│ ├── test_stress.py
│ └── test_profiling.py
├── conftest.py # Shared fixtures and configuration
└── README.md # This file
```
## Quick Start
### Running All Tests
```bash
# Run all integration and performance tests
make test-full
# Run only integration tests
pytest tests/integration/ -v
# Run only performance tests
pytest tests/performance/ -v
```
### Running Specific Test Categories
```bash
# Integration tests
pytest tests/integration/test_full_backtest_workflow_advanced.py -v
pytest tests/integration/test_mcp_tools.py -v
pytest tests/integration/test_high_volume.py -v
pytest tests/integration/test_chaos_engineering.py -v
# Performance tests
pytest tests/performance/test_load.py -v
pytest tests/performance/test_benchmarks.py -v
pytest tests/performance/test_stress.py -v
pytest tests/performance/test_profiling.py -v
```
## Test Categories Detailed
### Integration Tests
#### 1. Advanced Full Backtest Workflow (`test_full_backtest_workflow_advanced.py`)
- **All 15 strategies integration testing** (traditional + ML)
- **Parallel execution capabilities** with async performance
- **Cache behavior optimization** and hit rate validation
- **Database persistence integration** with PostgreSQL
- **Visualization integration** with chart generation
- **Error recovery mechanisms** across the workflow
- **Resource management** and cleanup testing
- **Memory leak prevention** validation
**Key Tests:**
- `test_all_15_strategies_integration()` - Tests all available strategies
- `test_parallel_execution_capabilities()` - Concurrent backtest execution
- `test_cache_behavior_and_optimization()` - Cache efficiency validation
- `test_resource_management_comprehensive()` - Memory and thread management
#### 2. MCP Tools Integration (`test_mcp_tools.py`)
- **All MCP tool registrations** for Claude Desktop
- **Tool parameter validation** and error handling
- **Tool response formats** and data integrity
- **Claude Desktop simulation** with realistic usage patterns
- **Performance and timeout handling** for MCP calls
**Key Tests:**
- `test_all_mcp_tools_registration()` - Validates all tools are registered
- `test_run_backtest_tool_comprehensive()` - Core backtesting tool validation
- `test_claude_desktop_simulation()` - Realistic usage pattern simulation
#### 3. High Volume Production Scenarios (`test_high_volume.py`)
- **Large symbol set backtesting** (100+ symbols)
- **Multi-year historical data** processing
- **Memory management under load** with leak detection
- **Concurrent user scenarios** simulation
- **Database performance under load** testing
- **Cache efficiency** with large datasets
**Key Tests:**
- `test_large_symbol_set_backtesting()` - 100+ symbol processing
- `test_concurrent_user_scenarios()` - Multi-user simulation
- `test_memory_management_large_datasets()` - Memory leak prevention
#### 4. Chaos Engineering (`test_chaos_engineering.py`)
- **API failures and recovery** mechanisms
- **Database connection drops** and reconnection
- **Cache failures and fallback** behavior
- **Circuit breaker behavior** under load
- **Network instability** injection
- **Memory pressure scenarios** testing
- **CPU overload situations** handling
- **Cascading failure recovery** validation
**Key Tests:**
- `test_api_failures_and_recovery()` - API resilience testing
- `test_circuit_breaker_behavior()` - Circuit breaker validation
- `test_cascading_failure_recovery()` - Multi-component failure handling
### Performance Tests
#### 1. Load Testing (`test_load.py`)
- **Concurrent user load testing** (10, 50, 100 users)
- **Response time and throughput** measurement
- **Memory usage under load** monitoring
- **Database performance** with multiple connections
- **System stability** under sustained load
**Performance Targets:**
- 10 users: ≥2.0 req/s, ≤5.0s avg response time
- 50 users: ≥5.0 req/s, ≤8.0s avg response time
- 100 users: ≥3.0 req/s, ≤15.0s avg response time
**Key Tests:**
- `test_concurrent_users_10()`, `test_concurrent_users_50()`, `test_concurrent_users_100()`
- `test_load_scalability_analysis()` - Performance scaling analysis
- `test_sustained_load_stability()` - Long-duration stability testing
#### 2. Benchmark Testing (`test_benchmarks.py`)
- **Backtest execution < 2 seconds** per backtest
- **Memory usage < 500MB** per backtest
- **Cache hit rate > 80%** efficiency
- **API failure rate < 0.1%** reliability
- **Database query performance < 100ms** speed
- **Response time SLA compliance** validation
**Key Benchmarks:**
- Execution time targets
- Memory efficiency targets
- Cache performance targets
- Database performance targets
- SLA compliance targets
**Key Tests:**
- `test_backtest_execution_time_benchmark()` - Speed validation
- `test_memory_usage_benchmark()` - Memory efficiency
- `test_cache_hit_rate_benchmark()` - Cache performance
- `test_comprehensive_benchmark_suite()` - Full benchmark report
#### 3. Stress Testing (`test_stress.py`)
- **Sustained load testing** (15+ minutes)
- **Memory leak detection** over time
- **CPU utilization monitoring** under stress
- **Database connection pool** exhaustion testing
- **File descriptor limits** testing
- **Queue overflow scenarios** handling
**Key Tests:**
- `test_sustained_load_15_minutes()` - Extended load testing
- `test_memory_leak_detection()` - Memory leak validation
- `test_cpu_stress_resilience()` - CPU stress handling
- `test_database_connection_stress()` - DB connection pool testing
#### 4. Profiling and Bottleneck Identification (`test_profiling.py`)
- **CPU profiling** with cProfile integration
- **Memory allocation hotspots** identification
- **Database query performance** analysis
- **I/O vs CPU-bound** operation analysis
- **Optimization recommendations** generation
**Key Tests:**
- `test_profile_backtest_execution()` - CPU bottleneck identification
- `test_profile_database_query_performance()` - DB query analysis
- `test_profile_memory_allocation_patterns()` - Memory optimization
- `test_comprehensive_profiling_suite()` - Full profiling report
## Performance Targets
### Execution Performance
- **Backtest execution**: < 2 seconds per backtest
- **Data loading**: < 0.5 seconds average
- **Database saves**: < 50ms average
- **Database queries**: < 20ms average
### Throughput Targets
- **Sequential**: ≥ 2.0 backtests/second
- **Concurrent**: ≥ 5.0 backtests/second
- **10 concurrent users**: ≥ 2.0 requests/second
- **50 concurrent users**: ≥ 5.0 requests/second
### Resource Efficiency
- **Memory usage**: < 500MB per backtest
- **Memory growth**: < 100MB/hour sustained
- **Cache hit rate**: > 80%
- **API failure rate**: < 0.1%
### Response Time SLA
- **50th percentile**: < 1.5 seconds
- **95th percentile**: < 3.0 seconds
- **99th percentile**: < 5.0 seconds
- **SLA compliance**: > 95% of requests
## Test Configuration
### Environment Setup
All tests use containerized PostgreSQL and Redis for consistency:
```python
# Automatic container setup in conftest.py
@pytest.fixture(scope="session")
def postgres_container():
    with PostgresContainer("postgres:15-alpine") as postgres:
        yield postgres

@pytest.fixture(scope="session")
def redis_container():
    with RedisContainer("redis:7-alpine") as redis:
        yield redis
```
### Mock Data Providers
Tests use optimized mock data providers for consistent, fast testing:
```python
# Realistic stock data generation
def generate_stock_data(symbol: str) -> pd.DataFrame:
    # 3 years of realistic OHLCV data
    # Different market regimes (bull, sideways, bear)
    # Deterministic but varied based on symbol hash
    ...
```
### Parallel Execution
Tests are designed for parallel execution where possible:
```python
# Concurrent backtest execution
async def run_parallel_backtests(symbols, strategies):
    semaphore = asyncio.Semaphore(8)  # Control concurrency
    tasks = [run_with_semaphore(backtest) for backtest in all_backtests]
    results = await asyncio.gather(*tasks, return_exceptions=True)
```
## Running Tests in CI/CD
### GitHub Actions Configuration
```yaml
- name: Run Integration Tests
  run: |
    pytest tests/integration/ -v --tb=short --timeout=600

- name: Run Performance Tests
  run: |
    pytest tests/performance/ -v --tb=short --timeout=1800
```
### Test Markers
```bash
# Run only fast tests
pytest -m "not slow"
# Run only integration tests
pytest -m integration
# Run only performance tests
pytest -m performance
# Run stress tests (extended duration)
pytest -m stress
```
## Expected Test Results
### Integration Test Results
- **Strategy Coverage**: All 15 strategies (9 traditional + 6 ML) tested
- **Success Rate**: ≥ 80% success rate across all tests
- **Parallel Efficiency**: ≥ 2x speedup with concurrent execution
- **Error Recovery**: Graceful handling of all failure scenarios
### Performance Test Results
- **Load Testing**: Successful handling of 100 concurrent users
- **Benchmark Compliance**: ≥ 80% of benchmarks passed
- **Stress Testing**: Stable operation under extended load
- **Profiling**: Identification of optimization opportunities
## Troubleshooting
### Common Issues
#### Test Timeouts
```bash
# Increase timeout for long-running tests
pytest tests/performance/test_stress.py --timeout=1800
```
#### Memory Issues
```bash
# Monitor memory usage during tests
pytest tests/integration/test_high_volume.py -s --tb=short
```
#### Database Connection Issues
```bash
# Check container status
docker ps | grep postgres
docker logs <container_id>
```
#### Performance Assertion Failures
Check the test output for specific performance metrics that failed and compare against targets.
### Debug Mode
```bash
# Run with detailed logging
pytest tests/ -v -s --log-cli-level=INFO
# Run specific test with profiling
pytest tests/performance/test_profiling.py::test_comprehensive_profiling_suite -v -s
```
## Contributing
When adding new tests:
1. **Follow the existing patterns** for fixtures and mocks
2. **Add appropriate performance assertions** with clear targets
3. **Include comprehensive logging** for debugging
4. **Document expected behavior** and performance characteristics
5. **Use realistic test data** that represents production scenarios
### Test Categories
- Mark integration tests with `@pytest.mark.integration`
- Mark slow tests with `@pytest.mark.slow`
- Mark performance tests with `@pytest.mark.performance`
- Mark stress tests with `@pytest.mark.stress`
## Results and Reporting
### Test Reports
All tests generate comprehensive reports including:
- Performance metrics and benchmarks
- Resource usage analysis
- Error rates and success rates
- Optimization recommendations
### Performance Dashboards
Key metrics are logged for dashboard visualization:
- Execution times and throughput
- Memory usage patterns
- Database performance
- Cache hit rates
- Error rates and recovery times
This comprehensive test suite ensures the MaverickMCP backtesting system meets all performance and reliability requirements for production use.
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
# MaverickMCP - Personal Stock Analysis MCP Server
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Python 3.12+](https://img.shields.io/badge/python-3.12+-blue.svg)](https://www.python.org/downloads/)
[![FastMCP 2.0](https://img.shields.io/badge/FastMCP-2.0-green.svg)](https://github.com/jlowin/fastmcp)
[![GitHub Stars](https://img.shields.io/github/stars/wshobson/maverick-mcp?style=social)](https://github.com/wshobson/maverick-mcp)
[![GitHub Issues](https://img.shields.io/github/issues/wshobson/maverick-mcp)](https://github.com/wshobson/maverick-mcp/issues)
[![GitHub Forks](https://img.shields.io/github/forks/wshobson/maverick-mcp?style=social)](https://github.com/wshobson/maverick-mcp/network/members)
**MaverickMCP** is a personal-use FastMCP 2.0 server that provides professional-grade financial data analysis, technical indicators, and portfolio optimization tools directly to your Claude Desktop interface. Built for individual traders and investors, it offers comprehensive stock analysis capabilities without any authentication or billing complexity.
The server comes pre-seeded with all 520 S&P 500 stocks and provides advanced screening recommendations across multiple strategies. It runs locally with HTTP/SSE/STDIO transport options for seamless integration with Claude Desktop and other MCP clients.
## Why MaverickMCP?
MaverickMCP provides professional-grade financial analysis tools directly within your Claude Desktop interface. Perfect for individual traders and investors who want comprehensive stock analysis capabilities without the complexity of expensive platforms or commercial services.
**Key Benefits:**
- **No Setup Complexity**: Simple `make dev` command gets you running (or `uv sync` + `make dev`)
- **Modern Python Tooling**: Built with `uv` for lightning-fast dependency management
- **Claude Desktop Integration**: Native MCP support for seamless AI-powered analysis
- **Comprehensive Analysis**: 39+ financial tools covering technical indicators, screening, and portfolio optimization
- **Smart Caching**: Redis-powered performance with graceful fallbacks
- **Fast Development**: Hot reload, smart error handling, and parallel processing
- **Open Source**: MIT licensed, community-driven development
- **Educational Focus**: Perfect for learning financial analysis and MCP development
## Features
- **Pre-seeded Database**: 520 S&P 500 stocks with comprehensive screening recommendations
- **Advanced Backtesting**: VectorBT-powered engine with 15+ built-in strategies and ML algorithms
- **Fast Development**: Comprehensive Makefile, smart error handling, hot reload, and parallel processing
- **Stock Data Access**: Historical and real-time stock data with intelligent caching
- **Technical Analysis**: 20+ indicators including SMA, EMA, RSI, MACD, Bollinger Bands, and more
- **Stock Screening**: Multiple strategies (Maverick Bullish/Bearish, Trending Breakouts) with parallel processing
- **Portfolio Tools**: Correlation analysis, returns calculation, and optimization
- **Market Data**: Sector performance, market movers, and earnings information
- **Smart Caching**: Redis-powered performance with automatic fallback to in-memory storage
- **Database Support**: SQLAlchemy integration with PostgreSQL/SQLite (defaults to SQLite)
- **Multi-Transport Support**: HTTP, SSE, and STDIO transports for all MCP clients
## Quick Start
### Prerequisites
- **Python 3.12+**: Core runtime environment
- **[uv](https://docs.astral.sh/uv/)**: Modern Python package manager (recommended)
- **TA-Lib**: Technical analysis library for advanced indicators
- Redis (optional, for enhanced caching)
- PostgreSQL or SQLite (optional, for data persistence)
#### Installing TA-Lib
TA-Lib is required for technical analysis calculations.
**macOS and Linux (Homebrew):**
```bash
brew install ta-lib
```
**Windows (Multiple Options):**
**Option 1: Conda/Anaconda (Recommended - Easiest)**
```bash
conda install -c conda-forge ta-lib
```
**Option 2: Pre-compiled Wheels**
1. Download the appropriate wheel for your Python version from:
- [cgohlke/talib-build releases](https://github.com/cgohlke/talib-build/releases)
- Choose the file matching your Python version (e.g., `TA_Lib-0.4.28-cp312-cp312-win_amd64.whl` for Python 3.12 64-bit)
2. Install using pip:
```bash
pip install path/to/downloaded/TA_Lib-X.X.X-cpXXX-cpXXX-win_amd64.whl
```
**Option 3: Alternative Pre-compiled Package**
```bash
pip install TA-Lib-Precompiled
```
**Option 4: Build from Source (Advanced)**
If other methods fail, you can build from source:
1. Install Microsoft C++ Build Tools
2. Download and extract ta-lib C library to `C:\ta-lib`
3. Build using Visual Studio tools
4. Run `pip install ta-lib`
**Verification:**
Test your installation:
```bash
python -c "import talib; print(talib.__version__)"
```
#### Installing uv (Recommended)
```bash
# macOS/Linux
curl -LsSf https://astral.sh/uv/install.sh | sh
# Windows
powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
# Alternative: via pip
pip install uv
```
### Installation
#### Option 1: Using uv (Recommended - Fastest)
```bash
# Clone the repository
git clone https://github.com/wshobson/maverick-mcp.git
cd maverick-mcp
# Install dependencies and create virtual environment in one command
uv sync
# Copy environment template
cp .env.example .env
# Add your Tiingo API key (free at tiingo.com)
```
#### Option 2: Using pip (Traditional)
```bash
# Clone the repository
git clone https://github.com/wshobson/maverick-mcp.git
cd maverick-mcp
# Create virtual environment and install
python -m venv .venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate
pip install -e .
# Copy environment template
cp .env.example .env
# Add your Tiingo API key (free at tiingo.com)
```
### Start the Server
```bash
# One command to start everything (includes S&P 500 data seeding on first run)
make dev
# The server is now running with:
# - HTTP endpoint: http://localhost:8003/mcp/
# - SSE endpoint: http://localhost:8003/sse/
# - 520 S&P 500 stocks pre-loaded with screening data
```
### Connect to Claude Desktop
**Recommended: SSE Connection (Stable and Reliable)**
This configuration provides stable tool registration and prevents tools from disappearing:
```json
{
  "mcpServers": {
    "maverick-mcp": {
      "command": "npx",
      "args": ["-y", "mcp-remote", "http://localhost:8003/sse/"]
    }
  }
}
```
> **Important**: Note the trailing slash in `/sse/` - this is REQUIRED to prevent redirect issues!
**Config File Location:**
- macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
- Windows: `%APPDATA%\Claude\claude_desktop_config.json`
**Why This Configuration Works Best:**
- Stable tool registration - tools don't disappear after initial connection
- Reliable connection management through SSE transport
- Proper session persistence for long-running analysis tasks
- All 39+ financial tools available consistently
**Alternative: Direct STDIO Connection (Development Only)**
```json
{
  "mcpServers": {
    "maverick-mcp": {
      "command": "uv",
      "args": [
        "run",
        "python",
        "-m",
        "maverick_mcp.api.server",
        "--transport",
        "stdio"
      ],
      "cwd": "/path/to/maverick-mcp"
    }
  }
}
```
> **Important**: Always **restart Claude Desktop** after making configuration changes. The SSE configuration via mcp-remote has been tested and confirmed to provide stable, persistent tool access without connection drops.
That's it! MaverickMCP tools will now be available in your Claude Desktop interface.
#### Claude Desktop (Most Popular) - Recommended Configuration
**Config Location**:
- macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
- Windows: `%APPDATA%\Claude\claude_desktop_config.json`
#### Cursor IDE - STDIO and SSE
**Option 1: STDIO (via mcp-remote)**:
```json
{
  "mcpServers": {
    "maverick-mcp": {
      "command": "npx",
      "args": ["-y", "mcp-remote", "http://localhost:8003/sse/"]
    }
  }
}
```
**Option 2: Direct SSE**:
```json
{
  "mcpServers": {
    "maverick-mcp": {
      "url": "http://localhost:8003/sse/"
    }
  }
}
```
**Config Location**: Cursor → Settings → MCP Servers
#### Claude Code CLI - All Transports
**HTTP Transport (Recommended)**:
```bash
claude mcp add --transport http maverick-mcp http://localhost:8003/mcp/
```
**SSE Transport (Alternative)**:
```bash
claude mcp add --transport sse maverick-mcp http://localhost:8003/sse/
```
**STDIO Transport (Development)**:
```bash
claude mcp add maverick-mcp uv run python -m maverick_mcp.api.server --transport stdio
```
#### Windsurf IDE - STDIO and SSE
**Option 1: STDIO (via mcp-remote)**:
```json
{
  "mcpServers": {
    "maverick-mcp": {
      "command": "npx",
      "args": ["-y", "mcp-remote", "http://localhost:8003/mcp/"]
    }
  }
}
```
**Option 2: Direct SSE**:
```json
{
  "mcpServers": {
    "maverick-mcp": {
      "serverUrl": "http://localhost:8003/sse/"
    }
  }
}
```
**Config Location**: Windsurf → Settings → Advanced Settings → MCP Servers
#### Why mcp-remote is Needed
The `mcp-remote` tool bridges the gap between STDIO-only clients (like Claude Desktop) and HTTP/SSE servers. Without it, these clients cannot connect to remote MCP servers:
- **Without mcp-remote**: Client tries STDIO → Server expects HTTP → Connection fails
- **With mcp-remote**: Client uses STDIO → mcp-remote converts to HTTP → Server receives HTTP → Success
## Available Tools
MaverickMCP provides 39+ financial analysis tools organized into focused categories, including advanced AI-powered research agents:
### Development Commands
```bash
# Start the server (one command!)
make dev
# Alternative startup methods
./scripts/start-backend.sh --dev # Script-based startup
./tools/fast_dev.sh # Ultra-fast startup (< 3 seconds)
uv run python tools/hot_reload.py # Auto-restart on file changes
# Server will be available at:
# - HTTP endpoint: http://localhost:8003/mcp/ (streamable-http - use with mcp-remote)
# - SSE endpoint: http://localhost:8003/sse/ (SSE - direct connection only, not mcp-remote)
# - Health check: http://localhost:8003/health
```
### Testing
```bash
# Quick test commands
make test # Run unit tests (5-10 seconds)
make test-specific TEST=test_name # Run specific test
make test-watch # Auto-run tests on file changes
# Using uv (recommended)
uv run pytest # Unit tests only
uv run pytest --cov=maverick_mcp # With coverage
uv run pytest -m "" # All tests (requires PostgreSQL/Redis)
# Alternative: Direct pytest (if activated in venv)
pytest # Unit tests only
pytest --cov=maverick_mcp # With coverage
pytest -m "" # All tests (requires PostgreSQL/Redis)
```
### Code Quality
```bash
# Quick quality commands
make lint # Check code quality (ruff)
make format # Auto-format code (ruff)
make typecheck # Run type checking (ty)
# Using uv (recommended)
uv run ruff check . # Linting
uv run ruff format . # Formatting
uv run ty check . # Type checking (Astral's modern type checker)
# Alternative: Direct commands (if activated in venv)
ruff check . # Linting
ruff format . # Formatting
ty check . # Type checking
# Ultra-fast one-liner (no installation needed)
uvx ty check . # Run ty directly without installing
```
## Configuration
Configure MaverickMCP via `.env` file or environment variables:
**Essential Settings:**
- `REDIS_HOST`, `REDIS_PORT` - Redis cache (optional, defaults to localhost:6379)
- `DATABASE_URL` - PostgreSQL connection or `sqlite:///maverick_mcp.db` for SQLite (default)
- `LOG_LEVEL` - Logging verbosity (INFO, DEBUG, ERROR)
- S&P 500 data automatically seeds on first startup
**Required API Keys:**
- `TIINGO_API_KEY` - Stock data provider (free tier available at [tiingo.com](https://tiingo.com))
**Optional API Keys:**
- `OPENROUTER_API_KEY` - **Strongly Recommended for Research**: Access to 400+ AI models with intelligent cost optimization (40-60% cost savings)
- `EXA_API_KEY` - **Recommended for Research**: Web search capabilities for comprehensive research
- `OPENAI_API_KEY` - Direct OpenAI access (fallback)
- `ANTHROPIC_API_KEY` - Direct Anthropic access (fallback)
- `FRED_API_KEY` - Federal Reserve economic data
- `TAVILY_API_KEY` - Alternative web search provider
**Performance:**
- `CACHE_ENABLED=true` - Enable Redis caching
- `CACHE_TTL_SECONDS=3600` - Cache duration
## Usage Examples
### Backtesting Example
Once connected to Claude Desktop, you can use natural language to run backtests:
```
"Run a backtest on AAPL using the momentum strategy for the last 6 months"
"Compare the performance of mean reversion vs trend following strategies on SPY"
"Optimize the RSI strategy parameters for TSLA with walk-forward analysis"
"Show me the Sharpe ratio and maximum drawdown for a portfolio of tech stocks using the adaptive ML strategy"
"Generate a detailed backtest report for the ensemble strategy on the S&P 500 sectors"
```
### Technical Analysis Example
```
"Show me the RSI and MACD analysis for NVDA"
"Identify support and resistance levels for MSFT"
"Get full technical analysis for the top 5 momentum stocks"
```
### Portfolio Management Example (NEW)
```
"Add 10 shares of AAPL I bought at $150.50"
"Show me my portfolio with current prices"
"Compare my portfolio holdings" # No tickers needed!
"Analyze correlation in my portfolio" # Auto-detects your positions
"Remove 5 shares of MSFT"
```
### Portfolio Optimization Example
```
"Optimize a portfolio of AAPL, GOOGL, MSFT, and AMZN for maximum Sharpe ratio"
"Calculate the correlation matrix for my tech portfolio"
"Analyze the risk-adjusted returns for energy sector stocks"
```
## Tools
MaverickMCP provides 39+ financial analysis tools organized by category, including advanced AI-powered research agents:
### Stock Data Tools
- `fetch_stock_data` - Get historical stock data with intelligent caching
- `fetch_stock_data_batch` - Fetch data for multiple tickers simultaneously
- `get_news_sentiment` - Analyze news sentiment for any ticker
- `clear_cache` / `get_cache_info` - Cache management utilities
### Technical Analysis Tools
- `get_rsi_analysis` - RSI calculation with buy/sell signals
- `get_macd_analysis` - MACD analysis with trend identification
- `get_support_resistance` - Identify key price levels
- `get_full_technical_analysis` - Comprehensive technical analysis
- `get_stock_chart_analysis` - Visual chart generation
### Portfolio Management Tools (NEW) - Personal Portfolio Tracking
- `portfolio_add_position` - Add or update positions with automatic cost basis averaging
- `portfolio_get_my_portfolio` - View portfolio with live P&L calculations
- `portfolio_remove_position` - Remove partial or full positions
- `portfolio_clear_portfolio` - Clear all positions with safety confirmation
**Portfolio Features:**
- **Persistent Storage**: Track your actual holdings with cost basis
- **Automatic Averaging**: Cost basis updates automatically on repeat purchases
- **Live P&L**: Real-time unrealized gains/losses on all positions
- **Portfolio Resource**: `portfolio://my-holdings` provides AI context automatically
- **Multi-Portfolio Support**: Track multiple portfolios (IRA, 401k, taxable, etc.)
- **Fractional Shares**: Full support for partial share positions
### Portfolio Analysis Tools
- `risk_adjusted_analysis` - Risk-based position sizing with position awareness
- `compare_tickers` - Side-by-side ticker comparison (auto-uses your portfolio)
- `portfolio_correlation_analysis` - Correlation matrix analysis (auto-analyzes your holdings)
**Smart Integration:**
- All analysis tools auto-detect your portfolio positions
- No need to manually enter tickers you already own
- Position-aware recommendations (averaging up/down, profit taking)
### Stock Screening Tools (Pre-seeded with S&P 500)
- `get_maverick_stocks` - Bullish momentum screening from 520 S&P 500 stocks
- `get_maverick_bear_stocks` - Bearish setup identification from pre-analyzed data
- `get_trending_breakout_stocks` - Strong uptrend phase screening with supply/demand analysis
- `get_all_screening_recommendations` - Combined screening results across all strategies
- Database includes comprehensive screening data updated regularly
### Advanced Research Tools (NEW) - AI-Powered Deep Analysis
- `research_comprehensive` - Full parallel research with multiple AI agents (7-256x faster)
- `research_company` - Company-specific deep research with financial analysis
- `analyze_market_sentiment` - Multi-source sentiment analysis with confidence tracking
- `coordinate_agents` - Multi-agent supervisor for complex research orchestration
**Research Features:**
- **Parallel Execution**: 7-256x speedup with intelligent agent orchestration
- **Adaptive Timeouts**: 120s-600s based on research depth and complexity
- **Smart Model Selection**: Automatic selection from 400+ models via OpenRouter
- **Cost Optimization**: 40-60% cost reduction through intelligent model routing
- **Early Termination**: Confidence-based early stopping to save time and costs
- **Content Filtering**: High-credibility source prioritization
- **Error Recovery**: Circuit breakers and comprehensive error handling
### Backtesting Tools (NEW) - Production-Ready Strategy Testing
- `run_backtest` - Execute backtests with VectorBT engine for any strategy
- `compare_strategies` - A/B testing framework for strategy comparison
- `optimize_strategy` - Walk-forward optimization with parameter tuning
- `analyze_backtest_results` - Comprehensive performance metrics and risk analysis
- `get_backtest_report` - Generate detailed HTML reports with visualizations
**Backtesting Features:**
- **15+ Built-in Strategies**: Including ML-powered adaptive, ensemble, and regime-aware algorithms
- **VectorBT Integration**: High-performance vectorized backtesting engine
- **Parallel Processing**: 7-256x speedup for multi-strategy evaluation
- **Advanced Metrics**: Sharpe, Sortino, Calmar ratios, maximum drawdown, win rate
- **Walk-Forward Optimization**: Out-of-sample testing and validation
- **Monte Carlo Simulations**: Robustness testing with confidence intervals
- **Multi-Timeframe Support**: From 1-minute to monthly data
- **Custom Strategy Development**: Easy-to-use templates for custom strategies
### Market Data Tools
- Market overview, sector performance, earnings calendars
- Economic indicators and Federal Reserve data
- Real-time market movers and sentiment analysis
## Resources
- `stock://{ticker}` - Latest year of stock data
- `stock://{ticker}/{start_date}/{end_date}` - Custom date range
- `stock_info://{ticker}` - Basic stock information
## Prompts
- `stock_analysis(ticker)` - Comprehensive stock analysis prompt
- `market_comparison(tickers)` - Compare multiple stocks
- `portfolio_optimization(tickers, risk_profile)` - Portfolio optimization guidance
## Test Examples - Validate All Features
Test the comprehensive research capabilities and parallel processing improvements with these examples:
### Core Research Features
1. **Basic Research with Timeout Protection**
```
"Research the current state of the AI semiconductor industry and identify the top 3 investment opportunities"
```
- Tests: Basic research, adaptive timeouts, industry analysis
2. **Comprehensive Company Research with Parallel Agents**
```
"Provide comprehensive research on NVDA including fundamental analysis, technical indicators, competitive positioning, and market sentiment using multiple research approaches"
```
- Tests: Parallel orchestration, multi-agent coordination, company research
3. **Cost-Optimized Quick Research**
```
"Give me a quick overview of AAPL's recent earnings and stock performance"
```
- Tests: Intelligent model selection, cost optimization, quick analysis
### Performance Testing
4. **Parallel Performance Benchmark**
```
"Research and compare MSFT, GOOGL, and AMZN simultaneously focusing on cloud computing revenue growth"
```
- Tests: Parallel execution speedup (7-256x), multi-company analysis
5. **Deep Research with Early Termination**
```
"Conduct exhaustive research on Tesla's autonomous driving technology and its impact on the stock valuation"
```
- Tests: Deep research depth, confidence tracking, early termination (0.85 threshold)
### Error Handling & Recovery
6. **Error Recovery and Circuit Breaker Test**
```
"Research 10 penny stocks with unusual options activity and provide risk assessments for each"
```
- Tests: Circuit breaker activation, error handling, fallback mechanisms
7. **Supervisor Agent Coordination**
```
"Analyze the renewable energy sector using both technical and fundamental analysis approaches, then synthesize the findings into actionable investment recommendations"
```
- Tests: Supervisor routing, agent coordination, result synthesis
### Advanced Features
8. **Sentiment Analysis with Content Filtering**
```
"Analyze market sentiment for Bitcoin and cryptocurrency stocks over the past week, filtering for high-credibility sources only"
```
- Tests: Sentiment analysis, content filtering, source credibility
9. **Timeout Stress Test**
```
"Research the entire S&P 500 technology sector companies and rank them by growth potential"
```
- Tests: Timeout management, large-scale analysis, performance under load
10. **Multi-Modal Research Integration**
```
"Research AMD using technical analysis, then find recent news about their AI chips, analyze competitor Intel's position, and provide a comprehensive investment thesis with risk assessment"
```
- Tests: All research modes, integration, synthesis, risk assessment
### Bonus Edge Case Tests
11. **Empty/Invalid Query Handling**
```
"Research [intentionally leave blank or use symbol that doesn't exist like XYZABC]"
```
- Tests: Error messages, helpful fix suggestions
12. **Token Budget Optimization**
```
"Provide the most comprehensive possible analysis of the entire semiconductor industry including all major players, supply chain dynamics, geopolitical factors, and 5-year projections"
```
- Tests: Progressive token allocation, budget management, depth vs breadth
### Expected Performance Metrics
When running these tests, you should observe:
- **Parallel Speedup**: 7-256x faster for multi-entity queries
- **Response Times**: Simple queries ~10s, complex research 30-120s
- **Cost Efficiency**: 60-80% reduction vs premium-only models
- **Confidence Scores**: Early termination when confidence > 0.85
- **Error Recovery**: Graceful degradation without crashes
- **Model Selection**: Automatic routing to optimal models per task
## Docker (Optional)
For containerized deployment:
```bash
# Copy and configure environment
cp .env.example .env
# Using uv in Docker (recommended for faster builds)
docker build -t maverick_mcp .
docker run -p 8003:8003 --env-file .env maverick_mcp
# Or start with docker-compose
docker-compose up -d
```
**Note**: The Dockerfile uses `uv` for fast dependency installation and smaller image sizes.
## Troubleshooting
### Common Issues
**Tools Disappearing in Claude Desktop:**
- **Solution**: Ensure SSE endpoint has trailing slash: `http://localhost:8003/sse/`
- The 307 redirect from `/sse` to `/sse/` causes tool registration to fail
- Always use the exact configuration with trailing slash shown above
**Research Tool Timeouts:**
- Research tools have adaptive timeouts (120s-600s)
- Deep research may take 2-10 minutes depending on complexity
- Monitor progress in server logs with `make tail-log`
**OpenRouter Not Working:**
- Ensure `OPENROUTER_API_KEY` is set in `.env`
- Check API key validity at [openrouter.ai](https://openrouter.ai)
- System falls back to direct providers if OpenRouter unavailable
```bash
# Common development issues
make tail-log # View server logs
make stop # Stop services if ports are in use
make clean # Clean up cache files
# Quick fixes:
# Port 8003 in use → make stop
# Redis connection refused → brew services start redis
# Tests failing → make test (unit tests only)
# Slow startup → ./tools/fast_dev.sh
# Missing S&P 500 data → uv run python scripts/seed_sp500.py
# Research timeouts → Check logs, increase timeout settings
```
## Extending MaverickMCP
Add custom financial analysis tools with simple decorators:
```python
@mcp.tool()
def my_custom_indicator(ticker: str, period: int = 14):
    """Calculate custom technical indicator."""
    # Your analysis logic here
    return {"ticker": ticker, "signal": "buy", "confidence": 0.85}

@mcp.resource("custom://analysis/{ticker}")
def custom_analysis(ticker: str):
    """Custom analysis resource."""
    # Your resource logic here
    return f"Custom analysis for {ticker}"
```
## Development Tools
### Quick Development Workflow
```bash
make dev # Start everything
make stop # Stop services
make tail-log # Follow server logs
make test # Run tests quickly
make experiment # Test custom analysis scripts
```
### Smart Error Handling
MaverickMCP includes helpful error diagnostics:
- DataFrame column case sensitivity → Shows correct column name
- Connection failures → Provides specific fix commands
- Import errors → Shows exact install commands
- Database issues → Suggests SQLite fallback
### Fast Development Options
- **Hot Reload**: `uv run python tools/hot_reload.py` - Auto-restart on changes
- **Fast Startup**: `./tools/fast_dev.sh` - < 3 second startup
- **Quick Testing**: `uv run python tools/quick_test.py --test stock` - Test specific features
- **Experiment Harness**: Drop .py files in `tools/experiments/` for auto-execution
### Performance Features
- **Parallel Screening**: 4x faster stock analysis with ProcessPoolExecutor
- **Smart Caching**: `@quick_cache` decorator for instant re-runs
- **Optimized Tests**: Unit tests complete in 5-10 seconds
## Getting Help
For issues or questions:
1. **Check Documentation**: Start with this README and [CLAUDE.md](CLAUDE.md)
2. **Search Issues**: Look through existing [GitHub issues](https://github.com/wshobson/maverick-mcp/issues)
3. **Report Bugs**: Create a new [issue](https://github.com/wshobson/maverick-mcp/issues/new) with details
4. **Request Features**: Suggest improvements via GitHub issues
5. **Contribute**: See our [Contributing Guide](CONTRIBUTING.md) for development setup
## Recent Updates
### Production-Ready Backtesting Framework (NEW)
- **VectorBT Integration**: High-performance vectorized backtesting engine for institutional-grade performance
- **15+ Built-in Strategies**: Including ML-powered adaptive, ensemble, and regime-aware algorithms
- **Parallel Processing**: 7-256x speedup for multi-strategy evaluation and optimization
- **Advanced Analytics**: Comprehensive metrics including Sharpe, Sortino, Calmar ratios, and drawdown analysis
- **Walk-Forward Optimization**: Out-of-sample testing with automatic parameter tuning
- **Monte Carlo Simulations**: Robustness testing with confidence intervals
- **LangGraph Workflow**: Multi-agent orchestration for intelligent strategy selection and validation
- **Production Features**: Database persistence, batch processing, and HTML reporting
### Advanced Research Agents
- **Parallel Research Execution**: Achieved 7-256x speedup (exceeded 2x target) with intelligent agent orchestration
- **Adaptive Timeout Protection**: Dynamic timeouts (120s-600s) based on research depth and complexity
- **Intelligent Model Selection**: OpenRouter integration with 400+ models, 40-60% cost reduction
- **Comprehensive Error Handling**: Circuit breakers, retry logic, and graceful degradation
- **Early Termination**: Confidence-based stopping to optimize time and costs
- **Content Filtering**: High-credibility source prioritization for quality results
- **Multi-Agent Orchestration**: Supervisor pattern for complex research coordination
### Performance Improvements
- **Parallel Agent Execution**: Increased concurrent agents from 4 to 6
- **Optimized Semaphores**: BoundedSemaphore for better resource management
- **Reduced Rate Limiting**: Delays decreased from 0.5s to 0.05s
- **Batch Processing**: Improved throughput for multiple research tasks
- **Smart Caching**: Redis-powered with in-memory fallback
### Testing & Quality
- **84% Test Coverage**: 93 tests with comprehensive coverage
- **Zero Linting Errors**: Fixed 947 issues for clean codebase
- **Full Type Annotations**: Complete type coverage for research components
- **Error Recovery Testing**: Comprehensive failure scenario coverage
### Personal Use Optimization
- **No Authentication Required**: Removed all authentication/billing complexity for personal use
- **Pre-seeded S&P 500 Database**: 520 stocks with comprehensive screening recommendations
- **Simplified Architecture**: Clean, focused codebase for core stock analysis functionality
- **Multi-Transport Support**: HTTP, SSE, and STDIO for all MCP clients
### Development Experience Improvements
- **Comprehensive Makefile**: One command (`make dev`) starts everything including database seeding
- **Smart Error Handling**: Automatic fix suggestions for common issues
- **Fast Development**: < 3 second startup with `./tools/fast_dev.sh`
- **Parallel Processing**: 4x speedup for stock screening operations
- **Enhanced Tooling**: Hot reload, experiment harness, quick testing
### Technical Improvements
- **Modern Tooling**: Migrated to uv and ty for faster dependency management and type checking
- **Market Data**: Improved fallback logic and async support
- **Caching**: Smart Redis caching with graceful in-memory fallback
- **Database**: SQLite default with PostgreSQL option for enhanced performance
## Acknowledgments
MaverickMCP builds on these excellent open-source projects:
- **[FastMCP](https://github.com/jlowin/fastmcp)** - MCP framework powering the server
- **[yfinance](https://github.com/ranaroussi/yfinance)** - Market data access
- **[TA-Lib](https://github.com/mrjbq7/ta-lib)** - Technical analysis indicators
- **[pandas](https://pandas.pydata.org/)** & **[NumPy](https://numpy.org/)** - Data analysis
- **[FastAPI](https://fastapi.tiangolo.com/)** - Modern web framework
- The entire Python open-source community
## License
MIT License - see [LICENSE](LICENSE) file for details. Free to use for personal and commercial purposes.
## Support
If you find MaverickMCP useful:
- Star the repository
- Report bugs via GitHub issues
- Suggest features
- Improve documentation
---
Built for traders and investors. Happy Trading!
[](https://mseep.ai/app/wshobson-maverick-mcp)
**Read the full build guide**: [How to Build an MCP Stock Analysis Server](https://sethhobson.com/2025/08/how-to-build-an-mcp-stock-analysis-server/)
## Disclaimer
<sub>**This software is for educational and informational purposes only. It is NOT financial advice.**</sub>
<sub>**Investment Risk Warning**: Past performance does not guarantee future results. All investments carry risk of loss, including total loss of capital. Technical analysis and screening results are not predictive of future performance. Market data may be delayed, inaccurate, or incomplete.</sub>
<sub>**No Professional Advice**: This tool provides data analysis, not investment recommendations. Always consult with a qualified financial advisor before making investment decisions. The developers are not licensed financial advisors or investment professionals. Nothing in this software constitutes professional financial, investment, legal, or tax advice.</sub>
<sub>**Data and Accuracy**: Market data provided by third-party sources (Tiingo, Yahoo Finance, FRED). Data may contain errors, delays, or omissions. Technical indicators are mathematical calculations based on historical data. No warranty is made regarding data accuracy or completeness.</sub>
<sub>**Regulatory Compliance**: US Users - This software is not registered with the SEC, CFTC, or other regulatory bodies. International Users - Check local financial software regulations before use. Users are responsible for compliance with all applicable laws and regulations. Some features may not be available in certain jurisdictions.</sub>
<sub>**Limitation of Liability**: Developers disclaim all liability for investment losses or damages. Use this software at your own risk. No guarantee is made regarding software availability or functionality.</sub>
<sub>By using MaverickMCP, you acknowledge these risks and agree to use the software for educational purposes only.</sub>
```
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
```markdown
# Security Policy
## Reporting Security Vulnerabilities
The MaverickMCP team takes security seriously. We appreciate your efforts to responsibly disclose your findings and will make every effort to acknowledge your contributions.
## Reporting a Vulnerability
**Please DO NOT report security vulnerabilities through public GitHub issues.**
Instead, please report them via GitHub Security Advisories (recommended).
Please include:
- Type of vulnerability
- Full paths of affected source files
- Location of the affected code (tag/branch/commit or direct URL)
- Any special configuration required to reproduce
- Step-by-step instructions to reproduce
- Proof-of-concept or exploit code (if possible)
- Impact of the issue, including how an attacker might exploit it
You should receive a response within 48 hours. If for some reason you do not, please follow up with a comment on the GitHub Security Advisory to ensure we received your original report.
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 0.1.x | :white_check_mark: |
| < 0.1 | :x: |
## Security Features
MaverickMCP implements security measures appropriate for personal-use software:
### Personal Use Security Model
- **Local Deployment**: Designed to run locally for individual users
- **No Network Authentication**: Simplicity over complex auth systems
- **Environment Variable Security**: All API keys stored as environment variables
- **Basic Rate Limiting**: Protection against excessive API calls
### Data Protection
- **Input Validation**: Comprehensive Pydantic validation on all inputs
- **SQL Injection Prevention**: SQLAlchemy ORM with parameterized queries
- **API Key Security**: Secure handling of financial data provider credentials
- **Local Data Storage**: All analysis data stored locally by default
### Infrastructure Security
- **Environment Variables**: All secrets externalized, no hardcoded credentials
- **Secure Headers**: HSTS, CSP, X-Frame-Options, X-Content-Type-Options
- **Audit Logging**: Comprehensive security event logging
- **Circuit Breakers**: Protection against cascade failures
## Security Best Practices for Contributors
### Configuration
- Never commit secrets or API keys
- Use environment variables for all sensitive configuration
- Follow the `.env.example` template
- Use strong, unique passwords for development databases
### Code Guidelines
- Always validate and sanitize user input
- Use parameterized queries (SQLAlchemy ORM)
- Implement proper error handling without exposing sensitive information
- Follow the principle of least privilege
- Add rate limiting to new endpoints
### Dependencies
- Keep dependencies up to date
- Review security advisories regularly
- Run `safety check` before releases
- Use `bandit` for static security analysis
## Security Checklist for Pull Requests
- [ ] No hardcoded secrets or credentials
- [ ] Input validation on all user-provided data
- [ ] Proper error handling without information leakage
- [ ] API key handling follows environment variable patterns
- [ ] Financial data handling includes appropriate disclaimers
- [ ] Security tests for new features
- [ ] No vulnerable dependencies introduced
- [ ] Personal-use security model maintained (no complex auth)
## Running Security Audits
### Dependency Scanning
```bash
# Install security tools
pip install safety bandit
# Check for known vulnerabilities
safety check
# Static security analysis
bandit -r maverick_mcp/
```
### Additional Security Tools
```bash
# OWASP dependency check
pip install pip-audit
pip-audit
# Advanced static analysis
pip install semgrep
semgrep --config=auto maverick_mcp/
```
## Security Headers Configuration
The application implements the following security headers:
- `Strict-Transport-Security: max-age=31536000; includeSubDomains`
- `X-Content-Type-Options: nosniff`
- `X-Frame-Options: DENY`
- `X-XSS-Protection: 1; mode=block`
- `Referrer-Policy: strict-origin-when-cross-origin`
- `Content-Security-Policy: default-src 'self'`
## Incident Response
In case of a security incident:
1. **Immediate Response**: Assess the severity and impact
2. **Containment**: Isolate affected systems
3. **Investigation**: Determine root cause and extent
4. **Remediation**: Fix the vulnerability
5. **Recovery**: Restore normal operations
6. **Post-Incident**: Document lessons learned
## Security Contacts
- **Primary**: [GitHub Security Advisories](https://github.com/wshobson/maverick-mcp/security) (Recommended)
- **Alternative**: [GitHub Issues](https://github.com/wshobson/maverick-mcp/issues) (Public security issues only)
- **Community**: [GitHub Discussions](https://github.com/wshobson/maverick-mcp/discussions)
## Acknowledgments
We would like to thank the following individuals for responsibly disclosing security issues:
*This list will be updated as vulnerabilities are reported and fixed.*
## Financial Data Security
### Investment Data Protection
- **Personal Investment Information**: Never share account details, positions, or personal financial data
- **API Keys**: Secure storage of financial data provider API keys (Tiingo, FRED, etc.)
- **Market Data**: Ensure compliance with data provider terms of service and usage restrictions
- **Analysis Results**: Be aware that financial analysis outputs may contain sensitive investment insights
### Compliance Considerations
- **Financial Regulations**: Users must comply with applicable securities laws (SEC, CFTC, etc.)
- **Data Privacy**: Market analysis and portfolio data should be treated as confidential
- **Audit Trails**: Financial analysis activities may need to be logged for regulatory purposes
- **Cross-border Data**: Consider regulations when using financial data across international boundaries
## Financial Disclaimer for Security Context
**IMPORTANT**: This security policy covers the technical security of the software only. The financial analysis and investment tools provided by MaverickMCP are for educational purposes only and do not constitute financial advice. Always consult with qualified financial professionals for investment decisions.
## Resources
- [OWASP Top 10](https://owasp.org/www-project-top-ten/)
- [CWE Top 25](https://cwe.mitre.org/top25/)
- [Python Security Best Practices](https://python.readthedocs.io/en/latest/library/security_warnings.html)
- [FastAPI Security](https://fastapi.tiangolo.com/tutorial/security/)
- [SEC Cybersecurity Guidelines](https://www.sec.gov/spotlight/cybersecurity)
- [Financial Data Security Best Practices](https://www.cisa.gov/financial-services)
---
Thank you for helping keep MaverickMCP and its users safe!
```
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
```markdown
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
- Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
- The use of sexualized language or imagery, and sexual attention or
advances of any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email
address, without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Financial Software Specific Guidelines
Given the nature of this financial analysis software, we have additional community standards:
### Educational Focus
- **Maintain Educational Purpose**: All discussions and contributions should focus on the educational and learning aspects of financial analysis
- **No Investment Advice**: Contributors and community members must not provide specific investment recommendations or financial advice
- **Respect Disclaimers**: Always include appropriate disclaimers when discussing financial concepts or analysis
### Professional Discourse
- **Factual Discussions**: Keep financial discussions factual and educational, avoiding speculation or promotional content
- **Risk Awareness**: Acknowledge the risks inherent in financial markets when discussing trading or investment concepts
- **Data Accuracy**: Strive for accuracy in financial data and calculations, and acknowledge limitations
### Inclusive Learning Environment
- **Welcome All Experience Levels**: From beginners learning basic concepts to experts contributing advanced features
- **Patient Teaching**: Help newcomers understand both programming concepts and financial analysis principles
- **Diverse Perspectives**: Welcome different approaches to financial analysis and technical implementation
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the project maintainers through the contact channels listed at the end of this document.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Financial Disclaimer for Community Interactions
**IMPORTANT**: All discussions, code contributions, and educational content in this community are for educational purposes only. Nothing in our community interactions should be construed as financial advice.
### Community Member Responsibilities
- **No Financial Advice**: Do not provide specific investment recommendations or financial advice
- **Educational Purpose**: Frame all discussions in educational contexts
- **Risk Acknowledgment**: Acknowledge that all financial analysis carries risk
- **Disclaimer Inclusion**: Include appropriate disclaimers in financial-related content
### Project Maintainer Responsibilities
- **Consistent Disclaimers**: Ensure all financial analysis features include appropriate disclaimers
- **Educational Focus**: Guide discussions toward learning and technical implementation
- **Regulatory Awareness**: Be mindful of financial regulations and compliance considerations
- **Community Safety**: Protect community members from potentially harmful financial advice
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
at [https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
---
## Contact Information
- **GitHub Discussions**: [Community Discussions](https://github.com/wshobson/maverick-mcp/discussions)
- **Security Issues**: [Security Policy](SECURITY.md)
Remember: We're building an educational financial analysis tool together. Let's keep our community welcoming, professional, and focused on learning!
```
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
```markdown
# Contributing to MaverickMCP
Welcome to MaverickMCP! We're excited to have you contribute to this open-source financial analysis MCP server.
## Table of Contents
- [Development Setup](#development-setup)
- [Project Structure](#project-structure)
- [Running Tests](#running-tests)
- [Code Style](#code-style)
- [Making Changes](#making-changes)
- [Submitting Pull Requests](#submitting-pull-requests)
- [Reporting Issues](#reporting-issues)
- [Financial Domain Guidelines](#financial-domain-guidelines)
## Development Setup
### Prerequisites
- Python 3.12+
- [uv](https://docs.astral.sh/uv/) for dependency management
- PostgreSQL (optional, SQLite works for development)
- Redis (optional for development)
### Quick Start
1. **Clone the repository**
```bash
git clone https://github.com/wshobson/maverick-mcp.git
cd maverick-mcp
```
2. **Install dependencies**
```bash
uv sync --extra dev
```
3. **Set up environment**
```bash
cp .env.example .env
# Edit .env with your configuration
```
4. **Start development server**
```bash
make dev
# Or: ./scripts/start-backend.sh --dev
```
### Development Commands
- `make dev` - Start everything (recommended)
- `make test` - Run unit tests (5-10 seconds)
- `make lint` - Check code quality
- `make format` - Auto-format code
- `make typecheck` - Run type checking
### Pre-commit Hooks (Optional but Recommended)
We provide pre-commit hooks to ensure code quality:
```bash
# Install pre-commit (one time setup)
pip install pre-commit
# Install hooks for this repository
pre-commit install
# Run hooks on all files (optional)
pre-commit run --all-files
```
Pre-commit hooks will automatically run on every commit and include:
- Code formatting (ruff)
- Linting (ruff)
- Security scanning (bandit, safety)
- Custom financial domain validations
**Note**: Pre-commit hooks are optional for contributors but recommended for maintainers.
## Project Structure
MaverickMCP follows Domain-Driven Design (DDD) principles:
```
maverick_mcp/
├── api/ # FastAPI routers and server
├── domain/ # Core business logic (entities, services)
├── application/ # Use cases and DTOs
├── infrastructure/# External services (database, APIs)
├── auth/ # Authentication and security
├── config/ # Settings and configuration
└── tests/ # Test suite
```
## Running Tests
We use pytest with multiple test categories:
```bash
# Unit tests only (fast, ~5-10 seconds)
make test
# All tests including integration
make test-all
# Specific test
make test-specific TEST=test_name
# With coverage
pytest --cov=maverick_mcp
```
**Note**: Integration tests require PostgreSQL and Redis. They're excluded from CI by default.
## Code Style
We enforce strict code quality standards:
### Tools
- **ruff** for linting and formatting
- **pyright** for type checking
- **pytest** for testing
### Guidelines
1. **Type Hints**: Required for all functions and variables
2. **Docstrings**: Google-style docstrings for public APIs
3. **Error Handling**: Proper exception handling with specific error types
4. **Security**: Never hardcode secrets, always use environment variables
### Before Submitting
```bash
# Run all quality checks
make check # Runs lint + typecheck
# Auto-format code
make format
```
## Making Changes
### Development Workflow
1. **Start with an issue** - Create or find an existing issue
2. **Create a branch** - Use descriptive branch names:
- `feature/add-new-indicator`
- `fix/authentication-bug`
- `docs/improve-setup-guide`
3. **Make focused commits** - One logical change per commit
4. **Write tests** - Add tests for new features or bug fixes
5. **Update documentation** - Keep docs current with changes
### Financial Calculations
When working with financial logic:
- **Accuracy is critical** - Double-check all calculations
- **Use proper data types** - `Decimal` for currency, `float` for ratios
- **Include validation** - Validate input ranges and edge cases
- **Add comprehensive tests** - Test edge cases and boundary conditions
- **Document assumptions** - Explain the financial logic in docstrings
## Submitting Pull Requests
### Checklist
- [ ] Tests pass (`make test`)
- [ ] Code is formatted (`make format`)
- [ ] Type checking passes (`make typecheck`)
- [ ] Pre-commit hooks pass (`pre-commit run --all-files`)
- [ ] Documentation is updated
- [ ] Financial calculations are validated
- [ ] No hardcoded secrets or credentials
### PR Template
```markdown
## Description
Brief description of the changes
## Type of Change
- [ ] Bug fix
- [ ] New feature
- [ ] Documentation update
- [ ] Refactoring
## Financial Impact
- [ ] No financial calculations affected
- [ ] Financial calculations verified for accuracy
- [ ] New financial calculations added with tests
## Testing
- [ ] Unit tests added/updated
- [ ] Integration tests added (if applicable)
- [ ] Manual testing completed
## Screenshots (if applicable)
```
## Reporting Issues
### Bug Reports
Use the bug report template and include:
- **Environment details** (Python version, OS, dependencies)
- **Reproduction steps** - Clear, minimal steps to reproduce
- **Expected vs actual behavior**
- **Error messages** - Full stack traces when available
- **Financial context** - If related to calculations or market data
### Feature Requests
Include:
- **Use case** - What problem does this solve?
- **Proposed solution** - How should it work?
- **Financial domain knowledge** - Any domain-specific requirements
- **Implementation considerations** - Technical constraints or preferences
## Financial Domain Guidelines
### Market Data
- **Respect rate limits** - All providers have API limits
- **Cache appropriately** - Balance freshness with performance
- **Handle market closures** - Account for weekends and holidays
- **Validate symbols** - Check ticker symbol formats
### Technical Indicators
- **Use established formulas** - Follow industry-standard calculations
- **Document data requirements** - Specify minimum periods needed
- **Handle edge cases** - Division by zero, insufficient data
- **Test with real market data** - Verify against known examples
### Risk Management
- **Position sizing** - Implement proper risk controls
- **Stop loss calculations** - Accurate risk/reward ratios
- **Portfolio limits** - Respect maximum position sizes
- **Backtesting accuracy** - Avoid look-ahead bias
### Financial Compliance for Contributors
- **Educational Purpose**: All financial calculations and analysis tools must be clearly marked as educational
- **No Investment Advice**: Never include language that could be construed as investment recommendations
- **Disclaimer Requirements**: Include appropriate disclaimers in docstrings for financial functions
- **Data Attribution**: Properly attribute data sources (Tiingo, Yahoo Finance, FRED, etc.)
- **Risk Warnings**: Include risk warnings in documentation for portfolio and trading-related features
- **Regulatory Awareness**: Be mindful of securities regulations (SEC, CFTC, international equivalents)
#### Financial Function Documentation Template
```python
def calculate_risk_metric(data: pd.DataFrame) -> float:
"""
Calculate a financial risk metric.
DISCLAIMER: This is for educational purposes only and does not
constitute financial advice. Past performance does not guarantee
future results.
Args:
data: Historical price data
Returns:
Risk metric value
"""
```
## Architecture Guidelines
### Domain-Driven Design
- **Domain layer** - Pure business logic, no external dependencies
- **Application layer** - Use cases and orchestration
- **Infrastructure layer** - Database, APIs, external services
- **API layer** - HTTP handlers, validation, serialization
### MCP Integration
- **Tool design** - Each tool should have a single, clear purpose
- **Resource management** - Implement proper caching and cleanup
- **Error handling** - Return meaningful error messages
- **Documentation** - Include usage examples and parameter descriptions
## Getting Help
- **Discussions** - Use GitHub Discussions for questions
- **Issues** - Create issues for bugs or feature requests
- **Code Review** - Participate in PR reviews to learn
- **Documentation** - Check existing docs and CLAUDE.md for project context
## Contributing to Open Source
### Community Standards
MaverickMCP follows the [Contributor Covenant Code of Conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/):
- **Be welcoming** - Help newcomers feel welcome
- **Be respectful** - Treat all contributors with respect
- **Be patient** - Allow time for responses and reviews
- **Be constructive** - Focus on improving the project
- **Be inclusive** - Welcome diverse perspectives and backgrounds
### Recognition
Contributors are recognized in multiple ways:
- **CHANGELOG.md** - All contributors listed in release notes
- **GitHub contributors** - Automatic recognition via commits
- **Special mentions** - Outstanding contributions highlighted in README
- **Hall of Fame** - Major contributors featured in documentation
### Continuous Integration
Our CI/CD pipeline ensures code quality:
- **Automated testing** - All PRs run comprehensive test suites
- **Security scanning** - Automated vulnerability detection
- **Code quality checks** - Linting, formatting, and type checking
- **Performance testing** - Benchmark validation on PRs
- **Documentation validation** - Ensures docs stay current
### Current Architecture (Simplified for Personal Use)
MaverickMCP has been cleaned up and simplified:
- **No Complex Auth**: Removed enterprise JWT/OAuth systems for simplicity
- **No Billing System**: Personal-use focused, no subscription management
- **Local First**: Designed to run locally with Claude Desktop
- **Educational Focus**: Built for learning and personal financial analysis
- **Clean Dependencies**: Removed unnecessary enterprise features
This makes the codebase much more approachable for contributors!
## License
By contributing, you agree that your contributions will be licensed under the MIT License.
## Financial Software Disclaimer for Contributors
**IMPORTANT**: By contributing to MaverickMCP, you acknowledge that:
- All financial analysis tools are for educational purposes only
- No content should be construed as investment advice or recommendations
- Contributors are not responsible for user investment decisions or outcomes
- All financial calculations should include appropriate disclaimers and risk warnings
- Data accuracy cannot be guaranteed and users must verify information independently
Contributors should review the full financial disclaimer in the LICENSE file and README.md.
## Recognition
Contributors will be acknowledged in our CHANGELOG and can be featured in project documentation. We appreciate all contributions, from code to documentation to issue reports!
---
Thank you for contributing to MaverickMCP! Your efforts help make sophisticated financial analysis tools accessible to everyone.
```
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with the MaverickMCP codebase.
**🚀 QUICK START**: Run `make dev` to start the server. Connect with Claude Desktop using `mcp-remote`. See "Claude Desktop Setup" section below.
## Project Overview
MaverickMCP is a personal stock analysis MCP server built for Claude Desktop. It provides:
- Pre-seeded database with all 520 S&P 500 stocks and screening recommendations
- Real-time and historical stock data access with intelligent caching
- Advanced technical analysis tools (RSI, MACD, Bollinger Bands, etc.)
- Multiple stock screening strategies (Maverick Bullish/Bearish, Supply/Demand Breakouts)
- **Personal portfolio tracking with cost basis averaging and live P&L** (NEW)
- Portfolio optimization and correlation analysis with auto-detection
- Market and macroeconomic data integration
- SQLAlchemy-based database integration with SQLite default (PostgreSQL optional)
- Redis caching for high performance (optional)
- Clean, personal-use architecture without authentication complexity
## Project Structure
- `maverick_mcp/`
- `api/`: MCP server implementation
- `server.py`: Main FastMCP server (simple stock analysis mode)
- `routers/`: Domain-specific routers for organized tool groups
- `config/`: Configuration and settings
- `core/`: Core financial analysis functions
- `data/`: Data handling, caching, and database models
- `providers/`: Stock, market, and macro data providers
- `utils/`: Development utilities and performance optimizations
- `tests/`: Comprehensive test suite
- `validation/`: Request/response validation
- `tools/`: Development tools for faster workflows
- `docs/`: Architecture documentation
- `scripts/`: Startup and utility scripts
- `Makefile`: Central command interface
## Environment Setup
1. **Prerequisites**:
- **Python 3.12+**: Core runtime environment
- **[uv](https://docs.astral.sh/uv/)**: Modern Python package manager (recommended)
- Redis server (optional, for enhanced caching performance)
- PostgreSQL (optional, SQLite works fine for personal use)
2. **Installation**:
```bash
# Clone the repository
git clone https://github.com/wshobson/maverick-mcp.git
cd maverick-mcp
# Install dependencies using uv (recommended - fastest)
uv sync
# Or use traditional pip
python -m venv .venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate
pip install -e .
# Set up environment
cp .env.example .env
# Add your Tiingo API key (required)
```
3. **Required Configuration** (add to `.env`):
```
# Required - Stock data provider (free tier available)
TIINGO_API_KEY=your-tiingo-key
```
4. **Optional Configuration** (add to `.env`):
```
# OpenRouter API (strongly recommended for research - access to 400+ AI models with intelligent cost optimization)
OPENROUTER_API_KEY=your-openrouter-key
# Web Search API (recommended for research features)
EXA_API_KEY=your-exa-key
# Enhanced data providers (optional)
FRED_API_KEY=your-fred-key
# Database (optional - uses SQLite by default)
DATABASE_URL=postgresql://localhost/maverick_mcp
# Redis (optional - works without caching)
REDIS_HOST=localhost
REDIS_PORT=6379
```
**Get a free Tiingo API key**: Sign up at [tiingo.com](https://tiingo.com) - free tier includes 500 requests/day.
**OpenRouter API (Recommended)**: Sign up at [openrouter.ai](https://openrouter.ai) for access to 400+ AI models with intelligent cost optimization. The system automatically selects optimal models based on task requirements.
## Quick Start Commands
### Essential Commands (Powered by Makefile)
```bash
# Start the MCP server
make dev # Start with SSE transport (default, recommended)
make dev-sse # Start with SSE transport (same as dev)
make dev-http # Start with Streamable-HTTP transport (for testing/debugging)
make dev-stdio # Start with STDIO transport (direct connection)
# Development
make backend # Start backend server only
make tail-log # Follow logs in real-time
make stop # Stop all services
# Testing
make test # Run unit tests (5-10 seconds)
make test-watch # Auto-run tests on file changes
make test-cov # Run with coverage report
# Code Quality
make lint # Check code quality
make format # Auto-format code
make typecheck # Run type checking
make check # Run all checks
# Database
make migrate # Run database migrations
make setup # Initial setup
# Utilities
make clean # Clean up generated files
make redis-start # Start Redis (if using caching)
# Quick shortcuts
make d # Alias for make dev
make dh # Alias for make dev-http
make ds # Alias for make dev-stdio
make t # Alias for make test
make l # Alias for make lint
make c # Alias for make check
```
## Claude Desktop Setup
### Connection Methods
**✅ RECOMMENDED**: Claude Desktop works best with the **SSE endpoint via mcp-remote bridge**. This configuration has been tested and **prevents tools from disappearing** after initial connection.
#### Method A: SSE Server with mcp-remote Bridge (Recommended - Stable)
This is the **tested and proven method for Claude Desktop** - provides stable tool registration:
1. **Start the SSE server**:
```bash
make dev # Runs SSE server on port 8003
```
2. **Configure with mcp-remote bridge**:
Add to `~/Library/Application Support/Claude/claude_desktop_config.json`:
```json
{
"mcpServers": {
"maverick-mcp": {
"command": "npx",
"args": ["-y", "mcp-remote", "http://localhost:8003/sse"]
}
}
}
```
**Why This Configuration Works Best**:
- ✅ **Prevents Tool Disappearing**: Tools remain available throughout your session
- ✅ **Stable Connection**: SSE transport provides consistent communication
- ✅ **Session Persistence**: Maintains connection state for complex analysis workflows
- ✅ **All 39+ Tools Available**: Reliable access to all financial and research tools
- ✅ **Tested and Confirmed**: This exact configuration has been verified to work
- ✅ **No Trailing Slash Issues**: Server automatically handles both `/sse` and `/sse/` paths
#### Method B: HTTP Streamable Server with mcp-remote Bridge (Alternative)
1. **Start the HTTP Streamable server**:
```bash
make dev-http # Runs HTTP streamable server on port 8003
```
2. **Configure with mcp-remote bridge**:
Add to `~/Library/Application Support/Claude/claude_desktop_config.json`:
```json
{
"mcpServers": {
"maverick-mcp": {
"command": "npx",
"args": ["-y", "mcp-remote", "http://localhost:8003/mcp/"]
}
}
}
```
**Benefits**:
- ✅ Uses HTTP Streamable transport
- ✅ Alternative to SSE endpoint
- ✅ Supports remote access
#### Method C: Remote via Claude.ai (Alternative)
For native remote server support, use [Claude.ai web interface](https://claude.ai/settings/integrations) instead of Claude Desktop.
After configuring any of the methods above, **restart Claude Desktop** and test with: "Show me technical analysis for AAPL"
### Other Popular MCP Clients
> ⚠️ **Critical Transport Warning**: MCP clients have specific transport limitations. Using incorrect configurations will cause connection failures. Always verify which transports your client supports.
#### Transport Compatibility Matrix
| MCP Client | STDIO | HTTP | SSE | Optimal Method |
|----------------------|-------|------|-----|-----------------------------------------------|
| **Claude Desktop** | ❌ | ❌ | ✅ | **SSE via mcp-remote** (stable, tested) |
| **Cursor IDE** | ✅ | ❌ | ✅ | SSE and STDIO supported |
| **Claude Code CLI** | ✅ | ✅ | ✅ | All transports supported |
| **Continue.dev** | ✅ | ❌ | ✅ | SSE and STDIO supported |
| **Windsurf IDE** | ✅ | ❌ | ✅ | SSE and STDIO supported |
#### Claude Desktop (Most Commonly Used)
**✅ TESTED CONFIGURATION**: Use SSE endpoint with mcp-remote bridge - prevents tools from disappearing and ensures stable connection.
**Configuration Location:**
- macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
- Windows: `%APPDATA%\Claude\claude_desktop_config.json`
- Linux: `~/.config/Claude/claude_desktop_config.json`
**SSE Connection with mcp-remote (Tested and Stable):**
1. Start the server:
```bash
make dev # Starts SSE server on port 8003
```
2. Configure Claude Desktop:
```json
{
"mcpServers": {
"maverick-mcp": {
"command": "npx",
"args": ["-y", "mcp-remote", "http://localhost:8003/sse"]
}
}
}
```
**Important**: This exact configuration has been tested and confirmed to prevent the common issue where tools appear initially but then disappear from Claude Desktop. The server now accepts both `/sse` and `/sse/` paths without redirects.
**Restart Required:** Always restart Claude Desktop after config changes.
#### Cursor IDE - SSE and STDIO Support
**Option 1: Direct SSE (Recommended):**
```json
{
"mcpServers": {
"maverick-mcp": {
"url": "http://localhost:8003/sse"
}
}
}
```
**Location:** Cursor → Settings → MCP Servers
#### Claude Code CLI - Full Transport Support
**SSE Transport (Recommended):**
```bash
claude mcp add --transport sse maverick-mcp http://localhost:8003/sse
```
**HTTP Transport (Alternative):**
```bash
claude mcp add --transport http maverick-mcp http://localhost:8003/mcp/
```
**STDIO Transport (Development only):**
```bash
claude mcp add maverick-mcp uv run python -m maverick_mcp.api.server --transport stdio
```
#### Continue.dev - SSE and STDIO Support
**Option 1: Direct SSE (Recommended):**
```json
{
"mcpServers": {
"maverick-mcp": {
"url": "http://localhost:8003/sse"
}
}
}
```
**Option 2: SSE via mcp-remote (Alternative):**
```json
{
"experimental": {
"modelContextProtocolServer": {
"transport": {
"type": "stdio",
"command": "npx",
"args": ["-y", "mcp-remote", "http://localhost:8003/sse"]
}
}
}
}
```
**Location:** `~/.continue/config.json`
#### Windsurf IDE - SSE and STDIO Support
**Option 1: Direct SSE (Recommended):**
```json
{
"mcpServers": {
"maverick-mcp": {
"serverUrl": "http://localhost:8003/sse"
}
}
}
```
**Option 2: SSE via mcp-remote (Alternative):**
```json
{
"mcpServers": {
"maverick-mcp": {
"command": "npx",
"args": ["-y", "mcp-remote", "http://localhost:8003/sse"]
}
}
}
```
**Location:** Windsurf → Settings → Advanced Settings → MCP Servers
### How It Works
**Connection Architecture:**
- **SSE Endpoint (Recommended for Claude Desktop)**: `http://localhost:8003/sse` - Used via the mcp-remote bridge; accepts both `/sse` and `/sse/` without redirects
- **Streamable-HTTP Endpoint**: `http://localhost:8003/mcp/` - For clients with direct HTTP support (e.g. Claude Code CLI) or via mcp-remote
- **STDIO Mode**: Direct subprocess communication - useful for local development and CLI clients
> **Key Finding**: SSE via the mcp-remote bridge is the tested, stable configuration for Claude Desktop - it prevents tools from disappearing after the initial connection. Claude Desktop cannot connect to HTTP/SSE endpoints directly, so the mcp-remote bridge is required to translate its STDIO calls.
**Transport Limitations by Client:**
- **Claude Desktop**: STDIO-only, cannot directly connect to HTTP/SSE
- **Most Other Clients**: Support STDIO + SSE (but not HTTP)
- **Claude Code CLI**: Full transport support (STDIO, HTTP, SSE)
**mcp-remote Bridge Tool:**
- **Purpose**: Converts STDIO client calls to HTTP/SSE server requests
- **Why Needed**: Bridges the gap between STDIO-only clients and HTTP/SSE servers
- **Connection Flow**: Client (STDIO) ↔ mcp-remote ↔ HTTP/SSE Server
- **Installation**: `npx mcp-remote <server-url>`
**Key Transport Facts:**
- **STDIO**: All clients support this for local connections
- **HTTP**: Only Claude Code CLI supports direct HTTP connections
- **SSE**: Cursor, Continue.dev, Windsurf support direct SSE connections
- **Claude Desktop Limitation**: Cannot connect to HTTP/SSE without mcp-remote bridge
**Alternatives for Remote Access:**
- Use Claude.ai web interface for native remote server support (no mcp-remote needed)
## Key Features
### Stock Analysis
- Historical price data with database caching
- Technical indicators (SMA, EMA, RSI, MACD, Bollinger Bands)
- Support/resistance levels
- Volume analysis and patterns
### Stock Screening (Pre-seeded S&P 500 Data)
- **Maverick Bullish**: High momentum stocks with strong technicals from 520 S&P 500 stocks
- **Maverick Bearish**: Weak setups for short opportunities with pre-analyzed data
- **Supply/Demand Breakouts**: Stocks in confirmed uptrend phases with technical breakout patterns
- All screening data is pre-calculated and stored in database for instant results
### Portfolio Analysis
- Portfolio optimization using Modern Portfolio Theory
- Risk analysis and correlation matrices
- Performance metrics and comparisons
### Market Data
- Real-time quotes and market indices
- Sector performance analysis
- Economic indicators from FRED API
## Available Tools
All tools are organized into logical groups (39+ tools total):
### Data Tools (`/data/*`) - S&P 500 Pre-seeded
- `get_stock_data` - Historical price data with database caching
- `get_stock_info` - Company information from pre-seeded S&P 500 database
- `get_multiple_stocks_data` - Batch data fetching with optimized queries
### Technical Analysis (`/technical/*`)
- `calculate_sma`, `calculate_ema` - Moving averages
- `calculate_rsi` - Relative Strength Index
- `calculate_macd` - MACD indicator
- `calculate_bollinger_bands` - Bollinger Bands
- `get_full_technical_analysis` - Complete analysis suite
### Screening (`/screening/*`) - Pre-calculated Results
- `get_maverick_recommendations` - Bullish momentum stocks from S&P 500 database
- `get_maverick_bear_recommendations` - Bearish setups with pre-analyzed data
- `get_trending_breakout_recommendations` - Supply/demand breakout candidates from 520 stocks
- All screening results are pre-calculated and stored for instant access
### Advanced Research Tools (`/research/*`) - NEW AI-Powered Analysis
- `research_comprehensive` - Full parallel research with multiple AI agents (7-256x faster)
- `research_company` - Company-specific deep research with financial analysis
- `analyze_market_sentiment` - Multi-source sentiment analysis with confidence tracking
- `coordinate_agents` - Multi-agent supervisor for complex research orchestration
**Research Features:**
- **Parallel Execution**: 7-256x speedup with intelligent agent orchestration
- **Adaptive Timeouts**: 120s-600s based on research depth and complexity
- **Smart Model Selection**: Automatic selection from 400+ models via OpenRouter
- **Cost Optimization**: 40-60% cost reduction through intelligent model routing
- **Early Termination**: Confidence-based early stopping to save time and costs
- **Content Filtering**: High-credibility source prioritization
- **Error Recovery**: Circuit breakers and comprehensive error handling
### Portfolio Management (`/portfolio/*`) - Personal Holdings Tracking (NEW)
- `portfolio_add_position` - Add or update positions with automatic cost basis averaging
- `portfolio_get_my_portfolio` - View portfolio with live P&L calculations
- `portfolio_remove_position` - Remove partial or full positions
- `portfolio_clear_portfolio` - Clear all positions with safety confirmation
**Key Features:**
- Persistent storage with cost basis tracking (average cost method)
- Live unrealized P&L calculations with real-time prices
- Automatic cost averaging on repeat purchases
- Support for fractional shares and high-precision decimals
- Multi-portfolio support (track IRA, 401k, taxable separately)
- Portfolio resource (`portfolio://my-holdings`) for AI context
### Portfolio Analysis (`/portfolio/*`) - Intelligent Integration
- `risk_adjusted_analysis` - Risk-based position sizing (shows your existing positions)
- `compare_tickers` - Side-by-side comparison (auto-uses portfolio if no tickers provided)
- `portfolio_correlation_analysis` - Correlation matrix (auto-analyzes your holdings)
**Smart Features:**
- Tools auto-detect your portfolio positions
- Position-aware recommendations (averaging up/down, profit taking)
- No manual ticker entry needed for portfolio analysis
### Backtesting (`/backtesting/*`) - VectorBT-Powered Strategy Testing
- `run_backtest` - Execute backtests with any strategy
- `compare_strategies` - A/B testing for strategy comparison
- `optimize_strategy` - Walk-forward optimization and parameter tuning
- `analyze_backtest_results` - Comprehensive performance analytics
- `get_backtest_report` - Generate detailed HTML reports
**Capabilities:**
- 15+ built-in strategies including ML algorithms
- VectorBT engine for vectorized performance
- Parallel processing with 7-256x speedup
- Monte Carlo simulations and robustness testing
- Multi-timeframe support (1min to monthly)
### Market Data
- `get_market_overview` - Indices, sectors, market breadth
- `get_watchlist` - Sample portfolio with real-time data
## Development Commands
### Running the Server
```bash
# Development mode (recommended - Makefile commands)
make dev # SSE transport (default, recommended for Claude Desktop)
make dev-http # Streamable-HTTP transport (for testing with curl/Postman)
make dev-stdio # STDIO transport (direct connection)
# Alternative: Direct commands (manual)
uv run python -m maverick_mcp.api.server --transport sse --port 8003
uv run python -m maverick_mcp.api.server --transport streamable-http --port 8003
uv run python -m maverick_mcp.api.server --transport stdio
# Script-based startup (with environment variable)
./scripts/dev.sh # Defaults to SSE
MAVERICK_TRANSPORT=streamable-http ./scripts/dev.sh
```
**When to use each transport:**
- **SSE** (`make dev` or `make dev-sse`): Best for Claude Desktop - tested and stable
- **Streamable-HTTP** (`make dev-http`): Ideal for testing with curl/Postman, debugging transport issues
- **STDIO** (`make dev-stdio`): Direct connection without network layer, good for development
### Testing
```bash
# Quick testing
make test # Unit tests only (5-10 seconds)
make test-specific TEST=test_name # Run specific test
make test-watch # Auto-run on changes
# Using uv (recommended)
uv run pytest # Manual pytest execution
uv run pytest --cov=maverick_mcp # With coverage
uv run pytest -m integration # Integration tests (requires PostgreSQL/Redis)
# Alternative: Direct pytest (if activated in venv)
pytest # Manual pytest execution
pytest --cov=maverick_mcp # With coverage
pytest -m integration # Integration tests (requires PostgreSQL/Redis)
```
### Code Quality
```bash
# Automated quality checks
make format # Auto-format with ruff
make lint # Check code quality with ruff
make typecheck # Type check with ty (Astral's modern type checker)
make check # Run all checks
# Using uv (recommended)
uv run ruff check . # Linting
uv run ruff format . # Formatting
uv run ty check . # Type checking (Astral's modern type checker)
# Ultra-fast one-liner (no installation needed)
uvx ty check . # Run ty directly without installing
# Alternative: Direct commands (if activated in venv)
ruff check . # Linting
ruff format . # Formatting
ty check . # Type checking
```
## Configuration
### Database Options
**SQLite (Default - No Setup Required, includes S&P 500 data)**:
```bash
# Uses SQLite automatically with S&P 500 data seeding on first run
make dev
```
**PostgreSQL (Optional - Better Performance)**:
```bash
# In .env file
DATABASE_URL=postgresql://localhost/maverick_mcp
# Create database
createdb maverick_mcp
make migrate
```
### Caching Options
**No Caching (Default)**:
- Works out of the box, uses in-memory caching
**Redis Caching (Optional - Better Performance)**:
```bash
# Install and start Redis
brew install redis
brew services start redis
# Or use make command
make redis-start
# Server automatically detects Redis and uses it
```
## Code Guidelines
### General Principles
- Python 3.12+ with modern features
- Type hints for all functions
- Google-style docstrings for public APIs
- Comprehensive error handling
- Performance-first design with caching
### Financial Analysis
- Use pandas_ta for technical indicators
- Document all financial calculations
- Validate input data ranges
- Cache expensive computations
- Use vectorized operations for performance
### MCP Integration
- Register tools with `@mcp.tool()` decorator
- Return JSON-serializable results
- Implement graceful error handling
- Use database caching for persistence
- Follow FastMCP 2.0 patterns
## Troubleshooting
### Common Issues
**Server won't start**:
```bash
make stop # Stop any running processes
make clean # Clean temporary files
make dev # Restart
```
**Port already in use**:
```bash
lsof -i :8003 # Find what's using port 8003
make stop # Stop MaverickMCP services
```
**Redis connection errors** (optional):
```bash
brew services start redis # Start Redis
# Or disable caching by not setting REDIS_HOST
```
**Database errors**:
```bash
# Use SQLite (no setup required)
unset DATABASE_URL
make dev
# Or fix PostgreSQL
createdb maverick_mcp
make migrate
```
**Claude Desktop not connecting**:
1. Verify server is running: `lsof -i :8003` (check if port 8003 is in use)
2. Check `claude_desktop_config.json` syntax and correct port (8003)
3. **Use the tested SSE configuration**: `http://localhost:8003/sse` with mcp-remote
4. Restart Claude Desktop completely
5. Test with: "Get AAPL stock data"
**Tools appearing then disappearing**:
1. **FIXED**: Server now accepts both `/sse` and `/sse/` without 307 redirects
2. Use the recommended SSE configuration with mcp-remote bridge
3. Ensure you're using the exact configuration shown above
4. The SSE + mcp-remote setup has been tested and prevents tool disappearing
5. **No trailing slash required**: Server automatically handles path normalization
**Research Tool Issues**:
1. **Timeouts**: Research tools have adaptive timeouts (120s-600s)
2. Deep research may take 2-10 minutes depending on complexity
3. Monitor progress in server logs with `make tail-log`
4. Ensure `OPENROUTER_API_KEY` and `EXA_API_KEY` are set for full functionality
**Missing S&P 500 screening data**:
```bash
# Manually seed S&P 500 database if needed
uv run python scripts/seed_sp500.py
```
### Performance Tips
- **Use Redis caching** for better performance
- **PostgreSQL over SQLite** for larger datasets
- **Parallel screening** is enabled by default (4x speedup)
- **Parallel research** achieves 7-256x speedup with agent orchestration
- **In-memory caching** reduces API calls
- **Smart model selection** reduces costs by 40-60% with OpenRouter
## Quick Testing
Test the server is working:
```bash
# Test server is running
lsof -i :8003
# Test MCP endpoint (after connecting with mcp-remote)
# Use Claude Desktop with: "List available tools"
```
### Test Backtesting Features
Once connected to Claude Desktop, test the backtesting framework:
```
# Basic backtest
"Run a backtest on SPY using the momentum strategy for 2024"
# Strategy comparison
"Compare RSI vs MACD strategies on AAPL for the last year"
# ML strategy test
"Test the adaptive ML strategy on tech sector stocks"
# Performance analysis
"Show me detailed metrics for a mean reversion strategy on QQQ"
```
## Recent Updates
### Production-Ready Backtesting Framework (NEW)
- **VectorBT Integration**: High-performance vectorized backtesting engine
- **15+ Built-in Strategies**: Including ML-powered adaptive, ensemble, and regime-aware algorithms
- **Parallel Processing**: 7-256x speedup for multi-strategy evaluation
- **Advanced Analytics**: Sharpe, Sortino, Calmar ratios, maximum drawdown, win rate analysis
- **Walk-Forward Optimization**: Out-of-sample testing with parameter tuning
- **Monte Carlo Simulations**: Robustness testing with confidence intervals
- **LangGraph Workflow**: Multi-agent orchestration for intelligent strategy selection
- **Comprehensive Reporting**: HTML reports with interactive visualizations
### Advanced Research Agents (Major Feature Release)
- **Parallel Research Execution**: Achieved 7-256x speedup (exceeded 2x target) with intelligent agent orchestration
- **Adaptive Timeout Protection**: Dynamic timeouts (120s-600s) based on research depth and complexity
- **Intelligent Model Selection**: OpenRouter integration with 400+ models, 40-60% cost reduction
- **Comprehensive Error Handling**: Circuit breakers, retry logic, and graceful degradation
- **Early Termination**: Confidence-based stopping to optimize time and costs
- **Content Filtering**: High-credibility source prioritization for quality results
- **Multi-Agent Orchestration**: Supervisor pattern for complex research coordination
- **New Research Tools**: `research_comprehensive`, `research_company`, `analyze_market_sentiment`, `coordinate_agents`
### Performance Improvements
- **Parallel Agent Execution**: Increased concurrent agents from 4 to 6
- **Optimized Semaphores**: BoundedSemaphore for better resource management
- **Reduced Rate Limiting**: Delays decreased from 0.5s to 0.05s
- **Batch Processing**: Improved throughput for multiple research tasks
- **Smart Caching**: Redis-powered with in-memory fallback
- **Stock Screening**: 4x faster with parallel processing
### Testing & Quality
- **84% Test Coverage**: 93 tests with comprehensive coverage
- **Zero Linting Errors**: Fixed 947 issues for clean codebase
- **Full Type Annotations**: Complete type coverage for research components
- **Error Recovery Testing**: Comprehensive failure scenario coverage
### Personal Use Optimization
- **No Authentication/Billing**: Completely removed for personal use simplicity
- **Pre-seeded S&P 500 Database**: 520 stocks with comprehensive screening data on first startup
- **Simplified Architecture**: Clean, focused codebase without commercial complexity
- **Multi-Transport Support**: HTTP, SSE, and STDIO for all MCP clients
- **SQLite Default**: No database setup required, PostgreSQL optional for performance
### AI/LLM Integration
- **OpenRouter Integration**: Access to 400+ AI models with intelligent cost optimization
- **Smart Model Selection**: Automatic model selection based on task requirements (sentiment analysis, market research, technical analysis)
- **Cost-Efficient by Default**: Prioritizes cost-effectiveness while maintaining quality, 40-60% cost savings over premium-only approaches
- **Multiple Model Support**: Claude Opus 4.1, Claude Sonnet 4, Claude 3.5 Haiku, GPT-5, GPT-5 Nano, Gemini 2.5 Pro, DeepSeek R1, and more
### Developer Experience
- Comprehensive Makefile for all common tasks
- Smart error handling with automatic fix suggestions
- Hot reload development mode
- Extensive test suite with quick unit tests
- Type checking with ty (Astral's extremely fast type checker) for better IDE support
## Additional Resources
- **Architecture docs**: `docs/` directory
- **Portfolio Guide**: `docs/PORTFOLIO.md` - Complete guide to portfolio features
- **Test examples**: `tests/` directory
- **Development tools**: `tools/` directory
- **Example scripts**: `scripts/` directory
For detailed technical information and advanced usage, see the full documentation in the `docs/` directory.
---
**Note**: This project is designed for personal use. It provides powerful stock analysis tools for Claude Desktop with pre-seeded S&P 500 data, without the complexity of multi-user systems, authentication, or billing. The database automatically seeds with 520 S&P 500 stocks and screening recommendations on first startup.
```
--------------------------------------------------------------------------------
/maverick_mcp/application/commands/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/application/dto/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/application/queries/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/domain/entities/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/domain/events/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/domain/services/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/domain/value_objects/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/cache/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/persistence/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/providers/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
```yaml
github: wshobson
```
--------------------------------------------------------------------------------
/maverick_mcp/api/utils/__init__.py:
--------------------------------------------------------------------------------
```python
"""API utility modules."""
```
--------------------------------------------------------------------------------
/tests/integration/__init__.py:
--------------------------------------------------------------------------------
```python
# Integration tests module marker
```
--------------------------------------------------------------------------------
/maverick_mcp/tests/__init__.py:
--------------------------------------------------------------------------------
```python
"""Test package for Maverick-MCP."""
```
--------------------------------------------------------------------------------
/maverick_mcp/utils/__init__.py:
--------------------------------------------------------------------------------
```python
"""Utility modules for Maverick-MCP."""
```
--------------------------------------------------------------------------------
/maverick_mcp/api/dependencies/__init__.py:
--------------------------------------------------------------------------------
```python
"""API dependencies for dependency injection."""
```
--------------------------------------------------------------------------------
/maverick_mcp/database/__init__.py:
--------------------------------------------------------------------------------
```python
"""Database package for MaverickMCP."""
# Re-export the SQLAlchemy declarative base so models can import it
# from the package root (`from maverick_mcp.database import Base`).
from .base import Base
__all__ = ["Base"]
```
--------------------------------------------------------------------------------
/maverick_mcp/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Maverick-MCP package for financial analysis using MCP protocol.
"""
__all__ = []
```
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
```yaml
version: 2
updates:
- package-ecosystem: uv
directory: "/"
schedule:
interval: weekly
```
--------------------------------------------------------------------------------
/maverick_mcp/api/__init__.py:
--------------------------------------------------------------------------------
```python
"""
API components for Maverick-MCP.
This package contains the API server implementation.
"""
__all__: list[str] = []
```
--------------------------------------------------------------------------------
/maverick_mcp/application/screening/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Screening application layer.
This module contains application services, queries, and DTOs
for the screening domain.
"""
```
--------------------------------------------------------------------------------
/maverick_mcp/tools/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Financial analysis tools for Maverick-MCP.
"""
from .portfolio_manager import PortfolioManager
__all__ = ["PortfolioManager"]
```
--------------------------------------------------------------------------------
/maverick_mcp/domain/stock_analysis/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Domain layer stock analysis services.
"""
from .stock_analysis_service import StockAnalysisService
__all__ = ["StockAnalysisService"]
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/health/__init__.py:
--------------------------------------------------------------------------------
```python
"""Health checking infrastructure."""
from .health_checker import HealthChecker, HealthStatus
__all__ = ["HealthChecker", "HealthStatus"]
```
--------------------------------------------------------------------------------
/maverick_mcp/domain/screening/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Screening domain module.
This module contains the pure business logic for stock screening,
following Domain-Driven Design principles.
"""
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/caching/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Infrastructure layer caching services.
"""
from .cache_management_service import CacheManagementService
__all__ = ["CacheManagementService"]
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/data_fetching/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Infrastructure layer data fetching services.
"""
from .stock_data_service import StockDataFetchingService
__all__ = ["StockDataFetchingService"]
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/screening/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Screening infrastructure layer.
This module contains infrastructure adapters for the screening domain,
including repository implementations and external service adapters.
"""
```
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
```json
{
"configurations": [
{
"name": "Maverick-MCP CLI",
"type": "debugpy",
"request": "launch",
"module": "maverick_mcp.cli.server",
"args": ["--debug"]
}
]
}
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Infrastructure layer services.
"""
from .caching import CacheManagementService
from .data_fetching import StockDataFetchingService
__all__ = ["CacheManagementService", "StockDataFetchingService"]
```
--------------------------------------------------------------------------------
/maverick_mcp/core/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Core functionality for Maverick-MCP.
This package contains the core business logic and utilities for the Maverick-MCP service.
"""
from . import technical_analysis, visualization
__all__ = ["technical_analysis", "visualization"]
```
--------------------------------------------------------------------------------
/tests/integration/test_api_technical.py:
--------------------------------------------------------------------------------
```python
"""Placeholder integration tests for technical analysis endpoints."""
import pytest
pytest.skip(
"Technical analysis integration tests require external services not available in the open-source build.",
allow_module_level=True,
)
```
--------------------------------------------------------------------------------
/maverick_mcp/memory/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Memory and persistence management for Maverick-MCP agents.
"""
from langgraph.checkpoint.memory import MemorySaver
from .stores import ConversationStore, MemoryStore
__all__ = [
"MemorySaver",
"MemoryStore",
"ConversationStore",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/langchain_tools/__init__.py:
--------------------------------------------------------------------------------
```python
"""
LangChain tool adapters for Maverick-MCP.
"""
from .adapters import create_langchain_tool, mcp_to_langchain_adapter
from .registry import ToolRegistry, get_tool_registry
__all__ = [
"mcp_to_langchain_adapter",
"create_langchain_tool",
"ToolRegistry",
"get_tool_registry",
]
```
--------------------------------------------------------------------------------
/tests/performance/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Performance Testing Suite for MaverickMCP Backtesting System.
This package contains comprehensive performance tests including:
- Load testing for concurrent users
- Benchmark tests against performance targets
- Stress testing for resource usage monitoring
- Profiling tests for bottleneck identification
"""
```
--------------------------------------------------------------------------------
/maverick_mcp/backtesting/strategies/ml_strategies.py:
--------------------------------------------------------------------------------
```python
"""ML strategies bridge module for easier imports."""
from .ml.adaptive import AdaptiveStrategy as OnlineLearningStrategy
from .ml.ensemble import StrategyEnsemble as EnsembleStrategy
from .ml.regime_aware import RegimeAwareStrategy
__all__ = ["OnlineLearningStrategy", "RegimeAwareStrategy", "EnsembleStrategy"]
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Data provider modules for Maverick-MCP.
This package contains provider classes for fetching different types of financial data.
"""
from .macro_data import MacroDataProvider
from .market_data import MarketDataProvider
from .stock_data import StockDataProvider
__all__ = ["StockDataProvider", "MacroDataProvider", "MarketDataProvider"]
```
--------------------------------------------------------------------------------
/maverick_mcp/database/base.py:
--------------------------------------------------------------------------------
```python
"""
Shared database base class for all SQLAlchemy models.
This module provides a common Base class to avoid circular imports
and ensure all models are registered with the same metadata.
"""
from sqlalchemy.orm import DeclarativeBase
class Base(DeclarativeBase):
    """Shared SQLAlchemy 2.0 declarative base; all models register on its metadata."""
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/factories/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Provider factories for Maverick-MCP.
This package contains factory classes that handle provider instantiation,
dependency injection, and lifecycle management following the Factory pattern.
"""
from .config_factory import ConfigurationFactory
from .provider_factory import ProviderFactory
__all__ = [
"ProviderFactory",
"ConfigurationFactory",
]
```
--------------------------------------------------------------------------------
/alembic/versions/08e3945a0c93_merge_heads.py:
--------------------------------------------------------------------------------
```python
"""merge_heads
Revision ID: 08e3945a0c93
Revises: 005_add_user_table, 416a0259129d
Create Date: 2025-06-08 13:23:35.875210
"""
# revision identifiers, used by Alembic.
revision = "08e3945a0c93"
down_revision = ("005_add_user_table", "416a0259129d")
branch_labels = None
depends_on = None
def upgrade() -> None:
    """No-op: this merge revision only unifies divergent Alembic heads."""


def downgrade() -> None:
    """No-op: a merge revision has no schema changes to revert."""
```
--------------------------------------------------------------------------------
/maverick_mcp/backtesting/strategies/ml/__init__.py:
--------------------------------------------------------------------------------
```python
"""Machine learning enhanced trading strategies."""
from .adaptive import AdaptiveStrategy
from .ensemble import StrategyEnsemble
from .feature_engineering import FeatureExtractor, MLPredictor
from .regime_aware import RegimeAwareStrategy
__all__ = [
"AdaptiveStrategy",
"FeatureExtractor",
"MLPredictor",
"RegimeAwareStrategy",
"StrategyEnsemble",
]
```
--------------------------------------------------------------------------------
/alembic/versions/fix_database_integrity_issues.py:
--------------------------------------------------------------------------------
```python
"""Legacy migration stub for OSS build without billing tables."""
# Revision identifiers, used by Alembic.
revision = "fix_database_integrity"
down_revision = "e0c75b0bdadb"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """No-op upgrade: the open-source build ships without the billing tables this fixed."""


def downgrade() -> None:
    """No-op downgrade: nothing was applied on upgrade."""
```
--------------------------------------------------------------------------------
/alembic/versions/adda6d3fd84b_merge_proprietary_terms_removal_with_.py:
--------------------------------------------------------------------------------
```python
"""Merge proprietary terms removal with async jobs
Revision ID: adda6d3fd84b
Revises: 0004, 011_remove_proprietary_terms
Create Date: 2025-08-10 15:31:44.179603
"""
# revision identifiers, used by Alembic.
revision = "adda6d3fd84b"
down_revision = ("0004", "011_remove_proprietary_terms")
branch_labels = None
depends_on = None
def upgrade() -> None:
    """No-op: merge revision exists solely to join two Alembic heads."""


def downgrade() -> None:
    """No-op: merge revisions carry no reversible schema changes."""
```
--------------------------------------------------------------------------------
/alembic/versions/e0c75b0bdadb_fix_financial_data_precision_only.py:
--------------------------------------------------------------------------------
```python
"""Legacy precision migration stub (billing tables removed in OSS build)."""
# Revision identifiers, used by Alembic.
revision = "e0c75b0bdadb"
down_revision = "add_stripe_webhook_events"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """No-op upgrade: legacy precision fix targeted billing tables absent from the OSS build."""


def downgrade() -> None:
    """No-op downgrade: nothing was changed on upgrade."""
```
--------------------------------------------------------------------------------
/alembic/versions/9374a5c9b679_merge_heads_for_testing.py:
--------------------------------------------------------------------------------
```python
"""merge_heads_for_testing
Revision ID: 9374a5c9b679
Revises: 012_remove_frontend_billing_system, adda6d3fd84b
Create Date: 2025-08-10 19:10:32.920060
"""
# revision identifiers, used by Alembic.
revision = "9374a5c9b679"
down_revision = ("012_remove_frontend_billing_system", "adda6d3fd84b")
branch_labels = None
depends_on = None
def upgrade() -> None:
    """No-op: merge revision only reconciles branched Alembic history."""


def downgrade() -> None:
    """No-op: there is nothing to undo for a merge revision."""
```
--------------------------------------------------------------------------------
/alembic/versions/abf9b9afb134_merge_multiple_heads.py:
--------------------------------------------------------------------------------
```python
"""merge_multiple_heads
Revision ID: abf9b9afb134
Revises: 008_performance_optimization_indexes, f0696e2cac15
Create Date: 2025-07-04 14:57:35.435208
"""
# revision identifiers, used by Alembic.
revision = "abf9b9afb134"
down_revision = ("008_performance_optimization_indexes", "f0696e2cac15")
branch_labels = None
depends_on = None
def upgrade() -> None:
    """No-op: merge revision joining multiple Alembic heads; no DDL."""


def downgrade() -> None:
    """No-op: merge revisions have no schema effects to reverse."""
```
--------------------------------------------------------------------------------
/scripts/requirements_tiingo.txt:
--------------------------------------------------------------------------------
```
# Requirements for the Tiingo data loader
# Core dependencies
aiohttp>=3.8.0
pandas>=2.0.0
pandas-ta>=0.3.14b0
sqlalchemy>=2.0.0
psycopg2-binary>=2.9.0 # PostgreSQL adapter
# Optional dependencies for enhanced functionality
numpy>=1.24.0
python-dateutil>=2.8.0
asyncio-throttle>=1.0.0 # Alternative rate limiting
requests>=2.28.0 # Fallback for sync operations
# Development and testing
pytest>=7.0.0
pytest-asyncio>=0.21.0
asynctest>=0.13.0
```
--------------------------------------------------------------------------------
/maverick_mcp/agents/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Maverick-MCP Agents Module.
This module contains LangGraph-based agents for financial analysis workflows.
"""
from .base import INVESTOR_PERSONAS, PersonaAwareAgent, PersonaAwareTool
from .circuit_breaker import circuit_breaker, circuit_manager
from .deep_research import DeepResearchAgent
from .supervisor import SupervisorAgent
__all__ = [
"PersonaAwareAgent",
"PersonaAwareTool",
"INVESTOR_PERSONAS",
"circuit_breaker",
"circuit_manager",
"DeepResearchAgent",
"SupervisorAgent",
]
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
```yaml
blank_issues_enabled: false
contact_links:
- name: 💬 GitHub Discussions
url: https://github.com/wshobson/maverick-mcp/discussions
about: Ask questions and discuss ideas with the community
- name: 📖 Documentation
url: https://github.com/wshobson/maverick-mcp/blob/main/README.md
about: Read the documentation and setup guides
- name: 🔒 Security Advisories
url: https://github.com/wshobson/maverick-mcp/security/advisories/new
about: Report security vulnerabilities privately (recommended for serious issues)
```
--------------------------------------------------------------------------------
/maverick_mcp/api/services/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Services package for MaverickMCP API.
This package contains service classes extracted from the large server.py file
to improve code organization and maintainability following SOLID principles.
"""
from .base_service import BaseService
from .market_service import MarketService
from .portfolio_service import PortfolioService
from .prompt_service import PromptService
from .resource_service import ResourceService
__all__ = [
"BaseService",
"MarketService",
"PortfolioService",
"PromptService",
"ResourceService",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/workflows/agents/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Intelligent agents for backtesting workflow orchestration.
This module contains specialized agents for market analysis, strategy selection,
parameter optimization, and results validation within the LangGraph backtesting workflow.
"""
from .market_analyzer import MarketAnalyzerAgent
from .optimizer_agent import OptimizerAgent
from .strategy_selector import StrategySelectorAgent
from .validator_agent import ValidatorAgent
__all__ = [
"MarketAnalyzerAgent",
"OptimizerAgent",
"StrategySelectorAgent",
"ValidatorAgent",
]
```
--------------------------------------------------------------------------------
/pyrightconfig.json:
--------------------------------------------------------------------------------
```json
{
"include": [
"maverick_mcp/api/server.py",
"maverick_mcp/data/cache.py",
"maverick_mcp/data/performance.py",
"maverick_mcp/providers/optimized_stock_data.py",
"maverick_mcp/utils/quick_cache.py",
"maverick_mcp/backtesting/strategy_executor.py",
"maverick_mcp/tools/performance_monitoring.py"
],
"reportMissingImports": "warning",
"reportMissingModuleSource": "warning",
"reportMissingTypeStubs": "warning",
"pythonVersion": "3.12",
"typeCheckingMode": "standard",
"executionEnvironments": [
{
"root": ".",
"pythonVersion": "3.12",
"extraPaths": ["."]
}
]
}
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/implementations/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Provider implementations for Maverick-MCP.
This package contains concrete implementations of the provider interfaces,
including adapters for existing providers and new implementations that
fully embrace the interface-based architecture.
"""
from .cache_adapter import RedisCacheAdapter
from .macro_data_adapter import MacroDataAdapter
from .market_data_adapter import MarketDataAdapter
from .persistence_adapter import SQLAlchemyPersistenceAdapter
from .stock_data_adapter import StockDataAdapter
__all__ = [
"RedisCacheAdapter",
"StockDataAdapter",
"MarketDataAdapter",
"MacroDataAdapter",
"SQLAlchemyPersistenceAdapter",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/config/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Configuration utilities for Maverick-MCP.
"""
from .constants import CACHE_TTL, CONFIG, clean_env_var
from .database import (
DatabasePoolConfig,
create_engine_with_enhanced_config,
get_default_pool_config,
get_development_pool_config,
get_high_concurrency_pool_config,
get_pool_config_from_settings,
validate_production_config,
)
__all__ = [
"CONFIG",
"CACHE_TTL",
"clean_env_var",
"DatabasePoolConfig",
"get_default_pool_config",
"get_development_pool_config",
"get_high_concurrency_pool_config",
"get_pool_config_from_settings",
"create_engine_with_enhanced_config",
"validate_production_config",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/application/__init__.py:
--------------------------------------------------------------------------------
```python
"""Application layer - contains use cases and orchestration logic."""
from maverick_mcp.application.dto.technical_analysis_dto import (
BollingerBandsDTO,
CompleteTechnicalAnalysisDTO,
MACDAnalysisDTO,
PriceLevelDTO,
RSIAnalysisDTO,
StochasticDTO,
TrendAnalysisDTO,
VolumeAnalysisDTO,
)
from maverick_mcp.application.queries.get_technical_analysis import (
GetTechnicalAnalysisQuery,
)
__all__ = [
# Queries
"GetTechnicalAnalysisQuery",
# DTOs
"CompleteTechnicalAnalysisDTO",
"RSIAnalysisDTO",
"MACDAnalysisDTO",
"BollingerBandsDTO",
"StochasticDTO",
"TrendAnalysisDTO",
"VolumeAnalysisDTO",
"PriceLevelDTO",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/workflows/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Workflow orchestration module for Maverick MCP.
This module provides workflow orchestration capabilities using LangGraph
for complex multi-agent trading and analysis workflows.
"""
from .backtesting_workflow import BacktestingWorkflow
from .state import (
BacktestingWorkflowState,
BaseAgentState,
DeepResearchState,
MarketAnalysisState,
PortfolioState,
RiskManagementState,
SupervisorState,
TechnicalAnalysisState,
)
__all__ = [
"BaseAgentState",
"MarketAnalysisState",
"TechnicalAnalysisState",
"RiskManagementState",
"PortfolioState",
"SupervisorState",
"DeepResearchState",
"BacktestingWorkflowState",
"BacktestingWorkflow",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/mocks/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Mock provider implementations for testing.
This package contains mock implementations of all provider interfaces
that can be used for fast, predictable testing without external dependencies.
"""
from .mock_cache import MockCacheManager
from .mock_config import MockConfigurationProvider
from .mock_macro_data import MockMacroDataProvider
from .mock_market_data import MockMarketDataProvider
from .mock_persistence import MockDataPersistence
from .mock_stock_data import MockStockDataFetcher, MockStockScreener
__all__ = [
"MockCacheManager",
"MockStockDataFetcher",
"MockStockScreener",
"MockMarketDataProvider",
"MockMacroDataProvider",
"MockDataPersistence",
"MockConfigurationProvider",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/data/cache_manager.py:
--------------------------------------------------------------------------------
```python
"""
Cache manager module.
This module provides a bridge to import CacheManager and related cache utilities.
The actual implementation is in cache.py but this module provides the expected import path.
"""
from .cache import (
CacheManager,
cleanup_redis_pool,
clear_cache,
ensure_timezone_naive,
generate_cache_key,
get_cache_stats,
get_from_cache,
get_redis_client,
normalize_timezone,
reset_cache_stats,
save_to_cache,
)
__all__ = [
"CacheManager",
"get_cache_stats",
"reset_cache_stats",
"get_from_cache",
"save_to_cache",
"clear_cache",
"generate_cache_key",
"ensure_timezone_naive",
"normalize_timezone",
"get_redis_client",
"cleanup_redis_pool",
]
```
--------------------------------------------------------------------------------
/alembic/versions/001_initial_schema.py:
--------------------------------------------------------------------------------
```python
"""Initial schema - MCP-specific tables only
Revision ID: 001_initial_schema
Revises:
Create Date: 2025-01-06 12:00:00.000000
Note: This migration creates MCP-specific tables with mcp_ prefix.
Django-owned tables (stocks_stock, stocks_pricecache, maverick_stocks,
maverick_bear_stocks, supply_demand_breakouts) are not managed by Alembic.
"""
# revision identifiers, used by Alembic.
revision = "001_initial_schema"
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """No-op: screening tables (maverick_stocks, maverick_bear_stocks,
    supply_demand_breakouts) are Django-owned and created outside Alembic."""


def downgrade() -> None:
    """No-op: MCP created no tables here, so there is nothing to drop."""
```
--------------------------------------------------------------------------------
/alembic/versions/006_rename_metadata_columns.py:
--------------------------------------------------------------------------------
```python
"""rename metadata columns
Revision ID: 006_rename_metadata_columns
Revises: f976356b6f07
Create Date: 2025-06-05
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "006_rename_metadata_columns"
down_revision = "f976356b6f07"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Rename `metadata` columns; the name collides with SQLAlchemy's reserved
    Declarative attribute, so models cannot map it directly."""
    # Rename metadata columns to avoid SQLAlchemy reserved word conflict
    op.alter_column("mcp_auth_audit_log", "metadata", new_column_name="event_metadata")
    op.alter_column(
        "mcp_user_subscriptions", "metadata", new_column_name="subscription_metadata"
    )


def downgrade() -> None:
    """Restore the original `metadata` column names (exact inverse of upgrade)."""
    # Revert column names
    op.alter_column("mcp_auth_audit_log", "event_metadata", new_column_name="metadata")
    op.alter_column(
        "mcp_user_subscriptions", "subscription_metadata", new_column_name="metadata"
    )
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/interfaces/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Provider interfaces for Maverick-MCP.
This package contains abstract interfaces that define contracts for all data providers,
caching systems, and persistence layers. These interfaces enable dependency injection,
improve testability, and reduce coupling between components.
The interfaces follow the Interface Segregation Principle, providing focused contracts
for specific concerns rather than monolithic interfaces.
"""
from .cache import ICacheManager
from .config import IConfigurationProvider
from .macro_data import IMacroDataProvider
from .market_data import IMarketDataProvider
from .persistence import IDataPersistence
from .stock_data import IStockDataFetcher, IStockScreener
__all__ = [
"ICacheManager",
"IConfigurationProvider",
"IDataPersistence",
"IMarketDataProvider",
"IMacroDataProvider",
"IStockDataFetcher",
"IStockScreener",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/backtesting/strategies/__init__.py:
--------------------------------------------------------------------------------
```python
"""Strategy modules for VectorBT backtesting."""
from .base import Strategy
# ML-enhanced strategies
from .ml import (
AdaptiveStrategy,
FeatureExtractor,
MLPredictor,
RegimeAwareStrategy,
StrategyEnsemble,
)
from .ml.adaptive import HybridAdaptiveStrategy, OnlineLearningStrategy
from .ml.ensemble import RiskAdjustedEnsemble
from .ml.regime_aware import AdaptiveRegimeStrategy, MarketRegimeDetector
from .parser import StrategyParser
from .templates import STRATEGY_TEMPLATES
__all__ = [
"Strategy",
"StrategyParser",
"STRATEGY_TEMPLATES",
# ML strategies
"AdaptiveStrategy",
"FeatureExtractor",
"MLPredictor",
"RegimeAwareStrategy",
"StrategyEnsemble",
# Advanced ML strategies
"OnlineLearningStrategy",
"HybridAdaptiveStrategy",
"RiskAdjustedEnsemble",
"MarketRegimeDetector",
"AdaptiveRegimeStrategy",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/domain/__init__.py:
--------------------------------------------------------------------------------
```python
"""Domain layer - contains pure business logic with no infrastructure dependencies."""
from maverick_mcp.domain.entities.stock_analysis import StockAnalysis
from maverick_mcp.domain.portfolio import Portfolio, Position
from maverick_mcp.domain.services.technical_analysis_service import (
TechnicalAnalysisService,
)
from maverick_mcp.domain.stock_analysis import StockAnalysisService
from maverick_mcp.domain.value_objects.technical_indicators import (
BollingerBands,
MACDIndicator,
PriceLevel,
RSIIndicator,
Signal,
StochasticOscillator,
TrendDirection,
VolumeProfile,
)
__all__ = [
# Entities
"StockAnalysis",
# Portfolio Entities
"Portfolio",
"Position",
# Services
"TechnicalAnalysisService",
"StockAnalysisService",
# Value Objects
"RSIIndicator",
"MACDIndicator",
"BollingerBands",
"StochasticOscillator",
"PriceLevel",
"VolumeProfile",
"Signal",
"TrendDirection",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/monitoring/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Monitoring package for MaverickMCP backtesting system.
This package provides comprehensive monitoring capabilities including:
- Prometheus metrics for backtesting performance
- Strategy execution monitoring
- API rate limiting and failure tracking
- Anomaly detection and alerting
"""
from .health_check import (
ComponentHealth,
HealthChecker,
HealthStatus,
SystemHealth,
check_system_health,
get_health_checker,
)
from .metrics import (
BacktestingMetricsCollector,
get_backtesting_metrics,
track_anomaly_detection,
track_api_call_metrics,
track_backtest_execution,
track_strategy_performance,
)
__all__ = [
"BacktestingMetricsCollector",
"get_backtesting_metrics",
"track_backtest_execution",
"track_strategy_performance",
"track_api_call_metrics",
"track_anomaly_detection",
"HealthChecker",
"HealthStatus",
"ComponentHealth",
"SystemHealth",
"check_system_health",
"get_health_checker",
]
```
--------------------------------------------------------------------------------
/maverick_mcp/api/middleware/security.py:
--------------------------------------------------------------------------------
```python
"""
Simple security middleware for Maverick-MCP personal use.
This module provides basic security headers for personal use.
Advanced security features have been removed.
"""
from fastapi import Request
from starlette.middleware.base import BaseHTTPMiddleware
from maverick_mcp.utils.logging import get_logger
logger = get_logger(__name__)
class SecurityHeadersMiddleware(BaseHTTPMiddleware):
    """Stamp a baseline set of security headers onto every response."""

    async def dispatch(self, request: Request, call_next):
        """Run the downstream handler, then attach the standard security headers."""
        response = await call_next(request)
        # Basic hardening headers appropriate for personal use.
        security_headers = {
            "X-Content-Type-Options": "nosniff",
            "X-Frame-Options": "DENY",
            "X-XSS-Protection": "1; mode=block",
            "Referrer-Policy": "strict-origin-when-cross-origin",
        }
        for header_name, header_value in security_headers.items():
            response.headers[header_name] = header_value
        return response
# Additional middleware classes removed for simplicity
# Only keeping SecurityHeadersMiddleware for basic security
```
--------------------------------------------------------------------------------
/alembic/versions/003_add_performance_indexes.py:
--------------------------------------------------------------------------------
```python
"""Add performance indexes for Maverick-MCP
Revision ID: 003_add_performance_indexes
Revises: 002_add_authentication_tables
Create Date: 2025-06-03 12:00:00
This migration adds performance indexes to improve query speed
for MCP-specific tables only. Django-owned tables are not modified.
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "003_add_performance_indexes"
down_revision = "002_add_authentication_tables"
branch_labels = None
depends_on = None
def upgrade():
    """Add performance indexes for MCP tables only.

    Django-owned tables are deliberately untouched (see module docstring).
    """
    # API key usage performance indexes
    op.create_index(
        "idx_mcp_api_key_usage_api_key_id",
        "mcp_api_key_usage",
        ["api_key_id"],
        # btree suits equality lookups on the foreign-key column
        postgresql_using="btree",
    )
    # NOTE(review): print() in a migration writes straight to stdout; Alembic's
    # logger would be the conventional channel — confirm before changing.
    print("Performance indexes for MCP tables created successfully!")


def downgrade():
    """Remove performance indexes from MCP tables (inverse of upgrade)."""
    # Drop API key usage index
    op.drop_index("idx_mcp_api_key_usage_api_key_id", "mcp_api_key_usage")
    print("Performance indexes removed from MCP tables.")
```
--------------------------------------------------------------------------------
/maverick_mcp/api/routers/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Router modules for organizing Maverick-MCP endpoints by domain.
This module contains domain-specific routers that organize
the MCP tools into logical groups for better maintainability.
Personal-use stock analysis MCP server.
"""
from .data import data_router
from .health_enhanced import router as health_enhanced_router
from .performance import get_performance_router
from .portfolio import portfolio_router
from .screening import screening_router
from .technical import technical_router
# Initialize performance router
performance_router = get_performance_router()
# Optional: LangGraph agents router
try:
from .agents import agents_router
has_agents = True
except ImportError:
agents_router = None # type: ignore[assignment]
has_agents = False
__all__ = [
"data_router",
"health_enhanced_router",
"performance_router",
"portfolio_router",
"screening_router",
"technical_router",
]
if has_agents:
__all__.append("agents_router")
# Export health router as both names for compatibility
health_router = health_enhanced_router
```
--------------------------------------------------------------------------------
/maverick_mcp/backtesting/__init__.py:
--------------------------------------------------------------------------------
```python
"""
Backtesting module for MaverickMCP.
Provides backtesting engines and utilities with conditional imports
to handle missing dependencies gracefully.
"""
__all__ = []
# Try to import full VectorBT engine
try:
from .vectorbt_engine import VectorBTEngine
__all__.append("VectorBTEngine")
except ImportError:
# If VectorBT dependencies aren't available, use stub
from .batch_processing_stub import VectorBTEngineStub as _VectorBTEngine
VectorBTEngine = _VectorBTEngine
__all__.append("VectorBTEngine")
# Try to import other backtesting components
try:
from .analysis import BacktestAnalyzer as _BacktestAnalyzer
BacktestAnalyzer = _BacktestAnalyzer
__all__.append("BacktestAnalyzer")
except ImportError:
pass
try:
from .optimization import StrategyOptimizer as _StrategyOptimizer
StrategyOptimizer = _StrategyOptimizer
__all__.append("StrategyOptimizer")
except ImportError:
pass
try:
from .strategy_executor import StrategyExecutor as _StrategyExecutor
StrategyExecutor = _StrategyExecutor
__all__.append("StrategyExecutor")
except ImportError:
pass
```
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
```json
{
"makefile.configureOnOpen": false,
"editor.formatOnSave": true,
"notebook.formatOnSave.enabled": true,
"python.analysis.autoImportCompletions": true,
"python.REPL.enableREPLSmartSend": false,
"files.associations": {
"*.html": "jinja-html",
"*.js": "javascript",
"*.jsx": "javascriptreact",
"*.jinja2": "jinja-html",
"*.jinja": "jinja-html"
},
"[python]": {
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.fixAll": "explicit",
"source.organizeImports": "explicit"
},
"editor.defaultFormatter": "charliermarsh.ruff"
},
"[javascript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true
},
"[javascriptreact]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true
},
"emmet.includeLanguages": {
"jinja-html": "html"
},
"css.lint.unknownAtRules": "ignore",
"css.lint.unknownProperties": "ignore",
"yaml.customTags": ["!reference sequence", "!Ref", "!Sub", "!secret"],
"yaml.validate": false,
"cursorpyright.analysis.autoImportCompletions": true
}
```
--------------------------------------------------------------------------------
/maverick_mcp/config/constants.py:
--------------------------------------------------------------------------------
```python
"""
Constants for the Maverick-MCP package.
"""
import os
from typing import Any
def clean_env_var(var_name, default):
"""Clean environment variable value to handle comments"""
value = os.getenv(var_name, default)
if value and isinstance(value, str):
# Remove any trailing comments (anything after # that's not inside quotes)
return value.split("#", 1)[0].strip()
return value
# Configuration with defaults.
# All values are read once at import time via clean_env_var(), so inline
# "# ..." comments in .env values are stripped before numeric parsing.
CONFIG: dict[str, Any] = {
    "redis": {
        "host": clean_env_var("REDIS_HOST", "localhost"),
        "port": int(clean_env_var("REDIS_PORT", "6379")),
        "db": int(clean_env_var("REDIS_DB", "0")),
        "username": clean_env_var("REDIS_USERNAME", None),
        "password": clean_env_var("REDIS_PASSWORD", None),
        # Any value other than "true" (case-insensitive) disables SSL.
        "ssl": clean_env_var("REDIS_SSL", "False").lower() == "true",
    },
    "cache": {
        "ttl": int(clean_env_var("CACHE_TTL_SECONDS", "604800")),  # 7 days default
        "enabled": clean_env_var("CACHE_ENABLED", "True").lower() == "true",
    },
    "yfinance": {
        "timeout": int(clean_env_var("YFINANCE_TIMEOUT_SECONDS", "30")),
    },
}
# Cache TTL in seconds (convenience alias for CONFIG["cache"]["ttl"]).
CACHE_TTL = CONFIG["cache"]["ttl"]
```
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
```yaml
services:
backend:
build: .
ports:
- "8003:8003"
environment:
- DATABASE_URL=postgresql://postgres:postgres@postgres:5432/maverick_mcp
- REDIS_HOST=redis
# Required API Keys
- TIINGO_API_KEY=${TIINGO_API_KEY}
# Optional API Keys for Enhanced Features
- OPENROUTER_API_KEY=${OPENROUTER_API_KEY}
- EXA_API_KEY=${EXA_API_KEY}
- TAVILY_API_KEY=${TAVILY_API_KEY}
- FRED_API_KEY=${FRED_API_KEY}
- OPENAI_API_KEY=${OPENAI_API_KEY}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
# Server Configuration
- API_HOST=0.0.0.0
- API_PORT=8003
- LOG_LEVEL=${LOG_LEVEL:-info}
- ENVIRONMENT=${ENVIRONMENT:-production}
volumes:
- ./maverick_mcp:/app/maverick_mcp
- ./alembic:/app/alembic
- ./.env:/app/.env
depends_on:
- postgres
- redis
postgres:
image: postgres:15-alpine
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=maverick_mcp
volumes:
- postgres-data:/var/lib/postgresql/data
ports:
- "5432:5432"
redis:
image: redis:7-alpine
volumes:
- redis-data:/data
ports:
- "6379:6379"
volumes:
postgres-data:
redis-data:
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
# Dockerfile for Maverick-MCP
# Python-only MCP server
FROM python:3.12-slim

WORKDIR /app

# Install system dependencies and TA-Lib build prerequisites:
# build-essential/python3-dev compile TA-Lib and native wheels;
# libpq-dev provides PostgreSQL client headers.
RUN apt-get update && apt-get install -yqq \
    build-essential \
    python3-dev \
    libpq-dev \
    wget \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Install uv for fast Python package management
RUN pip install --no-cache-dir uv

# Install and compile TA-Lib from source into /usr/local so the Python
# ta-lib bindings can link against it when dependencies are synced.
ENV TALIB_DIR=/usr/local
RUN wget https://github.com/ta-lib/ta-lib/releases/download/v0.6.4/ta-lib-0.6.4-src.tar.gz \
    && tar -xzf ta-lib-0.6.4-src.tar.gz \
    && cd ta-lib-0.6.4/ \
    && ./configure --prefix=$TALIB_DIR \
    && make -j$(nproc) \
    && make install \
    && cd .. \
    && rm -rf ta-lib-0.6.4-src.tar.gz ta-lib-0.6.4/

# Copy dependency files first for better layer caching
COPY pyproject.toml uv.lock README.md ./

# Install Python dependencies (--frozen fails fast if uv.lock is stale)
RUN uv sync --frozen

# Copy application code
COPY maverick_mcp ./maverick_mcp
COPY alembic ./alembic
COPY alembic.ini setup.py ./

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

# Create non-root user so the server does not run as root
RUN groupadd -g 1000 maverick && \
    useradd -u 1000 -g maverick -s /bin/sh -m maverick && \
    chown -R maverick:maverick /app

USER maverick

# NOTE(review): the container listens on 8000 here while docker-compose
# publishes 8003 — confirm the intended port mapping.
EXPOSE 8000

# Start MCP server
CMD ["uv", "run", "python", "-m", "maverick_mcp.api.server", "--transport", "sse", "--host", "0.0.0.0", "--port", "8000"]
```
--------------------------------------------------------------------------------
/maverick_mcp/dependencies.py:
--------------------------------------------------------------------------------
```python
"""
Dependency injection utilities for Maverick-MCP.
This module provides factory functions and dependency injection helpers
for creating instances of data providers and other services.
"""
from typing import Annotated
from fastapi import Depends
from sqlalchemy.orm import Session
from maverick_mcp.data.models import get_db
from maverick_mcp.providers import (
MacroDataProvider,
MarketDataProvider,
StockDataProvider,
)
def get_stock_data_provider(db: Session = Depends(get_db)) -> StockDataProvider:
    """Build a ``StockDataProvider`` bound to the request's database session.

    Args:
        db: SQLAlchemy session supplied by FastAPI dependency injection.

    Returns:
        A ``StockDataProvider`` configured with ``db``.
    """
    provider = StockDataProvider(db_session=db)
    return provider
def get_market_data_provider() -> MarketDataProvider:
    """Return a fresh ``MarketDataProvider`` (no external wiring needed)."""
    provider = MarketDataProvider()
    return provider
def get_macro_data_provider() -> MacroDataProvider:
    """Return a fresh ``MacroDataProvider`` (no external wiring needed)."""
    provider = MacroDataProvider()
    return provider
# Type aliases for cleaner code in FastAPI routes
StockDataProviderDep = Annotated[StockDataProvider, Depends(get_stock_data_provider)]
MarketDataProviderDep = Annotated[MarketDataProvider, Depends(get_market_data_provider)]
MacroDataProviderDep = Annotated[MacroDataProvider, Depends(get_macro_data_provider)]
```
--------------------------------------------------------------------------------
/maverick_mcp/api/dependencies/technical_analysis.py:
--------------------------------------------------------------------------------
```python
"""
Dependency injection for technical analysis.
This module provides FastAPI dependencies for the technical analysis
domain services and application queries.
"""
from functools import lru_cache
from maverick_mcp.application.queries.get_technical_analysis import (
GetTechnicalAnalysisQuery,
)
from maverick_mcp.domain.services.technical_analysis_service import (
TechnicalAnalysisService,
)
from maverick_mcp.infrastructure.persistence.stock_repository import (
StockDataProviderAdapter,
)
from maverick_mcp.providers.stock_data import StockDataProvider
@lru_cache
def get_technical_analysis_service() -> TechnicalAnalysisService:
    """
    Get the technical analysis domain service.

    This is a pure domain service with no infrastructure dependencies.
    Using lru_cache ensures we reuse the same instance.
    """
    return TechnicalAnalysisService()


# lru_cache on a zero-argument function makes this a process-wide singleton.
@lru_cache
def get_stock_repository() -> StockDataProviderAdapter:
    """
    Get the stock repository.

    This adapts the existing StockDataProvider to the repository interface.
    """
    # Reuse existing provider instance to maintain compatibility
    stock_provider = StockDataProvider()
    return StockDataProviderAdapter(stock_provider)


def get_technical_analysis_query() -> GetTechnicalAnalysisQuery:
    """
    Get the technical analysis query handler.

    This is the application layer query that orchestrates
    domain services and repositories.

    Note: intentionally not cached — a new query object is built per call,
    but its collaborators are the cached singletons above.
    """
    return GetTechnicalAnalysisQuery(
        stock_repository=get_stock_repository(),
        technical_service=get_technical_analysis_service(),
    )
```
--------------------------------------------------------------------------------
/maverick_mcp/backtesting/strategies/base.py:
--------------------------------------------------------------------------------
```python
"""Base strategy class for VectorBT."""
from abc import ABC, abstractmethod
from typing import Any
from pandas import DataFrame, Series
class Strategy(ABC):
"""Abstract base class for trading strategies."""
def __init__(self, parameters: dict[str, Any] = None):
"""Initialize strategy with parameters.
Args:
parameters: Strategy parameters
"""
self.parameters = parameters or {}
@abstractmethod
def generate_signals(self, data: DataFrame) -> tuple[Series, Series]:
"""Generate entry and exit signals.
Args:
data: Price data with OHLCV columns
Returns:
Tuple of (entry_signals, exit_signals) as boolean Series
"""
pass
@property
@abstractmethod
def name(self) -> str:
"""Get strategy name."""
pass
@property
@abstractmethod
def description(self) -> str:
"""Get strategy description."""
pass
def validate_parameters(self) -> bool:
"""Validate strategy parameters.
Returns:
True if parameters are valid
"""
return True
def get_default_parameters(self) -> dict[str, Any]:
"""Get default parameters for the strategy.
Returns:
Dictionary of default parameters
"""
return {}
def to_dict(self) -> dict[str, Any]:
"""Convert strategy to dictionary representation.
Returns:
Dictionary with strategy details
"""
return {
"name": self.name,
"description": self.description,
"parameters": self.parameters,
"default_parameters": self.get_default_parameters(),
}
```
--------------------------------------------------------------------------------
/maverick_mcp/tests/test_fixes_validation.py:
--------------------------------------------------------------------------------
```python
"""
Simple test to validate MCP tool fixes are working.
This test runs the comprehensive fix validation script
and ensures it passes all checks.
"""
import subprocess
import sys
from pathlib import Path
import pytest
@pytest.mark.integration
def test_mcp_tool_fixes_validation():
    """
    Test that all MCP tool fixes are working by running the validation script.

    This test executes the comprehensive test script and verifies all fixes pass.
    """
    # Get the path to the test script (lives alongside this file)
    test_script = Path(__file__).parent / "test_mcp_tool_fixes.py"
    # Run the test script in a fresh interpreter so its imports and global
    # state cannot leak into this pytest session.
    result = subprocess.run(
        [sys.executable, str(test_script)],
        capture_output=True,
        text=True,
        timeout=120,  # 2 minute timeout
    )
    # Check that the script succeeded
    assert result.returncode == 0, (
        f"MCP tool fixes validation failed:\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
    )
    # Verify expected success messages are in output.
    # These strings must match the script's stdout exactly.
    output = result.stdout
    assert "🎉 All MCP tool fixes are working correctly!" in output, (
        "Expected success message not found"
    )
    assert "✅ Passed: 4/4" in output, "Expected 4/4 tests to pass"
    assert "❌ Failed: 0/4" in output, "Expected 0/4 tests to fail"
    # Verify individual fixes
    assert "✅ Research tools return actual content" in output, (
        "Research fix not validated"
    )
    assert "✅ Portfolio risk analysis works" in output, "Portfolio fix not validated"
    assert "✅ Stock info graceful fallback" in output, "Stock info fix not validated"
    assert "✅ LLM configuration compatible" in output, "LLM fix not validated"


if __name__ == "__main__":
    # Allow running this test directly
    test_mcp_tool_fixes_validation()
```
--------------------------------------------------------------------------------
/tests/domain/conftest.py:
--------------------------------------------------------------------------------
```python
"""
Minimal conftest for domain tests only.
This conftest avoids importing heavy dependencies like testcontainers,
httpx, or database connections since domain tests should be isolated
from infrastructure concerns.
"""
import os
import pytest
# Set test environment before any project imports read it.
os.environ["MAVERICK_TEST_ENV"] = "true"


# Override session-scoped fixtures from parent conftest to prevent
# Docker containers from being started for domain tests.
# Each override calls pytest.skip(), so any domain test that accidentally
# requests an infrastructure fixture is skipped instead of starting Docker.
@pytest.fixture(scope="session")
def postgres_container():
    """Domain tests don't need PostgreSQL containers."""
    pytest.skip("Domain tests don't require database containers")


@pytest.fixture(scope="session")
def redis_container():
    """Domain tests don't need Redis containers."""
    pytest.skip("Domain tests don't require cache containers")


@pytest.fixture(scope="session")
def database_url():
    """Domain tests don't need database URLs."""
    pytest.skip("Domain tests don't require database connections")


@pytest.fixture(scope="session")
def redis_url():
    """Domain tests don't need Redis URLs."""
    pytest.skip("Domain tests don't require cache connections")


@pytest.fixture(scope="session")
def engine():
    """Domain tests don't need database engines."""
    pytest.skip("Domain tests don't require database engines")


@pytest.fixture(scope="function")
def db_session():
    """Domain tests don't need database sessions."""
    pytest.skip("Domain tests don't require database sessions")


# autouse=True: applied automatically to every test under this directory.
@pytest.fixture(scope="session", autouse=True)
def setup_test_env():
    """Minimal test environment setup for domain tests."""
    os.environ["ENVIRONMENT"] = "test"
    os.environ["LOG_LEVEL"] = "INFO"
    # Domain tests run without authentication or usage gating
    os.environ["AUTH_ENABLED"] = "false"
    yield
```
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python
"""Setuptools entry point for maverick_mcp.

The long description comes from README.md, and the runtime dependency list
is extracted from pyproject.toml so the two files stay in sync.
"""
from setuptools import find_packages, setup

# Read the contents of the README file
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

# Read the project dependencies from pyproject.toml.
# This is a simple line-based scan rather than a full TOML parse: collect
# lines between `dependencies = [` and the closing `]` line. The previous
# approach split the whole file on the first "]" character, which truncated
# the list as soon as a dependency used an extras marker such as
# "uvicorn[standard]".
dependencies: list[str] = []
with open("pyproject.toml") as f:
    in_dependencies = False
    for line in f:
        stripped = line.strip()
        if not in_dependencies:
            # Tolerate spacing variations like "dependencies=[".
            if stripped.replace(" ", "") == "dependencies=[":
                in_dependencies = True
            continue
        if stripped == "]":
            break
        # Drop the trailing comma and surrounding quotes.
        dep = stripped.strip(",").strip('"').strip("'")
        if dep and not dep.startswith("#"):
            dependencies.append(dep)

setup(
    name="maverick_mcp",
    version="0.1.0",
    description="Maverick-MCP is a Python MCP server for financial market analysis and trading strategies.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="MaverickMCP Contributors",
    author_email="",
    url="https://github.com/wshobson/maverick-mcp",
    packages=find_packages(),
    include_package_data=True,
    python_requires=">=3.12",
    install_requires=dependencies,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Financial and Insurance Industry",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.12",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Office/Business :: Financial :: Investment",
    ],
    # No console scripts needed as we're running the API server directly
)
```
--------------------------------------------------------------------------------
/tests/test_market_data_simple.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python3
"""
Simple test to verify market data provider functionality.
"""
import pytest
from maverick_mcp.providers.market_data import MarketDataProvider
@pytest.mark.integration
@pytest.mark.external
def test_market_data():
    """Test market data provider functions.

    Smoke test against the live provider (marked ``external``): it prints
    sample rows rather than asserting values, so it only verifies the calls
    succeed and return non-crashing shapes.
    """
    provider = MarketDataProvider()
    print("Testing Market Data Provider")
    print("=" * 50)
    # Test market summary
    print("\n1. Testing market summary...")
    summary = provider.get_market_summary()
    print(f" Found {len(summary)} indices")
    if summary:
        for _, data in list(summary.items())[:3]:
            print(f" {data['name']}: ${data['price']} ({data['change_percent']}%)")
    # Test top gainers
    print("\n2. Testing top gainers...")
    gainers = provider.get_top_gainers(5)
    print(f" Found {len(gainers)} gainers")
    for stock in gainers[:3]:
        print(f" {stock['symbol']}: ${stock['price']} (+{stock['change_percent']}%)")
    # Test top losers
    print("\n3. Testing top losers...")
    losers = provider.get_top_losers(5)
    print(f" Found {len(losers)} losers")
    for stock in losers[:3]:
        print(f" {stock['symbol']}: ${stock['price']} ({stock['change_percent']}%)")
    # Test most active
    print("\n4. Testing most active...")
    active = provider.get_most_active(5)
    print(f" Found {len(active)} active stocks")
    for stock in active[:3]:
        print(f" {stock['symbol']}: ${stock['price']} (Vol: {stock['volume']:,})")
    # Test sector performance
    print("\n5. Testing sector performance...")
    sectors = provider.get_sector_performance()
    print(f" Found {len(sectors)} sectors")
    for sector, perf in list(sectors.items())[:3]:
        print(f" {sector}: {perf}%")
    print("\n✅ Test completed!")


if __name__ == "__main__":
    # Allow running this smoke test directly without pytest.
    test_market_data()
```
--------------------------------------------------------------------------------
/maverick_mcp/api/dependencies/stock_analysis.py:
--------------------------------------------------------------------------------
```python
"""
Dependency injection for stock analysis services.
"""
from fastapi import Depends
from sqlalchemy.orm import Session
from maverick_mcp.data.session_management import get_db_session
from maverick_mcp.domain.stock_analysis import StockAnalysisService
from maverick_mcp.infrastructure.caching import CacheManagementService
from maverick_mcp.infrastructure.data_fetching import StockDataFetchingService
def get_stock_data_fetching_service() -> StockDataFetchingService:
    """Build the service that fetches data from external sources.

    Returns:
        A ``StockDataFetchingService`` with a 30s timeout and 3 retries.
    """
    service = StockDataFetchingService(timeout=30, max_retries=3)
    return service
def get_cache_management_service(
    db_session: Session | None = Depends(get_db_session),
) -> CacheManagementService:
    """Build the cache management service bound to the request session.

    Args:
        db_session: SQLAlchemy session supplied by FastAPI (may be ``None``).

    Returns:
        A ``CacheManagementService`` that caches data for one day.
    """
    service = CacheManagementService(db_session=db_session, cache_days=1)
    return service
def get_stock_analysis_service(
    data_fetching_service: StockDataFetchingService = Depends(
        get_stock_data_fetching_service
    ),
    cache_service: CacheManagementService = Depends(get_cache_management_service),
    db_session: Session | None = Depends(get_db_session),
) -> StockAnalysisService:
    """Assemble the stock analysis service from its injected collaborators.

    Args:
        data_fetching_service: Fetches data from external sources.
        cache_service: Handles cache reads and writes.
        db_session: SQLAlchemy session supplied by FastAPI (may be ``None``).

    Returns:
        A fully wired ``StockAnalysisService``.
    """
    wired = StockAnalysisService(
        data_fetching_service=data_fetching_service,
        cache_service=cache_service,
        db_session=db_session,
    )
    return wired
```
--------------------------------------------------------------------------------
/.github/workflows/claude.yml:
--------------------------------------------------------------------------------
```yaml
name: Claude Code
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
issues:
types: [opened, assigned]
pull_request_review:
types: [submitted]
jobs:
claude:
if: |
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
issues: read
id-token: write
actions: read # Required for Claude to read CI results on PRs
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@v1
with:
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
# This is an optional setting that allows Claude to read CI results on PRs
additional_permissions: |
actions: read
# Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
# prompt: 'Update the pull request description to include a summary of changes.'
# Optional: Add claude_args to customize behavior and configuration
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
# claude_args: '--allowed-tools Bash(gh pr:*)'
```
--------------------------------------------------------------------------------
/tests/test_cache_serialization.py:
--------------------------------------------------------------------------------
```python
"""Tests for secure cache serialization helpers."""
from __future__ import annotations
import pandas as pd
import pandas.testing as pdt
import pytest
from maverick_mcp.data import cache as cache_module
@pytest.fixture(autouse=True)
def _memory_cache_cleanup(monkeypatch: pytest.MonkeyPatch) -> None:
    """Ensure Redis is not used and memory cache starts clean."""
    # Forcing get_redis_client to return None routes every cache operation
    # through the in-process memory cache for deterministic tests.
    monkeypatch.setattr(cache_module, "get_redis_client", lambda: None)
    cache_module._memory_cache.clear()


def test_dataframe_round_trip() -> None:
    """DataFrames should round-trip through the cache without pickle usage."""
    key = "test:dataframe"
    df = pd.DataFrame(
        {"open": [1.0, 2.0], "close": [1.5, 2.5]},
        index=pd.to_datetime(["2024-01-01", "2024-01-02"]),
    )
    assert cache_module.save_to_cache(key, df, ttl=60)
    cached = cache_module.get_from_cache(key)
    assert isinstance(cached, pd.DataFrame)
    # assert_frame_equal checks values, dtypes and the datetime index.
    pdt.assert_frame_equal(cached, df)


def test_dict_with_dataframe_round_trip() -> None:
    """Dictionaries containing DataFrames should round-trip safely."""
    key = "test:dict"
    frame = pd.DataFrame(
        {"volume": [100, 200]},
        index=pd.to_datetime(["2024-01-03", "2024-01-04"]),
    )
    # Mixed payload: plain JSON-able values alongside a DataFrame.
    payload = {
        "meta": {"status": "ok"},
        "frame": frame,
        "values": [1, 2, 3],
    }
    assert cache_module.save_to_cache(key, payload, ttl=60)
    cached = cache_module.get_from_cache(key)
    assert isinstance(cached, dict)
    assert cached["meta"] == payload["meta"]
    assert cached["values"] == payload["values"]
    pdt.assert_frame_equal(cached["frame"], frame)


def test_unsupported_type_not_cached() -> None:
    """Unsupported data types should not be cached silently."""

    class _Unsupported:
        pass

    key = "test:unsupported"
    # save_to_cache must report failure and leave no entry behind.
    assert not cache_module.save_to_cache(key, _Unsupported(), ttl=60)
    assert key not in cache_module._memory_cache
```
--------------------------------------------------------------------------------
/.github/workflows/claude-code-review.yml:
--------------------------------------------------------------------------------
```yaml
name: Claude Code Review
on:
pull_request:
types: [opened, synchronize]
# Optional: Only run on specific file changes
# paths:
# - "src/**/*.ts"
# - "src/**/*.tsx"
# - "src/**/*.js"
# - "src/**/*.jsx"
jobs:
claude-review:
# Optional: Filter by PR author
# if: |
# github.event.pull_request.user.login == 'external-contributor' ||
# github.event.pull_request.user.login == 'new-developer' ||
# github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR'
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
issues: read
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Claude Code Review
id: claude-review
uses: anthropics/claude-code-action@v1
with:
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
prompt: |
REPO: ${{ github.repository }}
PR NUMBER: ${{ github.event.pull_request.number }}
Please review this pull request and provide feedback on:
- Code quality and best practices
- Potential bugs or issues
- Performance considerations
- Security concerns
- Test coverage
Use the repository's CLAUDE.md for guidance on style and conventions. Be constructive and helpful in your feedback.
Use `gh pr comment` with your Bash tool to leave your review as a comment on the PR.
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
# or https://docs.claude.com/en/docs/claude-code/cli-reference for available options
claude_args: '--allowed-tools "Bash(gh issue view:*),Bash(gh search:*),Bash(gh issue list:*),Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*),Bash(gh pr list:*)"'
```
--------------------------------------------------------------------------------
/tests/test_stock_analysis_dependencies.py:
--------------------------------------------------------------------------------
```python
"""
Tests for stock analysis service dependencies.
"""
from unittest.mock import Mock
from maverick_mcp.api.dependencies.stock_analysis import (
get_cache_management_service,
get_stock_analysis_service,
get_stock_data_fetching_service,
)
from maverick_mcp.domain.stock_analysis import StockAnalysisService
from maverick_mcp.infrastructure.caching import CacheManagementService
from maverick_mcp.infrastructure.data_fetching import StockDataFetchingService
class TestStockAnalysisDependencies:
    """Test cases for stock analysis service dependency injection."""

    def test_get_stock_data_fetching_service(self):
        """Test stock data fetching service creation."""
        service = get_stock_data_fetching_service()
        # Assertions: defaults pinned here must match the factory's values.
        assert isinstance(service, StockDataFetchingService)
        assert service.timeout == 30
        assert service.max_retries == 3

    def test_get_cache_management_service(self):
        """Test cache management service creation."""
        mock_session = Mock()
        service = get_cache_management_service(db_session=mock_session)
        # Assertions: the injected session is stored privately (_db_session).
        assert isinstance(service, CacheManagementService)
        assert service._db_session == mock_session
        assert service.cache_days == 1

    def test_get_stock_analysis_service(self):
        """Test stock analysis service creation with all dependencies."""
        # spec= ensures the mocks reject attribute access outside the real API.
        mock_data_fetching_service = Mock(spec=StockDataFetchingService)
        mock_cache_service = Mock(spec=CacheManagementService)
        mock_db_session = Mock()
        service = get_stock_analysis_service(
            data_fetching_service=mock_data_fetching_service,
            cache_service=mock_cache_service,
            db_session=mock_db_session,
        )
        # Assertions: each collaborator is wired through unchanged.
        assert isinstance(service, StockAnalysisService)
        assert service.data_fetching_service == mock_data_fetching_service
        assert service.cache_service == mock_cache_service
        assert service.db_session == mock_db_session
```
--------------------------------------------------------------------------------
/tests/integration/vcr_setup.py:
--------------------------------------------------------------------------------
```python
"""
VCR.py setup for mocking external API calls.
"""
from pathlib import Path
import vcr
# Base directory for cassettes
CASSETTE_DIR = Path(__file__).parent.parent / "fixtures" / "vcr_cassettes"
CASSETTE_DIR.mkdir(parents=True, exist_ok=True)
def get_vcr_config():
    """Get default VCR configuration.

    Returns a fresh dict per call, so callers may mutate it freely
    (the API-specific helpers below rely on this).
    """
    return {
        "cassette_library_dir": str(CASSETTE_DIR),
        "record_mode": "once",  # Record once, then replay
        "match_on": ["method", "scheme", "host", "port", "path", "query"],
        # Strip credentials from recorded cassettes.
        "filter_headers": [
            "authorization",
            "api-key",
            "x-api-key",
            "cookie",
            "set-cookie",
        ],
        "filter_query_parameters": ["apikey", "token", "key"],
        "filter_post_data_parameters": ["api_key", "token", "password"],
        "decode_compressed_response": True,
        "allow_playback_repeats": True,
    }


# Pre-configured VCR instance shared by use_cassette().
configured_vcr = vcr.VCR(**get_vcr_config())


def use_cassette(cassette_name: str):
    """
    Decorator to use a VCR cassette for a test.

    Example:
        @use_cassette("test_external_api.yaml")
        async def test_something():
            # Make external API calls here
            pass
    """
    return configured_vcr.use_cassette(cassette_name)


# Specific VCR configurations for different APIs.
# Each builds on get_vcr_config() and returns a new vcr.VCR instance.
def yfinance_vcr():
    """VCR configuration specific to yfinance API."""
    config = get_vcr_config()
    config["match_on"] = ["method", "host", "path"]  # Less strict for yfinance
    config["filter_query_parameters"].extend(["period1", "period2", "interval"])
    return vcr.VCR(**config)


def external_api_vcr():
    """VCR configuration specific to External API."""
    config = get_vcr_config()
    config["filter_headers"].append("x-rapidapi-key")
    config["filter_headers"].append("x-rapidapi-host")
    return vcr.VCR(**config)


def finviz_vcr():
    """VCR configuration specific to finvizfinance."""
    config = get_vcr_config()
    config["match_on"] = ["method", "host", "path", "query"]
    return vcr.VCR(**config)
```
--------------------------------------------------------------------------------
/maverick_mcp/infrastructure/persistence/stock_repository.py:
--------------------------------------------------------------------------------
```python
"""
Stock repository implementation.
This is the infrastructure layer implementation that adapts
the existing StockDataProvider to the domain repository interface.
"""
import pandas as pd
from maverick_mcp.providers.stock_data import StockDataProvider
class StockDataProviderAdapter:
    """
    Adapter that wraps the existing StockDataProvider for DDD architecture.

    This adapter allows the existing StockDataProvider to work with
    the new domain-driven architecture, maintaining backwards compatibility.
    """

    def __init__(self, stock_provider: StockDataProvider | None = None):
        """
        Initialize the repository.

        Args:
            stock_provider: Existing stock data provider (creates new if None)
        """
        self.stock_provider = stock_provider or StockDataProvider()

    def get_price_data(
        self, symbol: str, start_date: str, end_date: str
    ) -> pd.DataFrame:
        """
        Get historical price data for a stock.

        This method adapts the existing StockDataProvider interface
        to the domain repository interface.

        Args:
            symbol: Stock ticker symbol
            start_date: Start date in YYYY-MM-DD format
            end_date: End date in YYYY-MM-DD format

        Returns:
            DataFrame with price data (columns: open, high, low, close, volume)
        """
        # Use existing provider, which handles caching and fallbacks
        df = self.stock_provider.get_stock_data(symbol, start_date, end_date)
        # Ensure column names are lowercase for consistency
        # (providers may return e.g. "Close"; the domain layer expects "close").
        df.columns = df.columns.str.lower()
        return df

    async def get_price_data_async(
        self, symbol: str, start_date: str, end_date: str
    ) -> pd.DataFrame:
        """
        Async version of get_price_data.

        Currently wraps the sync version, but can be optimized
        with true async implementation later.

        NOTE(review): this blocks the event loop for the duration of the
        underlying fetch — consider asyncio.to_thread if that matters.
        """
        # For now, just call the sync version
        # In future, this could use async database queries
        return self.get_price_data(symbol, start_date, end_date)
```
--------------------------------------------------------------------------------
/scripts/setup_sp500_database.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Set up a fresh S&P 500 database: remove any existing local SQLite file,
# run the schema migration, then seed S&P 500 symbols and screening data.
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color

echo -e "${BLUE}🚀 MaverickMCP S&P 500 Database Setup${NC}"
echo "======================================"

# Check environment
echo -e "${YELLOW}📋 Environment Check:${NC}"

# TIINGO_API_KEY is optional here: seeding can fall back to yfinance.
if [[ -z "${TIINGO_API_KEY}" ]]; then
    echo -e " TIINGO_API_KEY: ${YELLOW}⚠️ Not set (optional for yfinance)${NC}"
else
    echo -e " TIINGO_API_KEY: ${GREEN}✅ Set${NC}"
fi

# Show database URL (default to a local SQLite file)
DATABASE_URL=${DATABASE_URL:-"sqlite:///maverick.db"}
echo " DATABASE_URL: $DATABASE_URL"

# Clear existing database for fresh S&P 500 start
if [[ "$DATABASE_URL" == "sqlite:///"* ]]; then
    # Strip the sqlite:/// prefix via parameter expansion. The previous
    # unquoted `echo $DATABASE_URL | sed ...` pipeline was subject to word
    # splitting and glob expansion; this form is safe and avoids the subshell.
    DB_FILE="${DATABASE_URL#sqlite:///}"
    if [[ -f "$DB_FILE" ]]; then
        echo -e "${YELLOW}🗑️ Removing existing database for fresh S&P 500 setup...${NC}"
        rm "$DB_FILE"
    fi
fi

# Run database migration (the `if cmd` form keeps set -e from aborting
# before we can print a helpful failure message).
echo -e "${BLUE}1️⃣ Running database migration...${NC}"
echo "--------------------------------"
if uv run python scripts/migrate_db.py; then
    echo -e "${GREEN}✅ Migration completed successfully${NC}"
else
    echo -e "${RED}❌ Migration failed${NC}"
    exit 1
fi

# Run S&P 500 seeding
echo -e "${BLUE}2️⃣ Running S&P 500 database seeding...${NC}"
echo "-------------------------------------"
if uv run python scripts/seed_sp500.py; then
    echo -e "${GREEN}✅ S&P 500 seeding completed successfully${NC}"
else
    echo -e "${RED}❌ S&P 500 seeding failed${NC}"
    exit 1
fi

echo ""
echo -e "${GREEN}🎉 S&P 500 database setup completed successfully!${NC}"
echo ""
echo -e "${BLUE}Next steps:${NC}"
echo "1. Run the MCP server: ${YELLOW}make dev${NC}"
echo "2. Connect with Claude Desktop using mcp-remote"
echo "3. Test with: ${YELLOW}'Show me top S&P 500 momentum stocks'${NC}"
echo ""
echo -e "${BLUE}Available S&P 500 screening tools:${NC}"
echo "- get_maverick_recommendations (bullish momentum stocks)"
echo "- get_maverick_bear_recommendations (bearish setups)"
echo "- get_trending_breakout_recommendations (supply/demand breakouts)"
--------------------------------------------------------------------------------
/tools/fast_dev.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
#
# Fast Development Startup Script
# Skips all checks and uses in-memory database for < 3 second startup
#
set -e

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'

echo -e "${CYAN}⚡ Fast Dev Mode - Skipping all checks for speed${NC}"

# Set ultra-fast environment: no auth, in-memory SQLite, no Redis, and all
# validation/migration steps skipped. These exports are also re-set inside
# the inline Python below so the launcher is self-contained.
export AUTH_ENABLED=false
export DATABASE_URL="sqlite:///:memory:"
export REDIS_HOST="none" # Skip Redis
export SKIP_VALIDATION=true
export SKIP_MIGRATIONS=true
export LOG_LEVEL=WARNING # Reduce log noise
export STARTUP_MODE=fast

# Change to project root (script may be invoked from any directory)
cd "$(dirname "$0")/.."

# Create minimal .env if not exists
if [ ! -f .env ]; then
cat > .env << EOF
AUTH_ENABLED=false
DATABASE_URL=sqlite:///:memory:
REDIS_HOST=none
SKIP_VALIDATION=true
LOG_LEVEL=WARNING
EOF
echo -e "${YELLOW}Created minimal .env for fast mode${NC}"
fi

# Start time tracking
START_TIME=$(date +%s)

# Launch server directly without checks
echo -e "${GREEN}Starting server in fast mode...${NC}"

# Create a minimal launcher that skips all initialization. The inline script
# stands up a bare FastMCP server with a single smoke-test tool; it runs in
# the background (&) so the shell can report timing and then wait on it.
python -c "
import os
os.environ['STARTUP_MODE'] = 'fast'
os.environ['AUTH_ENABLED'] = 'false'
os.environ['DATABASE_URL'] = 'sqlite:///:memory:'
os.environ['SKIP_VALIDATION'] = 'true'
# Minimal imports only
import asyncio
import uvicorn
from fastmcp import FastMCP
# Create minimal server
mcp = FastMCP(
name='MaverickMCP-Fast',
debug=True,
log_level='WARNING'
)
# Add one test tool to verify it's working
@mcp.tool()
async def test_fast_mode():
return {'status': 'Fast mode active!', 'startup_time': '< 3 seconds'}
# Direct startup without any checks
if __name__ == '__main__':
print('🚀 Server starting on http://localhost:8000')
mcp.run(transport='sse', port=8000, host='0.0.0.0')
" &
SERVER_PID=$!

# Calculate startup time
# NOTE(review): measured immediately after backgrounding the process, so this
# reports shell launch time (~0s), not time-to-ready of the server itself.
END_TIME=$(date +%s)
STARTUP_TIME=$((END_TIME - START_TIME))
echo -e "${GREEN}✨ Server started in ${STARTUP_TIME} seconds!${NC}"
echo -e "${CYAN}Access at: http://localhost:8000/sse${NC}"
echo -e "${YELLOW}Note: This is a minimal server - add your tools to test${NC}"
echo -e "\nPress Ctrl+C to stop"

# Wait for server (Ctrl+C propagates to the background python process)
wait $SERVER_PID
--------------------------------------------------------------------------------
/scripts/setup_database.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Complete database setup script for MaverickMCP.
#
# Runs the migration and seeding process to set up a complete working
# database for the MaverickMCP application.
#
# NOTE: the previous header used a Python-style """docstring""", which bash
# executes as a (failing) multi-line command; plain # comments are correct.
set -e # Exit on any error

echo "🚀 MaverickMCP Database Setup"
echo "=============================="

# Change to project root directory
cd "$(dirname "$0")/.."

# Check if virtual environment is activated
if [[ -z "${VIRTUAL_ENV}" ]] && [[ ! -d ".venv" ]]; then
    echo "⚠️ Warning: No virtual environment detected"
    echo " Consider running: python -m venv .venv && source .venv/bin/activate"
    echo ""
fi

# Check for required environment variables
if [[ -z "${TIINGO_API_KEY}" ]]; then
    echo "❌ TIINGO_API_KEY environment variable is required!"
    echo ""
    echo "To get started:"
    echo "1. Sign up for a free account at https://tiingo.com"
    echo "2. Get your API key from the dashboard"
    echo "3. Add it to your .env file: TIINGO_API_KEY=your_api_key_here"
    echo "4. Or export it: export TIINGO_API_KEY=your_api_key_here"
    echo ""
    exit 1
fi

echo "📋 Environment Check:"
echo " TIINGO_API_KEY: ✅ Set"

if [[ -n "${DATABASE_URL}" ]]; then
    echo " DATABASE_URL: ${DATABASE_URL}"
else
    echo " DATABASE_URL: sqlite:///./maverick_mcp.db (default)"
fi

echo ""
echo "1️⃣ Running database migration..."
echo "--------------------------------"
# Use the `if cmd` form: under `set -e`, `cmd` followed by an `$?` check
# would exit the script on failure before the else branch could ever run.
if python scripts/migrate_db.py; then
    echo "✅ Migration completed successfully"
else
    echo "❌ Migration failed"
    exit 1
fi

echo ""
echo "2️⃣ Running database seeding..."
echo "------------------------------"
if python scripts/seed_db.py; then
    echo "✅ Seeding completed successfully"
else
    echo "❌ Seeding failed"
    exit 1
fi

echo ""
echo "🎉 Database setup completed successfully!"
echo ""
echo "Next steps:"
echo "1. Run the MCP server: make dev"
echo "2. Connect with Claude Desktop using mcp-remote"
echo "3. Test with: 'Show me technical analysis for AAPL'"
echo ""
echo "Available screening tools:"
echo "- get_maverick_recommendations (bullish momentum stocks)"
echo "- get_maverick_bear_recommendations (bearish setups)"
echo "- get_trending_breakout_recommendations (breakout candidates)"
--------------------------------------------------------------------------------
/maverick_mcp/api/simple_sse.py:
--------------------------------------------------------------------------------
```python
"""
Simple SSE implementation for MCP Inspector compatibility.
This implements a direct SSE handler that works with MCP Inspector's expectations.
"""
import asyncio
import logging
from uuid import uuid4
from mcp import types
from mcp.server.session import ServerSession
from starlette.requests import Request
from starlette.responses import StreamingResponse
logger = logging.getLogger(__name__)
class SimpleSSEHandler:
    """Simple SSE handler for MCP Inspector.

    Keeps one ServerSession per SSE connection and streams only keepalive
    comments; the Inspector is expected to deliver JSON-RPC messages via
    separate POST requests.
    """

    def __init__(self):
        # Active MCP sessions keyed by the per-connection UUID.
        self.sessions: dict[str, ServerSession] = {}

    async def handle_sse(self, request: Request):
        """Handle SSE connection with bidirectional JSON-RPC over SSE."""
        session_id = str(uuid4())
        logger.info(f"New Simple SSE connection: {session_id}")
        # Create MCP session.
        # NOTE(review): `create_initialization_options` is passed as a
        # constructor kwarg — confirm this matches the installed mcp SDK's
        # ServerSession signature; SDK versions differ on these arguments.
        session = ServerSession(
            create_initialization_options=lambda: types.InitializationOptions(
                server_name="MaverickMCP", server_version="1.0.0"
            )
        )
        self.sessions[session_id] = session

        async def event_generator():
            """Generate SSE events (keepalive comments only)."""
            try:
                # Just keep the connection alive - Inspector will send messages via POST
                while True:
                    # Send keepalive every 30 seconds
                    await asyncio.sleep(30)
                    # A line starting with ":" is an SSE comment, ignored by clients.
                    yield ": keepalive\n\n"
            finally:
                # Cleanup on disconnect (generator is closed when the client drops).
                if session_id in self.sessions:
                    del self.sessions[session_id]
                logger.info(f"Simple SSE connection closed: {session_id}")

        # Return SSE response with proper headers: disable caching and proxy
        # buffering (X-Accel-Buffering) and allow cross-origin Inspector access.
        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no",
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
                "Access-Control-Allow-Headers": "*",
                "Access-Control-Allow-Credentials": "true",
            },
        )


# Create global handler instance
simple_sse = SimpleSSEHandler()
```
--------------------------------------------------------------------------------
/maverick_mcp/backtesting/batch_processing_stub.py:
--------------------------------------------------------------------------------
```python
"""
Lightweight batch processing stub for import compatibility.
This module provides basic batch processing method stubs that can be imported
even when heavy dependencies like VectorBT, NumPy, etc. are not available.
"""
import logging
from typing import Any
logger = logging.getLogger(__name__)
class BatchProcessingStub:
"""Lightweight batch processing stub class."""
async def run_batch_backtest(
self,
batch_configs: list[dict[str, Any]],
max_workers: int = 6,
chunk_size: int = 10,
validate_data: bool = True,
fail_fast: bool = False,
) -> dict[str, Any]:
"""Stub for run_batch_backtest method."""
raise ImportError("Batch processing requires VectorBT and other dependencies")
async def batch_optimize_parameters(
self,
optimization_configs: list[dict[str, Any]],
max_workers: int = 4,
optimization_method: str = "grid_search",
max_iterations: int = 100,
) -> dict[str, Any]:
"""Stub for batch_optimize_parameters method."""
raise ImportError("Batch processing requires VectorBT and other dependencies")
async def batch_validate_strategies(
self,
validation_configs: list[dict[str, Any]],
validation_start_date: str,
validation_end_date: str,
max_workers: int = 6,
) -> dict[str, Any]:
"""Stub for batch_validate_strategies method."""
raise ImportError("Batch processing requires VectorBT and other dependencies")
async def get_batch_results(
self, batch_id: str, include_detailed_results: bool = False
) -> dict[str, Any] | None:
"""Stub for get_batch_results method."""
raise ImportError("Batch processing requires VectorBT and other dependencies")
# Alias method for backward compatibility
async def batch_optimize(self, *args, **kwargs):
"""Alias for batch_optimize_parameters for backward compatibility."""
return await self.batch_optimize_parameters(*args, **kwargs)
class VectorBTEngineStub(BatchProcessingStub):
"""Stub VectorBT engine that provides batch processing methods."""
def __init__(self, *args, **kwargs):
"""Initialize stub engine."""
logger.warning(
"VectorBT dependencies not available - using stub implementation"
)
def __getattr__(self, name):
"""Provide stubs for any missing methods."""
if name.startswith("batch") or name in ["run_backtest", "optimize_strategy"]:
async def stub_method(*args, **kwargs):
raise ImportError(
f"Method {name} requires VectorBT and other dependencies"
)
return stub_method
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{name}'"
)
```
--------------------------------------------------------------------------------
/maverick_mcp/api/services/base_service.py:
--------------------------------------------------------------------------------
```python
"""
Base service class for MaverickMCP API services.
Provides common functionality and dependency injection patterns
for all service classes.
"""
from abc import ABC, abstractmethod
from typing import Any
from fastmcp import FastMCP
from sqlalchemy.ext.asyncio import AsyncSession
# Auth imports removed in personal use version
# from maverick_mcp.auth.jwt_enhanced import EnhancedJWTManager
# from maverick_mcp.auth.key_manager_jwt import KeyManager
from maverick_mcp.config.settings import settings
from maverick_mcp.utils.logging import get_logger
class BaseService(ABC):
    """Abstract base for all MaverickMCP API services.

    Implements dependency-injection plumbing and shared utilities
    (logging, settings access, database session creation) that every
    concrete service relies on.
    """

    def __init__(
        self,
        mcp: FastMCP,
        db_session_factory: Any = None,
    ):
        """Store injected dependencies and create a per-service logger.

        Args:
            mcp: FastMCP instance for tool/resource registration
            db_session_factory: Optional async database session factory
        """
        self.mcp = mcp
        self.db_session_factory = db_session_factory
        # Logger is namespaced by the concrete subclass name.
        self.logger = get_logger(
            f"maverick_mcp.services.{type(self).__name__.lower()}"
        )

    @property
    def settings(self):
        """Application settings object."""
        return settings

    async def get_db_session(self) -> AsyncSession:
        """Create a new async database session.

        Returns:
            AsyncSession instance

        Raises:
            RuntimeError: If database session factory not available
        """
        if not self.db_session_factory:
            raise RuntimeError("Database session factory not configured")
        return self.db_session_factory()

    def is_auth_enabled(self) -> bool:
        """Authentication is always disabled in the personal-use build."""
        return False

    def is_debug_mode(self) -> bool:
        """Whether API debug mode is turned on in settings."""
        return settings.api.debug

    def log_tool_usage(self, tool_name: str, user_id: int | None = None, **kwargs):
        """Emit a structured log entry for a tool invocation.

        Args:
            tool_name: Name of the tool being used
            user_id: Optional user ID if authenticated
            **kwargs: Additional context for logging (may override defaults)
        """
        context = dict(
            tool_name=tool_name,
            user_id=user_id,
            auth_enabled=self.is_auth_enabled(),
        )
        context.update(kwargs)
        self.logger.info(f"Tool usage: {tool_name}", extra=context)

    @abstractmethod
    def register_tools(self):
        """Register this service's tools and resources with the MCP instance.

        Must be implemented by subclasses.
        """
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
```markdown
---
name: Bug Report
about: Create a report to help us improve MaverickMCP
title: '[BUG] '
labels: ['bug', 'needs-triage']
assignees: ''
---
## 🐛 Bug Description
A clear and concise description of what the bug is.
## 💰 Financial Disclaimer Acknowledgment
- [ ] I understand this is educational software and not financial advice
- [ ] I am not expecting investment recommendations or guaranteed returns
- [ ] This bug report is about technical functionality, not financial performance
## 📋 Reproduction Steps
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
## 🎯 Expected Behavior
A clear and concise description of what you expected to happen.
## 📸 Screenshots
If applicable, add screenshots to help explain your problem.
## 💻 Environment Information
**Desktop/Server:**
- OS: [e.g. macOS, Ubuntu, Windows]
- Python Version: [e.g. 3.12.0]
- MaverickMCP Version: [e.g. 0.1.0]
- Installation Method: [e.g. pip, uv, git clone]
**Claude Desktop (if applicable):**
- Claude Desktop Version: [e.g. 1.0.0]
- mcp-remote Version: [if using Claude Desktop]
**Dependencies:**
- FastMCP Version: [e.g. 2.7.0]
- FastAPI Version: [e.g. 0.115.0]
- Database: [SQLite, PostgreSQL]
- Redis: [Yes/No, version if yes]
## 📋 Configuration
**Environment Variables (remove sensitive data):**
```
TIINGO_API_KEY=***
DATABASE_URL=***
REDIS_HOST=***
# ... other relevant config
```
**Relevant .env settings:**
```
LOG_LEVEL=DEBUG
CACHE_ENABLED=true
# ... other settings
```
## 📊 Error Messages/Logs
**Error message:**
```
Paste the full error message here
```
**Server logs (if available):**
```
Paste relevant server logs here (remove API keys)
```
**Console/Terminal output:**
```
Paste terminal output here
```
## 🔧 Additional Context
- Are you using any specific financial data providers?
- What stock symbols were you analyzing when this occurred?
- Any specific time ranges or parameters involved?
- Any custom configuration or modifications?
## ✅ Pre-submission Checklist
- [ ] I have searched existing issues to avoid duplicates
- [ ] I have removed all sensitive data (API keys, personal info)
- [ ] I can reproduce this bug consistently
- [ ] I have included relevant error messages and logs
- [ ] I understand this is educational software with no financial guarantees
## 🏷️ Bug Classification
**Severity:**
- [ ] Critical (crashes, data loss)
- [ ] High (major feature broken)
- [ ] Medium (feature partially working)
- [ ] Low (minor issue, workaround available)
**Component:**
- [ ] Data fetching (Tiingo, Yahoo Finance)
- [ ] Technical analysis calculations
- [ ] Stock screening
- [ ] Database operations
- [ ] Caching (Redis)
- [ ] MCP server/tools
- [ ] Claude Desktop integration
- [ ] Installation/Setup
**Additional Labels:**
- [ ] documentation (if docs need updating)
- [ ] good first issue (if suitable for newcomers)
- [ ] help wanted (if community help is needed)
```
--------------------------------------------------------------------------------
/maverick_mcp/providers/llm_factory.py:
--------------------------------------------------------------------------------
```python
"""LLM factory for creating language model instances.
This module provides a factory function to create LLM instances with intelligent model selection.
"""
import logging
import os
from typing import Any
from langchain_community.llms import FakeListLLM
from maverick_mcp.providers.openrouter_provider import (
TaskType,
get_openrouter_llm,
)
logger = logging.getLogger(__name__)
def get_llm(
    task_type: TaskType = TaskType.GENERAL,
    prefer_fast: bool = False,
    prefer_cheap: bool = True,  # Default to cost-effective
    prefer_quality: bool = False,
    model_override: str | None = None,
) -> Any:
    """Create and return an LLM instance with intelligent model selection.

    Args:
        task_type: Type of task to optimize model selection for
        prefer_fast: Prioritize speed over quality
        prefer_cheap: Prioritize cost over quality (default True)
        prefer_quality: Use premium models regardless of cost
        model_override: Override automatic model selection

    Returns:
        An LLM instance optimized for the task.

    Priority order:
        1. OpenRouter API if OPENROUTER_API_KEY is available (smart model selection)
        2. OpenAI ChatOpenAI if OPENAI_API_KEY is available (fallback)
        3. Anthropic ChatAnthropic if ANTHROPIC_API_KEY is available (fallback)
        4. FakeListLLM as fallback for testing
    """
    # OpenRouter is preferred: it routes to a per-task model.
    if openrouter_key := os.getenv("OPENROUTER_API_KEY"):
        logger.info(
            f"Using OpenRouter with intelligent model selection for task: {task_type}"
        )
        return get_openrouter_llm(
            api_key=openrouter_key,
            task_type=task_type,
            prefer_fast=prefer_fast,
            prefer_cheap=prefer_cheap,
            prefer_quality=prefer_quality,
            model_override=model_override,
        )

    # Fallback to OpenAI; a missing langchain_openai package is tolerated.
    if os.getenv("OPENAI_API_KEY"):
        logger.info("Falling back to OpenAI API")
        try:
            from langchain_openai import ChatOpenAI
        except ImportError:
            pass
        else:
            return ChatOpenAI(model="gpt-4o-mini", temperature=0.3, streaming=False)

    # Fallback to Anthropic; a missing langchain_anthropic package is tolerated.
    if os.getenv("ANTHROPIC_API_KEY"):
        logger.info("Falling back to Anthropic API")
        try:
            from langchain_anthropic import ChatAnthropic
        except ImportError:
            pass
        else:
            return ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.3)

    # Final fallback: canned responses so tests can run without any API key.
    logger.warning("No LLM API keys found - using FakeListLLM for testing")
    return FakeListLLM(
        responses=[
            "Mock analysis response for testing purposes.",
            "This is a simulated LLM response.",
            "Market analysis: Moderate bullish sentiment detected.",
        ]
    )
```