This is page 38 of 39. Use http://codebase.md/wshobson/maverick-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── question.md
│   │   └── security_report.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude-code-review.yml
│       └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│   ├── launch.json
│   └── settings.json
├── alembic
│   ├── env.py
│   ├── script.py.mako
│   └── versions
│       ├── 001_initial_schema.py
│       ├── 003_add_performance_indexes.py
│       ├── 006_rename_metadata_columns.py
│       ├── 008_performance_optimization_indexes.py
│       ├── 009_rename_to_supply_demand.py
│       ├── 010_self_contained_schema.py
│       ├── 011_remove_proprietary_terms.py
│       ├── 013_add_backtest_persistence_models.py
│       ├── 014_add_portfolio_models.py
│       ├── 08e3945a0c93_merge_heads.py
│       ├── 9374a5c9b679_merge_heads_for_testing.py
│       ├── abf9b9afb134_merge_multiple_heads.py
│       ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│       ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│       ├── f0696e2cac15_add_essential_performance_indexes.py
│       └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api
│   │   └── backtesting.md
│   ├── BACKTESTING.md
│   ├── COST_BASIS_SPECIFICATION.md
│   ├── deep_research_agent.md
│   ├── exa_research_testing_strategy.md
│   ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│   ├── PORTFOLIO.md
│   ├── SETUP_SELF_CONTAINED.md
│   └── speed_testing_framework.md
├── examples
│   ├── complete_speed_validation.py
│   ├── deep_research_integration.py
│   ├── llm_optimization_example.py
│   ├── llm_speed_demo.py
│   ├── monitoring_example.py
│   ├── parallel_research_example.py
│   ├── speed_optimization_demo.py
│   └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── circuit_breaker.py
│   │   ├── deep_research.py
│   │   ├── market_analysis.py
│   │   ├── optimized_research.py
│   │   ├── supervisor.py
│   │   └── technical_analysis.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── api_server.py
│   │   ├── connection_manager.py
│   │   ├── dependencies
│   │   │   ├── __init__.py
│   │   │   ├── stock_analysis.py
│   │   │   └── technical_analysis.py
│   │   ├── error_handling.py
│   │   ├── inspector_compatible_sse.py
│   │   ├── inspector_sse.py
│   │   ├── middleware
│   │   │   ├── error_handling.py
│   │   │   ├── mcp_logging.py
│   │   │   ├── rate_limiting_enhanced.py
│   │   │   └── security.py
│   │   ├── openapi_config.py
│   │   ├── routers
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── backtesting.py
│   │   │   ├── data_enhanced.py
│   │   │   ├── data.py
│   │   │   ├── health_enhanced.py
│   │   │   ├── health_tools.py
│   │   │   ├── health.py
│   │   │   ├── intelligent_backtesting.py
│   │   │   ├── introspection.py
│   │   │   ├── mcp_prompts.py
│   │   │   ├── monitoring.py
│   │   │   ├── news_sentiment_enhanced.py
│   │   │   ├── performance.py
│   │   │   ├── portfolio.py
│   │   │   ├── research.py
│   │   │   ├── screening_ddd.py
│   │   │   ├── screening_parallel.py
│   │   │   ├── screening.py
│   │   │   ├── technical_ddd.py
│   │   │   ├── technical_enhanced.py
│   │   │   ├── technical.py
│   │   │   └── tool_registry.py
│   │   ├── server.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   ├── base_service.py
│   │   │   ├── market_service.py
│   │   │   ├── portfolio_service.py
│   │   │   ├── prompt_service.py
│   │   │   └── resource_service.py
│   │   ├── simple_sse.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── insomnia_export.py
│   │       └── postman_export.py
│   ├── application
│   │   ├── __init__.py
│   │   ├── commands
│   │   │   └── __init__.py
│   │   ├── dto
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_dto.py
│   │   ├── queries
│   │   │   ├── __init__.py
│   │   │   └── get_technical_analysis.py
│   │   └── screening
│   │       ├── __init__.py
│   │       ├── dtos.py
│   │       └── queries.py
│   ├── backtesting
│   │   ├── __init__.py
│   │   ├── ab_testing.py
│   │   ├── analysis.py
│   │   ├── batch_processing_stub.py
│   │   ├── batch_processing.py
│   │   ├── model_manager.py
│   │   ├── optimization.py
│   │   ├── persistence.py
│   │   ├── retraining_pipeline.py
│   │   ├── strategies
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── ml
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adaptive.py
│   │   │   │   ├── ensemble.py
│   │   │   │   ├── feature_engineering.py
│   │   │   │   └── regime_aware.py
│   │   │   ├── ml_strategies.py
│   │   │   ├── parser.py
│   │   │   └── templates.py
│   │   ├── strategy_executor.py
│   │   ├── vectorbt_engine.py
│   │   └── visualization.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── constants.py
│   │   ├── database_self_contained.py
│   │   ├── database.py
│   │   ├── llm_optimization_config.py
│   │   ├── logging_settings.py
│   │   ├── plotly_config.py
│   │   ├── security_utils.py
│   │   ├── security.py
│   │   ├── settings.py
│   │   ├── technical_constants.py
│   │   ├── tool_estimation.py
│   │   └── validation.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── technical_analysis.py
│   │   └── visualization.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── cache_manager.py
│   │   ├── cache.py
│   │   ├── django_adapter.py
│   │   ├── health.py
│   │   ├── models.py
│   │   ├── performance.py
│   │   ├── session_management.py
│   │   └── validation.py
│   ├── database
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── optimization.py
│   ├── dependencies.py
│   ├── domain
│   │   ├── __init__.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis.py
│   │   ├── events
│   │   │   └── __init__.py
│   │   ├── portfolio.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   ├── entities.py
│   │   │   ├── services.py
│   │   │   └── value_objects.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_service.py
│   │   ├── stock_analysis
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis_service.py
│   │   └── value_objects
│   │       ├── __init__.py
│   │       └── technical_indicators.py
│   ├── exceptions.py
│   ├── infrastructure
│   │   ├── __init__.py
│   │   ├── cache
│   │   │   └── __init__.py
│   │   ├── caching
│   │   │   ├── __init__.py
│   │   │   └── cache_management_service.py
│   │   ├── connection_manager.py
│   │   ├── data_fetching
│   │   │   ├── __init__.py
│   │   │   └── stock_data_service.py
│   │   ├── health
│   │   │   ├── __init__.py
│   │   │   └── health_checker.py
│   │   ├── persistence
│   │   │   ├── __init__.py
│   │   │   └── stock_repository.py
│   │   ├── providers
│   │   │   └── __init__.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   └── repositories.py
│   │   └── sse_optimizer.py
│   ├── langchain_tools
│   │   ├── __init__.py
│   │   ├── adapters.py
│   │   └── registry.py
│   ├── logging_config.py
│   ├── memory
│   │   ├── __init__.py
│   │   └── stores.py
│   ├── monitoring
│   │   ├── __init__.py
│   │   ├── health_check.py
│   │   ├── health_monitor.py
│   │   ├── integration_example.py
│   │   ├── metrics.py
│   │   ├── middleware.py
│   │   └── status_dashboard.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── dependencies.py
│   │   ├── factories
│   │   │   ├── __init__.py
│   │   │   ├── config_factory.py
│   │   │   └── provider_factory.py
│   │   ├── implementations
│   │   │   ├── __init__.py
│   │   │   ├── cache_adapter.py
│   │   │   ├── macro_data_adapter.py
│   │   │   ├── market_data_adapter.py
│   │   │   ├── persistence_adapter.py
│   │   │   └── stock_data_adapter.py
│   │   ├── interfaces
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   ├── config.py
│   │   │   ├── macro_data.py
│   │   │   ├── market_data.py
│   │   │   ├── persistence.py
│   │   │   └── stock_data.py
│   │   ├── llm_factory.py
│   │   ├── macro_data.py
│   │   ├── market_data.py
│   │   ├── mocks
│   │   │   ├── __init__.py
│   │   │   ├── mock_cache.py
│   │   │   ├── mock_config.py
│   │   │   ├── mock_macro_data.py
│   │   │   ├── mock_market_data.py
│   │   │   ├── mock_persistence.py
│   │   │   └── mock_stock_data.py
│   │   ├── openrouter_provider.py
│   │   ├── optimized_screening.py
│   │   ├── optimized_stock_data.py
│   │   └── stock_data.py
│   ├── README.md
│   ├── tests
│   │   ├── __init__.py
│   │   ├── README_INMEMORY_TESTS.md
│   │   ├── test_cache_debug.py
│   │   ├── test_fixes_validation.py
│   │   ├── test_in_memory_routers.py
│   │   ├── test_in_memory_server.py
│   │   ├── test_macro_data_provider.py
│   │   ├── test_mailgun_email.py
│   │   ├── test_market_calendar_caching.py
│   │   ├── test_mcp_tool_fixes_pytest.py
│   │   ├── test_mcp_tool_fixes.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_models_functional.py
│   │   ├── test_server.py
│   │   ├── test_stock_data_enhanced.py
│   │   ├── test_stock_data_provider.py
│   │   └── test_technical_analysis.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── performance_monitoring.py
│   │   ├── portfolio_manager.py
│   │   ├── risk_management.py
│   │   └── sentiment_analysis.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── agent_errors.py
│   │   ├── batch_processing.py
│   │   ├── cache_warmer.py
│   │   ├── circuit_breaker_decorators.py
│   │   ├── circuit_breaker_services.py
│   │   ├── circuit_breaker.py
│   │   ├── data_chunking.py
│   │   ├── database_monitoring.py
│   │   ├── debug_utils.py
│   │   ├── fallback_strategies.py
│   │   ├── llm_optimization.py
│   │   ├── logging_example.py
│   │   ├── logging_init.py
│   │   ├── logging.py
│   │   ├── mcp_logging.py
│   │   ├── memory_profiler.py
│   │   ├── monitoring_middleware.py
│   │   ├── monitoring.py
│   │   ├── orchestration_logging.py
│   │   ├── parallel_research.py
│   │   ├── parallel_screening.py
│   │   ├── quick_cache.py
│   │   ├── resource_manager.py
│   │   ├── shutdown.py
│   │   ├── stock_helpers.py
│   │   ├── structured_logger.py
│   │   ├── tool_monitoring.py
│   │   ├── tracing.py
│   │   └── yfinance_pool.py
│   ├── validation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── data.py
│   │   ├── middleware.py
│   │   ├── portfolio.py
│   │   ├── responses.py
│   │   ├── screening.py
│   │   └── technical.py
│   └── workflows
│       ├── __init__.py
│       ├── agents
│       │   ├── __init__.py
│       │   ├── market_analyzer.py
│       │   ├── optimizer_agent.py
│       │   ├── strategy_selector.py
│       │   └── validator_agent.py
│       ├── backtesting_workflow.py
│       └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── dev.sh
│   ├── INSTALLATION_GUIDE.md
│   ├── load_example.py
│   ├── load_market_data.py
│   ├── load_tiingo_data.py
│   ├── migrate_db.py
│   ├── README_TIINGO_LOADER.md
│   ├── requirements_tiingo.txt
│   ├── run_stock_screening.py
│   ├── run-migrations.sh
│   ├── seed_db.py
│   ├── seed_sp500.py
│   ├── setup_database.sh
│   ├── setup_self_contained.py
│   ├── setup_sp500_database.sh
│   ├── test_seeded_data.py
│   ├── test_tiingo_loader.py
│   ├── tiingo_config.py
│   └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── core
│   │   └── test_technical_analysis.py
│   ├── data
│   │   └── test_portfolio_models.py
│   ├── domain
│   │   ├── conftest.py
│   │   ├── test_portfolio_entities.py
│   │   └── test_technical_analysis_service.py
│   ├── fixtures
│   │   └── orchestration_fixtures.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── README.md
│   │   ├── run_integration_tests.sh
│   │   ├── test_api_technical.py
│   │   ├── test_chaos_engineering.py
│   │   ├── test_config_management.py
│   │   ├── test_full_backtest_workflow_advanced.py
│   │   ├── test_full_backtest_workflow.py
│   │   ├── test_high_volume.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_orchestration_complete.py
│   │   ├── test_portfolio_persistence.py
│   │   ├── test_redis_cache.py
│   │   ├── test_security_integration.py.disabled
│   │   └── vcr_setup.py
│   ├── performance
│   │   ├── __init__.py
│   │   ├── test_benchmarks.py
│   │   ├── test_load.py
│   │   ├── test_profiling.py
│   │   └── test_stress.py
│   ├── providers
│   │   └── test_stock_data_simple.py
│   ├── README.md
│   ├── test_agents_router_mcp.py
│   ├── test_backtest_persistence.py
│   ├── test_cache_management_service.py
│   ├── test_cache_serialization.py
│   ├── test_circuit_breaker.py
│   ├── test_database_pool_config_simple.py
│   ├── test_database_pool_config.py
│   ├── test_deep_research_functional.py
│   ├── test_deep_research_integration.py
│   ├── test_deep_research_parallel_execution.py
│   ├── test_error_handling.py
│   ├── test_event_loop_integrity.py
│   ├── test_exa_research_integration.py
│   ├── test_exception_hierarchy.py
│   ├── test_financial_search.py
│   ├── test_graceful_shutdown.py
│   ├── test_integration_simple.py
│   ├── test_langgraph_workflow.py
│   ├── test_market_data_async.py
│   ├── test_market_data_simple.py
│   ├── test_mcp_orchestration_functional.py
│   ├── test_ml_strategies.py
│   ├── test_optimized_research_agent.py
│   ├── test_orchestration_integration.py
│   ├── test_orchestration_logging.py
│   ├── test_orchestration_tools_simple.py
│   ├── test_parallel_research_integration.py
│   ├── test_parallel_research_orchestrator.py
│   ├── test_parallel_research_performance.py
│   ├── test_performance_optimizations.py
│   ├── test_production_validation.py
│   ├── test_provider_architecture.py
│   ├── test_rate_limiting_enhanced.py
│   ├── test_runner_validation.py
│   ├── test_security_comprehensive.py.disabled
│   ├── test_security_cors.py
│   ├── test_security_enhancements.py.disabled
│   ├── test_security_headers.py
│   ├── test_security_penetration.py
│   ├── test_session_management.py
│   ├── test_speed_optimization_validation.py
│   ├── test_stock_analysis_dependencies.py
│   ├── test_stock_analysis_service.py
│   ├── test_stock_data_fetching_service.py
│   ├── test_supervisor_agent.py
│   ├── test_supervisor_functional.py
│   ├── test_tool_estimation_config.py
│   ├── test_visualization.py
│   └── utils
│       ├── test_agent_errors.py
│       ├── test_logging.py
│       ├── test_parallel_screening.py
│       └── test_quick_cache.py
├── tools
│   ├── check_orchestration_config.py
│   ├── experiments
│   │   ├── validation_examples.py
│   │   └── validation_fixed.py
│   ├── fast_dev.sh
│   ├── hot_reload.py
│   ├── quick_test.py
│   └── templates
│       ├── new_router_template.py
│       ├── new_tool_template.py
│       ├── screening_strategy_template.py
│       └── test_template.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/maverick_mcp/data/models.py:
--------------------------------------------------------------------------------

```python
   1 | """
   2 | SQLAlchemy models for MaverickMCP.
   3 | 
   4 | This module defines database models for financial data storage and analysis,
   5 | including PriceCache and Maverick screening models.
   6 | """
   7 | 
   8 | from __future__ import annotations
   9 | 
  10 | import logging
  11 | import os
  12 | import threading
  13 | import uuid
  14 | from collections.abc import AsyncGenerator, Sequence
  15 | from datetime import UTC, date, datetime, timedelta
  16 | from decimal import Decimal
  17 | 
  18 | import pandas as pd
  19 | from sqlalchemy import (
  20 |     JSON,
  21 |     BigInteger,
  22 |     Boolean,
  23 |     Column,
  24 |     Date,
  25 |     DateTime,
  26 |     ForeignKey,
  27 |     Index,
  28 |     Integer,
  29 |     Numeric,
  30 |     String,
  31 |     Text,
  32 |     UniqueConstraint,
  33 |     Uuid,
  34 |     create_engine,
  35 |     inspect,
  36 | )
  37 | from sqlalchemy.exc import SQLAlchemyError
  38 | from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
  39 | from sqlalchemy.orm import Session, relationship, sessionmaker
  40 | from sqlalchemy.pool import NullPool, QueuePool
  41 | 
  42 | from maverick_mcp.config.settings import get_settings
  43 | from maverick_mcp.database.base import Base
  44 | 
  45 | # Set up logging
  46 | logger = logging.getLogger("maverick_mcp.data.models")
  47 | settings = get_settings()
  48 | 
  49 | 
  50 | # Helper function to get the right integer type for autoincrement primary keys
  51 | def get_primary_key_type():
  52 |     """Get the appropriate primary key type based on database backend."""
  53 |     # SQLite works better with INTEGER for autoincrement, PostgreSQL can use BIGINT
  54 |     if "sqlite" in DATABASE_URL:
  55 |         return Integer
  56 |     else:
  57 |         return BigInteger
  58 | 
  59 | 
  60 | # Database connection setup
  61 | # Try multiple possible environment variable names
  62 | # Use SQLite in-memory for GitHub Actions or test environments
  63 | if os.getenv("GITHUB_ACTIONS") == "true" or os.getenv("CI") == "true":
  64 |     DATABASE_URL = "sqlite:///:memory:"
  65 | else:
  66 |     DATABASE_URL = (
  67 |         os.getenv("DATABASE_URL")
  68 |         or os.getenv("POSTGRES_URL")
  69 |         or "sqlite:///maverick_mcp.db"  # Default to SQLite
  70 |     )
  71 | 
  72 | # Database configuration from settings
  73 | DB_POOL_SIZE = settings.db.pool_size
  74 | DB_MAX_OVERFLOW = settings.db.pool_max_overflow
  75 | DB_POOL_TIMEOUT = settings.db.pool_timeout
  76 | DB_POOL_RECYCLE = int(os.getenv("DB_POOL_RECYCLE", "3600"))  # 1 hour
  77 | DB_POOL_PRE_PING = os.getenv("DB_POOL_PRE_PING", "true").lower() == "true"
  78 | DB_ECHO = os.getenv("DB_ECHO", "false").lower() == "true"
  79 | DB_USE_POOLING = os.getenv("DB_USE_POOLING", "true").lower() == "true"
  80 | 
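# Example (illustrative): the URL and pool settings above are read once at
# import time, so overrides must be exported before this module is imported.
# The values below are placeholders, not real credentials.
#
#   os.environ["DATABASE_URL"] = "postgresql://user:****@localhost:5432/maverick"
#   os.environ["DB_USE_POOLING"] = "true"
#   os.environ["DB_POOL_RECYCLE"] = "1800"  # recycle connections after 30 min
#   from maverick_mcp.data import models  # picks up the overrides
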
  81 | # Log the connection string (without password) for debugging
  82 | if DATABASE_URL:
  83 |     # Mask password in URL for logging
  84 |     masked_url = DATABASE_URL
  85 |     if "@" in DATABASE_URL and "://" in DATABASE_URL:
  86 |         parts = DATABASE_URL.split("://", 1)
  87 |         if len(parts) == 2 and "@" in parts[1]:
  88 |             user_pass, host_db = parts[1].split("@", 1)
  89 |             if ":" in user_pass:
  90 |                 user, _ = user_pass.split(":", 1)
  91 |                 masked_url = f"{parts[0]}://{user}:****@{host_db}"
  92 |     logger.info(f"Using database URL: {masked_url}")
  93 |     logger.info(f"Connection pooling: {'ENABLED' if DB_USE_POOLING else 'DISABLED'}")
  94 |     if DB_USE_POOLING:
  95 |         logger.info(
  96 |             f"Pool config: size={DB_POOL_SIZE}, max_overflow={DB_MAX_OVERFLOW}, "
  97 |             f"timeout={DB_POOL_TIMEOUT}s, recycle={DB_POOL_RECYCLE}s"
  98 |         )
  99 | 
 100 | # Create engine with configurable connection pooling
 101 | if DB_USE_POOLING:
 102 |     # Prepare connection arguments based on database type
 103 |     if "postgresql" in DATABASE_URL:
 104 |         # PostgreSQL-specific connection args
 105 |         sync_connect_args = {
 106 |             "connect_timeout": 10,
 107 |             "application_name": "maverick_mcp",
 108 |             "options": f"-c statement_timeout={settings.db.statement_timeout}",
 109 |         }
 110 |     elif "sqlite" in DATABASE_URL:
 111 |         # SQLite-specific args - no SSL parameters
 112 |         sync_connect_args = {"check_same_thread": False}
 113 |     else:
 114 |         # Default - no connection args
 115 |         sync_connect_args = {}
 116 | 
 117 |     # Use QueuePool for production environments
 118 |     engine = create_engine(
 119 |         DATABASE_URL,
 120 |         poolclass=QueuePool,
 121 |         pool_size=DB_POOL_SIZE,
 122 |         max_overflow=DB_MAX_OVERFLOW,
 123 |         pool_timeout=DB_POOL_TIMEOUT,
 124 |         pool_recycle=DB_POOL_RECYCLE,
 125 |         pool_pre_ping=DB_POOL_PRE_PING,
 126 |         echo=DB_ECHO,
 127 |         connect_args=sync_connect_args,
 128 |     )
 129 | else:
 130 |     # Prepare minimal connection arguments for NullPool
 131 |     if "sqlite" in DATABASE_URL:
 132 |         sync_connect_args = {"check_same_thread": False}
 133 |     else:
 134 |         sync_connect_args = {}
 135 | 
 136 |     # Use NullPool for serverless/development environments
 137 |     engine = create_engine(
 138 |         DATABASE_URL,
 139 |         poolclass=NullPool,
 140 |         echo=DB_ECHO,
 141 |         connect_args=sync_connect_args,
 142 |     )
 143 | 
 144 | # Create session factory
 145 | _session_factory = sessionmaker(autocommit=False, autoflush=False, bind=engine)
 146 | 
 147 | _schema_lock = threading.Lock()
 148 | _schema_initialized = False
 149 | 
 150 | 
 151 | def ensure_database_schema(force: bool = False) -> bool:
 152 |     """Ensure the database schema exists for the configured engine.
 153 | 
 154 |     Args:
 155 |         force: When ``True`` the schema will be (re)created even if it appears
 156 |             to exist already.
 157 | 
 158 |     Returns:
 159 |         ``True`` if the schema creation routine executed, ``False`` otherwise.
 160 |     """
 161 | 
 162 |     global _schema_initialized
 163 | 
 164 |     # Fast path: skip inspection once the schema has been verified unless the
 165 |     # caller explicitly requests a forced refresh.
 166 |     if not force and _schema_initialized:
 167 |         return False
 168 | 
 169 |     with _schema_lock:
 170 |         if not force and _schema_initialized:
 171 |             return False
 172 | 
 173 |         try:
 174 |             inspector = inspect(engine)
 175 |             existing_tables = set(inspector.get_table_names())
 176 |         except SQLAlchemyError as exc:  # pragma: no cover - safety net
 177 |             logger.warning(
 178 |                 "Unable to inspect database schema; attempting to create tables anyway",
 179 |                 exc_info=exc,
 180 |             )
 181 |             existing_tables = set()
 182 | 
 183 |         defined_tables = set(Base.metadata.tables.keys())
 184 |         missing_tables = defined_tables - existing_tables
 185 | 
 186 |         should_create = force or bool(missing_tables)
 187 |         if should_create:
 188 |             if missing_tables:
 189 |                 logger.info(
 190 |                     "Creating missing database tables: %s",
 191 |                     ", ".join(sorted(missing_tables)),
 192 |                 )
 193 |             else:
 194 |                 logger.info("Ensuring database schema is up to date")
 195 | 
 196 |             Base.metadata.create_all(bind=engine)
 197 |             _schema_initialized = True
 198 |             return True
 199 | 
 200 |         _schema_initialized = True
 201 |         return False
 202 | 
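# Example (illustrative): ensure_database_schema() is idempotent. It returns
# True only when it actually ran the create routine; once the schema has been
# verified, later calls take the fast path and return False immediately.
#
#   ensure_database_schema()            # creates missing tables, if any
#   ensure_database_schema()            # fast path: returns False
#   ensure_database_schema(force=True)  # unconditionally re-runs create_all()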
 203 | 
 204 | class _SessionFactoryWrapper:
 205 |     """Session factory that ensures the schema exists before creating sessions."""
 206 | 
 207 |     def __init__(self, factory: sessionmaker):
 208 |         self._factory = factory
 209 | 
 210 |     def __call__(self, *args, **kwargs):
 211 |         ensure_database_schema()
 212 |         return self._factory(*args, **kwargs)
 213 | 
 214 |     def __getattr__(self, name):
 215 |         return getattr(self._factory, name)
 216 | 
 217 | 
 218 | SessionLocal = _SessionFactoryWrapper(_session_factory)
 219 | 
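# Example (illustrative): the wrapper is transparent. Calling SessionLocal()
# runs the schema check first, while attribute access such as
# SessionLocal.configure(...) proxies straight through to the sessionmaker.
#
#   with SessionLocal() as session:  # ensure_database_schema() runs here
#       session.execute(text("SELECT 1"))  # text() imported from sqlalchemy
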
 220 | # Create async engine - cached globally for reuse
 221 | _async_engine = None
 222 | _async_session_factory = None
 223 | 
 224 | 
 225 | def _get_async_engine():
 226 |     """Get or create the async engine singleton."""
 227 |     global _async_engine
 228 |     if _async_engine is None:
 229 |         # Convert sync URL to async URL
 230 |         if DATABASE_URL.startswith("sqlite://"):
 231 |             async_url = DATABASE_URL.replace("sqlite://", "sqlite+aiosqlite://")
 232 |         else:
 233 |             async_url = DATABASE_URL.replace("postgresql://", "postgresql+asyncpg://")
 234 | 
 235 |         # Create async engine - don't specify poolclass for async engines
 236 |         # SQLAlchemy will use the appropriate async pool automatically
 237 |         if DB_USE_POOLING:
 238 |             # Prepare connection arguments based on database type
 239 |             if "postgresql" in async_url:
 240 |                 # PostgreSQL-specific connection args
 241 |                 async_connect_args = {
 242 |                     "server_settings": {
 243 |                         "application_name": "maverick_mcp_async",
 244 |                         "statement_timeout": str(settings.db.statement_timeout),
 245 |                     }
 246 |                 }
 247 |             elif "sqlite" in async_url:
 248 |                 # SQLite-specific args - no SSL parameters
 249 |                 async_connect_args = {"check_same_thread": False}
 250 |             else:
 251 |                 # Default - no connection args
 252 |                 async_connect_args = {}
 253 | 
 254 |             _async_engine = create_async_engine(
 255 |                 async_url,
 256 |                 # Don't specify poolclass - let SQLAlchemy choose the async pool
 257 |                 pool_size=DB_POOL_SIZE,
 258 |                 max_overflow=DB_MAX_OVERFLOW,
 259 |                 pool_timeout=DB_POOL_TIMEOUT,
 260 |                 pool_recycle=DB_POOL_RECYCLE,
 261 |                 pool_pre_ping=DB_POOL_PRE_PING,
 262 |                 echo=DB_ECHO,
 263 |                 connect_args=async_connect_args,
 264 |             )
 265 |         else:
 266 |             # Prepare minimal connection arguments for NullPool
 267 |             if "sqlite" in async_url:
 268 |                 async_connect_args = {"check_same_thread": False}
 269 |             else:
 270 |                 async_connect_args = {}
 271 | 
 272 |             _async_engine = create_async_engine(
 273 |                 async_url,
 274 |                 poolclass=NullPool,
 275 |                 echo=DB_ECHO,
 276 |                 connect_args=async_connect_args,
 277 |             )
 278 |         logger.info("Created async database engine")
 279 |     return _async_engine
 280 | 
 281 | 
 282 | def _get_async_session_factory():
 283 |     """Get or create the async session factory singleton."""
 284 |     global _async_session_factory
 285 |     if _async_session_factory is None:
 286 |         engine = _get_async_engine()
 287 |         _async_session_factory = async_sessionmaker(
 288 |             engine, class_=AsyncSession, expire_on_commit=False
 289 |         )
 290 |         logger.info("Created async session factory")
 291 |     return _async_session_factory
 292 | 
 293 | 
 294 | def get_db():
 295 |     """Get database session."""
 296 |     ensure_database_schema()
 297 |     db = SessionLocal()
 298 |     try:
 299 |         yield db
 300 |     finally:
 301 |         db.close()
 302 | 
 303 | 
 304 | # Async database support - imports moved to top of file
 305 | 
 306 | 
 307 | async def get_async_db() -> AsyncGenerator[AsyncSession, None]:
 308 |     """Get an async database session using the cached engine."""
 309 |     # Get the cached session factory
 310 |     async_session_factory = _get_async_session_factory()
 311 | 
 312 |     # Create and yield a session
 313 |     async with async_session_factory() as session:
 314 |         try:
 315 |             yield session
 316 |         finally:
 317 |             await session.close()
 318 | 
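# Example (illustrative, assuming a FastAPI-style app): both generators are
# written for dependency injection, so the session is always closed after the
# request completes.
#
#   from fastapi import Depends, FastAPI
#   from sqlalchemy import select
#
#   app = FastAPI()
#
#   @app.get("/stocks/{ticker}")
#   async def read_stock(ticker: str, db: AsyncSession = Depends(get_async_db)):
#       result = await db.execute(select(Stock).where(Stock.ticker_symbol == ticker))
#       return result.scalar_one_or_none()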
 319 | 
 320 | async def close_async_db_connections():
 321 |     """Close the async database engine and cleanup connections."""
 322 |     global _async_engine, _async_session_factory
 323 |     if _async_engine:
 324 |         await _async_engine.dispose()
 325 |         _async_engine = None
 326 |         _async_session_factory = None
 327 |         logger.info("Closed async database engine")
 328 | 
 329 | 
 330 | def init_db():
 331 |     """Initialize database by creating all tables."""
 332 | 
 333 |     ensure_database_schema(force=True)
 334 | 
 335 | 
 336 | class TimestampMixin:
 337 |     """Mixin for created_at and updated_at timestamps."""
 338 | 
 339 |     created_at = Column(
 340 |         DateTime(timezone=True),
 341 |         default=lambda: datetime.now(UTC),
 342 |         nullable=False,
 343 |     )
 344 |     updated_at = Column(
 345 |         DateTime(timezone=True),
 346 |         default=lambda: datetime.now(UTC),
 347 |         onupdate=lambda: datetime.now(UTC),
 348 |         nullable=False,
 349 |     )
 350 | 
 351 | 
 352 | class Stock(Base, TimestampMixin):
 353 |     """Stock model for storing basic stock information."""
 354 | 
 355 |     __tablename__ = "mcp_stocks"
 356 | 
 357 |     stock_id = Column(Uuid, primary_key=True, default=uuid.uuid4)
 358 |     ticker_symbol = Column(String(10), unique=True, nullable=False, index=True)
 359 |     company_name = Column(String(255))
 360 |     description = Column(Text)
 361 |     sector = Column(String(100))
 362 |     industry = Column(String(100))
 363 |     exchange = Column(String(50))
 364 |     country = Column(String(50))
 365 |     currency = Column(String(3))
 366 |     isin = Column(String(12))
 367 | 
 368 |     # Additional stock metadata
 369 |     market_cap = Column(BigInteger)
 370 |     shares_outstanding = Column(BigInteger)
 371 |     is_etf = Column(Boolean, default=False)
 372 |     is_active = Column(Boolean, default=True, index=True)
 373 | 
 374 |     # Relationships
 375 |     price_caches = relationship(
 376 |         "PriceCache",
 377 |         back_populates="stock",
 378 |         cascade="all, delete-orphan",
 379 |         lazy="selectin",  # Eager load price caches to prevent N+1 queries
 380 |     )
 381 |     maverick_stocks = relationship(
 382 |         "MaverickStocks", back_populates="stock", cascade="all, delete-orphan"
 383 |     )
 384 |     maverick_bear_stocks = relationship(
 385 |         "MaverickBearStocks", back_populates="stock", cascade="all, delete-orphan"
 386 |     )
 387 |     supply_demand_stocks = relationship(
 388 |         "SupplyDemandBreakoutStocks",
 389 |         back_populates="stock",
 390 |         cascade="all, delete-orphan",
 391 |     )
 392 |     technical_cache = relationship(
 393 |         "TechnicalCache", back_populates="stock", cascade="all, delete-orphan"
 394 |     )
 395 | 
 396 |     def __repr__(self):
 397 |         return f"<Stock(ticker={self.ticker_symbol}, name={self.company_name})>"
 398 | 
 399 |     @classmethod
 400 |     def get_or_create(cls, session: Session, ticker_symbol: str, **kwargs) -> Stock:
 401 |         """Get existing stock or create new one."""
 402 |         stock = (
 403 |             session.query(cls).filter_by(ticker_symbol=ticker_symbol.upper()).first()
 404 |         )
 405 |         if not stock:
 406 |             stock = cls(ticker_symbol=ticker_symbol.upper(), **kwargs)
 407 |             session.add(stock)
 408 |             session.commit()
 409 |         return stock
 410 | 
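# Example (illustrative): tickers are normalized to upper case, and a commit
# happens immediately when a new row is inserted.
#
#   with SessionLocal() as session:
#       stock = Stock.get_or_create(session, "aapl", company_name="Apple Inc.")
#       assert stock.ticker_symbol == "AAPL"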
 411 | 
 412 | class PriceCache(Base, TimestampMixin):
 413 |     """Cache for historical stock price data."""
 414 | 
 415 |     __tablename__ = "mcp_price_cache"
 416 |     __table_args__ = (
 417 |         UniqueConstraint("stock_id", "date", name="mcp_price_cache_stock_date_unique"),
 418 |         Index("mcp_price_cache_stock_id_date_idx", "stock_id", "date"),
 419 |         Index("mcp_price_cache_ticker_date_idx", "stock_id", "date"),  # note: same columns as the index above
 420 |     )
 421 | 
 422 |     price_cache_id = Column(Uuid, primary_key=True, default=uuid.uuid4)
 423 |     stock_id = Column(Uuid, ForeignKey("mcp_stocks.stock_id"), nullable=False)
 424 |     date = Column(Date, nullable=False)
 425 |     open_price = Column(Numeric(12, 4))
 426 |     high_price = Column(Numeric(12, 4))
 427 |     low_price = Column(Numeric(12, 4))
 428 |     close_price = Column(Numeric(12, 4))
 429 |     volume = Column(BigInteger)
 430 | 
 431 |     # Relationships
 432 |     stock = relationship(
 433 |         "Stock", back_populates="price_caches", lazy="joined"
 434 |     )  # Eager load stock info
 435 | 
 436 |     def __repr__(self):
 437 |         return f"<PriceCache(stock_id={self.stock_id}, date={self.date}, close={self.close_price})>"
 438 | 
 439 |     @classmethod
 440 |     def get_price_data(
 441 |         cls,
 442 |         session: Session,
 443 |         ticker_symbol: str,
 444 |         start_date: str,
 445 |         end_date: str | None = None,
 446 |     ) -> pd.DataFrame:
 447 |         """
 448 |         Return a pandas DataFrame of price data for the specified symbol and date range.
 449 | 
 450 |         Args:
 451 |             session: Database session
 452 |             ticker_symbol: Stock ticker symbol
 453 |             start_date: Start date in YYYY-MM-DD format
 454 |             end_date: End date in YYYY-MM-DD format (default: today)
 455 | 
 456 |         Returns:
 457 |             DataFrame with OHLCV data indexed by date
 458 |         """
 459 |         if not end_date:
 460 |             end_date = datetime.now(UTC).strftime("%Y-%m-%d")
 461 | 
 462 |         # Query with join to get ticker symbol
 463 |         query = (
 464 |             session.query(
 465 |                 cls.date,
 466 |                 cls.open_price.label("open"),
 467 |                 cls.high_price.label("high"),
 468 |                 cls.low_price.label("low"),
 469 |                 cls.close_price.label("close"),
 470 |                 cls.volume,
 471 |             )
 472 |             .join(Stock)
 473 |             .filter(
 474 |                 Stock.ticker_symbol == ticker_symbol.upper(),
 475 |                 cls.date >= pd.to_datetime(start_date).date(),
 476 |                 cls.date <= pd.to_datetime(end_date).date(),
 477 |             )
 478 |             .order_by(cls.date)
 479 |         )
 480 | 
 481 |         # Convert to DataFrame
 482 |         df = pd.DataFrame(query.all())
 483 | 
 484 |         if not df.empty:
 485 |             df["date"] = pd.to_datetime(df["date"])
 486 |             df.set_index("date", inplace=True)
 487 | 
 488 |             # Convert decimal types to float
 489 |             for col in ["open", "high", "low", "close"]:
 490 |                 df[col] = df[col].astype(float)
 491 | 
 492 |             df["volume"] = df["volume"].astype(int)
 493 |             df["symbol"] = ticker_symbol.upper()
 494 | 
 495 |         return df
 496 | 
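# Example (illustrative): the result is a date-indexed OHLCV DataFrame with
# Decimal columns already coerced to float, ready for analysis.
#
#   with SessionLocal() as session:
#       df = PriceCache.get_price_data(session, "AAPL", "2024-01-01", "2024-06-30")
#       if not df.empty:
#           df["sma_20"] = df["close"].rolling(20).mean()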
 497 | 
 498 | class MaverickStocks(Base, TimestampMixin):
 499 |     """Maverick stocks screening results - self-contained model."""
 500 | 
 501 |     __tablename__ = "mcp_maverick_stocks"
 502 |     __table_args__ = (
 503 |         Index("mcp_maverick_stocks_combined_score_idx", "combined_score"),
 504 |         Index(
 505 |             "mcp_maverick_stocks_momentum_score_idx", "momentum_score"
 506 |         ),  # formerly rs_rating_idx
 507 |         Index("mcp_maverick_stocks_date_analyzed_idx", "date_analyzed"),
 508 |         Index("mcp_maverick_stocks_stock_date_idx", "stock_id", "date_analyzed"),
 509 |     )
 510 | 
 511 |     id = Column(get_primary_key_type(), primary_key=True, autoincrement=True)
 512 |     stock_id = Column(
 513 |         Uuid,
 514 |         ForeignKey("mcp_stocks.stock_id"),
 515 |         nullable=False,
 516 |         index=True,
 517 |     )
 518 |     date_analyzed = Column(
 519 |         Date, nullable=False, default=lambda: datetime.now(UTC).date()
 520 |     )
 521 |     # OHLCV Data
 522 |     open_price = Column(Numeric(12, 4), default=0)
 523 |     high_price = Column(Numeric(12, 4), default=0)
 524 |     low_price = Column(Numeric(12, 4), default=0)
 525 |     close_price = Column(Numeric(12, 4), default=0)
 526 |     volume = Column(BigInteger, default=0)
 527 | 
 528 |     # Technical Indicators
 529 |     ema_21 = Column(Numeric(12, 4), default=0)
 530 |     sma_50 = Column(Numeric(12, 4), default=0)
 531 |     sma_150 = Column(Numeric(12, 4), default=0)
 532 |     sma_200 = Column(Numeric(12, 4), default=0)
 533 |     momentum_score = Column(Numeric(5, 2), default=0)  # formerly rs_rating
 534 |     avg_vol_30d = Column(Numeric(15, 2), default=0)
 535 |     adr_pct = Column(Numeric(5, 2), default=0)
 536 |     atr = Column(Numeric(12, 4), default=0)
 537 | 
 538 |     # Pattern Analysis
 539 |     pattern_type = Column(String(50))  # 'pat' field
 540 |     squeeze_status = Column(String(50))  # 'sqz' field
 541 |     consolidation_status = Column(String(50))  # formerly vcp_status, 'vcp' field
 542 |     entry_signal = Column(String(50))  # 'entry' field
 543 | 
 544 |     # Scoring
 545 |     compression_score = Column(Integer, default=0)
 546 |     pattern_detected = Column(Integer, default=0)
 547 |     combined_score = Column(Integer, default=0)
 548 | 
 549 |     # Relationships
 550 |     stock = relationship("Stock", back_populates="maverick_stocks")
 551 | 
 552 |     def __repr__(self):
 553 |         return f"<MaverickStock(stock_id={self.stock_id}, close={self.close_price}, score={self.combined_score})>"
 554 | 
 555 |     @classmethod
 556 |     def get_top_stocks(
 557 |         cls, session: Session, limit: int = 20
 558 |     ) -> Sequence[MaverickStocks]:
 559 |         """Get top maverick stocks by combined score."""
 560 |         return (
 561 |             session.query(cls)
 562 |             .join(Stock)
 563 |             .order_by(cls.combined_score.desc())
 564 |             .limit(limit)
 565 |             .all()
 566 |         )
 567 | 
 568 |     @classmethod
 569 |     def get_latest_analysis(
 570 |         cls, session: Session, days_back: int = 1
 571 |     ) -> Sequence[MaverickStocks]:
 572 |         """Get latest maverick analysis within specified days."""
 573 |         cutoff_date = datetime.now(UTC).date() - timedelta(days=days_back)
 574 |         return (
 575 |             session.query(cls)
 576 |             .join(Stock)
 577 |             .filter(cls.date_analyzed >= cutoff_date)
 578 |             .order_by(cls.combined_score.desc())
 579 |             .all()
 580 |         )
 581 | 
 582 |     def to_dict(self) -> dict:
 583 |         """Convert to dictionary for JSON serialization."""
 584 |         return {
 585 |             "stock_id": str(self.stock_id),
 586 |             "ticker": self.stock.ticker_symbol if self.stock else None,
 587 |             "date_analyzed": self.date_analyzed.isoformat()
 588 |             if self.date_analyzed
 589 |             else None,
 590 |             "close": float(self.close_price) if self.close_price else 0,
 591 |             "volume": self.volume,
 592 |             "momentum_score": float(self.momentum_score)
 593 |             if self.momentum_score
 594 |             else 0,  # formerly rs_rating
 595 |             "adr_pct": float(self.adr_pct) if self.adr_pct else 0,
 596 |             "pattern": self.pattern_type,
 597 |             "squeeze": self.squeeze_status,
 598 |             "consolidation": self.consolidation_status,  # formerly vcp
 599 |             "entry": self.entry_signal,
 600 |             "combined_score": self.combined_score,
 601 |             "compression_score": self.compression_score,
 602 |             "pattern_detected": self.pattern_detected,
 603 |             "ema_21": float(self.ema_21) if self.ema_21 else 0,
 604 |             "sma_50": float(self.sma_50) if self.sma_50 else 0,
 605 |             "sma_150": float(self.sma_150) if self.sma_150 else 0,
 606 |             "sma_200": float(self.sma_200) if self.sma_200 else 0,
 607 |             "atr": float(self.atr) if self.atr else 0,
 608 |             "avg_vol_30d": float(self.avg_vol_30d) if self.avg_vol_30d else 0,
 609 |         }
 610 | 
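# Example (illustrative): a typical screening flow pulls the highest combined
# scores and serializes them for an API response.
#
#   with SessionLocal() as session:
#       top = MaverickStocks.get_top_stocks(session, limit=10)
#       payload = [row.to_dict() for row in top]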
 611 | 
 612 | class MaverickBearStocks(Base, TimestampMixin):
 613 |     """Maverick bear stocks screening results - self-contained model."""
 614 | 
 615 |     __tablename__ = "mcp_maverick_bear_stocks"
 616 |     __table_args__ = (
 617 |         Index("mcp_maverick_bear_stocks_score_idx", "score"),
 618 |         Index(
 619 |             "mcp_maverick_bear_stocks_momentum_score_idx", "momentum_score"
 620 |         ),  # formerly rs_rating_idx
 621 |         Index("mcp_maverick_bear_stocks_date_analyzed_idx", "date_analyzed"),
 622 |         Index("mcp_maverick_bear_stocks_stock_date_idx", "stock_id", "date_analyzed"),
 623 |     )
 624 | 
 625 |     id = Column(get_primary_key_type(), primary_key=True, autoincrement=True)
 626 |     stock_id = Column(
 627 |         Uuid,
 628 |         ForeignKey("mcp_stocks.stock_id"),
 629 |         nullable=False,
 630 |         index=True,
 631 |     )
 632 |     date_analyzed = Column(
 633 |         Date, nullable=False, default=lambda: datetime.now(UTC).date()
 634 |     )
 635 | 
 636 |     # OHLCV Data
 637 |     open_price = Column(Numeric(12, 4), default=0)
 638 |     high_price = Column(Numeric(12, 4), default=0)
 639 |     low_price = Column(Numeric(12, 4), default=0)
 640 |     close_price = Column(Numeric(12, 4), default=0)
 641 |     volume = Column(BigInteger, default=0)
 642 | 
 643 |     # Technical Indicators
 644 |     momentum_score = Column(Numeric(5, 2), default=0)  # formerly rs_rating
 645 |     ema_21 = Column(Numeric(12, 4), default=0)
 646 |     sma_50 = Column(Numeric(12, 4), default=0)
 647 |     sma_200 = Column(Numeric(12, 4), default=0)
 648 |     rsi_14 = Column(Numeric(5, 2), default=0)
 649 | 
 650 |     # MACD Indicators
 651 |     macd = Column(Numeric(12, 6), default=0)
 652 |     macd_signal = Column(Numeric(12, 6), default=0)
 653 |     macd_histogram = Column(Numeric(12, 6), default=0)
 654 | 
 655 |     # Additional Bear Market Indicators
 656 |     dist_days_20 = Column(Integer, default=0)  # Days from 20 SMA
 657 |     adr_pct = Column(Numeric(5, 2), default=0)
 658 |     atr_contraction = Column(Boolean, default=False)
 659 |     atr = Column(Numeric(12, 4), default=0)
 660 |     avg_vol_30d = Column(Numeric(15, 2), default=0)
 661 |     big_down_vol = Column(Boolean, default=False)
 662 | 
 663 |     # Pattern Analysis
 664 |     squeeze_status = Column(String(50))  # 'sqz' field
 665 |     consolidation_status = Column(String(50))  # formerly vcp_status, 'vcp' field
 666 | 
 667 |     # Scoring
 668 |     score = Column(Integer, default=0)
 669 | 
 670 |     # Relationships
 671 |     stock = relationship("Stock", back_populates="maverick_bear_stocks")
 672 | 
 673 |     def __repr__(self):
 674 |         return f"<MaverickBearStock(stock_id={self.stock_id}, close={self.close_price}, score={self.score})>"
 675 | 
 676 |     @classmethod
 677 |     def get_top_stocks(
 678 |         cls, session: Session, limit: int = 20
 679 |     ) -> Sequence[MaverickBearStocks]:
 680 |         """Get top maverick bear stocks by score."""
 681 |         return (
 682 |             session.query(cls).join(Stock).order_by(cls.score.desc()).limit(limit).all()
 683 |         )
 684 | 
 685 |     @classmethod
 686 |     def get_latest_analysis(
 687 |         cls, session: Session, days_back: int = 1
 688 |     ) -> Sequence[MaverickBearStocks]:
 689 |         """Get latest bear analysis within specified days."""
 690 |         cutoff_date = datetime.now(UTC).date() - timedelta(days=days_back)
 691 |         return (
 692 |             session.query(cls)
 693 |             .join(Stock)
 694 |             .filter(cls.date_analyzed >= cutoff_date)
 695 |             .order_by(cls.score.desc())
 696 |             .all()
 697 |         )
 698 | 
 699 |     def to_dict(self) -> dict:
 700 |         """Convert to dictionary for JSON serialization."""
 701 |         return {
 702 |             "stock_id": str(self.stock_id),
 703 |             "ticker": self.stock.ticker_symbol if self.stock else None,
 704 |             "date_analyzed": self.date_analyzed.isoformat()
 705 |             if self.date_analyzed
 706 |             else None,
 707 |             "close": float(self.close_price) if self.close_price else 0,
 708 |             "volume": self.volume,
 709 |             "momentum_score": float(self.momentum_score)
 710 |             if self.momentum_score
 711 |             else 0,  # formerly rs_rating
 712 |             "rsi_14": float(self.rsi_14) if self.rsi_14 else 0,
 713 |             "macd": float(self.macd) if self.macd else 0,
 714 |             "macd_signal": float(self.macd_signal) if self.macd_signal else 0,
 715 |             "macd_histogram": float(self.macd_histogram) if self.macd_histogram else 0,
 716 |             "adr_pct": float(self.adr_pct) if self.adr_pct else 0,
 717 |             "atr": float(self.atr) if self.atr else 0,
 718 |             "atr_contraction": self.atr_contraction,
 719 |             "avg_vol_30d": float(self.avg_vol_30d) if self.avg_vol_30d else 0,
 720 |             "big_down_vol": self.big_down_vol,
 721 |             "score": self.score,
 722 |             "squeeze": self.squeeze_status,
 723 |             "consolidation": self.consolidation_status,  # formerly vcp
 724 |             "ema_21": float(self.ema_21) if self.ema_21 else 0,
 725 |             "sma_50": float(self.sma_50) if self.sma_50 else 0,
 726 |             "sma_200": float(self.sma_200) if self.sma_200 else 0,
 727 |             "dist_days_20": self.dist_days_20,
 728 |         }
 729 | 
 730 | 
 731 | class SupplyDemandBreakoutStocks(Base, TimestampMixin):
 732 |     """Supply/demand breakout stocks screening results - self-contained model.
 733 | 
 734 |     This model identifies stocks experiencing accumulation breakouts with strong relative strength,
 735 |     indicating a potential shift from supply to demand dominance in the market structure.
 736 |     """
 737 | 
 738 |     __tablename__ = "mcp_supply_demand_breakouts"
 739 |     __table_args__ = (
 740 |         Index(
 741 |             "mcp_supply_demand_breakouts_momentum_score_idx", "momentum_score"
 742 |         ),  # formerly rs_rating_idx
 743 |         Index("mcp_supply_demand_breakouts_date_analyzed_idx", "date_analyzed"),
 744 |         Index(
 745 |             "mcp_supply_demand_breakouts_stock_date_idx", "stock_id", "date_analyzed"
 746 |         ),
 747 |         Index(
 748 |             "mcp_supply_demand_breakouts_ma_filter_idx",
 749 |             "close_price",
 750 |             "sma_50",
 751 |             "sma_150",
 752 |             "sma_200",
 753 |         ),
 754 |     )
 755 | 
 756 |     id = Column(get_primary_key_type(), primary_key=True, autoincrement=True)
 757 |     stock_id = Column(
 758 |         Uuid,
 759 |         ForeignKey("mcp_stocks.stock_id"),
 760 |         nullable=False,
 761 |         index=True,
 762 |     )
 763 |     date_analyzed = Column(
 764 |         Date, nullable=False, default=lambda: datetime.now(UTC).date()
 765 |     )
 766 | 
 767 |     # OHLCV Data
 768 |     open_price = Column(Numeric(12, 4), default=0)
 769 |     high_price = Column(Numeric(12, 4), default=0)
 770 |     low_price = Column(Numeric(12, 4), default=0)
 771 |     close_price = Column(Numeric(12, 4), default=0)
 772 |     volume = Column(BigInteger, default=0)
 773 | 
 774 |     # Technical Indicators
 775 |     ema_21 = Column(Numeric(12, 4), default=0)
 776 |     sma_50 = Column(Numeric(12, 4), default=0)
 777 |     sma_150 = Column(Numeric(12, 4), default=0)
 778 |     sma_200 = Column(Numeric(12, 4), default=0)
 779 |     momentum_score = Column(Numeric(5, 2), default=0)  # formerly rs_rating
 780 |     avg_volume_30d = Column(Numeric(15, 2), default=0)
 781 |     adr_pct = Column(Numeric(5, 2), default=0)
 782 |     atr = Column(Numeric(12, 4), default=0)
 783 | 
 784 |     # Pattern Analysis
 785 |     pattern_type = Column(String(50))  # 'pat' field
 786 |     squeeze_status = Column(String(50))  # 'sqz' field
 787 |     consolidation_status = Column(String(50))  # formerly vcp_status, 'vcp' field
 788 |     entry_signal = Column(String(50))  # 'entry' field
 789 | 
 790 |     # Supply/Demand Analysis
 791 |     accumulation_rating = Column(Numeric(5, 2), default=0)
 792 |     distribution_rating = Column(Numeric(5, 2), default=0)
 793 |     breakout_strength = Column(Numeric(5, 2), default=0)
 794 | 
 795 |     # Relationships
 796 |     stock = relationship("Stock", back_populates="supply_demand_stocks")
 797 | 
 798 |     def __repr__(self):
 799 |         return f"<SupplyDemandBreakoutStock(stock_id={self.stock_id}, close={self.close_price}, momentum={self.momentum_score})>"  # formerly rs
 800 | 
 801 |     @classmethod
 802 |     def get_top_stocks(
 803 |         cls, session: Session, limit: int = 20
 804 |     ) -> Sequence[SupplyDemandBreakoutStocks]:
 805 |         """Get top supply/demand breakout stocks by momentum score."""  # formerly relative strength rating
 806 |         return (
 807 |             session.query(cls)
 808 |             .join(Stock)
 809 |             .order_by(cls.momentum_score.desc())  # formerly rs_rating
 810 |             .limit(limit)
 811 |             .all()
 812 |         )
 813 | 
 814 |     @classmethod
 815 |     def get_stocks_above_moving_averages(
 816 |         cls, session: Session
 817 |     ) -> Sequence[SupplyDemandBreakoutStocks]:
 818 |         """Get stocks in demand expansion phase - trading above all major moving averages.
 819 | 
 820 |         This identifies stocks with:
 821 |         - Price above 50, 150, and 200-day moving averages (demand zone)
 822 |         - Upward trending moving averages (accumulation structure)
 823 |         - Indicates institutional accumulation and supply absorption
 824 |         """
 825 |         return (
 826 |             session.query(cls)
 827 |             .join(Stock)
 828 |             .filter(
 829 |                 cls.close_price > cls.sma_50,
 830 |                 cls.close_price > cls.sma_150,
 831 |                 cls.close_price > cls.sma_200,
 832 |                 cls.sma_50 > cls.sma_150,
 833 |                 cls.sma_150 > cls.sma_200,
 834 |             )
 835 |             .order_by(cls.momentum_score.desc())  # formerly rs_rating
 836 |             .all()
 837 |         )
 838 | 
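    # Example (illustrative): callers get the full demand-expansion check
    # (price above the 50/150/200-day SMAs, with the averages stacked in
    # order) from a single call:
    #
    #   with SessionLocal() as session:
    #       leaders = SupplyDemandBreakoutStocks.get_stocks_above_moving_averages(session)
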
 839 |     @classmethod
 840 |     def get_latest_analysis(
 841 |         cls, session: Session, days_back: int = 1
 842 |     ) -> Sequence[SupplyDemandBreakoutStocks]:
 843 |         """Get latest supply/demand analysis within specified days."""
 844 |         cutoff_date = datetime.now(UTC).date() - timedelta(days=days_back)
 845 |         return (
 846 |             session.query(cls)
 847 |             .join(Stock)
 848 |             .filter(cls.date_analyzed >= cutoff_date)
 849 |             .order_by(cls.momentum_score.desc())  # formerly rs_rating
 850 |             .all()
 851 |         )
 852 | 
 853 |     def to_dict(self) -> dict:
 854 |         """Convert to dictionary for JSON serialization."""
 855 |         return {
 856 |             "stock_id": str(self.stock_id),
 857 |             "ticker": self.stock.ticker_symbol if self.stock else None,
 858 |             "date_analyzed": self.date_analyzed.isoformat()
 859 |             if self.date_analyzed
 860 |             else None,
 861 |             "close": float(self.close_price) if self.close_price else 0,
 862 |             "volume": self.volume,
 863 |             "momentum_score": float(self.momentum_score)
 864 |             if self.momentum_score
 865 |             else 0,  # formerly rs_rating
 866 |             "adr_pct": float(self.adr_pct) if self.adr_pct else 0,
 867 |             "pattern": self.pattern_type,
 868 |             "squeeze": self.squeeze_status,
 869 |             "consolidation": self.consolidation_status,  # formerly vcp
 870 |             "entry": self.entry_signal,
 871 |             "ema_21": float(self.ema_21) if self.ema_21 else 0,
 872 |             "sma_50": float(self.sma_50) if self.sma_50 else 0,
 873 |             "sma_150": float(self.sma_150) if self.sma_150 else 0,
 874 |             "sma_200": float(self.sma_200) if self.sma_200 else 0,
 875 |             "atr": float(self.atr) if self.atr else 0,
 876 |             "avg_volume_30d": float(self.avg_volume_30d) if self.avg_volume_30d else 0,
 877 |             "accumulation_rating": float(self.accumulation_rating)
 878 |             if self.accumulation_rating
 879 |             else 0,
 880 |             "distribution_rating": float(self.distribution_rating)
 881 |             if self.distribution_rating
 882 |             else 0,
 883 |             "breakout_strength": float(self.breakout_strength)
 884 |             if self.breakout_strength
 885 |             else 0,
 886 |         }
 887 | 
 888 | 
 889 | class TechnicalCache(Base, TimestampMixin):
 890 |     """Cache for calculated technical indicators."""
 891 | 
 892 |     __tablename__ = "mcp_technical_cache"
 893 |     __table_args__ = (
 894 |         UniqueConstraint(
 895 |             "stock_id",
 896 |             "date",
 897 |             "indicator_type",
 898 |             name="mcp_technical_cache_stock_date_indicator_unique",
 899 |         ),
 900 |         Index("mcp_technical_cache_stock_date_idx", "stock_id", "date"),
 901 |         Index("mcp_technical_cache_indicator_idx", "indicator_type"),
 902 |         Index("mcp_technical_cache_date_idx", "date"),
 903 |     )
 904 | 
 905 |     id = Column(get_primary_key_type(), primary_key=True, autoincrement=True)
 906 |     stock_id = Column(Uuid, ForeignKey("mcp_stocks.stock_id"), nullable=False)
 907 |     date = Column(Date, nullable=False)
 908 |     indicator_type = Column(
 909 |         String(50), nullable=False
 910 |     )  # 'SMA_20', 'EMA_21', 'RSI_14', etc.
 911 | 
 912 |     # Flexible indicator values
 913 |     value = Column(Numeric(20, 8))  # Primary indicator value
 914 |     value_2 = Column(Numeric(20, 8))  # Secondary value (e.g., MACD signal)
 915 |     value_3 = Column(Numeric(20, 8))  # Tertiary value (e.g., MACD histogram)
 916 | 
 917 |     # Text values for complex indicators
 918 |     meta_data = Column(Text)  # JSON string for additional metadata
 919 | 
 920 |     # Calculation parameters
 921 |     period = Column(Integer)  # Period used (20 for SMA_20, etc.)
 922 |     parameters = Column(Text)  # JSON string for additional parameters
 923 | 
 924 |     # Relationships
 925 |     stock = relationship("Stock", back_populates="technical_cache")
 926 | 
 927 |     def __repr__(self):
 928 |         return (
 929 |             f"<TechnicalCache(stock_id={self.stock_id}, date={self.date}, "
 930 |             f"indicator={self.indicator_type}, value={self.value})>"
 931 |         )
 932 | 
 933 |     @classmethod
 934 |     def get_indicator(
 935 |         cls,
 936 |         session: Session,
 937 |         ticker_symbol: str,
 938 |         indicator_type: str,
 939 |         start_date: str,
 940 |         end_date: str | None = None,
 941 |     ) -> pd.DataFrame:
 942 |         """
 943 |         Get technical indicator data for a symbol and date range.
 944 | 
 945 |         Args:
 946 |             session: Database session
 947 |             ticker_symbol: Stock ticker symbol
 948 |             indicator_type: Type of indicator (e.g., 'SMA_20', 'RSI_14')
 949 |             start_date: Start date in YYYY-MM-DD format
 950 |             end_date: End date in YYYY-MM-DD format (default: today)
 951 | 
 952 |         Returns:
 953 |             DataFrame with indicator data indexed by date
 954 |         """
 955 |         if not end_date:
 956 |             end_date = datetime.now(UTC).strftime("%Y-%m-%d")
 957 | 
 958 |         query = (
 959 |             session.query(
 960 |                 cls.date,
 961 |                 cls.value,
 962 |                 cls.value_2,
 963 |                 cls.value_3,
 964 |                 cls.meta_data,
 965 |                 cls.parameters,
 966 |             )
 967 |             .join(Stock)
 968 |             .filter(
 969 |                 Stock.ticker_symbol == ticker_symbol.upper(),
 970 |                 cls.indicator_type == indicator_type,
 971 |                 cls.date >= pd.to_datetime(start_date).date(),
 972 |                 cls.date <= pd.to_datetime(end_date).date(),
 973 |             )
 974 |             .order_by(cls.date)
 975 |         )
 976 | 
 977 |         df = pd.DataFrame(query.all())
 978 | 
 979 |         if not df.empty:
 980 |             df["date"] = pd.to_datetime(df["date"])
 981 |             df.set_index("date", inplace=True)
 982 | 
 983 |             # Convert decimal types to float
 984 |             for col in ["value", "value_2", "value_3"]:
 985 |                 if col in df.columns:
 986 |                     df[col] = df[col].astype(float)
 987 | 
 988 |             df["symbol"] = ticker_symbol.upper()
 989 |             df["indicator_type"] = indicator_type
 990 | 
 991 |         return df
 992 | 
 993 |     def to_dict(self) -> dict:
 994 |         """Convert to dictionary for JSON serialization."""
 995 |         return {
 996 |             "stock_id": str(self.stock_id),
 997 |             "date": self.date.isoformat() if self.date else None,
 998 |             "indicator_type": self.indicator_type,
 999 |             "value": float(self.value) if self.value is not None else None,
1000 |             "value_2": float(self.value_2) if self.value_2 is not None else None,
1001 |             "value_3": float(self.value_3) if self.value_3 is not None else None,
1002 |             "period": self.period,
1003 |             "meta_data": self.meta_data,
1004 |             "parameters": self.parameters,
1005 |         }
1006 | 
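# Illustrative usage sketch (assumes an open SQLAlchemy `session` and that
# SMA_50 values for AAPL were previously cached; not part of this module):
#
#     df = TechnicalCache.get_indicator(
#         session, "AAPL", "SMA_50", start_date="2024-01-01"
#     )
#     if not df.empty:
#         latest_sma = df["value"].iloc[-1]  # float, thanks to the decimal cast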
1007 | 
1008 | # Backtesting Models
1009 | 
1010 | 
1011 | class BacktestResult(Base, TimestampMixin):
1012 |     """Main backtest results table with comprehensive metrics."""
1013 | 
1014 |     __tablename__ = "mcp_backtest_results"
1015 |     __table_args__ = (
1016 |         Index("mcp_backtest_results_symbol_idx", "symbol"),
1017 |         Index("mcp_backtest_results_strategy_idx", "strategy_type"),
1018 |         Index("mcp_backtest_results_date_idx", "backtest_date"),
1019 |         Index("mcp_backtest_results_sharpe_idx", "sharpe_ratio"),
1020 |         Index("mcp_backtest_results_total_return_idx", "total_return"),
1021 |         Index("mcp_backtest_results_symbol_strategy_idx", "symbol", "strategy_type"),
1022 |     )
1023 | 
1024 |     backtest_id = Column(Uuid, primary_key=True, default=uuid.uuid4)
1025 | 
1026 |     # Basic backtest metadata
1027 |     symbol = Column(String(10), nullable=False, index=True)
1028 |     strategy_type = Column(String(50), nullable=False)
1029 |     backtest_date = Column(
1030 |         DateTime(timezone=True), nullable=False, default=lambda: datetime.now(UTC)
1031 |     )
1032 | 
1033 |     # Date range and setup
1034 |     start_date = Column(Date, nullable=False)
1035 |     end_date = Column(Date, nullable=False)
1036 |     initial_capital = Column(Numeric(15, 2), default=10000.0)
1037 | 
1038 |     # Trading costs and parameters
1039 |     fees = Column(Numeric(6, 4), default=0.001)  # 0.1% default
1040 |     slippage = Column(Numeric(6, 4), default=0.001)  # 0.1% default
1041 | 
1042 |     # Strategy parameters (stored as JSON for flexibility)
1043 |     parameters = Column(JSON)
1044 | 
1045 |     # Key Performance Metrics
1046 |     total_return = Column(Numeric(10, 4))  # Total return percentage
1047 |     annualized_return = Column(Numeric(10, 4))  # Annualized return percentage
1048 |     sharpe_ratio = Column(Numeric(8, 4))
1049 |     sortino_ratio = Column(Numeric(8, 4))
1050 |     calmar_ratio = Column(Numeric(8, 4))
1051 | 
1052 |     # Risk Metrics
1053 |     max_drawdown = Column(Numeric(8, 4))  # Maximum drawdown percentage
1054 |     max_drawdown_duration = Column(Integer)  # Days
1055 |     volatility = Column(Numeric(8, 4))  # Annualized volatility
1056 |     downside_volatility = Column(Numeric(8, 4))  # Downside deviation
1057 | 
1058 |     # Trade Statistics
1059 |     total_trades = Column(Integer, default=0)
1060 |     winning_trades = Column(Integer, default=0)
1061 |     losing_trades = Column(Integer, default=0)
1062 |     win_rate = Column(Numeric(5, 4))  # Win rate as a fraction (0-1)
1063 | 
1064 |     # P&L Statistics
1065 |     profit_factor = Column(Numeric(8, 4))  # Gross profit / Gross loss
1066 |     average_win = Column(Numeric(12, 4))
1067 |     average_loss = Column(Numeric(12, 4))
1068 |     largest_win = Column(Numeric(12, 4))
1069 |     largest_loss = Column(Numeric(12, 4))
1070 | 
1071 |     # Portfolio Value Metrics
1072 |     final_portfolio_value = Column(Numeric(15, 2))
1073 |     peak_portfolio_value = Column(Numeric(15, 2))
1074 | 
1075 |     # Additional Analysis
1076 |     beta = Column(Numeric(8, 4))  # Market beta
1077 |     alpha = Column(Numeric(8, 4))  # Alpha vs market
1078 | 
1079 |     # Time series data (stored as JSON for efficient queries)
1080 |     equity_curve = Column(JSON)  # Daily portfolio values
1081 |     drawdown_series = Column(JSON)  # Daily drawdown values
1082 | 
1083 |     # Execution metadata
1084 |     execution_time_seconds = Column(Numeric(8, 3))  # How long the backtest took
1085 |     data_points = Column(Integer)  # Number of data points used
1086 | 
1087 |     # Status and notes
1088 |     status = Column(String(20), default="completed")  # completed, failed, in_progress
1089 |     error_message = Column(Text)  # Error details if status = failed
1090 |     notes = Column(Text)  # User notes
1091 | 
1092 |     # Relationships
1093 |     trades = relationship(
1094 |         "BacktestTrade",
1095 |         back_populates="backtest_result",
1096 |         cascade="all, delete-orphan",
1097 |         lazy="selectin",
1098 |     )
1099 |     optimization_results = relationship(
1100 |         "OptimizationResult",
1101 |         back_populates="backtest_result",
1102 |         cascade="all, delete-orphan",
1103 |     )
1104 | 
1105 |     def __repr__(self):
1106 |         return (
1107 |             f"<BacktestResult(id={self.backtest_id}, symbol={self.symbol}, "
1108 |             f"strategy={self.strategy_type}, return={self.total_return})>"
1109 |         )
1110 | 
1111 |     @classmethod
1112 |     def get_by_symbol_and_strategy(
1113 |         cls, session: Session, symbol: str, strategy_type: str, limit: int = 10
1114 |     ) -> Sequence[BacktestResult]:
1115 |         """Get recent backtests for a specific symbol and strategy."""
1116 |         return (
1117 |             session.query(cls)
1118 |             .filter(cls.symbol == symbol.upper(), cls.strategy_type == strategy_type)
1119 |             .order_by(cls.backtest_date.desc())
1120 |             .limit(limit)
1121 |             .all()
1122 |         )
1123 | 
1124 |     @classmethod
1125 |     def get_best_performing(
1126 |         cls, session: Session, metric: str = "sharpe_ratio", limit: int = 20
1127 |     ) -> Sequence[BacktestResult]:
1128 |         """Get best performing backtests by specified metric."""
1129 |         metric_column = getattr(cls, metric, cls.sharpe_ratio)
1130 |         return (
1131 |             session.query(cls)
1132 |             .filter(cls.status == "completed")
1133 |             .order_by(metric_column.desc())
1134 |             .limit(limit)
1135 |             .all()
1136 |         )
1137 | 
1138 |     def to_dict(self) -> dict:
1139 |         """Convert to dictionary for JSON serialization."""
1140 |         return {
1141 |             "backtest_id": str(self.backtest_id),
1142 |             "symbol": self.symbol,
1143 |             "strategy_type": self.strategy_type,
1144 |             "backtest_date": self.backtest_date.isoformat()
1145 |             if self.backtest_date
1146 |             else None,
1147 |             "start_date": self.start_date.isoformat() if self.start_date else None,
1148 |             "end_date": self.end_date.isoformat() if self.end_date else None,
1149 |             "initial_capital": float(self.initial_capital)
1150 |             if self.initial_capital
1151 |             else 0,
1152 |             "total_return": float(self.total_return) if self.total_return else 0,
1153 |             "sharpe_ratio": float(self.sharpe_ratio) if self.sharpe_ratio else 0,
1154 |             "max_drawdown": float(self.max_drawdown) if self.max_drawdown else 0,
1155 |             "win_rate": float(self.win_rate) if self.win_rate else 0,
1156 |             "total_trades": self.total_trades,
1157 |             "parameters": self.parameters,
1158 |             "status": self.status,
1159 |         }
1160 | 
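# Illustrative usage sketch (assumes an open `session`; the strategy name is an
# example only):
#
#     recent = BacktestResult.get_by_symbol_and_strategy(
#         session, "AAPL", "sma_crossover", limit=5
#     )
#     top = BacktestResult.get_best_performing(session, metric="total_return")
#     summaries = [bt.to_dict() for bt in top]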
1161 | 
1162 | class BacktestTrade(Base, TimestampMixin):
1163 |     """Individual trade records from backtests."""
1164 | 
1165 |     __tablename__ = "mcp_backtest_trades"
1166 |     __table_args__ = (
1167 |         Index("mcp_backtest_trades_backtest_idx", "backtest_id"),
1168 |         Index("mcp_backtest_trades_entry_date_idx", "entry_date"),
1169 |         Index("mcp_backtest_trades_exit_date_idx", "exit_date"),
1170 |         Index("mcp_backtest_trades_pnl_idx", "pnl"),
1171 |         Index("mcp_backtest_trades_backtest_entry_idx", "backtest_id", "entry_date"),
1172 |     )
1173 | 
1174 |     trade_id = Column(Uuid, primary_key=True, default=uuid.uuid4)
1175 |     backtest_id = Column(
1176 |         Uuid, ForeignKey("mcp_backtest_results.backtest_id"), nullable=False
1177 |     )
1178 | 
1179 |     # Trade identification
1180 |     trade_number = Column(
1181 |         Integer, nullable=False
1182 |     )  # Sequential trade number in backtest
1183 | 
1184 |     # Entry details
1185 |     entry_date = Column(Date, nullable=False)
1186 |     entry_price = Column(Numeric(12, 4), nullable=False)
1187 |     entry_time = Column(DateTime(timezone=True))  # For intraday backtests
1188 | 
1189 |     # Exit details
1190 |     exit_date = Column(Date)
1191 |     exit_price = Column(Numeric(12, 4))
1192 |     exit_time = Column(DateTime(timezone=True))
1193 | 
1194 |     # Position details
1195 |     position_size = Column(Numeric(15, 2))  # Number of shares/units
1196 |     direction = Column(String(5), nullable=False)  # 'long' or 'short'
1197 | 
1198 |     # P&L and performance
1199 |     pnl = Column(Numeric(12, 4))  # Profit/Loss in currency
1200 |     pnl_percent = Column(Numeric(8, 4))  # P&L as percentage
1201 | 
1202 |     # Risk metrics for this trade
1203 |     mae = Column(Numeric(8, 4))  # Maximum Adverse Excursion
1204 |     mfe = Column(Numeric(8, 4))  # Maximum Favorable Excursion
1205 | 
1206 |     # Trade duration
1207 |     duration_days = Column(Integer)
1208 |     duration_hours = Column(Numeric(8, 2))  # For intraday precision
1209 | 
1210 |     # Exit reason and fees
1211 |     exit_reason = Column(String(50))  # stop_loss, take_profit, signal, time_exit
1212 |     fees_paid = Column(Numeric(10, 4), default=0)
1213 |     slippage_cost = Column(Numeric(10, 4), default=0)
1214 | 
1215 |     # Relationships
1216 |     backtest_result = relationship(
1217 |         "BacktestResult", back_populates="trades", lazy="joined"
1218 |     )
1219 | 
1220 |     def __repr__(self):
1221 |         return (
1222 |             f"<BacktestTrade(id={self.trade_id}, backtest_id={self.backtest_id}, "
1223 |             f"pnl={self.pnl}, duration={self.duration_days}d)>"
1224 |         )
1225 | 
1226 |     @classmethod
1227 |     def get_trades_for_backtest(
1228 |         cls, session: Session, backtest_id: str
1229 |     ) -> Sequence[BacktestTrade]:
1230 |         """Get all trades for a specific backtest."""
1231 |         return (
1232 |             session.query(cls)
1233 |             .filter(cls.backtest_id == backtest_id)
1234 |             .order_by(cls.entry_date, cls.trade_number)
1235 |             .all()
1236 |         )
1237 | 
1238 |     @classmethod
1239 |     def get_winning_trades(
1240 |         cls, session: Session, backtest_id: str
1241 |     ) -> Sequence[BacktestTrade]:
1242 |         """Get winning trades for a backtest."""
1243 |         return (
1244 |             session.query(cls)
1245 |             .filter(cls.backtest_id == backtest_id, cls.pnl > 0)
1246 |             .order_by(cls.pnl.desc())
1247 |             .all()
1248 |         )
1249 | 
1250 |     @classmethod
1251 |     def get_losing_trades(
1252 |         cls, session: Session, backtest_id: str
1253 |     ) -> Sequence[BacktestTrade]:
1254 |         """Get losing trades for a backtest."""
1255 |         return (
1256 |             session.query(cls)
1257 |             .filter(cls.backtest_id == backtest_id, cls.pnl < 0)
1258 |             .order_by(cls.pnl)
1259 |             .all()
1260 |         )
1261 | 
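# Illustrative usage sketch (assumes `backtest_id` refers to an existing run):
#
#     trades = BacktestTrade.get_trades_for_backtest(session, backtest_id)
#     winners = BacktestTrade.get_winning_trades(session, backtest_id)
#     win_rate = len(winners) / len(trades) if trades else 0.0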
1262 | 
1263 | class OptimizationResult(Base, TimestampMixin):
1264 |     """Parameter optimization results for strategies."""
1265 | 
1266 |     __tablename__ = "mcp_optimization_results"
1267 |     __table_args__ = (
1268 |         Index("mcp_optimization_results_backtest_idx", "backtest_id"),
1269 |         Index("mcp_optimization_results_param_set_idx", "parameter_set"),
1270 |         Index("mcp_optimization_results_objective_idx", "objective_value"),
1271 |     )
1272 | 
1273 |     optimization_id = Column(Uuid, primary_key=True, default=uuid.uuid4)
1274 |     backtest_id = Column(
1275 |         Uuid, ForeignKey("mcp_backtest_results.backtest_id"), nullable=False
1276 |     )
1277 | 
1278 |     # Optimization metadata
1279 |     optimization_date = Column(
1280 |         DateTime(timezone=True), default=lambda: datetime.now(UTC)
1281 |     )
1282 |     parameter_set = Column(Integer, nullable=False)  # Set number in optimization run
1283 | 
1284 |     # Parameters tested (JSON for flexibility)
1285 |     parameters = Column(JSON, nullable=False)
1286 | 
1287 |     # Optimization objective and results
1288 |     objective_function = Column(
1289 |         String(50)
1290 |     )  # sharpe_ratio, total_return, profit_factor, etc.
1291 |     objective_value = Column(Numeric(12, 6))  # Value of objective function
1292 | 
1293 |     # Key metrics for this parameter set
1294 |     total_return = Column(Numeric(10, 4))
1295 |     sharpe_ratio = Column(Numeric(8, 4))
1296 |     max_drawdown = Column(Numeric(8, 4))
1297 |     win_rate = Column(Numeric(5, 4))
1298 |     profit_factor = Column(Numeric(8, 4))
1299 |     total_trades = Column(Integer)
1300 | 
1301 |     # Ranking within optimization
1302 |     rank = Column(Integer)  # 1 = best, 2 = second best, etc.
1303 | 
1304 |     # Statistical significance
1305 |     is_statistically_significant = Column(Boolean, default=False)
1306 |     p_value = Column(Numeric(8, 6))  # Statistical significance test result
1307 | 
1308 |     # Relationships
1309 |     backtest_result = relationship(
1310 |         "BacktestResult", back_populates="optimization_results", lazy="joined"
1311 |     )
1312 | 
1313 |     def __repr__(self):
1314 |         return (
1315 |             f"<OptimizationResult(id={self.optimization_id}, "
1316 |             f"objective={self.objective_value}, rank={self.rank})>"
1317 |         )
1318 | 
1319 |     @classmethod
1320 |     def get_best_parameters(
1321 |         cls, session: Session, backtest_id: str, limit: int = 5
1322 |     ) -> Sequence[OptimizationResult]:
1323 |         """Get top performing parameter sets for a backtest."""
1324 |         return (
1325 |             session.query(cls)
1326 |             .filter(cls.backtest_id == backtest_id)
1327 |             .order_by(cls.rank)
1328 |             .limit(limit)
1329 |             .all()
1330 |         )
1331 | 
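# Illustrative usage sketch (assumes optimization rows exist for `backtest_id`):
#
#     best = OptimizationResult.get_best_parameters(session, backtest_id, limit=3)
#     for result in best:
#         print(result.rank, result.parameters, float(result.objective_value))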
1332 | 
1333 | class WalkForwardTest(Base, TimestampMixin):
1334 |     """Walk-forward validation test results."""
1335 | 
1336 |     __tablename__ = "mcp_walk_forward_tests"
1337 |     __table_args__ = (
1338 |         Index("mcp_walk_forward_tests_parent_idx", "parent_backtest_id"),
1339 |         Index("mcp_walk_forward_tests_period_idx", "test_period_start"),
1340 |         Index("mcp_walk_forward_tests_performance_idx", "out_of_sample_return"),
1341 |     )
1342 | 
1343 |     walk_forward_id = Column(Uuid, primary_key=True, default=uuid.uuid4)
1344 |     parent_backtest_id = Column(
1345 |         Uuid, ForeignKey("mcp_backtest_results.backtest_id"), nullable=False
1346 |     )
1347 | 
1348 |     # Test configuration
1349 |     test_date = Column(DateTime(timezone=True), default=lambda: datetime.now(UTC))
1350 |     window_size_months = Column(Integer, nullable=False)  # Training window size
1351 |     step_size_months = Column(Integer, nullable=False)  # Step size for walking forward
1352 | 
1353 |     # Time periods
1354 |     training_start = Column(Date, nullable=False)
1355 |     training_end = Column(Date, nullable=False)
1356 |     test_period_start = Column(Date, nullable=False)
1357 |     test_period_end = Column(Date, nullable=False)
1358 | 
1359 |     # Optimization results from training period
1360 |     optimal_parameters = Column(JSON)  # Best parameters from training
1361 |     training_performance = Column(Numeric(10, 4))  # Training period return
1362 | 
1363 |     # Out-of-sample test results
1364 |     out_of_sample_return = Column(Numeric(10, 4))
1365 |     out_of_sample_sharpe = Column(Numeric(8, 4))
1366 |     out_of_sample_drawdown = Column(Numeric(8, 4))
1367 |     out_of_sample_trades = Column(Integer)
1368 | 
1369 |     # Performance vs training expectations
1370 |     performance_ratio = Column(Numeric(8, 4))  # Out-of-sample return / training return
1371 |     degradation_factor = Column(Numeric(8, 4))  # How much performance degraded
1372 | 
1373 |     # Statistical validation
1374 |     is_profitable = Column(Boolean)
1375 |     is_statistically_significant = Column(Boolean, default=False)
1376 | 
1377 |     # Relationships
1378 |     parent_backtest = relationship(
1379 |         "BacktestResult", foreign_keys=[parent_backtest_id], lazy="joined"
1380 |     )
1381 | 
1382 |     def __repr__(self):
1383 |         return (
1384 |             f"<WalkForwardTest(id={self.walk_forward_id}, "
1385 |             f"return={self.out_of_sample_return}, ratio={self.performance_ratio})>"
1386 |         )
1387 | 
1388 |     @classmethod
1389 |     def get_walk_forward_results(
1390 |         cls, session: Session, parent_backtest_id: str
1391 |     ) -> Sequence[WalkForwardTest]:
1392 |         """Get all walk-forward test results for a backtest."""
1393 |         return (
1394 |             session.query(cls)
1395 |             .filter(cls.parent_backtest_id == parent_backtest_id)
1396 |             .order_by(cls.test_period_start)
1397 |             .all()
1398 |         )
1399 | 
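# Illustrative usage sketch: average out-of-sample degradation across windows,
# a rough robustness check (assumes walk-forward rows exist for `parent_id`):
#
#     windows = WalkForwardTest.get_walk_forward_results(session, parent_id)
#     ratios = [float(w.performance_ratio) for w in windows if w.performance_ratio]
#     avg_ratio = sum(ratios) / len(ratios) if ratios else None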
1400 | 
1401 | class BacktestPortfolio(Base, TimestampMixin):
1402 |     """Portfolio-level backtests with multiple symbols."""
1403 | 
1404 |     __tablename__ = "mcp_backtest_portfolios"
1405 |     __table_args__ = (
1406 |         Index("mcp_backtest_portfolios_name_idx", "portfolio_name"),
1407 |         Index("mcp_backtest_portfolios_date_idx", "backtest_date"),
1408 |         Index("mcp_backtest_portfolios_return_idx", "total_return"),
1409 |     )
1410 | 
1411 |     portfolio_backtest_id = Column(Uuid, primary_key=True, default=uuid.uuid4)
1412 | 
1413 |     # Portfolio identification
1414 |     portfolio_name = Column(String(100), nullable=False)
1415 |     description = Column(Text)
1416 | 
1417 |     # Test metadata
1418 |     backtest_date = Column(DateTime(timezone=True), default=lambda: datetime.now(UTC))
1419 |     start_date = Column(Date, nullable=False)
1420 |     end_date = Column(Date, nullable=False)
1421 | 
1422 |     # Portfolio composition
1423 |     symbols = Column(JSON, nullable=False)  # List of symbols
1424 |     weights = Column(JSON)  # Portfolio weights (if not equal weight)
1425 |     rebalance_frequency = Column(String(20))  # daily, weekly, monthly, quarterly
1426 | 
1427 |     # Portfolio parameters
1428 |     initial_capital = Column(Numeric(15, 2), default=100000.0)
1429 |     max_positions = Column(Integer)  # Maximum concurrent positions
1430 |     position_sizing_method = Column(
1431 |         String(50)
1432 |     )  # equal_weight, volatility_weighted, etc.
1433 | 
1434 |     # Risk management
1435 |     portfolio_stop_loss = Column(Numeric(6, 4))  # Portfolio-level stop loss
1436 |     max_sector_allocation = Column(Numeric(5, 4))  # Maximum allocation per sector
1437 |     correlation_threshold = Column(
1438 |         Numeric(5, 4)
1439 |     )  # Maximum correlation between holdings
1440 | 
1441 |     # Performance metrics (portfolio level)
1442 |     total_return = Column(Numeric(10, 4))
1443 |     annualized_return = Column(Numeric(10, 4))
1444 |     sharpe_ratio = Column(Numeric(8, 4))
1445 |     sortino_ratio = Column(Numeric(8, 4))
1446 |     max_drawdown = Column(Numeric(8, 4))
1447 |     volatility = Column(Numeric(8, 4))
1448 | 
1449 |     # Portfolio-specific metrics
1450 |     diversification_ratio = Column(Numeric(8, 4))  # Portfolio vol / Weighted avg vol
1451 |     concentration_index = Column(Numeric(8, 4))  # Herfindahl index
1452 |     turnover_rate = Column(Numeric(8, 4))  # Portfolio turnover
1453 | 
1454 |     # Individual component backtests (JSON references)
1455 |     component_backtest_ids = Column(JSON)  # List of individual backtest IDs
1456 | 
1457 |     # Time series data
1458 |     portfolio_equity_curve = Column(JSON)
1459 |     portfolio_weights_history = Column(JSON)  # Historical weights over time
1460 | 
1461 |     # Status
1462 |     status = Column(String(20), default="completed")
1463 |     notes = Column(Text)
1464 | 
1465 |     def __repr__(self):
1466 |         return (
1467 |             f"<BacktestPortfolio(id={self.portfolio_backtest_id}, "
1468 |             f"name={self.portfolio_name}, return={self.total_return})>"
1469 |         )
1470 | 
1471 |     @classmethod
1472 |     def get_portfolio_backtests(
1473 |         cls, session: Session, portfolio_name: str | None = None, limit: int = 10
1474 |     ) -> Sequence[BacktestPortfolio]:
1475 |         """Get portfolio backtests, optionally filtered by name."""
1476 |         query = session.query(cls).order_by(cls.backtest_date.desc())
1477 |         if portfolio_name:
1478 |             query = query.filter(cls.portfolio_name == portfolio_name)
1479 |         return query.limit(limit).all()
1480 | 
1481 |     def to_dict(self) -> dict:
1482 |         """Convert to dictionary for JSON serialization."""
1483 |         return {
1484 |             "portfolio_backtest_id": str(self.portfolio_backtest_id),
1485 |             "portfolio_name": self.portfolio_name,
1486 |             "symbols": self.symbols,
1487 |             "start_date": self.start_date.isoformat() if self.start_date else None,
1488 |             "end_date": self.end_date.isoformat() if self.end_date else None,
1489 |             "total_return": float(self.total_return) if self.total_return else 0,
1490 |             "sharpe_ratio": float(self.sharpe_ratio) if self.sharpe_ratio else 0,
1491 |             "max_drawdown": float(self.max_drawdown) if self.max_drawdown else 0,
1492 |             "status": self.status,
1493 |         }
1494 | 
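# Illustrative usage sketch (the portfolio name is an example only):
#
#     recent = BacktestPortfolio.get_portfolio_backtests(
#         session, portfolio_name="Tech Momentum", limit=5
#     )
#     payload = [p.to_dict() for p in recent]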
1495 | 
1496 | # Helper functions for working with the models
1497 | def bulk_insert_price_data(
1498 |     session: Session, ticker_symbol: str, df: pd.DataFrame
1499 | ) -> int:
1500 |     """
1501 |     Bulk insert price data from a DataFrame.
1502 | 
1503 |     Args:
1504 |         session: Database session
1505 |         ticker_symbol: Stock ticker symbol
1506 |         df: DataFrame with OHLCV data (must have date index)
1507 | 
1508 |     Returns:
1509 |         Number of records inserted (or would be inserted)
1510 |     """
1511 |     if df.empty:
1512 |         return 0
1513 | 
1514 |     # Get or create stock
1515 |     stock = Stock.get_or_create(session, ticker_symbol)
1516 | 
1517 |     # Determine which dates already exist in the cache so duplicates are
1518 |     # skipped; handle both DatetimeIndex and plain date index values.
1519 |     if hasattr(df.index[0], "date"):
1520 |         dates_to_check = [d.date() for d in df.index]
1521 |     else:
1522 |         dates_to_check = list(df.index)
1523 | 
1524 |     existing_query = session.query(PriceCache.date).filter(
1525 |         PriceCache.stock_id == stock.stock_id, PriceCache.date.in_(dates_to_check)
1526 |     )
1527 |     existing_dates = {row[0] for row in existing_query.all()}
1528 | 
1529 |     # Prepare data for bulk insert
1530 |     records = []
1531 |     new_count = 0
1532 |     for date_idx, row in df.iterrows():
1533 |         # Handle different index types - datetime index vs date index
1534 |         if hasattr(date_idx, "date") and callable(date_idx.date):
1535 |             date_val = date_idx.date()  # type: ignore[attr-defined]
1536 |         elif hasattr(date_idx, "to_pydatetime") and callable(date_idx.to_pydatetime):
1537 |             date_val = date_idx.to_pydatetime().date()  # type: ignore[attr-defined]
1538 |         else:
1539 |             # Assume it's already a date-like object
1540 |             date_val = date_idx
1541 | 
1542 |         # Skip if already exists
1543 |         if date_val in existing_dates:
1544 |             continue
1545 | 
1546 |         new_count += 1
1547 | 
1548 |         # Handle both lowercase and capitalized column names from yfinance
1549 |         open_val = row.get("open", row.get("Open", 0))
1550 |         high_val = row.get("high", row.get("High", 0))
1551 |         low_val = row.get("low", row.get("Low", 0))
1552 |         close_val = row.get("close", row.get("Close", 0))
1553 |         volume_val = row.get("volume", row.get("Volume", 0))
1554 | 
1555 |         # Handle None values
1556 |         if volume_val is None:
1557 |             volume_val = 0
1558 | 
1559 |         records.append(
1560 |             {
1561 |                 "stock_id": stock.stock_id,
1562 |                 "date": date_val,
1563 |                 "open_price": Decimal(str(open_val)),
1564 |                 "high_price": Decimal(str(high_val)),
1565 |                 "low_price": Decimal(str(low_val)),
1566 |                 "close_price": Decimal(str(close_val)),
1567 |                 "volume": int(volume_val),
1568 |                 "created_at": datetime.now(UTC),
1569 |                 "updated_at": datetime.now(UTC),
1570 |             }
1571 |         )
1572 | 
1573 |     # Only insert if there are new records
1574 |     if records:
1575 |         # Use database-specific upsert logic
1576 |         if "postgresql" in DATABASE_URL:
1577 |             from sqlalchemy.dialects.postgresql import insert
1578 | 
1579 |             stmt = insert(PriceCache).values(records)
1580 |             stmt = stmt.on_conflict_do_nothing(index_elements=["stock_id", "date"])
1581 |         else:
1582 |             # For SQLite, use INSERT OR IGNORE
1583 |             from sqlalchemy import insert
1584 | 
1585 |             stmt = insert(PriceCache).values(records)
1586 |             # SQLite doesn't support on_conflict_do_nothing, use INSERT OR IGNORE
1587 |             stmt = stmt.prefix_with("OR IGNORE")
1588 | 
1589 |         result = session.execute(stmt)
1590 |         session.commit()
1591 | 
1592 |         # Log if rowcount differs from expected
1593 |         if result.rowcount != new_count:
1594 |             logger.warning(
1595 |                 f"Expected to insert {new_count} records but rowcount was {result.rowcount}"
1596 |             )
1597 | 
1598 |         return result.rowcount
1599 |     else:
1600 |         logger.debug(
1601 |             f"All {len(df)} records already exist in cache for {ticker_symbol}"
1602 |         )
1603 |         return 0
1604 | 
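# Illustrative usage sketch (assumes a yfinance-style OHLCV DataFrame with a
# DatetimeIndex; dates already present are skipped by the upsert logic above):
#
#     import yfinance as yf
#     df = yf.download("AAPL", start="2024-01-01", end="2024-06-30")
#     inserted = bulk_insert_price_data(session, "AAPL", df)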
1605 | 
1606 | def get_latest_maverick_screening(days_back: int = 1) -> dict:
1607 |     """Get latest screening results from all maverick tables."""
1608 |     with SessionLocal() as session:
1609 |         results = {
1610 |             "maverick_stocks": [
1611 |                 stock.to_dict()
1612 |                 for stock in MaverickStocks.get_latest_analysis(
1613 |                     session, days_back=days_back
1614 |                 )
1615 |             ],
1616 |             "maverick_bear_stocks": [
1617 |                 stock.to_dict()
1618 |                 for stock in MaverickBearStocks.get_latest_analysis(
1619 |                     session, days_back=days_back
1620 |                 )
1621 |             ],
1622 |             "supply_demand_breakouts": [
1623 |                 stock.to_dict()
1624 |                 for stock in SupplyDemandBreakoutStocks.get_latest_analysis(
1625 |                     session, days_back=days_back
1626 |                 )
1627 |             ],
1628 |         }
1629 | 
1630 |     return results
1631 | 
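# Illustrative usage sketch (this helper opens its own session via SessionLocal):
#
#     screening = get_latest_maverick_screening(days_back=3)
#     bullish = screening["maverick_stocks"]
#     bearish = screening["maverick_bear_stocks"]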
1632 | 
1633 | def bulk_insert_screening_data(
1634 |     session: Session,
1635 |     model_class,
1636 |     screening_data: list[dict],
1637 |     date_analyzed: date | None = None,
1638 | ) -> int:
1639 |     """
1640 |     Bulk insert screening data for any screening model.
1641 | 
1642 |     Args:
1643 |         session: Database session
1644 |         model_class: The screening model class (MaverickStocks, etc.)
1645 |         screening_data: List of screening result dictionaries
1646 |         date_analyzed: Date of analysis (default: today)
1647 | 
1648 |     Returns:
1649 |         Number of records inserted
1650 |     """
1651 |     if not screening_data:
1652 |         return 0
1653 | 
1654 |     if date_analyzed is None:
1655 |         date_analyzed = datetime.now(UTC).date()
1656 | 
1657 |     # Remove existing data for this date
1658 |     session.query(model_class).filter(
1659 |         model_class.date_analyzed == date_analyzed
1660 |     ).delete()
1661 | 
1662 |     inserted_count = 0
1663 |     for data in screening_data:
1664 |         # Get or create stock
1665 |         ticker = data.get("ticker") or data.get("symbol")
1666 |         if not ticker:
1667 |             continue
1668 | 
1669 |         stock = Stock.get_or_create(session, ticker)
1670 | 
1671 |         # Create screening record
1672 |         record_data = {
1673 |             "stock_id": stock.stock_id,
1674 |             "date_analyzed": date_analyzed,
1675 |         }
1676 | 
1677 |         # Map common fields
1678 |         field_mapping = {
1679 |             "open": "open_price",
1680 |             "high": "high_price",
1681 |             "low": "low_price",
1682 |             "close": "close_price",
1683 |             "pat": "pattern_type",
1684 |             "sqz": "squeeze_status",
1685 |             "vcp": "consolidation_status",
1686 |             "entry": "entry_signal",
1687 |         }
1688 | 
1689 |         for key, value in data.items():
1690 |             if key in ["ticker", "symbol"]:
1691 |                 continue
1692 |             mapped_key = field_mapping.get(key, key)
1693 |             if hasattr(model_class, mapped_key):
1694 |                 record_data[mapped_key] = value
1695 | 
1696 |         record = model_class(**record_data)
1697 |         session.add(record)
1698 |         inserted_count += 1
1699 | 
1700 |     session.commit()
1701 |     return inserted_count
1702 | 
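# Illustrative usage sketch (values are examples only; note how abbreviated
# keys such as "pat" and "sqz" are remapped to column names via field_mapping):
#
#     rows = [{"ticker": "AAPL", "close": 189.5, "pat": "Cup", "sqz": "firing"}]
#     count = bulk_insert_screening_data(session, MaverickStocks, rows)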
1703 | 
1704 | # ============================================================================
1705 | # Portfolio Management Models
1706 | # ============================================================================
1707 | 
1708 | 
1709 | class UserPortfolio(TimestampMixin, Base):
1710 |     """
1711 |     User portfolio for tracking investment holdings.
1712 | 
1713 |     Follows personal-use design with single user_id="default" for the personal
1714 |     MaverickMCP server. Stores portfolio metadata and relationships to positions.
1715 | 
1716 |     Attributes:
1717 |         id: Unique portfolio identifier (UUID)
1718 |         user_id: User identifier (default: "default" for single-user)
1719 |         name: Portfolio display name
1720 |         positions: Relationship to PortfolioPosition records
1721 |     """
1722 | 
1723 |     __tablename__ = "mcp_portfolios"
1724 | 
1725 |     id = Column(Uuid, primary_key=True, default=uuid.uuid4)
1726 |     user_id = Column(String(100), nullable=False, default="default", index=True)
1727 |     name = Column(String(200), nullable=False, default="My Portfolio")
1728 | 
1729 |     # Relationships
1730 |     positions = relationship(
1731 |         "PortfolioPosition",
1732 |         back_populates="portfolio",
1733 |         cascade="all, delete-orphan",
1734 |         lazy="selectin",  # Efficient loading
1735 |     )
1736 | 
1737 |     # Indexes for queries
1738 |     __table_args__ = (
1739 |         Index("idx_portfolio_user", "user_id"),
1740 |         UniqueConstraint("user_id", "name", name="uq_user_portfolio_name"),
1741 |     )
1742 | 
1743 |     def __repr__(self):
1744 |         return f"<UserPortfolio(id={self.id}, name='{self.name}', positions={len(self.positions)})>"
1745 | 
1746 | 
1747 | class PortfolioPosition(TimestampMixin, Base):
1748 |     """
1749 |     Individual position within a portfolio with cost basis tracking.
1750 | 
1751 |     Stores position details with high-precision Decimal types for financial accuracy.
1752 |     Uses average cost basis method for educational simplicity.
1753 | 
1754 |     Attributes:
1755 |         id: Unique position identifier (UUID)
1756 |         portfolio_id: Foreign key to parent portfolio
1757 |         ticker: Stock ticker symbol (e.g., "AAPL")
1758 |         shares: Number of shares owned (supports fractional shares)
1759 |         average_cost_basis: Average cost per share
1760 |         total_cost: Total capital invested (shares × average_cost_basis)
1761 |         purchase_date: Earliest purchase date for this position
1762 |         notes: Optional user notes about the position
1763 |     """
1764 | 
1765 |     __tablename__ = "mcp_portfolio_positions"
1766 | 
1767 |     id = Column(Uuid, primary_key=True, default=uuid.uuid4)
1768 |     portfolio_id = Column(
1769 |         Uuid, ForeignKey("mcp_portfolios.id", ondelete="CASCADE"), nullable=False
1770 |     )
1771 | 
1772 |     # Position details with financial precision
1773 |     ticker = Column(String(20), nullable=False, index=True)
1774 |     shares = Column(
1775 |         Numeric(20, 8), nullable=False
1776 |     )  # High precision for fractional shares
1777 |     average_cost_basis = Column(
1778 |         Numeric(12, 4), nullable=False
1779 |     )  # 4 decimal places (cents)
1780 |     total_cost = Column(Numeric(20, 4), nullable=False)  # Total capital invested
1781 |     purchase_date = Column(DateTime(timezone=True), nullable=False)  # Earliest purchase
1782 |     notes = Column(Text, nullable=True)  # Optional user notes
1783 | 
1784 |     # Relationships
1785 |     portfolio = relationship("UserPortfolio", back_populates="positions")
1786 | 
1787 |     # Indexes for efficient queries
1788 |     __table_args__ = (
1789 |         Index("idx_position_portfolio", "portfolio_id"),
1790 |         Index("idx_position_ticker", "ticker"),
1791 |         Index("idx_position_portfolio_ticker", "portfolio_id", "ticker"),
1792 |         UniqueConstraint("portfolio_id", "ticker", name="uq_portfolio_position_ticker"),
1793 |     )
1794 | 
1795 |     def __repr__(self):
1796 |         return f"<PortfolioPosition(ticker='{self.ticker}', shares={self.shares}, cost_basis={self.average_cost_basis})>"
1797 | 
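# Illustrative usage sketch: create the default portfolio and add a position
# (assumes an open `session`; Decimal keeps the financial precision intact):
#
#     portfolio = UserPortfolio(user_id="default", name="My Portfolio")
#     portfolio.positions.append(
#         PortfolioPosition(
#             ticker="AAPL",
#             shares=Decimal("10.5"),
#             average_cost_basis=Decimal("185.2500"),
#             total_cost=Decimal("1945.1250"),  # 10.5 shares x 185.25
#             purchase_date=datetime.now(UTC),
#         )
#     )
#     session.add(portfolio)
#     session.commit()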
1798 | 
1799 | # Auth models removed for personal use - no multi-user functionality needed
1800 | 
1801 | # Initialize tables when this module is run directly as a script
1802 | if __name__ == "__main__":
1803 |     logger.info("Creating database tables...")
1804 |     init_db()
1805 |     logger.info("Database tables created successfully!")
1806 | 
```