This is page 9 of 29. Use http://codebase.md/wshobson/maverick-mcp?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── question.md
│   │   └── security_report.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude-code-review.yml
│       └── claude.yml
├── .gitignore
├── .python-version
├── .vscode
│   ├── launch.json
│   └── settings.json
├── alembic
│   ├── env.py
│   ├── script.py.mako
│   └── versions
│       ├── 001_initial_schema.py
│       ├── 003_add_performance_indexes.py
│       ├── 006_rename_metadata_columns.py
│       ├── 008_performance_optimization_indexes.py
│       ├── 009_rename_to_supply_demand.py
│       ├── 010_self_contained_schema.py
│       ├── 011_remove_proprietary_terms.py
│       ├── 013_add_backtest_persistence_models.py
│       ├── 014_add_portfolio_models.py
│       ├── 08e3945a0c93_merge_heads.py
│       ├── 9374a5c9b679_merge_heads_for_testing.py
│       ├── abf9b9afb134_merge_multiple_heads.py
│       ├── adda6d3fd84b_merge_proprietary_terms_removal_with_.py
│       ├── e0c75b0bdadb_fix_financial_data_precision_only.py
│       ├── f0696e2cac15_add_essential_performance_indexes.py
│       └── fix_database_integrity_issues.py
├── alembic.ini
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DATABASE_SETUP.md
├── docker-compose.override.yml.example
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api
│   │   └── backtesting.md
│   ├── BACKTESTING.md
│   ├── COST_BASIS_SPECIFICATION.md
│   ├── deep_research_agent.md
│   ├── exa_research_testing_strategy.md
│   ├── PORTFOLIO_PERSONALIZATION_PLAN.md
│   ├── PORTFOLIO.md
│   ├── SETUP_SELF_CONTAINED.md
│   └── speed_testing_framework.md
├── examples
│   ├── complete_speed_validation.py
│   ├── deep_research_integration.py
│   ├── llm_optimization_example.py
│   ├── llm_speed_demo.py
│   ├── monitoring_example.py
│   ├── parallel_research_example.py
│   ├── speed_optimization_demo.py
│   └── timeout_fix_demonstration.py
├── LICENSE
├── Makefile
├── MANIFEST.in
├── maverick_mcp
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── circuit_breaker.py
│   │   ├── deep_research.py
│   │   ├── market_analysis.py
│   │   ├── optimized_research.py
│   │   ├── supervisor.py
│   │   └── technical_analysis.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── api_server.py
│   │   ├── connection_manager.py
│   │   ├── dependencies
│   │   │   ├── __init__.py
│   │   │   ├── stock_analysis.py
│   │   │   └── technical_analysis.py
│   │   ├── error_handling.py
│   │   ├── inspector_compatible_sse.py
│   │   ├── inspector_sse.py
│   │   ├── middleware
│   │   │   ├── error_handling.py
│   │   │   ├── mcp_logging.py
│   │   │   ├── rate_limiting_enhanced.py
│   │   │   └── security.py
│   │   ├── openapi_config.py
│   │   ├── routers
│   │   │   ├── __init__.py
│   │   │   ├── agents.py
│   │   │   ├── backtesting.py
│   │   │   ├── data_enhanced.py
│   │   │   ├── data.py
│   │   │   ├── health_enhanced.py
│   │   │   ├── health_tools.py
│   │   │   ├── health.py
│   │   │   ├── intelligent_backtesting.py
│   │   │   ├── introspection.py
│   │   │   ├── mcp_prompts.py
│   │   │   ├── monitoring.py
│   │   │   ├── news_sentiment_enhanced.py
│   │   │   ├── performance.py
│   │   │   ├── portfolio.py
│   │   │   ├── research.py
│   │   │   ├── screening_ddd.py
│   │   │   ├── screening_parallel.py
│   │   │   ├── screening.py
│   │   │   ├── technical_ddd.py
│   │   │   ├── technical_enhanced.py
│   │   │   ├── technical.py
│   │   │   └── tool_registry.py
│   │   ├── server.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   ├── base_service.py
│   │   │   ├── market_service.py
│   │   │   ├── portfolio_service.py
│   │   │   ├── prompt_service.py
│   │   │   └── resource_service.py
│   │   ├── simple_sse.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── insomnia_export.py
│   │       └── postman_export.py
│   ├── application
│   │   ├── __init__.py
│   │   ├── commands
│   │   │   └── __init__.py
│   │   ├── dto
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_dto.py
│   │   ├── queries
│   │   │   ├── __init__.py
│   │   │   └── get_technical_analysis.py
│   │   └── screening
│   │       ├── __init__.py
│   │       ├── dtos.py
│   │       └── queries.py
│   ├── backtesting
│   │   ├── __init__.py
│   │   ├── ab_testing.py
│   │   ├── analysis.py
│   │   ├── batch_processing_stub.py
│   │   ├── batch_processing.py
│   │   ├── model_manager.py
│   │   ├── optimization.py
│   │   ├── persistence.py
│   │   ├── retraining_pipeline.py
│   │   ├── strategies
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── ml
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adaptive.py
│   │   │   │   ├── ensemble.py
│   │   │   │   ├── feature_engineering.py
│   │   │   │   └── regime_aware.py
│   │   │   ├── ml_strategies.py
│   │   │   ├── parser.py
│   │   │   └── templates.py
│   │   ├── strategy_executor.py
│   │   ├── vectorbt_engine.py
│   │   └── visualization.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── constants.py
│   │   ├── database_self_contained.py
│   │   ├── database.py
│   │   ├── llm_optimization_config.py
│   │   ├── logging_settings.py
│   │   ├── plotly_config.py
│   │   ├── security_utils.py
│   │   ├── security.py
│   │   ├── settings.py
│   │   ├── technical_constants.py
│   │   ├── tool_estimation.py
│   │   └── validation.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── technical_analysis.py
│   │   └── visualization.py
│   ├── data
│   │   ├── __init__.py
│   │   ├── cache_manager.py
│   │   ├── cache.py
│   │   ├── django_adapter.py
│   │   ├── health.py
│   │   ├── models.py
│   │   ├── performance.py
│   │   ├── session_management.py
│   │   └── validation.py
│   ├── database
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── optimization.py
│   ├── dependencies.py
│   ├── domain
│   │   ├── __init__.py
│   │   ├── entities
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis.py
│   │   ├── events
│   │   │   └── __init__.py
│   │   ├── portfolio.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   ├── entities.py
│   │   │   ├── services.py
│   │   │   └── value_objects.py
│   │   ├── services
│   │   │   ├── __init__.py
│   │   │   └── technical_analysis_service.py
│   │   ├── stock_analysis
│   │   │   ├── __init__.py
│   │   │   └── stock_analysis_service.py
│   │   └── value_objects
│   │       ├── __init__.py
│   │       └── technical_indicators.py
│   ├── exceptions.py
│   ├── infrastructure
│   │   ├── __init__.py
│   │   ├── cache
│   │   │   └── __init__.py
│   │   ├── caching
│   │   │   ├── __init__.py
│   │   │   └── cache_management_service.py
│   │   ├── connection_manager.py
│   │   ├── data_fetching
│   │   │   ├── __init__.py
│   │   │   └── stock_data_service.py
│   │   ├── health
│   │   │   ├── __init__.py
│   │   │   └── health_checker.py
│   │   ├── persistence
│   │   │   ├── __init__.py
│   │   │   └── stock_repository.py
│   │   ├── providers
│   │   │   └── __init__.py
│   │   ├── screening
│   │   │   ├── __init__.py
│   │   │   └── repositories.py
│   │   └── sse_optimizer.py
│   ├── langchain_tools
│   │   ├── __init__.py
│   │   ├── adapters.py
│   │   └── registry.py
│   ├── logging_config.py
│   ├── memory
│   │   ├── __init__.py
│   │   └── stores.py
│   ├── monitoring
│   │   ├── __init__.py
│   │   ├── health_check.py
│   │   ├── health_monitor.py
│   │   ├── integration_example.py
│   │   ├── metrics.py
│   │   ├── middleware.py
│   │   └── status_dashboard.py
│   ├── providers
│   │   ├── __init__.py
│   │   ├── dependencies.py
│   │   ├── factories
│   │   │   ├── __init__.py
│   │   │   ├── config_factory.py
│   │   │   └── provider_factory.py
│   │   ├── implementations
│   │   │   ├── __init__.py
│   │   │   ├── cache_adapter.py
│   │   │   ├── macro_data_adapter.py
│   │   │   ├── market_data_adapter.py
│   │   │   ├── persistence_adapter.py
│   │   │   └── stock_data_adapter.py
│   │   ├── interfaces
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   ├── config.py
│   │   │   ├── macro_data.py
│   │   │   ├── market_data.py
│   │   │   ├── persistence.py
│   │   │   └── stock_data.py
│   │   ├── llm_factory.py
│   │   ├── macro_data.py
│   │   ├── market_data.py
│   │   ├── mocks
│   │   │   ├── __init__.py
│   │   │   ├── mock_cache.py
│   │   │   ├── mock_config.py
│   │   │   ├── mock_macro_data.py
│   │   │   ├── mock_market_data.py
│   │   │   ├── mock_persistence.py
│   │   │   └── mock_stock_data.py
│   │   ├── openrouter_provider.py
│   │   ├── optimized_screening.py
│   │   ├── optimized_stock_data.py
│   │   └── stock_data.py
│   ├── README.md
│   ├── tests
│   │   ├── __init__.py
│   │   ├── README_INMEMORY_TESTS.md
│   │   ├── test_cache_debug.py
│   │   ├── test_fixes_validation.py
│   │   ├── test_in_memory_routers.py
│   │   ├── test_in_memory_server.py
│   │   ├── test_macro_data_provider.py
│   │   ├── test_mailgun_email.py
│   │   ├── test_market_calendar_caching.py
│   │   ├── test_mcp_tool_fixes_pytest.py
│   │   ├── test_mcp_tool_fixes.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_models_functional.py
│   │   ├── test_server.py
│   │   ├── test_stock_data_enhanced.py
│   │   ├── test_stock_data_provider.py
│   │   └── test_technical_analysis.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── performance_monitoring.py
│   │   ├── portfolio_manager.py
│   │   ├── risk_management.py
│   │   └── sentiment_analysis.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── agent_errors.py
│   │   ├── batch_processing.py
│   │   ├── cache_warmer.py
│   │   ├── circuit_breaker_decorators.py
│   │   ├── circuit_breaker_services.py
│   │   ├── circuit_breaker.py
│   │   ├── data_chunking.py
│   │   ├── database_monitoring.py
│   │   ├── debug_utils.py
│   │   ├── fallback_strategies.py
│   │   ├── llm_optimization.py
│   │   ├── logging_example.py
│   │   ├── logging_init.py
│   │   ├── logging.py
│   │   ├── mcp_logging.py
│   │   ├── memory_profiler.py
│   │   ├── monitoring_middleware.py
│   │   ├── monitoring.py
│   │   ├── orchestration_logging.py
│   │   ├── parallel_research.py
│   │   ├── parallel_screening.py
│   │   ├── quick_cache.py
│   │   ├── resource_manager.py
│   │   ├── shutdown.py
│   │   ├── stock_helpers.py
│   │   ├── structured_logger.py
│   │   ├── tool_monitoring.py
│   │   ├── tracing.py
│   │   └── yfinance_pool.py
│   ├── validation
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── data.py
│   │   ├── middleware.py
│   │   ├── portfolio.py
│   │   ├── responses.py
│   │   ├── screening.py
│   │   └── technical.py
│   └── workflows
│       ├── __init__.py
│       ├── agents
│       │   ├── __init__.py
│       │   ├── market_analyzer.py
│       │   ├── optimizer_agent.py
│       │   ├── strategy_selector.py
│       │   └── validator_agent.py
│       ├── backtesting_workflow.py
│       └── state.py
├── PLANS.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── dev.sh
│   ├── INSTALLATION_GUIDE.md
│   ├── load_example.py
│   ├── load_market_data.py
│   ├── load_tiingo_data.py
│   ├── migrate_db.py
│   ├── README_TIINGO_LOADER.md
│   ├── requirements_tiingo.txt
│   ├── run_stock_screening.py
│   ├── run-migrations.sh
│   ├── seed_db.py
│   ├── seed_sp500.py
│   ├── setup_database.sh
│   ├── setup_self_contained.py
│   ├── setup_sp500_database.sh
│   ├── test_seeded_data.py
│   ├── test_tiingo_loader.py
│   ├── tiingo_config.py
│   └── validate_setup.py
├── SECURITY.md
├── server.json
├── setup.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── core
│   │   └── test_technical_analysis.py
│   ├── data
│   │   └── test_portfolio_models.py
│   ├── domain
│   │   ├── conftest.py
│   │   ├── test_portfolio_entities.py
│   │   └── test_technical_analysis_service.py
│   ├── fixtures
│   │   └── orchestration_fixtures.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── README.md
│   │   ├── run_integration_tests.sh
│   │   ├── test_api_technical.py
│   │   ├── test_chaos_engineering.py
│   │   ├── test_config_management.py
│   │   ├── test_full_backtest_workflow_advanced.py
│   │   ├── test_full_backtest_workflow.py
│   │   ├── test_high_volume.py
│   │   ├── test_mcp_tools.py
│   │   ├── test_orchestration_complete.py
│   │   ├── test_portfolio_persistence.py
│   │   ├── test_redis_cache.py
│   │   ├── test_security_integration.py.disabled
│   │   └── vcr_setup.py
│   ├── performance
│   │   ├── __init__.py
│   │   ├── test_benchmarks.py
│   │   ├── test_load.py
│   │   ├── test_profiling.py
│   │   └── test_stress.py
│   ├── providers
│   │   └── test_stock_data_simple.py
│   ├── README.md
│   ├── test_agents_router_mcp.py
│   ├── test_backtest_persistence.py
│   ├── test_cache_management_service.py
│   ├── test_cache_serialization.py
│   ├── test_circuit_breaker.py
│   ├── test_database_pool_config_simple.py
│   ├── test_database_pool_config.py
│   ├── test_deep_research_functional.py
│   ├── test_deep_research_integration.py
│   ├── test_deep_research_parallel_execution.py
│   ├── test_error_handling.py
│   ├── test_event_loop_integrity.py
│   ├── test_exa_research_integration.py
│   ├── test_exception_hierarchy.py
│   ├── test_financial_search.py
│   ├── test_graceful_shutdown.py
│   ├── test_integration_simple.py
│   ├── test_langgraph_workflow.py
│   ├── test_market_data_async.py
│   ├── test_market_data_simple.py
│   ├── test_mcp_orchestration_functional.py
│   ├── test_ml_strategies.py
│   ├── test_optimized_research_agent.py
│   ├── test_orchestration_integration.py
│   ├── test_orchestration_logging.py
│   ├── test_orchestration_tools_simple.py
│   ├── test_parallel_research_integration.py
│   ├── test_parallel_research_orchestrator.py
│   ├── test_parallel_research_performance.py
│   ├── test_performance_optimizations.py
│   ├── test_production_validation.py
│   ├── test_provider_architecture.py
│   ├── test_rate_limiting_enhanced.py
│   ├── test_runner_validation.py
│   ├── test_security_comprehensive.py.disabled
│   ├── test_security_cors.py
│   ├── test_security_enhancements.py.disabled
│   ├── test_security_headers.py
│   ├── test_security_penetration.py
│   ├── test_session_management.py
│   ├── test_speed_optimization_validation.py
│   ├── test_stock_analysis_dependencies.py
│   ├── test_stock_analysis_service.py
│   ├── test_stock_data_fetching_service.py
│   ├── test_supervisor_agent.py
│   ├── test_supervisor_functional.py
│   ├── test_tool_estimation_config.py
│   ├── test_visualization.py
│   └── utils
│       ├── test_agent_errors.py
│       ├── test_logging.py
│       ├── test_parallel_screening.py
│       └── test_quick_cache.py
├── tools
│   ├── check_orchestration_config.py
│   ├── experiments
│   │   ├── validation_examples.py
│   │   └── validation_fixed.py
│   ├── fast_dev.sh
│   ├── hot_reload.py
│   ├── quick_test.py
│   └── templates
│       ├── new_router_template.py
│       ├── new_tool_template.py
│       ├── screening_strategy_template.py
│       └── test_template.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/maverick_mcp/providers/optimized_screening.py:
--------------------------------------------------------------------------------

```python
"""
Optimized screening operations with eager loading and batch processing.

This module demonstrates proper eager loading patterns and optimizations
for database queries to prevent N+1 query issues.
"""

from datetime import datetime, timedelta
from typing import Any

from sqlalchemy import and_
from sqlalchemy.orm import Session, selectinload

from maverick_mcp.data.models import (
    MaverickBearStocks,
    MaverickStocks,
    PriceCache,
    Stock,
    SupplyDemandBreakoutStocks,
)
from maverick_mcp.data.session_management import get_db_session
from maverick_mcp.utils.logging import get_logger

logger = get_logger(__name__)


class OptimizedScreeningProvider:
    """
    Optimized screening provider that demonstrates proper eager loading
    and batch operations to prevent N+1 queries.
    """

    def __init__(self, session: Session | None = None):
        """Initialize with optional database session."""
        self._session = session

    def _get_session(self) -> tuple[Session, bool]:
        """Get database session and whether it should be closed."""
        if self._session:
            return self._session, False
        else:
            return next(get_db_session()), True

    def get_enhanced_maverick_recommendations(
        self,
        limit: int = 20,
        min_score: int | None = None,
        include_stock_details: bool = True,
    ) -> list[dict[str, Any]]:
        """
        Get Maverick recommendations with optional stock details using eager loading.

        This demonstrates proper eager loading to prevent N+1 queries when
        accessing related Stock model data.

        Args:
            limit: Maximum number of recommendations
            min_score: Minimum combined score filter
            include_stock_details: Whether to include full stock details (requires joins)

        Returns:
            List of stock recommendations with enhanced details
        """
        session, should_close = self._get_session()
        try:
            if include_stock_details:
                # Example of proper eager loading if there were relationships
                # This would prevent N+1 queries when accessing stock details
                query = (
                    session.query(MaverickStocks, Stock)
                    # If MaverickStocks had a foreign-key relationship to Stock,
                    # we would use .options(joinedload(MaverickStocks.stock_details)).
                    # Since it doesn't, join manually and select both entities so
                    # each result row unpacks as a (MaverickStocks, Stock) tuple.
                    .join(Stock, Stock.ticker_symbol == MaverickStocks.stock)
                    .options(
                        # Eager load recent price data to prevent N+1 queries
                        selectinload(
                            Stock.price_caches.and_(
                                PriceCache.date >= datetime.now() - timedelta(days=30)
                            )
                        )
                    )
                )
            else:
                # Simple query without joins for basic screening
                query = session.query(MaverickStocks)

            # Apply filters
            if min_score is not None:
                query = query.filter(MaverickStocks.combined_score >= min_score)

            # Execute query with limit
            if include_stock_details:
                results = (
                    query.order_by(MaverickStocks.combined_score.desc())
                    .limit(limit)
                    .all()
                )
                stocks = [(maverick_stock, stock) for maverick_stock, stock in results]
            else:
                stocks = (
                    query.order_by(MaverickStocks.combined_score.desc())
                    .limit(limit)
                    .all()
                )

            # Process results efficiently
            recommendations = []
            for item in stocks:
                if include_stock_details:
                    maverick_stock, stock_details = item
                    rec = {
                        **maverick_stock.to_dict(),
                        "recommendation_type": "maverick_bullish",
                        "reason": self._generate_reason(maverick_stock),
                        # Enhanced details from Stock model
                        "company_name": stock_details.company_name,
                        "sector": stock_details.sector,
                        "industry": stock_details.industry,
                        "exchange": stock_details.exchange,
                        # Recent price data (already eager loaded)
                        "recent_prices": [
                            {
                                "date": pc.date.isoformat(),
                                "close": pc.close_price,
                                "volume": pc.volume,
                            }
                            for pc in stock_details.price_caches[-5:]  # Last 5 days
                        ]
                        if stock_details.price_caches
                        else [],
                    }
                else:
                    rec = {
                        **item.to_dict(),
                        "recommendation_type": "maverick_bullish",
                        "reason": self._generate_reason(item),
                    }
                recommendations.append(rec)

            return recommendations

        except Exception as e:
            logger.error(f"Error getting enhanced maverick recommendations: {e}")
            return []
        finally:
            if should_close:
                session.close()

    def get_batch_stock_details(self, symbols: list[str]) -> dict[str, dict[str, Any]]:
        """
        Get stock details for multiple symbols efficiently with batch query.

        This demonstrates how to avoid N+1 queries when fetching details
        for multiple stocks by using a single batch query.

        Args:
            symbols: List of stock symbols

        Returns:
            Dictionary mapping symbols to their details
        """
        session, should_close = self._get_session()
        try:
            # Single query to get all stock details with eager loading
            stocks = (
                session.query(Stock)
                .options(
                    # Eager load price caches to prevent N+1 queries
                    selectinload(
                        Stock.price_caches.and_(
                            PriceCache.date >= datetime.now() - timedelta(days=30)
                        )
                    )
                )
                .filter(Stock.ticker_symbol.in_(symbols))
                .all()
            )

            # Build result dictionary
            result = {}
            for stock in stocks:
                result[stock.ticker_symbol] = {
                    "company_name": stock.company_name,
                    "sector": stock.sector,
                    "industry": stock.industry,
                    "exchange": stock.exchange,
                    "country": stock.country,
                    "currency": stock.currency,
                    "recent_prices": [
                        {
                            "date": pc.date.isoformat(),
                            "close": pc.close_price,
                            "volume": pc.volume,
                            "high": pc.high_price,
                            "low": pc.low_price,
                        }
                        for pc in sorted(stock.price_caches, key=lambda x: x.date)[-10:]
                    ]
                    if stock.price_caches
                    else [],
                }

            return result

        except Exception as e:
            logger.error(f"Error getting batch stock details: {e}")
            return {}
        finally:
            if should_close:
                session.close()

    def get_comprehensive_screening_results(
        self, include_details: bool = False
    ) -> dict[str, list[dict[str, Any]]]:
        """
        Get all screening results efficiently with optional eager loading.

        This demonstrates how to minimize database queries when fetching
        multiple types of screening results.

        Args:
            include_details: Whether to include enhanced stock details

        Returns:
            Dictionary with all screening types and their results
        """
        session, should_close = self._get_session()
        try:
            results = {}
            stock_details: dict[str, dict[str, Any]] = {}

            if include_details:
                # Get all unique stock symbols first
                maverick_symbols = (
                    session.query(MaverickStocks.stock).distinct().subquery()
                )
                bear_symbols = (
                    session.query(MaverickBearStocks.stock).distinct().subquery()
                )
                supply_demand_symbols = (
                    session.query(SupplyDemandBreakoutStocks.stock)
                    .distinct()
                    .subquery()
                )

                # Single query to get all stock details for all screening types
                all_symbols = (
                    session.query(maverick_symbols.c.stock)
                    .union(session.query(bear_symbols.c.stock))
                    .union(session.query(supply_demand_symbols.c.stock))
                    .all()
                )

                symbol_list = [s[0] for s in all_symbols]
                stock_details = self.get_batch_stock_details(symbol_list)

            # Get screening results
            maverick_stocks = (
                session.query(MaverickStocks)
                .order_by(MaverickStocks.combined_score.desc())
                .limit(20)
                .all()
            )

            bear_stocks = (
                session.query(MaverickBearStocks)
                .order_by(MaverickBearStocks.score.desc())
                .limit(20)
                .all()
            )

            supply_demand_stocks = (
                session.query(SupplyDemandBreakoutStocks)
                .filter(
                    and_(
                        SupplyDemandBreakoutStocks.close_price
                        > SupplyDemandBreakoutStocks.sma_50,
                        SupplyDemandBreakoutStocks.close_price
                        > SupplyDemandBreakoutStocks.sma_150,
                        SupplyDemandBreakoutStocks.close_price
                        > SupplyDemandBreakoutStocks.sma_200,
                    )
                )
                .order_by(SupplyDemandBreakoutStocks.momentum_score.desc())
                .limit(20)
                .all()
            )

            # Process results with optional details
            results["maverick_bullish"] = [
                {
                    **stock.to_dict(),
                    "recommendation_type": "maverick_bullish",
                    "reason": self._generate_reason(stock),
                    **(stock_details.get(stock.stock, {}) if include_details else {}),
                }
                for stock in maverick_stocks
            ]

            results["maverick_bearish"] = [
                {
                    **stock.to_dict(),
                    "recommendation_type": "maverick_bearish",
                    "reason": self._generate_bear_reason(stock),
                    **(stock_details.get(stock.stock, {}) if include_details else {}),
                }
                for stock in bear_stocks
            ]

            results["supply_demand_breakouts"] = [
                {
                    **stock.to_dict(),
                    "recommendation_type": "supply_demand_breakout",
                    "reason": self._generate_supply_demand_reason(stock),
                    **(stock_details.get(stock.stock, {}) if include_details else {}),
                }
                for stock in supply_demand_stocks
            ]

            return results

        except Exception as e:
            logger.error(f"Error getting comprehensive screening results: {e}")
            return {}
        finally:
            if should_close:
                session.close()

    def _generate_reason(self, stock: MaverickStocks) -> str:
        """Generate recommendation reason for Maverick stock."""
        reasons = []

        if hasattr(stock, "combined_score") and stock.combined_score >= 90:
            reasons.append("exceptional combined score")
        elif hasattr(stock, "combined_score") and stock.combined_score >= 80:
            reasons.append("strong combined score")

        if hasattr(stock, "momentum_score") and stock.momentum_score >= 90:
            reasons.append("outstanding relative strength")
        elif hasattr(stock, "momentum_score") and stock.momentum_score >= 80:
            reasons.append("strong relative strength")

        if hasattr(stock, "pat") and stock.pat:
            reasons.append(f"{stock.pat} pattern detected")

        return (
            "Bullish setup with " + ", ".join(reasons)
            if reasons
            else "Strong technical setup"
        )

    def _generate_bear_reason(self, stock: MaverickBearStocks) -> str:
        """Generate recommendation reason for bear stock."""
        reasons = []

        if hasattr(stock, "score") and stock.score >= 80:
            reasons.append("strong bear signals")

        if hasattr(stock, "momentum_score") and stock.momentum_score <= 30:
            reasons.append("weak relative strength")

        return (
            "Bearish setup with " + ", ".join(reasons)
            if reasons
            else "Weak technical setup"
        )

    def _generate_supply_demand_reason(self, stock: SupplyDemandBreakoutStocks) -> str:
        """Generate recommendation reason for supply/demand breakout stock."""
        reasons = []

        if hasattr(stock, "momentum_score") and stock.momentum_score >= 90:
            reasons.append("exceptional relative strength")

        if hasattr(stock, "close_price") and hasattr(stock, "sma_200"):
            if stock.close_price > stock.sma_200 * 1.1:  # 10% above 200-day SMA
                reasons.append("strong uptrend")

        return (
            "Supply/demand breakout with " + ", ".join(reasons)
            if reasons
            else "Supply absorption and demand expansion"
        )

```
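
A minimal usage sketch for the provider above; the tickers and score threshold are illustrative, and all calls refer to methods defined in the module:

```python
# Hypothetical usage of OptimizedScreeningProvider (illustrative values).
from maverick_mcp.providers.optimized_screening import OptimizedScreeningProvider

provider = OptimizedScreeningProvider()  # opens and closes its own session

# One eager-loaded query instead of one query per recommendation.
recs = provider.get_enhanced_maverick_recommendations(
    limit=10, min_score=80, include_stock_details=True
)
for rec in recs:
    print(rec.get("stock"), rec.get("company_name"), rec.get("reason"))

# One batch query for many symbols instead of N single-symbol queries.
details = provider.get_batch_stock_details(["AAPL", "MSFT", "NVDA"])
print(details.get("AAPL", {}).get("sector"))
```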

--------------------------------------------------------------------------------
/maverick_mcp/application/queries/get_technical_analysis.py:
--------------------------------------------------------------------------------

```python
"""
Application query for getting technical analysis.

This query orchestrates the domain services and repositories
to provide technical analysis functionality.
"""

from datetime import UTC, datetime, timedelta
from typing import Protocol

import pandas as pd

from maverick_mcp.application.dto.technical_analysis_dto import (
    BollingerBandsDTO,
    CompleteTechnicalAnalysisDTO,
    MACDAnalysisDTO,
    PriceLevelDTO,
    RSIAnalysisDTO,
    StochasticDTO,
    TrendAnalysisDTO,
    VolumeAnalysisDTO,
)
from maverick_mcp.domain.entities.stock_analysis import StockAnalysis
from maverick_mcp.domain.services.technical_analysis_service import (
    TechnicalAnalysisService,
)
from maverick_mcp.domain.value_objects.technical_indicators import (
    Signal,
)


class StockDataRepository(Protocol):
    """Protocol for stock data repository."""

    def get_price_data(
        self, symbol: str, start_date: str, end_date: str
    ) -> pd.DataFrame:
        """Get historical price data."""
        ...


class GetTechnicalAnalysisQuery:
    """
    Application query for retrieving technical analysis.

    This query coordinates between the domain layer and infrastructure
    to provide technical analysis without exposing domain complexity.
    """

    def __init__(
        self,
        stock_repository: StockDataRepository,
        technical_service: TechnicalAnalysisService,
    ):
        """
        Initialize the query handler.

        Args:
            stock_repository: Repository for fetching stock data
            technical_service: Domain service for technical calculations
        """
        self.stock_repository = stock_repository
        self.technical_service = technical_service

    async def execute(
        self,
        symbol: str,
        days: int = 365,
        indicators: list[str] | None = None,
        rsi_period: int = 14,
    ) -> CompleteTechnicalAnalysisDTO:
        """
        Execute the technical analysis query.

        Args:
            symbol: Stock ticker symbol
            days: Number of days of historical data
            indicators: Specific indicators to calculate (None = all)
            rsi_period: Period for RSI calculation (default: 14)

        Returns:
            Complete technical analysis DTO
        """
        # Calculate date range
        end_date = datetime.now(UTC)
        start_date = end_date - timedelta(days=days)

        # Fetch stock data from repository
        df = self.stock_repository.get_price_data(
            symbol,
            start_date.strftime("%Y-%m-%d"),
            end_date.strftime("%Y-%m-%d"),
        )

        # Create domain entity
        analysis = StockAnalysis(
            symbol=symbol,
            analysis_date=datetime.now(UTC),
            current_price=float(df["close"].iloc[-1]),
            trend_direction=self.technical_service.identify_trend(
                pd.Series(df["close"])
            ),
            trend_strength=self._calculate_trend_strength(df),
            analysis_period_days=days,
            indicators_used=[],  # Initialize indicators_used
        )

        # Calculate requested indicators
        # Since we initialized indicators_used as [], it's safe to use
        assert analysis.indicators_used is not None

        if not indicators or "rsi" in indicators:
            analysis.rsi = self.technical_service.calculate_rsi(
                pd.Series(df["close"]), period=rsi_period
            )
            analysis.indicators_used.append("RSI")

        if not indicators or "macd" in indicators:
            analysis.macd = self.technical_service.calculate_macd(
                pd.Series(df["close"])
            )
            analysis.indicators_used.append("MACD")

        if not indicators or "bollinger" in indicators:
            analysis.bollinger_bands = self.technical_service.calculate_bollinger_bands(
                pd.Series(df["close"])
            )
            analysis.indicators_used.append("Bollinger Bands")

        if not indicators or "stochastic" in indicators:
            analysis.stochastic = self.technical_service.calculate_stochastic(
                pd.Series(df["high"]), pd.Series(df["low"]), pd.Series(df["close"])
            )
            analysis.indicators_used.append("Stochastic")

        # Analyze volume
        if "volume" in df.columns:
            analysis.volume_profile = self.technical_service.analyze_volume(
                pd.Series(df["volume"])
            )

        # Calculate support and resistance levels
        analysis.support_levels = self.technical_service.find_support_levels(df)
        analysis.resistance_levels = self.technical_service.find_resistance_levels(df)

        # Calculate composite signal
        analysis.composite_signal = self.technical_service.calculate_composite_signal(
            analysis.rsi,
            analysis.macd,
            analysis.bollinger_bands,
            analysis.stochastic,
        )

        # Calculate confidence score
        analysis.confidence_score = self._calculate_confidence_score(analysis)

        # Convert to DTO
        return self._map_to_dto(analysis)

    def _calculate_trend_strength(self, df: pd.DataFrame) -> float:
        """Calculate trend strength as a percentage."""
        # Simple implementation using price change
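        # Example: a +12% move over the trailing 20 bars yields strength 12.0;
        # moves beyond +/-100% are clamped to 100.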
        if len(df) < 20:
            return 0.0

        price_change = (df["close"].iloc[-1] - df["close"].iloc[-20]) / df[
            "close"
        ].iloc[-20]
        return float(min(abs(price_change) * 100, 100.0))

    def _calculate_confidence_score(self, analysis: StockAnalysis) -> float:
        """Calculate confidence score based on indicator agreement."""
        signals = []

        if analysis.rsi:
            signals.append(analysis.rsi.signal)
        if analysis.macd:
            signals.append(analysis.macd.signal)
        if analysis.bollinger_bands:
            signals.append(analysis.bollinger_bands.signal)
        if analysis.stochastic:
            signals.append(analysis.stochastic.signal)

        if not signals:
            return 0.0

        # Count agreeing signals
        signal_counts: dict[Signal, int] = {}
        for signal in signals:
            signal_counts[signal] = signal_counts.get(signal, 0) + 1

        max_agreement = max(signal_counts.values())
        confidence = (max_agreement / len(signals)) * 100

        # Boost confidence if volume confirms
        if analysis.volume_profile and analysis.volume_profile.unusual_activity:
            confidence = min(100, confidence + 10)

        return float(confidence)

    def _map_to_dto(self, analysis: StockAnalysis) -> CompleteTechnicalAnalysisDTO:
        """Map domain entity to DTO."""
        dto = CompleteTechnicalAnalysisDTO(
            symbol=analysis.symbol,
            analysis_date=analysis.analysis_date,
            current_price=analysis.current_price,
            trend=TrendAnalysisDTO(
                direction=analysis.trend_direction.value,
                strength=analysis.trend_strength,
                interpretation=self._interpret_trend(analysis),
            ),
            composite_signal=analysis.composite_signal.value,
            confidence_score=analysis.confidence_score,
            risk_reward_ratio=analysis.risk_reward_ratio,
            summary=self._generate_summary(analysis),
            key_levels=analysis.get_key_levels(),
            rsi=None,
            macd=None,
            bollinger_bands=None,
            stochastic=None,
            volume_analysis=None,
        )

        # Map indicators if present
        if analysis.rsi:
            dto.rsi = RSIAnalysisDTO(
                current_value=analysis.rsi.value,
                period=analysis.rsi.period,
                signal=analysis.rsi.signal.value,
                is_overbought=analysis.rsi.is_overbought,
                is_oversold=analysis.rsi.is_oversold,
                interpretation=self._interpret_rsi(analysis.rsi),
            )

        if analysis.macd:
            dto.macd = MACDAnalysisDTO(
                macd_line=analysis.macd.macd_line,
                signal_line=analysis.macd.signal_line,
                histogram=analysis.macd.histogram,
                signal=analysis.macd.signal.value,
                is_bullish_crossover=analysis.macd.is_bullish_crossover,
                is_bearish_crossover=analysis.macd.is_bearish_crossover,
                interpretation=self._interpret_macd(analysis.macd),
            )

        if analysis.bollinger_bands:
            dto.bollinger_bands = BollingerBandsDTO(
                upper_band=analysis.bollinger_bands.upper_band,
                middle_band=analysis.bollinger_bands.middle_band,
                lower_band=analysis.bollinger_bands.lower_band,
                current_price=analysis.bollinger_bands.current_price,
                bandwidth=analysis.bollinger_bands.bandwidth,
                percent_b=analysis.bollinger_bands.percent_b,
                signal=analysis.bollinger_bands.signal.value,
                interpretation=self._interpret_bollinger(analysis.bollinger_bands),
            )

        if analysis.stochastic:
            dto.stochastic = StochasticDTO(
                k_value=analysis.stochastic.k_value,
                d_value=analysis.stochastic.d_value,
                signal=analysis.stochastic.signal.value,
                is_overbought=analysis.stochastic.is_overbought,
                is_oversold=analysis.stochastic.is_oversold,
                interpretation=self._interpret_stochastic(analysis.stochastic),
            )

        # Map levels
        dto.support_levels = [
            PriceLevelDTO(
                price=level.price,
                strength=level.strength,
                touches=level.touches,
                distance_from_current=(
                    (analysis.current_price - level.price)
                    / analysis.current_price
                    * 100
                ),
            )
            for level in (analysis.support_levels or [])
        ]

        dto.resistance_levels = [
            PriceLevelDTO(
                price=level.price,
                strength=level.strength,
                touches=level.touches,
                distance_from_current=(
                    (level.price - analysis.current_price)
                    / analysis.current_price
                    * 100
                ),
            )
            for level in (analysis.resistance_levels or [])
        ]

        # Map volume if present
        if analysis.volume_profile:
            dto.volume_analysis = VolumeAnalysisDTO(
                current_volume=analysis.volume_profile.current_volume,
                average_volume=analysis.volume_profile.average_volume,
                relative_volume=analysis.volume_profile.relative_volume,
                volume_trend=analysis.volume_profile.volume_trend.value,
                unusual_activity=analysis.volume_profile.unusual_activity,
                interpretation=self._interpret_volume(analysis.volume_profile),
            )

        return dto

    def _generate_summary(self, analysis: StockAnalysis) -> str:
        """Generate executive summary of the analysis."""
        signal_text = {
            Signal.STRONG_BUY: "strong buy signal",
            Signal.BUY: "buy signal",
            Signal.NEUTRAL: "neutral stance",
            Signal.SELL: "sell signal",
            Signal.STRONG_SELL: "strong sell signal",
        }

        summary_parts = [
            f"{analysis.symbol} shows a {signal_text[analysis.composite_signal]}",
            f"with {analysis.confidence_score:.0f}% confidence.",
            f"The stock is in a {analysis.trend_direction.value.replace('_', ' ')}.",
        ]

        if analysis.risk_reward_ratio:
            summary_parts.append(
                f"Risk/reward ratio is {analysis.risk_reward_ratio:.2f}."
            )

        return " ".join(summary_parts)

    def _interpret_trend(self, analysis: StockAnalysis) -> str:
        """Generate trend interpretation."""
        return (
            f"The stock is showing a {analysis.trend_direction.value.replace('_', ' ')} "
            f"with {analysis.trend_strength:.0f}% strength."
        )

    def _interpret_rsi(self, rsi) -> str:
        """Generate RSI interpretation."""
        if rsi.is_overbought:
            return f"RSI at {rsi.value:.1f} indicates overbought conditions."
        elif rsi.is_oversold:
            return f"RSI at {rsi.value:.1f} indicates oversold conditions."
        else:
            return f"RSI at {rsi.value:.1f} is in neutral territory."

    def _interpret_macd(self, macd) -> str:
        """Generate MACD interpretation."""
        if macd.is_bullish_crossover:
            return "MACD shows bullish crossover - potential buy signal."
        elif macd.is_bearish_crossover:
            return "MACD shows bearish crossover - potential sell signal."
        else:
            return "MACD is neutral, no clear signal."

    def _interpret_bollinger(self, bb) -> str:
        """Generate Bollinger Bands interpretation."""
        if bb.is_squeeze:
            return "Bollinger Bands are squeezing - expect volatility breakout."
        elif bb.percent_b > 1:
            return "Price above upper band - potential overbought."
        elif bb.percent_b < 0:
            return "Price below lower band - potential oversold."
        else:
            return f"Price at {bb.percent_b:.1%} of bands range."

    def _interpret_stochastic(self, stoch) -> str:
        """Generate Stochastic interpretation."""
        if stoch.is_overbought:
            return f"Stochastic at {stoch.k_value:.1f} indicates overbought."
        elif stoch.is_oversold:
            return f"Stochastic at {stoch.k_value:.1f} indicates oversold."
        else:
            return f"Stochastic at {stoch.k_value:.1f} is neutral."

    def _interpret_volume(self, volume) -> str:
        """Generate volume interpretation."""
        if volume.unusual_activity:
            return f"Unusual volume at {volume.relative_volume:.1f}x average!"
        elif volume.is_high_volume:
            return f"High volume at {volume.relative_volume:.1f}x average."
        elif volume.is_low_volume:
            return f"Low volume at {volume.relative_volume:.1f}x average."
        else:
            return "Normal trading volume."

```
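
A sketch of how this query might be wired, assuming `TechnicalAnalysisService` takes no constructor arguments; the in-memory repository and its `load_price_frame` helper are hypothetical stand-ins for any object satisfying the `StockDataRepository` protocol:

```python
import asyncio

import pandas as pd

from maverick_mcp.application.queries.get_technical_analysis import (
    GetTechnicalAnalysisQuery,
)
from maverick_mcp.domain.services.technical_analysis_service import (
    TechnicalAnalysisService,
)


class InMemoryStockRepository:
    """Hypothetical repository; structural typing satisfies the protocol."""

    def __init__(self, frames: dict[str, pd.DataFrame]):
        self._frames = frames

    def get_price_data(
        self, symbol: str, start_date: str, end_date: str
    ) -> pd.DataFrame:
        # Frames are indexed by date; slice to the requested window.
        return self._frames[symbol].loc[start_date:end_date]


async def main() -> None:
    frames = {"AAPL": load_price_frame("AAPL")}  # load_price_frame is a placeholder
    query = GetTechnicalAnalysisQuery(
        stock_repository=InMemoryStockRepository(frames),
        technical_service=TechnicalAnalysisService(),
    )
    dto = await query.execute("AAPL", days=180, indicators=["rsi", "macd"])
    print(dto.summary, dto.composite_signal)


if __name__ == "__main__":
    asyncio.run(main())
```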

--------------------------------------------------------------------------------
/tests/providers/test_stock_data_simple.py:
--------------------------------------------------------------------------------

```python
"""
Simplified unit tests for maverick_mcp.providers.stock_data module.

This module contains focused tests for the Enhanced Stock Data Provider
with proper mocking to avoid external dependencies.
"""

from datetime import datetime, timedelta
from unittest.mock import Mock, patch

import pandas as pd
import pytest
from sqlalchemy.orm import Session

from maverick_mcp.providers.stock_data import EnhancedStockDataProvider


class TestEnhancedStockDataProviderCore:
    """Test core functionality of the Enhanced Stock Data Provider."""

    @pytest.fixture
    def mock_db_session(self):
        """Create a mock database session."""
        session = Mock(spec=Session)
        session.execute.return_value.fetchone.return_value = [1]
        return session

    @pytest.fixture
    def provider(self, mock_db_session):
        """Create a stock data provider with mocked dependencies."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider(db_session=mock_db_session)
                return provider

    def test_provider_initialization(self, mock_db_session):
        """Test provider initialization."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider(db_session=mock_db_session)

                assert provider.timeout == 30
                assert provider.max_retries == 3
                assert provider.cache_days == 1
                assert provider._db_session == mock_db_session

    def test_provider_initialization_without_session(self):
        """Test provider initialization without database session."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider()

                assert provider._db_session is None

    def test_get_stock_data_returns_dataframe(self, provider):
        """Test that get_stock_data returns a DataFrame."""
        # Test with use_cache=False to avoid database dependency
        result = provider.get_stock_data(
            "AAPL", "2024-01-01", "2024-01-31", use_cache=False
        )

        assert isinstance(result, pd.DataFrame)
        # Note: May be empty due to mocking, but should be DataFrame

    def test_get_maverick_recommendations_no_session(self):
        """Test getting Maverick recommendations without database session."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider(db_session=None)

                result = provider.get_maverick_recommendations()

                assert isinstance(result, list)
                assert len(result) == 0

    def test_get_maverick_bear_recommendations_no_session(self):
        """Test getting Maverick bear recommendations without database session."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider(db_session=None)

                result = provider.get_maverick_bear_recommendations()

                assert isinstance(result, list)
                assert len(result) == 0

    def test_get_trending_recommendations_no_session(self):
        """Test getting trending recommendations without database session."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider(db_session=None)

                result = provider.get_trending_recommendations()

                assert isinstance(result, list)
                # The provider now falls back to using default database connection
                # when no session is provided, so we expect actual results
                assert len(result) >= 0  # May return cached/fallback data

    @patch("maverick_mcp.providers.stock_data.get_latest_maverick_screening")
    def test_get_all_screening_recommendations(self, mock_screening, provider):
        """Test getting all screening recommendations."""
        mock_screening.return_value = {
            "maverick_stocks": [],
            "maverick_bear_stocks": [],
            "trending_stocks": [],
        }

        result = provider.get_all_screening_recommendations()

        assert isinstance(result, dict)
        assert "maverick_stocks" in result
        assert "maverick_bear_stocks" in result
        assert "trending_stocks" in result

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_stock_info_success(self, mock_ticker, provider):
        """Test getting stock information successfully."""
        mock_info = {
            "symbol": "AAPL",
            "longName": "Apple Inc.",
            "sector": "Technology",
            "industry": "Consumer Electronics",
        }

        mock_ticker.return_value.info = mock_info

        result = provider.get_stock_info("AAPL")

        assert isinstance(result, dict)
        assert result.get("symbol") == "AAPL"

    @pytest.mark.skip(reason="Flaky test with external dependencies")
    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_stock_info_exception(self, mock_ticker, provider):
        """Test getting stock information with exception."""
        mock_ticker.side_effect = Exception("API Error")

        result = provider.get_stock_info("INVALID")

        assert isinstance(result, dict)
        assert result == {}

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_realtime_data_success(self, mock_ticker, provider):
        """Test getting real-time data successfully."""
        # Create mock data that matches the expected format
        mock_data = pd.DataFrame(
            {
                "Open": [150.0],
                "High": [155.0],
                "Low": [149.0],
                "Close": [153.0],
                "Volume": [1000000],
            },
            index=pd.DatetimeIndex([datetime.now()]),
        )

        mock_ticker.return_value.history.return_value = mock_data
        mock_ticker.return_value.info = {"previousClose": 151.0}

        result = provider.get_realtime_data("AAPL")

        assert isinstance(result, dict)
        assert "symbol" in result
        assert "price" in result

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_realtime_data_empty(self, mock_ticker, provider):
        """Test getting real-time data with empty result."""
        mock_ticker.return_value.history.return_value = pd.DataFrame()

        result = provider.get_realtime_data("INVALID")

        assert result is None

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_realtime_data_exception(self, mock_ticker, provider):
        """Test getting real-time data with exception."""
        mock_ticker.side_effect = Exception("API Error")

        result = provider.get_realtime_data("INVALID")

        assert result is None

    def test_get_all_realtime_data(self, provider):
        """Test getting real-time data for multiple symbols."""
        with patch.object(provider, "get_realtime_data") as mock_single:
            mock_single.side_effect = [
                {"symbol": "AAPL", "price": 153.0},
                {"symbol": "MSFT", "price": 420.0},
            ]

            result = provider.get_all_realtime_data(["AAPL", "MSFT"])

            assert isinstance(result, dict)
            assert "AAPL" in result
            assert "MSFT" in result

    def test_is_market_open(self, provider):
        """Test market open check."""
        with patch.object(provider.market_calendar, "open_at_time") as mock_open:
            mock_open.return_value = True

            result = provider.is_market_open()

            assert isinstance(result, bool)

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_news_success(self, mock_ticker, provider):
        """Test getting news successfully."""
        mock_news = [
            {
                "title": "Apple Reports Strong Q4 Earnings",
                "link": "https://example.com/news1",
                "providerPublishTime": datetime.now().timestamp(),
                "type": "STORY",
            },
        ]

        mock_ticker.return_value.news = mock_news

        result = provider.get_news("AAPL", limit=5)

        assert isinstance(result, pd.DataFrame)

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_news_exception(self, mock_ticker, provider):
        """Test getting news with exception."""
        mock_ticker.side_effect = Exception("API Error")

        result = provider.get_news("INVALID")

        assert isinstance(result, pd.DataFrame)
        assert result.empty

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_earnings_success(self, mock_ticker, provider):
        """Test getting earnings data successfully."""
        mock_ticker.return_value.calendar = pd.DataFrame()
        mock_ticker.return_value.earnings_dates = {}
        mock_ticker.return_value.earnings_trend = {}

        result = provider.get_earnings("AAPL")

        assert isinstance(result, dict)
        assert "earnings" in result or "earnings_dates" in result

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_earnings_exception(self, mock_ticker, provider):
        """Test getting earnings with exception."""
        mock_ticker.side_effect = Exception("API Error")

        result = provider.get_earnings("INVALID")

        assert isinstance(result, dict)

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_recommendations_success(self, mock_ticker, provider):
        """Test getting analyst recommendations successfully."""
        mock_recommendations = pd.DataFrame(
            {
                "period": ["0m", "-1m"],
                "strongBuy": [5, 4],
                "buy": [10, 12],
                "hold": [3, 3],
                "sell": [1, 1],
                "strongSell": [0, 0],
            }
        )

        mock_ticker.return_value.recommendations = mock_recommendations

        result = provider.get_recommendations("AAPL")

        assert isinstance(result, pd.DataFrame)

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_get_recommendations_exception(self, mock_ticker, provider):
        """Test getting recommendations with exception."""
        mock_ticker.side_effect = Exception("API Error")

        result = provider.get_recommendations("INVALID")

        assert isinstance(result, pd.DataFrame)
        assert result.empty

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_is_etf_true(self, mock_ticker, provider):
        """Test ETF detection for actual ETF."""
        mock_ticker.return_value.info = {"quoteType": "ETF"}

        result = provider.is_etf("SPY")

        assert result is True

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_is_etf_false(self, mock_ticker, provider):
        """Test ETF detection for stock."""
        mock_ticker.return_value.info = {"quoteType": "EQUITY"}

        result = provider.is_etf("AAPL")

        assert result is False

    @patch("maverick_mcp.providers.stock_data.yf.Ticker")
    def test_is_etf_exception(self, mock_ticker, provider):
        """Test ETF detection with exception."""
        mock_ticker.side_effect = Exception("API Error")

        result = provider.is_etf("INVALID")

        assert result is False


class TestStockDataProviderErrorHandling:
    """Test error handling and edge cases."""

    def test_invalid_date_range(self):
        """Test with invalid date range."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider()

                # Test with end date before start date
                result = provider.get_stock_data(
                    "AAPL", "2024-12-31", "2024-01-01", use_cache=False
                )

                assert isinstance(result, pd.DataFrame)

    @pytest.mark.skip(reason="Flaky test with external dependencies")
    def test_empty_symbol(self):
        """Test with empty symbol."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider()

                result = provider.get_stock_data(
                    "", "2024-01-01", "2024-01-31", use_cache=False
                )

                assert isinstance(result, pd.DataFrame)

    def test_future_date_range(self):
        """Test with future dates."""
        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                provider = EnhancedStockDataProvider()

                future_date = (datetime.now() + timedelta(days=365)).strftime(
                    "%Y-%m-%d"
                )
                result = provider.get_stock_data(
                    "AAPL", future_date, future_date, use_cache=False
                )

                assert isinstance(result, pd.DataFrame)

    def test_database_connection_failure(self):
        """Test graceful handling of database connection failure."""
        mock_session = Mock(spec=Session)
        mock_session.execute.side_effect = Exception("Connection failed")

        with patch("maverick_mcp.providers.stock_data.get_db_session_read_only"):
            with patch("maverick_mcp.providers.stock_data.mcal.get_calendar"):
                # Should not raise exception, just log warning
                provider = EnhancedStockDataProvider(db_session=mock_session)
                assert provider is not None


if __name__ == "__main__":
    pytest.main([__file__])

```
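
These tests patch the provider module's own references (`yf.Ticker`, the read-only DB session, the market calendar) rather than the upstream libraries. A condensed standalone sketch of the same pattern (the patch targets mirror the tests above; the ticker and assertion are illustrative):

```python
from unittest.mock import patch

from maverick_mcp.providers.stock_data import EnhancedStockDataProvider

# Patching the names as seen by stock_data.py means no database,
# market-calendar, or network access occurs during the test.
with (
    patch("maverick_mcp.providers.stock_data.get_db_session_read_only"),
    patch("maverick_mcp.providers.stock_data.mcal.get_calendar"),
    patch("maverick_mcp.providers.stock_data.yf.Ticker") as mock_ticker,
):
    mock_ticker.return_value.info = {"quoteType": "ETF"}
    provider = EnhancedStockDataProvider()
    assert provider.is_etf("SPY") is True
```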

--------------------------------------------------------------------------------
/maverick_mcp/utils/fallback_strategies.py:
--------------------------------------------------------------------------------

```python
"""
Fallback strategies for circuit breakers to provide graceful degradation.
"""

import logging
from abc import ABC, abstractmethod
from datetime import UTC, datetime, timedelta

import pandas as pd

from maverick_mcp.data.models import PriceCache, Stock
from maverick_mcp.data.session_management import get_db_session_read_only as get_session
from maverick_mcp.exceptions import DataNotFoundError, DataProviderError

logger = logging.getLogger(__name__)


class FallbackStrategy[T](ABC):
    """Base class for fallback strategies."""

    @abstractmethod
    async def execute_async(self, *args, **kwargs) -> T:
        """Execute the fallback strategy asynchronously."""
        pass

    @abstractmethod
    def execute_sync(self, *args, **kwargs) -> T:
        """Execute the fallback strategy synchronously."""
        pass


class FallbackChain[T]:
    """
    Chain of fallback strategies to execute in order.
    Stops at the first successful strategy.
    """

    def __init__(self, strategies: list[FallbackStrategy[T]]):
        """Initialize fallback chain with ordered strategies."""
        self.strategies = strategies

    async def execute_async(self, *args, **kwargs) -> T:
        """Execute strategies asynchronously until one succeeds."""
        last_error = None

        for i, strategy in enumerate(self.strategies):
            try:
                logger.info(
                    f"Executing fallback strategy {i + 1}/{len(self.strategies)}: {strategy.__class__.__name__}"
                )
                result = await strategy.execute_async(*args, **kwargs)
                if result is not None:  # Success
                    return result
            except Exception as e:
                logger.warning(
                    f"Fallback strategy {strategy.__class__.__name__} failed: {e}"
                )
                last_error = e
                continue

        # All strategies failed; re-raise the last error or signal a generic failure
        if last_error:
            raise last_error
        raise DataProviderError("fallback", "All fallback strategies failed")

    def execute_sync(self, *args, **kwargs) -> T:
        """Execute strategies synchronously until one succeeds."""
        last_error = None

        for i, strategy in enumerate(self.strategies):
            try:
                logger.info(
                    f"Executing fallback strategy {i + 1}/{len(self.strategies)}: {strategy.__class__.__name__}"
                )
                result = strategy.execute_sync(*args, **kwargs)
                if result is not None:  # Success
                    return result
            except Exception as e:
                logger.warning(
                    f"Fallback strategy {strategy.__class__.__name__} failed: {e}"
                )
                last_error = e
                continue

        # All strategies failed; re-raise the last error or signal a generic failure
        if last_error:
            raise last_error
        raise DataProviderError("fallback", "All fallback strategies failed")


class CachedStockDataFallback(FallbackStrategy[pd.DataFrame]):
    """Fallback to cached stock data from database."""

    def __init__(self, max_age_days: int = 7):
        """
        Initialize cached data fallback.

        Args:
            max_age_days: Maximum age of cached data to use
        """
        self.max_age_days = max_age_days

    async def execute_async(
        self, symbol: str, start_date: str, end_date: str, **kwargs
    ) -> pd.DataFrame:
        """Get cached stock data asynchronously."""
        # For now, delegate to sync version
        return self.execute_sync(symbol, start_date, end_date, **kwargs)

    def execute_sync(
        self, symbol: str, start_date: str, end_date: str, **kwargs
    ) -> pd.DataFrame:
        """Get cached stock data synchronously."""
        try:
            with get_session() as session:
                # Check if stock exists
                stock = session.query(Stock).filter_by(symbol=symbol).first()
                if not stock:
                    raise DataNotFoundError(f"Stock {symbol} not found in database")

                # Get cached prices
                cutoff_date = datetime.now(UTC) - timedelta(days=self.max_age_days)

                query = session.query(PriceCache).filter(
                    PriceCache.stock_id == stock.id,
                    PriceCache.date >= start_date,
                    PriceCache.date <= end_date,
                    PriceCache.updated_at >= cutoff_date,  # Only use recent cache
                )

                results = query.all()

                if not results:
                    raise DataNotFoundError(f"No cached data found for {symbol}")

                # Convert to DataFrame
                data = []
                for row in results:
                    data.append(
                        {
                            "Date": pd.to_datetime(row.date),
                            "Open": float(row.open),
                            "High": float(row.high),
                            "Low": float(row.low),
                            "Close": float(row.close),
                            "Volume": int(row.volume),
                        }
                    )

                df = pd.DataFrame(data)
                df.set_index("Date", inplace=True)
                df.sort_index(inplace=True)

                logger.info(
                    f"Returned {len(df)} rows of cached data for {symbol} "
                    f"(may be stale up to {self.max_age_days} days)"
                )

                return df

        except Exception as e:
            logger.error(f"Failed to get cached data for {symbol}: {e}")
            raise


class StaleDataFallback(FallbackStrategy[pd.DataFrame]):
    """Return any available cached data regardless of age."""

    async def execute_async(
        self, symbol: str, start_date: str, end_date: str, **kwargs
    ) -> pd.DataFrame:
        """Get stale stock data asynchronously."""
        return self.execute_sync(symbol, start_date, end_date, **kwargs)

    def execute_sync(
        self, symbol: str, start_date: str, end_date: str, **kwargs
    ) -> pd.DataFrame:
        """Get stale stock data synchronously."""
        try:
            with get_session() as session:
                # Check if stock exists
                stock = session.query(Stock).filter_by(symbol=symbol).first()
                if not stock:
                    raise DataNotFoundError(f"Stock {symbol} not found in database")

                # Get any cached prices
                query = session.query(PriceCache).filter(
                    PriceCache.stock_id == stock.id,
                    PriceCache.date >= start_date,
                    PriceCache.date <= end_date,
                )

                results = query.all()

                if not results:
                    raise DataNotFoundError(f"No cached data found for {symbol}")

                # Convert to DataFrame
                data = []
                for row in results:
                    data.append(
                        {
                            "Date": pd.to_datetime(row.date),
                            "Open": float(row.open),
                            "High": float(row.high),
                            "Low": float(row.low),
                            "Close": float(row.close),
                            "Volume": int(row.volume),
                        }
                    )

                df = pd.DataFrame(data)
                df.set_index("Date", inplace=True)
                df.sort_index(inplace=True)

                # Add warning about stale data
                oldest_update = min(row.updated_at for row in results)
                age_days = (datetime.now(UTC) - oldest_update).days

                logger.warning(
                    f"Returning {len(df)} rows of STALE cached data for {symbol} "
                    f"(data is up to {age_days} days old)"
                )

                # Add metadata to indicate stale data
                df.attrs["is_stale"] = True
                df.attrs["max_age_days"] = age_days
                df.attrs["warning"] = f"Data may be up to {age_days} days old"

                return df

        except Exception as e:
            logger.error(f"Failed to get stale cached data for {symbol}: {e}")
            raise


class DefaultMarketDataFallback(FallbackStrategy[dict]):
    """Return default/neutral market data when APIs are down."""

    async def execute_async(self, mover_type: str = "gainers", **kwargs) -> dict:
        """Get default market data asynchronously."""
        return self.execute_sync(mover_type, **kwargs)

    def execute_sync(self, mover_type: str = "gainers", **kwargs) -> dict:
        """Get default market data synchronously."""
        logger.warning(f"Returning default {mover_type} data due to API failure")

        # Return empty but valid structure
        return {
            "movers": [],
            "metadata": {
                "source": "fallback",
                "timestamp": datetime.now(UTC).isoformat(),
                "is_fallback": True,
                "message": f"Market {mover_type} data temporarily unavailable",
            },
        }


class CachedEconomicDataFallback(FallbackStrategy[pd.Series]):
    """Fallback to cached economic indicator data."""

    def __init__(self, default_values: dict[str, float] | None = None):
        """
        Initialize economic data fallback.

        Args:
            default_values: Default values for common indicators
        """
        self.default_values = default_values or {
            "GDP": 2.5,  # Default GDP growth %
            "UNRATE": 4.0,  # Default unemployment rate %
            "CPIAUCSL": 2.0,  # Default inflation rate %
            "DFF": 5.0,  # Default federal funds rate %
            "DGS10": 4.0,  # Default 10-year treasury yield %
            "VIXCLS": 20.0,  # Default VIX
        }

    async def execute_async(
        self, series_id: str, start_date: str, end_date: str, **kwargs
    ) -> pd.Series:
        """Get cached economic data asynchronously."""
        return self.execute_sync(series_id, start_date, end_date, **kwargs)

    def execute_sync(
        self, series_id: str, start_date: str, end_date: str, **kwargs
    ) -> pd.Series:
        """Get cached economic data synchronously."""
        # For now, return default values as a series
        logger.warning(f"Returning default value for {series_id} due to API failure")

        default_value = self.default_values.get(series_id, 0.0)

        # Create a simple series with the default value
        dates = pd.date_range(start=start_date, end=end_date, freq="D")
        series = pd.Series(default_value, index=dates, name=series_id)

        # Add metadata
        series.attrs["is_fallback"] = True
        series.attrs["source"] = "default"
        series.attrs["warning"] = f"Using default value of {default_value}"

        return series


class EmptyNewsFallback(FallbackStrategy[dict]):
    """Return empty news data when news APIs are down."""

    async def execute_async(self, symbol: str, **kwargs) -> dict:
        """Get empty news data asynchronously."""
        return self.execute_sync(symbol, **kwargs)

    def execute_sync(self, symbol: str, **kwargs) -> dict:
        """Get empty news data synchronously."""
        logger.warning(f"News API unavailable for {symbol}, returning empty news")

        return {
            "articles": [],
            "metadata": {
                "symbol": symbol,
                "source": "fallback",
                "timestamp": datetime.now(UTC).isoformat(),
                "is_fallback": True,
                "message": "News sentiment analysis temporarily unavailable",
            },
        }


class LastKnownQuoteFallback(FallbackStrategy[dict]):
    """Return last known quote from cache."""

    async def execute_async(self, symbol: str, **kwargs) -> dict:
        """Get last known quote asynchronously."""
        return self.execute_sync(symbol, **kwargs)

    def execute_sync(self, symbol: str, **kwargs) -> dict:
        """Get last known quote synchronously."""
        try:
            with get_session() as session:
                # Get stock
                stock = session.query(Stock).filter_by(symbol=symbol).first()
                if not stock:
                    raise DataNotFoundError(f"Stock {symbol} not found")

                # Get most recent price
                latest_price = (
                    session.query(PriceCache)
                    .filter_by(stock_id=stock.id)
                    .order_by(PriceCache.date.desc())
                    .first()
                )

                if not latest_price:
                    raise DataNotFoundError(f"No cached prices for {symbol}")

                age_days = (datetime.now(UTC).date() - latest_price.date).days

                logger.warning(
                    f"Returning cached quote for {symbol} from {latest_price.date} "
                    f"({age_days} days old)"
                )

                return {
                    "symbol": symbol,
                    "price": float(latest_price.close),
                    "open": float(latest_price.open),
                    "high": float(latest_price.high),
                    "low": float(latest_price.low),
                    "close": float(latest_price.close),
                    "volume": int(latest_price.volume),
                    "date": latest_price.date.isoformat(),
                    "is_fallback": True,
                    "age_days": age_days,
                    "warning": f"Quote is {age_days} days old",
                }

        except Exception as e:
            logger.error(f"Failed to get cached quote for {symbol}: {e}")
            # Return a minimal quote structure
            return {
                "symbol": symbol,
                "price": 0.0,
                "is_fallback": True,
                "error": str(e),
                "warning": "No quote data available",
            }


# Pre-configured fallback chains for common use cases
STOCK_DATA_FALLBACK_CHAIN = FallbackChain[pd.DataFrame](
    [
        CachedStockDataFallback(max_age_days=1),  # Try recent cache first
        CachedStockDataFallback(max_age_days=7),  # Then older cache
        StaleDataFallback(),  # Finally any cache
    ]
)

MARKET_DATA_FALLBACK = DefaultMarketDataFallback()

ECONOMIC_DATA_FALLBACK = CachedEconomicDataFallback()

NEWS_FALLBACK = EmptyNewsFallback()

QUOTE_FALLBACK = LastKnownQuoteFallback()

```
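
A minimal usage sketch for the pre-configured chain above (the symbol and dates are illustrative; if every strategy fails, `execute_sync` re-raises the last strategy's error):

```python
from maverick_mcp.utils.fallback_strategies import STOCK_DATA_FALLBACK_CHAIN

try:
    df = STOCK_DATA_FALLBACK_CHAIN.execute_sync("AAPL", "2024-01-01", "2024-06-30")
    # StaleDataFallback tags its output so callers can surface the caveat.
    if df.attrs.get("is_stale"):
        print(df.attrs["warning"])
except Exception as exc:  # the chain re-raises the last strategy's error
    print(f"No data available from any fallback: {exc}")
```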

--------------------------------------------------------------------------------
/maverick_mcp/config/logging_settings.py:
--------------------------------------------------------------------------------

```python
"""
Structured logging configuration settings for the backtesting system.

This module provides centralized configuration for all logging-related settings
including debug mode, log levels, output formats, and performance monitoring.
"""

import os
from dataclasses import dataclass
from pathlib import Path
from typing import Any


@dataclass
class LoggingSettings:
    """Comprehensive logging configuration settings."""

    # Basic logging configuration
    log_level: str = "INFO"
    log_format: str = "json"  # json or text
    enable_async_logging: bool = True
    console_output: str = "stderr"  # stdout or stderr

    # File logging configuration
    enable_file_logging: bool = True
    log_file_path: str = "logs/backtesting.log"
    enable_log_rotation: bool = True
    max_log_size_mb: int = 10
    backup_count: int = 5

    # Debug mode configuration
    debug_enabled: bool = False
    verbose_modules: list[str] | None = None
    log_request_response: bool = False
    max_payload_length: int = 1000

    # Performance monitoring
    enable_performance_logging: bool = True
    performance_log_threshold_ms: float = 1000.0
    enable_resource_tracking: bool = True
    enable_business_metrics: bool = True

    # Async logging configuration
    async_log_queue_size: int = 10000
    async_log_flush_interval: float = 1.0

    # Sensitive data handling
    mask_sensitive_data: bool = True
    sensitive_field_patterns: list[str] | None = None

    # Remote logging (for future log aggregation)
    enable_remote_logging: bool = False
    remote_endpoint: str | None = None
    remote_api_key: str | None = None

    # Correlation and tracing
    enable_correlation_tracking: bool = True
    correlation_id_header: str = "X-Correlation-ID"
    enable_request_tracing: bool = True

    def __post_init__(self):
        """Initialize default values for mutable fields."""
        if self.verbose_modules is None:
            self.verbose_modules = []

        if self.sensitive_field_patterns is None:
            self.sensitive_field_patterns = [
                "password",
                "token",
                "key",
                "secret",
                "auth",
                "credential",
                "bearer",
                "session",
                "cookie",
                "api_key",
                "access_token",
                "refresh_token",
                "private",
                "confidential",
            ]

    @classmethod
    def from_env(cls) -> "LoggingSettings":
        """Create logging settings from environment variables."""
        return cls(
            log_level=os.getenv("MAVERICK_LOG_LEVEL", "INFO").upper(),
            log_format=os.getenv("MAVERICK_LOG_FORMAT", "json").lower(),
            enable_async_logging=os.getenv("MAVERICK_ASYNC_LOGGING", "true").lower()
            == "true",
            console_output=os.getenv("MAVERICK_CONSOLE_OUTPUT", "stderr").lower(),
            # File logging
            enable_file_logging=os.getenv("MAVERICK_FILE_LOGGING", "true").lower()
            == "true",
            log_file_path=os.getenv("MAVERICK_LOG_FILE", "logs/backtesting.log"),
            enable_log_rotation=os.getenv("MAVERICK_LOG_ROTATION", "true").lower()
            == "true",
            max_log_size_mb=int(os.getenv("MAVERICK_LOG_SIZE_MB", "10")),
            backup_count=int(os.getenv("MAVERICK_LOG_BACKUPS", "5")),
            # Debug configuration
            debug_enabled=os.getenv("MAVERICK_DEBUG", "false").lower() == "true",
            log_request_response=os.getenv("MAVERICK_LOG_REQUESTS", "false").lower()
            == "true",
            max_payload_length=int(os.getenv("MAVERICK_MAX_PAYLOAD", "1000")),
            # Performance monitoring
            enable_performance_logging=os.getenv(
                "MAVERICK_PERF_LOGGING", "true"
            ).lower()
            == "true",
            performance_log_threshold_ms=float(
                os.getenv("MAVERICK_PERF_THRESHOLD", "1000.0")
            ),
            enable_resource_tracking=os.getenv(
                "MAVERICK_RESOURCE_TRACKING", "true"
            ).lower()
            == "true",
            enable_business_metrics=os.getenv(
                "MAVERICK_BUSINESS_METRICS", "true"
            ).lower()
            == "true",
            # Async logging
            async_log_queue_size=int(os.getenv("MAVERICK_LOG_QUEUE_SIZE", "10000")),
            async_log_flush_interval=float(
                os.getenv("MAVERICK_LOG_FLUSH_INTERVAL", "1.0")
            ),
            # Sensitive data
            mask_sensitive_data=os.getenv("MAVERICK_MASK_SENSITIVE", "true").lower()
            == "true",
            # Remote logging
            enable_remote_logging=os.getenv("MAVERICK_REMOTE_LOGGING", "false").lower()
            == "true",
            remote_endpoint=os.getenv("MAVERICK_REMOTE_LOG_ENDPOINT"),
            remote_api_key=os.getenv("MAVERICK_REMOTE_LOG_API_KEY"),
            # Correlation and tracing
            enable_correlation_tracking=os.getenv(
                "MAVERICK_CORRELATION_TRACKING", "true"
            ).lower()
            == "true",
            correlation_id_header=os.getenv(
                "MAVERICK_CORRELATION_HEADER", "X-Correlation-ID"
            ),
            enable_request_tracing=os.getenv("MAVERICK_REQUEST_TRACING", "true").lower()
            == "true",
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert settings to dictionary for serialization."""
        return {
            "log_level": self.log_level,
            "log_format": self.log_format,
            "enable_async_logging": self.enable_async_logging,
            "console_output": self.console_output,
            "enable_file_logging": self.enable_file_logging,
            "log_file_path": self.log_file_path,
            "enable_log_rotation": self.enable_log_rotation,
            "max_log_size_mb": self.max_log_size_mb,
            "backup_count": self.backup_count,
            "debug_enabled": self.debug_enabled,
            "verbose_modules": self.verbose_modules,
            "log_request_response": self.log_request_response,
            "max_payload_length": self.max_payload_length,
            "enable_performance_logging": self.enable_performance_logging,
            "performance_log_threshold_ms": self.performance_log_threshold_ms,
            "enable_resource_tracking": self.enable_resource_tracking,
            "enable_business_metrics": self.enable_business_metrics,
            "async_log_queue_size": self.async_log_queue_size,
            "async_log_flush_interval": self.async_log_flush_interval,
            "mask_sensitive_data": self.mask_sensitive_data,
            "sensitive_field_patterns": self.sensitive_field_patterns,
            "enable_remote_logging": self.enable_remote_logging,
            "remote_endpoint": self.remote_endpoint,
            "enable_correlation_tracking": self.enable_correlation_tracking,
            "correlation_id_header": self.correlation_id_header,
            "enable_request_tracing": self.enable_request_tracing,
        }

    def ensure_log_directory(self):
        """Ensure the log directory exists."""
        if self.enable_file_logging and self.log_file_path:
            log_path = Path(self.log_file_path)
            log_path.parent.mkdir(parents=True, exist_ok=True)

    def get_debug_modules(self) -> list[str]:
        """Get list of modules for debug logging."""
        if not self.debug_enabled:
            return []

        if not self.verbose_modules:
            # Default debug modules for backtesting
            return [
                "maverick_mcp.backtesting",
                "maverick_mcp.api.tools.backtesting",
                "maverick_mcp.providers",
                "maverick_mcp.data.cache",
            ]

        return self.verbose_modules

    def should_log_performance(self, duration_ms: float) -> bool:
        """Check if operation should be logged based on performance threshold."""
        if not self.enable_performance_logging:
            return False
        return duration_ms >= self.performance_log_threshold_ms

    def get_log_file_config(self) -> dict[str, Any]:
        """Get file logging configuration."""
        if not self.enable_file_logging:
            return {}

        config = {
            "filename": self.log_file_path,
            "mode": "a",
            "encoding": "utf-8",
        }

        if self.enable_log_rotation:
            config.update(
                {
                    "maxBytes": self.max_log_size_mb * 1024 * 1024,
                    "backupCount": self.backup_count,
                }
            )

        return config

    def get_performance_config(self) -> dict[str, Any]:
        """Get performance monitoring configuration."""
        return {
            "enabled": self.enable_performance_logging,
            "threshold_ms": self.performance_log_threshold_ms,
            "resource_tracking": self.enable_resource_tracking,
            "business_metrics": self.enable_business_metrics,
        }

    def get_debug_config(self) -> dict[str, Any]:
        """Get debug configuration."""
        return {
            "enabled": self.debug_enabled,
            "verbose_modules": self.get_debug_modules(),
            "log_request_response": self.log_request_response,
            "max_payload_length": self.max_payload_length,
        }


# Environment-specific configurations
class EnvironmentLogSettings:
    """Environment-specific logging configurations."""

    @staticmethod
    def development() -> LoggingSettings:
        """Development environment logging configuration."""
        return LoggingSettings(
            log_level="DEBUG",
            log_format="text",
            debug_enabled=True,
            log_request_response=True,
            enable_performance_logging=True,
            performance_log_threshold_ms=100.0,  # Lower threshold for development
            console_output="stdout",
            enable_file_logging=True,
            log_file_path="logs/dev_backtesting.log",
        )

    @staticmethod
    def testing() -> LoggingSettings:
        """Testing environment logging configuration."""
        return LoggingSettings(
            log_level="WARNING",
            log_format="text",
            debug_enabled=False,
            enable_performance_logging=False,
            enable_file_logging=False,
            console_output="stdout",
            enable_async_logging=False,  # Synchronous for tests
        )

    @staticmethod
    def production() -> LoggingSettings:
        """Production environment logging configuration."""
        return LoggingSettings(
            log_level="INFO",
            log_format="json",
            debug_enabled=False,
            log_request_response=False,
            enable_performance_logging=True,
            performance_log_threshold_ms=2000.0,  # Higher threshold for production
            console_output="stderr",
            enable_file_logging=True,
            log_file_path="/var/log/maverick/backtesting.log",
            enable_log_rotation=True,
            max_log_size_mb=50,  # Larger files in production
            backup_count=10,
            enable_remote_logging=True,  # Enable for log aggregation
        )


# Global logging settings instance
_logging_settings: LoggingSettings | None = None


def get_logging_settings() -> LoggingSettings:
    """Get global logging settings instance."""
    global _logging_settings

    if _logging_settings is None:
        environment = os.getenv("MAVERICK_ENVIRONMENT", "development").lower()

        if environment == "development":
            _logging_settings = EnvironmentLogSettings.development()
        elif environment == "testing":
            _logging_settings = EnvironmentLogSettings.testing()
        elif environment == "production":
            _logging_settings = EnvironmentLogSettings.production()
        else:
            # Default to environment variables
            _logging_settings = LoggingSettings.from_env()

        # Override with any environment variables that differ from the defaults
        defaults = LoggingSettings()
        env_overrides = LoggingSettings.from_env()
        for key, value in env_overrides.to_dict().items():
            if value is not None and value != getattr(defaults, key):
                setattr(_logging_settings, key, value)

        # Ensure log directory exists
        _logging_settings.ensure_log_directory()

    return _logging_settings


def configure_logging_for_environment(environment: str) -> LoggingSettings:
    """Configure logging for specific environment."""
    global _logging_settings

    if environment.lower() == "development":
        _logging_settings = EnvironmentLogSettings.development()
    elif environment.lower() == "testing":
        _logging_settings = EnvironmentLogSettings.testing()
    elif environment.lower() == "production":
        _logging_settings = EnvironmentLogSettings.production()
    else:
        raise ValueError(f"Unknown environment: {environment}")

    _logging_settings.ensure_log_directory()
    return _logging_settings


# Logging configuration validation
def validate_logging_settings(settings: LoggingSettings) -> list[str]:
    """Validate logging settings and return list of warnings/errors."""
    warnings = []

    # Validate log level
    valid_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
    if settings.log_level not in valid_levels:
        warnings.append(f"Invalid log level '{settings.log_level}', using INFO")

    # Validate log format
    valid_formats = ["json", "text"]
    if settings.log_format not in valid_formats:
        warnings.append(f"Invalid log format '{settings.log_format}', using json")

    # Validate console output
    valid_outputs = ["stdout", "stderr"]
    if settings.console_output not in valid_outputs:
        warnings.append(
            f"Invalid console output '{settings.console_output}', using stderr"
        )

    # Validate file logging
    if settings.enable_file_logging:
        try:
            log_path = Path(settings.log_file_path)
            log_path.parent.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            warnings.append(f"Cannot create log directory: {e}")

    # Validate performance settings
    if settings.performance_log_threshold_ms < 0:
        warnings.append("Performance threshold cannot be negative, using 1000ms")

    # Validate async settings
    if settings.async_log_queue_size < 100:
        warnings.append("Async log queue size too small, using 1000")

    return warnings

```
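
A short sketch of typical consumption (the override value is illustrative; because `get_logging_settings()` caches its result globally, environment variables must be in place before the first call):

```python
import os

from maverick_mcp.config.logging_settings import (
    get_logging_settings,
    validate_logging_settings,
)

# The base profile comes from MAVERICK_ENVIRONMENT; individual env vars then
# override any field that differs from the stock defaults.
os.environ["MAVERICK_ENVIRONMENT"] = "development"
os.environ["MAVERICK_LOG_LEVEL"] = "WARNING"

settings = get_logging_settings()
assert settings.log_level == "WARNING"
for warning in validate_logging_settings(settings):
    print(f"logging config warning: {warning}")
```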

--------------------------------------------------------------------------------
/maverick_mcp/exceptions.py:
--------------------------------------------------------------------------------

```python
"""
Custom exception classes for MaverickMCP with comprehensive error handling.

This module provides a unified exception hierarchy with proper error codes,
HTTP status codes, and standardized error responses.
"""

from typing import Any


class MaverickException(Exception):
    """Base exception for all Maverick errors."""

    # Default values can be overridden by subclasses
    error_code: str = "INTERNAL_ERROR"
    status_code: int = 500

    def __init__(
        self,
        message: str,
        error_code: str | None = None,
        status_code: int | None = None,
        field: str | None = None,
        context: dict[str, Any] | None = None,
        recoverable: bool = True,
    ):
        super().__init__(message)
        self.message = message
        self.error_code = error_code or self.__class__.error_code
        self.status_code = status_code or self.__class__.status_code
        self.field = field
        self.context = context or {}
        self.recoverable = recoverable

    def to_dict(self) -> dict[str, Any]:
        """Convert exception to dictionary for API responses."""
        result: dict[str, Any] = {
            "code": self.error_code,
            "message": self.message,
        }
        if self.field:
            result["field"] = self.field
        if self.context:
            result["context"] = self.context
        return result

    def __repr__(self) -> str:
        """String representation of the exception."""
        return f"{self.__class__.__name__}('{self.message}', code='{self.error_code}')"


# Validation exceptions
class ValidationError(MaverickException):
    """Raised when input validation fails."""

    error_code = "VALIDATION_ERROR"
    status_code = 400


# Research and agent exceptions
class ResearchError(MaverickException):
    """Raised when research operations fail."""

    error_code = "RESEARCH_ERROR"
    status_code = 500

    def __init__(
        self,
        message: str,
        research_type: str | None = None,
        provider: str | None = None,
        error_code: str | None = None,
        status_code: int | None = None,
        field: str | None = None,
        context: dict[str, Any] | None = None,
        recoverable: bool = True,
    ):
        super().__init__(
            message=message,
            error_code=error_code,
            status_code=status_code,
            field=field,
            context=context,
            recoverable=recoverable,
        )
        self.research_type = research_type
        self.provider = provider

    def to_dict(self) -> dict[str, Any]:
        """Convert exception to dictionary for API responses."""
        result = super().to_dict()
        if self.research_type:
            result["research_type"] = self.research_type
        if self.provider:
            result["provider"] = self.provider
        return result


class WebSearchError(ResearchError):
    """Raised when web search operations fail."""

    error_code = "WEB_SEARCH_ERROR"


class ContentAnalysisError(ResearchError):
    """Raised when content analysis fails."""

    error_code = "CONTENT_ANALYSIS_ERROR"


class AgentExecutionError(MaverickException):
    """Raised when agent execution fails."""

    error_code = "AGENT_EXECUTION_ERROR"
    status_code = 500


# Authentication/Authorization exceptions
class AuthenticationError(MaverickException):
    """Raised when authentication fails."""

    error_code = "AUTHENTICATION_ERROR"
    status_code = 401

    def __init__(self, message: str = "Authentication failed", **kwargs):
        super().__init__(message, **kwargs)


class AuthorizationError(MaverickException):
    """Raised when authorization fails."""

    error_code = "AUTHORIZATION_ERROR"
    status_code = 403

    def __init__(
        self,
        message: str = "Insufficient permissions",
        resource: str | None = None,
        action: str | None = None,
        **kwargs,
    ):
        if resource and action:
            message = f"Unauthorized access to {resource} for action '{action}'"
        super().__init__(message, **kwargs)
        if resource:
            self.context["resource"] = resource
        if action:
            self.context["action"] = action


# Resource exceptions
class NotFoundError(MaverickException):
    """Raised when a requested resource is not found."""

    error_code = "NOT_FOUND"
    status_code = 404

    def __init__(self, resource: str, identifier: str | None = None, **kwargs):
        message = f"{resource} not found"
        if identifier:
            message += f": {identifier}"
        super().__init__(message, **kwargs)
        self.context["resource"] = resource
        if identifier:
            self.context["identifier"] = identifier


class ConflictError(MaverickException):
    """Raised when there's a conflict with existing data."""

    error_code = "CONFLICT"
    status_code = 409

    def __init__(self, message: str, field: str | None = None, **kwargs):
        super().__init__(message, field=field, **kwargs)


# Rate limiting exceptions
class RateLimitError(MaverickException):
    """Raised when rate limit is exceeded."""

    error_code = "RATE_LIMIT_EXCEEDED"
    status_code = 429

    def __init__(
        self,
        message: str = "Rate limit exceeded",
        retry_after: int | None = None,
        **kwargs,
    ):
        super().__init__(message, **kwargs)
        if retry_after:
            self.context["retry_after"] = retry_after


# External service exceptions
class ExternalServiceError(MaverickException):
    """Raised when an external service fails."""

    error_code = "EXTERNAL_SERVICE_ERROR"
    status_code = 503

    def __init__(
        self, service: str, message: str, original_error: str | None = None, **kwargs
    ):
        super().__init__(message, **kwargs)
        self.context["service"] = service
        if original_error:
            self.context["original_error"] = original_error


# Data provider exceptions
class DataProviderError(MaverickException):
    """Base exception for data provider errors."""

    error_code = "DATA_PROVIDER_ERROR"
    status_code = 503

    def __init__(self, provider: str, message: str, **kwargs):
        super().__init__(message, **kwargs)
        self.context["provider"] = provider


class DataNotFoundError(DataProviderError):
    """Raised when requested data is not found."""

    error_code = "DATA_NOT_FOUND"
    status_code = 404

    def __init__(self, symbol: str, date_range: tuple | None = None, **kwargs):
        message = f"Data not found for symbol '{symbol}'"
        if date_range:
            message += f" in range {date_range[0]} to {date_range[1]}"
        super().__init__("cache", message, **kwargs)
        self.context["symbol"] = symbol
        if date_range:
            self.context["date_range"] = date_range


class APIRateLimitError(DataProviderError):
    """Raised when API rate limit is exceeded."""

    error_code = "RATE_LIMIT_EXCEEDED"
    status_code = 429

    def __init__(self, provider: str, retry_after: int | None = None, **kwargs):
        message = f"Rate limit exceeded for {provider}"
        if retry_after:
            message += f". Retry after {retry_after} seconds"
        super().__init__(provider, message, recoverable=True, **kwargs)
        if retry_after:
            self.context["retry_after"] = retry_after


class APIConnectionError(DataProviderError):
    """Raised when API connection fails."""

    error_code = "API_CONNECTION_ERROR"
    status_code = 503

    def __init__(self, provider: str, endpoint: str, reason: str, **kwargs):
        message = f"Failed to connect to {provider} at {endpoint}: {reason}"
        super().__init__(provider, message, recoverable=True, **kwargs)
        self.context["endpoint"] = endpoint
        self.context["connection_reason"] = reason


# Database exceptions
class DatabaseError(MaverickException):
    """Base exception for database errors."""

    error_code = "DATABASE_ERROR"
    status_code = 500

    def __init__(self, operation: str, message: str, **kwargs):
        super().__init__(message, **kwargs)
        self.context["operation"] = operation


class DatabaseConnectionError(DatabaseError):
    """Raised when database connection fails."""

    error_code = "DATABASE_CONNECTION_ERROR"
    status_code = 503

    def __init__(self, reason: str, **kwargs):
        message = f"Database connection failed: {reason}"
        super().__init__("connect", message, recoverable=True, **kwargs)


class DataIntegrityError(DatabaseError):
    """Raised when data integrity check fails."""

    error_code = "DATA_INTEGRITY_ERROR"
    status_code = 422

    def __init__(
        self,
        message: str,
        table: str | None = None,
        constraint: str | None = None,
        **kwargs,
    ):
        super().__init__("integrity_check", message, recoverable=False, **kwargs)
        if table:
            self.context["table"] = table
        if constraint:
            self.context["constraint"] = constraint


# Cache exceptions
class CacheError(MaverickException):
    """Base exception for cache errors."""

    error_code = "CACHE_ERROR"
    status_code = 503

    def __init__(self, operation: str, message: str, **kwargs):
        super().__init__(message, **kwargs)
        self.context["operation"] = operation


class CacheConnectionError(CacheError):
    """Raised when cache connection fails."""

    error_code = "CACHE_CONNECTION_ERROR"
    status_code = 503

    def __init__(self, cache_type: str, reason: str, **kwargs):
        message = f"{cache_type} cache connection failed: {reason}"
        super().__init__("connect", message, recoverable=True, **kwargs)
        self.context["cache_type"] = cache_type


# Configuration exceptions
class ConfigurationError(MaverickException):
    """Raised when there's a configuration problem."""

    error_code = "CONFIGURATION_ERROR"
    status_code = 500

    def __init__(self, message: str, config_key: str | None = None, **kwargs):
        super().__init__(message, **kwargs)
        if config_key:
            self.context["config_key"] = config_key


# Webhook exceptions
class WebhookError(MaverickException):
    """Raised when webhook processing fails."""

    error_code = "WEBHOOK_ERROR"
    status_code = 400

    def __init__(
        self,
        message: str,
        event_type: str | None = None,
        event_id: str | None = None,
        **kwargs,
    ):
        super().__init__(message, **kwargs)
        if event_type:
            self.context["event_type"] = event_type
        if event_id:
            self.context["event_id"] = event_id


# Agent-specific exceptions
class AgentInitializationError(MaverickException):
    """Raised when agent initialization fails."""

    error_code = "AGENT_INIT_ERROR"
    status_code = 500

    def __init__(self, agent_type: str, reason: str, **kwargs):
        message = f"Failed to initialize {agent_type}: {reason}"
        super().__init__(message, **kwargs)
        self.context["agent_type"] = agent_type
        self.context["reason"] = reason


class PersonaConfigurationError(MaverickException):
    """Raised when persona configuration is invalid."""

    error_code = "PERSONA_CONFIG_ERROR"
    status_code = 400

    def __init__(self, persona: str, valid_personas: list, **kwargs):
        message = (
            f"Invalid persona '{persona}'. Valid options: {', '.join(valid_personas)}"
        )
        super().__init__(message, **kwargs)
        self.context["invalid_persona"] = persona
        self.context["valid_personas"] = valid_personas


class ToolRegistrationError(MaverickException):
    """Raised when tool registration fails."""

    error_code = "TOOL_REGISTRATION_ERROR"
    status_code = 500

    def __init__(self, tool_name: str, reason: str, **kwargs):
        message = f"Failed to register tool '{tool_name}': {reason}"
        super().__init__(message, **kwargs)
        self.context["tool_name"] = tool_name
        self.context["reason"] = reason


# Circuit breaker exceptions
class CircuitBreakerError(MaverickException):
    """Raised when circuit breaker is open."""

    error_code = "CIRCUIT_BREAKER_OPEN"
    status_code = 503

    def __init__(self, service: str, failure_count: int, threshold: int, **kwargs):
        message = (
            f"Circuit breaker open for {service}: {failure_count}/{threshold} failures"
        )
        super().__init__(message, recoverable=True, **kwargs)
        self.context["service"] = service
        self.context["failure_count"] = failure_count
        self.context["threshold"] = threshold


# Parameter validation exceptions
class ParameterValidationError(ValidationError):
    """Raised when function parameters are invalid."""

    error_code = "PARAMETER_VALIDATION_ERROR"
    status_code = 400

    def __init__(self, param_name: str, expected_type: str, actual_type: str, **kwargs):
        reason = f"Expected {expected_type}, got {actual_type}"
        message = f"Validation failed for '{param_name}': {reason}"
        super().__init__(message, field=param_name, **kwargs)
        self.context["expected_type"] = expected_type
        self.context["actual_type"] = actual_type


# Error code constants
ERROR_CODES = {
    "VALIDATION_ERROR": "Request validation failed",
    "AUTHENTICATION_ERROR": "Authentication failed",
    "AUTHORIZATION_ERROR": "Insufficient permissions",
    "NOT_FOUND": "Resource not found",
    "CONFLICT": "Resource conflict",
    "RATE_LIMIT_EXCEEDED": "Too many requests",
    "EXTERNAL_SERVICE_ERROR": "External service unavailable",
    "DATA_PROVIDER_ERROR": "Data provider error",
    "DATA_NOT_FOUND": "Data not found",
    "API_CONNECTION_ERROR": "API connection failed",
    "DATABASE_ERROR": "Database error",
    "DATABASE_CONNECTION_ERROR": "Database connection failed",
    "DATA_INTEGRITY_ERROR": "Data integrity violation",
    "CACHE_ERROR": "Cache error",
    "CACHE_CONNECTION_ERROR": "Cache connection failed",
    "CONFIGURATION_ERROR": "Configuration error",
    "WEBHOOK_ERROR": "Webhook processing failed",
    "AGENT_INIT_ERROR": "Agent initialization failed",
    "PERSONA_CONFIG_ERROR": "Invalid persona configuration",
    "TOOL_REGISTRATION_ERROR": "Tool registration failed",
    "CIRCUIT_BREAKER_OPEN": "Service unavailable - circuit breaker open",
    "PARAMETER_VALIDATION_ERROR": "Invalid parameter",
    "INTERNAL_ERROR": "Internal server error",
}


def get_error_message(code: str) -> str:
    """Get human-readable message for error code."""
    return ERROR_CODES.get(code, "Unknown error")


# Backward compatibility alias
MaverickMCPError = MaverickException

```
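
A brief sketch of the raise-and-serialize flow these classes are designed for (the symbol and date range are illustrative):

```python
from maverick_mcp.exceptions import DataNotFoundError, get_error_message

try:
    raise DataNotFoundError("AAPL", date_range=("2024-01-01", "2024-01-31"))
except DataNotFoundError as exc:
    payload = exc.to_dict()
    # payload["code"] == "DATA_NOT_FOUND"; context carries provider, symbol, range
    print(exc.status_code, payload["message"], get_error_message(payload["code"]))
```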

--------------------------------------------------------------------------------
/maverick_mcp/utils/logging.py:
--------------------------------------------------------------------------------

```python
"""
Structured logging with request context for MaverickMCP.

This module provides structured logging capabilities that:
- Capture request context (request ID, user, tool name)
- Track performance metrics (duration, memory usage)
- Support JSON output for log aggregation
- Integrate with FastMCP's context system
"""

import functools
import json
import logging
import sys
import time
import traceback
import uuid
from collections.abc import Callable
from contextvars import ContextVar
from datetime import UTC, datetime
from typing import Any

import psutil
from fastmcp import Context as MCPContext

# Context variables for request tracking
request_id_var: ContextVar[str | None] = ContextVar("request_id", default=None)  # type: ignore[assignment]
user_id_var: ContextVar[str | None] = ContextVar("user_id", default=None)  # type: ignore[assignment]
tool_name_var: ContextVar[str | None] = ContextVar("tool_name", default=None)  # type: ignore[assignment]
request_start_var: ContextVar[float | None] = ContextVar("request_start", default=None)  # type: ignore[assignment]


class StructuredFormatter(logging.Formatter):
    """Custom formatter that outputs structured JSON logs."""

    def format(self, record: logging.LogRecord) -> str:
        """Format log record as structured JSON."""
        # Base log data
        log_data = {
            "timestamp": datetime.now(UTC).isoformat(),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
            "module": record.module,
            "function": record.funcName,
            "line": record.lineno,
        }

        # Add request context if available
        request_id = request_id_var.get()
        if request_id:
            log_data["request_id"] = request_id

        user_id = user_id_var.get()
        if user_id:
            log_data["user_id"] = user_id

        tool_name = tool_name_var.get()
        if tool_name:
            log_data["tool_name"] = tool_name

        # Add request duration if available
        request_start = request_start_var.get()
        if request_start:
            log_data["duration_ms"] = int((time.time() - request_start) * 1000)

        # Add exception info if present
        if record.exc_info:
            log_data["exception"] = {
                "type": record.exc_info[0].__name__
                if record.exc_info[0]
                else "Unknown",
                "message": str(record.exc_info[1]),
                "traceback": traceback.format_exception(*record.exc_info),
            }

        # Add any extra fields
        for key, value in record.__dict__.items():
            if key not in [
                "name",
                "msg",
                "args",
                "created",
                "filename",
                "funcName",
                "levelname",
                "levelno",
                "lineno",
                "module",
                "msecs",
                "pathname",
                "process",
                "processName",
                "relativeCreated",
                "thread",
                "threadName",
                "exc_info",
                "exc_text",
                "stack_info",
            ]:
                log_data[key] = value

        # default=str guards against non-JSON-serializable values in extra fields
        return json.dumps(log_data, default=str)


class RequestContextLogger:
    """Logger that automatically includes request context."""

    def __init__(self, logger: logging.Logger):
        self.logger = logger

    def _log_with_context(self, level: int, msg: str, *args, **kwargs):
        """Log with additional context fields."""
        extra = kwargs.get("extra", {})

        # Add performance metrics; interval=None keeps cpu_percent non-blocking
        # (a positive interval would sleep that long on every log call)
        process = psutil.Process()
        extra["memory_mb"] = process.memory_info().rss / 1024 / 1024
        extra["cpu_percent"] = process.cpu_percent(interval=None)

        kwargs["extra"] = extra
        self.logger.log(level, msg, *args, **kwargs)

    def debug(self, msg: str, *args, **kwargs):
        self._log_with_context(logging.DEBUG, msg, *args, **kwargs)

    def info(self, msg: str, *args, **kwargs):
        self._log_with_context(logging.INFO, msg, *args, **kwargs)

    def warning(self, msg: str, *args, **kwargs):
        self._log_with_context(logging.WARNING, msg, *args, **kwargs)

    def error(self, msg: str, *args, **kwargs):
        self._log_with_context(logging.ERROR, msg, *args, **kwargs)

    def critical(self, msg: str, *args, **kwargs):
        self._log_with_context(logging.CRITICAL, msg, *args, **kwargs)


def setup_structured_logging(
    log_level: str = "INFO",
    log_format: str = "json",
    log_file: str | None = None,
    use_stderr: bool = False,
) -> None:
    """
    Set up structured logging for the application.

    Args:
        log_level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        log_format: Output format ("json" or "text")
        log_file: Optional log file path
        use_stderr: If True, send console logs to stderr instead of stdout
    """
    # Configure warnings filter to suppress known deprecation warnings
    import warnings

    # Suppress pandas_ta pkg_resources deprecation warning
    warnings.filterwarnings(
        "ignore",
        message="pkg_resources is deprecated as an API.*",
        category=UserWarning,
        module="pandas_ta.*",
    )

    # Suppress passlib crypt deprecation warning
    warnings.filterwarnings(
        "ignore",
        message="'crypt' is deprecated and slated for removal.*",
        category=DeprecationWarning,
        module="passlib.*",
    )

    # Suppress LangChain Pydantic v1 deprecation warnings
    warnings.filterwarnings(
        "ignore",
        message=".*pydantic.* is deprecated.*",
        category=DeprecationWarning,
        module="langchain.*",
    )

    # Suppress Starlette cookie deprecation warnings
    warnings.filterwarnings(
        "ignore",
        message=".*cookie.*deprecated.*",
        category=DeprecationWarning,
        module="starlette.*",
    )

    root_logger = logging.getLogger()
    root_logger.setLevel(getattr(logging, log_level.upper()))

    # Remove existing handlers
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)

    # Console handler - use stderr for stdio transport to avoid interfering with JSON-RPC
    console_handler = logging.StreamHandler(sys.stderr if use_stderr else sys.stdout)

    if log_format == "json":
        console_handler.setFormatter(StructuredFormatter())
    else:
        console_handler.setFormatter(
            logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        )

    root_logger.addHandler(console_handler)

    # File handler if specified
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(StructuredFormatter())
        root_logger.addHandler(file_handler)


def get_logger(name: str) -> RequestContextLogger:
    """Get a logger with request context support."""
    return RequestContextLogger(logging.getLogger(name))
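
# Typical bootstrap (values are illustrative): emit JSON logs on stderr so a
# stdio transport's JSON-RPC stream on stdout stays clean, then obtain a
# context-aware logger that attaches memory/CPU metrics to every record:
#
#     setup_structured_logging(log_level="INFO", log_format="json", use_stderr=True)
#     logger = get_logger("maverick_mcp.example")
#     logger.info("started", extra={"component": "demo"})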


def log_tool_execution(func: Callable) -> Callable:
    """
    Decorator to log tool execution with context.

    Automatically captures:
    - Tool name
    - Request ID
    - Execution time
    - Success/failure status
    - Input parameters (sanitized)
    """

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        # Generate request ID
        request_id = str(uuid.uuid4())
        request_id_var.set(request_id)

        # Set tool name
        tool_name = getattr(func, "__name__", "unknown_function")
        tool_name_var.set(tool_name)

        # Set start time
        start_time = time.time()
        request_start_var.set(start_time)

        # Get logger
        logger = get_logger(f"maverick_mcp.tools.{tool_name}")

        # Sanitize parameters for logging (hide sensitive data)
        safe_kwargs = _sanitize_params(kwargs)

        logger.info(
            "Tool execution started",
            extra={
                "tool_name": tool_name,
                "request_id": request_id,
                "parameters": safe_kwargs,
            },
        )

        try:
            # Execute the tool
            result = await func(*args, **kwargs)

            # Log success
            duration_ms = int((time.time() - start_time) * 1000)
            logger.info(
                "Tool execution completed successfully",
                extra={
                    "tool_name": tool_name,
                    "request_id": request_id,
                    "duration_ms": duration_ms,
                    "status": "success",
                },
            )

            return result

        except Exception as e:
            # Log error
            duration_ms = int((time.time() - start_time) * 1000)
            logger.error(
                f"Tool execution failed: {str(e)}",
                exc_info=True,
                extra={
                    "tool_name": tool_name,
                    "request_id": request_id,
                    "duration_ms": duration_ms,
                    "status": "error",
                    "error_type": type(e).__name__,
                },
            )
            raise

        finally:
            # Clear context vars
            request_id_var.set(None)
            tool_name_var.set(None)
            request_start_var.set(None)

    return wrapper
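
# Usage sketch (the tool below is hypothetical, not part of this module): each
# invocation gets a fresh request_id, paired start/completion log records, and
# duration_ms plus error details on failure:
#
#     @log_tool_execution
#     async def fetch_quote(symbol: str) -> dict:
#         ...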


def _sanitize_params(params: dict[str, Any]) -> dict[str, Any]:
    """
    Sanitize parameters for logging by hiding sensitive data.

    Args:
        params: Original parameters

    Returns:
        Sanitized parameters safe for logging
    """
    sensitive_keys = {"password", "api_key", "secret", "token", "auth"}
    sanitized = {}

    for key, value in params.items():
        if any(sensitive in key.lower() for sensitive in sensitive_keys):
            sanitized[key] = "***REDACTED***"
        elif isinstance(value, dict):
            sanitized[key] = _sanitize_params(value)
        elif isinstance(value, list) and len(value) > 10:
            # Truncate long lists
            sanitized[key] = f"[{len(value)} items]"
        elif isinstance(value, str) and len(value) > 1000:
            # Truncate long strings
            sanitized[key] = value[:100] + f"... ({len(value)} chars total)"
        else:
            sanitized[key] = value

    return sanitized
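
# Worked example (illustrative): given
#     {"api_key": "sk-123", "symbols": ["AAPL"] * 25, "note": "x" * 2000}
# _sanitize_params returns
#     {"api_key": "***REDACTED***",
#      "symbols": "[25 items]",
#      "note": "xxx...x... (2000 chars total)"}  # first 100 chars kept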


def log_database_query(
    query: str, params: dict | None = None, duration_ms: int | None = None
):
    """Log database query execution."""
    logger = get_logger("maverick_mcp.database")

    extra = {"query_type": _get_query_type(query), "query_length": len(query)}

    if duration_ms is not None:
        extra["duration_ms"] = duration_ms
        extra["slow_query"] = duration_ms > 1000  # Mark queries over 1 second as slow

    if params:
        extra["param_count"] = len(params)

    logger.info("Database query executed", extra=extra)

    # Log the actual query at debug level
    logger.debug(
        f"Query details: {query[:200]}..."
        if len(query) > 200
        else f"Query details: {query}",
        extra={"params": _sanitize_params(params) if params else None},
    )


def _get_query_type(query: str) -> str:
    """Extract query type from SQL query."""
    query_upper = query.strip().upper()
    if query_upper.startswith("SELECT"):
        return "SELECT"
    elif query_upper.startswith("INSERT"):
        return "INSERT"
    elif query_upper.startswith("UPDATE"):
        return "UPDATE"
    elif query_upper.startswith("DELETE"):
        return "DELETE"
    elif query_upper.startswith("CREATE"):
        return "CREATE"
    elif query_upper.startswith("DROP"):
        return "DROP"
    else:
        return "OTHER"


def log_cache_operation(
    operation: str, key: str, hit: bool = False, duration_ms: int | None = None
):
    """Log cache operation."""
    logger = get_logger("maverick_mcp.cache")

    extra = {"operation": operation, "cache_key": key, "cache_hit": hit}

    if duration_ms is not None:
        extra["duration_ms"] = duration_ms

    logger.info(f"Cache {operation}: {'hit' if hit else 'miss'} for {key}", extra=extra)


def log_external_api_call(
    service: str,
    endpoint: str,
    method: str = "GET",
    status_code: int | None = None,
    duration_ms: int | None = None,
    error: str | None = None,
):
    """Log external API call."""
    logger = get_logger("maverick_mcp.external_api")

    extra: dict[str, Any] = {"service": service, "endpoint": endpoint, "method": method}

    if status_code is not None:
        extra["status_code"] = status_code
        extra["success"] = 200 <= status_code < 300

    if duration_ms is not None:
        extra["duration_ms"] = duration_ms

    if error:
        extra["error"] = error
        logger.error(
            f"External API call failed: {service} {method} {endpoint}", extra=extra
        )
    else:
        logger.info(f"External API call: {service} {method} {endpoint}", extra=extra)


# Performance monitoring context manager
class PerformanceMonitor:
    """Context manager for monitoring performance of code blocks."""

    def __init__(self, operation_name: str, logger: RequestContextLogger | None = None):
        self.operation_name = operation_name
        self.logger = logger or get_logger("maverick_mcp.performance")
        self.start_time: float | None = None
        self.start_memory: float | None = None

    def __enter__(self):
        self.start_time = time.time()
        process = psutil.Process()
        self.start_memory = process.memory_info().rss / 1024 / 1024
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        duration_ms = int((time.time() - (self.start_time or 0)) * 1000)
        process = psutil.Process()
        end_memory = process.memory_info().rss / 1024 / 1024
        memory_delta = end_memory - (self.start_memory or 0)

        extra = {
            "operation": self.operation_name,
            "duration_ms": duration_ms,
            "memory_delta_mb": round(memory_delta, 2),
            "success": exc_type is None,
        }

        if exc_type:
            extra["error_type"] = exc_type.__name__
            self.logger.error(
                f"Operation '{self.operation_name}' failed after {duration_ms}ms",
                extra=extra,
            )
        else:
            self.logger.info(
                f"Operation '{self.operation_name}' completed in {duration_ms}ms",
                extra=extra,
            )

```
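
A minimal usage sketch for the helpers above; the operation name and query text are illustrative, and the import path follows the `maverick_mcp.utils.logging` module that other routers in this repo already import from:

```python
from maverick_mcp.utils.logging import PerformanceMonitor, log_database_query

# Time a block and log its duration plus RSS memory delta; success is
# inferred from whether an exception escapes the with-block.
with PerformanceMonitor("nightly_screen_rebuild"):
    _ = [i * i for i in range(1_000_000)]  # stand-in for real work

# Queries over 1000 ms are tagged slow_query=True; params are sanitized
# before being logged at debug level.
log_database_query(
    "SELECT close FROM prices WHERE symbol = :symbol",
    params={"symbol": "AAPL"},
    duration_ms=42,
)
```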

--------------------------------------------------------------------------------
/tests/test_circuit_breaker.py:
--------------------------------------------------------------------------------

```python
"""
Comprehensive tests for the circuit breaker system.
"""

import asyncio
import time
from unittest.mock import patch

import pytest

from maverick_mcp.exceptions import CircuitBreakerError, ExternalServiceError
from maverick_mcp.utils.circuit_breaker import (
    CircuitBreakerConfig,
    CircuitBreakerMetrics,
    CircuitState,
    EnhancedCircuitBreaker,
    FailureDetectionStrategy,
    circuit_breaker,
    get_all_circuit_breakers,
    get_circuit_breaker,
    get_circuit_breaker_status,
    reset_all_circuit_breakers,
)


class TestCircuitBreakerMetrics:
    """Test circuit breaker metrics collection."""

    def test_metrics_initialization(self):
        """Test metrics are initialized correctly."""
        metrics = CircuitBreakerMetrics(window_size=10)
        stats = metrics.get_stats()

        assert stats["total_calls"] == 0
        assert stats["success_rate"] == 1.0
        assert stats["failure_rate"] == 0.0
        assert stats["avg_duration"] == 0.0
        assert stats["timeout_rate"] == 0.0

    def test_record_successful_call(self):
        """Test recording successful calls."""
        metrics = CircuitBreakerMetrics()

        metrics.record_call(True, 0.5)
        metrics.record_call(True, 1.0)

        stats = metrics.get_stats()
        assert stats["total_calls"] == 2
        assert stats["success_rate"] == 1.0
        assert stats["failure_rate"] == 0.0
        assert stats["avg_duration"] == 0.75

    def test_record_failed_call(self):
        """Test recording failed calls."""
        metrics = CircuitBreakerMetrics()

        metrics.record_call(False, 2.0)
        metrics.record_call(True, 1.0)

        stats = metrics.get_stats()
        assert stats["total_calls"] == 2
        assert stats["success_rate"] == 0.5
        assert stats["failure_rate"] == 0.5
        assert stats["avg_duration"] == 1.5

    def test_window_cleanup(self):
        """Test old data is cleaned up."""
        metrics = CircuitBreakerMetrics(window_size=1)  # 1 second window

        metrics.record_call(True, 0.5)
        time.sleep(1.1)  # Wait for window to expire
        metrics.record_call(True, 1.0)

        stats = metrics.get_stats()
        assert stats["total_calls"] == 1  # Old call should be removed


class TestEnhancedCircuitBreaker:
    """Test enhanced circuit breaker functionality."""

    def test_circuit_breaker_initialization(self):
        """Test circuit breaker is initialized correctly."""
        config = CircuitBreakerConfig(
            name="test",
            failure_threshold=3,
            recovery_timeout=5,
        )
        breaker = EnhancedCircuitBreaker(config)

        assert breaker.state == CircuitState.CLOSED
        assert breaker.is_closed
        assert not breaker.is_open

    def test_consecutive_failures_opens_circuit(self):
        """Test circuit opens after consecutive failures."""
        config = CircuitBreakerConfig(
            name="test",
            failure_threshold=3,
            detection_strategy=FailureDetectionStrategy.CONSECUTIVE_FAILURES,
        )
        breaker = EnhancedCircuitBreaker(config)

        # Fail 3 times
        for _ in range(3):
            try:
                breaker.call_sync(lambda: 1 / 0)
            except ZeroDivisionError:
                pass

        assert breaker.state == CircuitState.OPEN
        assert breaker.is_open

    def test_failure_rate_opens_circuit(self):
        """Test circuit opens based on failure rate."""
        config = CircuitBreakerConfig(
            name="test",
            failure_rate_threshold=0.5,
            detection_strategy=FailureDetectionStrategy.FAILURE_RATE,
        )
        breaker = EnhancedCircuitBreaker(config)

        # Need minimum calls for rate calculation
        for i in range(10):
            try:
                if i % 2 == 0:  # 50% failure rate
                    breaker.call_sync(lambda: 1 / 0)
                else:
                    breaker.call_sync(lambda: "success")
            except (ZeroDivisionError, CircuitBreakerError):
                pass

        stats = breaker._metrics.get_stats()
        assert stats["failure_rate"] >= 0.5
        assert breaker.state == CircuitState.OPEN

    def test_circuit_breaker_blocks_calls_when_open(self):
        """Test circuit breaker blocks calls when open."""
        config = CircuitBreakerConfig(
            name="test",
            failure_threshold=1,
            recovery_timeout=60,
        )
        breaker = EnhancedCircuitBreaker(config)

        # Open the circuit
        try:
            breaker.call_sync(lambda: 1 / 0)
        except ZeroDivisionError:
            pass

        # Next call should be blocked
        with pytest.raises(CircuitBreakerError) as exc_info:
            breaker.call_sync(lambda: "success")

        assert "Circuit breaker open for test:" in str(exc_info.value)
        assert exc_info.value.context["state"] == "open"

    def test_circuit_breaker_recovery(self):
        """Test circuit breaker recovery to half-open then closed."""
        config = CircuitBreakerConfig(
            name="test",
            failure_threshold=1,
            recovery_timeout=1,  # 1 second
            success_threshold=2,
        )
        breaker = EnhancedCircuitBreaker(config)

        # Open the circuit
        try:
            breaker.call_sync(lambda: 1 / 0)
        except ZeroDivisionError:
            pass

        assert breaker.state == CircuitState.OPEN

        # Wait for recovery timeout
        time.sleep(1.1)

        # First successful call should move to half-open
        result = breaker.call_sync(lambda: "success1")
        assert result == "success1"
        assert breaker.state == CircuitState.HALF_OPEN

        # Second successful call should close the circuit
        result = breaker.call_sync(lambda: "success2")
        assert result == "success2"
        assert breaker.state == CircuitState.CLOSED

    def test_half_open_failure_reopens(self):
        """Test failure in half-open state reopens circuit."""
        config = CircuitBreakerConfig(
            name="test",
            failure_threshold=1,
            recovery_timeout=1,
        )
        breaker = EnhancedCircuitBreaker(config)

        # Open the circuit
        try:
            breaker.call_sync(lambda: 1 / 0)
        except ZeroDivisionError:
            pass

        # Wait for recovery
        time.sleep(1.1)

        # Fail in half-open state
        try:
            breaker.call_sync(lambda: 1 / 0)
        except ZeroDivisionError:
            pass

        assert breaker.state == CircuitState.OPEN

    def test_manual_reset(self):
        """Test manual circuit breaker reset."""
        config = CircuitBreakerConfig(
            name="test",
            failure_threshold=1,
        )
        breaker = EnhancedCircuitBreaker(config)

        # Open the circuit
        try:
            breaker.call_sync(lambda: 1 / 0)
        except ZeroDivisionError:
            pass

        assert breaker.state == CircuitState.OPEN

        # Manual reset
        breaker.reset()
        assert breaker.state == CircuitState.CLOSED
        assert breaker._consecutive_failures == 0

    @pytest.mark.asyncio
    async def test_async_circuit_breaker(self):
        """Test circuit breaker with async functions."""
        config = CircuitBreakerConfig(
            name="test_async",
            failure_threshold=2,
        )
        breaker = EnhancedCircuitBreaker(config)

        async def failing_func():
            raise ValueError("Async failure")

        async def success_func():
            return "async success"

        # Test failures
        for _ in range(2):
            with pytest.raises(ValueError):
                await breaker.call_async(failing_func)

        assert breaker.state == CircuitState.OPEN

        # Test blocking
        with pytest.raises(CircuitBreakerError):
            await breaker.call_async(success_func)

    @pytest.mark.asyncio
    async def test_async_timeout(self):
        """Test async timeout handling."""
        config = CircuitBreakerConfig(
            name="test_timeout",
            timeout_threshold=0.1,  # 100ms
            failure_threshold=1,
        )
        breaker = EnhancedCircuitBreaker(config)

        async def slow_func():
            await asyncio.sleep(0.5)  # 500ms
            return "done"

        with pytest.raises(ExternalServiceError) as exc_info:
            await breaker.call_async(slow_func)

        assert "timed out" in str(exc_info.value)
        assert breaker.state == CircuitState.OPEN


class TestCircuitBreakerDecorator:
    """Test circuit breaker decorator functionality."""

    def test_sync_decorator(self):
        """Test decorator with sync function."""
        call_count = 0

        @circuit_breaker(name="test_decorator", failure_threshold=2)
        def test_func(should_fail=False):
            nonlocal call_count
            call_count += 1
            if should_fail:
                raise ValueError("Test failure")
            return "success"

        # Successful calls
        assert test_func() == "success"
        assert test_func() == "success"

        # Failures
        for _ in range(2):
            with pytest.raises(ValueError):
                test_func(should_fail=True)

        # Circuit should be open
        with pytest.raises(CircuitBreakerError):
            test_func()

    @pytest.mark.asyncio
    async def test_async_decorator(self):
        """Test decorator with async function."""

        @circuit_breaker(name="test_async_decorator", failure_threshold=1)
        async def async_test_func(should_fail=False):
            if should_fail:
                raise ValueError("Async test failure")
            return "async success"

        # Success
        result = await async_test_func()
        assert result == "async success"

        # Failure
        with pytest.raises(ValueError):
            await async_test_func(should_fail=True)

        # Circuit open
        with pytest.raises(CircuitBreakerError):
            await async_test_func()


class TestCircuitBreakerRegistry:
    """Test global circuit breaker registry."""

    def test_get_circuit_breaker(self):
        """Test getting circuit breaker by name."""

        # Create a breaker via decorator
        @circuit_breaker(name="registry_test")
        def test_func():
            return "test"

        # Call to initialize
        test_func()

        # Get from registry
        breaker = get_circuit_breaker("registry_test")
        assert breaker is not None
        assert breaker.config.name == "registry_test"

    def test_get_all_circuit_breakers(self):
        """Test getting all circuit breakers."""
        # Clear existing (from other tests)
        from maverick_mcp.utils.circuit_breaker import _breakers

        _breakers.clear()

        # Create multiple breakers
        @circuit_breaker(name="breaker1")
        def func1():
            pass

        @circuit_breaker(name="breaker2")
        def func2():
            pass

        # Initialize
        func1()
        func2()

        all_breakers = get_all_circuit_breakers()
        assert len(all_breakers) == 2
        assert "breaker1" in all_breakers
        assert "breaker2" in all_breakers

    def test_reset_all_circuit_breakers(self):
        """Test resetting all circuit breakers."""

        # Create and open a breaker
        @circuit_breaker(name="reset_test", failure_threshold=1)
        def failing_func():
            raise ValueError("Fail")

        with pytest.raises(ValueError):
            failing_func()

        breaker = get_circuit_breaker("reset_test")
        assert breaker.state == CircuitState.OPEN

        # Reset all
        reset_all_circuit_breakers()
        assert breaker.state == CircuitState.CLOSED

    def test_circuit_breaker_status(self):
        """Test getting status of all circuit breakers."""

        # Create a breaker
        @circuit_breaker(name="status_test")
        def test_func():
            return "test"

        test_func()

        status = get_circuit_breaker_status()
        assert "status_test" in status
        assert status["status_test"]["state"] == "closed"
        assert status["status_test"]["name"] == "status_test"


class TestServiceSpecificCircuitBreakers:
    """Test service-specific circuit breaker implementations."""

    def test_stock_data_circuit_breaker(self):
        """Test stock data circuit breaker with fallback."""
        from maverick_mcp.utils.circuit_breaker_services import StockDataCircuitBreaker

        breaker = StockDataCircuitBreaker()

        # Mock a failing function
        def failing_fetch(symbol, start, end):
            raise Exception("API Error")

        # Mock fallback data
        with patch.object(breaker.fallback_chain, "execute_sync") as mock_fallback:
            import pandas as pd

            mock_fallback.return_value = pd.DataFrame({"Close": [100, 101, 102]})

            # Should use fallback
            result = breaker.fetch_with_fallback(
                failing_fetch, "AAPL", "2024-01-01", "2024-01-31"
            )

            assert not result.empty
            assert len(result) == 3
            mock_fallback.assert_called_once()

    def test_market_data_circuit_breaker(self):
        """Test market data circuit breaker with fallback."""
        from maverick_mcp.utils.circuit_breaker_services import MarketDataCircuitBreaker

        breaker = MarketDataCircuitBreaker("finviz")

        # Mock failing function
        def failing_fetch(mover_type):
            raise Exception("Finviz Error")

        # Should return fallback
        result = breaker.fetch_with_fallback(failing_fetch, "gainers")

        assert isinstance(result, dict)
        assert "movers" in result
        assert result["movers"] == []
        assert result["metadata"]["is_fallback"] is True

    def test_economic_data_circuit_breaker(self):
        """Test economic data circuit breaker with fallback."""
        from maverick_mcp.utils.circuit_breaker_services import (
            EconomicDataCircuitBreaker,
        )

        breaker = EconomicDataCircuitBreaker()

        # Mock failing function
        def failing_fetch(series_id, start, end):
            raise Exception("FRED API Error")

        # Should return default values
        result = breaker.fetch_with_fallback(
            failing_fetch, "GDP", "2024-01-01", "2024-01-31"
        )

        import pandas as pd

        assert isinstance(result, pd.Series)
        assert result.attrs["is_fallback"] is True
        assert all(result == 2.5)  # Default GDP value

```
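
For orientation, a minimal sketch (not from the repo) of the decorator pattern these tests exercise, using only names the test file itself imports; the function name and threshold are illustrative:

```python
from maverick_mcp.exceptions import CircuitBreakerError
from maverick_mcp.utils.circuit_breaker import circuit_breaker

@circuit_breaker(name="quotes_api", failure_threshold=3)
def fetch_quote(symbol: str) -> dict:
    # Stand-in for a flaky external call.
    raise ConnectionError(f"{symbol}: upstream unavailable")

for attempt in range(4):
    try:
        fetch_quote("AAPL")
    except ConnectionError:
        pass  # attempts 1-3 fail and are counted by the breaker
    except CircuitBreakerError:
        print("open: call short-circuited without reaching fetch_quote")
```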

--------------------------------------------------------------------------------
/maverick_mcp/api/routers/news_sentiment_enhanced.py:
--------------------------------------------------------------------------------

```python
"""
Enhanced news sentiment analysis using Tiingo News API or LLM-based analysis.

This module provides reliable news sentiment analysis by:
1. Using Tiingo's get_news method (if available)
2. Falling back to LLM-based sentiment analysis using existing research tools
3. Never relying on undefined EXTERNAL_DATA_API_KEY
"""

import asyncio
import logging
import os
import uuid
from datetime import datetime, timedelta
from typing import Any

from tiingo import TiingoClient

from maverick_mcp.api.middleware.mcp_logging import get_tool_logger
from maverick_mcp.config.settings import get_settings

logger = logging.getLogger(__name__)
settings = get_settings()


def get_tiingo_client() -> TiingoClient | None:
    """Get or create Tiingo client if API key is available."""
    api_key = os.getenv("TIINGO_API_KEY")
    if api_key:
        try:
            config = {"session": True, "api_key": api_key}
            return TiingoClient(config)
        except Exception as e:
            logger.warning(f"Failed to initialize Tiingo client: {e}")
    return None


def get_llm():
    """Get LLM for sentiment analysis (optimized for speed)."""
    from maverick_mcp.providers.llm_factory import get_llm as get_llm_factory
    from maverick_mcp.providers.openrouter_provider import TaskType

    # Use sentiment analysis task type with fast preference
    return get_llm_factory(
        task_type=TaskType.SENTIMENT_ANALYSIS, prefer_fast=True, prefer_cheap=True
    )


async def get_news_sentiment_enhanced(
    ticker: str, timeframe: str = "7d", limit: int = 10
) -> dict[str, Any]:
    """
    Enhanced news sentiment analysis using Tiingo News API or LLM analysis.

    This tool provides reliable sentiment analysis by:
    1. First attempting to use Tiingo's news API (if available)
    2. Analyzing news sentiment using LLM if news is retrieved
    3. Falling back to research-based sentiment if Tiingo unavailable
    4. Providing guaranteed responses with appropriate fallbacks

    Args:
        ticker: Stock ticker symbol
        timeframe: Time frame for news (1d, 7d, 30d, etc.)
        limit: Maximum number of news articles to analyze

    Returns:
        Dictionary containing news sentiment analysis with confidence scores
    """
    tool_logger = get_tool_logger("data_get_news_sentiment_enhanced")
    request_id = str(uuid.uuid4())

    try:
        # Step 1: Try Tiingo News API
        tool_logger.step("tiingo_check", f"Checking Tiingo News API for {ticker}")

        tiingo_client = get_tiingo_client()
        if tiingo_client:
            try:
                # Calculate date range from timeframe
                end_date = datetime.now()
                days = int(timeframe.rstrip("d")) if timeframe.endswith("d") else 7
                start_date = end_date - timedelta(days=days)

                tool_logger.step(
                    "tiingo_fetch", f"Fetching news from Tiingo for {ticker}"
                )

                # Fetch news using Tiingo's get_news method
                news_articles = await asyncio.wait_for(
                    asyncio.to_thread(
                        tiingo_client.get_news,
                        tickers=[ticker],
                        startDate=start_date.strftime("%Y-%m-%d"),
                        endDate=end_date.strftime("%Y-%m-%d"),
                        limit=limit,
                        sortBy="publishedDate",
                        onlyWithTickers=True,
                    ),
                    timeout=10.0,
                )

                if news_articles:
                    tool_logger.step(
                        "llm_analysis",
                        f"Analyzing {len(news_articles)} articles with LLM",
                    )

                    # Analyze sentiment using LLM
                    sentiment_result = await _analyze_news_sentiment_with_llm(
                        news_articles, ticker, tool_logger
                    )

                    tool_logger.complete(
                        f"Tiingo news sentiment analysis completed for {ticker}"
                    )

                    return {
                        "ticker": ticker,
                        "sentiment": sentiment_result["overall_sentiment"],
                        "confidence": sentiment_result["confidence"],
                        "source": "tiingo_news_with_llm_analysis",
                        "status": "success",
                        "analysis": {
                            "articles_analyzed": len(news_articles),
                            "sentiment_breakdown": sentiment_result["breakdown"],
                            "key_themes": sentiment_result["themes"],
                            "recent_headlines": sentiment_result["headlines"][:3],
                        },
                        "timeframe": timeframe,
                        "request_id": request_id,
                        "timestamp": datetime.now().isoformat(),
                    }

            except TimeoutError:
                tool_logger.step(
                    "tiingo_timeout", "Tiingo API timed out, using fallback"
                )
            except Exception as e:
                # Check if it's a permissions issue (free tier doesn't include news)
                if (
                    "403" in str(e)
                    or "permission" in str(e).lower()
                    or "unauthorized" in str(e).lower()
                ):
                    tool_logger.step(
                        "tiingo_no_permission",
                        "Tiingo news not available (requires paid plan)",
                    )
                else:
                    tool_logger.step("tiingo_error", f"Tiingo error: {str(e)}")

        # Step 2: Fallback to research-based sentiment
        tool_logger.step("research_fallback", "Using research-based sentiment analysis")

        from maverick_mcp.api.routers.research import analyze_market_sentiment

        # `days` is only bound when the Tiingo branch runs; recompute it here
        # so the fallback cannot raise NameError when no Tiingo client exists.
        days = int(timeframe.rstrip("d")) if timeframe.endswith("d") else 7

        # Use research tools to gather sentiment
        result = await asyncio.wait_for(
            analyze_market_sentiment(
                topic=f"{ticker} stock news sentiment recent {timeframe}",
                timeframe="1w" if days <= 7 else "1m",
                persona="moderate",
            ),
            timeout=15.0,
        )

        if result.get("success", False):
            sentiment_data = result.get("sentiment_analysis", {})
            return {
                "ticker": ticker,
                "sentiment": _extract_sentiment_from_research(sentiment_data),
                "confidence": sentiment_data.get("sentiment_confidence", 0.5),
                "source": "research_based_sentiment",
                "status": "fallback_success",
                "analysis": {
                    "overall_sentiment": sentiment_data.get("overall_sentiment", {}),
                    "key_themes": sentiment_data.get("sentiment_themes", [])[:3],
                    "market_insights": sentiment_data.get("market_insights", [])[:2],
                },
                "timeframe": timeframe,
                "request_id": request_id,
                "timestamp": datetime.now().isoformat(),
                "message": "Using research-based sentiment (Tiingo news unavailable on free tier)",
            }

        # Step 3: Basic fallback
        return _provide_basic_sentiment_fallback(ticker, request_id)

    except Exception as e:
        tool_logger.error("sentiment_error", e, f"Sentiment analysis failed: {str(e)}")
        return _provide_basic_sentiment_fallback(ticker, request_id, str(e))


async def _analyze_news_sentiment_with_llm(
    news_articles: list, ticker: str, tool_logger
) -> dict[str, Any]:
    """Analyze news articles sentiment using LLM."""

    llm = get_llm()
    if not llm:
        # No LLM available, do basic analysis
        return _basic_news_analysis(news_articles)

    try:
        # Prepare news summary for LLM
        news_summary = []
        for article in news_articles[:10]:  # Limit to 10 most recent
            news_summary.append(
                {
                    "title": article.get("title", ""),
                    "description": article.get("description", "")[:200]
                    if article.get("description")
                    else "",
                    "publishedDate": article.get("publishedDate", ""),
                    "source": article.get("source", ""),
                }
            )

        # Create sentiment analysis prompt
        prompt = f"""Analyze the sentiment of these recent news articles about {ticker} stock.

News Articles:
{chr(10).join([f"- {a['title']} ({a['source']}, {a['publishedDate'][:10] if a['publishedDate'] else 'Unknown date'})" for a in news_summary[:5]])}

Provide a JSON response with:
1. overall_sentiment: "bullish", "bearish", or "neutral"
2. confidence: 0.0 to 1.0
3. breakdown: dict with counts of positive, negative, neutral articles
4. themes: list of 3 key themes from the news
5. headlines: list of 3 most important headlines

Response format:
{{"overall_sentiment": "...", "confidence": 0.X, "breakdown": {{"positive": X, "negative": Y, "neutral": Z}}, "themes": ["...", "...", "..."], "headlines": ["...", "...", "..."]}}"""

        # Get LLM analysis
        response = await asyncio.to_thread(lambda: llm.invoke(prompt).content)

        # Parse JSON response
        import json

        try:
            # Extract JSON from response (handle markdown code blocks)
            if "```json" in response:
                json_str = response.split("```json")[1].split("```")[0].strip()
            elif "```" in response:
                json_str = response.split("```")[1].split("```")[0].strip()
            elif "{" in response:
                # Find JSON object in response
                start = response.index("{")
                end = response.rindex("}") + 1
                json_str = response[start:end]
            else:
                json_str = response

            result = json.loads(json_str)

            # Ensure all required fields
            return {
                "overall_sentiment": result.get("overall_sentiment", "neutral"),
                "confidence": float(result.get("confidence", 0.5)),
                "breakdown": result.get(
                    "breakdown",
                    {"positive": 0, "negative": 0, "neutral": len(news_articles)},
                ),
                "themes": result.get(
                    "themes",
                    ["Market movement", "Company performance", "Industry trends"],
                ),
                "headlines": [a.get("title", "") for a in news_summary[:3]],
            }

        except (json.JSONDecodeError, ValueError) as e:
            tool_logger.step("llm_parse_error", f"Failed to parse LLM response: {e}")
            return _basic_news_analysis(news_articles)

    except Exception as e:
        tool_logger.step("llm_error", f"LLM analysis failed: {e}")
        return _basic_news_analysis(news_articles)


def _basic_news_analysis(news_articles: list) -> dict[str, Any]:
    """Basic sentiment analysis without LLM."""

    # Simple keyword-based sentiment
    positive_keywords = [
        "gain",
        "rise",
        "up",
        "beat",
        "exceed",
        "strong",
        "bull",
        "buy",
        "upgrade",
        "positive",
    ]
    negative_keywords = [
        "loss",
        "fall",
        "down",
        "miss",
        "below",
        "weak",
        "bear",
        "sell",
        "downgrade",
        "negative",
    ]

    positive_count = 0
    negative_count = 0
    neutral_count = 0

    for article in news_articles:
        title = (
            article.get("title", "") + " " + article.get("description", "")
        ).lower()

        pos_score = sum(1 for keyword in positive_keywords if keyword in title)
        neg_score = sum(1 for keyword in negative_keywords if keyword in title)

        if pos_score > neg_score:
            positive_count += 1
        elif neg_score > pos_score:
            negative_count += 1
        else:
            neutral_count += 1

    total = len(news_articles)
    if total == 0:
        return {
            "overall_sentiment": "neutral",
            "confidence": 0.0,
            "breakdown": {"positive": 0, "negative": 0, "neutral": 0},
            "themes": [],
            "headlines": [],
        }

    # Determine overall sentiment
    if positive_count > negative_count * 1.5:
        overall = "bullish"
    elif negative_count > positive_count * 1.5:
        overall = "bearish"
    else:
        overall = "neutral"

    # Calculate confidence based on consensus
    max_count = max(positive_count, negative_count, neutral_count)
    confidence = max_count / total if total > 0 else 0.0

    return {
        "overall_sentiment": overall,
        "confidence": confidence,
        "breakdown": {
            "positive": positive_count,
            "negative": negative_count,
            "neutral": neutral_count,
        },
        "themes": ["Recent news", "Market activity", "Company updates"],
        "headlines": [a.get("title", "") for a in news_articles[:3]],
    }
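
# Worked example (illustrative): titles "Shares beat estimates", "Guidance
# miss weighs on stock", and "Company schedules annual meeting" score one
# positive, one negative, and one neutral article. Neither side exceeds 1.5x
# the other, so overall_sentiment is "neutral" with confidence 1/3.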


def _extract_sentiment_from_research(sentiment_data: dict) -> str:
    """Extract simple sentiment direction from research data."""

    overall = sentiment_data.get("overall_sentiment", {})

    # Check for sentiment keywords (str() handles dicts and plain values alike)
    sentiment_str = str(overall).lower()

    if "bullish" in sentiment_str or "positive" in sentiment_str:
        return "bullish"
    elif "bearish" in sentiment_str or "negative" in sentiment_str:
        return "bearish"

    # Heuristic fallback: lean bullish when research confidence is high,
    # bearish when it is low, when no explicit direction keyword is present
    confidence = sentiment_data.get("sentiment_confidence", 0.5)
    if confidence > 0.6:
        return "bullish"
    elif confidence < 0.4:
        return "bearish"

    return "neutral"


def _provide_basic_sentiment_fallback(
    ticker: str, request_id: str, error_detail: str | None = None
) -> dict[str, Any]:
    """Provide basic fallback when all methods fail."""

    response = {
        "ticker": ticker,
        "sentiment": "neutral",
        "confidence": 0.0,
        "source": "fallback",
        "status": "all_methods_failed",
        "message": "Unable to fetch news sentiment - returning neutral baseline",
        "analysis": {
            "note": "Consider using a paid Tiingo plan for news access or check API keys"
        },
        "request_id": request_id,
        "timestamp": datetime.now().isoformat(),
    }

    if error_detail:
        response["error_detail"] = error_detail[:200]  # Limit error message length

    return response

```
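
A minimal invocation sketch for the tool above; it works with or without TIINGO_API_KEY set, since the function degrades through its fallbacks either way:

```python
import asyncio

from maverick_mcp.api.routers.news_sentiment_enhanced import (
    get_news_sentiment_enhanced,
)


async def main() -> None:
    result = await get_news_sentiment_enhanced("AAPL", timeframe="7d", limit=10)
    # "source" reveals which path answered: tiingo_news_with_llm_analysis,
    # research_based_sentiment, or fallback.
    print(result["sentiment"], result["confidence"], result["source"])


asyncio.run(main())
```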

--------------------------------------------------------------------------------
/maverick_mcp/tests/test_macro_data_provider.py:
--------------------------------------------------------------------------------

```python
"""
Tests for the MacroDataProvider class.
"""

import unittest
from datetime import datetime
from unittest.mock import MagicMock, patch

import pandas as pd

from maverick_mcp.providers.macro_data import MacroDataProvider


class TestMacroDataProvider(unittest.TestCase):
    """Test suite for MacroDataProvider."""

    @patch("fredapi.Fred")
    def setUp(self, mock_fred_class):
        """Set up test fixtures."""
        mock_fred = MagicMock()
        mock_fred_class.return_value = mock_fred
        # Create provider with mocked FRED
        self.provider = MacroDataProvider()
        self.provider.fred = mock_fred

    @patch("fredapi.Fred")
    def test_init_with_fred_api(self, mock_fred_class):
        """Test initialization with FRED API."""
        mock_fred = MagicMock()
        mock_fred_class.return_value = mock_fred

        provider = MacroDataProvider(window_days=180)

        self.assertEqual(provider.window_days, 180)
        self.assertIsNotNone(provider.scaler)
        self.assertIsNotNone(provider.weights)
        mock_fred_class.assert_called_once()

    def test_calculate_weighted_rolling_performance(self):
        """Test weighted rolling performance calculation."""
        # Mock FRED data
        mock_data = pd.Series(
            [100, 102, 104, 106, 108],
            index=pd.date_range(end=datetime.now(), periods=5, freq="D"),
        )

        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = mock_data

            result = self.provider._calculate_weighted_rolling_performance(  # type: ignore[attr-defined]
                "SP500", [30, 90, 180], [0.5, 0.3, 0.2]
            )

            self.assertIsInstance(result, float)
            self.assertEqual(mock_get_series.call_count, 3)

    def test_calculate_weighted_rolling_performance_empty_data(self):
        """Test weighted rolling performance with empty data."""
        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = pd.Series([])

            result = self.provider._calculate_weighted_rolling_performance(  # type: ignore[attr-defined]
                "SP500", [30], [1.0]
            )

            self.assertEqual(result, 0.0)

    def test_get_sp500_performance(self):
        """Test S&P 500 performance calculation."""
        with patch.object(
            self.provider, "_calculate_weighted_rolling_performance"
        ) as mock_calc:
            mock_calc.return_value = 5.5

            result = self.provider.get_sp500_performance()

            self.assertEqual(result, 5.5)
            mock_calc.assert_called_once_with("SP500", [30, 90, 180], [0.5, 0.3, 0.2])

    def test_get_nasdaq_performance(self):
        """Test NASDAQ performance calculation."""
        with patch.object(
            self.provider, "_calculate_weighted_rolling_performance"
        ) as mock_calc:
            mock_calc.return_value = 7.2

            result = self.provider.get_nasdaq_performance()

            self.assertEqual(result, 7.2)
            mock_calc.assert_called_once_with(
                "NASDAQ100", [30, 90, 180], [0.5, 0.3, 0.2]
            )

    def test_get_gdp_growth_rate(self):
        """Test GDP growth rate fetching."""
        mock_data = pd.Series(
            [2.5, 2.8], index=pd.date_range(end=datetime.now(), periods=2, freq="Q")
        )

        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = mock_data

            result = self.provider.get_gdp_growth_rate()

            self.assertIsInstance(result, dict)
            self.assertEqual(result["current"], 2.8)
            self.assertEqual(result["previous"], 2.5)

    def test_get_gdp_growth_rate_empty_data(self):
        """Test GDP growth rate with no data."""
        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = pd.Series([])

            result = self.provider.get_gdp_growth_rate()

            self.assertEqual(result["current"], 0.0)
            self.assertEqual(result["previous"], 0.0)

    def test_get_unemployment_rate(self):
        """Test unemployment rate fetching."""
        mock_data = pd.Series(
            [3.5, 3.6, 3.7],
            index=pd.date_range(end=datetime.now(), periods=3, freq="M"),
        )

        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = mock_data

            result = self.provider.get_unemployment_rate()

            self.assertIsInstance(result, dict)
            self.assertEqual(result["current"], 3.7)
            self.assertEqual(result["previous"], 3.6)

    def test_get_inflation_rate(self):
        """Test inflation rate calculation."""
        # Create CPI data for 24 months
        dates = pd.date_range(end=datetime.now(), periods=24, freq="MS")
        cpi_values = [100 + i * 0.2 for i in range(24)]  # Gradual increase
        mock_data = pd.Series(cpi_values, index=dates)

        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = mock_data

            result = self.provider.get_inflation_rate()

            self.assertIsInstance(result, dict)
            self.assertIn("current", result)
            self.assertIn("previous", result)
            self.assertIn("bounds", result)
            self.assertIsInstance(result["bounds"], tuple)

    def test_get_inflation_rate_insufficient_data(self):
        """Test inflation rate with insufficient data."""
        # Only 6 months of data (need 13+ for YoY)
        dates = pd.date_range(end=datetime.now(), periods=6, freq="MS")
        mock_data = pd.Series([100, 101, 102, 103, 104, 105], index=dates)

        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = mock_data

            result = self.provider.get_inflation_rate()

            self.assertEqual(result["current"], 0.0)
            self.assertEqual(result["previous"], 0.0)

    def test_get_vix(self):
        """Test VIX fetching."""
        # Test with yfinance first
        with patch("yfinance.Ticker") as mock_ticker_class:
            mock_ticker = MagicMock()
            mock_ticker_class.return_value = mock_ticker
            mock_ticker.history.return_value = pd.DataFrame(
                {"Close": [18.5]}, index=[datetime.now()]
            )

            result = self.provider.get_vix()

            self.assertEqual(result, 18.5)

    def test_get_vix_fallback_to_fred(self):
        """Test VIX fetching with FRED fallback."""
        with patch("yfinance.Ticker") as mock_ticker_class:
            mock_ticker = MagicMock()
            mock_ticker_class.return_value = mock_ticker
            mock_ticker.history.return_value = pd.DataFrame()  # Empty yfinance data

            mock_fred_data = pd.Series([20.5], index=[datetime.now()])
            with patch.object(self.provider.fred, "get_series") as mock_get_series:
                mock_get_series.return_value = mock_fred_data

                result = self.provider.get_vix()

                self.assertEqual(result, 20.5)

    def test_get_sp500_momentum(self):
        """Test S&P 500 momentum calculation."""
        # Create mock data with upward trend
        dates = pd.date_range(end=datetime.now(), periods=15, freq="D")
        values = [3000 + i * 10 for i in range(15)]
        mock_data = pd.Series(values, index=dates)

        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = mock_data

            result = self.provider.get_sp500_momentum()

            self.assertIsInstance(result, float)
            self.assertGreater(result, 0)  # Should be positive for upward trend

    def test_get_nasdaq_momentum(self):
        """Test NASDAQ momentum calculation."""
        dates = pd.date_range(end=datetime.now(), periods=15, freq="D")
        values = [15000 + i * 50 for i in range(15)]
        mock_data = pd.Series(values, index=dates)

        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = mock_data

            result = self.provider.get_nasdaq_momentum()

            self.assertIsInstance(result, float)
            self.assertGreater(result, 0)

    def test_get_usd_momentum(self):
        """Test USD momentum calculation."""
        dates = pd.date_range(end=datetime.now(), periods=15, freq="D")
        values = [100 + i * 0.1 for i in range(15)]
        mock_data = pd.Series(values, index=dates)

        with patch.object(self.provider.fred, "get_series") as mock_get_series:
            mock_get_series.return_value = mock_data

            result = self.provider.get_usd_momentum()

            self.assertIsInstance(result, float)

    def test_update_historical_bounds(self):
        """Test updating historical bounds."""
        # Mock data for different indicators
        gdp_data = pd.Series([1.5, 2.0, 2.5, 3.0])
        unemployment_data = pd.Series([3.5, 4.0, 4.5, 5.0])

        with patch.object(self.provider.fred, "get_series") as mock_get_series:

            def side_effect(series_id, *args, **kwargs):
                if series_id == "A191RL1Q225SBEA":
                    return gdp_data
                elif series_id == "UNRATE":
                    return unemployment_data
                else:
                    return pd.Series([])

            mock_get_series.side_effect = side_effect

            self.provider.update_historical_bounds()

            self.assertIn("gdp_growth_rate", self.provider.historical_data_bounds)
            self.assertIn("unemployment_rate", self.provider.historical_data_bounds)

    def test_default_bounds(self):
        """Test default bounds for indicators."""
        bounds = self.provider.default_bounds("vix")
        self.assertEqual(bounds["min"], 10.0)
        self.assertEqual(bounds["max"], 50.0)

        bounds = self.provider.default_bounds("unknown_indicator")
        self.assertEqual(bounds["min"], 0.0)
        self.assertEqual(bounds["max"], 1.0)

    def test_normalize_indicators(self):
        """Test indicator normalization."""
        indicators = {
            "vix": 30.0,  # Middle of 10-50 range
            "sp500_momentum": 0.0,  # Middle of -15 to 15 range
            "unemployment_rate": 6.0,  # Middle of 2-10 range
            "gdp_growth_rate": 2.0,  # In -2 to 6 range
        }

        normalized = self.provider.normalize_indicators(indicators)

        # VIX should be inverted (lower is better)
        self.assertAlmostEqual(normalized["vix"], 0.5, places=1)
        # SP500 momentum at 0 should normalize to 0.5
        self.assertAlmostEqual(normalized["sp500_momentum"], 0.5, places=1)
        # Unemployment should be inverted
        self.assertAlmostEqual(normalized["unemployment_rate"], 0.5, places=1)

    def test_normalize_indicators_with_none_values(self):
        """Test normalization with None values."""
        indicators = {
            "vix": None,
            "sp500_momentum": 5.0,
        }

        normalized = self.provider.normalize_indicators(indicators)

        self.assertEqual(normalized["vix"], 0.5)  # Default for None
        self.assertGreater(normalized["sp500_momentum"], 0.5)

    def test_get_historical_data(self):
        """Test fetching historical data."""
        # Mock different data series
        sp500_data = pd.Series(
            [3000, 3050, 3100],
            index=pd.date_range(end=datetime.now(), periods=3, freq="D"),
        )
        vix_data = pd.Series(
            [15, 16, 17], index=pd.date_range(end=datetime.now(), periods=3, freq="D")
        )

        with patch.object(self.provider.fred, "get_series") as mock_get_series:

            def side_effect(series_id, *args, **kwargs):
                if series_id == "SP500":
                    return sp500_data
                elif series_id == "VIXCLS":
                    return vix_data
                else:
                    return pd.Series([])

            mock_get_series.side_effect = side_effect

            result = self.provider.get_historical_data()

            self.assertIsInstance(result, dict)
            self.assertIn("sp500_performance", result)
            self.assertIn("vix", result)
            self.assertIsInstance(result["sp500_performance"], list)
            self.assertIsInstance(result["vix"], list)

    def test_get_macro_statistics(self):
        """Test comprehensive macro statistics."""
        # Mock all the individual methods
        with patch.object(self.provider, "get_gdp_growth_rate") as mock_gdp:
            mock_gdp.return_value = {"current": 2.5, "previous": 2.3}

            with patch.object(
                self.provider, "get_unemployment_rate"
            ) as mock_unemployment:
                mock_unemployment.return_value = {"current": 3.7, "previous": 3.8}

                with patch.object(
                    self.provider, "get_inflation_rate"
                ) as mock_inflation:
                    mock_inflation.return_value = {
                        "current": 2.1,
                        "previous": 2.0,
                        "bounds": (1.5, 3.0),
                    }

                    with patch.object(self.provider, "get_vix") as mock_vix:
                        mock_vix.return_value = 18.5

                        result = self.provider.get_macro_statistics()

                        self.assertIsInstance(result, dict)
                        self.assertEqual(result["gdp_growth_rate"], 2.5)
                        self.assertEqual(result["unemployment_rate"], 3.7)
                        self.assertEqual(result["inflation_rate"], 2.1)
                        self.assertEqual(result["vix"], 18.5)
                        self.assertIn("sentiment_score", result)
                        self.assertIsInstance(result["sentiment_score"], float)
                        self.assertTrue(1 <= result["sentiment_score"] <= 100)

    def test_get_macro_statistics_error_handling(self):
        """Test macro statistics with errors."""
        with patch.object(self.provider, "update_historical_bounds") as mock_update:
            mock_update.side_effect = Exception("Update error")

            result = self.provider.get_macro_statistics()

            # Should return safe defaults
            self.assertEqual(result["gdp_growth_rate"], 0.0)
            self.assertEqual(result["unemployment_rate"], 0.0)
            self.assertEqual(result["sentiment_score"], 50.0)


if __name__ == "__main__":
    unittest.main()

```
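
For context, a hedged sketch of the provider surface these tests exercise; it assumes FRED credentials are configured at runtime (the tests mock `fredapi.Fred` instead of calling the API):

```python
from maverick_mcp.providers.macro_data import MacroDataProvider

provider = MacroDataProvider(window_days=180)
stats = provider.get_macro_statistics()

# Keys asserted in the tests above: gdp_growth_rate, unemployment_rate,
# inflation_rate, vix, and a 1-100 sentiment_score.
print(stats["vix"], stats["sentiment_score"])
```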

--------------------------------------------------------------------------------
/maverick_mcp/api/routers/technical_enhanced.py:
--------------------------------------------------------------------------------

```python
"""
Enhanced technical analysis router with comprehensive logging and timeout handling.

This module fixes the "No result received from client-side tool execution" issues by:
- Adding comprehensive logging for each step of tool execution
- Implementing proper timeout handling (under 25 seconds)
- Breaking down complex operations into logged steps
- Providing detailed error context and debugging information
- Ensuring JSON-RPC responses are always sent
"""

import asyncio
from concurrent.futures import ThreadPoolExecutor
from datetime import UTC, datetime
from typing import Any

from fastmcp import FastMCP
from fastmcp.server.dependencies import get_access_token

from maverick_mcp.api.middleware.mcp_logging import get_tool_logger
from maverick_mcp.core.technical_analysis import (
    analyze_bollinger_bands,
    analyze_macd,
    analyze_rsi,
    analyze_stochastic,
    analyze_trend,
    analyze_volume,
    generate_outlook,
    identify_chart_patterns,
    identify_resistance_levels,
    identify_support_levels,
)
from maverick_mcp.utils.logging import get_logger
from maverick_mcp.utils.stock_helpers import get_stock_dataframe_async
from maverick_mcp.validation.technical import TechnicalAnalysisRequest

logger = get_logger("maverick_mcp.routers.technical_enhanced")

# Create the enhanced technical analysis router
technical_enhanced_router: FastMCP = FastMCP("Technical_Analysis_Enhanced")

# Thread pool for blocking operations
executor = ThreadPoolExecutor(max_workers=4)


class TechnicalAnalysisTimeoutError(Exception):
    """Raised when technical analysis times out."""

    pass


class TechnicalAnalysisError(Exception):
    """Base exception for technical analysis errors."""

    pass


async def get_full_technical_analysis_enhanced(
    request: TechnicalAnalysisRequest,
) -> dict[str, Any]:
    """
    Enhanced technical analysis with comprehensive logging and timeout handling.

    This version:
    - Logs every step of execution for debugging
    - Uses proper timeout handling (25 seconds max)
    - Breaks complex operations into chunks
    - Always returns a JSON-RPC compatible response
    - Provides detailed error context

    Args:
        request: Validated technical analysis request

    Returns:
        Dictionary containing complete technical analysis

    Note:
        Timeouts and analysis errors are caught and returned as structured
        error dictionaries rather than raised, so a JSON-RPC response is
        always produced.
    """
    tool_logger = get_tool_logger("get_full_technical_analysis_enhanced")
    ticker = request.ticker
    days = request.days

    try:
        # Set overall timeout (25s to stay under Claude Desktop's 30s limit)
        return await asyncio.wait_for(
            _execute_technical_analysis_with_logging(tool_logger, ticker, days),
            timeout=25.0,
        )

    except TimeoutError:
        error_msg = f"Technical analysis for {ticker} timed out after 25 seconds"
        tool_logger.error("timeout", TimeoutError(error_msg))
        logger.error(error_msg, extra={"ticker": ticker, "days": days})

        return {
            "error": error_msg,
            "error_type": "timeout",
            "ticker": ticker,
            "status": "failed",
            "execution_time": 25.0,
            "timestamp": datetime.now(UTC).isoformat(),
        }

    except Exception as e:
        error_msg = f"Technical analysis for {ticker} failed: {str(e)}"
        tool_logger.error("general_error", e)
        logger.error(
            error_msg,
            extra={"ticker": ticker, "days": days, "error_type": type(e).__name__},
        )

        return {
            "error": error_msg,
            "error_type": type(e).__name__,
            "ticker": ticker,
            "status": "failed",
            "timestamp": datetime.now(UTC).isoformat(),
        }


async def _execute_technical_analysis_with_logging(
    tool_logger, ticker: str, days: int
) -> dict[str, Any]:
    """Execute technical analysis with comprehensive step-by-step logging."""

    # Step 1: Check authentication (optional)
    tool_logger.step("auth_check", "Checking authentication context")
    has_premium = False
    try:
        access_token = get_access_token()
        if access_token and "premium:access" in access_token.scopes:
            has_premium = True
            logger.info(
                f"Premium user accessing technical analysis: {access_token.client_id}"
            )
    except Exception:
        logger.debug("Unauthenticated user accessing technical analysis")

    # Step 2: Fetch stock data
    tool_logger.step("data_fetch", f"Fetching {days} days of data for {ticker}")
    try:
        df = await asyncio.wait_for(
            get_stock_dataframe_async(ticker, days),
            timeout=8.0,  # Data fetch should be fast
        )

        if df.empty:
            raise TechnicalAnalysisError(f"No data available for {ticker}")

        logger.info(f"Retrieved {len(df)} data points for {ticker}")
        tool_logger.step("data_validation", f"Retrieved {len(df)} data points")

    except TimeoutError:
        raise TechnicalAnalysisError(f"Data fetch for {ticker} timed out")
    except Exception as e:
        raise TechnicalAnalysisError(f"Failed to fetch data for {ticker}: {str(e)}")

    # Step 3: Calculate basic indicators (parallel execution)
    tool_logger.step("basic_indicators", "Calculating RSI, MACD, Stochastic")
    try:
        # Run basic indicators in parallel with timeouts
        basic_tasks = [
            asyncio.wait_for(_run_in_executor(analyze_rsi, df), timeout=3.0),
            asyncio.wait_for(_run_in_executor(analyze_macd, df), timeout=3.0),
            asyncio.wait_for(_run_in_executor(analyze_stochastic, df), timeout=3.0),
            asyncio.wait_for(_run_in_executor(analyze_trend, df), timeout=2.0),
        ]

        rsi_analysis, macd_analysis, stoch_analysis, trend = await asyncio.gather(
            *basic_tasks
        )
        tool_logger.step(
            "basic_indicators_complete", "Basic indicators calculated successfully"
        )

    except TimeoutError:
        raise TechnicalAnalysisError("Basic indicator calculation timed out")
    except Exception as e:
        raise TechnicalAnalysisError(f"Basic indicator calculation failed: {str(e)}")

    # Step 4: Calculate advanced indicators
    tool_logger.step(
        "advanced_indicators", "Calculating Bollinger Bands, Volume analysis"
    )
    try:
        advanced_tasks = [
            asyncio.wait_for(
                _run_in_executor(analyze_bollinger_bands, df), timeout=3.0
            ),
            asyncio.wait_for(_run_in_executor(analyze_volume, df), timeout=3.0),
        ]

        bb_analysis, volume_analysis = await asyncio.gather(*advanced_tasks)
        tool_logger.step(
            "advanced_indicators_complete", "Advanced indicators calculated"
        )

    except TimeoutError:
        raise TechnicalAnalysisError("Advanced indicator calculation timed out")
    except Exception as e:
        raise TechnicalAnalysisError(f"Advanced indicator calculation failed: {str(e)}")

    # Step 5: Pattern recognition and levels
    tool_logger.step(
        "pattern_analysis", "Identifying patterns and support/resistance levels"
    )
    try:
        pattern_tasks = [
            asyncio.wait_for(
                _run_in_executor(identify_chart_patterns, df), timeout=4.0
            ),
            asyncio.wait_for(
                _run_in_executor(identify_support_levels, df), timeout=3.0
            ),
            asyncio.wait_for(
                _run_in_executor(identify_resistance_levels, df), timeout=3.0
            ),
        ]

        patterns, support, resistance = await asyncio.gather(*pattern_tasks)
        tool_logger.step("pattern_analysis_complete", f"Found {len(patterns)} patterns")

    except TimeoutError:
        raise TechnicalAnalysisError("Pattern analysis timed out")
    except Exception as e:
        raise TechnicalAnalysisError(f"Pattern analysis failed: {str(e)}")

    # Step 6: Generate outlook
    tool_logger.step("outlook_generation", "Generating market outlook")
    try:
        outlook = await asyncio.wait_for(
            _run_in_executor(
                generate_outlook,
                df,
                str(trend),
                rsi_analysis,
                macd_analysis,
                stoch_analysis,
            ),
            timeout=3.0,
        )
        tool_logger.step("outlook_complete", "Market outlook generated")

    except TimeoutError:
        raise TechnicalAnalysisError("Outlook generation timed out")
    except Exception as e:
        raise TechnicalAnalysisError(f"Outlook generation failed: {str(e)}")

    # Step 7: Compile final results
    tool_logger.step("result_compilation", "Compiling final analysis results")
    try:
        current_price = float(df["close"].iloc[-1])

        result = {
            "ticker": ticker,
            "current_price": current_price,
            "trend": trend,
            "outlook": outlook,
            "indicators": {
                "rsi": rsi_analysis,
                "macd": macd_analysis,
                "stochastic": stoch_analysis,
                "bollinger_bands": bb_analysis,
                "volume": volume_analysis,
            },
            "levels": {
                "support": sorted(support) if support else [],
                "resistance": sorted(resistance) if resistance else [],
            },
            "patterns": patterns,
            "analysis_metadata": {
                "data_points": len(df),
                "period_days": days,
                "has_premium": has_premium,
                "timestamp": datetime.now(UTC).isoformat(),
            },
            "status": "completed",
        }

        tool_logger.complete(
            f"Analysis completed for {ticker} with {len(df)} data points"
        )
        return result

    except Exception as e:
        raise TechnicalAnalysisError(f"Result compilation failed: {str(e)}")


async def _run_in_executor(func, *args) -> Any:
    """Run a synchronous function in the thread pool executor."""
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(executor, func, *args)


async def get_stock_chart_analysis_enhanced(ticker: str) -> dict[str, Any]:
    """
    Enhanced stock chart analysis with logging and timeout handling.

    This version generates charts with proper timeout handling and error logging.

    Args:
        ticker: Stock ticker symbol

    Returns:
        Dictionary containing chart data or error information
    """
    tool_logger = get_tool_logger("get_stock_chart_analysis_enhanced")

    try:
        # Set timeout for chart generation
        return await asyncio.wait_for(
            _generate_chart_with_logging(tool_logger, ticker),
            timeout=15.0,  # Charts should be faster than full analysis
        )

    except TimeoutError:
        error_msg = f"Chart generation for {ticker} timed out after 15 seconds"
        tool_logger.error("timeout", TimeoutError(error_msg))

        return {
            "error": error_msg,
            "error_type": "timeout",
            "ticker": ticker,
            "status": "failed",
        }

    except Exception as e:
        error_msg = f"Chart generation for {ticker} failed: {str(e)}"
        tool_logger.error("general_error", e)

        return {
            "error": error_msg,
            "error_type": type(e).__name__,
            "ticker": ticker,
            "status": "failed",
        }


async def _generate_chart_with_logging(tool_logger, ticker: str) -> dict[str, Any]:
    """Generate chart with step-by-step logging."""
    from maverick_mcp.core.technical_analysis import add_technical_indicators
    from maverick_mcp.core.visualization import (
        create_plotly_technical_chart,
        plotly_fig_to_base64,
    )

    # Step 1: Fetch data
    tool_logger.step("chart_data_fetch", f"Fetching chart data for {ticker}")
    df = await get_stock_dataframe_async(ticker, 365)

    if df.empty:
        raise TechnicalAnalysisError(
            f"No data available for chart generation: {ticker}"
        )

    # Step 2: Add technical indicators
    tool_logger.step("chart_indicators", "Adding technical indicators to chart")
    df_with_indicators = await _run_in_executor(add_technical_indicators, df)

    # Step 3: Generate chart configurations (progressive sizing)
    chart_configs = [
        {"height": 400, "width": 600, "format": "png", "quality": 85},
        {"height": 300, "width": 500, "format": "jpeg", "quality": 75},
        {"height": 250, "width": 400, "format": "jpeg", "quality": 65},
    ]
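    # Only height, width, and format are consumed below; the "quality" values
    # are target hints and are not currently passed to the chart helpers.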

    for i, config in enumerate(chart_configs):
        try:
            tool_logger.step(
                f"chart_generation_{i + 1}", f"Generating chart (attempt {i + 1})"
            )

            # Generate chart
            chart = await _run_in_executor(
                create_plotly_technical_chart,
                df_with_indicators,
                ticker,
                config["height"],
                config["width"],
            )

            # Convert to base64
            data_uri = await _run_in_executor(
                plotly_fig_to_base64, chart, config["format"]
            )

            # Validate size (Claude Desktop has limits)
            if len(data_uri) < 200000:  # ~200KB limit for safety
                tool_logger.complete(
                    f"Chart generated successfully (size: {len(data_uri)} chars)"
                )

                return {
                    "ticker": ticker,
                    "chart_data": data_uri,
                    "chart_format": config["format"],
                    "chart_size": {
                        "height": config["height"],
                        "width": config["width"],
                    },
                    "data_points": len(df),
                    "status": "completed",
                    "timestamp": datetime.now(UTC).isoformat(),
                }
            else:
                logger.warning(
                    f"Chart too large ({len(data_uri)} chars), trying smaller config"
                )

        except Exception as e:
            logger.warning(f"Chart generation attempt {i + 1} failed: {e}")
            if i == len(chart_configs) - 1:  # Last attempt
                raise TechnicalAnalysisError(
                    f"All chart generation attempts failed: {e}"
                )

    raise TechnicalAnalysisError(
        "Chart generation failed - all size configurations exceeded limits"
    )


# Export functions for registration with FastMCP
__all__ = [
    "technical_enhanced_router",
    "get_full_technical_analysis_enhanced",
    "get_stock_chart_analysis_enhanced",
]

```
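As a quick sketch of the intended wiring (the actual server registration is not shown on this page, and the module path below is an assumption), the exported coroutines can be attached to the router with FastMCP's standard `tool()` decorator:

```python
# Hypothetical registration sketch; module path and wiring are assumptions.
from maverick_mcp.api.routers.technical_enhanced import (
    get_full_technical_analysis_enhanced,
    get_stock_chart_analysis_enhanced,
    technical_enhanced_router,
)

# Apply the decorator form directly to register each coroutine as an MCP tool.
technical_enhanced_router.tool()(get_full_technical_analysis_enhanced)
technical_enhanced_router.tool()(get_stock_chart_analysis_enhanced)
```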

--------------------------------------------------------------------------------
/maverick_mcp/domain/screening/services.py:
--------------------------------------------------------------------------------

```python
"""
Screening domain services.

This module contains pure business logic services that operate on
screening entities and value objects without any external dependencies.
"""

from datetime import datetime
from decimal import Decimal
from typing import Any, Protocol

from .entities import ScreeningResult, ScreeningResultCollection
from .value_objects import (
    ScreeningCriteria,
    ScreeningLimits,
    ScreeningStrategy,
    SortingOptions,
)


class IStockRepository(Protocol):
    """Protocol defining the interface for stock data access."""

    def get_maverick_stocks(
        self, limit: int = 20, min_score: int | None = None
    ) -> list[dict[str, Any]]:
        """Get Maverick bullish stocks."""
        ...

    def get_maverick_bear_stocks(
        self, limit: int = 20, min_score: int | None = None
    ) -> list[dict[str, Any]]:
        """Get Maverick bearish stocks."""
        ...

    def get_trending_stocks(
        self,
        limit: int = 20,
        min_momentum_score: Decimal | None = None,
        filter_moving_averages: bool = False,
    ) -> list[dict[str, Any]]:
        """Get trending stocks."""
        ...


class ScreeningService:
    """
    Pure domain service for stock screening business logic.

    This service contains no external dependencies and focuses solely
    on the business rules and logic for screening operations.
    """

    def __init__(self):
        """Initialize the screening service."""
        self._default_limits = ScreeningLimits()

    def create_screening_result_from_raw_data(
        self, raw_data: dict[str, Any], screening_date: datetime | None = None
    ) -> ScreeningResult:
        """
        Create a ScreeningResult entity from raw database data.

        This method handles the transformation of raw data into
        a properly validated domain entity.
        """
        if screening_date is None:
            screening_date = datetime.utcnow()

        return ScreeningResult(
            stock_symbol=raw_data.get("stock", ""),
            screening_date=screening_date,
            open_price=Decimal(str(raw_data.get("open", 0))),
            high_price=Decimal(str(raw_data.get("high", 0))),
            low_price=Decimal(str(raw_data.get("low", 0))),
            close_price=Decimal(str(raw_data.get("close", 0))),
            volume=int(raw_data.get("volume", 0)),
            ema_21=Decimal(str(raw_data.get("ema_21", 0))),
            sma_50=Decimal(str(raw_data.get("sma_50", 0))),
            sma_150=Decimal(str(raw_data.get("sma_150", 0))),
            sma_200=Decimal(str(raw_data.get("sma_200", 0))),
            momentum_score=Decimal(str(raw_data.get("momentum_score", 0))),
            avg_volume_30d=Decimal(
                str(raw_data.get("avg_vol_30d", raw_data.get("avg_volume_30d", 0)))
            ),
            adr_percentage=Decimal(str(raw_data.get("adr_pct", 0))),
            atr=Decimal(str(raw_data.get("atr", 0))),
            pattern=raw_data.get("pat"),
            squeeze=raw_data.get("sqz"),
            vcp=raw_data.get("vcp"),
            entry_signal=raw_data.get("entry"),
            combined_score=int(raw_data.get("combined_score", 0)),
            bear_score=int(raw_data.get("score", 0)),  # Bear score uses 'score' field
            compression_score=int(raw_data.get("compression_score", 0)),
            pattern_detected=int(raw_data.get("pattern_detected", 0)),
            # Bearish-specific fields
            rsi_14=Decimal(str(raw_data["rsi_14"]))
            if raw_data.get("rsi_14") is not None
            else None,
            macd=Decimal(str(raw_data["macd"]))
            if raw_data.get("macd") is not None
            else None,
            macd_signal=Decimal(str(raw_data["macd_s"]))
            if raw_data.get("macd_s") is not None
            else None,
            macd_histogram=Decimal(str(raw_data["macd_h"]))
            if raw_data.get("macd_h") is not None
            else None,
            distribution_days_20=raw_data.get("dist_days_20"),
            atr_contraction=raw_data.get("atr_contraction"),
            big_down_volume=raw_data.get("big_down_vol"),
        )

    def apply_screening_criteria(
        self, results: list[ScreeningResult], criteria: ScreeningCriteria
    ) -> list[ScreeningResult]:
        """
        Apply screening criteria to filter results.

        This method implements all the business rules for filtering
        screening results based on the provided criteria.
        """
        if not criteria.has_any_filters():
            return results

        filtered_results = results

        # Momentum Score filters
        if criteria.min_momentum_score is not None:
            filtered_results = [
                r
                for r in filtered_results
                if r.momentum_score >= criteria.min_momentum_score
            ]

        if criteria.max_momentum_score is not None:
            filtered_results = [
                r
                for r in filtered_results
                if r.momentum_score <= criteria.max_momentum_score
            ]

        # Volume filters
        if criteria.min_volume is not None:
            filtered_results = [
                r for r in filtered_results if r.avg_volume_30d >= criteria.min_volume
            ]

        if criteria.max_volume is not None:
            filtered_results = [
                r for r in filtered_results if r.avg_volume_30d <= criteria.max_volume
            ]

        # Price filters
        if criteria.min_price is not None:
            filtered_results = [
                r for r in filtered_results if r.close_price >= criteria.min_price
            ]

        if criteria.max_price is not None:
            filtered_results = [
                r for r in filtered_results if r.close_price <= criteria.max_price
            ]

        # Score filters
        if criteria.min_combined_score is not None:
            filtered_results = [
                r
                for r in filtered_results
                if r.combined_score >= criteria.min_combined_score
            ]

        if criteria.min_bear_score is not None:
            filtered_results = [
                r for r in filtered_results if r.bear_score >= criteria.min_bear_score
            ]

        # ADR filters
        if criteria.min_adr_percentage is not None:
            filtered_results = [
                r
                for r in filtered_results
                if r.adr_percentage >= criteria.min_adr_percentage
            ]

        if criteria.max_adr_percentage is not None:
            filtered_results = [
                r
                for r in filtered_results
                if r.adr_percentage <= criteria.max_adr_percentage
            ]

        # Pattern filters
        if criteria.require_pattern_detected:
            filtered_results = [r for r in filtered_results if r.pattern_detected > 0]

        if criteria.require_squeeze:
            filtered_results = [
                r
                for r in filtered_results
                if r.squeeze is not None and r.squeeze.strip()
            ]

        if criteria.require_vcp:
            filtered_results = [
                r for r in filtered_results if r.vcp is not None and r.vcp.strip()
            ]

        if criteria.require_entry_signal:
            filtered_results = [
                r
                for r in filtered_results
                if r.entry_signal is not None and r.entry_signal.strip()
            ]

        # Moving average filters
        if criteria.require_above_sma50:
            filtered_results = [r for r in filtered_results if r.close_price > r.sma_50]

        if criteria.require_above_sma150:
            filtered_results = [
                r for r in filtered_results if r.close_price > r.sma_150
            ]

        if criteria.require_above_sma200:
            filtered_results = [
                r for r in filtered_results if r.close_price > r.sma_200
            ]

        if criteria.require_ma_alignment:
            filtered_results = [
                r
                for r in filtered_results
                if (r.sma_50 > r.sma_150 and r.sma_150 > r.sma_200)
            ]

        return filtered_results

    def sort_screening_results(
        self, results: list[ScreeningResult], sorting: SortingOptions
    ) -> list[ScreeningResult]:
        """
        Sort screening results according to the specified options.

        This method implements the business rules for ranking and
        ordering screening results.
        """

        def get_sort_value(result: ScreeningResult, field: str) -> Any:
            """Get the value for sorting from a result."""
            if field == "combined_score":
                return result.combined_score
            elif field == "bear_score":
                return result.bear_score
            elif field == "momentum_score":
                return result.momentum_score
            elif field == "close_price":
                return result.close_price
            elif field == "volume":
                return result.volume
            elif field == "avg_volume_30d":
                return result.avg_volume_30d
            elif field == "adr_percentage":
                return result.adr_percentage
            elif field == "quality_score":
                return result.get_quality_score()
            else:
                return 0

        # Sort by primary field
        sorted_results = sorted(
            results,
            key=lambda r: get_sort_value(r, sorting.field),
            reverse=sorting.descending,
        )

        # Apply secondary sort if specified
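        # The tuple key below sorts on (primary, secondary) together and fully
        # determines the ordering, superseding the primary-only sort above.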
        if sorting.secondary_field:
            sorted_results = sorted(
                sorted_results,
                key=lambda r: (
                    get_sort_value(r, sorting.field),
                    get_sort_value(r, sorting.secondary_field),
                ),
                reverse=sorting.descending,
            )

        return sorted_results

    def create_screening_collection(
        self,
        results: list[ScreeningResult],
        strategy: ScreeningStrategy,
        total_candidates: int,
    ) -> ScreeningResultCollection:
        """
        Create a ScreeningResultCollection from individual results.

        This method assembles the aggregate root with proper validation.
        """
        return ScreeningResultCollection(
            results=results,
            strategy_used=strategy.value,
            screening_timestamp=datetime.utcnow(),
            total_candidates_analyzed=total_candidates,
        )

    def validate_screening_limits(self, requested_limit: int) -> int:
        """
        Validate and adjust the requested result limit.

        Business rule: Limits must be within acceptable bounds.
        """
        return self._default_limits.validate_limit(requested_limit)

    def calculate_screening_statistics(
        self, collection: ScreeningResultCollection
    ) -> dict[str, Any]:
        """
        Calculate comprehensive statistics for a screening collection.

        This method provides business intelligence metrics for
        screening result analysis.
        """
        base_stats = collection.get_statistics()

        # Add additional business metrics
        results = collection.results
        if not results:
            return base_stats

        # Quality distribution
        quality_scores = [r.get_quality_score() for r in results]
        base_stats.update(
            {
                "quality_distribution": {
                    "high_quality": sum(1 for q in quality_scores if q >= 80),
                    "medium_quality": sum(1 for q in quality_scores if 50 <= q < 80),
                    "low_quality": sum(1 for q in quality_scores if q < 50),
                },
                "avg_quality_score": sum(quality_scores) / len(quality_scores),
            }
        )

        # Risk/reward analysis
        risk_rewards = [r.calculate_risk_reward_ratio() for r in results]
        valid_ratios = [rr for rr in risk_rewards if rr > 0]

        if valid_ratios:
            base_stats.update(
                {
                    "risk_reward_analysis": {
                        "avg_ratio": float(sum(valid_ratios) / len(valid_ratios)),
                        "favorable_setups": sum(1 for rr in valid_ratios if rr >= 2),
                        "conservative_setups": sum(
                            1 for rr in valid_ratios if 1 <= rr < 2
                        ),
                        "risky_setups": sum(1 for rr in valid_ratios if rr < 1),
                    }
                }
            )

        # Strategy-specific metrics
        if collection.strategy_used == ScreeningStrategy.MAVERICK_BULLISH.value:
            base_stats["momentum_analysis"] = self._calculate_momentum_metrics(results)
        elif collection.strategy_used == ScreeningStrategy.MAVERICK_BEARISH.value:
            base_stats["weakness_analysis"] = self._calculate_weakness_metrics(results)
        elif collection.strategy_used == ScreeningStrategy.TRENDING_STAGE2.value:
            base_stats["trend_analysis"] = self._calculate_trend_metrics(results)

        return base_stats

    def _calculate_momentum_metrics(
        self, results: list[ScreeningResult]
    ) -> dict[str, Any]:
        """Calculate momentum-specific metrics for bullish screens."""
        return {
            "high_momentum": sum(1 for r in results if r.combined_score >= 80),
            "pattern_breakouts": sum(1 for r in results if r.pattern_detected > 0),
            "strong_momentum": sum(1 for r in results if r.momentum_score >= 90),
        }

    def _calculate_weakness_metrics(
        self, results: list[ScreeningResult]
    ) -> dict[str, Any]:
        """Calculate weakness-specific metrics for bearish screens."""
        return {
            "severe_weakness": sum(1 for r in results if r.bear_score >= 80),
            "distribution_signals": sum(
                1
                for r in results
                if r.distribution_days_20 is not None and r.distribution_days_20 >= 5
            ),
            "breakdown_candidates": sum(
                1 for r in results if r.close_price < r.sma_200
            ),
        }

    def _calculate_trend_metrics(
        self, results: list[ScreeningResult]
    ) -> dict[str, Any]:
        """Calculate trend-specific metrics for trending screens."""
        return {
            "strong_trends": sum(1 for r in results if r.is_trending_stage2()),
            "perfect_alignment": sum(
                1 for r in results if (r.sma_50 > r.sma_150 and r.sma_150 > r.sma_200)
            ),
            "elite_momentum": sum(1 for r in results if r.momentum_score >= 95),
        }

```
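A minimal usage sketch of the screening pipeline, assuming `ScreeningCriteria` and `SortingOptions` accept the field names referenced above as keyword arguments (their definitions live in `value_objects.py`, which is not on this page) and that entity validation tolerates the defaulted fields:

```python
from decimal import Decimal

from maverick_mcp.domain.screening.services import ScreeningService
from maverick_mcp.domain.screening.value_objects import (
    ScreeningCriteria,
    ScreeningStrategy,
    SortingOptions,
)

service = ScreeningService()

# Raw rows as they might come from the repository; missing keys default to 0.
raw_rows = [
    {
        "stock": "AAPL",
        "open": 224.0,
        "high": 226.5,
        "low": 223.1,
        "close": 225.1,
        "volume": 48_000_000,
        "momentum_score": 92,
        "combined_score": 88,
    },
]
results = [service.create_screening_result_from_raw_data(row) for row in raw_rows]

# Filter, rank, and aggregate through the pure-domain pipeline.
criteria = ScreeningCriteria(min_momentum_score=Decimal("80"))  # assumed kwarg
filtered = service.apply_screening_criteria(results, criteria)
ranked = service.sort_screening_results(
    filtered,
    SortingOptions(field="combined_score", descending=True),  # assumed kwargs
)
collection = service.create_screening_collection(
    ranked, ScreeningStrategy.MAVERICK_BULLISH, total_candidates=len(results)
)
stats = service.calculate_screening_statistics(collection)
```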

--------------------------------------------------------------------------------
/maverick_mcp/api/routers/data.py:
--------------------------------------------------------------------------------

```python
"""
Data fetching router for Maverick-MCP.

This module contains all data retrieval tools including
stock data, news, fundamentals, and caching operations.

Updated to use separated services following Single Responsibility Principle.
"""

import json
import logging
from concurrent.futures import ThreadPoolExecutor
from datetime import UTC, datetime
from typing import Any

import requests
import requests.exceptions
from fastmcp import FastMCP

from maverick_mcp.config.settings import settings
from maverick_mcp.data.models import PriceCache
from maverick_mcp.data.session_management import get_db_session_read_only
from maverick_mcp.domain.stock_analysis import StockAnalysisService
from maverick_mcp.infrastructure.caching import CacheManagementService
from maverick_mcp.infrastructure.data_fetching import StockDataFetchingService
from maverick_mcp.providers.stock_data import (
    StockDataProvider,
)  # Kept for backward compatibility

logger = logging.getLogger(__name__)

# Create the data router
data_router: FastMCP = FastMCP("Data_Operations")

# Thread pool for blocking operations
executor = ThreadPoolExecutor(max_workers=10)


def fetch_stock_data(
    ticker: str,
    start_date: str | None = None,
    end_date: str | None = None,
) -> dict[str, Any]:
    """
    Fetch historical stock data for a given ticker symbol.

    This is the primary tool for retrieving stock price data. It uses intelligent
    caching to minimize API calls and improve performance.

    Updated to use separated services following Single Responsibility Principle.

    Args:
        ticker: The ticker symbol of the stock (e.g., AAPL, MSFT)
        start_date: Start date for data in YYYY-MM-DD format (default: 1 year ago)
        end_date: End date for data in YYYY-MM-DD format (default: today)

    Returns:
        Dictionary containing the stock data in JSON format with:
        - data: OHLCV price data
        - columns: Column names
        - index: Date index

    Examples:
        >>> fetch_stock_data(ticker="AAPL")
        >>> fetch_stock_data(
        ...     ticker="MSFT",
        ...     start_date="2024-01-01",
        ...     end_date="2024-12-31"
        ... )
    """
    try:
        # Create services with dependency injection
        data_fetching_service = StockDataFetchingService()

        with get_db_session_read_only() as session:
            cache_service = CacheManagementService(db_session=session)
            stock_analysis_service = StockAnalysisService(
                data_fetching_service=data_fetching_service,
                cache_service=cache_service,
                db_session=session,
            )

            data = stock_analysis_service.get_stock_data(ticker, start_date, end_date)
            json_data = data.to_json(orient="split", date_format="iso")
            result: dict[str, Any] = json.loads(json_data) if json_data else {}
            result["ticker"] = ticker
            result["record_count"] = len(data)
            return result
    except Exception as e:
        logger.error(f"Error fetching stock data for {ticker}: {e}")
        return {"error": str(e), "ticker": ticker}


def fetch_stock_data_batch(
    tickers: list[str],
    start_date: str | None = None,
    end_date: str | None = None,
) -> dict[str, Any]:
    """
    Fetch historical data for multiple tickers efficiently.

    This tool fetches data for multiple stocks in a single call,
    which is more efficient than calling fetch_stock_data multiple times.

    Updated to use separated services following Single Responsibility Principle.

    Args:
        tickers: List of ticker symbols (e.g., ["AAPL", "MSFT", "GOOGL"])
        start_date: Start date in YYYY-MM-DD format
        end_date: End date in YYYY-MM-DD format

    Returns:
        Dictionary with ticker symbols as keys and data/errors as values

    Examples:
        >>> fetch_stock_data_batch(
        ...     tickers=["AAPL", "MSFT", "GOOGL"],
        ...     start_date="2024-01-01"
        ... )
    """
    results = {}

    # Create services with dependency injection
    data_fetching_service = StockDataFetchingService()

    with get_db_session_read_only() as session:
        cache_service = CacheManagementService(db_session=session)
        stock_analysis_service = StockAnalysisService(
            data_fetching_service=data_fetching_service,
            cache_service=cache_service,
            db_session=session,
        )

        for ticker in tickers:
            try:
                data = stock_analysis_service.get_stock_data(
                    ticker, start_date, end_date
                )
                results[ticker] = {
                    "status": "success",
                    "data": json.loads(
                        data.to_json(orient="split", date_format="iso") or "{}"
                    ),
                    "record_count": len(data),
                }
            except Exception as e:
                logger.error(f"Error fetching data for {ticker}: {e}")
                results[ticker] = {"status": "error", "error": str(e)}

    return {
        "results": results,
        "success_count": sum(1 for r in results.values() if r["status"] == "success"),
        "error_count": sum(1 for r in results.values() if r["status"] == "error"),
        "tickers": tickers,
    }


def get_stock_info(ticker: str) -> dict[str, Any]:
    """
    Get detailed fundamental information about a stock.

    This tool retrieves comprehensive stock information including:
    - Company description and sector
    - Market cap and valuation metrics
    - Financial ratios
    - Trading information

    Args:
        ticker: Stock ticker symbol

    Returns:
        Dictionary containing detailed stock information
    """
    try:
        # Use read-only context manager for automatic session management
        with get_db_session_read_only() as session:
            provider = StockDataProvider(db_session=session)
            info = provider.get_stock_info(ticker)

            # Extract key information
            return {
                "ticker": ticker,
                "company": {
                    "name": info.get("longName", info.get("shortName")),
                    "sector": info.get("sector"),
                    "industry": info.get("industry"),
                    "website": info.get("website"),
                    "description": info.get("longBusinessSummary"),
                },
                "market_data": {
                    "current_price": info.get(
                        "currentPrice", info.get("regularMarketPrice")
                    ),
                    "market_cap": info.get("marketCap"),
                    "enterprise_value": info.get("enterpriseValue"),
                    "shares_outstanding": info.get("sharesOutstanding"),
                    "float_shares": info.get("floatShares"),
                },
                "valuation": {
                    "pe_ratio": info.get("trailingPE"),
                    "forward_pe": info.get("forwardPE"),
                    "peg_ratio": info.get("pegRatio"),
                    "price_to_book": info.get("priceToBook"),
                    "price_to_sales": info.get("priceToSalesTrailing12Months"),
                },
                "financials": {
                    "revenue": info.get("totalRevenue"),
                    "profit_margin": info.get("profitMargins"),
                    "operating_margin": info.get("operatingMargins"),
                    "roe": info.get("returnOnEquity"),
                    "roa": info.get("returnOnAssets"),
                },
                "trading": {
                    "avg_volume": info.get("averageVolume"),
                    "avg_volume_10d": info.get("averageVolume10days"),
                    "beta": info.get("beta"),
                    "52_week_high": info.get("fiftyTwoWeekHigh"),
                    "52_week_low": info.get("fiftyTwoWeekLow"),
                },
            }
    except Exception as e:
        logger.error(f"Error fetching stock info for {ticker}: {e}")
        return {"error": str(e), "ticker": ticker}


def get_news_sentiment(
    ticker: str,
    timeframe: str = "7d",
    limit: int = 10,
) -> dict[str, Any]:
    """
    Retrieve news sentiment analysis for a stock.

    This tool fetches sentiment data from the configured external data API,
    providing insights into market sentiment based on recent news.

    Args:
        ticker: The ticker symbol of the stock to analyze
        timeframe: Time frame for news (1d, 7d, 30d, etc.)
        limit: Maximum number of news articles to analyze

    Returns:
        Dictionary containing news sentiment analysis
    """
    try:
        api_key = settings.external_data.api_key
        base_url = settings.external_data.base_url
        if not api_key:
            logger.info(
                "External sentiment API not configured, providing basic response"
            )
            return {
                "ticker": ticker,
                "sentiment": "neutral",
                "message": "External sentiment API not configured - configure EXTERNAL_DATA_API_KEY for enhanced sentiment analysis",
                "status": "fallback_mode",
                "confidence": 0.5,
                "source": "fallback",
            }

        url = f"{base_url}/sentiment/{ticker}"
        headers = {"X-API-KEY": api_key}
        logger.info(f"Fetching sentiment for {ticker} from {url}")
        resp = requests.get(url, headers=headers, timeout=10)

        if resp.status_code == 404:
            return {
                "ticker": ticker,
                "sentiment": "unavailable",
                "message": f"No sentiment data available for {ticker}",
                "status": "not_found",
            }
        elif resp.status_code == 401:
            return {
                "error": "Invalid API key",
                "ticker": ticker,
                "sentiment": "unavailable",
                "status": "unauthorized",
            }
        elif resp.status_code == 429:
            return {
                "error": "Rate limit exceeded",
                "ticker": ticker,
                "sentiment": "unavailable",
                "status": "rate_limited",
            }

        resp.raise_for_status()
        return resp.json()

    except requests.exceptions.Timeout:
        return {
            "error": "Request timed out",
            "ticker": ticker,
            "sentiment": "unavailable",
            "status": "timeout",
        }
    except requests.exceptions.ConnectionError:
        return {
            "error": "Connection error",
            "ticker": ticker,
            "sentiment": "unavailable",
            "status": "connection_error",
        }
    except Exception as e:
        logger.error(f"Error fetching sentiment from External API for {ticker}: {e}")
        return {
            "error": str(e),
            "ticker": ticker,
            "sentiment": "unavailable",
            "status": "error",
        }


def get_cached_price_data(
    ticker: str,
    start_date: str,
    end_date: str | None = None,
) -> dict[str, Any]:
    """
    Get cached price data directly from the database.

    This tool retrieves data from the local cache without making external API calls.
    Useful for checking what data is available locally.

    Args:
        ticker: Stock ticker symbol
        start_date: Start date in YYYY-MM-DD format
        end_date: End date in YYYY-MM-DD format (optional, defaults to today)

    Returns:
        Dictionary containing cached price data
    """
    try:
        with get_db_session_read_only() as session:
            df = PriceCache.get_price_data(session, ticker, start_date, end_date)

            if df.empty:
                return {
                    "status": "success",
                    "ticker": ticker,
                    "message": "No cached data found for the specified date range",
                    "data": [],
                }

            # Convert DataFrame to dict format
            data = df.reset_index().to_dict(orient="records")

            return {
                "status": "success",
                "ticker": ticker,
                "start_date": start_date,
                "end_date": end_date or datetime.now(UTC).strftime("%Y-%m-%d"),
                "count": len(data),
                "data": data,
            }
    except Exception as e:
        logger.error(f"Error fetching cached price data for {ticker}: {str(e)}")
        return {"error": str(e), "status": "error"}


def get_chart_links(ticker: str) -> dict[str, Any]:
    """
    Provide links to various financial charting websites.

    This tool generates URLs to popular financial websites where detailed
    stock charts can be viewed, including:
    - TradingView (advanced charting)
    - Finviz (visual screener)
    - Yahoo Finance (comprehensive data)
    - StockCharts (technical analysis)

    Args:
        ticker: The ticker symbol of the stock

    Returns:
        Dictionary containing links to various chart providers
    """
    try:
        links = {
            "trading_view": f"https://www.tradingview.com/symbols/{ticker}",
            "finviz": f"https://finviz.com/quote.ashx?t={ticker}",
            "yahoo_finance": f"https://finance.yahoo.com/quote/{ticker}/chart",
            "stock_charts": f"https://stockcharts.com/h-sc/ui?s={ticker}",
            "seeking_alpha": f"https://seekingalpha.com/symbol/{ticker}/charts",
            "marketwatch": f"https://www.marketwatch.com/investing/stock/{ticker}/charts",
        }

        return {
            "ticker": ticker,
            "charts": links,
            "description": "External chart resources for detailed analysis",
        }
    except Exception as e:
        logger.error(f"Error generating chart links for {ticker}: {e}")
        return {"error": str(e)}


def clear_cache(ticker: str | None = None) -> dict[str, Any]:
    """
    Clear cached data for a specific ticker or all tickers.

    This tool helps manage the local cache by removing stored data,
    forcing fresh data retrieval on the next request.

    Args:
        ticker: Specific ticker to clear (None to clear all)

    Returns:
        Dictionary with cache clearing status
    """
    try:
        from maverick_mcp.data.cache import clear_cache as cache_clear

        if ticker:
            pattern = f"stock:{ticker}:*"
            count = cache_clear(pattern)
            message = f"Cleared cache for {ticker}"
        else:
            count = cache_clear()
            message = "Cleared all cache entries"

        return {"status": "success", "message": message, "entries_cleared": count}
    except Exception as e:
        logger.error(f"Error clearing cache: {e}")
        return {"error": str(e), "status": "error"}

```
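For reference, a short illustration of the result shape the batch fetcher above returns (tickers and dates are illustrative):

```python
summary = fetch_stock_data_batch(["AAPL", "MSFT"], start_date="2024-01-01")

# Per-ticker entries carry either parsed data or the error message.
for ticker, entry in summary["results"].items():
    if entry["status"] == "success":
        print(ticker, entry["record_count"], "rows")
    else:
        print(ticker, "failed:", entry["error"])

print("ok:", summary["success_count"], "failed:", summary["error_count"])
```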

--------------------------------------------------------------------------------
/maverick_mcp/config/security.py:
--------------------------------------------------------------------------------

```python
"""
Comprehensive Security Configuration for Maverick MCP.

This module provides centralized security configuration including CORS settings,
security headers, rate limiting, and environment-specific security policies.
All security settings are validated to prevent common misconfigurations.
"""

import os
from typing import Any

from pydantic import BaseModel, Field, model_validator


class CORSConfig(BaseModel):
    """CORS (Cross-Origin Resource Sharing) configuration with validation."""

    # Origins configuration
    allowed_origins: list[str] = Field(
        default_factory=lambda: _get_cors_origins(),
        description="List of allowed origins for CORS requests",
    )

    # Credentials and methods
    allow_credentials: bool = Field(
        default=True, description="Whether to allow credentials in CORS requests"
    )

    allowed_methods: list[str] = Field(
        default=["GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"],
        description="Allowed HTTP methods for CORS requests",
    )

    # Headers configuration
    allowed_headers: list[str] = Field(
        default=[
            "Authorization",
            "Content-Type",
            "X-API-Key",
            "X-Request-ID",
            "X-Requested-With",
            "Accept",
            "Origin",
            "User-Agent",
            "Cache-Control",
        ],
        description="Allowed headers for CORS requests",
    )

    exposed_headers: list[str] = Field(
        default=[
            "X-Process-Time",
            "X-RateLimit-Limit",
            "X-RateLimit-Remaining",
            "X-RateLimit-Reset",
            "X-Request-ID",
        ],
        description="Headers exposed to the client",
    )

    # Cache and validation
    max_age: int = Field(
        default=86400,  # 24 hours
        description="Maximum age for CORS preflight cache in seconds",
    )

    @model_validator(mode="after")
    def validate_cors_security(self):
        """Validate CORS configuration for security."""
        # Critical: Wildcard origins with credentials is dangerous
        if self.allow_credentials and "*" in self.allowed_origins:
            raise ValueError(
                "CORS Security Error: Cannot use wildcard origin ('*') with "
                "allow_credentials=True. This is a serious security vulnerability. "
                "Specify explicit origins instead."
            )

        # Warning for wildcard origins without credentials
        if "*" in self.allowed_origins and not self.allow_credentials:
            # This is allowed but should be logged
            import logging

            logger = logging.getLogger(__name__)
            logger.warning(
                "CORS Warning: Using wildcard origin ('*') without credentials. "
                "Consider using specific origins for better security."
            )

        return self


class SecurityHeadersConfig(BaseModel):
    """Security headers configuration."""

    # Content security
    x_content_type_options: str = Field(
        default="nosniff", description="X-Content-Type-Options header value"
    )

    x_frame_options: str = Field(
        default="DENY",
        description="X-Frame-Options header value (DENY, SAMEORIGIN, or ALLOW-FROM)",
    )

    x_xss_protection: str = Field(
        default="1; mode=block", description="X-XSS-Protection header value"
    )

    referrer_policy: str = Field(
        default="strict-origin-when-cross-origin",
        description="Referrer-Policy header value",
    )

    permissions_policy: str = Field(
        default="geolocation=(), microphone=(), camera=(), usb=(), magnetometer=()",
        description="Permissions-Policy header value",
    )

    # HSTS (HTTP Strict Transport Security)
    hsts_max_age: int = Field(
        default=31536000,  # 1 year
        description="HSTS max-age in seconds",
    )

    hsts_include_subdomains: bool = Field(
        default=True, description="Include subdomains in HSTS policy"
    )

    hsts_preload: bool = Field(
        default=False,
        description="Enable HSTS preload (requires manual submission to browser vendors)",
    )

    # Content Security Policy
    csp_default_src: list[str] = Field(
        default=["'self'"], description="CSP default-src directive"
    )

    csp_script_src: list[str] = Field(
        default=["'self'", "'unsafe-inline'"],
        description="CSP script-src directive",
    )

    csp_style_src: list[str] = Field(
        default=["'self'", "'unsafe-inline'"], description="CSP style-src directive"
    )

    csp_img_src: list[str] = Field(
        default=["'self'", "data:", "https:"], description="CSP img-src directive"
    )

    csp_connect_src: list[str] = Field(
        default=["'self'"],
        description="CSP connect-src directive",
    )

    csp_frame_src: list[str] = Field(
        default=["'none'"], description="CSP frame-src directive"
    )

    csp_object_src: list[str] = Field(
        default=["'none'"], description="CSP object-src directive"
    )

    @property
    def hsts_header_value(self) -> str:
        """Generate HSTS header value."""
        value = f"max-age={self.hsts_max_age}"
        if self.hsts_include_subdomains:
            value += "; includeSubDomains"
        if self.hsts_preload:
            value += "; preload"
        return value

    @property
    def csp_header_value(self) -> str:
        """Generate Content-Security-Policy header value."""
        directives = [
            f"default-src {' '.join(self.csp_default_src)}",
            f"script-src {' '.join(self.csp_script_src)}",
            f"style-src {' '.join(self.csp_style_src)}",
            f"img-src {' '.join(self.csp_img_src)}",
            f"connect-src {' '.join(self.csp_connect_src)}",
            f"frame-src {' '.join(self.csp_frame_src)}",
            f"object-src {' '.join(self.csp_object_src)}",
            "base-uri 'self'",
            "form-action 'self'",
        ]
        return "; ".join(directives)


class RateLimitConfig(BaseModel):
    """Rate limiting configuration."""

    # Basic rate limits
    default_rate_limit: str = Field(
        default="1000 per hour", description="Default rate limit for all endpoints"
    )

    # User-specific limits
    authenticated_limit_per_minute: int = Field(
        default=60, description="Rate limit for authenticated users per minute"
    )

    anonymous_limit_per_minute: int = Field(
        default=10, description="Rate limit for anonymous users per minute"
    )

    # Endpoint-specific limits
    auth_endpoints_limit: str = Field(
        default="10 per hour",
        description="Rate limit for authentication endpoints (login, signup)",
    )

    api_endpoints_limit: str = Field(
        default="60 per minute", description="Rate limit for API endpoints"
    )

    sensitive_endpoints_limit: str = Field(
        default="5 per minute", description="Rate limit for sensitive operations"
    )

    webhook_endpoints_limit: str = Field(
        default="100 per minute", description="Rate limit for webhook endpoints"
    )

    # Redis configuration for rate limiting
    redis_url: str | None = Field(
        default_factory=lambda: os.getenv("AUTH_REDIS_URL", "redis://localhost:6379/1"),
        description="Redis URL for rate limiting storage",
    )

    enabled: bool = Field(
        default_factory=lambda: os.getenv("RATE_LIMITING_ENABLED", "true").lower()
        == "true",
        description="Enable rate limiting",
    )


class TrustedHostsConfig(BaseModel):
    """Trusted hosts configuration."""

    allowed_hosts: list[str] = Field(
        default_factory=lambda: _get_trusted_hosts(),
        description="List of trusted host patterns",
    )

    enforce_in_development: bool = Field(
        default=False, description="Whether to enforce trusted hosts in development"
    )


class SecurityConfig(BaseModel):
    """Comprehensive security configuration for Maverick MCP."""

    # Environment detection
    environment: str = Field(
        default_factory=lambda: os.getenv("ENVIRONMENT", "development").lower(),
        description="Environment (development, staging, production)",
    )

    # Sub-configurations
    cors: CORSConfig = Field(
        default_factory=CORSConfig, description="CORS configuration"
    )

    headers: SecurityHeadersConfig = Field(
        default_factory=SecurityHeadersConfig,
        description="Security headers configuration",
    )

    rate_limiting: RateLimitConfig = Field(
        default_factory=RateLimitConfig, description="Rate limiting configuration"
    )

    trusted_hosts: TrustedHostsConfig = Field(
        default_factory=TrustedHostsConfig, description="Trusted hosts configuration"
    )

    # General security settings
    force_https: bool = Field(
        default_factory=lambda: os.getenv("FORCE_HTTPS", "false").lower() == "true",
        description="Force HTTPS in production",
    )

    strict_security: bool = Field(
        default_factory=lambda: os.getenv("STRICT_SECURITY", "false").lower() == "true",
        description="Enable strict security mode",
    )

    @model_validator(mode="after")
    def validate_environment_security(self):
        """Validate security configuration based on environment."""
        if self.environment == "production":
            # Production security requirements
            if not self.force_https:
                import logging

                logger = logging.getLogger(__name__)
                logger.warning(
                    "Production Warning: FORCE_HTTPS is disabled in production. "
                    "Set FORCE_HTTPS=true for better security."
                )

            # Validate CORS for production
            if "*" in self.cors.allowed_origins:
                import logging

                logger = logging.getLogger(__name__)
                logger.error(
                    "Production Error: Wildcard CORS origins detected in production. "
                    "This is a security risk and should be fixed."
                )

        return self

    def get_cors_middleware_config(self) -> dict:
        """Get CORS middleware configuration dictionary."""
        return {
            "allow_origins": self.cors.allowed_origins,
            "allow_credentials": self.cors.allow_credentials,
            "allow_methods": self.cors.allowed_methods,
            "allow_headers": self.cors.allowed_headers,
            "expose_headers": self.cors.exposed_headers,
            "max_age": self.cors.max_age,
        }

    def get_security_headers(self) -> dict[str, str]:
        """Get security headers dictionary."""
        headers = {
            "X-Content-Type-Options": self.headers.x_content_type_options,
            "X-Frame-Options": self.headers.x_frame_options,
            "X-XSS-Protection": self.headers.x_xss_protection,
            "Referrer-Policy": self.headers.referrer_policy,
            "Permissions-Policy": self.headers.permissions_policy,
            "Content-Security-Policy": self.headers.csp_header_value,
        }

        # Add HSTS only in production or when HTTPS is forced
        if self.environment == "production" or self.force_https:
            headers["Strict-Transport-Security"] = self.headers.hsts_header_value

        return headers

    def is_production(self) -> bool:
        """Check if running in production environment."""
        return self.environment == "production"

    def is_development(self) -> bool:
        """Check if running in development environment."""
        return self.environment in ["development", "dev", "local"]


def _get_cors_origins() -> list[str]:
    """Get CORS origins based on environment."""
    environment = os.getenv("ENVIRONMENT", "development").lower()
    cors_origins_env = os.getenv("CORS_ORIGINS")

    if cors_origins_env:
        # Parse comma-separated origins from environment
        return [origin.strip() for origin in cors_origins_env.split(",")]

    if environment == "production":
        return [
            "https://app.maverick-mcp.com",
            "https://maverick-mcp.com",
            "https://www.maverick-mcp.com",
        ]
    elif environment in ["staging", "test"]:
        return [
            "https://staging.maverick-mcp.com",
            "https://test.maverick-mcp.com",
            "http://localhost:3000",
            "http://localhost:3001",
        ]
    else:
        # Development
        return [
            "http://localhost:3000",
            "http://localhost:3001",
            "http://127.0.0.1:3000",
            "http://127.0.0.1:3001",
            "http://localhost:8080",
            "http://localhost:5173",  # Vite default
        ]


def _get_trusted_hosts() -> list[str]:
    """Get trusted hosts based on environment."""
    environment = os.getenv("ENVIRONMENT", "development").lower()
    trusted_hosts_env = os.getenv("TRUSTED_HOSTS")

    if trusted_hosts_env:
        # Parse comma-separated hosts from environment
        return [host.strip() for host in trusted_hosts_env.split(",")]

    if environment == "production":
        return ["api.maverick-mcp.com", "*.maverick-mcp.com", "maverick-mcp.com"]
    elif environment in ["staging", "test"]:
        return [
            "staging.maverick-mcp.com",
            "test.maverick-mcp.com",
            "*.maverick-mcp.com",
            "localhost",
            "127.0.0.1",
        ]
    else:
        # Development - allow any host
        return ["*"]


# Create singleton instance
security_config = SecurityConfig()


def get_security_config() -> SecurityConfig:
    """Get the security configuration instance."""
    return security_config


def validate_security_config() -> dict[str, Any]:
    """Validate the current security configuration."""
    config = get_security_config()
    issues = []
    warnings = []

    # Check for dangerous CORS configuration
    if config.cors.allow_credentials and "*" in config.cors.allowed_origins:
        issues.append("CRITICAL: Wildcard CORS origins with credentials enabled")

    # Check production-specific requirements
    if config.is_production():
        if "*" in config.cors.allowed_origins:
            issues.append("CRITICAL: Wildcard CORS origins in production")

        if not config.force_https:
            warnings.append("HTTPS not enforced in production")

        if "localhost" in str(config.cors.allowed_origins).lower():
            warnings.append("Localhost origins found in production CORS config")

    # Check for insecure headers
    if config.headers.x_frame_options not in ["DENY", "SAMEORIGIN"]:
        warnings.append("X-Frame-Options not set to DENY or SAMEORIGIN")

    return {
        "valid": len(issues) == 0,
        "issues": issues,
        "warnings": warnings,
        "environment": config.environment,
        "cors_origins": config.cors.allowed_origins,
        "force_https": config.force_https,
    }

```
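A small startup self-check sketch built on the helpers above (the fail-fast policy is an assumption; the report keys match `validate_security_config`):

```python
from maverick_mcp.config.security import (
    get_security_config,
    validate_security_config,
)

report = validate_security_config()
if not report["valid"]:
    # Refuse to boot when critical issues are reported.
    raise RuntimeError(f"Security misconfiguration: {report['issues']}")
for warning in report["warnings"]:
    print("security warning:", warning)

# The header map can be applied by any HTTP middleware layer.
headers = get_security_config().get_security_headers()
```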