#
tokens: 49843/50000 7/315 files (page 5/6)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 5 of 6. Use http://codebase.md/dbt-labs/dbt-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .changes
│   ├── header.tpl.md
│   ├── unreleased
│   │   ├── .gitkeep
│   │   └── Under the Hood-20251104-091321.yaml
│   ├── v0.1.3.md
│   ├── v0.10.0.md
│   ├── v0.10.1.md
│   ├── v0.10.2.md
│   ├── v0.10.3.md
│   ├── v0.2.0.md
│   ├── v0.2.1.md
│   ├── v0.2.10.md
│   ├── v0.2.11.md
│   ├── v0.2.12.md
│   ├── v0.2.13.md
│   ├── v0.2.14.md
│   ├── v0.2.15.md
│   ├── v0.2.16.md
│   ├── v0.2.17.md
│   ├── v0.2.18.md
│   ├── v0.2.19.md
│   ├── v0.2.2.md
│   ├── v0.2.20.md
│   ├── v0.2.3.md
│   ├── v0.2.4.md
│   ├── v0.2.5.md
│   ├── v0.2.6.md
│   ├── v0.2.7.md
│   ├── v0.2.8.md
│   ├── v0.2.9.md
│   ├── v0.3.0.md
│   ├── v0.4.0.md
│   ├── v0.4.1.md
│   ├── v0.4.2.md
│   ├── v0.5.0.md
│   ├── v0.6.0.md
│   ├── v0.6.1.md
│   ├── v0.6.2.md
│   ├── v0.7.0.md
│   ├── v0.8.0.md
│   ├── v0.8.1.md
│   ├── v0.8.2.md
│   ├── v0.8.3.md
│   ├── v0.8.4.md
│   ├── v0.9.0.md
│   ├── v0.9.1.md
│   ├── v1.0.0.md
│   └── v1.1.0.md
├── .changie.yaml
├── .env.example
├── .github
│   ├── actions
│   │   └── setup-python
│   │       └── action.yml
│   ├── CODEOWNERS
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   └── feature_request.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── changelog-check.yml
│       ├── codeowners-check.yml
│       ├── create-release-pr.yml
│       ├── release.yml
│       └── run-checks-pr.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── .task
│   └── checksum
│       └── d2
├── .tool-versions
├── .vscode
│   ├── launch.json
│   └── settings.json
├── CHANGELOG.md
├── CONTRIBUTING.md
├── docs
│   ├── d2.png
│   └── diagram.d2
├── evals
│   └── semantic_layer
│       └── test_eval_semantic_layer.py
├── examples
│   ├── .DS_Store
│   ├── aws_strands_agent
│   │   ├── __init__.py
│   │   ├── .DS_Store
│   │   ├── dbt_data_scientist
│   │   │   ├── __init__.py
│   │   │   ├── .env.example
│   │   │   ├── agent.py
│   │   │   ├── prompts.py
│   │   │   ├── quick_mcp_test.py
│   │   │   ├── test_all_tools.py
│   │   │   └── tools
│   │   │       ├── __init__.py
│   │   │       ├── dbt_compile.py
│   │   │       ├── dbt_mcp.py
│   │   │       └── dbt_model_analyzer.py
│   │   ├── LICENSE
│   │   ├── README.md
│   │   └── requirements.txt
│   ├── google_adk_agent
│   │   ├── __init__.py
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   └── README.md
│   ├── langgraph_agent
│   │   ├── __init__.py
│   │   ├── .python-version
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   ├── README.md
│   │   └── uv.lock
│   ├── openai_agent
│   │   ├── __init__.py
│   │   ├── .gitignore
│   │   ├── .python-version
│   │   ├── main_streamable.py
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   ├── README.md
│   │   └── uv.lock
│   ├── openai_responses
│   │   ├── __init__.py
│   │   ├── .gitignore
│   │   ├── .python-version
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   ├── README.md
│   │   └── uv.lock
│   ├── pydantic_ai_agent
│   │   ├── __init__.py
│   │   ├── .gitignore
│   │   ├── .python-version
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   └── README.md
│   └── remote_mcp
│       ├── .python-version
│       ├── main.py
│       ├── pyproject.toml
│       ├── README.md
│       └── uv.lock
├── LICENSE
├── pyproject.toml
├── README.md
├── src
│   ├── client
│   │   ├── __init__.py
│   │   ├── main.py
│   │   └── tools.py
│   ├── dbt_mcp
│   │   ├── __init__.py
│   │   ├── .gitignore
│   │   ├── config
│   │   │   ├── config_providers.py
│   │   │   ├── config.py
│   │   │   ├── dbt_project.py
│   │   │   ├── dbt_yaml.py
│   │   │   ├── headers.py
│   │   │   ├── settings.py
│   │   │   └── transport.py
│   │   ├── dbt_admin
│   │   │   ├── __init__.py
│   │   │   ├── client.py
│   │   │   ├── constants.py
│   │   │   ├── run_results_errors
│   │   │   │   ├── __init__.py
│   │   │   │   ├── config.py
│   │   │   │   └── parser.py
│   │   │   └── tools.py
│   │   ├── dbt_cli
│   │   │   ├── binary_type.py
│   │   │   └── tools.py
│   │   ├── dbt_codegen
│   │   │   ├── __init__.py
│   │   │   └── tools.py
│   │   ├── discovery
│   │   │   ├── client.py
│   │   │   └── tools.py
│   │   ├── errors
│   │   │   ├── __init__.py
│   │   │   ├── admin_api.py
│   │   │   ├── base.py
│   │   │   ├── cli.py
│   │   │   ├── common.py
│   │   │   ├── discovery.py
│   │   │   ├── semantic_layer.py
│   │   │   └── sql.py
│   │   ├── gql
│   │   │   └── errors.py
│   │   ├── lsp
│   │   │   ├── __init__.py
│   │   │   ├── lsp_binary_manager.py
│   │   │   ├── lsp_client.py
│   │   │   ├── lsp_connection.py
│   │   │   ├── providers
│   │   │   │   ├── __init__.py
│   │   │   │   ├── local_lsp_client_provider.py
│   │   │   │   ├── local_lsp_connection_provider.py
│   │   │   │   ├── lsp_client_provider.py
│   │   │   │   └── lsp_connection_provider.py
│   │   │   └── tools.py
│   │   ├── main.py
│   │   ├── mcp
│   │   │   ├── create.py
│   │   │   └── server.py
│   │   ├── oauth
│   │   │   ├── client_id.py
│   │   │   ├── context_manager.py
│   │   │   ├── dbt_platform.py
│   │   │   ├── fastapi_app.py
│   │   │   ├── logging.py
│   │   │   ├── login.py
│   │   │   ├── refresh_strategy.py
│   │   │   ├── token_provider.py
│   │   │   └── token.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── admin_api
│   │   │   │   ├── cancel_job_run.md
│   │   │   │   ├── get_job_details.md
│   │   │   │   ├── get_job_run_artifact.md
│   │   │   │   ├── get_job_run_details.md
│   │   │   │   ├── get_job_run_error.md
│   │   │   │   ├── list_job_run_artifacts.md
│   │   │   │   ├── list_jobs_runs.md
│   │   │   │   ├── list_jobs.md
│   │   │   │   ├── retry_job_run.md
│   │   │   │   └── trigger_job_run.md
│   │   │   ├── dbt_cli
│   │   │   │   ├── args
│   │   │   │   │   ├── full_refresh.md
│   │   │   │   │   ├── limit.md
│   │   │   │   │   ├── resource_type.md
│   │   │   │   │   ├── selectors.md
│   │   │   │   │   ├── sql_query.md
│   │   │   │   │   └── vars.md
│   │   │   │   ├── build.md
│   │   │   │   ├── compile.md
│   │   │   │   ├── docs.md
│   │   │   │   ├── list.md
│   │   │   │   ├── parse.md
│   │   │   │   ├── run.md
│   │   │   │   ├── show.md
│   │   │   │   └── test.md
│   │   │   ├── dbt_codegen
│   │   │   │   ├── args
│   │   │   │   │   ├── case_sensitive_cols.md
│   │   │   │   │   ├── database_name.md
│   │   │   │   │   ├── generate_columns.md
│   │   │   │   │   ├── include_data_types.md
│   │   │   │   │   ├── include_descriptions.md
│   │   │   │   │   ├── leading_commas.md
│   │   │   │   │   ├── materialized.md
│   │   │   │   │   ├── model_name.md
│   │   │   │   │   ├── model_names.md
│   │   │   │   │   ├── schema_name.md
│   │   │   │   │   ├── source_name.md
│   │   │   │   │   ├── table_name.md
│   │   │   │   │   ├── table_names.md
│   │   │   │   │   ├── tables.md
│   │   │   │   │   └── upstream_descriptions.md
│   │   │   │   ├── generate_model_yaml.md
│   │   │   │   ├── generate_source.md
│   │   │   │   └── generate_staging_model.md
│   │   │   ├── discovery
│   │   │   │   ├── get_all_models.md
│   │   │   │   ├── get_all_sources.md
│   │   │   │   ├── get_exposure_details.md
│   │   │   │   ├── get_exposures.md
│   │   │   │   ├── get_mart_models.md
│   │   │   │   ├── get_model_children.md
│   │   │   │   ├── get_model_details.md
│   │   │   │   ├── get_model_health.md
│   │   │   │   └── get_model_parents.md
│   │   │   ├── lsp
│   │   │   │   ├── args
│   │   │   │   │   ├── column_name.md
│   │   │   │   │   └── model_id.md
│   │   │   │   └── get_column_lineage.md
│   │   │   ├── prompts.py
│   │   │   └── semantic_layer
│   │   │       ├── get_dimensions.md
│   │   │       ├── get_entities.md
│   │   │       ├── get_metrics_compiled_sql.md
│   │   │       ├── list_metrics.md
│   │   │       ├── list_saved_queries.md
│   │   │       └── query_metrics.md
│   │   ├── py.typed
│   │   ├── semantic_layer
│   │   │   ├── client.py
│   │   │   ├── gql
│   │   │   │   ├── gql_request.py
│   │   │   │   └── gql.py
│   │   │   ├── levenshtein.py
│   │   │   ├── tools.py
│   │   │   └── types.py
│   │   ├── sql
│   │   │   └── tools.py
│   │   ├── telemetry
│   │   │   └── logging.py
│   │   ├── tools
│   │   │   ├── annotations.py
│   │   │   ├── definitions.py
│   │   │   ├── policy.py
│   │   │   ├── register.py
│   │   │   ├── tool_names.py
│   │   │   └── toolsets.py
│   │   └── tracking
│   │       └── tracking.py
│   └── remote_mcp
│       ├── __init__.py
│       └── session.py
├── Taskfile.yml
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── env_vars.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── dbt_codegen
│   │   │   ├── __init__.py
│   │   │   └── test_dbt_codegen.py
│   │   ├── discovery
│   │   │   └── test_discovery.py
│   │   ├── initialization
│   │   │   ├── __init__.py
│   │   │   └── test_initialization.py
│   │   ├── lsp
│   │   │   └── test_lsp_connection.py
│   │   ├── remote_mcp
│   │   │   └── test_remote_mcp.py
│   │   ├── remote_tools
│   │   │   └── test_remote_tools.py
│   │   ├── semantic_layer
│   │   │   └── test_semantic_layer.py
│   │   └── tracking
│   │       └── test_tracking.py
│   ├── mocks
│   │   └── config.py
│   └── unit
│       ├── __init__.py
│       ├── config
│       │   ├── __init__.py
│       │   ├── test_config.py
│       │   └── test_transport.py
│       ├── dbt_admin
│       │   ├── __init__.py
│       │   ├── test_client.py
│       │   ├── test_error_fetcher.py
│       │   └── test_tools.py
│       ├── dbt_cli
│       │   ├── __init__.py
│       │   ├── test_cli_integration.py
│       │   └── test_tools.py
│       ├── dbt_codegen
│       │   ├── __init__.py
│       │   └── test_tools.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── conftest.py
│       │   ├── test_exposures_fetcher.py
│       │   └── test_sources_fetcher.py
│       ├── lsp
│       │   ├── __init__.py
│       │   ├── test_local_lsp_client_provider.py
│       │   ├── test_local_lsp_connection_provider.py
│       │   ├── test_lsp_client.py
│       │   ├── test_lsp_connection.py
│       │   └── test_lsp_tools.py
│       ├── oauth
│       │   ├── test_credentials_provider.py
│       │   ├── test_fastapi_app_pagination.py
│       │   └── test_token.py
│       ├── semantic_layer
│       │   ├── __init__.py
│       │   └── test_saved_queries.py
│       ├── tools
│       │   ├── test_disable_tools.py
│       │   ├── test_tool_names.py
│       │   ├── test_tool_policies.py
│       │   └── test_toolsets.py
│       └── tracking
│           └── test_tracking.py
├── ui
│   ├── .gitignore
│   ├── assets
│   │   ├── dbt_logo BLK.svg
│   │   └── dbt_logo WHT.svg
│   ├── eslint.config.js
│   ├── index.html
│   ├── package.json
│   ├── pnpm-lock.yaml
│   ├── pnpm-workspace.yaml
│   ├── README.md
│   ├── src
│   │   ├── App.css
│   │   ├── App.tsx
│   │   ├── global.d.ts
│   │   ├── index.css
│   │   ├── main.tsx
│   │   └── vite-env.d.ts
│   ├── tsconfig.app.json
│   ├── tsconfig.json
│   ├── tsconfig.node.json
│   └── vite.config.ts
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/src/dbt_mcp/config/settings.py:
--------------------------------------------------------------------------------

```python
  1 | import logging
  2 | import socket
  3 | import time
  4 | import shutil
  5 | from enum import Enum
  6 | from pathlib import Path
  7 | from typing import Annotated
  8 | 
  9 | from filelock import FileLock
 10 | from pydantic import Field, field_validator, model_validator
 11 | from pydantic_core.core_schema import ValidationInfo
 12 | from pydantic_settings import BaseSettings, NoDecode, SettingsConfigDict
 13 | 
 14 | from dbt_mcp.config.dbt_project import DbtProjectYaml
 15 | from dbt_mcp.config.dbt_yaml import try_read_yaml
 16 | from dbt_mcp.config.headers import (
 17 |     TokenProvider,
 18 | )
 19 | from dbt_mcp.oauth.context_manager import DbtPlatformContextManager
 20 | from dbt_mcp.oauth.dbt_platform import DbtPlatformContext
 21 | from dbt_mcp.oauth.login import login
 22 | from dbt_mcp.oauth.token_provider import (
 23 |     OAuthTokenProvider,
 24 |     StaticTokenProvider,
 25 | )
 26 | from dbt_mcp.tools.tool_names import ToolName
 27 | 
 28 | logger = logging.getLogger(__name__)
 29 | 
 30 | OAUTH_REDIRECT_STARTING_PORT = 6785
 31 | DEFAULT_DBT_CLI_TIMEOUT = 60
 32 | 
 33 | 
class AuthenticationMethod(Enum):
    """How this server obtained its dbt platform credentials."""

    # Interactive browser OAuth flow (token managed by OAuthTokenProvider).
    OAUTH = "oauth"
    # Token supplied directly via environment configuration (e.g. DBT_TOKEN).
    ENV_VAR = "env_var"
 37 | 
 38 | 
class DbtMcpSettings(BaseSettings):
    """Environment-variable-driven configuration for the dbt MCP server.

    Values are loaded by pydantic-settings from the environment (and an
    optional ``.env`` file). Field validators normalize/verify individual
    values; the model-level ``auto_disable`` validator then turns off feature
    groups whose required settings are missing instead of failing outright.
    """

    model_config = SettingsConfigDict(
        env_prefix="",
        case_sensitive=False,
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    # dbt Platform settings
    dbt_host: str | None = Field(None, alias="DBT_HOST")
    dbt_mcp_host: str | None = Field(None, alias="DBT_MCP_HOST")
    dbt_prod_env_id: int | None = Field(None, alias="DBT_PROD_ENV_ID")
    dbt_env_id: int | None = Field(None, alias="DBT_ENV_ID")  # legacy support
    dbt_dev_env_id: int | None = Field(None, alias="DBT_DEV_ENV_ID")
    dbt_user_id: int | None = Field(None, alias="DBT_USER_ID")
    dbt_account_id: int | None = Field(None, alias="DBT_ACCOUNT_ID")
    dbt_token: str | None = Field(None, alias="DBT_TOKEN")
    # Legacy alias for DBT_HOST_PREFIX; see actual_host_prefix.
    multicell_account_prefix: str | None = Field(None, alias="MULTICELL_ACCOUNT_PREFIX")
    host_prefix: str | None = Field(None, alias="DBT_HOST_PREFIX")
    dbt_lsp_path: str | None = Field(None, alias="DBT_LSP_PATH")

    # dbt CLI settings
    dbt_project_dir: str | None = Field(None, alias="DBT_PROJECT_DIR")
    dbt_path: str = Field("dbt", alias="DBT_PATH")
    dbt_cli_timeout: int = Field(DEFAULT_DBT_CLI_TIMEOUT, alias="DBT_CLI_TIMEOUT")
    dbt_warn_error_options: str | None = Field(None, alias="DBT_WARN_ERROR_OPTIONS")
    dbt_profiles_dir: str | None = Field(None, alias="DBT_PROFILES_DIR")

    # Disable tool settings
    disable_dbt_cli: bool = Field(False, alias="DISABLE_DBT_CLI")
    disable_dbt_codegen: bool = Field(True, alias="DISABLE_DBT_CODEGEN")
    disable_semantic_layer: bool = Field(False, alias="DISABLE_SEMANTIC_LAYER")
    disable_discovery: bool = Field(False, alias="DISABLE_DISCOVERY")
    # Legacy alias for DISABLE_SQL; see actual_disable_sql.
    disable_remote: bool | None = Field(None, alias="DISABLE_REMOTE")
    disable_admin_api: bool = Field(False, alias="DISABLE_ADMIN_API")
    disable_sql: bool | None = Field(None, alias="DISABLE_SQL")
    # NoDecode: raw comma-separated string is parsed by parse_disable_tools.
    disable_tools: Annotated[list[ToolName] | None, NoDecode] = Field(
        None, alias="DISABLE_TOOLS"
    )
    disable_lsp: bool | None = Field(None, alias="DISABLE_LSP")

    # Tracking settings
    do_not_track: str | None = Field(None, alias="DO_NOT_TRACK")
    send_anonymous_usage_data: str | None = Field(
        None, alias="DBT_SEND_ANONYMOUS_USAGE_STATS"
    )

    # Developer settings
    file_logging: bool = Field(False, alias="DBT_MCP_SERVER_FILE_LOGGING")

    def __repr__(self) -> str:
        """Custom repr to bring most important settings to front. Redact sensitive info."""
        return (
            #  auto-disable settings
            f"DbtMcpSettings(dbt_host={self.dbt_host}, "
            f"dbt_path={self.dbt_path}, "
            f"dbt_project_dir={self.dbt_project_dir}, "
            # disable settings
            f"disable_dbt_cli={self.disable_dbt_cli}, "
            f"disable_dbt_codegen={self.disable_dbt_codegen}, "
            f"disable_semantic_layer={self.disable_semantic_layer}, "
            f"disable_discovery={self.disable_discovery}, "
            f"disable_admin_api={self.disable_admin_api}, "
            f"disable_sql={self.disable_sql}, "
            f"disable_tools={self.disable_tools}, "
            f"disable_lsp={self.disable_lsp}, "
            # everything else
            f"dbt_prod_env_id={self.dbt_prod_env_id}, "
            f"dbt_dev_env_id={self.dbt_dev_env_id}, "
            f"dbt_user_id={self.dbt_user_id}, "
            f"dbt_account_id={self.dbt_account_id}, "
            f"dbt_token={'***redacted***' if self.dbt_token else None}, "
            f"send_anonymous_usage_data={self.send_anonymous_usage_data}, "
            f"file_logging={self.file_logging})"
        )

    @property
    def actual_host(self) -> str | None:
        """Host from DBT_HOST (preferred) or legacy DBT_MCP_HOST, with any
        trailing slash and http(s):// scheme prefix stripped."""
        host = self.dbt_host or self.dbt_mcp_host
        if host is None:
            return None
        return host.rstrip("/").removeprefix("https://").removeprefix("http://")

    @property
    def actual_prod_environment_id(self) -> int | None:
        """Production environment id; DBT_PROD_ENV_ID wins over legacy DBT_ENV_ID."""
        return self.dbt_prod_env_id or self.dbt_env_id

    @property
    def actual_disable_sql(self) -> bool:
        """Effective SQL-tools disablement: DISABLE_SQL wins, then legacy
        DISABLE_REMOTE; SQL tools are disabled by default."""
        if self.disable_sql is not None:
            return self.disable_sql
        if self.disable_remote is not None:
            return self.disable_remote
        return True

    @property
    def actual_host_prefix(self) -> str | None:
        """Host prefix; DBT_HOST_PREFIX wins over legacy MULTICELL_ACCOUNT_PREFIX."""
        if self.host_prefix is not None:
            return self.host_prefix
        if self.multicell_account_prefix is not None:
            return self.multicell_account_prefix
        return None

    @property
    def dbt_project_yml(self) -> DbtProjectYaml | None:
        """Parsed dbt_project.yml from DBT_PROJECT_DIR, or None when the
        directory is unset or the file cannot be read."""
        if not self.dbt_project_dir:
            return None
        dbt_project_yml = try_read_yaml(Path(self.dbt_project_dir) / "dbt_project.yml")
        if dbt_project_yml is None:
            return None
        return DbtProjectYaml.model_validate(dbt_project_yml)

    @property
    def usage_tracking_enabled(self) -> bool:
        """Whether anonymous usage tracking is enabled (defaults to True).

        Opt-out env vars (DO_NOT_TRACK / DBT_SEND_ANONYMOUS_USAGE_STATS) take
        precedence over the flags block in dbt_project.yml.
        """
        # dbt environment variables take precedence over dbt_project.yml
        if (
            self.send_anonymous_usage_data is not None
            and (
                self.send_anonymous_usage_data.lower() == "false"
                or self.send_anonymous_usage_data == "0"
            )
        ) or (
            self.do_not_track is not None
            and (self.do_not_track.lower() == "true" or self.do_not_track == "1")
        ):
            return False
        dbt_project_yml = self.dbt_project_yml
        if (
            dbt_project_yml
            and dbt_project_yml.flags
            and dbt_project_yml.flags.send_anonymous_usage_stats is not None
        ):
            return dbt_project_yml.flags.send_anonymous_usage_stats
        return True

    @field_validator("dbt_host", "dbt_mcp_host", mode="after")
    @classmethod
    def validate_host(cls, v: str | None, info: ValidationInfo) -> str | None:
        """Intentionally error on misconfigured host-like env vars (DBT_HOST and DBT_MCP_HOST)."""
        # Normalize only for the check; the original value is returned untouched.
        host = (
            v.rstrip("/").removeprefix("https://").removeprefix("http://") if v else v
        )

        if host and (host.startswith("metadata") or host.startswith("semantic-layer")):
            field_name = (
                getattr(info, "field_name", "None") if info is not None else "None"
            ).upper()
            raise ValueError(
                f"{field_name} must not start with 'metadata' or 'semantic-layer': {v}"
            )
        return v

    @field_validator("dbt_path", mode="after")
    @classmethod
    def validate_file_exists(cls, v: str | None, info: ValidationInfo) -> str | None:
        """Validate a path exists in the system.

        This will only fail if the path is explicitly set to a non-existing path.
        It will auto-disable upon model validation if it can't be found AND it's not $PATH.
        """
        # Allow 'dbt' and 'dbtf' as special cases as they're expected to be on PATH
        if v in ["dbt", "dbtf"]:
            return v
        if v:
            p = Path(v)
            if p.exists():
                return v

            field_name = (
                getattr(info, "field_name", "None") if info is not None else "None"
            ).upper()
            raise ValueError(f"{field_name} path does not exist: {v}")
        return v

    @field_validator("dbt_project_dir", "dbt_profiles_dir", mode="after")
    @classmethod
    def validate_dir_exists(cls, v: str | None, info: ValidationInfo) -> str | None:
        """Validate a directory path exists in the system."""
        if v:
            path = Path(v)
            if not path.is_dir():
                field_name = (
                    getattr(info, "field_name", "None") if info is not None else "None"
                ).upper()
                raise ValueError(f"{field_name} directory does not exist: {v}")
        return v

    @field_validator("disable_tools", mode="before")
    @classmethod
    def parse_disable_tools(cls, env_var: str | None) -> list[ToolName]:
        """Parse the comma-separated DISABLE_TOOLS env var into ToolName values.

        Blank entries are skipped; all invalid names are collected and reported
        together in a single ValueError.
        """
        if not env_var:
            return []
        errors: list[str] = []
        tool_names: list[ToolName] = []
        for tool_name in env_var.split(","):
            tool_name_stripped = tool_name.strip()
            if tool_name_stripped == "":
                continue
            try:
                tool_names.append(ToolName(tool_name_stripped))
            except ValueError:
                errors.append(
                    f"Invalid tool name in DISABLE_TOOLS: {tool_name_stripped}."
                    + " Must be a valid tool name."
                )
        if errors:
            raise ValueError("\n".join(errors))
        return tool_names

    @model_validator(mode="after")
    def auto_disable(self) -> "DbtMcpSettings":
        """Auto-disable features based on required settings."""
        # platform features
        if (
            not self.actual_host
        ):  # host is the only truly required setting for platform features
            # object.__setattr__ is used in case we want to set values on a frozen model
            object.__setattr__(self, "disable_semantic_layer", True)
            object.__setattr__(self, "disable_discovery", True)
            object.__setattr__(self, "disable_admin_api", True)
            object.__setattr__(self, "disable_sql", True)

            logger.warning(
                "Platform features have been automatically disabled due to missing DBT_HOST."
            )

        # CLI features
        cli_errors = validate_dbt_cli_settings(self)
        if cli_errors:
            object.__setattr__(self, "disable_dbt_cli", True)
            object.__setattr__(self, "disable_dbt_codegen", True)
            logger.warning(
                f"CLI features have been automatically disabled due to misconfigurations:\n    {'\n    '.join(cli_errors)}."
            )
        return self
275 | 
276 | 
277 | def _find_available_port(*, start_port: int, max_attempts: int = 20) -> int:
278 |     """
279 |     Return the first available port on 127.0.0.1 starting at start_port.
280 | 
281 |     Raises RuntimeError if no port is found within the attempted range.
282 |     """
283 |     for candidate_port in range(start_port, start_port + max_attempts):
284 |         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
285 |             sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
286 |             try:
287 |                 sock.bind(("127.0.0.1", candidate_port))
288 |             except OSError:
289 |                 continue
290 |             return candidate_port
291 |     raise RuntimeError(
292 |         "No available port found starting at "
293 |         f"{start_port} after {max_attempts} attempts."
294 |     )
295 | 
296 | 
297 | def get_dbt_profiles_path(dbt_profiles_dir: str | None = None) -> Path:
298 |     # Respect DBT_PROFILES_DIR if set; otherwise default to ~/.dbt/mcp.yml
299 |     if dbt_profiles_dir:
300 |         return Path(dbt_profiles_dir).expanduser()
301 |     else:
302 |         return Path.home() / ".dbt"
303 | 
304 | 
async def get_dbt_platform_context(
    *,
    dbt_user_dir: Path,
    dbt_platform_url: str,
    dbt_platform_context_manager: DbtPlatformContextManager,
) -> DbtPlatformContext:
    """Return a usable dbt platform context, running the OAuth login flow if needed.

    Some MCP hosts (e.g. Claude Desktop) launch multiple server instances; a
    file lock ensures only one of them performs the OAuth flow at a time.
    """
    with FileLock(dbt_user_dir / "mcp.lock"):
        cached = dbt_platform_context_manager.read_context()
        token = cached.decoded_access_token if cached else None
        context_is_complete = bool(
            cached
            and cached.account_id
            and cached.host_prefix
            and cached.dev_environment
            and cached.prod_environment
            and token
        )
        # Reuse the cached context only while its token is valid for at
        # least another 2 minutes.
        if (
            context_is_complete
            and token.access_token_response.expires_at > time.time() + 120
        ):
            return cached
        # Otherwise run the browser OAuth flow on a free local redirect port.
        redirect_port = _find_available_port(start_port=OAUTH_REDIRECT_STARTING_PORT)
        return await login(
            dbt_platform_url=dbt_platform_url,
            port=redirect_port,
            dbt_platform_context_manager=dbt_platform_context_manager,
        )
333 | 
334 | 
def get_dbt_host(
    settings: DbtMcpSettings, dbt_platform_context: DbtPlatformContext
) -> str:
    """Derive the base dbt platform host from settings and the OAuth context.

    Strips the account-specific custom-subdomain prefix so that the metadata
    and semantic-layer URLs can be constructed from the base host later.

    Raises:
        ValueError: if no host is configured or it lacks the expected prefix.
    """
    host = settings.actual_host
    if not host:
        raise ValueError("DBT_HOST is a required environment variable")
    expected_prefix = f"{dbt_platform_context.host_prefix}."
    if not host.startswith(expected_prefix):
        raise ValueError(
            f"The DBT_HOST environment variable is expected to start with the {dbt_platform_context.host_prefix} custom subdomain."
        )
    return host.removeprefix(expected_prefix)
349 | 
350 | 
def validate_settings(settings: DbtMcpSettings):
    """Raise a ValueError listing every configuration problem found."""
    problems = [
        *validate_dbt_platform_settings(settings),
        *validate_dbt_cli_settings(settings),
    ]
    if problems:
        raise ValueError("Errors found in configuration:\n\n" + "\n".join(problems))
357 | 
358 | 
def validate_dbt_platform_settings(
    settings: DbtMcpSettings,
) -> list[str]:
    """Collect configuration errors for the dbt-platform-backed tool groups.

    Returns a list of human-readable error strings; an empty list means the
    platform settings are consistent with the tools currently enabled.
    """
    errors: list[str] = []
    disabled_tools = settings.disable_tools or []
    sql_enabled = not settings.actual_disable_sql
    any_platform_tools_enabled = not (
        settings.disable_semantic_layer
        and settings.disable_discovery
        and not sql_enabled
        and settings.disable_admin_api
    )
    if any_platform_tools_enabled:
        # Host, prod environment id and token are required by every platform tool.
        if not settings.actual_host:
            errors.append(
                "DBT_HOST environment variable is required when semantic layer, discovery, SQL or admin API tools are enabled."
            )
        if not settings.actual_prod_environment_id:
            errors.append(
                "DBT_PROD_ENV_ID environment variable is required when semantic layer, discovery, SQL or admin API tools are enabled."
            )
        if not settings.dbt_token:
            errors.append(
                "DBT_TOKEN environment variable is required when semantic layer, discovery, SQL or admin API tools are enabled."
            )
        host = settings.actual_host
        if host and (host.startswith("metadata") or host.startswith("semantic-layer")):
            errors.append(
                "DBT_HOST must not start with 'metadata' or 'semantic-layer'."
            )
    if (
        sql_enabled
        and ToolName.TEXT_TO_SQL not in disabled_tools
        and not settings.actual_prod_environment_id
    ):
        errors.append(
            "DBT_PROD_ENV_ID environment variable is required when text_to_sql is enabled."
        )
    if sql_enabled and ToolName.EXECUTE_SQL not in disabled_tools:
        # execute_sql additionally needs a dev environment and a user identity.
        if not settings.dbt_dev_env_id:
            errors.append(
                "DBT_DEV_ENV_ID environment variable is required when execute_sql is enabled."
            )
        if not settings.dbt_user_id:
            errors.append(
                "DBT_USER_ID environment variable is required when execute_sql is enabled."
            )
    return errors
408 | 
409 | 
def validate_dbt_cli_settings(settings: DbtMcpSettings) -> list[str]:
    """Collect configuration errors for the dbt CLI tool group.

    Returns an empty list when CLI tools are disabled or fully configured.
    """
    if settings.disable_dbt_cli:
        return []
    errors: list[str] = []
    if not settings.dbt_project_dir:
        errors.append(
            "DBT_PROJECT_DIR environment variable is required when dbt CLI tools are enabled."
        )
    if not settings.dbt_path:
        errors.append(
            "DBT_PATH environment variable is required when dbt CLI tools are enabled."
        )
    else:
        executable = Path(settings.dbt_path)
        # Accept either an explicit filesystem path or a command found on $PATH.
        if not (executable.exists() or shutil.which(executable)):
            errors.append(
                f"DBT_PATH executable can't be found: {settings.dbt_path}"
            )
    return errors
428 | 
429 | 
430 | class CredentialsProvider:
    def __init__(self, settings: DbtMcpSettings):
        # Settings may later be augmented (e.g. from the OAuth flow) by get_credentials().
        self.settings = settings
        # Both caches are populated on the first get_credentials() call.
        self.token_provider: TokenProvider | None = None
        self.authentication_method: AuthenticationMethod | None = None
435 | 
    def _log_settings(self) -> None:
        """Log the effective settings with the API token redacted."""
        settings = self.settings.model_dump()
        if settings.get("dbt_token") is not None:
            settings["dbt_token"] = "***redacted***"
        logger.info(f"Settings: {settings}")
441 | 
442 |     async def get_credentials(self) -> tuple[DbtMcpSettings, TokenProvider]:
443 |         if self.token_provider is not None:
444 |             # If token provider is already set, just return the cached values
445 |             return self.settings, self.token_provider
446 |         # Load settings from environment variables using pydantic_settings
447 |         dbt_platform_errors = validate_dbt_platform_settings(self.settings)
448 |         if dbt_platform_errors:
449 |             dbt_user_dir = get_dbt_profiles_path(
450 |                 dbt_profiles_dir=self.settings.dbt_profiles_dir
451 |             )
452 |             config_location = dbt_user_dir / "mcp.yml"
453 |             dbt_platform_url = f"https://{self.settings.actual_host}"
454 |             dbt_platform_context_manager = DbtPlatformContextManager(config_location)
455 |             dbt_platform_context = await get_dbt_platform_context(
456 |                 dbt_platform_context_manager=dbt_platform_context_manager,
457 |                 dbt_user_dir=dbt_user_dir,
458 |                 dbt_platform_url=dbt_platform_url,
459 |             )
460 | 
461 |             # Override settings with settings attained from login or mcp.yml
462 |             self.settings.dbt_user_id = dbt_platform_context.user_id
463 |             self.settings.dbt_dev_env_id = (
464 |                 dbt_platform_context.dev_environment.id
465 |                 if dbt_platform_context.dev_environment
466 |                 else None
467 |             )
468 |             self.settings.dbt_prod_env_id = (
469 |                 dbt_platform_context.prod_environment.id
470 |                 if dbt_platform_context.prod_environment
471 |                 else None
472 |             )
473 |             self.settings.dbt_account_id = dbt_platform_context.account_id
474 |             self.settings.host_prefix = dbt_platform_context.host_prefix
475 |             self.settings.dbt_host = get_dbt_host(self.settings, dbt_platform_context)
476 |             if not dbt_platform_context.decoded_access_token:
477 |                 raise ValueError("No decoded access token found in OAuth context")
478 |             self.settings.dbt_token = dbt_platform_context.decoded_access_token.access_token_response.access_token
479 | 
480 |             self.token_provider = OAuthTokenProvider(
481 |                 access_token_response=dbt_platform_context.decoded_access_token.access_token_response,
482 |                 dbt_platform_url=dbt_platform_url,
483 |                 context_manager=dbt_platform_context_manager,
484 |             )
485 |             validate_settings(self.settings)
486 |             self.authentication_method = AuthenticationMethod.OAUTH
487 |             self._log_settings()
488 |             return self.settings, self.token_provider
489 |         self.token_provider = StaticTokenProvider(token=self.settings.dbt_token)
490 |         validate_settings(self.settings)
491 |         self.authentication_method = AuthenticationMethod.ENV_VAR
492 |         self._log_settings()
493 |         return self.settings, self.token_provider
494 | 
```

--------------------------------------------------------------------------------
/tests/unit/config/test_config.py:
--------------------------------------------------------------------------------

```python
  1 | import os
  2 | from unittest.mock import patch
  3 | 
  4 | import pytest
  5 | 
  6 | from dbt_mcp.config.config import (
  7 |     DbtMcpSettings,
  8 |     load_config,
  9 | )
 10 | from dbt_mcp.config.settings import DEFAULT_DBT_CLI_TIMEOUT
 11 | from dbt_mcp.dbt_cli.binary_type import BinaryType
 12 | from dbt_mcp.tools.tool_names import ToolName
 13 | 
 14 | 
class TestDbtMcpSettings:
    """Tests for DbtMcpSettings: env-var parsing, defaults, derived properties,
    and usage-tracking opt-out behavior."""

    def setup_method(self):
        # Clear environment variables that could interfere with default value tests
        env_vars_to_clear = [
            "DBT_HOST",
            "DBT_MCP_HOST",
            "DBT_PROD_ENV_ID",
            "DBT_ENV_ID",
            "DBT_DEV_ENV_ID",
            "DBT_USER_ID",
            "DBT_TOKEN",
            "DBT_PROJECT_DIR",
            "DBT_PATH",
            "DBT_CLI_TIMEOUT",
            "DISABLE_DBT_CLI",
            "DISABLE_DBT_CODEGEN",
            "DISABLE_SEMANTIC_LAYER",
            "DISABLE_DISCOVERY",
            "DISABLE_REMOTE",
            "DISABLE_ADMIN_API",
            "MULTICELL_ACCOUNT_PREFIX",
            "DBT_WARN_ERROR_OPTIONS",
            "DISABLE_TOOLS",
            "DBT_ACCOUNT_ID",
        ]
        for var in env_vars_to_clear:
            os.environ.pop(var, None)

    def test_default_values(self, env_setup):
        """Defaults hold when no relevant env vars and no .env file are present."""
        # Test with clean environment and no .env file
        clean_env = {
            "HOME": os.environ.get("HOME", ""),
        }  # Keep HOME for potential path resolution
        with env_setup(env_vars=clean_env):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.dbt_path == "dbt"
            assert settings.dbt_cli_timeout == DEFAULT_DBT_CLI_TIMEOUT
            assert settings.disable_remote is None, "disable_remote"
            assert settings.disable_dbt_cli is False, "disable_dbt_cli"
            assert settings.disable_dbt_codegen is True, "disable_dbt_codegen"
            assert settings.disable_admin_api is False, "disable_admin_api"
            assert settings.disable_semantic_layer is False, "disable_semantic_layer"
            assert settings.disable_discovery is False, "disable_discovery"
            assert settings.disable_sql is None, "disable_sql"
            assert settings.disable_tools == [], "disable_tools"

    def test_usage_tracking_disabled_by_env_vars(self):
        """DO_NOT_TRACK disables tracking even with conflicting opt-in vars set."""
        env_vars = {
            "DO_NOT_TRACK": "true",
            "DBT_SEND_ANONYMOUS_USAGE_STATS": "1",
        }

        with patch.dict(os.environ, env_vars, clear=True):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.usage_tracking_enabled is False

    def test_usage_tracking_respects_dbt_project_yaml(self, env_setup):
        """An opt-out flag in dbt_project.yml disables usage tracking."""
        with env_setup() as (project_dir, helpers):
            (project_dir / "dbt_project.yml").write_text(
                "flags:\n  send_anonymous_usage_stats: false\n"
            )

            settings = DbtMcpSettings(_env_file=None)
            assert settings.usage_tracking_enabled is False

    def test_usage_tracking_env_var_precedence_over_yaml(self, env_setup):
        """Env var opt-out wins over an opt-in in dbt_project.yml."""
        env_vars = {
            "DBT_SEND_ANONYMOUS_USAGE_STATS": "false",
        }
        with env_setup(env_vars=env_vars) as (project_dir, helpers):
            (project_dir / "dbt_project.yml").write_text(
                "flags:\n  send_anonymous_usage_stats: true\n"
            )

            settings = DbtMcpSettings(_env_file=None)
            assert settings.usage_tracking_enabled is False

    @pytest.mark.parametrize(
        "do_not_track, send_anonymous_usage_stats",
        [
            ("true", "1"),
            ("1", "true"),
            ("true", None),
            ("1", None),
            (None, "false"),
            (None, "0"),
        ],
    )
    def test_usage_tracking_conflicting_env_vars_bias_off(
        self, do_not_track, send_anonymous_usage_stats
    ):
        """Any opt-out signal (from either var) biases tracking off."""
        env_vars = {}
        if do_not_track is not None:
            env_vars["DO_NOT_TRACK"] = do_not_track
        if send_anonymous_usage_stats is not None:
            env_vars["DBT_SEND_ANONYMOUS_USAGE_STATS"] = send_anonymous_usage_stats

        with patch.dict(os.environ, env_vars, clear=True):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.usage_tracking_enabled is False

    def test_env_var_parsing(self, env_setup):
        """Env vars are coerced to typed fields (int IDs, bools, tool-name lists)."""
        env_vars = {
            "DBT_HOST": "test.dbt.com",
            "DBT_PROD_ENV_ID": "123",
            "DBT_TOKEN": "test_token",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_TOOLS": "build,compile,docs",
        }

        with env_setup(env_vars=env_vars) as (project_dir, helpers):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.dbt_host == "test.dbt.com"
            assert settings.dbt_prod_env_id == 123
            assert settings.dbt_token == "test_token"
            assert settings.dbt_project_dir == str(project_dir)
            assert settings.disable_dbt_cli is True
            assert settings.disable_tools == [
                ToolName.BUILD,
                ToolName.COMPILE,
                ToolName.DOCS,
            ]

    def test_disable_tools_parsing_edge_cases(self):
        """DISABLE_TOOLS tolerates spaces, empty segments, and empty strings."""
        test_cases = [
            ("build,compile,docs", [ToolName.BUILD, ToolName.COMPILE, ToolName.DOCS]),
            (
                "build, compile , docs",
                [ToolName.BUILD, ToolName.COMPILE, ToolName.DOCS],
            ),
            ("build,,docs", [ToolName.BUILD, ToolName.DOCS]),
            ("", []),
            ("run", [ToolName.RUN]),
        ]

        for input_val, expected in test_cases:
            with patch.dict(os.environ, {"DISABLE_TOOLS": input_val}):
                settings = DbtMcpSettings(_env_file=None)
                assert settings.disable_tools == expected

    def test_actual_host_property(self):
        """actual_host falls back to DBT_MCP_HOST but prefers DBT_HOST."""
        with patch.dict(os.environ, {"DBT_HOST": "host1.com"}):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.actual_host == "host1.com"

        with patch.dict(os.environ, {"DBT_MCP_HOST": "host2.com"}):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.actual_host == "host2.com"

        with patch.dict(
            os.environ, {"DBT_HOST": "host1.com", "DBT_MCP_HOST": "host2.com"}
        ):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.actual_host == "host1.com"  # DBT_HOST takes precedence

    def test_actual_prod_environment_id_property(self):
        """actual_prod_environment_id prefers DBT_PROD_ENV_ID over legacy DBT_ENV_ID."""
        with patch.dict(os.environ, {"DBT_PROD_ENV_ID": "123"}):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.actual_prod_environment_id == 123

        with patch.dict(os.environ, {"DBT_ENV_ID": "456"}):
            settings = DbtMcpSettings(_env_file=None)
            assert settings.actual_prod_environment_id == 456

        with patch.dict(os.environ, {"DBT_PROD_ENV_ID": "123", "DBT_ENV_ID": "456"}):
            settings = DbtMcpSettings(_env_file=None)
            assert (
                settings.actual_prod_environment_id == 123
            )  # DBT_PROD_ENV_ID takes precedence

    def test_auto_disable_platform_features_logging(self):
        """With an empty environment, all platform-dependent features are disabled."""
        with patch.dict(os.environ, {}, clear=True):
            settings = DbtMcpSettings(_env_file=None)
            # When DBT_HOST is missing, platform features should be disabled
            assert settings.disable_admin_api is True
            assert settings.disable_sql is True
            assert settings.disable_semantic_layer is True
            assert settings.disable_discovery is True
            assert settings.disable_dbt_cli is True
            assert settings.disable_dbt_codegen is True
195 | 
196 | 
class TestLoadConfig:
    """Tests for load_config(): which service config providers are created
    under various env-var combinations, plus environment side effects."""

    def setup_method(self):
        # Clear any existing environment variables that might interfere
        env_vars_to_clear = [
            "DBT_HOST",
            "DBT_MCP_HOST",
            "DBT_PROD_ENV_ID",
            "DBT_ENV_ID",
            "DBT_DEV_ENV_ID",
            "DBT_USER_ID",
            "DBT_TOKEN",
            "DBT_PROJECT_DIR",
            "DBT_PATH",
            "DBT_CLI_TIMEOUT",
            "DISABLE_DBT_CLI",
            "DISABLE_SEMANTIC_LAYER",
            "DISABLE_DISCOVERY",
            "DISABLE_REMOTE",
            "DISABLE_ADMIN_API",
            "MULTICELL_ACCOUNT_PREFIX",
            "DBT_WARN_ERROR_OPTIONS",
            "DISABLE_TOOLS",
            "DBT_ACCOUNT_ID",
        ]
        for var in env_vars_to_clear:
            os.environ.pop(var, None)

    def _load_config_with_env(self, env_vars):
        """Helper method to load config with test environment variables, avoiding .env file interference"""
        with (
            patch.dict(os.environ, env_vars),
            patch("dbt_mcp.config.config.DbtMcpSettings") as mock_settings_class,
            patch(
                "dbt_mcp.config.config.detect_binary_type",
                return_value=BinaryType.DBT_CORE,
            ),
        ):
            # Create a real instance with test values, but without .env file loading
            with patch.dict(os.environ, env_vars, clear=True):
                settings_instance = DbtMcpSettings(_env_file=None)
            mock_settings_class.return_value = settings_instance
            return load_config()

    def test_valid_config_all_services_enabled(self, env_setup):
        """All providers/configs are created when every service is enabled."""
        env_vars = {
            "DBT_HOST": "test.dbt.com",
            "DBT_PROD_ENV_ID": "123",
            "DBT_DEV_ENV_ID": "456",
            "DBT_USER_ID": "789",
            "DBT_ACCOUNT_ID": "123",
            "DBT_TOKEN": "test_token",
            "DISABLE_SEMANTIC_LAYER": "false",
            "DISABLE_DISCOVERY": "false",
            "DISABLE_REMOTE": "false",
            "DISABLE_ADMIN_API": "false",
            "DISABLE_DBT_CODEGEN": "false",
        }
        with env_setup(env_vars=env_vars) as (project_dir, helpers):
            config = load_config()

            assert config.sql_config_provider is not None, (
                "sql_config_provider should be set"
            )
            assert config.dbt_cli_config is not None, "dbt_cli_config should be set"
            assert config.discovery_config_provider is not None, (
                "discovery_config_provider should be set"
            )
            assert config.semantic_layer_config_provider is not None, (
                "semantic_layer_config_provider should be set"
            )
            assert config.admin_api_config_provider is not None, (
                "admin_api_config_provider should be set"
            )
            assert config.credentials_provider is not None, (
                "credentials_provider should be set"
            )
            assert config.dbt_codegen_config is not None, (
                "dbt_codegen_config should be set"
            )

    def test_valid_config_all_services_disabled(self):
        """Disabling every service leaves all optional providers unset."""
        env_vars = {
            "DBT_TOKEN": "test_token",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_SEMANTIC_LAYER": "true",
            "DISABLE_DISCOVERY": "true",
            "DISABLE_REMOTE": "true",
            "DISABLE_ADMIN_API": "true",
        }

        config = self._load_config_with_env(env_vars)

        assert config.sql_config_provider is None
        assert config.dbt_cli_config is None
        assert config.discovery_config_provider is None
        assert config.semantic_layer_config_provider is None

    def test_invalid_environment_variable_types(self):
        """A non-integer env ID fails settings parsing with ValueError."""
        # Test invalid integer types
        env_vars = {
            "DBT_HOST": "test.dbt.com",
            "DBT_PROD_ENV_ID": "not_an_integer",
            "DBT_TOKEN": "test_token",
            "DISABLE_DISCOVERY": "false",
        }

        with pytest.raises(ValueError):
            self._load_config_with_env(env_vars)

    def test_multicell_account_prefix_configurations(self):
        """MULTICELL_ACCOUNT_PREFIX does not prevent provider creation."""
        env_vars = {
            "DBT_HOST": "test.dbt.com",
            "DBT_PROD_ENV_ID": "123",
            "DBT_TOKEN": "test_token",
            "MULTICELL_ACCOUNT_PREFIX": "prefix",
            "DISABLE_DISCOVERY": "false",
            "DISABLE_SEMANTIC_LAYER": "false",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_REMOTE": "true",
        }

        config = self._load_config_with_env(env_vars)

        assert config.discovery_config_provider is not None
        assert config.semantic_layer_config_provider is not None

    def test_localhost_semantic_layer_config(self):
        """A localhost host (with port) still yields a semantic layer config."""
        env_vars = {
            "DBT_HOST": "localhost:8080",
            "DBT_PROD_ENV_ID": "123",
            "DBT_TOKEN": "test_token",
            "DISABLE_SEMANTIC_LAYER": "false",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_DISCOVERY": "true",
            "DISABLE_REMOTE": "true",
        }

        config = self._load_config_with_env(env_vars)

        assert config.semantic_layer_config_provider is not None

    def test_warn_error_options_default_setting(self):
        """load_config sets a default DBT_WARN_ERROR_OPTIONS env var when unset."""
        env_vars = {
            "DBT_TOKEN": "test_token",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_SEMANTIC_LAYER": "true",
            "DISABLE_DISCOVERY": "true",
            "DISABLE_REMOTE": "true",
            "DISABLE_ADMIN_API": "true",
        }

        # For this test, we need to call load_config directly to see environment side effects
        with patch.dict(os.environ, env_vars, clear=True):
            with patch("dbt_mcp.config.config.DbtMcpSettings") as mock_settings_class:
                settings_instance = DbtMcpSettings(_env_file=None)
                mock_settings_class.return_value = settings_instance
                load_config()

                assert (
                    os.environ["DBT_WARN_ERROR_OPTIONS"]
                    == '{"error": ["NoNodesForSelectionCriteria"]}'
                )

    def test_warn_error_options_not_overridden_if_set(self):
        """A pre-set DBT_WARN_ERROR_OPTIONS value is left untouched."""
        env_vars = {
            "DBT_TOKEN": "test_token",
            "DBT_WARN_ERROR_OPTIONS": "custom_options",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_SEMANTIC_LAYER": "true",
            "DISABLE_DISCOVERY": "true",
            "DISABLE_REMOTE": "true",
            "DISABLE_ADMIN_API": "true",
        }

        # For this test, we need to call load_config directly to see environment side effects
        with patch.dict(os.environ, env_vars, clear=True):
            with patch("dbt_mcp.config.config.DbtMcpSettings") as mock_settings_class:
                settings_instance = DbtMcpSettings(_env_file=None)
                mock_settings_class.return_value = settings_instance
                load_config()

                assert os.environ["DBT_WARN_ERROR_OPTIONS"] == "custom_options"

    def test_local_user_id_loading_from_dbt_profile(self):
        """Config loads even when a local user id exists in the dbt profile."""
        user_data = {"id": "local_user_123"}

        env_vars = {
            "DBT_TOKEN": "test_token",
            "HOME": "/fake/home",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_SEMANTIC_LAYER": "true",
            "DISABLE_DISCOVERY": "true",
            "DISABLE_REMOTE": "true",
            "DISABLE_ADMIN_API": "true",
        }

        with (
            patch.dict(os.environ, env_vars),
            patch("dbt_mcp.tracking.tracking.try_read_yaml", return_value=user_data),
        ):
            config = self._load_config_with_env(env_vars)
            # local_user_id is now loaded by UsageTracker, not Config
            assert config.credentials_provider is not None

    def test_local_user_id_loading_failure_handling(self):
        """Config loads even when the dbt profile yaml cannot be read."""
        env_vars = {
            "DBT_TOKEN": "test_token",
            "HOME": "/fake/home",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_SEMANTIC_LAYER": "true",
            "DISABLE_DISCOVERY": "true",
            "DISABLE_REMOTE": "true",
            "DISABLE_ADMIN_API": "true",
        }

        with (
            patch.dict(os.environ, env_vars),
            patch("dbt_mcp.tracking.tracking.try_read_yaml", return_value=None),
        ):
            config = self._load_config_with_env(env_vars)
            # local_user_id is now loaded by UsageTracker, not Config
            assert config.credentials_provider is not None

    def test_remote_requirements(self):
        """sql_config_provider is only created with remote enabled plus user/dev-env IDs."""
        # Test that remote_config is only created when remote tools are enabled
        # and all required fields are present
        env_vars = {
            "DBT_HOST": "test.dbt.com",
            "DBT_PROD_ENV_ID": "123",
            "DBT_TOKEN": "test_token",
            "DISABLE_REMOTE": "true",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_SEMANTIC_LAYER": "true",
            "DISABLE_DISCOVERY": "true",
            "DISABLE_ADMIN_API": "true",
        }

        config = self._load_config_with_env(env_vars)
        # Remote config should not be created when remote tools are disabled
        assert config.sql_config_provider is None

        # Test remote requirements (needs user_id and dev_env_id too)
        env_vars.update(
            {
                "DBT_USER_ID": "789",
                "DBT_DEV_ENV_ID": "456",
                "DISABLE_REMOTE": "false",
            }
        )

        config = self._load_config_with_env(env_vars)
        assert config.sql_config_provider is not None

    def test_disable_flags_combinations(self, env_setup):
        """Each service's config exists iff its DISABLE_* flag is 'false'."""
        base_env = {
            "DBT_HOST": "test.dbt.com",
            "DBT_PROD_ENV_ID": "123",
            "DBT_TOKEN": "test_token",
        }

        test_cases = [
            # Only CLI enabled
            {
                "DISABLE_DBT_CLI": "false",
                "DISABLE_SEMANTIC_LAYER": "true",
                "DISABLE_DISCOVERY": "true",
                "DISABLE_REMOTE": "true",
            },
            # Only semantic layer enabled
            {
                "DISABLE_DBT_CLI": "true",
                "DISABLE_SEMANTIC_LAYER": "false",
                "DISABLE_DISCOVERY": "true",
                "DISABLE_REMOTE": "true",
            },
            # Multiple services enabled
            {
                "DISABLE_DBT_CLI": "false",
                "DISABLE_SEMANTIC_LAYER": "false",
                "DISABLE_DISCOVERY": "false",
                "DISABLE_REMOTE": "true",
            },
        ]

        for disable_flags in test_cases:
            env_vars = {**base_env, **disable_flags}
            with env_setup(env_vars=env_vars) as (project_dir, helpers):
                config = load_config()

                # Verify configs are created only when services are enabled
                assert (config.dbt_cli_config is not None) == (
                    disable_flags["DISABLE_DBT_CLI"] == "false"
                )
                assert (config.semantic_layer_config_provider is not None) == (
                    disable_flags["DISABLE_SEMANTIC_LAYER"] == "false"
                )
                assert (config.discovery_config_provider is not None) == (
                    disable_flags["DISABLE_DISCOVERY"] == "false"
                )

    def test_legacy_env_id_support(self):
        """Legacy DBT_ENV_ID still satisfies the prod environment requirement."""
        # Test that DBT_ENV_ID still works for backward compatibility
        env_vars = {
            "DBT_HOST": "test.dbt.com",
            "DBT_ENV_ID": "123",  # Using legacy variable
            "DBT_TOKEN": "test_token",
            "DISABLE_DISCOVERY": "false",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_SEMANTIC_LAYER": "true",
            "DISABLE_REMOTE": "true",
        }

        config = self._load_config_with_env(env_vars)
        assert config.discovery_config_provider is not None
        assert config.credentials_provider is not None

    def test_case_insensitive_environment_variables(self):
        """Lowercase env var names are accepted alongside uppercase ones."""
        # pydantic_settings should handle case insensitivity based on config
        env_vars = {
            "dbt_host": "test.dbt.com",  # lowercase
            "DBT_PROD_ENV_ID": "123",  # uppercase
            "dbt_token": "test_token",  # lowercase
            "DISABLE_DISCOVERY": "false",
            "DISABLE_DBT_CLI": "true",
            "DISABLE_SEMANTIC_LAYER": "true",
            "DISABLE_REMOTE": "true",
        }

        config = self._load_config_with_env(env_vars)
        assert config.discovery_config_provider is not None
        assert config.credentials_provider is not None
528 | 
```

--------------------------------------------------------------------------------
/tests/integration/lsp/test_lsp_connection.py:
--------------------------------------------------------------------------------

```python
  1 | """Integration-style tests for LSP connection using real instances instead of mocks.
  2 | 
  3 | These tests use real sockets, asyncio primitives, and actual data flow
  4 | to provide more realistic test coverage compared to heavily mocked unit tests.
  5 | """
  6 | 
  7 | import asyncio
  8 | import json
  9 | import socket
 10 | 
 11 | import pytest
 12 | 
 13 | from dbt_mcp.lsp.lsp_connection import (
 14 |     SocketLSPConnection,
 15 |     LspEventName,
 16 |     JsonRpcMessage,
 17 | )
 18 | 
 19 | 
class TestRealSocketOperations:
    """Tests using real sockets to verify actual network communication."""

    def test_setup_socket_real(self, tmp_path):
        """Test socket setup with real socket binding."""
        # A touch()-ed empty file stands in for the LSP binary; these tests
        # never launch the process, only exercise socket setup.
        binary_path = tmp_path / "lsp"
        binary_path.touch()

        conn = SocketLSPConnection(str(binary_path), "/test")

        # Use real socket
        conn.setup_socket()

        try:
            # Verify real socket was created and bound
            assert conn._socket is not None
            assert isinstance(conn._socket, socket.socket)
            assert conn.port > 0  # OS assigned a port
            assert conn.host == "127.0.0.1"

            # Verify socket is actually listening
            sockname = conn._socket.getsockname()
            assert sockname[0] == "127.0.0.1"
            assert sockname[1] == conn.port

        finally:
            # Cleanup
            if conn._socket:
                conn._socket.close()

    def test_socket_reuse_address(self, tmp_path):
        """Test that SO_REUSEADDR is set on real socket."""
        binary_path = tmp_path / "lsp"
        binary_path.touch()

        conn = SocketLSPConnection(str(binary_path), "/test")
        conn.setup_socket()

        try:
            # Verify SO_REUSEADDR is set (value varies by platform, just check it's non-zero)
            reuse = conn._socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
            assert reuse != 0

        finally:
            if conn._socket:
                conn._socket.close()

    @pytest.mark.asyncio
    async def test_socket_accept_with_real_client(self, tmp_path):
        """Test socket accept with real client connection."""
        binary_path = tmp_path / "lsp"
        binary_path.touch()

        conn = SocketLSPConnection(str(binary_path), "/test", connection_timeout=2.0)
        conn.setup_socket()

        try:
            # Create real client socket that connects to the server
            client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

            async def client_connect():
                await asyncio.sleep(0.1)  # Let server start listening
                # Blocking connect runs in the default executor so the
                # event loop stays free for the server side.
                await asyncio.get_running_loop().run_in_executor(
                    None, client_socket.connect, (conn.host, conn.port)
                )

            async def server_accept():
                # Bound the blocking accept() so a failed connect cannot
                # hang the test past connection_timeout.
                conn._socket.settimeout(conn.connection_timeout)
                connection, addr = await asyncio.get_running_loop().run_in_executor(
                    None, conn._socket.accept
                )
                return connection, addr

            # Run both concurrently
            client_task = asyncio.create_task(client_connect())
            server_result = await server_accept()

            await client_task

            connection, client_addr = server_result
            assert connection is not None
            assert client_addr[0] in ("127.0.0.1", "::1")  # IPv4 or IPv6 localhost

            # Cleanup
            connection.close()
            client_socket.close()

        finally:
            if conn._socket:
                conn._socket.close()
110 | 
111 | 
class TestRealAsyncioQueues:
    """Tests using real asyncio queues to verify message queueing."""

    def test_send_message_with_real_queue(self, tmp_path):
        """Test message sending with real asyncio queue."""
        fake_binary = tmp_path / "lsp"
        fake_binary.touch()

        connection = SocketLSPConnection(str(fake_binary), "/test")

        # The outgoing queue is a genuine asyncio.Queue and starts out empty.
        assert isinstance(connection._outgoing_queue, asyncio.Queue)
        assert connection._outgoing_queue.empty()

        # Queue a single request message.
        connection._send_message(
            JsonRpcMessage(id=1, method="test", params={"key": "value"})
        )

        # The encoded payload should now be waiting in the queue.
        assert not connection._outgoing_queue.empty()
        wire_data = connection._outgoing_queue.get_nowait()

        # Check the LSP wire framing and the JSON-RPC envelope contents.
        assert isinstance(wire_data, bytes)
        for marker in (
            b"Content-Length:",
            b"\r\n\r\n",
            b'"jsonrpc"',
            b'"2.0"',
            b'"test"',
        ):
            assert marker in wire_data

    def test_multiple_messages_queue_order(self, tmp_path):
        """Test that multiple messages maintain FIFO order in real queue."""
        fake_binary = tmp_path / "lsp"
        fake_binary.touch()

        connection = SocketLSPConnection(str(fake_binary), "/test")

        # Queue three messages with distinct method names.
        methods = ["first", "second", "third"]
        for request_id, method in enumerate(methods, start=1):
            connection._send_message(JsonRpcMessage(id=request_id, method=method))

        # All three payloads are pending.
        assert connection._outgoing_queue.qsize() == 3

        # Payloads must come back out in the order they were queued (FIFO).
        for method in methods:
            payload = connection._outgoing_queue.get_nowait()
            assert f'"{method}"'.encode() in payload

        # Nothing left afterwards.
        assert connection._outgoing_queue.empty()
172 | 
173 | 
class TestRealAsyncioFutures:
    """Tests using real asyncio futures to verify async behavior."""

    @pytest.mark.asyncio
    async def test_handle_response_with_real_future(self, tmp_path):
        """Test handling response with real asyncio future."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Park a real future under request id 42 in the running loop.
        pending = asyncio.get_running_loop().create_future()
        connection.state.pending_requests[42] = pending

        # Feed in a successful response carrying that id.
        connection._handle_incoming_message(
            JsonRpcMessage(id=42, result={"success": True, "data": "test"})
        )

        # Resolution should be near-immediate; the timeout is only a guard.
        outcome = await asyncio.wait_for(pending, timeout=1.0)

        assert outcome == {"success": True, "data": "test"}
        assert 42 not in connection.state.pending_requests

    @pytest.mark.asyncio
    async def test_handle_error_with_real_future(self, tmp_path):
        """Test handling error response with real asyncio future."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Register a pending request backed by a real future.
        pending = asyncio.get_running_loop().create_future()
        connection.state.pending_requests[42] = pending

        # Feed in a JSON-RPC error response for the pending id.
        connection._handle_incoming_message(
            JsonRpcMessage(id=42, error={"code": -32601, "message": "Method not found"})
        )

        # The future should be rejected with an exception, not resolved.
        with pytest.raises(RuntimeError, match="LSP error"):
            await asyncio.wait_for(pending, timeout=1.0)

        assert 42 not in connection.state.pending_requests

    @pytest.mark.asyncio
    async def test_notification_futures_real(self, tmp_path):
        """Test waiting for notifications with real futures."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Subscribe to the compile-complete event.
        waiter = connection.wait_for_notification(LspEventName.compileComplete)

        # The subscription hands back a real, unresolved future.
        assert isinstance(waiter, asyncio.Future)
        assert not waiter.done()

        # Simulate the server emitting the matching notification.
        connection._handle_incoming_message(
            JsonRpcMessage(
                method="dbt/lspCompileComplete", params={"success": True, "errors": []}
            )
        )

        outcome = await asyncio.wait_for(waiter, timeout=1.0)

        assert outcome == {"success": True, "errors": []}
        assert connection.state.compiled is True
249 | 
250 | 
class TestRealSocketCommunication:
    """Tests using real socket pairs to verify end-to-end communication.

    Each test installs one end of a ``socket.socketpair()`` as the
    connection's transport and drives actual bytes through it, so the LSP
    framing (Content-Length header + JSON body) is exercised for real.
    """

    @pytest.mark.asyncio
    async def test_socket_pair_communication(self, tmp_path):
        """Test bidirectional communication using socketpair."""
        binary_path = tmp_path / "lsp"
        binary_path.touch()

        conn = SocketLSPConnection(str(binary_path), "/test")

        # Create a real socket pair (two pre-connected sockets); install one
        # end as the connection's transport.
        server_socket, client_socket = socket.socketpair()
        conn._connection = server_socket

        try:
            # Send a message through the connection
            message = JsonRpcMessage(id=1, method="test", params={"foo": "bar"})
            conn._send_message(message)

            # _send_message only enqueues serialized bytes — pop them here.
            data = conn._outgoing_queue.get_nowait()

            # Actually send it through the socket; run_in_executor keeps the
            # blocking sendall off the event loop thread.
            await asyncio.get_running_loop().run_in_executor(
                None, server_socket.sendall, data
            )

            # Read it back on the client side
            received_data = await asyncio.get_running_loop().run_in_executor(
                None, client_socket.recv, 4096
            )

            # Verify we got the complete LSP message: framing header,
            # header/body separator, and the JSON payload fields.
            assert b"Content-Length:" in received_data
            assert b"\r\n\r\n" in received_data
            assert b'"test"' in received_data
            assert b'"foo"' in received_data
            assert b'"bar"' in received_data

        finally:
            server_socket.close()
            client_socket.close()

    @pytest.mark.asyncio
    async def test_message_roundtrip_real(self, tmp_path):
        """Test complete message send and parse roundtrip.

        Serializes a request, pushes it through a real socket, then parses
        the received bytes back and compares field-by-field with the
        original message.
        """
        binary_path = tmp_path / "lsp"
        binary_path.touch()

        conn = SocketLSPConnection(str(binary_path), "/test")

        # Create socket pair
        server_socket, client_socket = socket.socketpair()
        conn._connection = server_socket

        try:
            # Original message
            original_message = JsonRpcMessage(
                id=123,
                method="textDocument/completion",
                params={
                    "textDocument": {"uri": "file:///test.sql"},
                    "position": {"line": 10, "character": 5},
                },
            )

            # Send through connection (enqueue, dequeue, then real sendall).
            conn._send_message(original_message)
            data = conn._outgoing_queue.get_nowait()
            await asyncio.get_running_loop().run_in_executor(
                None, server_socket.sendall, data
            )

            # Receive on client side
            received_data = await asyncio.get_running_loop().run_in_executor(
                None, client_socket.recv, 4096
            )

            # Parse it back; _parse_message returns (message | None, leftover bytes).
            parsed_message, remaining = conn._parse_message(received_data)

            # Verify roundtrip integrity
            assert parsed_message is not None
            assert parsed_message.id == original_message.id
            assert parsed_message.method == original_message.method
            assert parsed_message.params == original_message.params
            assert remaining == b""

        finally:
            server_socket.close()
            client_socket.close()

    @pytest.mark.asyncio
    async def test_multiple_messages_streaming(self, tmp_path):
        """Test streaming multiple messages through real socket.

        Sends three frames back-to-back and verifies the parser can split
        them out of a single received byte stream in order.
        """
        binary_path = tmp_path / "lsp"
        binary_path.touch()

        conn = SocketLSPConnection(str(binary_path), "/test")

        # Create socket pair
        server_socket, client_socket = socket.socketpair()
        conn._connection = server_socket

        try:
            # Set non-blocking for client to avoid hangs
            client_socket.setblocking(False)

            # Send multiple messages
            messages = [
                JsonRpcMessage(id=1, method="initialize"),
                JsonRpcMessage(method="initialized", params={}),
                JsonRpcMessage(id=2, method="textDocument/didOpen"),
            ]

            for msg in messages:
                conn._send_message(msg)
                data = conn._outgoing_queue.get_nowait()
                await asyncio.get_running_loop().run_in_executor(
                    None, server_socket.sendall, data
                )

            # Receive all data on client side with timeout; flip back to
            # blocking mode so recv waits up to 1s per chunk.
            received_data = b""
            client_socket.setblocking(True)
            client_socket.settimeout(1.0)

            try:
                while True:
                    chunk = await asyncio.get_running_loop().run_in_executor(
                        None, client_socket.recv, 4096
                    )
                    if not chunk:
                        break
                    received_data += chunk

                    # Try to parse on a throwaway buffer — if we already have
                    # all 3 frames, exit early instead of waiting for timeout.
                    temp_buffer = received_data
                    temp_count = 0
                    while True:
                        msg, temp_buffer = conn._parse_message(temp_buffer)
                        if msg is None:
                            break
                        temp_count += 1

                    if temp_count >= 3:
                        break
            except TimeoutError:
                # Expected when all data is received; NOTE(review): relies on
                # socket.timeout being an alias of TimeoutError (Python 3.10+)
                # — confirm against the project's supported Python floor.
                pass

            # Parse all messages out of the accumulated stream.
            buffer = received_data
            parsed_messages = []

            while buffer:
                msg, buffer = conn._parse_message(buffer)
                if msg is None:
                    break
                parsed_messages.append(msg)

            # Verify all messages were received and parsed correctly, in order.
            assert len(parsed_messages) == 3
            assert parsed_messages[0].id == 1
            assert parsed_messages[0].method == "initialize"
            assert parsed_messages[1].method == "initialized"
            assert parsed_messages[2].id == 2

        finally:
            server_socket.close()
            client_socket.close()
422 | 
423 | 
class TestRealMessageParsing:
    """Tests parsing with real byte streams."""

    def test_parse_real_lsp_message(self, tmp_path):
        """Test parsing a real LSP protocol message."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Build a wire-format LSP response exactly as a server would emit it.
        body = json.dumps(
            {
                "jsonrpc": "2.0",
                "id": 1,
                "result": {
                    "capabilities": {
                        "textDocumentSync": 2,
                        "completionProvider": {"triggerCharacters": ["."]},
                    }
                },
            }
        ).encode("utf-8")
        wire_bytes = f"Content-Length: {len(body)}\r\n\r\n".encode("utf-8") + body

        # Parsing the full frame should yield the message and consume it all.
        message, leftover = connection._parse_message(wire_bytes)

        assert message is not None
        assert message.id == 1
        assert "capabilities" in message.result
        assert message.result["capabilities"]["textDocumentSync"] == 2
        assert leftover == b""

    def test_parse_chunked_message_real(self, tmp_path):
        """Test parsing message that arrives in multiple chunks."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Assemble one complete frame, then slice it to mimic TCP chunking.
        body = json.dumps({"jsonrpc": "2.0", "id": 1, "method": "test"}).encode(
            "utf-8"
        )
        wire_bytes = f"Content-Length: {len(body)}\r\n\r\n".encode("utf-8") + body
        chunks = (wire_bytes[:20], wire_bytes[20:40], wire_bytes[40:])

        # First chunk alone is incomplete: nothing parsed, bytes retained.
        parsed, buffer = connection._parse_message(chunks[0])
        assert parsed is None
        assert buffer == chunks[0]

        # Still incomplete after appending the second chunk.
        parsed, buffer = connection._parse_message(buffer + chunks[1])
        assert parsed is None

        # The final chunk completes the frame and drains the buffer.
        parsed, buffer = connection._parse_message(buffer + chunks[2])
        assert parsed is not None
        assert parsed.id == 1
        assert parsed.method == "test"
        assert buffer == b""
495 | 
496 | 
class TestRealConcurrentOperations:
    """Tests with real concurrent async operations."""

    @pytest.mark.asyncio
    async def test_concurrent_request_futures(self, tmp_path):
        """Test handling multiple concurrent requests with real futures."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Register ten outstanding requests, each backed by a real future.
        loop = asyncio.get_running_loop()
        pending = {request_id: loop.create_future() for request_id in range(10)}
        for request_id, future in pending.items():
            connection.state.pending_requests[request_id] = future

        async def deliver_response(request_id: int, delay: float):
            # Simulate a server response arriving after a staggered delay.
            await asyncio.sleep(delay)
            connection._handle_incoming_message(
                JsonRpcMessage(id=request_id, result={"request_id": request_id})
            )

        # Kick off all responses concurrently.
        responders = [
            asyncio.create_task(deliver_response(request_id, request_id * 0.01))
            for request_id in range(10)
        ]

        # Every pending future should resolve with its own payload.
        resolved = await asyncio.gather(*(pending[i] for i in range(10)))

        assert len(resolved) == 10
        for request_id, payload in enumerate(resolved):
            assert payload["request_id"] == request_id

        # The pending-request table should be fully drained.
        assert len(connection.state.pending_requests) == 0

        # Let the responder tasks finish cleanly.
        await asyncio.gather(*responders)

    @pytest.mark.asyncio
    async def test_concurrent_notifications_real(self, tmp_path):
        """Test multiple futures waiting for the same notification."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Three independent waiters subscribe to the same event.
        waiters = [
            connection.wait_for_notification(LspEventName.compileComplete)
            for _ in range(3)
        ]

        # Each subscription hands back a real future.
        assert all(isinstance(waiter, asyncio.Future) for waiter in waiters)

        # A single notification should fan out to every waiter.
        connection._handle_incoming_message(
            JsonRpcMessage(
                method="dbt/lspCompileComplete", params={"status": "success"}
            )
        )

        outcomes = await asyncio.wait_for(asyncio.gather(*waiters), timeout=1.0)

        assert all(outcome == {"status": "success"} for outcome in outcomes)
566 | 
567 | 
class TestRealStateManagement:
    """Tests using real state objects."""

    def test_real_state_initialization(self, tmp_path):
        """Test that connection uses real LspConnectionState."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Import locally so the check targets the concrete state class.
        from dbt_mcp.lsp.lsp_connection import LspConnectionState

        # Fresh connections start with a pristine, real state instance.
        assert isinstance(connection.state, LspConnectionState)
        assert connection.state.initialized is False
        assert connection.state.compiled is False
        assert isinstance(connection.state.pending_requests, dict)
        assert isinstance(connection.state.pending_notifications, dict)

    def test_real_request_id_generation(self, tmp_path):
        """Test real request ID counter."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Draw a batch of identifiers from the counter.
        issued = [connection.state.get_next_request_id() for _ in range(100)]

        # The absolute starting value may vary if other tests ran first, so
        # only check that the ids form a consecutive, duplicate-free run.
        start = issued[0]
        assert issued[-1] == start + 99
        assert issued == list(range(start, start + 100))
        assert len(set(issued)) == 100  # All unique

    def test_real_state_updates(self, tmp_path):
        """Test that state updates work with real instances."""
        lsp_binary = tmp_path / "lsp"
        lsp_binary.touch()

        connection = SocketLSPConnection(str(lsp_binary), "/test")

        # Mutate the state and confirm the changes stick.
        connection.state.initialized = True
        connection.state.capabilities = {"test": True}
        connection.state.compiled = True

        assert connection.state.initialized is True
        assert connection.state.capabilities == {"test": True}
        assert connection.state.compiled is True
620 | 
```

--------------------------------------------------------------------------------
/ui/src/App.css:
--------------------------------------------------------------------------------

```css
   1 | /* Reset and base styles */
   2 | * {
   3 |   box-sizing: border-box;
   4 | }
   5 | 
   6 | body {
   7 |   width: 100%;
   8 |   margin: 0;
   9 |   padding: 0;
  10 |   font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
  11 |     'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
  12 |     sans-serif;
  13 |   line-height: 1.6;
  14 |   -webkit-font-smoothing: antialiased;
  15 |   -moz-osx-font-smoothing: grayscale;
  16 |   background-color: #fff;
  17 |   color: #1c1a19;
  18 | }
  19 | 
  20 | p {
  21 |   margin-bottom: 1rem;
  22 | }
  23 | 
  24 | @media (prefers-color-scheme: dark) {
  25 |   body {
  26 |     background-color: #1c1a19;
  27 |     color: #f6f6f6;
  28 |   }
  29 | 
  30 |   p {
  31 |     margin-bottom: 1rem;
  32 |   }
  33 | }
  34 | 
  35 | /* Logo */
  36 | .logo-container {
  37 |   position: fixed;
  38 |   top: 1rem;
  39 |   left: 1rem;
  40 |   z-index: 1000;
  41 | }
  42 | 
  43 | .logo {
  44 |   height: 2rem;
  45 |   width: auto;
  46 |   transition: opacity 0.2s ease-in-out;
  47 | }
  48 | 
  49 | .logo-light {
  50 |   display: block;
  51 | }
  52 | 
  53 | .logo-dark {
  54 |   display: none;
  55 | }
  56 | 
  57 | /* Main layout */
  58 | .app-container {
  59 |   min-height: 100vh;
  60 |   display: flex;
  61 |   justify-content: center;
  62 |   align-items: flex-start;
  63 |   padding: 2rem 1rem;
  64 | }
  65 | 
  66 | .app-content {
  67 |   width: 100%;
  68 |   max-width: 600px;
  69 |   display: flex;
  70 |   flex-direction: column;
  71 |   gap: 2rem;
  72 | }
  73 | 
  74 | /* Header */
  75 | .app-header {
  76 |   text-align: center;
  77 |   margin-bottom: 1rem;
  78 | }
  79 | 
  80 | .app-header h1 {
  81 |   margin: 0 0 0.5rem 0;
  82 |   font-size: 2.5rem;
  83 |   font-weight: 700;
  84 |   letter-spacing: -0.025em;
  85 | }
  86 | 
  87 | .app-header p {
  88 |   margin: 0;
  89 |   font-size: 1.125rem;
  90 |   opacity: 0.7;
  91 | }
  92 | 
  93 | /* Sections */
  94 | section {
  95 |   background: #fff;
  96 |   border-radius: 12px;
  97 |   border: 1px solid #ebe9e9;
  98 |   overflow: visible;
  99 |   box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.1), 0 1px 2px 0 rgba(0, 0, 0, 0.06);
 100 | }
 101 | 
 102 | /* Specific overflow handling for sections with dropdowns */
 103 | .project-selection-section {
 104 |   overflow: visible;
 105 | }
 106 | 
 107 | .section-header {
 108 |   padding: 1.5rem 1.5rem 0 1.5rem;
 109 |   border-bottom: 1px solid #ebe9e9;
 110 |   margin-bottom: 1.5rem;
 111 | }
 112 | 
 113 | .section-header h2 {
 114 |   margin: 0 0 0.5rem 0;
 115 |   font-size: 1.5rem;
 116 |   font-weight: 600;
 117 | }
 118 | 
 119 | .section-header h3 {
 120 |   margin: 0 0 0.5rem 0;
 121 |   font-size: 1.25rem;
 122 |   font-weight: 600;
 123 | }
 124 | 
 125 | .section-header p {
 126 |   margin: 0 0 1.5rem 0;
 127 |   opacity: 0.7;
 128 |   font-size: 0.875rem;
 129 | }
 130 | 
 131 | /* Form content */
 132 | .form-content {
 133 |   padding: 0 1.5rem 1.5rem 1.5rem;
 134 | }
 135 | 
 136 | .form-group {
 137 |   margin-bottom: 1rem;
 138 | }
 139 | 
 140 | .form-label {
 141 |   display: block;
 142 |   margin-bottom: 0.5rem;
 143 |   font-weight: 500;
 144 |   font-size: 0.875rem;
 145 | }
 146 | 
 147 | .form-select {
 148 |   width: 100%;
 149 |   padding: 0.875rem 3rem 0.875rem 1rem;
 150 |   border: 1.5px solid #ebe9e9;
 151 |   border-radius: 12px;
 152 |   font-size: 1rem;
 153 |   font-weight: 500;
 154 |   background-color: #fff;
 155 |   background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%231c1a19' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e");
 156 |   background-position: right 0.75rem center;
 157 |   background-repeat: no-repeat;
 158 |   background-size: 1.25rem 1.25rem;
 159 |   cursor: pointer;
 160 |   transition: all 0.2s ease-in-out;
 161 |   appearance: none;
 162 |   -webkit-appearance: none;
 163 |   -moz-appearance: none;
 164 | }
 165 | 
 166 | .form-select:focus {
 167 |   outline: none;
 168 |   border-color: #3b82f6;
 169 |   background-color: white;
 170 |   box-shadow:
 171 |     0 0 0 3px rgba(59, 130, 246, 0.12),
 172 |     0 4px 6px -1px rgba(0, 0, 0, 0.1),
 173 |     0 2px 4px -1px rgba(0, 0, 0, 0.06);
 174 |   transform: translateY(-1px);
 175 | }
 176 | 
 177 | .form-select:hover:not(:focus) {
 178 |   border-color: #9ca3af;
 179 |   background-color: white;
 180 |   box-shadow:
 181 |     0 2px 4px -1px rgba(0, 0, 0, 0.1),
 182 |     0 1px 2px -1px rgba(0, 0, 0, 0.06);
 183 | }
 184 | 
 185 | .form-select:disabled {
 186 |   background-color: #f3f4f6;
 187 |   border-color: #e5e7eb;
 188 |   cursor: not-allowed;
 189 |   opacity: 0.7;
 190 | }
 191 | 
 192 | /* Custom dropdown */
 193 | .custom-dropdown {
 194 |   position: relative;
 195 |   width: 100%;
 196 |   z-index: 999999;
 197 |   /* Ensure proper stacking context */
 198 |   isolation: isolate;
 199 | }
 200 | 
 201 | .dropdown-trigger {
 202 |   width: 100%;
 203 |   padding: 0.875rem 3rem 0.875rem 1rem;
 204 |   border: 1.5px solid #ebe9e9;
 205 |   border-radius: 12px;
 206 |   font-size: 1rem;
 207 |   font-weight: 500;
 208 |   background-color: #fff;
 209 |   background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%231c1a19' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e");
 210 |   background-position: right 0.75rem center;
 211 |   background-repeat: no-repeat;
 212 |   background-size: 1.25rem 1.25rem;
 213 |   cursor: pointer;
 214 |   transition: all 0.2s ease-in-out;
 215 |   text-align: left;
 216 |   color: #1c1a19;
 217 | }
 218 | 
 219 | .dropdown-trigger:focus {
 220 |   outline: none;
 221 |   border-color: #3b82f6;
 222 |   background-color: white;
 223 |   box-shadow:
 224 |     0 0 0 3px rgba(59, 130, 246, 0.12),
 225 |     0 4px 6px -1px rgba(0, 0, 0, 0.1),
 226 |     0 2px 4px -1px rgba(0, 0, 0, 0.06);
 227 |   transform: translateY(-1px);
 228 | }
 229 | 
 230 | .dropdown-trigger:hover:not(:focus) {
 231 |   border-color: #9ca3af;
 232 |   background-color: white;
 233 |   box-shadow:
 234 |     0 2px 4px -1px rgba(0, 0, 0, 0.1),
 235 |     0 1px 2px -1px rgba(0, 0, 0, 0.06);
 236 | }
 237 | 
 238 | .dropdown-trigger.open {
 239 |   border-color: #3b82f6;
 240 |   background-color: white;
 241 |   background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%233b82f6' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M14 12l-4-4-4 4'/%3e%3c/svg%3e");
 242 |   box-shadow:
 243 |     0 0 0 3px rgba(59, 130, 246, 0.12),
 244 |     0 4px 6px -1px rgba(0, 0, 0, 0.1),
 245 |     0 2px 4px -1px rgba(0, 0, 0, 0.06);
 246 |   border-bottom-left-radius: 4px;
 247 |   border-bottom-right-radius: 4px;
 248 | }
 249 | 
 250 | .dropdown-trigger.placeholder {
 251 |   color: #9ca3af;
 252 |   font-weight: 400;
 253 | }
 254 | 
 255 | .dropdown-options {
 256 |   position: absolute;
 257 |   top: 100%;
 258 |   left: 0;
 259 |   right: 0;
 260 |   background: white;
 261 |   border: 1.5px solid;
 262 |   border-top: none;
 263 |   border-bottom-left-radius: 12px;
 264 |   border-bottom-right-radius: 12px;
 265 |   box-shadow:
 266 |     0 0 0 3px rgba(59, 130, 246, 0.12),
 267 |     0 10px 15px -3px rgba(0, 0, 0, 0.1),
 268 |     0 4px 6px -2px rgba(0, 0, 0, 0.05);
 269 |   z-index: 999999;
 270 |   animation: dropdownSlideIn 0.15s ease-out;
 271 |   /* Ensure proper rendering and isolation */
 272 |   isolation: isolate;
 273 |   contain: layout style;
 274 |   /* Add scrolling for long lists */
 275 |   max-height: 300px;
 276 |   overflow-y: auto;
 277 | }
 278 | 
 279 | /* Removed dropdown-options-fixed - using simple absolute positioning */
 280 | 
 281 | @keyframes dropdownSlideIn {
 282 |   0% {
 283 |     opacity: 0;
 284 |     transform: translateY(-8px);
 285 |   }
 286 |   100% {
 287 |     opacity: 1;
 288 |     transform: translateY(0);
 289 |   }
 290 | }
 291 | 
 292 | .dropdown-option {
 293 |   padding: 0.875rem 1rem;
 294 |   cursor: pointer;
 295 |   transition: all 0.15s ease-in-out;
 296 |   border: none;
 297 |   background: none;
 298 |   width: 100%;
 299 |   text-align: left;
 300 |   font-size: 1rem;
 301 |   color: #374151;
 302 |   display: flex;
 303 |   flex-direction: column;
 304 |   gap: 0.125rem;
 305 | }
 306 | 
 307 | .dropdown-option:hover {
 308 |   background-color: #f8fafc;
 309 |   color: #1f2937;
 310 | }
 311 | 
 312 | .dropdown-option:focus {
 313 |   outline: none;
 314 |   background-color: #eff6ff;
 315 |   color: #1e40af;
 316 | }
 317 | 
 318 | .dropdown-option:active {
 319 |   background-color: #dbeafe;
 320 | }
 321 | 
 322 | .dropdown-option.selected {
 323 |   background-color: #f3f4f6;
 324 |   color: #374151;
 325 | }
 326 | 
 327 | .dropdown-option.selected:hover {
 328 |   background-color: #e5e7eb;
 329 | }
 330 | 
 331 | .option-primary {
 332 |   font-weight: 500;
 333 |   line-height: 1.4;
 334 | }
 335 | 
 336 | .option-secondary {
 337 |   font-size: 0.875rem;
 338 |   opacity: 0.7;
 339 |   font-weight: 400;
 340 | }
 341 | 
 342 | .dropdown-option.selected .option-secondary {
 343 |   opacity: 0.9;
 344 | }
 345 | 
 346 | /* Dropdown scrollbar styling */
 347 | .dropdown-options::-webkit-scrollbar {
 348 |   width: 8px;
 349 | }
 350 | 
 351 | .dropdown-options::-webkit-scrollbar-track {
 352 |   background: #f8fafc;
 353 |   border-radius: 4px;
 354 | }
 355 | 
 356 | .dropdown-options::-webkit-scrollbar-thumb {
 357 |   background: #cbd5e1;
 358 |   border-radius: 4px;
 359 |   border: 1px solid #f8fafc;
 360 | }
 361 | 
 362 | .dropdown-options::-webkit-scrollbar-thumb:hover {
 363 |   background: #94a3b8;
 364 | }
 365 | 
 366 | /* Loading state */
 367 | .loading-state {
 368 |   display: flex;
 369 |   align-items: center;
 370 |   gap: 0.75rem;
 371 |   padding: 1rem;
 372 |   background-color: #fff;
 373 |   border: 1px solid #ebe9e9;
 374 |   border-radius: 8px;
 375 |   margin: 1rem 1.5rem;
 376 | }
 377 | 
 378 | .spinner {
 379 |   width: 20px;
 380 |   height: 20px;
 381 |   border: 2px solid #ebe9e9;
 382 |   border-top: 2px solid #1c1a19;
 383 |   border-radius: 50%;
 384 |   animation: spin 1s linear infinite;
 385 | }
 386 | 
 387 | @keyframes spin {
 388 |   0% {
 389 |     transform: rotate(0deg);
 390 |   }
 391 | 
 392 |   100% {
 393 |     transform: rotate(360deg);
 394 |   }
 395 | }
 396 | 
 397 | /* Error state (inline error banner; red tints match Tailwind red-50/200/800) */
 398 | .error-state {
 399 |   padding: 1rem;
 400 |   background-color: #fef2f2; /* red-50 */
 401 |   border: 1px solid #fecaca; /* red-200 */
 402 |   border-radius: 8px;
 403 |   margin: 1rem 1.5rem;
 404 | }
 405 | 
 406 | .error-state strong {
 407 |   display: block;
 408 |   margin-bottom: 0.25rem;
 409 |   font-weight: 600;
 410 | }
 411 | 
 412 | .error-state p {
 413 |   margin: 0;
 414 |   font-size: 0.875rem;
 415 |   opacity: 0.8;
 416 | }
 417 | 
 418 | /* OAuth Error Section */
 419 | .error-section {
 420 |   background: #fff;
 421 |   border: 1px solid #fecaca;
 422 | }
 423 | 
 424 | .error-details {
 425 |   padding: 0 1.5rem 1.5rem 1.5rem;
 426 |   display: flex;
 427 |   flex-direction: column;
 428 |   gap: 1rem;
 429 | }
 430 | 
 431 | .error-item {
 432 |   display: flex;
 433 |   flex-direction: column;
 434 |   gap: 0.5rem;
 435 | }
 436 | 
 437 | .error-item strong {
 438 |   font-weight: 500;
 439 |   font-size: 0.875rem;
 440 |   color: #991b1b; /* red-800 */
 441 | }
 442 | 
 443 | .error-code {
 444 |   display: inline-block;
 445 |   padding: 0.5rem 0.75rem;
 446 |   background-color: #fef2f2;
 447 |   border: 1px solid #fecaca;
 448 |   border-radius: 6px;
 449 |   font-family: 'SF Mono', Monaco, 'Cascadia Code', 'Roboto Mono', Consolas, 'Courier New', monospace; /* same mono stack as .response-text */
 450 |   font-size: 0.875rem;
 451 |   color: #991b1b;
 452 |   font-weight: 500;
 453 | }
 454 | 
 455 | .error-description {
 456 |   margin: 0;
 457 |   padding: 0.75rem;
 458 |   background-color: #fef2f2;
 459 |   border: 1px solid #fecaca;
 460 |   border-radius: 6px;
 461 |   color: #991b1b;
 462 |   font-size: 0.875rem;
 463 |   line-height: 1.5;
 464 | }
 465 | 
 466 | /* Amber "what to do next" callout inside the OAuth error section */
 467 | .error-actions {
 468 |   margin-top: 0.5rem;
 469 |   padding: 1rem;
 470 |   background-color: #fffbeb; /* amber-50 */
 471 |   border: 1px solid #fde68a; /* amber-200 */
 472 |   border-radius: 6px;
 473 | }
 474 | 
 475 | .error-actions p {
 476 |   margin: 0;
 477 |   color: #92400e; /* amber-800 */
 478 |   font-size: 0.875rem;
 479 |   line-height: 1.5;
 480 | }
 481 | 
 482 | /* Context details */
 483 | .context-details {
 484 |   padding: 0 1.5rem 1.5rem 1.5rem;
 485 |   display: flex;
 486 |   flex-direction: column;
 487 |   gap: 1rem;
 488 | }
 489 | 
 490 | .context-item {
 491 |   display: flex;
 492 |   flex-direction: column;
 493 |   gap: 0.25rem;
 494 | }
 495 | 
 496 | .context-item strong {
 497 |   font-weight: 500;
 498 |   font-size: 0.875rem;
 499 |   opacity: 0.7;
 500 | }
 501 | 
 502 | .environment-details {
 503 |   display: flex;
 504 |   align-items: center;
 505 |   gap: 0.5rem;
 506 | }
 507 | 
 508 | .env-name {
 509 |   font-weight: 500;
 510 | }
 511 | 
 512 | .env-type {
 513 |   font-size: 0.875rem;
 514 |   opacity: 0.6;
 515 | }
 516 | 
 517 | /* Actions section */
 518 | .actions-section {
 519 |   padding: 1.5rem;
 520 |   text-align: center;
 521 |   background-color: #f9fafb; /* gray-50 */
 522 | }
 522 | 
 523 | /* Button container */
 524 | .button-container {
 525 |   display: flex;
 526 |   justify-content: center;
 527 |   align-items: center;
 528 | }
 529 | 
 530 | /* Button */
 531 | .primary-button {
 532 |   display: inline-flex;
 533 |   align-items: center;
 534 |   padding: 0.75rem 1.5rem;
 535 |   background-color: #1c1a19;
 536 |   color: #fff;
 537 |   border: 1px solid #1c1a19;
 538 |   border-radius: 8px;
 539 |   font-size: 1rem;
 540 |   font-weight: 500;
 541 |   cursor: pointer;
 542 |   transition:
 543 |     background-color 0.15s ease-in-out,
 544 |     transform 0.15s ease-in-out,
 545 |     opacity 0.15s ease-in-out;
 546 | }
 547 | 
 548 | .primary-button:hover {
 549 |   background-color: #2d2a28;
 550 |   border-color: #2d2a28;
 551 |   transform: translateY(-1px); /* subtle lift on hover */
 552 | }
 553 | 
 554 | .primary-button:focus {
 555 |   outline: none;
 556 |   box-shadow: 0 0 0 3px rgba(28, 26, 25, 0.2); /* focus ring replaces outline */
 557 |   border-color: #2d2a28;
 558 | }
 559 | 
 560 | .primary-button:active {
 561 |   transform: translateY(0);
 562 |   background-color: #3d3a38;
 563 | }
 564 | 
 565 | .primary-button:disabled { /* muted, inert appearance */
 566 |   background-color: #d1d5db;
 567 |   border-color: #d1d5db;
 568 |   color: #6b7280;
 569 |   cursor: not-allowed;
 570 |   transform: none;
 571 |   box-shadow: none;
 572 |   opacity: 0.65;
 573 | }
 574 | 
 575 | /* Disabled styling stays fixed across all interaction states */
 576 | .primary-button:disabled:hover,
 577 | .primary-button:disabled:focus,
 578 | .primary-button:disabled:active {
 579 |   background-color: #d1d5db;
 580 |   border-color: #d1d5db;
 581 |   color: #6b7280;
 582 |   transform: none;
 583 |   box-shadow: none;
 584 | }
 584 | 
 585 | /* Completion section */
 586 | .completion-section {
 587 |   padding: 0;
 588 | }
 589 | 
 590 | .completion-card {
 591 |   padding: 2rem 1.5rem;
 592 |   text-align: center;
 593 | }
 594 | 
 595 | .completion-card h2 {
 596 |   margin: 0 0 1rem 0;
 597 |   font-size: 1.75rem;
 598 |   font-weight: 600;
 599 | }
 600 | 
 601 | .completion-card p {
 602 |   margin: 0;
 603 |   font-size: 1rem;
 604 |   line-height: 1.6;
 605 | }
 606 | 
 607 | /* Response section (preformatted, scrollable monospace output) */
 608 | .response-section {
 609 |   padding: 1.5rem;
 610 | }
 611 | 
 612 | .response-text {
 613 |   background-color: #fff;
 614 |   border: 1px solid #ebe9e9;
 615 |   border-radius: 8px;
 616 |   padding: 1rem;
 617 |   font-family: 'SF Mono', Monaco, 'Cascadia Code', 'Roboto Mono', Consolas, 'Courier New', monospace;
 618 |   font-size: 0.875rem;
 619 |   line-height: 1.5;
 620 |   white-space: pre-wrap; /* preserve newlines but wrap long lines */
 621 |   word-break: break-word;
 622 |   overflow-x: auto;
 623 | }
 624 | 
 625 | /* Responsive design: tighten spacing on tablet-sized viewports */
 626 | @media (max-width: 768px) {
 627 |   .logo-container {
 628 |     top: 0.5rem;
 629 |     left: 0.5rem;
 630 |   }
 631 | 
 632 |   .logo {
 633 |     height: 1.5rem;
 634 |   }
 635 | 
 636 |   .app-container {
 637 |     padding: 1rem 0.5rem;
 638 |   }
 639 | 
 640 |   .app-content {
 641 |     max-width: 100%;
 642 |   }
 643 | 
 644 |   .app-header h1 {
 645 |     font-size: 2rem;
 646 |   }
 647 | 
 648 |   .section-header {
 649 |     padding: 1rem 1rem 0 1rem;
 650 |     margin-bottom: 1rem;
 651 |   }
 652 | 
 653 |   .form-content,
 654 |   .context-details,
 655 |   .actions-section,
 656 |   .response-section {
 657 |     padding-left: 1rem;
 658 |     padding-right: 1rem;
 659 |   }
 660 | 
 661 |   .loading-state,
 662 |   .error-state {
 663 |     margin-left: 1rem;
 664 |     margin-right: 1rem;
 665 |   }
 666 | }
 667 | 
 668 | /* Phone-sized viewports: shrink further and make the button full-width */
 669 | @media (max-width: 480px) {
 670 |   .logo {
 671 |     height: 1.25rem;
 672 |   }
 673 | 
 674 |   .app-container {
 675 |     padding: 0.5rem 0.25rem;
 676 |   }
 677 | 
 678 |   .app-header h1 {
 679 |     font-size: 1.75rem;
 680 |   }
 681 | 
 682 |   .primary-button {
 683 |     width: 100%;
 684 |   }
 685 | }
 685 | 
 686 | /* Light mode styles (white surfaces, #1c1a19 ink, #ebe9e9 borders) */
 687 | @media (prefers-color-scheme: light) {
 688 |   body {
 689 |     background-color: #fff;
 690 |     color: #1c1a19;
 691 |   }
 692 | 
 693 |   /* Sections */
 694 |   section {
 695 |     background: #fff;
 696 |     border-color: #ebe9e9;
 697 |   }
 698 | 
 699 |   .section-header {
 700 |     border-bottom-color: #ebe9e9;
 701 |   }
 702 | 
 703 |   .section-header h2,
 704 |   .section-header h3 {
 705 |     color: #1c1a19;
 706 |   }
 707 | 
 708 |   .section-header p {
 709 |     color: #1c1a19;
 710 |   }
 711 | 
 712 |   /* Form elements */
 713 |   .form-label {
 714 |     color: #1c1a19;
 715 |   }
 716 | 
 717 |   .form-select {
 718 |     background-color: #fff;
 719 |     border-color: #ebe9e9;
 720 |     color: #1c1a19;
 721 |     background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%231c1a19' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e"); /* inline chevron-down, dark stroke */
 722 |   }
 723 | 
 724 |   .form-select:focus {
 725 |     background-color: #fff;
 726 |     border-color: #ebe9e9;
 727 |   }
 728 | 
 729 |   .form-select:hover:not(:focus) {
 730 |     background-color: #fff;
 731 |     border-color: #ebe9e9;
 732 |   }
 733 | 
 734 |   .form-select:disabled {
 735 |     background-color: #f9f9f9;
 736 |     border-color: #ebe9e9;
 737 |   }
 738 | 
 739 |   /* Custom dropdown */
 740 |   .dropdown-trigger {
 741 |     background-color: #fff;
 742 |     border-color: #ebe9e9;
 743 |     color: #1c1a19;
 744 |     background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%231c1a19' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e");
 745 |   }
 746 | 
 747 |   .dropdown-trigger:focus {
 748 |     background-color: #fff;
 749 |     border-color: #ebe9e9;
 750 |   }
 751 | 
 752 |   .dropdown-trigger:hover:not(:focus) {
 753 |     background-color: #fff;
 754 |     border-color: #ebe9e9;
 755 |   }
 756 | 
 757 |   .dropdown-trigger.open {
 758 |     background-color: #fff;
 759 |     border-color: #ebe9e9;
 760 |     background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%231c1a19' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M14 12l-4-4-4 4'/%3e%3c/svg%3e"); /* chevron flips up while open */
 761 |   }
 762 | 
 763 |   .dropdown-options {
 764 |     background: #fff;
 765 |     border-color: #ebe9e9;
 766 |   }
 767 | 
 768 |   .dropdown-option {
 769 |     color: #1c1a19;
 770 |   }
 771 | 
 772 |   .dropdown-option:hover {
 773 |     background-color: #f9f9f9;
 774 |     color: #1c1a19;
 775 |   }
 776 | 
 777 |   .dropdown-option:focus {
 778 |     background-color: #f9f9f9;
 779 |     color: #1c1a19;
 780 |   }
 781 | 
 782 |   .dropdown-option:active {
 783 |     background-color: #f3f3f3;
 784 |   }
 785 | 
 786 |   .dropdown-option.selected {
 787 |     background-color: #f9f9f9;
 788 |     color: #1c1a19;
 789 |   }
 790 | 
 791 |   .dropdown-option.selected:hover {
 792 |     background-color: #f3f3f3;
 793 |   }
 794 | 
 795 |   /* Loading state */
 796 |   .loading-state {
 797 |     background-color: #fff;
 798 |     border: 1px solid #ebe9e9;
 799 |     color: #1c1a19;
 800 |   }
 801 | 
 802 |   .spinner {
 803 |     border-color: #ebe9e9;
 804 |     border-top-color: #1c1a19;
 805 |   }
 806 | 
 807 |   /* Error state */
 808 |   .error-state {
 809 |     background-color: #fef2f2;
 810 |     border-color: #fecaca;
 811 |     color: #991b1b;
 812 |   }
 813 | 
 814 |   .error-state strong {
 815 |     color: #991b1b;
 816 |   }
 817 | 
 818 |   .error-state p {
 819 |     color: #991b1b;
 820 |   }
 821 | 
 822 |   /* Context details */
 823 |   .context-item strong {
 824 |     color: #1c1a19;
 825 |   }
 826 | 
 827 |   .env-name {
 828 |     color: #1c1a19;
 829 |   }
 830 | 
 831 |   .env-type {
 832 |     color: #1c1a19;
 833 |   }
 834 | 
 835 |   /* Actions section */
 836 |   .actions-section {
 837 |     background-color: #fff; /* overrides the base #f9fafb tint */
 838 |   }
 839 | 
 840 |   /* Response section */
 841 |   .response-text {
 842 |     background-color: #fff;
 843 |     border-color: #ebe9e9;
 844 |     color: #1c1a19;
 845 |   }
 846 | 
 847 |   /* App header */
 848 |   .app-header h1 {
 849 |     color: #1c1a19;
 850 |   }
 851 | 
 852 |   .app-header p {
 853 |     color: #1c1a19;
 854 |   }
 855 | 
 856 |   /* Button light mode */
 857 |   .primary-button {
 858 |     background-color: #1c1a19;
 859 |     color: #fff;
 860 |     border-color: #1c1a19;
 861 |   }
 862 | 
 863 |   .primary-button:hover {
 864 |     background-color: #2d2a28;
 865 |     border-color: #2d2a28;
 866 |   }
 867 | 
 868 |   .primary-button:focus {
 869 |     box-shadow: 0 0 0 3px rgba(28, 26, 25, 0.2);
 870 |     border-color: #2d2a28;
 871 |   }
 872 | 
 873 |   .primary-button:active {
 874 |     background-color: #3d3a38;
 875 |   }
 876 | 
 877 |   .primary-button:disabled {
 878 |     background-color: #d6d3d1;
 879 |     border-color: #e7e5e4;
 880 |     color: #78716c;
 881 |     opacity: 0.7;
 882 |   }
 883 | 
 884 |   .primary-button:disabled:hover,
 885 |   .primary-button:disabled:focus,
 886 |   .primary-button:disabled:active {
 887 |     background-color: #d6d3d1;
 888 |     border-color: #e7e5e4;
 889 |     color: #78716c;
 890 |   }
 891 | }
 892 | 
 893 | /* Dark mode styles (#1c1a19 surfaces, #f6f6f6 ink, #4e4a49 borders) */
 894 | @media (prefers-color-scheme: dark) {
 895 |   /* Logo theme switching */
 896 |   .logo-light {
 897 |     display: none;
 898 |   }
 899 | 
 900 |   .logo-dark {
 901 |     display: block;
 902 |   }
 903 | 
 904 |   /* Sections */
 905 |   section {
 906 |     background: #1c1a19;
 907 |     border-color: #4e4a49;
 908 |   }
 909 | 
 910 |   .section-header {
 911 |     border-bottom-color: #4e4a49;
 912 |   }
 913 | 
 914 |   .section-header h2,
 915 |   .section-header h3 {
 916 |     color: #f6f6f6;
 917 |   }
 918 | 
 919 |   .section-header p {
 920 |     color: #f6f6f6;
 921 |   }
 922 | 
 923 |   /* Form elements */
 924 |   .form-label {
 925 |     color: #f6f6f6;
 926 |   }
 927 | 
 928 |   .form-select {
 929 |     background-color: #1c1a19;
 930 |     border-color: #4e4a49;
 931 |     color: #f6f6f6;
 932 |     background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%23f6f6f6' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e"); /* same chevron as light mode, light stroke */
 933 |   }
 934 | 
 935 |   .form-select:focus {
 936 |     background-color: #1c1a19;
 937 |     border-color: #4e4a49;
 938 |   }
 939 | 
 940 |   .form-select:hover:not(:focus) {
 941 |     background-color: #1c1a19;
 942 |     border-color: #4e4a49;
 943 |   }
 944 | 
 945 |   .form-select:disabled {
 946 |     background-color: #374151; /* NOTE(review): gray-700 from a different palette than #4e4a49 — confirm intended */
 947 |     border-color: #4e4a49;
 948 |   }
 949 | 
 950 |   /* Custom dropdown */
 951 |   .dropdown-trigger {
 952 |     background-color: #1c1a19;
 953 |     border-color: #4e4a49;
 954 |     color: #f6f6f6;
 955 |     background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%23f6f6f6' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e");
 956 |   }
 957 | 
 958 |   .dropdown-trigger:focus {
 959 |     background-color: #1c1a19;
 960 |     border-color: #4e4a49;
 961 |   }
 962 | 
 963 |   .dropdown-trigger:hover:not(:focus) {
 964 |     background-color: #1c1a19;
 965 |     border-color: #4e4a49;
 966 |   }
 967 | 
 968 |   .dropdown-trigger.open {
 969 |     background-color: #1c1a19;
 970 |     border-color: #4e4a49;
 971 |     background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%23f6f6f6' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M14 12l-4-4-4 4'/%3e%3c/svg%3e"); /* chevron flips up while open */
 972 |   }
 973 | 
 974 |   .dropdown-trigger.placeholder {
 975 |     color: #9ca3af;
 976 |   }
 977 | 
 978 |   .dropdown-options {
 979 |     background: #1c1a19;
 980 |     border-color: #4e4a49;
 981 |   }
 982 | 
 983 |   .dropdown-option {
 984 |     color: #f6f6f6;
 985 |   }
 986 | 
 987 |   .dropdown-option:hover {
 988 |     background-color: #374151;
 989 |     color: #f6f6f6;
 990 |   }
 991 | 
 992 |   .dropdown-option:focus {
 993 |     background-color: #374151;
 994 |     color: #f6f6f6;
 995 |   }
 996 | 
 997 |   .dropdown-option:active {
 998 |     background-color: #4b5563;
 999 |   }
1000 | 
1001 |   .dropdown-option.selected {
1002 |     background-color: #4e4a49;
1003 |     color: #f6f6f6;
1004 |   }
1005 | 
1006 |   .dropdown-option.selected:hover {
1007 |     background-color: #6b7280;
1008 |   }
1009 | 
1010 |   /* Dropdown scrollbar styling for dark mode (WebKit-only) */
1011 |   .dropdown-options::-webkit-scrollbar-track {
1012 |     background: #374151;
1013 |   }
1014 | 
1015 |   .dropdown-options::-webkit-scrollbar-thumb {
1016 |     background: #6b7280;
1017 |     border: 1px solid #374151;
1018 |   }
1019 | 
1020 |   .dropdown-options::-webkit-scrollbar-thumb:hover {
1021 |     background: #9ca3af;
1022 |   }
1023 | 
1024 |   /* Loading state */
1025 |   .loading-state {
1026 |     background-color: #1c1a19;
1027 |     border: 1px solid #4e4a49;
1028 |     color: #f6f6f6;
1029 |   }
1030 | 
1031 |   .spinner {
1032 |     border-color: #4e4a49;
1033 |     border-top-color: #f6f6f6;
1034 |   }
1035 | 
1036 |   /* Error state */
1037 |   .error-state {
1038 |     background-color: #7f1d1d; /* red-900 */
1039 |     border-color: #4e4a49;
1040 |     color: #f6f6f6;
1041 |   }
1042 | 
1043 |   .error-state strong {
1044 |     color: #f6f6f6;
1045 |   }
1046 | 
1047 |   .error-state p {
1048 |     color: #f6f6f6;
1049 |   }
1050 | 
1051 |   /* OAuth Error Section Dark Mode */
1052 |   .error-section {
1053 |     background: #1c1a19;
1054 |     border-color: #991b1b;
1055 |   }
1056 | 
1057 |   .error-item strong {
1058 |     color: #fca5a5; /* red-300 */
1059 |   }
1060 | 
1061 |   .error-code {
1062 |     background-color: #450a0a; /* red-950 */
1063 |     border-color: #7f1d1d;
1064 |     color: #fca5a5;
1065 |   }
1066 | 
1067 |   .error-description {
1068 |     background-color: #450a0a;
1069 |     border-color: #7f1d1d;
1070 |     color: #fca5a5;
1071 |   }
1072 | 
1073 |   .error-actions {
1074 |     background-color: #422006; /* amber-950 */
1075 |     border-color: #92400e;
1076 |   }
1077 | 
1078 |   .error-actions p {
1079 |     color: #fde68a;
1080 |   }
1081 | 
1082 |   /* Context details */
1083 |   .context-item strong {
1084 |     color: #f6f6f6;
1085 |   }
1086 | 
1087 |   .env-name {
1088 |     color: #f6f6f6;
1089 |   }
1090 | 
1091 |   .env-type {
1092 |     color: #f6f6f6;
1093 |   }
1094 | 
1095 |   /* Actions section */
1096 |   .actions-section {
1097 |     background-color: #374151;
1098 |   }
1099 | 
1100 |   /* Response section */
1101 |   .response-text {
1102 |     background-color: #374151;
1103 |     border-color: #4e4a49;
1104 |     color: #f6f6f6;
1105 |   }
1106 | 
1107 |   /* App header */
1108 |   .app-header h1 {
1109 |     color: #f6f6f6;
1110 |   }
1111 | 
1112 |   .app-header p {
1113 |     color: #f6f6f6;
1114 |   }
1115 | 
1116 |   /* Button dark mode: inverted — light button on dark surface */
1117 |   .primary-button {
1118 |     background-color: #fdfdfd;
1119 |     color: #374151;
1120 |     border-color: #4e4a49;
1121 |   }
1122 | 
1123 |   .primary-button:hover {
1124 |     background-color: #f3f4f6;
1125 |     border-color: #6b7280;
1126 |   }
1127 | 
1128 |   .primary-button:focus {
1129 |     box-shadow: 0 0 0 3px rgba(246, 246, 246, 0.1);
1130 |     border-color: #9ca3af;
1131 |   }
1132 | 
1133 |   .primary-button:active {
1134 |     background-color: #e5e7eb;
1135 |   }
1136 | 
1137 |   .primary-button:disabled {
1138 |     background-color: #2f2f30;
1139 |     border-color: #3f3f40;
1140 |     color: #8b949e;
1141 |     opacity: 0.55;
1142 |   }
1143 | 
1144 |   .primary-button:disabled:hover,
1145 |   .primary-button:disabled:focus,
1146 |   .primary-button:disabled:active {
1147 |     background-color: #2f2f30;
1148 |     border-color: #3f3f40;
1149 |     color: #8b949e;
1150 |   }
1151 | }
1152 | 
```

--------------------------------------------------------------------------------
/tests/unit/discovery/test_exposures_fetcher.py:
--------------------------------------------------------------------------------

```python
  1 | from unittest.mock import patch
  2 | 
  3 | import pytest
  4 | 
  5 | from dbt_mcp.discovery.client import ExposuresFetcher
  6 | 
  7 | 
  8 | @pytest.fixture
  9 | def exposures_fetcher(mock_api_client):  # mock_api_client comes from conftest; tests below assume it carries environment_id 123 — TODO confirm
 10 |     return ExposuresFetcher(api_client=mock_api_client)
 11 | 
 12 | 
 13 | async def test_fetch_exposures_single_page(exposures_fetcher, mock_api_client):  # happy path: one page, hasNextPage=False, so no follow-up query
 14 |     mock_response = {
 15 |         "data": {
 16 |             "environment": {
 17 |                 "definition": {
 18 |                     "exposures": {
 19 |                         "pageInfo": {"hasNextPage": False, "endCursor": None},
 20 |                         "edges": [
 21 |                             {
 22 |                                 "node": {
 23 |                                     "name": "test_exposure",
 24 |                                     "uniqueId": "exposure.test.test_exposure",
 25 |                                     "exposureType": "application",
 26 |                                     "maturity": "high",
 27 |                                     "ownerEmail": "[email protected]",
 28 |                                     "ownerName": "Test Owner",
 29 |                                     "url": "https://example.com",
 30 |                                     "meta": {},
 31 |                                     "freshnessStatus": "Unknown",
 32 |                                     "description": "Test exposure",
 33 |                                     "label": None,
 34 |                                     "parents": [
 35 |                                         {"uniqueId": "model.test.parent_model"}
 36 |                                     ],
 37 |                                 }
 38 |                             }
 39 |                         ],
 40 |                     }
 41 |                 }
 42 |             }
 43 |         }
 44 |     }
 45 | 
 46 |     mock_api_client.execute_query.return_value = mock_response
 47 | 
 48 |     with patch("dbt_mcp.discovery.client.raise_gql_error"):  # bypass GraphQL error inspection; response shape is trusted here
 49 |         result = await exposures_fetcher.fetch_exposures()
 50 | 
 51 |     assert len(result) == 1
 52 |     assert result[0]["name"] == "test_exposure"  # nodes are returned unwrapped from their edges
 53 |     assert result[0]["uniqueId"] == "exposure.test.test_exposure"
 54 |     assert result[0]["exposureType"] == "application"
 55 |     assert result[0]["maturity"] == "high"
 56 |     assert result[0]["ownerEmail"] == "[email protected]"
 57 |     assert result[0]["ownerName"] == "Test Owner"
 58 |     assert result[0]["url"] == "https://example.com"
 59 |     assert result[0]["meta"] == {}
 60 |     assert result[0]["freshnessStatus"] == "Unknown"
 61 |     assert result[0]["description"] == "Test exposure"
 62 |     assert result[0]["parents"] == [{"uniqueId": "model.test.parent_model"}]
 63 | 
 64 |     mock_api_client.execute_query.assert_called_once()
 65 |     args, kwargs = mock_api_client.execute_query.call_args  # positional call: args[0] is the query, args[1] the variables dict
 66 |     assert args[1]["environmentId"] == 123
 67 |     assert args[1]["first"] == 100  # default page size
 68 | 
 69 | 
 70 | async def test_fetch_exposures_multiple_pages(exposures_fetcher, mock_api_client):  # pagination: first page advertises a cursor, second page terminates
 71 |     page1_response = {
 72 |         "data": {
 73 |             "environment": {
 74 |                 "definition": {
 75 |                     "exposures": {
 76 |                         "pageInfo": {"hasNextPage": True, "endCursor": "cursor123"},
 77 |                         "edges": [
 78 |                             {
 79 |                                 "node": {
 80 |                                     "name": "exposure1",
 81 |                                     "uniqueId": "exposure.test.exposure1",
 82 |                                     "exposureType": "application",
 83 |                                     "maturity": "high",
 84 |                                     "ownerEmail": "[email protected]",
 85 |                                     "ownerName": "Test Owner 1",
 86 |                                     "url": "https://example1.com",
 87 |                                     "meta": {},
 88 |                                     "freshnessStatus": "Unknown",
 89 |                                     "description": "Test exposure 1",
 90 |                                     "label": None,
 91 |                                     "parents": [],
 92 |                                 }
 93 |                             }
 94 |                         ],
 95 |                     }
 96 |                 }
 97 |             }
 98 |         }
 99 |     }
100 | 
101 |     page2_response = {
102 |         "data": {
103 |             "environment": {
104 |                 "definition": {
105 |                     "exposures": {
106 |                         "pageInfo": {"hasNextPage": False, "endCursor": "cursor456"},
107 |                         "edges": [
108 |                             {
109 |                                 "node": {
110 |                                     "name": "exposure2",
111 |                                     "uniqueId": "exposure.test.exposure2",
112 |                                     "exposureType": "dashboard",
113 |                                     "maturity": "medium",
114 |                                     "ownerEmail": "[email protected]",
115 |                                     "ownerName": "Test Owner 2",
116 |                                     "url": "https://example2.com",
117 |                                     "meta": {"key": "value"},
118 |                                     "freshnessStatus": "Fresh",
119 |                                     "description": "Test exposure 2",
120 |                                     "label": "Label 2",
121 |                                     "parents": [
122 |                                         {"uniqueId": "model.test.parent_model2"}
123 |                                     ],
124 |                                 }
125 |                             }
126 |                         ],
127 |                     }
128 |                 }
129 |             }
130 |         }
131 |     }
132 | 
133 |     mock_api_client.execute_query.side_effect = [page1_response, page2_response]  # one response per expected query, in order
134 | 
135 |     with patch("dbt_mcp.discovery.client.raise_gql_error"):
136 |         result = await exposures_fetcher.fetch_exposures()
137 | 
138 |     assert len(result) == 2  # results from both pages are concatenated in fetch order
139 |     assert result[0]["name"] == "exposure1"
140 |     assert result[1]["name"] == "exposure2"
141 |     assert result[1]["meta"] == {"key": "value"}
142 |     assert result[1]["label"] == "Label 2"
143 | 
144 |     assert mock_api_client.execute_query.call_count == 2
145 | 
146 |     # Check first call (no cursor: "after" must be absent, not None)
147 |     first_call = mock_api_client.execute_query.call_args_list[0]
148 |     assert first_call[0][1]["environmentId"] == 123
149 |     assert first_call[0][1]["first"] == 100
150 |     assert "after" not in first_call[0][1]
151 | 
152 |     # Check second call (with the cursor returned by page 1)
153 |     second_call = mock_api_client.execute_query.call_args_list[1]
154 |     assert second_call[0][1]["environmentId"] == 123
155 |     assert second_call[0][1]["first"] == 100
156 |     assert second_call[0][1]["after"] == "cursor123"
157 | 
158 | 
159 | async def test_fetch_exposures_empty_response(exposures_fetcher, mock_api_client):  # edge case: zero exposures must yield an empty list, not None or an error
160 |     mock_response = {
161 |         "data": {
162 |             "environment": {
163 |                 "definition": {
164 |                     "exposures": {
165 |                         "pageInfo": {"hasNextPage": False, "endCursor": None},
166 |                         "edges": [],
167 |                     }
168 |                 }
169 |             }
170 |         }
171 |     }
172 | 
173 |     mock_api_client.execute_query.return_value = mock_response
174 | 
175 |     with patch("dbt_mcp.discovery.client.raise_gql_error"):
176 |         result = await exposures_fetcher.fetch_exposures()
177 | 
178 |     assert len(result) == 0
179 |     assert isinstance(result, list)
180 | 
181 | 
182 | async def test_fetch_exposures_handles_malformed_edges(  # robustness: edges without a dict "node" are silently dropped, not raised on
183 |     exposures_fetcher, mock_api_client
184 | ):
185 |     mock_response = {
186 |         "data": {
187 |             "environment": {
188 |                 "definition": {
189 |                     "exposures": {
190 |                         "pageInfo": {"hasNextPage": False, "endCursor": None},
191 |                         "edges": [
192 |                             {
193 |                                 "node": {
194 |                                     "name": "valid_exposure",
195 |                                     "uniqueId": "exposure.test.valid_exposure",
196 |                                     "exposureType": "application",
197 |                                     "maturity": "high",
198 |                                     "ownerEmail": "[email protected]",
199 |                                     "ownerName": "Test Owner",
200 |                                     "url": "https://example.com",
201 |                                     "meta": {},
202 |                                     "freshnessStatus": "Unknown",
203 |                                     "description": "Valid exposure",
204 |                                     "label": None,
205 |                                     "parents": [],
206 |                                 }
207 |                             },
208 |                             {"invalid": "edge"},  # Missing "node" key
209 |                             {"node": "not_a_dict"},  # Node is not a dict
210 |                             {
211 |                                 "node": {
212 |                                     "name": "another_valid_exposure",
213 |                                     "uniqueId": "exposure.test.another_valid_exposure",
214 |                                     "exposureType": "dashboard",
215 |                                     "maturity": "low",
216 |                                     "ownerEmail": "[email protected]",
217 |                                     "ownerName": "Test Owner 2",
218 |                                     "url": "https://example2.com",
219 |                                     "meta": {},
220 |                                     "freshnessStatus": "Stale",
221 |                                     "description": "Another valid exposure",
222 |                                     "label": None,
223 |                                     "parents": [],
224 |                                 }
225 |                             },
226 |                         ],
227 |                     }
228 |                 }
229 |             }
230 |         }
231 |     }
232 | 
233 |     mock_api_client.execute_query.return_value = mock_response
234 | 
235 |     with patch("dbt_mcp.discovery.client.raise_gql_error"):
236 |         result = await exposures_fetcher.fetch_exposures()
237 | 
238 |     # Should only get the valid exposures (malformed edges should be filtered out);
239 |     # relative order of the surviving nodes is preserved
240 |     assert len(result) == 2
241 |     assert result[0]["name"] == "valid_exposure"
242 |     assert result[1]["name"] == "another_valid_exposure"
242 | 
243 | 
244 | async def test_fetch_exposure_details_by_unique_ids_single(  # detail lookup: filter by one uniqueId, page size shrinks to the id count
245 |     exposures_fetcher, mock_api_client
246 | ):
247 |     mock_response = {
248 |         "data": {
249 |             "environment": {
250 |                 "definition": {
251 |                     "exposures": {
252 |                         "edges": [
253 |                             {
254 |                                 "node": {
255 |                                     "name": "customer_dashboard",
256 |                                     "uniqueId": "exposure.analytics.customer_dashboard",
257 |                                     "exposureType": "dashboard",
258 |                                     "maturity": "high",
259 |                                     "ownerEmail": "[email protected]",
260 |                                     "ownerName": "Analytics Team",
261 |                                     "url": "https://dashboard.example.com/customers",
262 |                                     "meta": {"team": "analytics", "priority": "high"},
263 |                                     "freshnessStatus": "Fresh",
264 |                                     "description": "Customer analytics dashboard",
265 |                                     "label": "Customer Dashboard",
266 |                                     "parents": [
267 |                                         {"uniqueId": "model.analytics.customers"},
268 |                                         {
269 |                                             "uniqueId": "model.analytics.customer_metrics"
270 |                                         },
271 |                                     ],
272 |                                 }
273 |                             }
274 |                         ]
275 |                     }
276 |                 }
277 |             }
278 |         }
279 |     }
280 | 
281 |     mock_api_client.execute_query.return_value = mock_response
282 | 
283 |     with patch("dbt_mcp.discovery.client.raise_gql_error"):
284 |         result = await exposures_fetcher.fetch_exposure_details(
285 |             unique_ids=["exposure.analytics.customer_dashboard"]
286 |         )
287 | 
288 |     assert isinstance(result, list)  # returns a list even for a single-id lookup
289 |     assert len(result) == 1
290 |     exposure = result[0]
291 |     assert exposure["name"] == "customer_dashboard"
292 |     assert exposure["uniqueId"] == "exposure.analytics.customer_dashboard"
293 |     assert exposure["exposureType"] == "dashboard"
294 |     assert exposure["maturity"] == "high"
295 |     assert exposure["ownerEmail"] == "[email protected]"
296 |     assert exposure["ownerName"] == "Analytics Team"
297 |     assert exposure["url"] == "https://dashboard.example.com/customers"
298 |     assert exposure["meta"] == {"team": "analytics", "priority": "high"}
299 |     assert exposure["freshnessStatus"] == "Fresh"
300 |     assert exposure["description"] == "Customer analytics dashboard"
301 |     assert exposure["label"] == "Customer Dashboard"
302 |     assert len(exposure["parents"]) == 2
303 |     assert exposure["parents"][0]["uniqueId"] == "model.analytics.customers"
304 |     assert exposure["parents"][1]["uniqueId"] == "model.analytics.customer_metrics"
305 | 
306 |     mock_api_client.execute_query.assert_called_once()
307 |     args, kwargs = mock_api_client.execute_query.call_args  # positional call: args[1] is the variables dict
308 |     assert args[1]["environmentId"] == 123
309 |     assert args[1]["first"] == 1  # one id requested -> page size 1, unlike the 100 used by fetch_exposures
310 |     assert args[1]["filter"] == {"uniqueIds": ["exposure.analytics.customer_dashboard"]}
311 | 
312 | 
async def test_fetch_exposure_details_by_unique_ids_multiple(
    exposures_fetcher, mock_api_client
):
    """Fetching details for two unique_ids returns both exposures in order."""
    dashboard_node = {
        "name": "customer_dashboard",
        "uniqueId": "exposure.analytics.customer_dashboard",
        "exposureType": "dashboard",
        "maturity": "high",
        "ownerEmail": "[email protected]",
        "ownerName": "Analytics Team",
        "url": "https://dashboard.example.com/customers",
        "meta": {"team": "analytics", "priority": "high"},
        "freshnessStatus": "Fresh",
        "description": "Customer analytics dashboard",
        "label": "Customer Dashboard",
        "parents": [],
    }
    report_node = {
        "name": "sales_report",
        "uniqueId": "exposure.sales.sales_report",
        "exposureType": "analysis",
        "maturity": "medium",
        "ownerEmail": "[email protected]",
        "ownerName": "Sales Team",
        "url": None,
        "meta": {},
        "freshnessStatus": "Stale",
        "description": "Monthly sales analysis report",
        "label": None,
        "parents": [{"uniqueId": "model.sales.sales_data"}],
    }
    mock_api_client.execute_query.return_value = {
        "data": {
            "environment": {
                "definition": {
                    "exposures": {
                        "edges": [
                            {"node": dashboard_node},
                            {"node": report_node},
                        ]
                    }
                }
            }
        }
    }

    requested_ids = [
        "exposure.analytics.customer_dashboard",
        "exposure.sales.sales_report",
    ]
    with patch("dbt_mcp.discovery.client.raise_gql_error"):
        result = await exposures_fetcher.fetch_exposure_details(
            unique_ids=requested_ids
        )

    assert isinstance(result, list)
    assert len(result) == 2

    # Results come back in the same order the server returned them.
    first, second = result
    assert first["name"] == "customer_dashboard"
    assert first["uniqueId"] == "exposure.analytics.customer_dashboard"
    assert first["exposureType"] == "dashboard"
    assert second["name"] == "sales_report"
    assert second["uniqueId"] == "exposure.sales.sales_report"
    assert second["exposureType"] == "analysis"

    # The query must carry the environment id plus an exact uniqueIds filter.
    mock_api_client.execute_query.assert_called_once()
    args, kwargs = mock_api_client.execute_query.call_args
    assert args[1]["environmentId"] == 123
    assert args[1]["first"] == 2
    assert args[1]["filter"] == {"uniqueIds": requested_ids}
396 | 
397 | 
async def test_fetch_exposure_details_by_name(exposures_fetcher, mock_api_client):
    """Filtering by name falls back to fetch_exposures and filters client-side.

    The mock returns two exposures; only the one whose name matches the
    requested ``exposure_name`` is expected in the result, and the call-args
    check confirms the paged GET_EXPOSURES query was used.
    """
    # Mock the response for fetch_exposures (which gets called when filtering by name)
    mock_exposures_response = {
        "data": {
            "environment": {
                "definition": {
                    "exposures": {
                        "pageInfo": {"hasNextPage": False, "endCursor": None},
                        "edges": [
                            {
                                "node": {
                                    "name": "sales_report",
                                    "uniqueId": "exposure.sales.sales_report",
                                    "exposureType": "analysis",
                                    "maturity": "medium",
                                    "ownerEmail": "[email protected]",
                                    "ownerName": "Sales Team",
                                    "url": None,
                                    "meta": {},
                                    "freshnessStatus": "Stale",
                                    "description": "Monthly sales analysis report",
                                    "label": None,
                                    "parents": [{"uniqueId": "model.sales.sales_data"}],
                                }
                            },
                            {
                                # Extra exposure that must be filtered out by name.
                                "node": {
                                    "name": "other_exposure",
                                    "uniqueId": "exposure.other.other_exposure",
                                    "exposureType": "dashboard",
                                    "maturity": "high",
                                    "ownerEmail": "[email protected]",
                                    "ownerName": "Other Team",
                                    "url": None,
                                    "meta": {},
                                    "freshnessStatus": "Fresh",
                                    "description": "Other exposure",
                                    "label": None,
                                    "parents": [],
                                }
                            },
                        ],
                    }
                }
            }
        }
    }

    mock_api_client.execute_query.return_value = mock_exposures_response

    with patch("dbt_mcp.discovery.client.raise_gql_error"):
        result = await exposures_fetcher.fetch_exposure_details(
            exposure_name="sales_report"
        )

    # Only the matching exposure survives the client-side name filter.
    assert isinstance(result, list)
    assert len(result) == 1
    exposure = result[0]
    assert exposure["name"] == "sales_report"
    assert exposure["uniqueId"] == "exposure.sales.sales_report"
    assert exposure["exposureType"] == "analysis"
    assert exposure["maturity"] == "medium"
    assert exposure["url"] is None
    assert exposure["meta"] == {}
    assert exposure["freshnessStatus"] == "Stale"
    assert exposure["label"] is None

    # Should have called the GET_EXPOSURES query (not GET_EXPOSURE_DETAILS)
    mock_api_client.execute_query.assert_called_once()
    args, kwargs = mock_api_client.execute_query.call_args
    assert args[1]["environmentId"] == 123
    assert args[1]["first"] == 100  # PAGE_SIZE for fetch_exposures
470 | 
471 | 
async def test_fetch_exposure_details_not_found(exposures_fetcher, mock_api_client):
    """An unknown unique_id yields an empty result list."""
    empty_exposures = {"edges": []}
    mock_api_client.execute_query.return_value = {
        "data": {"environment": {"definition": {"exposures": empty_exposures}}}
    }

    with patch("dbt_mcp.discovery.client.raise_gql_error"):
        details = await exposures_fetcher.fetch_exposure_details(
            unique_ids=["exposure.nonexistent.exposure"]
        )

    assert details == []
485 | 
486 | 
async def test_get_exposure_filters_unique_ids(exposures_fetcher):
    """A single unique_id is passed through as a uniqueIds filter."""
    ids = ["exposure.test.test_exposure"]
    assert exposures_fetcher._get_exposure_filters(unique_ids=ids) == {
        "uniqueIds": ids
    }
492 | 
493 | 
async def test_get_exposure_filters_multiple_unique_ids(exposures_fetcher):
    """Multiple unique_ids are forwarded unchanged in the filter."""
    ids = ["exposure.test.test1", "exposure.test.test2"]
    assert exposures_fetcher._get_exposure_filters(unique_ids=ids) == {
        "uniqueIds": ids
    }
499 | 
500 | 
async def test_get_exposure_filters_name_raises_error(exposures_fetcher):
    """Filtering by exposure_name is unsupported and must raise."""
    from dbt_mcp.errors import InvalidParameterError

    expected_message = "ExposureFilter only supports uniqueIds"
    with pytest.raises(InvalidParameterError, match=expected_message):
        exposures_fetcher._get_exposure_filters(exposure_name="test_exposure")
508 | 
509 | 
async def test_get_exposure_filters_no_params(exposures_fetcher):
    """Calling the filter builder with no arguments must raise."""
    from dbt_mcp.errors import InvalidParameterError

    expected_message = "unique_ids must be provided for exposure filtering"
    with pytest.raises(InvalidParameterError, match=expected_message):
        exposures_fetcher._get_exposure_filters()
518 | 
519 | 
async def test_fetch_exposure_details_by_name_not_found(
    exposures_fetcher, mock_api_client
):
    """A name matching no exposure returns an empty list."""
    # fetch_exposures response with no edges and no further pages.
    no_exposures = {
        "pageInfo": {"hasNextPage": False, "endCursor": None},
        "edges": [],
    }
    mock_api_client.execute_query.return_value = {
        "data": {"environment": {"definition": {"exposures": no_exposures}}}
    }

    with patch("dbt_mcp.discovery.client.raise_gql_error"):
        details = await exposures_fetcher.fetch_exposure_details(
            exposure_name="nonexistent_exposure"
        )

    assert details == []
545 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/lsp/lsp_connection.py:
--------------------------------------------------------------------------------

```python
  1 | """LSP Connection Manager for dbt Fusion LSP.
  2 | 
  3 | This module manages the lifecycle of LSP processes and handles JSON-RPC
  4 | communication according to the Language Server Protocol specification.
  5 | """
  6 | 
  7 | import asyncio
  8 | import itertools
  9 | import json
 10 | import logging
 11 | import socket
 12 | import subprocess
 13 | from collections.abc import Iterator, Sequence
 14 | from dataclasses import dataclass, field
 15 | from pathlib import Path
 16 | from typing import Any
 17 | import uuid
 18 | from dataclasses import asdict
 19 | 
 20 | from dbt_mcp.lsp.providers.lsp_connection_provider import (
 21 |     LSPConnectionProviderProtocol,
 22 |     LspEventName,
 23 | )
 24 | 
 25 | logger = logging.getLogger(__name__)
 26 | 
 27 | 
 28 | def event_name_from_string(string: str) -> LspEventName | None:
 29 |     """Create an LSP event name from a string."""
 30 |     try:
 31 |         return LspEventName(string)
 32 |     except ValueError:
 33 |         return None
 34 | 
 35 | 
 36 | @dataclass
 37 | class JsonRpcMessage:
 38 |     """Represents a JSON-RPC 2.0 message."""
 39 | 
 40 |     jsonrpc: str = "2.0"
 41 |     id: int | str | None = None
 42 |     method: str | None = None
 43 |     params: dict[str, Any] | list[Any] | None = None
 44 |     result: Any = None
 45 |     error: dict[str, Any] | None = None
 46 | 
 47 |     def to_dict(self, none_values: bool = False) -> dict[str, Any]:
 48 |         """Convert the message to a dictionary."""
 49 | 
 50 |         def dict_factory(x: list[tuple[str, Any]]) -> dict[str, Any]:
 51 |             return dict(x) if none_values else {k: v for k, v in x if v is not None}
 52 | 
 53 |         return asdict(self, dict_factory=dict_factory)
 54 | 
 55 | 
@dataclass
class LspConnectionState:
    """Tracks the state of an LSP connection.

    Holds the handshake/compile flags plus the bookkeeping needed to
    correlate outgoing requests and awaited notifications with their
    asyncio futures.
    """

    # True once the LSP "initialize" handshake has completed.
    initialized: bool = False
    # True once shutdown has been requested; prevents duplicate shutdown attempts.
    shutting_down: bool = False
    # Server capabilities taken from the "initialize" response.
    capabilities: dict[str, Any] = field(default_factory=dict)
    # Futures keyed by request id, resolved when the matching response arrives.
    pending_requests: dict[int | str, asyncio.Future] = field(default_factory=dict)
    # Futures resolved when a given server notification (event) is received.
    pending_notifications: dict[LspEventName, list[asyncio.Future]] = field(
        default_factory=dict
    )
    # True once the server has reported a completed dbt compile.
    compiled: bool = False
    # start at 20 to avoid collisions between ids of requests we are waiting for and the lsp server requests from us
    request_id_counter: Iterator[int] = field(
        default_factory=lambda: itertools.count(20)
    )

    def get_next_request_id(self) -> int:
        """Return a fresh, monotonically increasing JSON-RPC request id."""
        return next(self.request_id_counter)
 75 | 
 76 | 
 77 | class SocketLSPConnection(LSPConnectionProviderProtocol):
 78 |     """LSP process lifecycle and communication via socket.
 79 | 
 80 |     This class handles:
 81 |     - Starting and stopping LSP server processes
 82 |     - Socket-based JSON-RPC communication
 83 |     - Request/response correlation
 84 |     - Error handling and cleanup
 85 |     """
 86 | 
 87 |     def __init__(
 88 |         self,
 89 |         binary_path: str,
 90 |         cwd: str,
 91 |         args: Sequence[str] | None = None,
 92 |         connection_timeout: float = 10,
 93 |         default_request_timeout: float = 60,
 94 |     ):
 95 |         """Initialize the LSP connection manager.
 96 | 
 97 |         Args:
 98 |             binary_path: Path to the LSP server binary
 99 |             cwd: Working directory for the LSP process
100 |             args: Optional command-line arguments for the LSP server
101 |             connection_timeout: Timeout in seconds for establishing the initial socket
102 |                               connection (default: 10). Used during server startup.
103 |             default_request_timeout: Default timeout in seconds for LSP request operations
104 |                                    (default: 60). Used when no timeout is specified for
105 |                                    individual requests.
106 |         """
107 |         self.binary_path = Path(binary_path)
108 |         self.args = list(args) if args else []
109 |         self.cwd = cwd
110 |         self.host = "127.0.0.1"
111 |         self.port = 0
112 | 
113 |         self.process: asyncio.subprocess.Process | None = None
114 |         self.state = LspConnectionState()
115 | 
116 |         # Socket components
117 |         self._socket: socket.socket | None = None
118 |         self._connection: socket.socket | None = None
119 | 
120 |         # Asyncio components for I/O
121 |         self._reader_task: asyncio.Task | None = None
122 |         self._writer_task: asyncio.Task | None = None
123 |         self._stdout_reader_task: asyncio.Task | None = None
124 |         self._stderr_reader_task: asyncio.Task | None = None
125 |         self._stop_event = asyncio.Event()
126 |         self._outgoing_queue: asyncio.Queue[bytes] = asyncio.Queue()
127 | 
128 |         # Timeouts
129 |         self.connection_timeout = connection_timeout
130 |         self.default_request_timeout = default_request_timeout
131 | 
132 |         logger.debug(f"LSP Connection initialized with binary: {self.binary_path}")
133 | 
    def compiled(self) -> bool:
        """Return True once the server has reported a completed dbt compile."""
        return self.state.compiled
136 | 
    def initialized(self) -> bool:
        """Return True once the LSP initialize handshake has completed."""
        return self.state.initialized
139 | 
140 |     async def start(self) -> None:
141 |         """Start the LSP server process and socket communication tasks."""
142 |         if self.process is not None:
143 |             logger.warning("LSP process is already running")
144 |             return
145 | 
146 |         try:
147 |             self.setup_socket()
148 | 
149 |             await self.launch_lsp_process()
150 | 
151 |             # Wait for connection with timeout (run socket.accept in executor)
152 |             if self._socket:
153 |                 self._socket.settimeout(self.connection_timeout)
154 |                 try:
155 |                     (
156 |                         self._connection,
157 |                         client_addr,
158 |                     ) = await asyncio.get_running_loop().run_in_executor(
159 |                         None, self._socket.accept
160 |                     )
161 |                     if self._connection:
162 |                         self._connection.settimeout(
163 |                             None
164 |                         )  # Set to blocking for read/write
165 |                     logger.debug(f"LSP server connected from {client_addr}")
166 |                 except TimeoutError:
167 |                     raise RuntimeError("Timeout waiting for LSP server to connect")
168 | 
169 |             # Start I/O tasks
170 |             self._stop_event.clear()
171 |             self._reader_task = asyncio.get_running_loop().create_task(
172 |                 self._read_loop()
173 |             )
174 |             self._writer_task = asyncio.get_running_loop().create_task(
175 |                 self._write_loop()
176 |             )
177 | 
178 |         except Exception as e:
179 |             logger.error(f"Failed to start LSP server: {e}")
180 |             await self.stop()
181 |             raise
182 | 
183 |     def setup_socket(self) -> None:
184 |         """Set up the socket for LSP server communication.
185 | 
186 |         Creates a TCP socket, binds it to the configured host and port,
187 |         and starts listening for incoming connections. If port is 0,
188 |         the OS will auto-assign an available port.
189 |         """
190 |         # Create socket and bind
191 |         self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
192 |         self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
193 |         self._socket.bind((self.host, self.port))
194 |         self._socket.listen(1)
195 | 
196 |         # Get the actual port if auto-assigned
197 |         _, actual_port = self._socket.getsockname()
198 |         self.port = actual_port
199 |         logger.debug(f"Socket listening on {self.host}:{self.port}")
200 | 
    async def launch_lsp_process(self) -> None:
        """Launch the LSP server process.

        Starts the LSP server as a subprocess, telling it via --socket to
        connect back to the port opened by setup_socket(). Note that stdout
        and stderr are NOT captured here (no pipes are passed), so they are
        inherited from the parent process.
        """
        # Prepare command with socket info
        cmd = [
            str(self.binary_path),
            "--socket",
            f"{self.port}",
            "--project-dir",
            self.cwd,
            *self.args,
        ]

        logger.debug(f"Starting LSP server: {' '.join(cmd)}")
        self.process = await asyncio.create_subprocess_exec(*cmd)

        logger.info(f"LSP server started with PID: {self.process.pid}")
222 | 
223 |     async def stop(self) -> None:
224 |         """Stop the LSP server process and cleanup resources."""
225 |         logger.info("Stopping LSP server...")
226 | 
227 |         # Signal tasks to stop
228 |         self._stop_event.set()
229 | 
230 |         # Cancel I/O tasks
231 |         if self._reader_task and not self._reader_task.done():
232 |             self._reader_task.cancel()
233 |             try:
234 |                 await self._reader_task
235 |             except asyncio.CancelledError:
236 |                 pass
237 | 
238 |         if self._writer_task and not self._writer_task.done():
239 |             self._writer_task.cancel()
240 |             try:
241 |                 await self._writer_task
242 |             except asyncio.CancelledError:
243 |                 pass
244 | 
245 |         # Cancel stdout/stderr reader tasks
246 |         if self._stdout_reader_task and not self._stdout_reader_task.done():
247 |             self._stdout_reader_task.cancel()
248 |             try:
249 |                 await self._stdout_reader_task
250 |             except asyncio.CancelledError:
251 |                 pass
252 | 
253 |         if self._stderr_reader_task and not self._stderr_reader_task.done():
254 |             self._stderr_reader_task.cancel()
255 |             try:
256 |                 await self._stderr_reader_task
257 |             except asyncio.CancelledError:
258 |                 pass
259 | 
260 |         # Send shutdown request if initialized
261 |         if self.process and not self.state.shutting_down:
262 |             self.state.shutting_down = True
263 |             try:
264 |                 self._send_shutdown_request()
265 |                 await asyncio.sleep(0.5)  # Give server time to process shutdown
266 |             except Exception as e:
267 |                 logger.warning(f"Error sending shutdown request: {e}")
268 | 
269 |         # Close socket connection
270 |         if self._connection:
271 |             try:
272 |                 self._connection.close()
273 |             except Exception as e:
274 |                 logger.error(f"Error closing socket connection: {e}")
275 |             finally:
276 |                 self._connection = None
277 | 
278 |         # Close listening socket
279 |         if self._socket:
280 |             try:
281 |                 self._socket.close()
282 |             except Exception as e:
283 |                 logger.error(f"Error closing socket: {e}")
284 |             finally:
285 |                 self._socket = None
286 | 
287 |         # Terminate the process
288 |         if self.process:
289 |             try:
290 |                 self.process.terminate()
291 |                 try:
292 |                     await self.process.wait()
293 |                 except subprocess.TimeoutExpired:
294 |                     logger.warning("LSP process didn't terminate, killing...")
295 |                     self.process.kill()
296 |                     await self.process.wait()
297 |             except Exception as e:
298 |                 logger.error(f"Error terminating LSP process: {e}")
299 |             finally:
300 |                 self.process = None
301 | 
302 |         # Clear state
303 |         self.state = LspConnectionState()
304 | 
305 |         logger.info("LSP server stopped")
306 | 
    async def initialize(self, timeout: float | None = None) -> None:
        """Initialize the LSP connection.

        Sends the initialize request to the LSP server, stores the returned
        server capabilities in the connection state, and then sends the
        "initialized" notification.

        Args:
            timeout: Timeout in seconds for the initialize request. Falls
                back to ``default_request_timeout`` when not provided.

        Raises:
            RuntimeError: If the connection has already been initialized.
        """
        if self.state.initialized:
            raise RuntimeError("LSP server is already initialized")

        params = {
            "processId": None,
            "rootUri": None,
            "clientInfo": {
                "name": "dbt-mcp",
                "version": "1.0.0",
            },
            "capabilities": {},
            # NOTE(review): "project-dir" is hard-coded to "file:///" rather
            # than derived from self.cwd — confirm the server resolves the
            # project directory from the process arguments instead.
            "initializationOptions": {
                "project-dir": "file:///",
                "command-prefix": str(uuid.uuid4()),
            },
        }

        # Send initialize request
        result = await self.send_request(
            "initialize", params, timeout=timeout or self.default_request_timeout
        )

        # Store capabilities
        self.state.capabilities = result.get("capabilities", {})
        self.state.initialized = True

        # Send initialized notification
        self.send_notification("initialized", {})

        logger.info("LSP server initialized successfully")
347 | 
348 |     async def _read_loop(self) -> None:
349 |         """Background task that reads messages from the LSP server via socket."""
350 |         if not self._connection:
351 |             logger.warning("LSP server socket is not available")
352 |             return
353 | 
354 |         buffer = b""
355 | 
356 |         while not self._stop_event.is_set():
357 |             try:
358 |                 # Read data from socket (run in executor to avoid blocking)
359 |                 self._connection.settimeout(0.1)  # Short timeout to check stop event
360 |                 try:
361 |                     chunk = await asyncio.get_running_loop().run_in_executor(
362 |                         None, self._connection.recv, 4096
363 |                     )
364 |                 except TimeoutError:
365 |                     continue
366 | 
367 |                 if not chunk:
368 |                     logger.warning("LSP server socket closed")
369 |                     break
370 | 
371 |                 buffer += chunk
372 | 
373 |                 # Try to parse messages from buffer
374 |                 while True:
375 |                     message, remaining = self._parse_message(buffer)
376 |                     if message is None:
377 |                         break
378 | 
379 |                     buffer = remaining
380 | 
381 |                     # Process the message
382 |                     self._handle_incoming_message(message)
383 | 
384 |             except asyncio.CancelledError:
385 |                 # Task was cancelled, exit cleanly
386 |                 break
387 |             except Exception as e:
388 |                 if not self._stop_event.is_set():
389 |                     logger.error(f"Error in reader task: {e}")
390 |                 break
391 | 
392 |     async def _write_loop(self) -> None:
393 |         """Background task that writes messages to the LSP server via socket."""
394 |         if not self._connection:
395 |             return
396 | 
397 |         while not self._stop_event.is_set():
398 |             try:
399 |                 # Get message from queue (with timeout to check stop event)
400 |                 try:
401 |                     data = await asyncio.wait_for(
402 |                         self._outgoing_queue.get(), timeout=0.1
403 |                     )
404 |                 except TimeoutError:
405 |                     continue
406 | 
407 |                 # Write to socket (run in executor to avoid blocking)
408 |                 await asyncio.get_running_loop().run_in_executor(
409 |                     None, self._connection.sendall, data
410 |                 )
411 | 
412 |             except asyncio.CancelledError:
413 |                 # Task was cancelled, exit cleanly
414 |                 break
415 |             except Exception as e:
416 |                 if not self._stop_event.is_set():
417 |                     logger.error(f"Error in writer task: {e}")
418 |                 break
419 | 
    def _parse_message(self, buffer: bytes) -> tuple[JsonRpcMessage | None, bytes]:
        """Parse a JSON-RPC message from the buffer.

        LSP uses HTTP-like headers followed by JSON content:
        Content-Length: <length>\\r\\n
        \\r\\n
        <json-content>

        Returns:
            ``(message, remaining)``. ``message`` is None either when no
            complete message is buffered yet (``remaining`` is the unchanged
            buffer) or when malformed data was skipped (``remaining`` is the
            buffer with the bad bytes consumed).
        """
        # Look for Content-Length header
        header_end = buffer.find(b"\r\n\r\n")
        if header_end == -1:
            # No complete header yet; need more data.
            return None, buffer

        # Parse headers
        headers = buffer[:header_end].decode("utf-8")
        content_length = None

        for line in headers.split("\r\n"):
            if line.startswith("Content-Length:"):
                try:
                    content_length = int(line.split(":")[1].strip())
                except (IndexError, ValueError):
                    # Content length is unknown, so only the header can be
                    # skipped; following content bytes may parse as garbage.
                    logger.error(f"Invalid Content-Length header: {line}")
                    return None, buffer[header_end + 4 :]

        if content_length is None:
            logger.error("Missing Content-Length header")
            return None, buffer[header_end + 4 :]

        # Check if we have the full message
        content_start = header_end + 4
        content_end = content_start + content_length

        if len(buffer) < content_end:
            # Body not fully received yet; keep buffering.
            return None, buffer

        # Parse JSON content
        try:
            content = buffer[content_start:content_end].decode("utf-8")
            data = json.loads(content)
            message = JsonRpcMessage(**data)

            return message, buffer[content_end:]

        except (json.JSONDecodeError, UnicodeDecodeError) as e:
            # Drop the unparseable body but keep any following bytes.
            logger.error(f"Failed to parse message: {e}")
            return None, buffer[content_end:]
467 | 
    def _handle_incoming_message(self, message: JsonRpcMessage) -> None:
        """Handle an incoming message from the LSP server.

        Dispatch order:
        1. A message with an id resolves the matching pending-request future;
           an unknown id is treated as a server-to-client request and is
           answered with an empty result (then falls through to step 2).
        2. A message whose method maps to a known LspEventName resolves any
           futures waiting on that event; compileComplete additionally sets
           state.compiled.
        3. Anything else is logged at debug level and ignored.
        """

        # Handle responses to requests
        if message.id is not None:
            # Thread-safe: pop with default avoids race condition between check and pop
            future = self.state.pending_requests.pop(message.id, None)
            if future is not None:
                logger.debug(f"Received response for request {message.to_dict()}")

                # Use call_soon_threadsafe to safely resolve futures across event loop contexts
                # This prevents "Task got Future attached to a different loop" errors when
                # the future was created in one loop but is being resolved from another loop
                # Get the loop from the future itself to ensure we schedule on the correct loop
                future_loop = future.get_loop()
                if message.error:
                    future_loop.call_soon_threadsafe(
                        future.set_exception,
                        RuntimeError(f"LSP error: {message.error}"),
                    )
                else:
                    future_loop.call_soon_threadsafe(future.set_result, message.result)
                return
            else:
                # it's an unknown request, we respond with an empty result
                logger.debug(f"LSP request {message.to_dict()}")
                self._send_message(
                    JsonRpcMessage(id=message.id, result=None), none_values=True
                )

        # Responses without a method were fully handled above; nothing to dispatch.
        if message.method is None:
            return

        # it's a known event type we want to explicitly handle
        if lsp_event_name := event_name_from_string(message.method):
            # Check if this is an event we're waiting for
            # Thread-safe: pop with default avoids race condition
            futures = self.state.pending_notifications.pop(lsp_event_name, None)
            if futures is not None:
                logger.debug(f"Received event {lsp_event_name} - {message.to_dict()}")
                # Use call_soon_threadsafe for notification futures as well
                for future in futures:
                    future_loop = future.get_loop()
                    future_loop.call_soon_threadsafe(future.set_result, message.params)

            match lsp_event_name:
                case LspEventName.compileComplete:
                    logger.info("Recorded compile complete event")
                    self.state.compiled = True
                case _:
                    logger.debug(f"LSP event {message.method}")
                    pass

        else:
            # it's an unknown notification, log it and move on
            logger.debug(f"LSP event {message.method}")
524 | 
525 |     async def send_request(
526 |         self,
527 |         method: str,
528 |         params: dict[str, Any] | list[Any] | None = None,
529 |         timeout: float | None = None,
530 |     ) -> dict[str, Any]:
531 |         """Send a request to the LSP server.
532 | 
533 |         Args:
534 |             method: The JSON-RPC method name
535 |             params: Optional parameters for the method
536 |             timeout: Timeout in seconds for this request. If not specified, uses
537 |                     default_request_timeout from the connection configuration.
538 | 
539 |         Returns:
540 |             A dictionary containing the response result or error information
541 |         """
542 |         if not self.process:
543 |             raise RuntimeError("LSP server is not running")
544 | 
545 |         # Create request message
546 |         request_id = self.state.get_next_request_id()
547 |         message = JsonRpcMessage(
548 |             id=request_id,
549 |             method=method,
550 |             params=params,
551 |         )
552 | 
553 |         # Create future for response using the current running loop
554 |         # This prevents "Task got Future attached to a different loop" errors
555 |         # when send_request is called from a different loop context than where
556 |         # the connection was initialized
557 |         future = asyncio.get_running_loop().create_future()
558 |         self.state.pending_requests[request_id] = future
559 | 
560 |         # Send the message
561 |         self._send_message(message)
562 | 
563 |         try:
564 |             return await asyncio.wait_for(
565 |                 future, timeout=timeout or self.default_request_timeout
566 |             )
567 |         except Exception as e:
568 |             return {"error": str(e)}
569 | 
570 |     def send_notification(
571 |         self,
572 |         method: str,
573 |         params: dict[str, Any] | list[Any] | None = None,
574 |     ) -> None:
575 |         """Send a notification to the LSP server.
576 | 
577 |         Args:
578 |             method: The JSON-RPC method name
579 |             params: Optional parameters for the method
580 |         """
581 |         if not self.process:
582 |             raise RuntimeError("LSP server is not running")
583 | 
584 |         # Create notification message (no ID)
585 |         message = JsonRpcMessage(
586 |             method=method,
587 |             params=params,
588 |         )
589 | 
590 |         # Send the message
591 |         self._send_message(message)
592 | 
593 |     def wait_for_notification(
594 |         self, event_name: LspEventName
595 |     ) -> asyncio.Future[dict[str, Any]]:
596 |         """Wait for a notification from the LSP server.
597 | 
598 |         Args:
599 |             event_name: The LSP event name to wait for
600 | 
601 |         Returns:
602 |             A Future that will be resolved with the notification params when received
603 |         """
604 |         future = asyncio.get_running_loop().create_future()
605 |         self.state.pending_notifications.setdefault(event_name, []).append(future)
606 | 
607 |         return future
608 | 
609 |     def _send_message(self, message: JsonRpcMessage, none_values: bool = False) -> None:
610 |         """Send a message to the LSP server."""
611 |         # Serialize message
612 |         content = json.dumps(message.to_dict(none_values=none_values))
613 |         content_bytes = content.encode("utf-8")
614 | 
615 |         # Create LSP message with headers
616 |         header = f"Content-Length: {len(content_bytes)}\r\n\r\n"
617 |         header_bytes = header.encode("utf-8")
618 | 
619 |         data = header_bytes + content_bytes
620 | 
621 |         logger.debug(f"Sending message: {content}")
622 | 
623 |         # Queue for sending (put_nowait is safe from sync context)
624 |         self._outgoing_queue.put_nowait(data)
625 | 
626 |     def _send_shutdown_request(self) -> None:
627 |         """Send shutdown request to the LSP server."""
628 |         try:
629 |             # Send shutdown request
630 |             message = JsonRpcMessage(
631 |                 id=self.state.get_next_request_id(),
632 |                 method="shutdown",
633 |             )
634 |             self._send_message(message)
635 | 
636 |             # Send exit notification
637 |             exit_message = JsonRpcMessage(
638 |                 method="exit",
639 |             )
640 |             self._send_message(exit_message)
641 | 
642 |         except Exception as e:
643 |             logger.error(f"Error sending shutdown: {e}")
644 | 
645 |     def is_running(self) -> bool:
646 |         """Check if the LSP server is running."""
647 |         return self.process is not None and self.process.returncode is None
648 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/discovery/client.py:
--------------------------------------------------------------------------------

```python
  1 | import textwrap
  2 | from typing import Literal, TypedDict
  3 | 
  4 | import requests
  5 | 
  6 | from dbt_mcp.config.config_providers import ConfigProvider, DiscoveryConfig
  7 | from dbt_mcp.errors import GraphQLError, InvalidParameterError
  8 | from dbt_mcp.gql.errors import raise_gql_error
  9 | 
 10 | PAGE_SIZE = 100
 11 | MAX_NODE_QUERY_LIMIT = 1000
 12 | 
 13 | 
class GraphQLQueries:
    """GraphQL query strings sent to the dbt Discovery (metadata) API."""

    # List models (paged, sortable); returns name/uniqueId/description only.
    GET_MODELS = textwrap.dedent("""
        query GetModels(
            $environmentId: BigInt!,
            $modelsFilter: ModelAppliedFilter,
            $after: String,
            $first: Int,
            $sort: AppliedModelSort
        ) {
            environment(id: $environmentId) {
                applied {
                    models(filter: $modelsFilter, after: $after, first: $first, sort: $sort) {
                        pageInfo {
                            endCursor
                        }
                        edges {
                            node {
                                name
                                uniqueId
                                description
                            }
                        }
                    }
                }
            }
        }
    """)

    # Per-model health: last run info, tests, and ancestor execution/freshness.
    GET_MODEL_HEALTH = textwrap.dedent("""
        query GetModelDetails(
            $environmentId: BigInt!,
            $modelsFilter: ModelAppliedFilter
            $first: Int,
        ) {
            environment(id: $environmentId) {
                applied {
                    models(filter: $modelsFilter, first: $first) {
                        edges {
                            node {
                                name
                                uniqueId
                                executionInfo {
                                    lastRunGeneratedAt
                                    lastRunStatus
                                    executeCompletedAt
                                    executeStartedAt
                                }
                                tests {
                                    name
                                    description
                                    columnName
                                    testType
                                    executionInfo {
                                        lastRunGeneratedAt
                                        lastRunStatus
                                        executeCompletedAt
                                        executeStartedAt
                                    }
                                }
                                ancestors(types: [Model, Source, Seed, Snapshot]) {
                                  ... on ModelAppliedStateNestedNode {
                                    name
                                    uniqueId
                                    resourceType
                                    materializedType
                                    modelexecutionInfo: executionInfo {
                                      lastRunStatus
                                      executeCompletedAt
                                      }
                                  }
                                  ... on SnapshotAppliedStateNestedNode {
                                    name
                                    uniqueId
                                    resourceType
                                    snapshotExecutionInfo: executionInfo {
                                      lastRunStatus
                                      executeCompletedAt
                                    }
                                  }
                                  ... on SeedAppliedStateNestedNode {
                                    name
                                    uniqueId
                                    resourceType
                                    seedExecutionInfo: executionInfo {
                                      lastRunStatus
                                      executeCompletedAt
                                    }
                                  }
                                  ... on SourceAppliedStateNestedNode {
                                    sourceName
                                    name
                                    resourceType
                                    freshness {
                                      maxLoadedAt
                                      maxLoadedAtTimeAgoInS
                                      freshnessStatus
                                    }
                                  }
                              }
                            }
                        }
                    }
                }
            }
        }
    """)

    # Per-model details: compiled code, warehouse location, catalog columns.
    GET_MODEL_DETAILS = textwrap.dedent("""
        query GetModelDetails(
            $environmentId: BigInt!,
            $modelsFilter: ModelAppliedFilter
            $first: Int,
        ) {
            environment(id: $environmentId) {
                applied {
                    models(filter: $modelsFilter, first: $first) {
                        edges {
                            node {
                                name
                                uniqueId
                                compiledCode
                                description
                                database
                                schema
                                alias
                                catalog {
                                    columns {
                                        description
                                        name
                                        type
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    """)

    # Shared inline fragments for lineage nodes. NOTE: opens a brace that is
    # closed by the concatenated tail in GET_MODEL_PARENTS/GET_MODEL_CHILDREN.
    COMMON_FIELDS_PARENTS_CHILDREN = textwrap.dedent("""
        {
        ... on ExposureAppliedStateNestedNode {
            resourceType
            name
            description
        }
        ... on ExternalModelNode {
            resourceType
            description
            name
        }
        ... on MacroDefinitionNestedNode {
            resourceType
            name
            description
        }
        ... on MetricDefinitionNestedNode {
            resourceType
            name
            description
        }
        ... on ModelAppliedStateNestedNode {
            resourceType
            name
            description
        }
        ... on SavedQueryDefinitionNestedNode {
            resourceType
            name
            description
        }
        ... on SeedAppliedStateNestedNode {
            resourceType
            name
            description
        }
        ... on SemanticModelDefinitionNestedNode {
            resourceType
            name
            description
        }
        ... on SnapshotAppliedStateNestedNode {
            resourceType
            name
            description
        }
        ... on SourceAppliedStateNestedNode {
            resourceType
            sourceName
            uniqueId
            name
            description
        }
        ... on TestAppliedStateNestedNode {
            resourceType
            name
            description
        }
    """)

    # Direct ancestors of one model, using the shared lineage fragments.
    GET_MODEL_PARENTS = (
        textwrap.dedent("""
        query GetModelParents(
            $environmentId: BigInt!,
            $modelsFilter: ModelAppliedFilter
            $first: Int,
        ) {
            environment(id: $environmentId) {
                applied {
                    models(filter: $modelsFilter, first: $first) {
                        pageInfo {
                            endCursor
                        }
                        edges {
                            node {
                                parents
    """)
        + COMMON_FIELDS_PARENTS_CHILDREN
        + textwrap.dedent("""
                                }
                            }
                        }
                    }
                }
            }
        }
    """)
    )

    # Direct descendants of one model, using the shared lineage fragments.
    GET_MODEL_CHILDREN = (
        textwrap.dedent("""
        query GetModelChildren(
            $environmentId: BigInt!,
            $modelsFilter: ModelAppliedFilter
            $first: Int,
        ) {
            environment(id: $environmentId) {
                applied {
                    models(filter: $modelsFilter, first: $first) {
                        pageInfo {
                            endCursor
                        }
                        edges {
                            node {
                                children
    """)
        + COMMON_FIELDS_PARENTS_CHILDREN
        + textwrap.dedent("""
                                }
                            }
                        }
                    }
                }
            }
        }
    """)
    )

    # List sources (paged) with location and freshness info.
    GET_SOURCES = textwrap.dedent("""
        query GetSources(
            $environmentId: BigInt!,
            $sourcesFilter: SourceAppliedFilter,
            $after: String,
            $first: Int
        ) {
            environment(id: $environmentId) {
                applied {
                    sources(filter: $sourcesFilter, after: $after, first: $first) {
                        pageInfo {
                            hasNextPage
                            endCursor
                        }
                        edges {
                            node {
                                name
                                uniqueId
                                identifier
                                description
                                sourceName
                                resourceType
                                database
                                schema
                                freshness {
                                    maxLoadedAt
                                    maxLoadedAtTimeAgoInS
                                    freshnessStatus
                                }
                            }
                        }
                    }
                }
            }
        }
    """)

    # List exposures (paged) with basic identifying info.
    GET_EXPOSURES = textwrap.dedent("""
        query Exposures($environmentId: BigInt!, $first: Int, $after: String) {
            environment(id: $environmentId) {
                definition {
                    exposures(first: $first, after: $after) {
                        totalCount
                        pageInfo {
                            hasNextPage
                            endCursor
                        }
                        edges {
                            node {
                                name
                                uniqueId
                                url
                                description
                            }
                        }
                    }
                }
            }
        }
    """)

    # Detailed exposure lookup; filter supports uniqueIds only.
    GET_EXPOSURE_DETAILS = textwrap.dedent("""
        query ExposureDetails($environmentId: BigInt!, $filter: ExposureFilter, $first: Int) {
            environment(id: $environmentId) {
                definition {
                    exposures(first: $first, filter: $filter) {
                        edges {
                            node {
                                name
                                maturity
                                label
                                ownerEmail
                                ownerName
                                uniqueId
                                url
                                meta
                                freshnessStatus
                                exposureType
                                description
                                parents {
                                    uniqueId
                                }
                            }
                        }
                    }
                }
            }
        }
    """)
362 | 
363 | 
class MetadataAPIClient:
    """Thin client that POSTs GraphQL queries to the Discovery API."""

    def __init__(self, config_provider: ConfigProvider[DiscoveryConfig]):
        self.config_provider = config_provider

    async def execute_query(self, query: str, variables: dict) -> dict:
        """Execute a GraphQL query and return the decoded JSON response.

        Args:
            query: The GraphQL query string.
            variables: Variables to bind into the query.

        Returns:
            The raw response payload as a dict (may contain an "errors" key).
        """
        config = await self.config_provider.get_config()
        url = config.url
        headers = config.headers_provider.get_headers()
        # requests is blocking; the timeout prevents an unresponsive API from
        # hanging the calling event loop indefinitely.
        response = requests.post(
            url=url,
            json={"query": query, "variables": variables},
            headers=headers,
            timeout=30,
        )
        return response.json()
378 | 
379 | 
class ModelFilter(TypedDict, total=False):
    # Payload for the API's ModelAppliedFilter; restricts results to a dbt
    # modeling layer (only "marts" is expressed here).
    modelingLayer: Literal["marts"] | None
382 | 
383 | 
class SourceFilter(TypedDict, total=False):
    # Payload for the API's SourceAppliedFilter: select sources by source
    # name(s) and/or by node unique id(s).
    sourceNames: list[str]
    uniqueIds: list[str] | None
387 | 
388 | 
class ModelsFetcher:
    """Fetches model metadata from the Discovery API for one environment."""

    def __init__(self, api_client: MetadataAPIClient):
        self.api_client = api_client

    async def get_environment_id(self) -> int:
        """Return the environment id from the discovery configuration."""
        config = await self.api_client.config_provider.get_config()
        return config.environment_id

    def _parse_response_to_json(self, result: dict) -> list[dict]:
        """Extract well-formed model nodes from a GraphQL response.

        Raises:
            GraphQLError: if the response reports errors.
        """
        raise_gql_error(result)
        # Fix: check for errors before the empty-edges early return so a
        # failed query with no edges is never silently swallowed.
        if result.get("errors"):
            raise GraphQLError(f"GraphQL query failed: {result['errors']}")
        edges = result["data"]["environment"]["applied"]["models"]["edges"]
        parsed_edges: list[dict] = []
        if not edges:
            return parsed_edges
        for edge in edges:
            # Defensively skip malformed edges rather than failing the batch.
            if not isinstance(edge, dict) or "node" not in edge:
                continue
            node = edge["node"]
            if not isinstance(node, dict):
                continue
            parsed_edges.append(node)
        return parsed_edges

    def _get_model_filters(
        self, model_name: str | None = None, unique_id: str | None = None
    ) -> dict[str, list[str] | str]:
        """Build a ModelAppliedFilter; unique_id takes precedence.

        Raises:
            InvalidParameterError: if neither argument is provided.
        """
        if unique_id:
            return {"uniqueIds": [unique_id]}
        elif model_name:
            return {"identifier": model_name}
        else:
            raise InvalidParameterError(
                "Either model_name or unique_id must be provided"
            )

    async def _fetch_single_model_node(
        self,
        query: str,
        model_name: str | None,
        unique_id: str | None,
    ) -> dict | None:
        """Run *query* filtered to a single model and return its first node.

        Returns None when no matching model exists. Shared by the
        details/parents/children/health fetchers below.
        """
        variables = {
            "environmentId": await self.get_environment_id(),
            "modelsFilter": self._get_model_filters(model_name, unique_id),
            "first": 1,
        }
        result = await self.api_client.execute_query(query, variables)
        raise_gql_error(result)
        edges = result["data"]["environment"]["applied"]["models"]["edges"]
        if not edges:
            return None
        return edges[0]["node"]

    async def fetch_models(self, model_filter: ModelFilter | None = None) -> list[dict]:
        """Page through models (most-queried first) up to MAX_NODE_QUERY_LIMIT."""
        has_next_page = True
        after_cursor: str = ""
        all_edges: list[dict] = []
        while has_next_page and len(all_edges) < MAX_NODE_QUERY_LIMIT:
            variables = {
                "environmentId": await self.get_environment_id(),
                "after": after_cursor,
                "first": PAGE_SIZE,
                "modelsFilter": model_filter or {},
                "sort": {"field": "queryUsageCount", "direction": "desc"},
            }

            result = await self.api_client.execute_query(
                GraphQLQueries.GET_MODELS, variables
            )
            all_edges.extend(self._parse_response_to_json(result))

            # GET_MODELS does not expose hasNextPage, so stop when the
            # cursor stops advancing.
            previous_after_cursor = after_cursor
            after_cursor = result["data"]["environment"]["applied"]["models"][
                "pageInfo"
            ]["endCursor"]
            if previous_after_cursor == after_cursor:
                has_next_page = False

        return all_edges

    async def fetch_model_details(
        self, model_name: str | None = None, unique_id: str | None = None
    ) -> dict:
        """Return details for one model, or {} if it is not found."""
        node = await self._fetch_single_model_node(
            GraphQLQueries.GET_MODEL_DETAILS, model_name, unique_id
        )
        return node if node is not None else {}

    async def fetch_model_parents(
        self, model_name: str | None = None, unique_id: str | None = None
    ) -> list[dict]:
        """Return the direct ancestors of one model, or [] if not found."""
        node = await self._fetch_single_model_node(
            GraphQLQueries.GET_MODEL_PARENTS, model_name, unique_id
        )
        return node["parents"] if node is not None else []

    async def fetch_model_children(
        self, model_name: str | None = None, unique_id: str | None = None
    ) -> list[dict]:
        """Return the direct descendants of one model, or [] if not found."""
        node = await self._fetch_single_model_node(
            GraphQLQueries.GET_MODEL_CHILDREN, model_name, unique_id
        )
        return node["children"] if node is not None else []

    async def fetch_model_health(
        self, model_name: str | None = None, unique_id: str | None = None
    ) -> dict | list[dict]:
        """Return execution/test/ancestor health for one model, or [] if not found.

        Note: the found case returns the node dict; the annotation was fixed
        to reflect the actual runtime value (previously list[dict] only).
        """
        node = await self._fetch_single_model_node(
            GraphQLQueries.GET_MODEL_HEALTH, model_name, unique_id
        )
        return node if node is not None else []
524 | 
525 | 
class ExposuresFetcher:
    """Fetches exposure metadata from the Discovery API for one environment."""

    def __init__(self, api_client: MetadataAPIClient):
        self.api_client = api_client

    async def get_environment_id(self) -> int:
        """Return the environment id from the discovery configuration."""
        config = await self.api_client.config_provider.get_config()
        return config.environment_id

    def _parse_response_to_json(self, result: dict) -> list[dict]:
        """Extract well-formed exposure nodes from a GraphQL response.

        Raises:
            GraphQLError: if the response reports errors.
        """
        raise_gql_error(result)
        # Fix: check for errors before the empty-edges early return so a
        # failed query with no edges is never silently swallowed.
        if result.get("errors"):
            raise GraphQLError(f"GraphQL query failed: {result['errors']}")
        edges = result["data"]["environment"]["definition"]["exposures"]["edges"]
        parsed_edges: list[dict] = []
        if not edges:
            return parsed_edges
        for edge in edges:
            # Defensively skip malformed edges rather than failing the batch.
            if not isinstance(edge, dict) or "node" not in edge:
                continue
            node = edge["node"]
            if not isinstance(node, dict):
                continue
            parsed_edges.append(node)
        return parsed_edges

    async def fetch_exposures(self) -> list[dict]:
        """Page through all exposures defined in the environment."""
        has_next_page = True
        after_cursor: str | None = None
        all_edges: list[dict] = []

        while has_next_page:
            variables: dict[str, int | str] = {
                "environmentId": await self.get_environment_id(),
                "first": PAGE_SIZE,
            }
            if after_cursor:
                variables["after"] = after_cursor

            result = await self.api_client.execute_query(
                GraphQLQueries.GET_EXPOSURES, variables
            )
            new_edges = self._parse_response_to_json(result)
            all_edges.extend(new_edges)

            page_info = result["data"]["environment"]["definition"]["exposures"][
                "pageInfo"
            ]
            has_next_page = page_info.get("hasNextPage", False)
            after_cursor = page_info.get("endCursor")

        return all_edges

    def _get_exposure_filters(
        self, exposure_name: str | None = None, unique_ids: list[str] | None = None
    ) -> dict[str, list[str]]:
        """Build an ExposureFilter (the API supports uniqueIds only).

        Raises:
            InvalidParameterError: if unique_ids is missing, or if only
                exposure_name was given (the API cannot filter by name).
        """
        if unique_ids:
            return {"uniqueIds": unique_ids}
        elif exposure_name:
            raise InvalidParameterError(
                "ExposureFilter only supports uniqueIds. Please use unique_ids parameter instead of exposure_name."
            )
        else:
            raise InvalidParameterError(
                "unique_ids must be provided for exposure filtering"
            )

    async def fetch_exposure_details(
        self, exposure_name: str | None = None, unique_ids: list[str] | None = None
    ) -> list[dict]:
        """Return exposure details by unique_ids, or (slow path) by name.

        Raises:
            InvalidParameterError: if neither argument is provided.
        """
        if exposure_name and not unique_ids:
            # Since ExposureFilter doesn't support filtering by name,
            # we need to fetch all exposures and find the one with matching name
            all_exposures = await self.fetch_exposures()
            for exposure in all_exposures:
                if exposure.get("name") == exposure_name:
                    return [exposure]
            return []
        elif unique_ids:
            exposure_filters = self._get_exposure_filters(unique_ids=unique_ids)
            variables = {
                "environmentId": await self.get_environment_id(),
                "filter": exposure_filters,
                "first": len(unique_ids),  # Request as many as we're filtering for
            }
            result = await self.api_client.execute_query(
                GraphQLQueries.GET_EXPOSURE_DETAILS, variables
            )
            raise_gql_error(result)
            edges = result["data"]["environment"]["definition"]["exposures"]["edges"]
            if not edges:
                return []
            return [edge["node"] for edge in edges]
        else:
            raise InvalidParameterError(
                "Either exposure_name or unique_ids must be provided"
            )
622 | 
623 | 
class SourcesFetcher:
    """Fetches source nodes from the dbt metadata (Discovery) API.

    Results are paginated by the API; see :meth:`fetch_sources`.
    """

    def __init__(self, api_client: MetadataAPIClient):
        self.api_client = api_client

    async def get_environment_id(self) -> int:
        """Return the dbt environment id from the API client's configuration."""
        config = await self.api_client.config_provider.get_config()
        return config.environment_id

    def _parse_response_to_json(self, result: dict) -> list[dict]:
        """Extract source nodes from a GET_SOURCES GraphQL response.

        Raises on GraphQL errors, then returns the ``node`` dict of every
        well-formed edge, silently skipping malformed entries.

        Raises:
            GraphQLError: If the response carries an ``errors`` payload.
        """
        raise_gql_error(result)
        # Defensive double-check, placed *before* any payload inspection so an
        # errors-only response is never silently treated as an empty result.
        if result.get("errors"):
            raise GraphQLError(f"GraphQL query failed: {result['errors']}")
        edges = result["data"]["environment"]["applied"]["sources"]["edges"]
        parsed_edges: list[dict] = []
        if not edges:
            return parsed_edges
        for edge in edges:
            # Skip malformed edges/nodes rather than failing the whole page.
            if not isinstance(edge, dict) or "node" not in edge:
                continue
            node = edge["node"]
            if not isinstance(node, dict):
                continue
            parsed_edges.append(node)
        return parsed_edges

    async def fetch_sources(
        self,
        source_names: list[str] | None = None,
        unique_ids: list[str] | None = None,
    ) -> list[dict]:
        """Fetch source nodes, optionally filtered by name and/or unique id.

        Pages through the API until it reports no further pages or at least
        MAX_NODE_QUERY_LIMIT nodes have been collected.

        Args:
            source_names: Restrict results to these source names, if given.
            unique_ids: Restrict results to these unique ids, if given.

        Returns:
            The accumulated list of source node dicts.
        """
        source_filter: SourceFilter = {}
        if source_names is not None:
            source_filter["sourceNames"] = source_names
        if unique_ids is not None:
            source_filter["uniqueIds"] = unique_ids

        has_next_page = True
        after_cursor: str = ""
        all_edges: list[dict] = []

        while has_next_page and len(all_edges) < MAX_NODE_QUERY_LIMIT:
            variables = {
                "environmentId": await self.get_environment_id(),
                "after": after_cursor,
                "first": PAGE_SIZE,
                "sourcesFilter": source_filter,
            }

            result = await self.api_client.execute_query(
                GraphQLQueries.GET_SOURCES, variables
            )
            all_edges.extend(self._parse_response_to_json(result))

            page_info = result["data"]["environment"]["applied"]["sources"]["pageInfo"]
            has_next_page = page_info.get("hasNextPage", False)
            # endCursor can be null on the final page; coerce to "" to keep the
            # annotated str type (the value is unused once has_next_page is False).
            after_cursor = page_info.get("endCursor") or ""

        return all_edges
682 | 
```
Page 5/6FirstPrevNextLast