#
tokens: 48859/50000 37/305 files (page 2/5)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 2 of 5. Use http://codebase.md/dbt-labs/dbt-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .changes
│   ├── header.tpl.md
│   ├── unreleased
│   │   ├── .gitkeep
│   │   ├── Bug Fix-20251028-143835.yaml
│   │   ├── Enhancement or New Feature-20251014-175047.yaml
│   │   └── Under the Hood-20251030-151902.yaml
│   ├── v0.1.3.md
│   ├── v0.10.0.md
│   ├── v0.10.1.md
│   ├── v0.10.2.md
│   ├── v0.10.3.md
│   ├── v0.2.0.md
│   ├── v0.2.1.md
│   ├── v0.2.10.md
│   ├── v0.2.11.md
│   ├── v0.2.12.md
│   ├── v0.2.13.md
│   ├── v0.2.14.md
│   ├── v0.2.15.md
│   ├── v0.2.16.md
│   ├── v0.2.17.md
│   ├── v0.2.18.md
│   ├── v0.2.19.md
│   ├── v0.2.2.md
│   ├── v0.2.20.md
│   ├── v0.2.3.md
│   ├── v0.2.4.md
│   ├── v0.2.5.md
│   ├── v0.2.6.md
│   ├── v0.2.7.md
│   ├── v0.2.8.md
│   ├── v0.2.9.md
│   ├── v0.3.0.md
│   ├── v0.4.0.md
│   ├── v0.4.1.md
│   ├── v0.4.2.md
│   ├── v0.5.0.md
│   ├── v0.6.0.md
│   ├── v0.6.1.md
│   ├── v0.6.2.md
│   ├── v0.7.0.md
│   ├── v0.8.0.md
│   ├── v0.8.1.md
│   ├── v0.8.2.md
│   ├── v0.8.3.md
│   ├── v0.8.4.md
│   ├── v0.9.0.md
│   ├── v0.9.1.md
│   └── v1.0.0.md
├── .changie.yaml
├── .env.example
├── .github
│   ├── actions
│   │   └── setup-python
│   │       └── action.yml
│   ├── CODEOWNERS
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   └── feature_request.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── changelog-check.yml
│       ├── codeowners-check.yml
│       ├── create-release-pr.yml
│       ├── release.yml
│       └── run-checks-pr.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── .task
│   └── checksum
│       └── d2
├── .tool-versions
├── .vscode
│   ├── launch.json
│   └── settings.json
├── CHANGELOG.md
├── CONTRIBUTING.md
├── docs
│   ├── d2.png
│   └── diagram.d2
├── evals
│   └── semantic_layer
│       └── test_eval_semantic_layer.py
├── examples
│   ├── .DS_Store
│   ├── aws_strands_agent
│   │   ├── __init__.py
│   │   ├── .DS_Store
│   │   ├── dbt_data_scientist
│   │   │   ├── __init__.py
│   │   │   ├── .env.example
│   │   │   ├── agent.py
│   │   │   ├── prompts.py
│   │   │   ├── quick_mcp_test.py
│   │   │   ├── test_all_tools.py
│   │   │   └── tools
│   │   │       ├── __init__.py
│   │   │       ├── dbt_compile.py
│   │   │       ├── dbt_mcp.py
│   │   │       └── dbt_model_analyzer.py
│   │   ├── LICENSE
│   │   ├── README.md
│   │   └── requirements.txt
│   ├── google_adk_agent
│   │   ├── __init__.py
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   └── README.md
│   ├── langgraph_agent
│   │   ├── __init__.py
│   │   ├── .python-version
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   ├── README.md
│   │   └── uv.lock
│   ├── openai_agent
│   │   ├── __init__.py
│   │   ├── .gitignore
│   │   ├── .python-version
│   │   ├── main_streamable.py
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   ├── README.md
│   │   └── uv.lock
│   ├── openai_responses
│   │   ├── __init__.py
│   │   ├── .gitignore
│   │   ├── .python-version
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   ├── README.md
│   │   └── uv.lock
│   ├── pydantic_ai_agent
│   │   ├── __init__.py
│   │   ├── .gitignore
│   │   ├── .python-version
│   │   ├── main.py
│   │   ├── pyproject.toml
│   │   └── README.md
│   └── remote_mcp
│       ├── .python-version
│       ├── main.py
│       ├── pyproject.toml
│       ├── README.md
│       └── uv.lock
├── LICENSE
├── pyproject.toml
├── README.md
├── src
│   ├── client
│   │   ├── __init__.py
│   │   ├── main.py
│   │   └── tools.py
│   ├── dbt_mcp
│   │   ├── __init__.py
│   │   ├── .gitignore
│   │   ├── config
│   │   │   ├── config_providers.py
│   │   │   ├── config.py
│   │   │   ├── dbt_project.py
│   │   │   ├── dbt_yaml.py
│   │   │   ├── headers.py
│   │   │   ├── settings.py
│   │   │   └── transport.py
│   │   ├── dbt_admin
│   │   │   ├── __init__.py
│   │   │   ├── client.py
│   │   │   ├── constants.py
│   │   │   ├── run_results_errors
│   │   │   │   ├── __init__.py
│   │   │   │   ├── config.py
│   │   │   │   └── parser.py
│   │   │   └── tools.py
│   │   ├── dbt_cli
│   │   │   ├── binary_type.py
│   │   │   └── tools.py
│   │   ├── dbt_codegen
│   │   │   ├── __init__.py
│   │   │   └── tools.py
│   │   ├── discovery
│   │   │   ├── client.py
│   │   │   └── tools.py
│   │   ├── errors
│   │   │   ├── __init__.py
│   │   │   ├── admin_api.py
│   │   │   ├── base.py
│   │   │   ├── cli.py
│   │   │   ├── common.py
│   │   │   ├── discovery.py
│   │   │   ├── semantic_layer.py
│   │   │   └── sql.py
│   │   ├── gql
│   │   │   └── errors.py
│   │   ├── lsp
│   │   │   ├── __init__.py
│   │   │   ├── lsp_binary_manager.py
│   │   │   ├── lsp_client.py
│   │   │   ├── lsp_connection.py
│   │   │   └── tools.py
│   │   ├── main.py
│   │   ├── mcp
│   │   │   ├── create.py
│   │   │   └── server.py
│   │   ├── oauth
│   │   │   ├── client_id.py
│   │   │   ├── context_manager.py
│   │   │   ├── dbt_platform.py
│   │   │   ├── fastapi_app.py
│   │   │   ├── logging.py
│   │   │   ├── login.py
│   │   │   ├── refresh_strategy.py
│   │   │   ├── token_provider.py
│   │   │   └── token.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── admin_api
│   │   │   │   ├── cancel_job_run.md
│   │   │   │   ├── get_job_details.md
│   │   │   │   ├── get_job_run_artifact.md
│   │   │   │   ├── get_job_run_details.md
│   │   │   │   ├── get_job_run_error.md
│   │   │   │   ├── list_job_run_artifacts.md
│   │   │   │   ├── list_jobs_runs.md
│   │   │   │   ├── list_jobs.md
│   │   │   │   ├── retry_job_run.md
│   │   │   │   └── trigger_job_run.md
│   │   │   ├── dbt_cli
│   │   │   │   ├── args
│   │   │   │   │   ├── full_refresh.md
│   │   │   │   │   ├── limit.md
│   │   │   │   │   ├── resource_type.md
│   │   │   │   │   ├── selectors.md
│   │   │   │   │   ├── sql_query.md
│   │   │   │   │   └── vars.md
│   │   │   │   ├── build.md
│   │   │   │   ├── compile.md
│   │   │   │   ├── docs.md
│   │   │   │   ├── list.md
│   │   │   │   ├── parse.md
│   │   │   │   ├── run.md
│   │   │   │   ├── show.md
│   │   │   │   └── test.md
│   │   │   ├── dbt_codegen
│   │   │   │   ├── args
│   │   │   │   │   ├── case_sensitive_cols.md
│   │   │   │   │   ├── database_name.md
│   │   │   │   │   ├── generate_columns.md
│   │   │   │   │   ├── include_data_types.md
│   │   │   │   │   ├── include_descriptions.md
│   │   │   │   │   ├── leading_commas.md
│   │   │   │   │   ├── materialized.md
│   │   │   │   │   ├── model_name.md
│   │   │   │   │   ├── model_names.md
│   │   │   │   │   ├── schema_name.md
│   │   │   │   │   ├── source_name.md
│   │   │   │   │   ├── table_name.md
│   │   │   │   │   ├── table_names.md
│   │   │   │   │   ├── tables.md
│   │   │   │   │   └── upstream_descriptions.md
│   │   │   │   ├── generate_model_yaml.md
│   │   │   │   ├── generate_source.md
│   │   │   │   └── generate_staging_model.md
│   │   │   ├── discovery
│   │   │   │   ├── get_all_models.md
│   │   │   │   ├── get_all_sources.md
│   │   │   │   ├── get_exposure_details.md
│   │   │   │   ├── get_exposures.md
│   │   │   │   ├── get_mart_models.md
│   │   │   │   ├── get_model_children.md
│   │   │   │   ├── get_model_details.md
│   │   │   │   ├── get_model_health.md
│   │   │   │   └── get_model_parents.md
│   │   │   ├── lsp
│   │   │   │   ├── args
│   │   │   │   │   ├── column_name.md
│   │   │   │   │   └── model_id.md
│   │   │   │   └── get_column_lineage.md
│   │   │   ├── prompts.py
│   │   │   └── semantic_layer
│   │   │       ├── get_dimensions.md
│   │   │       ├── get_entities.md
│   │   │       ├── get_metrics_compiled_sql.md
│   │   │       ├── list_metrics.md
│   │   │       └── query_metrics.md
│   │   ├── py.typed
│   │   ├── semantic_layer
│   │   │   ├── client.py
│   │   │   ├── gql
│   │   │   │   ├── gql_request.py
│   │   │   │   └── gql.py
│   │   │   ├── levenshtein.py
│   │   │   ├── tools.py
│   │   │   └── types.py
│   │   ├── sql
│   │   │   └── tools.py
│   │   ├── telemetry
│   │   │   └── logging.py
│   │   ├── tools
│   │   │   ├── annotations.py
│   │   │   ├── definitions.py
│   │   │   ├── policy.py
│   │   │   ├── register.py
│   │   │   ├── tool_names.py
│   │   │   └── toolsets.py
│   │   └── tracking
│   │       └── tracking.py
│   └── remote_mcp
│       ├── __init__.py
│       └── session.py
├── Taskfile.yml
├── tests
│   ├── __init__.py
│   ├── env_vars.py
│   ├── integration
│   │   ├── __init__.py
│   │   ├── dbt_codegen
│   │   │   ├── __init__.py
│   │   │   └── test_dbt_codegen.py
│   │   ├── discovery
│   │   │   └── test_discovery.py
│   │   ├── initialization
│   │   │   ├── __init__.py
│   │   │   └── test_initialization.py
│   │   ├── lsp
│   │   │   └── test_lsp_connection.py
│   │   ├── remote_mcp
│   │   │   └── test_remote_mcp.py
│   │   ├── remote_tools
│   │   │   └── test_remote_tools.py
│   │   ├── semantic_layer
│   │   │   └── test_semantic_layer.py
│   │   └── tracking
│   │       └── test_tracking.py
│   ├── mocks
│   │   └── config.py
│   └── unit
│       ├── __init__.py
│       ├── config
│       │   ├── __init__.py
│       │   ├── test_config.py
│       │   └── test_transport.py
│       ├── dbt_admin
│       │   ├── __init__.py
│       │   ├── test_client.py
│       │   ├── test_error_fetcher.py
│       │   └── test_tools.py
│       ├── dbt_cli
│       │   ├── __init__.py
│       │   ├── test_cli_integration.py
│       │   └── test_tools.py
│       ├── dbt_codegen
│       │   ├── __init__.py
│       │   └── test_tools.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── conftest.py
│       │   ├── test_exposures_fetcher.py
│       │   └── test_sources_fetcher.py
│       ├── lsp
│       │   ├── __init__.py
│       │   ├── test_lsp_client.py
│       │   ├── test_lsp_connection.py
│       │   └── test_lsp_tools.py
│       ├── oauth
│       │   ├── test_credentials_provider.py
│       │   ├── test_fastapi_app_pagination.py
│       │   └── test_token.py
│       ├── tools
│       │   ├── test_disable_tools.py
│       │   ├── test_tool_names.py
│       │   ├── test_tool_policies.py
│       │   └── test_toolsets.py
│       └── tracking
│           └── test_tracking.py
├── ui
│   ├── .gitignore
│   ├── assets
│   │   ├── dbt_logo BLK.svg
│   │   └── dbt_logo WHT.svg
│   ├── eslint.config.js
│   ├── index.html
│   ├── package.json
│   ├── pnpm-lock.yaml
│   ├── pnpm-workspace.yaml
│   ├── README.md
│   ├── src
│   │   ├── App.css
│   │   ├── App.tsx
│   │   ├── global.d.ts
│   │   ├── index.css
│   │   ├── main.tsx
│   │   └── vite-env.d.ts
│   ├── tsconfig.app.json
│   ├── tsconfig.json
│   ├── tsconfig.node.json
│   └── vite.config.ts
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/tests/unit/tools/test_tool_names.py:
--------------------------------------------------------------------------------

```python
 1 | from unittest.mock import patch
 2 | 
 3 | import pytest
 4 | 
 5 | from dbt_mcp.config.config import load_config
 6 | from dbt_mcp.dbt_cli.binary_type import BinaryType
 7 | from dbt_mcp.lsp.lsp_binary_manager import LspBinaryInfo
 8 | from dbt_mcp.mcp.server import create_dbt_mcp
 9 | from dbt_mcp.tools.tool_names import ToolName
10 | from dbt_mcp.tools.toolsets import proxied_tools
11 | from tests.env_vars import default_env_vars_context
12 | 
13 | 
@pytest.mark.asyncio
async def test_tool_names_match_server_tools():
    """Test that the ToolName enum matches the tools registered in the server."""
    with (
        default_env_vars_context(),
        patch(
            "dbt_mcp.config.config.detect_binary_type", return_value=BinaryType.DBT_CORE
        ),
        patch(
            "dbt_mcp.lsp.tools.dbt_lsp_binary_info",
            return_value=LspBinaryInfo(path="/path/to/lsp", version="1.0.0"),
        ),
    ):
        config = load_config()
        dbt_mcp = await create_dbt_mcp(config)

        # Get all tools from the server
        server_tools = await dbt_mcp.list_tools()
        # Manually adding the proxied (SQL) tools here because the server
        # doesn't get them in this unit test.
        server_tool_names = {tool.name for tool in server_tools} | {
            p.value for p in proxied_tools
        }
        # set() directly — the identity comprehension added nothing (ruff C401).
        enum_names = set(ToolName.get_all_tool_names())

        # Report both set differences so a mismatch is easy to diagnose.
        if server_tool_names != enum_names:
            raise ValueError(
                f"Tool name mismatch:\n"
                f"In server but not in enum: {server_tool_names - enum_names}\n"
                f"In enum but not in server: {enum_names - server_tool_names}"
            )

        # Double check that all enum values are strings
        for tool in ToolName:
            assert isinstance(tool.value, str), (
                f"Tool {tool.name} value should be a string"
            )
52 | 
53 | 
def test_tool_names_no_duplicates():
    """Ensure each tool name appears exactly once in the enum."""
    all_names = ToolName.get_all_tool_names()
    assert len(all_names) == len(set(all_names))
57 | 
```

--------------------------------------------------------------------------------
/examples/aws_strands_agent/dbt_data_scientist/agent.py:
--------------------------------------------------------------------------------

```python
 1 | """Main application entry point for dbt AWS agentcore multi-agent."""
 2 | 
 3 | from dotenv import load_dotenv
 4 | from bedrock_agentcore import BedrockAgentCoreApp
 5 | from strands import Agent
 6 | 
 7 | import prompts
 8 | 
 9 | from tools import (
10 |     dbt_compile,
11 |     dbt_mcp_tool,
12 |     dbt_model_analyzer_agent
13 | )
14 | 
# Load environment variables (e.g. credentials referenced by .env.example)
load_dotenv()

# Initialize the Bedrock Agent Core App
app = BedrockAgentCoreApp()

# Initialize the main agent that routes requests to the three dbt tools below.
# NOTE(review): callback_handler=None — presumably suppresses streaming output
# so responses come back whole; confirm against the strands Agent docs.
dbt_agent = Agent(
    system_prompt=prompts.ROOT_AGENT_INSTR,
    callback_handler=None,
    tools=[dbt_compile, dbt_mcp_tool, dbt_model_analyzer_agent]
)
27 | 
@app.entrypoint
def invoke(payload):
    """Main AI agent function with access to dbt tools."""
    user_message = payload.get("prompt", "Hello! How can I help you today?")

    try:
        # Run the agent and stringify its response for the caller.
        return {"result": str(dbt_agent(user_message))}
    except Exception as e:
        # Surface failures as a normal payload instead of raising.
        return {"result": f"Error processing your request: {str(e)}"}
44 | 
# Example usage for local testing
if __name__ == "__main__":
    print("\ndbt's Assistant Strands Agent\n")
    print(
        "Ask a question about our dbt mcp server, our local fusion compiler, "
        "or our data model analyzer and I'll route it to the appropriate specialist."
    )
    print("Type 'exit' to quit.")

    # Interactive REPL: forward each question to the agent until the user quits.
    while True:
        try:
            user_input = input("\n> ")
            if user_input.lower() == "exit":
                print("\nGoodbye! 👋")
                break
            # Route the question to the agent and print its full response.
            print(str(dbt_agent(user_input)))
        except KeyboardInterrupt:
            print("\n\nExecution interrupted. Exiting...")
            break
        except Exception as e:
            print(f"\nAn error occurred: {str(e)}")
            print("Please try asking a different question.")
71 | 
```

--------------------------------------------------------------------------------
/tests/unit/tools/test_tool_policies.py:
--------------------------------------------------------------------------------

```python
 1 | from unittest.mock import patch
 2 | 
 3 | from dbt_mcp.config.config import load_config
 4 | from dbt_mcp.dbt_cli.binary_type import BinaryType
 5 | from dbt_mcp.lsp.lsp_binary_manager import LspBinaryInfo
 6 | from dbt_mcp.mcp.server import create_dbt_mcp
 7 | from dbt_mcp.tools.policy import tool_policies
 8 | from dbt_mcp.tools.tool_names import ToolName
 9 | from dbt_mcp.tools.toolsets import proxied_tools
10 | from tests.env_vars import default_env_vars_context
11 | 
12 | 
async def test_tool_policies_match_server_tools():
    """Test that the tool_policies mapping matches the tools registered in the server."""
    with (
        default_env_vars_context(),
        patch(
            "dbt_mcp.config.config.detect_binary_type", return_value=BinaryType.DBT_CORE
        ),
        patch(
            "dbt_mcp.lsp.tools.dbt_lsp_binary_info",
            return_value=LspBinaryInfo(path="/path/to/lsp", version="1.0.0"),
        ),
    ):
        config = load_config()
        dbt_mcp = await create_dbt_mcp(config)

        # Get all tools from the server
        server_tools = await dbt_mcp.list_tools()
        # Manually adding the proxied (SQL) tools here because the server
        # doesn't get them in this unit test.
        server_tool_names = {tool.name for tool in server_tools} | {
            p.value for p in proxied_tools
        }
        # tool_policies is a dict keyed by tool name; set() of a dict is its keys.
        policy_names = set(tool_policies)

        # Error message fixed: this compares against the policy dict, not an enum.
        if server_tool_names != policy_names:
            raise ValueError(
                f"Tool name mismatch:\n"
                f"In server but not in policy: {server_tool_names - policy_names}\n"
                f"In policy but not in server: {policy_names - server_tool_names}"
            )
43 | 
44 | 
def test_tool_policies_match_tool_names():
    """Every ToolName member must have a policy entry, and vice versa."""
    # Policy keys appear to be lower-cased tool names; uppercasing aligns
    # them with the enum member names for comparison.
    policy_names = {name.upper() for name in tool_policies}
    tool_names = {member.name for member in ToolName}
    if tool_names != policy_names:
        raise ValueError(
            f"Tool name mismatch:\n"
            f"In tool names but not in policy: {tool_names - policy_names}\n"
            f"In policy but not in tool names: {policy_names - tool_names}"
        )
54 | 
55 | 
def test_tool_policies_no_duplicates():
    """Test that there are no duplicate tool names in the policy."""
    # NOTE(review): tool_policies is a dict, so its keys are unique by
    # construction and this assertion can never fail — consider a stronger
    # check (e.g. case-insensitive uniqueness) or removing the test.
    assert len(tool_policies) == len(set(tool_policies.keys()))
59 | 
```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
 1 | [project]
 2 | name = "dbt-mcp"
 3 | description = "A MCP (Model Context Protocol) server for interacting with dbt resources."
 4 | authors = [{ name = "dbt Labs" }]
 5 | readme = "README.md"
 6 | license = { file = "LICENSE" }
 7 | # until pyarrow releases wheels for 3.14 https://github.com/apache/arrow/issues/47438
 8 | requires-python = ">=3.12,<3.14"
 9 | dynamic = ["version"]
10 | keywords = [
11 |   "dbt",
12 |   "mcp",
13 |   "model-context-protocol",
14 |   "data",
15 |   "analytics",
16 |   "ai-agent",
17 |   "llm",
18 | ]
19 | classifiers = [
20 |   "Development Status :: 5 - Production/Stable",
21 |   "Intended Audience :: Developers",
22 |   "Intended Audience :: Information Technology",
23 |   "Intended Audience :: System Administrators",
24 |   "License :: OSI Approved :: Apache Software License",
25 |   "Operating System :: OS Independent",
26 |   "Programming Language :: Python :: 3",
27 |   "Topic :: Scientific/Engineering :: Information Analysis",
28 |   "Typing :: Typed",
29 | ]
30 | dependencies = [
31 |   # Pinning all dependencies because this app is installed with uvx
32 |   # and we want to have a consistent install as much as possible.
33 |   "authlib==1.6.5",
34 |   "dbt-protos==1.0.382",
35 |   "dbt-sl-sdk[sync]==0.13.0",
36 |   "dbtlabs-vortex==0.2.0",
37 |   "fastapi==0.116.1",
38 |   "uvicorn==0.30.6",
39 |   "mcp[cli]==1.10.1",
40 |   "pandas==2.2.3",
41 |   "pydantic-settings==2.10.1",
42 |   "pyjwt==2.10.1",
43 |   "pyyaml==6.0.2",
44 |   "requests==2.32.4",
45 |   "filelock>=3.18.0",
46 | ]
47 | [dependency-groups]
48 | dev = [
49 |   "ruff>=0.11.2",
50 |   "types-requests>=2.32.0.20250328",
51 |   "mypy>=1.12.1",
52 |   "pre-commit>=4.2.0",
53 |   "pytest-asyncio>=0.26.0",
54 |   "pytest>=8.3.5",
55 |   "openai>=1.71.0",
56 |   "pyarrow-stubs>=19.1",
57 |   "types-pyyaml>=6.0.12.20250516",
58 |   "types-authlib>=1.6.4.20250920",
59 | ]
60 | 
61 | [project.urls]
62 | Documentation = "https://docs.getdbt.com/docs/dbt-ai/about-mcp"
63 | Issues = "https://github.com/dbt-labs/dbt-mcp/issues"
64 | Source = "https://github.com/dbt-labs/dbt-mcp"
65 | Changelog = "https://github.com/dbt-labs/dbt-mcp/blob/main/CHANGELOG.md"
66 | 
67 | [project.scripts]
68 | dbt-mcp = "dbt_mcp.main:main"
69 | 
70 | [build-system]
71 | requires = ["hatchling", "hatch-vcs"]
72 | build-backend = "hatchling.build"
73 | 
74 | [tool.hatch.build.targets.sdist]
75 | include = ["src/dbt_mcp/**/*", "README.md", "LICENSE"]
76 | 
77 | [tool.hatch.version]
78 | source = "vcs"
79 | 
80 | [tool.pytest.ini_options]
81 | asyncio_mode = "auto"
82 | asyncio_default_fixture_loop_scope = "function"
83 | pythonpath = [".", "src"]
84 | 
85 | [tool.ruff.lint]
86 | extend-select = ["UP"] # UP=pyupgrade
87 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/get_job_run_error.md:
--------------------------------------------------------------------------------

```markdown
 1 | Get focused error information for a failed dbt job run.
 2 | 
 3 | This tool retrieves and analyzes job run failures to provide concise, actionable error details optimized for troubleshooting. Instead of verbose run details, it returns structured error information with minimal token usage.
 4 | 
 5 | ## Parameters
 6 | 
 7 | - run_id (required): The run ID to analyze for error information
 8 | 
 9 | ## Returns
10 | 
11 | Structured error information with `failed_steps` containing a list of failed step details:
12 | 
13 | - failed_steps: List of failed steps, each containing:
14 |   - target: The dbt target environment where the failure occurred
15 |   - step_name: The failed step that caused the run to fail
16 |   - finished_at: Timestamp when the failed step completed
17 |   - errors: List of specific error details, each with:
18 |     - unique_id: Model/test unique identifier (nullable)
19 |     - relation_name: Database relation name or "No database relation"
20 |     - message: Error message
21 |     - compiled_code: Raw compiled SQL code (nullable)
22 |     - truncated_logs: Raw truncated debug log output (nullable)
23 | 
24 | NOTE: The "truncated_logs" key only populates if there is no `run_results.json` artifact to parse after a job run error.
25 | 
26 | ## Error Types Handled
27 | 
28 | - Model execution
29 | - Data and unit tests
30 | - Source freshness
31 | - Snapshot
32 | - Data constraints / contracts
33 | - Cancelled runs (with and without executed steps)
34 | 
35 | ## Use Cases
36 | 
37 | - Quick failure diagnosis
38 | - LLM-optimized troubleshooting
39 | - Automated monitoring
40 | - Failure pattern analysis
41 | - Rapid incident response
42 | 
43 | ## Advantages over get_job_run_details
44 | 
45 | - Reduced token usage by filtering for relevant error information
46 | - Returns errors in a structured format
47 | - Handles source freshness errors in addition to model/test errors
48 | 
49 | ## Example Usage
50 | 
51 | ```json
52 | {
53 |   "run_id": 789
54 | }
55 | ```
56 | 
57 | ## Example Response
58 | 
59 | ```json
60 | {
61 |   "failed_steps": [
62 |     {
63 |       "target": "prod",
64 |       "step_name": "Invoke dbt with `dbt run --models staging`",
65 |       "finished_at": "2025-09-17 14:32:15.123456+00:00",
66 |       "errors": [
67 |         {
68 |           "unique_id": "model.analytics.stg_users",
69 |           "relation_name": "analytics_staging.stg_users",
70 |           "message": "Syntax error: Expected end of input but got keyword SELECT at line 15",
71 |           "compiled_code": "SELECT\n  id,\n  name\nFROM raw_users\nSELECT -- duplicate SELECT causes error",
72 |           "truncated_logs": null
73 |         }
74 |       ]
75 |     }
76 |   ]
77 | }
78 | ```
79 | 
80 | ## Response Information
81 | 
82 | The focused response provides only the essential error context needed for quick diagnosis and resolution of dbt job failures.
```

--------------------------------------------------------------------------------
/src/dbt_mcp/oauth/context_manager.py:
--------------------------------------------------------------------------------

```python
 1 | import logging
 2 | from pathlib import Path
 3 | 
 4 | import yaml
 5 | 
 6 | from dbt_mcp.oauth.dbt_platform import DbtPlatformContext
 7 | 
 8 | logger = logging.getLogger(__name__)
 9 | 
10 | 
class DbtPlatformContextManager:
    """
    Manages dbt platform context files and context creation.

    NOTE(review): no file locking is performed anywhere in this class, so
    concurrent writers could interleave — confirm whether locking (e.g. via
    the filelock dependency) is required before relying on this across
    processes.
    """

    def __init__(self, config_location: Path):
        # Path to the YAML file holding the persisted platform context.
        self.config_location = config_location

    def read_context(self) -> DbtPlatformContext | None:
        """Read the current context from the config file.

        Returns None when the file is missing, empty, or does not parse to a
        mapping; parse and IO failures are logged rather than raised.
        """
        if not self.config_location.exists():
            return None
        try:
            content = self.config_location.read_text()
            if not content.strip():
                return None
            parsed_content = yaml.safe_load(content)
            if parsed_content is None or not isinstance(parsed_content, dict):
                logger.warning("dbt Platform Context YAML file is invalid")
                return None
            return DbtPlatformContext(**parsed_content)
        except yaml.YAMLError as e:
            # Lazy %-style args avoid formatting work when the level is disabled.
            logger.error("Failed to parse YAML from %s: %s", self.config_location, e)
        except Exception as e:
            logger.error("Failed to read context from %s: %s", self.config_location, e)
        return None

    def update_context(
        self, new_dbt_platform_context: DbtPlatformContext
    ) -> DbtPlatformContext:
        """
        Update the existing context by merging with new context data.
        Reads existing context, merges with new data, and writes back to file.
        """
        existing_dbt_platform_context = self.read_context()
        if existing_dbt_platform_context is None:
            existing_dbt_platform_context = DbtPlatformContext()
        next_dbt_platform_context = existing_dbt_platform_context.override(
            new_dbt_platform_context
        )
        self.write_context_to_file(next_dbt_platform_context)
        return next_dbt_platform_context

    def _ensure_config_location_exists(self) -> None:
        """Ensure the config file location and its parent directories exist."""
        self.config_location.parent.mkdir(parents=True, exist_ok=True)
        if not self.config_location.exists():
            self.config_location.touch()

    def write_context_to_file(self, context: DbtPlatformContext) -> None:
        """Serialize *context* to YAML and write it to the config file."""
        self._ensure_config_location_exists()
        self.config_location.write_text(
            yaml.dump(context.model_dump(), default_flow_style=False)
        )
66 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/oauth/dbt_platform.py:
--------------------------------------------------------------------------------

```python
 1 | from __future__ import annotations
 2 | 
 3 | from typing import Any
 4 | 
 5 | from pydantic import BaseModel
 6 | 
 7 | from dbt_mcp.oauth.token import (
 8 |     AccessTokenResponse,
 9 |     DecodedAccessToken,
10 |     fetch_jwks_and_verify_token,
11 | )
12 | 
13 | 
class DbtPlatformAccount(BaseModel):
    """A dbt platform account record."""

    id: int
    name: str
    locked: bool
    state: int
    static_subdomain: str | None
    vanity_subdomain: str | None

    @property
    def host_prefix(self) -> str | None:
        """Subdomain to use as the host prefix; static wins over vanity.

        Returns None when neither subdomain is set.
        """
        # `or` falls through falsy values exactly like the sequential checks.
        return self.static_subdomain or self.vanity_subdomain or None
29 | 
30 | 
class DbtPlatformProject(BaseModel):
    """A dbt platform project, including its owning account's id and name."""

    id: int
    name: str
    account_id: int
    account_name: str
36 | 
37 | 
class DbtPlatformEnvironmentResponse(BaseModel):
    """Environment as returned by the API; deployment_type may be null."""

    id: int
    name: str
    deployment_type: str | None
42 | 
43 | 
class DbtPlatformEnvironment(BaseModel):
    """Environment with a required deployment_type (cf. DbtPlatformEnvironmentResponse)."""

    id: int
    name: str
    deployment_type: str
48 | 
49 | 
class SelectedProjectRequest(BaseModel):
    """Request payload identifying a selected project by account and project id."""

    account_id: int
    project_id: int
53 | 
54 | 
def dbt_platform_context_from_token_response(
    token_response: dict[str, Any], dbt_platform_url: str
) -> DbtPlatformContext:
    """Build a DbtPlatformContext from a raw OAuth token response.

    Validates the response shape, verifies the access token against the
    platform's JWKS, and bundles the raw token with its decoded claims.
    """
    access_token_response = AccessTokenResponse(**token_response)
    claims = fetch_jwks_and_verify_token(
        access_token_response.access_token, dbt_platform_url
    )
    decoded = DecodedAccessToken(
        access_token_response=access_token_response,
        decoded_claims=claims,
    )
    return DbtPlatformContext(decoded_access_token=decoded)
69 | 
70 | 
class DbtPlatformContext(BaseModel):
    """Login/selection state for the dbt platform; merge instances via override()."""

    decoded_access_token: DecodedAccessToken | None = None
    host_prefix: str | None = None
    dev_environment: DbtPlatformEnvironment | None = None
    prod_environment: DbtPlatformEnvironment | None = None
    account_id: int | None = None

    @property
    def user_id(self) -> int | None:
        """User id from the token's `sub` claim, or None when no token is held."""
        token = self.decoded_access_token
        if not token:
            return None
        return int(token.decoded_claims["sub"])

    def override(self, other: DbtPlatformContext) -> DbtPlatformContext:
        """Return a new context where fields set on *other* take precedence."""
        return DbtPlatformContext(
            decoded_access_token=other.decoded_access_token
            or self.decoded_access_token,
            host_prefix=other.host_prefix or self.host_prefix,
            dev_environment=other.dev_environment or self.dev_environment,
            prod_environment=other.prod_environment or self.prod_environment,
            account_id=other.account_id or self.account_id,
        )
```

--------------------------------------------------------------------------------
/examples/google_adk_agent/main.py:
--------------------------------------------------------------------------------

```python
 1 | import asyncio
 2 | import os
 3 | from pathlib import Path
 4 | 
 5 | from google.adk.agents import LlmAgent
 6 | from google.adk.runners import Runner
 7 | from google.adk.sessions import InMemorySessionService
 8 | from google.adk.tools.mcp_tool.mcp_toolset import McpToolset
 9 | from google.adk.tools.mcp_tool.mcp_session_manager import (
10 |     StdioConnectionParams,
11 |     StdioServerParameters,
12 | )
13 | from google.genai import types
14 | 
15 | 
16 | async def main():  # Interactive REPL: Google ADK agent wired to the dbt-mcp server over stdio
17 |     if not os.environ.get("GOOGLE_GENAI_API_KEY"):  # NOTE(review): google-genai conventionally reads GOOGLE_API_KEY — confirm this variable name is the one ADK honors
18 |         print("Missing GOOGLE_GENAI_API_KEY environment variable.")
19 |         print("Get your API key from: https://aistudio.google.com/apikey")
20 |         return
21 | 
22 |     dbt_mcp_dir = Path(__file__).parent.parent.parent  # repo root: examples/google_adk_agent/main.py -> repo
23 | 
24 |     toolset = McpToolset(
25 |         connection_params=StdioConnectionParams(
26 |             server_params=StdioServerParameters(
27 |                 command="uvx",
28 |                 args=["--env-file", f"{dbt_mcp_dir}/.env", "dbt-mcp"],  # launch dbt-mcp with config from the repo-root .env
29 |                 env=os.environ.copy(),
30 |             )
31 |         )
32 |     )
33 | 
34 |     agent = LlmAgent(
35 |         name="dbt_assistant",
36 |         model=os.environ.get("ADK_MODEL", "gemini-2.0-flash"),
37 |         instruction="You are a helpful dbt assistant with access to dbt tools via MCP Tools.",
38 |         tools=[toolset],
39 |     )
40 | 
41 |     runner = Runner(
42 |         agent=agent,
43 |         app_name="dbt_adk_agent",
44 |         session_service=InMemorySessionService(),
45 |     )
46 | 
47 |     await runner.session_service.create_session(  # session must exist before runner.run references it below
48 |         app_name="dbt_adk_agent", user_id="user", session_id="session_1"
49 |     )
50 | 
51 |     print("Google ADK + dbt MCP Agent ready! Type 'quit' to exit.\n")
52 | 
53 |     while True:
54 |         try:
55 |             user_input = input("User > ").strip()
56 | 
57 |             if user_input.lower() in {"quit", "exit", "q"}:
58 |                 print("Goodbye!")
59 |                 break
60 | 
61 |             if not user_input:
62 |                 continue
63 | 
64 |             events = runner.run(  # returns an iterable of agent events, consumed below
65 |                 user_id="user",
66 |                 session_id="session_1",
67 |                 new_message=types.Content(
68 |                     role="user", parts=[types.Part(text=user_input)]
69 |                 ),
70 |             )
71 | 
72 |             for event in events:  # defensively skip events lacking textual content
73 |                 if hasattr(event, "content") and hasattr(event.content, "parts"):
74 |                     for part in event.content.parts:
75 |                         if hasattr(part, "text") and part.text:
76 |                             print(f"Assistant: {part.text}")
77 | 
78 |         except (EOFError, KeyboardInterrupt):
79 |             print("\nGoodbye!")
80 |             break
81 |         except Exception as e:  # keep the REPL alive on per-turn failures
82 |             print(f"Error: {e}")
83 | 
84 | 
85 | if __name__ == "__main__":
86 |     try:
87 |         asyncio.run(main())
88 |     except KeyboardInterrupt:
89 |         print("\nExiting.")
90 | 
```

--------------------------------------------------------------------------------
/examples/pydantic_ai_agent/main.py:
--------------------------------------------------------------------------------

```python
 1 | import asyncio
 2 | import os
 3 | from pydantic_ai import Agent, RunContext  # type: ignore
 4 | from pydantic_ai.mcp import MCPServerStreamableHTTP  # type: ignore
 5 | from pydantic_ai.messages import (  # type: ignore
 6 |     FunctionToolCallEvent,
 7 | )
 8 | 
 9 | 
10 | async def main():
11 |     """Start a conversation using PydanticAI with an HTTP MCP server."""
12 | 
13 |     prod_environment_id = os.environ.get("DBT_PROD_ENV_ID", os.getenv("DBT_ENV_ID"))
14 |     token = os.environ.get("DBT_TOKEN")
15 |     host = os.environ.get("DBT_HOST", "cloud.getdbt.com")
16 | 
17 |     # Configure MCP server connection
18 |     mcp_server_url = f"https://{host}/api/ai/v1/mcp/"
19 |     mcp_server_headers = {
20 |         "Authorization": f"token {token}",
21 |         "x-dbt-prod-environment-id": prod_environment_id,
22 |     }
23 |     server = MCPServerStreamableHTTP(url=mcp_server_url, headers=mcp_server_headers)
24 | 
25 |     # Initialize the agent with OpenAI model and MCP tools
26 |     # PydanticAI also supports Anthropic models, Google models, and more
27 |     agent = Agent(
28 |         "openai:gpt-5",
29 |         toolsets=[server],
30 |         system_prompt="You are a helpful AI assistant with access to MCP tools.",
31 |     )
32 | 
33 |     print("Starting conversation with PydanticAI + MCP server...")
34 |     print("Type 'quit' to exit\n")
35 | 
36 |     async with agent:
37 |         while True:
38 |             try:
39 |                 user_input = input("You: ").strip()
40 | 
41 |                 if user_input.lower() in ["quit", "exit", "q"]:
42 |                     print("Goodbye!")
43 |                     break
44 | 
45 |                 if not user_input:
46 |                     continue
47 | 
48 |                 # Event handler for real-time tool call detection
49 |                 async def event_handler(ctx: RunContext, event_stream):
50 |                     async for event in event_stream:
51 |                         if isinstance(event, FunctionToolCallEvent):
52 |                             print(f"\n🔧 Tool called: {event.part.tool_name}")
53 |                             print(f"   Arguments: {event.part.args}")
54 |                             print("Assistant: ", end="", flush=True)
55 | 
56 |                 # Stream the response with real-time events
57 |                 print("Assistant: ", end="", flush=True)
58 |                 async with agent.run_stream(
59 |                     user_input, event_stream_handler=event_handler
60 |                 ) as result:
61 |                     async for text in result.stream_text(delta=True):
62 |                         print(text, end="", flush=True)
63 |                 print()  # New line after response
64 | 
65 |             except KeyboardInterrupt:
66 |                 print("\nGoodbye!")
67 |                 break
68 |             except Exception as e:
69 |                 print(f"Error: {e}")
70 | 
71 | 
72 | if __name__ == "__main__":
73 |     asyncio.run(main())
74 | 
```

--------------------------------------------------------------------------------
/tests/unit/lsp/test_lsp_client.py:
--------------------------------------------------------------------------------

```python
 1 | """Tests for the DbtLspClient class."""
 2 | 
 3 | from unittest.mock import MagicMock
 4 | 
 5 | import pytest
 6 | 
 7 | from dbt_mcp.lsp.lsp_client import LSPClient
 8 | from dbt_mcp.lsp.lsp_connection import LSPConnection, LspConnectionState
 9 | 
10 | 
11 | @pytest.fixture
12 | def mock_lsp_connection() -> LSPConnection:
13 |     """Create a mock LSP connection manager."""
14 |     connection = MagicMock(spec=LSPConnection)  # spec= keeps the mock's attribute surface honest
15 |     connection.state = LspConnectionState(initialized=True, compiled=True)  # pretend the server is ready
16 |     return connection
17 | 
18 | 
19 | @pytest.fixture
20 | def lsp_client(mock_lsp_connection: LSPConnection):
21 |     """Create an LSP client with a mock connection manager."""
22 |     return LSPClient(mock_lsp_connection)
23 | 
24 | 
25 | @pytest.mark.asyncio
26 | async def test_get_column_lineage_success(lsp_client, mock_lsp_connection):
27 |     """Test successful column lineage request."""
28 |     # Setup mock
29 |     mock_result = {
30 |         "nodes": [
31 |             {"model": "upstream_model", "column": "id"},
32 |             {"model": "current_model", "column": "customer_id"},
33 |         ]
34 |     }
35 | 
36 |     mock_lsp_connection.send_request.return_value = mock_result
37 | 
38 |     # Execute
39 |     result = await lsp_client.get_column_lineage(
40 |         model_id="model.my_project.my_model",
41 |         column_name="customer_id",
42 |     )
43 | 
44 |     # Verify
45 |     assert result == mock_result
46 |     mock_lsp_connection.send_request.assert_called_once_with(
47 |         "workspace/executeCommand",
48 |         {
49 |             "command": "dbt.listNodes",
50 |             "arguments": ["+column:model.my_project.my_model.CUSTOMER_ID+"],  # NOTE(review): client upper-cases the column name in the selector — presumably warehouse casing; confirm in LSPClient
51 |         },
52 |     )
53 | 
54 | 
55 | @pytest.mark.asyncio
56 | async def test_list_nodes_success(lsp_client, mock_lsp_connection):
57 |     """Test successful list nodes request."""
58 |     # Setup mock
59 |     mock_result = {
60 |         "nodes": ["model.my_project.upstream1", "model.my_project.upstream2"],
61 |     }
62 | 
63 |     mock_lsp_connection.send_request.return_value = mock_result
64 | 
65 |     # Execute
66 |     result = await lsp_client._list_nodes(  # exercises the private helper directly
67 |         model_selector="+model.my_project.my_model+",
68 |     )
69 | 
70 |     # Verify
71 |     assert result == mock_result
72 |     mock_lsp_connection.send_request.assert_called_once_with(
73 |         "workspace/executeCommand",
74 |         {"command": "dbt.listNodes", "arguments": ["+model.my_project.my_model+"]},
75 |     )
76 | 
77 | 
78 | @pytest.mark.asyncio
79 | async def test_get_column_lineage_error(lsp_client, mock_lsp_connection):
80 |     """Test column lineage request with LSP error."""
81 |     # Setup mock to raise an error
82 |     mock_lsp_connection.send_request.return_value = {"error": "LSP server error"}  # errors are returned in-band, not raised
83 | 
84 |     # Execute and verify exception is raised
85 |     result = await lsp_client.get_column_lineage(
86 |         model_id="model.my_project.my_model",
87 |         column_name="customer_id",
88 |     )
89 | 
90 |     assert result == {"error": "LSP server error"}
```

--------------------------------------------------------------------------------
/src/dbt_mcp/oauth/login.py:
--------------------------------------------------------------------------------

```python
 1 | import errno
 2 | import logging
 3 | import secrets
 4 | import webbrowser
 5 | from importlib import resources
 6 | 
 7 | from authlib.integrations.requests_client import OAuth2Session
 8 | from uvicorn import Config, Server
 9 | 
10 | from dbt_mcp.oauth.client_id import OAUTH_CLIENT_ID
11 | from dbt_mcp.oauth.context_manager import DbtPlatformContextManager
12 | from dbt_mcp.oauth.dbt_platform import DbtPlatformContext
13 | from dbt_mcp.oauth.fastapi_app import create_app
14 | from dbt_mcp.oauth.logging import disable_server_logs
15 | 
16 | logger = logging.getLogger(__name__)
17 | 
18 | 
19 | async def login(
20 |     *,
21 |     dbt_platform_url: str,
22 |     port: int,
23 |     dbt_platform_context_manager: DbtPlatformContextManager,
24 | ) -> DbtPlatformContext:
25 |     """Start OAuth login flow with PKCE using authlib and return
26 |     the decoded access token
27 |     """
28 |     # 'offline_access' scope indicates that we want to request a refresh token
29 |     # 'user_access' is equivalent to a PAT
30 |     scope = "user_access offline_access"
31 | 
32 |     # Create OAuth2Session with PKCE support
33 |     client = OAuth2Session(
34 |         client_id=OAUTH_CLIENT_ID,
35 |         redirect_uri=f"http://localhost:{port}",  # local callback server started below
36 |         scope=scope,
37 |         code_challenge_method="S256",
38 |     )
39 | 
40 |     # Generate code_verifier
41 |     code_verifier = secrets.token_urlsafe(32)  # PKCE verifier; authlib derives the S256 challenge from it
42 | 
43 |     # Generate authorization URL with PKCE
44 |     authorization_url, state = client.create_authorization_url(
45 |         url=f"{dbt_platform_url}/oauth/authorize",
46 |         code_verifier=code_verifier,
47 |     )
48 | 
49 |     try:
50 |         # Resolve static assets directory from package
51 |         package_root = resources.files("dbt_mcp")
52 |         packaged_dist = package_root / "ui" / "dist"
53 |         if not packaged_dist.is_dir():
54 |             raise FileNotFoundError(f"{packaged_dist} not found in packaged resources")
55 |         static_dir = str(packaged_dist)
56 | 
57 |         # Create FastAPI app and Uvicorn server
58 |         app = create_app(
59 |             oauth_client=client,
60 |             state_to_verifier={state: code_verifier},  # lets the callback route look up the verifier for this attempt
61 |             dbt_platform_url=dbt_platform_url,
62 |             static_dir=static_dir,
63 |             dbt_platform_context_manager=dbt_platform_context_manager,
64 |         )
65 |         config = Config(
66 |             app=app,
67 |             host="127.0.0.1",
68 |             port=port,
69 |         )
70 |         server = Server(config)
71 |         app.state.server_ref = server  # presumably lets the callback route stop the server — confirm in fastapi_app
72 | 
73 |         logger.info("Opening authorization URL")
74 |         webbrowser.open(authorization_url)
75 |         # Logs have to be disabled because they mess up stdio MCP communication
76 |         disable_server_logs()
77 |         await server.serve()  # blocks until the server is shut down (after the OAuth callback completes)
78 | 
79 |         if not app.state.dbt_platform_context:
80 |             raise ValueError("Undefined login state")
81 |         logger.info("Login successful")
82 |         return app.state.dbt_platform_context
83 |     except OSError as e:
84 |         if e.errno == errno.EADDRINUSE:  # common case: another login attempt or app already bound to the port
85 |             logger.error(f"Error: Port {port} is already in use.")
86 |         raise
87 | 
```

--------------------------------------------------------------------------------
/examples/aws_strands_agent/dbt_data_scientist/tools/dbt_compile.py:
--------------------------------------------------------------------------------

```python
 1 | from strands import Agent, tool
 2 | from strands_tools import python_repl, shell, file_read, file_write, editor
 3 | import os, json, subprocess
 4 | 
 5 | from dotenv import load_dotenv
 6 | 
 7 | DBT_COMPILE_ASSISTANT_SYSTEM_PROMPT = """
 8 | You are a dbt pipeline expert, a specialized assistant for dbt pipeline analysis and troubleshooting. Your capabilities include:
 9 | 
10 | When asked to 'find a problem' or 'compile a project' on your local dbt project, inspect its JSON logs,
11 |     and then:
12 |     1) Summarize the problem(s) (file, node, message).
13 |     2) Recommend a concrete fix in 1-3 bullet points (e.g., correct ref(), add column, fix Jinja).
14 |     3) If no errors, say compile is clean and suggest next step (e.g., run build state:modified+).
15 | """
16 | 
17 | @tool
18 | def dbt_compile(query: str) -> str:
19 |     """
20 |     Runs `dbt compile --log-format json` in the DBT_ROOT and returns:
21 |     returncode
22 |     logs: list of compiled JSON events (dbt emits JSON per line)
23 |     """
24 |     # Load environment variables from .env file
25 |     load_dotenv()
26 | 
27 |     # Get the DBT_ROOT environment variable, default to current directory
28 |     dbt_project_location = os.getenv("DBT_PROJECT_LOCATION", os.getcwd())
29 |     dbt_executable = os.getenv("DBT_EXECUTABLE")
30 |     
31 |     print(f"Running dbt compile in: {dbt_project_location}")
32 |     print(f"Running dbt executable located here: {dbt_executable}")
33 | 
34 |     proc = subprocess.run(
35 |             [dbt_executable, "compile", "--log-format", "json"],
36 |             cwd=dbt_project_location,
37 |             text=True,
38 |             capture_output=True
39 |         )
40 |     print(proc)
41 |     logs: List[Dict] = []
42 |     for stream in (proc.stdout, proc.stderr):
43 |         for line in stream.splitlines():
44 |             try:
45 |                 logs.append(json.loads(line))
46 |             except json.JSONDecodeError:
47 |                 # ignore non-JSON lines quietly
48 |                 pass
49 |     output_of_compile = {"returncode": proc.returncode, "logs": logs}
50 | 
51 |     # Format the query for the compile agent with clear instructions
52 |     formatted_query = f"User will have asked to compile a dbt project. Summarize the results of the compile command output: {output_of_compile}"
53 |     
54 |     try:
55 |         print("Routed to dbt compile agent")
56 |         # Create the dbt compile agent with relevant tools
57 |         dbt_compile_agent = Agent(
58 |             system_prompt=DBT_COMPILE_ASSISTANT_SYSTEM_PROMPT,
59 |             tools=[],
60 |         )
61 |         agent_response = dbt_compile_agent(formatted_query)
62 |         text_response = str(agent_response)
63 | 
64 |         if len(text_response) > 0:
65 |             return text_response
66 |         
67 |         return "I apologize, but I couldn't process your dbt compile question. Please try rephrasing or providing more specific details about what you're trying to learn or accomplish."
68 |     except Exception as e:
69 |         # Return specific error message for dbt compile processing
70 |         return f"Error processing your dbt compile query: {str(e)}"
```

--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Release dbt-mcp
  2 | 
  3 | permissions:
  4 |   contents: write
  5 | 
  6 | on:
  7 |   push:
  8 |     branches:
  9 |       - main
 10 |     paths:
 11 |       - CHANGELOG.md
 12 | 
 13 | jobs:
 14 |   create-release-tag:
 15 |     runs-on: ubuntu-latest
 16 |     outputs:
 17 |       changie-latest: ${{ steps.changie-latest.outputs.output }}
 18 |     if: "startsWith(github.event.head_commit.message, 'version:')"
 19 |     steps:
 20 |       - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
 21 |       - uses: ./.github/actions/setup-python
 22 |       - name: Get the latest version
 23 |         id: changie-latest
 24 |         uses: miniscruff/changie-action@6dcc2533cac0495148ed4046c438487e4dceaa23
 25 |         with:
 26 |           version: latest
 27 |           args: latest
 28 |       - name: Create tag
 29 |         run: |
 30 |           TAG="${{ steps.changie-latest.outputs.output }}"
 31 |           git config user.name "release-bot"
 32 |           git config user.email "[email protected]"
 33 |           git tag "$TAG"
 34 |           git push origin "$TAG"
 35 | 
 36 |   pypi-publish:
 37 |     runs-on: ubuntu-latest
 38 |     needs: create-release-tag
 39 |     environment:
 40 |       name: pypi
 41 |     permissions:
 42 |       id-token: write
 43 | 
 44 |     steps:
 45 |       - name: checkout code
 46 |         uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
 47 |         with:
 48 |           ref: ${{ needs.create-release-tag.outputs.changie-latest }}
 49 |           fetch-tags: true
 50 | 
 51 |       - name: setup python
 52 |         uses: ./.github/actions/setup-python
 53 |         id: setup-python
 54 | 
 55 |       - uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda
 56 |         with:
 57 |           version: 10
 58 | 
 59 |       - name: Install go-task
 60 |         run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b /usr/local/bin
 61 | 
 62 |       - name: Run tests
 63 |         run: task test:unit
 64 | 
 65 |       - name: Build
 66 |         run: task build
 67 | 
 68 |       - name: Publish package distributions to PyPI
 69 |         uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc
 70 |         with:
 71 |           packages-dir: ./dist
 72 | 
 73 |   create-github-release:
 74 |     runs-on: ubuntu-latest
 75 |     needs: create-release-tag
 76 |     permissions:
 77 |       contents: write # required to create a release
 78 | 
 79 |     steps:
 80 |       - name: Checkout code
 81 |         uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
 82 |         with:
 83 |           fetch-depth: 0 # Fetches all history for changie to work correctly
 84 | 
 85 |       - name: Get the latest version
 86 |         id: changie-latest
 87 |         uses: miniscruff/changie-action@6dcc2533cac0495148ed4046c438487e4dceaa23
 88 |         with:
 89 |           version: latest
 90 |           args: latest
 91 | 
 92 |       - name: Create GitHub Release
 93 |         env:
 94 |           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 95 |           VERSION: ${{ steps.changie-latest.outputs.output }}
 96 |         run: |
 97 |           echo "Creating release for version: $VERSION"
 98 |           gh release create "$VERSION" \
 99 |             --notes-file ".changes/$VERSION.md" \
100 |             --title "$VERSION"
101 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/oauth/refresh_strategy.py:
--------------------------------------------------------------------------------

```python
 1 | import asyncio
 2 | import time
 3 | from typing import Protocol
 4 | 
 5 | 
 6 | class RefreshStrategy(Protocol):  # structural interface — implementations need not inherit from it
 7 |     """Protocol for handling token refresh timing and waiting."""
 8 | 
 9 |     async def wait_until_refresh_needed(self, expires_at: int) -> None:
10 |         """
11 |         Wait until token refresh is needed, then return.
12 | 
13 |         Args:
14 |             expires_at: Token expiration time as Unix timestamp
15 |         """
16 |         ...
17 | 
18 |     async def wait_after_error(self) -> None:
19 |         """
20 |         Wait an appropriate amount of time after an error before retrying.
21 |         """
22 |         ...
23 | 
24 | 
25 | class DefaultRefreshStrategy:
26 |     """Default strategy that refreshes tokens with a buffer before expiry."""
27 | 
28 |     def __init__(self, buffer_seconds: int = 300, error_retry_delay: float = 5.0):
29 |         """
30 |         Initialize with timing configuration.
31 | 
32 |         Args:
33 |             buffer_seconds: How many seconds before expiry to refresh
34 |                 (default: 5 minutes)
35 |             error_retry_delay: How many seconds to wait before retrying after an error
36 |                 (default: 5 seconds)
37 |         """
38 |         self.buffer_seconds = buffer_seconds
39 |         self.error_retry_delay = error_retry_delay
40 | 
41 |     async def wait_until_refresh_needed(self, expires_at: int) -> None:
42 |         """Wait until refresh is needed (buffer seconds before expiry)."""
43 |         current_time = time.time()
44 |         refresh_time = expires_at - self.buffer_seconds
45 |         time_until_refresh = max(refresh_time - current_time, 0)  # clamp: already past the refresh point -> no wait
46 | 
47 |         if time_until_refresh > 0:  # skip the no-op sleep(0) event-loop yield
48 |             await asyncio.sleep(time_until_refresh)
49 | 
50 |     async def wait_after_error(self) -> None:
51 |         """Wait the configured error retry delay before retrying."""
52 |         await asyncio.sleep(self.error_retry_delay)
53 | 
54 | 
55 | class MockRefreshStrategy:
56 |     """Mock refresh strategy for testing that allows controlling all timing behavior."""
57 | 
58 |     def __init__(self, wait_seconds: float = 1.0):
59 |         """
60 |         Initialize mock refresh strategy.
61 | 
62 |         Args:
63 |             wait_seconds: Number of seconds to wait for testing simulations
64 |         """
65 |         self.wait_seconds = wait_seconds
66 |         self.wait_calls: list[int] = []  # expires_at values passed to wait_until_refresh_needed
67 |         self.wait_durations: list[float] = []  # simulated sleep duration per call
68 |         self.error_wait_calls: int = 0  # number of wait_after_error invocations
69 | 
70 |     async def wait_until_refresh_needed(self, expires_at: int) -> None:
71 |         """Record the call and simulate waiting for the configured duration."""
72 |         self.wait_calls.append(expires_at)
73 |         self.wait_durations.append(self.wait_seconds)
74 |         await asyncio.sleep(self.wait_seconds)  # real (short) sleep so tests exercise actual scheduling
75 | 
76 |     async def wait_after_error(self) -> None:
77 |         """Record the error wait call and simulate waiting for configured duration."""
78 |         self.error_wait_calls += 1
79 |         await asyncio.sleep(self.wait_seconds)
80 | 
81 |     def reset(self) -> None:
82 |         """Reset all recorded calls."""
83 |         self.wait_calls.clear()
84 |         self.wait_durations.clear()
85 |         self.error_wait_calls = 0
86 | 
87 |     @property
88 |     def call_count(self) -> int:
89 |         """Get the number of times wait_until_refresh_needed was called."""
90 |         return len(self.wait_calls)
```

--------------------------------------------------------------------------------
/.github/workflows/create-release-pr.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Create release PR
  2 | permissions:
  3 |   contents: write
  4 |   pull-requests: write
  5 | on:
  6 |   workflow_dispatch:
  7 |     inputs:
  8 |       bump:
  9 |         type: choice
 10 |         description: The version bump type.
 11 |         default: minor
 12 |         options:
 13 |           - major
 14 |           - minor
 15 |           - patch
 16 |       prerelease:
 17 |         type: string
 18 |         description: Optional pre-release tag (e.g. alpha.1, beta.1, rc.1). Leave empty for stable.
 19 |         default: ""
 20 | 
 21 | jobs:
 22 |   create-release-pr:
 23 |     runs-on: ubuntu-latest
 24 |     steps:
 25 |       - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
 26 | 
 27 |       - uses: ./.github/actions/setup-python
 28 | 
 29 |       - name: Compute next dir for bump
 30 |         id: changie-next
 31 |         uses: miniscruff/changie-action@6dcc2533cac0495148ed4046c438487e4dceaa23
 32 |         with:
 33 |           version: latest
 34 |           args: next ${{ inputs.bump }}
 35 | 
 36 |       - name: Prepare batch args
 37 |         id: prepare-batch
 38 |         shell: bash
 39 |         run: |
 40 |           PR="${{ inputs.prerelease }}"
 41 |           BUMP="${{ inputs.bump }}"
 42 |           NEXT_DIR="${{ steps.changie-next.outputs.output }}"
 43 | 
 44 |           if [[ -n "$PR" ]]; then
 45 |             if [[ ! "$PR" =~ ^(alpha|beta|rc)\.[0-9]+$ ]]; then
 46 |               echo "Invalid prerelease format: $PR (expected alpha.N, beta.N, rc.N)" >&2
 47 |               exit 1
 48 |             fi
 49 |             echo "args=batch $BUMP --move-dir $NEXT_DIR --prerelease $PR" >> "$GITHUB_OUTPUT"
 50 |           else
 51 |             if [[ -d "$NEXT_DIR" ]]; then
 52 |               echo "args=batch $BUMP --include $NEXT_DIR --remove-prereleases" >> "$GITHUB_OUTPUT"
 53 |             else
 54 |               echo "args=batch $BUMP" >> "$GITHUB_OUTPUT"
 55 |             fi
 56 |           fi
 57 | 
 58 |       - name: Batch changes
 59 |         uses: miniscruff/changie-action@6dcc2533cac0495148ed4046c438487e4dceaa23
 60 |         with:
 61 |           version: latest
 62 |           args: ${{ steps.prepare-batch.outputs.args }}
 63 | 
 64 |       - name: Merge
 65 |         uses: miniscruff/changie-action@6dcc2533cac0495148ed4046c438487e4dceaa23
 66 |         with:
 67 |           version: latest
 68 |           args: merge
 69 | 
 70 |       - name: Get the latest version
 71 |         id: changie-latest
 72 |         uses: miniscruff/changie-action@6dcc2533cac0495148ed4046c438487e4dceaa23
 73 |         with:
 74 |           version: latest
 75 |           args: latest
 76 | 
 77 |       - name: Set latest package version
 78 |         id: package-version
 79 |         run: |
 80 |           VERSION="${{ steps.changie-latest.outputs.output }}"
 81 |           VERSION_NO_V=$(echo "$VERSION" | cut -c 2-)
 82 | 
 83 |           MESSAGE=$(cat ".changes/$VERSION.md")
 84 | 
 85 |           {
 86 |           echo "version=$VERSION_NO_V"
 87 |           echo "message<<EOF"
 88 |           echo "$MESSAGE"
 89 |           echo "EOF"
 90 |           } >> "$GITHUB_OUTPUT"
 91 | 
 92 |       - name: Create Pull Request
 93 |         uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
 94 |         with:
 95 |           title: "version: ${{ steps.package-version.outputs.version }}"
 96 |           branch: release/${{ steps.package-version.outputs.version }}
 97 |           commit-message: |
 98 |             version: ${{ steps.package-version.outputs.version }}
 99 | 
100 |             ${{ steps.package-version.outputs.message }}
101 |           body: |
102 |             ## version: ${{ steps.package-version.outputs.version }}
103 | 
104 |             ${{ steps.package-version.outputs.message }}
105 | 
```

--------------------------------------------------------------------------------
/Taskfile.yml:
--------------------------------------------------------------------------------

```yaml
  1 | # https://taskfile.dev
  2 | 
  3 | version: '3'
  4 | 
  5 | # Environment Variables will be set with the following precedence (from high to low):
  6 | # .env.local.<env>
  7 | #     Local environment variables that are specific to an environment have the highest priority.
  8 | #     For example, to test a staging environment, put any staging-specific variables in
  9 | #     .env.local.stg. These variables will be set first and will not be overridden by
 10 | #     variables set in other .env files. A common use case for this is to set specific
 11 | #     variables used for testing personal staging sites in a provider/consumer configuration.
 12 | #
 13 | # .env
 14 | #     Local environment variables that should be common across all environments when doing local
 15 | #     development. In the most common case, this will have the full 'dev' configuration.
 16 | #
 17 | #  envs/.env.<env>
 18 | #     Default environment variables for each of the dev, stg and prod environments. If an
 19 | #     environment variable is not set in any of the local .env files, the default value from this
 20 | #     file will be used. These default values are the non-secret values that will be used for
 21 | #     configuration of the shared stg and prod apps managed through CI/CD.
 22 | 
 23 | dotenv: [".env.local.{{.ENV}}", ".env", "envs/.env.{{.ENV}}"]
 24 | 
 25 | tasks:
 26 |   default:
 27 |     desc: "List tasks"
 28 |     cmds:
 29 |       - task --list
 30 |     silent: true
 31 | 
 32 |   install:
 33 |     desc: Install dependencies
 34 |     cmds:
 35 |       - (cd ui && pnpm install && pnpm build)
 36 |       - uv sync
 37 |       - uv pip install -e .
 38 | 
 39 |   install-pre-commit:
 40 |     desc: Install pre-commit hooks
 41 |     cmds:
 42 |       - uv run pre-commit install
 43 | 
 44 |   check:
 45 |     desc: Run linting and type checking
 46 |     cmds:
 47 |       - (cd ui && pnpm run lint)
 48 |       - uv run pre-commit run --all-files
 49 | 
 50 |   fmt:
 51 |     desc: Format code
 52 |     cmds:
 53 |       - uv run pre-commit run ruff --all-files
 54 | 
 55 |   run:
 56 |     desc: "Run the dbt-mcp server"
 57 |     cmds:
 58 |       - (cd ui && pnpm install && pnpm build)
 59 |       - uv run src/dbt_mcp/main.py
 60 | 
 61 |   dev:
 62 |     desc: "Run the dbt-mcp server in development mode (with debugger support)"
 63 |     env:
 64 |       MCP_TRANSPORT: streamable-http
 65 |     cmds:
 66 |       - (cd ui && pnpm install && pnpm build)
 67 |       - ./.venv/bin/python ./src/dbt_mcp/main.py
 68 | 
 69 |   inspector:
 70 |     desc: "Run the dbt-mcp server with MCP inspector"
 71 |     cmds:
 72 |       - (cd ui && pnpm install && pnpm build)
 73 |       - npx @modelcontextprotocol/inspector ./.venv/bin/python src/dbt_mcp/main.py
 74 | 
 75 |   test:
 76 |     desc: "Run the tests"
 77 |     cmds:
 78 |       - uv run pytest tests {{.CLI_ARGS}}
 79 | 
 80 |   test:integration:
 81 |     desc: "Run the integration tests"
 82 |     cmds:
 83 |       - uv run pytest tests/integration {{.CLI_ARGS}}
 84 | 
 85 |   test:unit:
 86 |     desc: "Run the unit tests"
 87 |     cmds:
 88 |       - uv run pytest tests/unit {{.CLI_ARGS}}
 89 | 
 90 |   eval:
 91 |     desc: "Run the evals"
 92 |     cmds:
 93 |       - uv run pytest evals {{.CLI_ARGS}}
 94 | 
 95 |   build:
 96 |     desc: "Build the package"
 97 |     cmds:
 98 |       - (cd ui && pnpm install && pnpm build)
 99 |       - uv build
100 | 
101 |   client:
102 |     desc: "Run the test client"
103 |     cmds:
104 |       - (cd ui && pnpm install && pnpm build)
105 |       - uv run src/client/main.py
106 | 
107 |   d2:
108 |     desc: "Update d2 diagram from the config"
109 |     preconditions:
110 |       - sh: command -v d2 &> /dev/null
111 |         msg: "Error: d2 command not found. You can install it with 'brew install d2'"
112 |     sources:
113 |       - docs/diagram.d2
114 |     generates:
115 |       - docs/d2.png
116 |     cmds:
117 |       - d2 docs/diagram.d2 docs/d2.png --pad 20
118 | 
```

--------------------------------------------------------------------------------
/src/client/main.py:
--------------------------------------------------------------------------------

```python
 1 | import asyncio
 2 | import json
 3 | from time import time
 4 | 
 5 | from openai import OpenAI
 6 | from openai.types.responses.response_input_param import FunctionCallOutput
 7 | from openai.types.responses.response_output_message import ResponseOutputMessage
 8 | 
 9 | from client.tools import get_tools
10 | from dbt_mcp.config.config import load_config
11 | from dbt_mcp.mcp.server import create_dbt_mcp
12 | 
13 | LLM_MODEL = "gpt-4o-mini"
14 | TOOL_RESPONSE_TRUNCATION = 100  # set to None for no truncation
15 | 
16 | llm_client = OpenAI()
17 | config = load_config()
18 | messages = []
19 | 
20 | 
async def main():
    """Interactive REPL: read user input and let the LLM call dbt MCP tools.

    Loops forever (until KeyboardInterrupt in the caller), appending user
    messages, model outputs, and tool results to the module-level `messages`
    history shared with the OpenAI Responses API.
    """
    dbt_mcp = await create_dbt_mcp(config)
    user_role = "user"
    available_tools = await get_tools(dbt_mcp)
    # Render a human-readable tool list: "- name(param1, param2, ...)"
    tools_str = "\n".join(
        [
            f"- {t['name']}({', '.join(t['parameters']['properties'].keys())})"
            for t in available_tools
        ]
    )
    print(f"Available tools:\n{tools_str}")
    while True:
        user_input = input(f"{user_role} > ")
        messages.append({"role": user_role, "content": user_input})
        response_output = None
        tool_call_error = None
        # Keep re-prompting the model while it requests tool calls, or while
        # the previous tool call errored (so the model can react to the error).
        while (
            response_output is None
            or response_output.type == "function_call"
            or tool_call_error is not None
        ):
            tool_call_error = None
            response = llm_client.responses.create(
                model=LLM_MODEL,
                input=messages,
                tools=available_tools,
                parallel_tool_calls=False,
            )
            # NOTE(review): only the first output item is inspected; if the
            # model returns multiple items in one response the rest are
            # ignored -- confirm this is acceptable for this test client.
            response_output = response.output[0]
            if isinstance(response_output, ResponseOutputMessage):
                print(f"{response_output.role} > {response_output.content[0].text}")
            messages.append(response_output)
            if response_output.type != "function_call":
                continue
            print(
                f"Calling tool: {response_output.name} with arguments: {response_output.arguments}"
            )
            start_time = time()
            try:
                tool_response = await dbt_mcp.call_tool(
                    response_output.name,
                    json.loads(response_output.arguments),
                )
            except Exception as e:
                # Feed the failure back to the model as the tool output so it
                # can retry or explain; the loop condition above re-prompts.
                tool_call_error = e
                print(f"Error calling tool: {e}")
                messages.append(
                    FunctionCallOutput(
                        type="function_call_output",
                        call_id=response_output.call_id,
                        output=str(e),
                    )
                )
                continue
            tool_response_str = str(tool_response)
            # Report elapsed time plus the (optionally truncated) tool output.
            print(
                f"Tool responded in {time() - start_time} seconds: "
                + (
                    f"{tool_response_str[:TOOL_RESPONSE_TRUNCATION]} [TRUNCATED]..."
                    if TOOL_RESPONSE_TRUNCATION
                    and len(tool_response_str) > TOOL_RESPONSE_TRUNCATION
                    else tool_response_str
                )
            )
            # The full (untruncated) tool output always goes into the history.
            messages.append(
                FunctionCallOutput(
                    type="function_call_output",
                    call_id=response_output.call_id,
                    output=str(tool_response),
                )
            )
92 | 
93 | 
if __name__ == "__main__":
    # Run the interactive client; Ctrl-C exits cleanly instead of a traceback.
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\nExiting.")
99 | 
```

--------------------------------------------------------------------------------
/examples/openai_agent/main_streamable.py:
--------------------------------------------------------------------------------

```python
  1 | # mypy: ignore-errors
  2 | 
  3 | import asyncio
  4 | import os
  5 | 
  6 | from agents import Agent, Runner, trace
  7 | from agents.mcp import create_static_tool_filter
  8 | from agents.mcp.server import MCPServerStreamableHttp
  9 | from agents.stream_events import RawResponsesStreamEvent, RunItemStreamEvent
 10 | from openai.types.responses import ResponseCompletedEvent, ResponseOutputMessage
 11 | 
 12 | 
 13 | def print_tool_call(tool_name, params, color="yellow", show_params=True):
 14 |     # Define color codes for different colors
 15 |     # we could use a library like colorama but this avoids adding a dependency
 16 |     color_codes = {
 17 |         "grey": "\033[37m",
 18 |         "yellow": "\033[93m",
 19 |     }
 20 |     color_code_reset = "\033[0m"
 21 | 
 22 |     color_code = color_codes.get(color, color_codes["yellow"])
 23 |     msg = f"Calling the tool {tool_name}"
 24 |     if show_params:
 25 |         msg += f" with params {params}"
 26 |     print(f"{color_code}# {msg}{color_code_reset}")
 27 | 
 28 | 
 29 | def handle_event_printing(event, show_tools_calls=True):
 30 |     if type(event) is RunItemStreamEvent and show_tools_calls:
 31 |         if event.name == "tool_called":
 32 |             print_tool_call(
 33 |                 event.item.raw_item.name,
 34 |                 event.item.raw_item.arguments,
 35 |                 color="grey",
 36 |                 show_params=True,
 37 |             )
 38 | 
 39 |     if type(event) is RawResponsesStreamEvent:
 40 |         if type(event.data) is ResponseCompletedEvent:
 41 |             for output in event.data.response.output:
 42 |                 if type(output) is ResponseOutputMessage:
 43 |                     print(output.content[0].text)
 44 | 
 45 | 
async def main(inspect_events_tools_calls=False):
    """Chat loop against the hosted dbt MCP server over streamable HTTP.

    Connection details come from environment variables (DBT_TOKEN, DBT_HOST,
    DBT_PROD_ENV_ID / DBT_ENV_ID). Runs until interrupted. When
    ``inspect_events_tools_calls`` is True, responses are streamed and tool
    calls are printed as they happen.
    """
    # DBT_PROD_ENV_ID is preferred; DBT_ENV_ID is kept as a fallback.
    # NOTE(review): may be None if neither is set -- presumably the server
    # rejects the request then; confirm the failure mode is clear.
    prod_environment_id = os.environ.get("DBT_PROD_ENV_ID", os.getenv("DBT_ENV_ID"))
    token = os.environ.get("DBT_TOKEN")
    host = os.environ.get("DBT_HOST", "cloud.getdbt.com")

    async with MCPServerStreamableHttp(
        name="dbt",
        params={
            "url": f"https://{host}/api/ai/v1/mcp/",
            "headers": {
                "Authorization": f"token {token}",
                "x-dbt-prod-environment-id": prod_environment_id,
            },
        },
        client_session_timeout_seconds=20,
        cache_tools_list=True,
        # Restrict the agent to the semantic-layer tools only.
        tool_filter=create_static_tool_filter(
            allowed_tool_names=[
                "list_metrics",
                "get_dimensions",
                "get_entities",
                "query_metrics",
                "get_metrics_compiled_sql",
            ],
        ),
    ) as server:
        agent = Agent(
            name="Assistant",
            instructions="Use the tools to answer the user's questions. Do not invent data or sample data.",
            mcp_servers=[server],
            model="gpt-5",
        )
        with trace(workflow_name="Conversation"):
            conversation = []
            result = None
            while True:
                if result:
                    conversation = result.to_input_list()
                conversation.append({"role": "user", "content": input("User > ")})

                if inspect_events_tools_calls:
                    # NOTE(review): in this streaming branch `result` is never
                    # assigned, so assistant turns are not folded back into
                    # `conversation` -- confirm whether losing assistant
                    # history across turns is intended here.
                    async for event in Runner.run_streamed(
                        agent, conversation
                    ).stream_events():
                        handle_event_printing(event, show_tools_calls=True)
                else:
                    result = await Runner.run(agent, conversation)
                    print(result.final_output)
 94 | 
 95 | 
if __name__ == "__main__":
    # Streamed mode by default so tool calls are printed as they happen;
    # Ctrl-C exits cleanly instead of a traceback.
    try:
        asyncio.run(main(inspect_events_tools_calls=True))
    except KeyboardInterrupt:
        print("\nExiting.")
101 | 
```

--------------------------------------------------------------------------------
/ui/assets/dbt_logo BLK.svg:
--------------------------------------------------------------------------------

```
 1 | <svg width="490" height="190" viewBox="0 0 490 190" fill="none" xmlns="http://www.w3.org/2000/svg">
 2 | <g clip-path="url(#clip0_33_23)">
 3 | <path d="M455.26 148.48C444.41 148.48 436.47 139.69 436.47 128.84V82.57H423.04V61.9H437.55L437.33 40.92L459.04 33.41V61.91H477.82V82.58H459.04V127.43H475.67V148.49H455.25L455.26 148.48Z" fill="black"/>
 4 | <path d="M381.33 149.48C369.96 149.48 361.35 145.8 355.14 135.81L353.94 135.98C354.11 140.29 354.28 144.03 354.28 148.51H331.37V33.53L354.63 25.67V53.97C354.63 62.24 354.63 66.72 353.77 74.47L355.15 74.81C361.53 64.47 371 59.82 382.03 59.82C405.46 59.82 420.1 79.8 420.1 105.3C420.1 130.8 404.94 149.46 381.34 149.46L381.33 149.48ZM375.82 128.63C388.74 128.63 397.01 118.92 397.01 104.97C397.01 91.02 388.74 80.68 375.82 80.68C362.9 80.68 354.12 90.84 354.12 105.14C354.12 119.44 362.56 128.63 375.82 128.63Z" fill="black"/>
 5 | <path d="M262.51 149.48C239.6 149.48 224.44 130.13 224.44 104.97C224.44 79.81 239.43 59.83 263.37 59.83C274.22 59.83 283.36 64.14 289.56 74.3L290.76 73.96C290.07 66.72 290.07 61.73 290.07 53.98V33.54L313.33 25.68V148.52H290.42C290.42 144.21 290.42 140.29 290.76 135.98L289.56 135.64C283.18 145.8 273.88 149.48 262.51 149.48ZM268.88 128.63C282.15 128.63 290.59 119.27 290.59 104.97C290.59 90.67 282.15 80.68 268.88 80.68C255.79 80.85 247.69 91.19 247.69 105.14C247.69 119.09 255.79 128.63 268.88 128.63Z" fill="black"/>
 6 | <path d="M485.216 148.816C482.608 148.816 480.8 146.976 480.8 144.368C480.8 141.776 482.624 139.92 485.216 139.92C487.808 139.92 489.632 141.776 489.632 144.368C489.632 146.976 487.824 148.816 485.216 148.816ZM485.216 148.032C487.328 148.032 488.752 146.48 488.752 144.368C488.752 142.256 487.328 140.72 485.216 140.72C483.104 140.72 481.68 142.256 481.68 144.368C481.68 146.48 483.104 148.032 485.216 148.032ZM483.744 146.56V142.112H485.52C486.448 142.112 486.928 142.64 486.928 143.424C486.928 144.144 486.448 144.656 485.696 144.656H485.664L487.2 146.544V146.56H486.144L484.592 144.608H484.576V146.56H483.744ZM484.576 144.064H485.408C485.84 144.064 486.08 143.808 486.08 143.44C486.08 143.088 485.824 142.848 485.408 142.848H484.576V144.064Z" fill="black"/>
 7 | <path d="M158.184 2.16438C166.564 -2.6797 175.59 1.19557 182.359 7.97729C189.45 15.082 192.351 22.8325 187.839 31.5518C186.227 34.7812 167.209 67.721 161.407 77.0863C158.184 82.2533 156.572 88.7121 156.572 94.8479C156.572 100.984 158.184 107.443 161.407 112.933C167.209 121.975 186.227 155.238 187.839 158.467C192.351 167.509 189.128 174.291 182.681 181.396C175.267 188.823 167.854 192.698 158.828 187.854C155.605 185.917 65.3511 133.924 65.3511 133.924C66.9627 144.581 72.7648 154.269 80.1785 160.082C79.2115 160.405 34.5761 186.232 31.5058 187.854C23.0444 192.326 15.3286 189.336 8.62001 183.01C1.04465 175.867 -2.66173 167.509 2.1733 158.79C3.78498 155.56 22.8028 122.298 28.2825 113.255C31.5058 107.765 33.4398 101.63 33.4398 95.1709C33.4398 88.7121 31.5058 82.5762 28.2825 77.4092C22.8028 67.721 3.78498 34.1354 2.1733 31.2289C-2.66173 22.5096 1.22016 13.1436 7.97534 7.00847C15.6327 0.0538926 22.8028 -2.03382 31.5058 2.16438C34.0845 3.1332 124.016 56.4182 124.016 56.4182C123.049 46.0841 117.892 36.7189 109.511 30.2601C110.156 29.9372 154.96 3.45614 158.184 2.16438ZM98.2293 110.995L111.123 98.0773C112.734 96.4626 112.734 93.8791 111.123 91.9415L98.2293 79.0239C96.2953 77.0863 93.7166 77.0863 91.7826 79.0239L78.8892 91.9415C77.2775 93.5562 77.2775 96.4626 78.8892 98.0773L91.7826 110.995C93.3942 112.61 96.2953 112.61 98.2293 110.995Z" fill="#FE6703"/>
 8 | </g>
 9 | <defs>
10 | <clipPath id="clip0_33_23">
11 | <rect width="490" height="190" fill="white"/>
12 | </clipPath>
13 | </defs>
14 | </svg>
15 | 
```

--------------------------------------------------------------------------------
/ui/assets/dbt_logo WHT.svg:
--------------------------------------------------------------------------------

```
 1 | <svg width="490" height="190" viewBox="0 0 490 190" fill="none" xmlns="http://www.w3.org/2000/svg">
 2 | <g clip-path="url(#clip0_33_49)">
 3 | <path d="M455.26 148.48C444.41 148.48 436.47 139.69 436.47 128.84V82.57H423.04V61.9H437.55L437.33 40.92L459.04 33.41V61.91H477.82V82.58H459.04V127.43H475.67V148.49H455.25L455.26 148.48Z" fill="white"/>
 4 | <path d="M381.33 149.48C369.96 149.48 361.35 145.8 355.14 135.81L353.94 135.98C354.11 140.29 354.28 144.03 354.28 148.51H331.37V33.53L354.63 25.67V53.97C354.63 62.24 354.63 66.72 353.77 74.47L355.15 74.81C361.53 64.47 371 59.82 382.03 59.82C405.46 59.82 420.1 79.8 420.1 105.3C420.1 130.8 404.94 149.46 381.34 149.46L381.33 149.48ZM375.82 128.63C388.74 128.63 397.01 118.92 397.01 104.97C397.01 91.02 388.74 80.68 375.82 80.68C362.9 80.68 354.12 90.84 354.12 105.14C354.12 119.44 362.56 128.63 375.82 128.63Z" fill="white"/>
 5 | <path d="M262.51 149.48C239.6 149.48 224.44 130.13 224.44 104.97C224.44 79.81 239.43 59.83 263.37 59.83C274.22 59.83 283.36 64.14 289.56 74.3L290.76 73.96C290.07 66.72 290.07 61.73 290.07 53.98V33.54L313.33 25.68V148.52H290.42C290.42 144.21 290.42 140.29 290.76 135.98L289.56 135.64C283.18 145.8 273.88 149.48 262.51 149.48ZM268.88 128.63C282.15 128.63 290.59 119.27 290.59 104.97C290.59 90.67 282.15 80.68 268.88 80.68C255.79 80.85 247.69 91.19 247.69 105.14C247.69 119.09 255.79 128.63 268.88 128.63Z" fill="white"/>
 6 | <path d="M485.216 148.816C482.608 148.816 480.8 146.976 480.8 144.368C480.8 141.776 482.624 139.92 485.216 139.92C487.808 139.92 489.632 141.776 489.632 144.368C489.632 146.976 487.824 148.816 485.216 148.816ZM485.216 148.032C487.328 148.032 488.752 146.48 488.752 144.368C488.752 142.256 487.328 140.72 485.216 140.72C483.104 140.72 481.68 142.256 481.68 144.368C481.68 146.48 483.104 148.032 485.216 148.032ZM483.744 146.56V142.112H485.52C486.448 142.112 486.928 142.64 486.928 143.424C486.928 144.144 486.448 144.656 485.696 144.656H485.664L487.2 146.544V146.56H486.144L484.592 144.608H484.576V146.56H483.744ZM484.576 144.064H485.408C485.84 144.064 486.08 143.808 486.08 143.44C486.08 143.088 485.824 142.848 485.408 142.848H484.576V144.064Z" fill="white"/>
 7 | <path d="M158.184 2.16438C166.564 -2.6797 175.59 1.19557 182.359 7.97729C189.45 15.082 192.351 22.8325 187.839 31.5518C186.227 34.7812 167.209 67.721 161.407 77.0863C158.184 82.2533 156.572 88.7121 156.572 94.8479C156.572 100.984 158.184 107.443 161.407 112.933C167.209 121.975 186.227 155.238 187.839 158.467C192.351 167.509 189.128 174.291 182.681 181.396C175.267 188.823 167.854 192.698 158.828 187.854C155.605 185.917 65.3511 133.924 65.3511 133.924C66.9627 144.581 72.7648 154.269 80.1785 160.082C79.2115 160.405 34.5761 186.232 31.5058 187.854C23.0444 192.326 15.3286 189.336 8.62001 183.01C1.04465 175.867 -2.66173 167.509 2.1733 158.79C3.78498 155.56 22.8028 122.298 28.2825 113.255C31.5058 107.765 33.4398 101.63 33.4398 95.1709C33.4398 88.7121 31.5058 82.5762 28.2825 77.4092C22.8028 67.721 3.78498 34.1354 2.1733 31.2289C-2.66173 22.5096 1.22016 13.1436 7.97534 7.00847C15.6327 0.0538926 22.8028 -2.03382 31.5058 2.16438C34.0845 3.1332 124.016 56.4182 124.016 56.4182C123.049 46.0841 117.892 36.7189 109.511 30.2601C110.156 29.9372 154.96 3.45614 158.184 2.16438ZM98.2293 110.995L111.123 98.0773C112.734 96.4626 112.734 93.8791 111.123 91.9415L98.2293 79.0239C96.2953 77.0863 93.7166 77.0863 91.7826 79.0239L78.8892 91.9415C77.2775 93.5562 77.2775 96.4626 78.8892 98.0773L91.7826 110.995C93.3942 112.61 96.2953 112.61 98.2293 110.995Z" fill="#FE6703"/>
 8 | </g>
 9 | <defs>
10 | <clipPath id="clip0_33_49">
11 | <rect width="490" height="190" fill="white"/>
12 | </clipPath>
13 | </defs>
14 | </svg>
15 | 
```

--------------------------------------------------------------------------------
/tests/unit/oauth/test_fastapi_app_pagination.py:
--------------------------------------------------------------------------------

```python
  1 | from unittest.mock import Mock, patch
  2 | 
  3 | import pytest
  4 | 
  5 | from dbt_mcp.oauth.dbt_platform import DbtPlatformAccount
  6 | from dbt_mcp.oauth.fastapi_app import (
  7 |     _get_all_environments_for_project,
  8 |     _get_all_projects_for_account,
  9 | )
 10 | 
 11 | 
 12 | @pytest.fixture
 13 | def base_headers():
 14 |     return {"Accept": "application/json", "Authorization": "Bearer token"}
 15 | 
 16 | 
 17 | @pytest.fixture
 18 | def account():
 19 |     return DbtPlatformAccount(
 20 |         id=1,
 21 |         name="Account 1",
 22 |         locked=False,
 23 |         state=1,
 24 |         static_subdomain=None,
 25 |         vanity_subdomain=None,
 26 |     )
 27 | 
 28 | 
 29 | @patch("dbt_mcp.oauth.fastapi_app.requests.get")
 30 | def test_get_all_projects_for_account_paginates(mock_get: Mock, base_headers, account):
 31 |     # Two pages: first full page (limit=2), second partial page (1 item) -> stop
 32 |     first_page_resp = Mock()
 33 |     first_page_resp.json.return_value = {
 34 |         "data": [
 35 |             {"id": 101, "name": "Proj A", "account_id": account.id},
 36 |             {"id": 102, "name": "Proj B", "account_id": account.id},
 37 |         ]
 38 |     }
 39 |     first_page_resp.raise_for_status.return_value = None
 40 | 
 41 |     second_page_resp = Mock()
 42 |     second_page_resp.json.return_value = {
 43 |         "data": [
 44 |             {"id": 103, "name": "Proj C", "account_id": account.id},
 45 |         ]
 46 |     }
 47 |     second_page_resp.raise_for_status.return_value = None
 48 | 
 49 |     mock_get.side_effect = [first_page_resp, second_page_resp]
 50 | 
 51 |     result = _get_all_projects_for_account(
 52 |         dbt_platform_url="https://cloud.getdbt.com",
 53 |         account=account,
 54 |         headers=base_headers,
 55 |         page_size=2,
 56 |     )
 57 | 
 58 |     # Should aggregate 3 projects and include account_name field
 59 |     assert len(result) == 3
 60 |     assert {p.id for p in result} == {101, 102, 103}
 61 |     assert all(p.account_name == account.name for p in result)
 62 | 
 63 |     # Verify correct pagination URLs called
 64 |     expected_urls = [
 65 |         "https://cloud.getdbt.com/api/v3/accounts/1/projects/?state=1&offset=0&limit=2",
 66 |         "https://cloud.getdbt.com/api/v3/accounts/1/projects/?state=1&offset=2&limit=2",
 67 |     ]
 68 |     actual_urls = [
 69 |         call.kwargs["url"] if "url" in call.kwargs else call.args[0]
 70 |         for call in mock_get.call_args_list
 71 |     ]
 72 |     assert actual_urls == expected_urls
 73 | 
 74 | 
 75 | @patch("dbt_mcp.oauth.fastapi_app.requests.get")
 76 | def test_get_all_environments_for_project_paginates(mock_get: Mock, base_headers):
 77 |     # Two pages: first full page (limit=2), second partial (1 item)
 78 |     first_page_resp = Mock()
 79 |     first_page_resp.json.return_value = {
 80 |         "data": [
 81 |             {"id": 201, "name": "Dev", "deployment_type": "development"},
 82 |             {"id": 202, "name": "Prod", "deployment_type": "production"},
 83 |         ]
 84 |     }
 85 |     first_page_resp.raise_for_status.return_value = None
 86 | 
 87 |     second_page_resp = Mock()
 88 |     second_page_resp.json.return_value = {
 89 |         "data": [
 90 |             {"id": 203, "name": "Staging", "deployment_type": "development"},
 91 |         ]
 92 |     }
 93 |     second_page_resp.raise_for_status.return_value = None
 94 | 
 95 |     mock_get.side_effect = [first_page_resp, second_page_resp]
 96 | 
 97 |     result = _get_all_environments_for_project(
 98 |         dbt_platform_url="https://cloud.getdbt.com",
 99 |         account_id=1,
100 |         project_id=9,
101 |         headers=base_headers,
102 |         page_size=2,
103 |     )
104 | 
105 |     assert len(result) == 3
106 |     assert {e.id for e in result} == {201, 202, 203}
107 | 
108 |     expected_urls = [
109 |         "https://cloud.getdbt.com/api/v3/accounts/1/projects/9/environments/?state=1&offset=0&limit=2",
110 |         "https://cloud.getdbt.com/api/v3/accounts/1/projects/9/environments/?state=1&offset=2&limit=2",
111 |     ]
112 |     actual_urls = [
113 |         call.kwargs["url"] if "url" in call.kwargs else call.args[0]
114 |         for call in mock_get.call_args_list
115 |     ]
116 |     assert actual_urls == expected_urls
117 | 
```

--------------------------------------------------------------------------------
/tests/unit/dbt_cli/test_cli_integration.py:
--------------------------------------------------------------------------------

```python
  1 | import unittest
  2 | from unittest.mock import MagicMock, patch
  3 | 
  4 | from tests.mocks.config import mock_config
  5 | 
  6 | 
class TestDbtCliIntegration(unittest.TestCase):
    """Mocked end-to-end tests for dbt CLI tool registration and execution."""

    @patch("subprocess.Popen")
    def test_dbt_command_execution(self, mock_popen):
        """
        Tests the full execution path for dbt commands, ensuring they are properly
        executed with the right arguments.
        """
        # Import here to prevent circular import issues during patching
        from dbt_mcp.dbt_cli.tools import register_dbt_cli_tools

        # Mock setup: the subprocess always "succeeds" with canned output.
        mock_process = MagicMock()
        mock_process.communicate.return_value = ("command output", None)
        mock_popen.return_value = mock_process

        # Create a mock FastMCP and Config
        mock_fastmcp = MagicMock()

        # Patch the tool decorator to capture the registered functions by name
        # so each one can be invoked directly below.
        tools = {}

        def mock_tool_decorator(**kwargs):
            def decorator(func):
                tools[func.__name__] = func
                return func

            return decorator

        mock_fastmcp.tool = mock_tool_decorator

        # Register the tools
        register_dbt_cli_tools(mock_fastmcp, mock_config.dbt_cli_config)

        # Test cases for different command types
        test_cases = [
            # Command name, args, expected command list
            ("build", [], ["/path/to/dbt", "--no-use-colors", "build", "--quiet"]),
            (
                "compile",
                [],
                ["/path/to/dbt", "--no-use-colors", "compile", "--quiet"],
            ),
            (
                "docs",
                [],
                ["/path/to/dbt", "--no-use-colors", "docs", "--quiet", "generate"],
            ),
            (
                "ls",
                [],
                ["/path/to/dbt", "--no-use-colors", "list", "--quiet"],
            ),
            ("parse", [], ["/path/to/dbt", "--no-use-colors", "parse", "--quiet"]),
            ("run", [], ["/path/to/dbt", "--no-use-colors", "run", "--quiet"]),
            ("test", [], ["/path/to/dbt", "--no-use-colors", "test", "--quiet"]),
            (
                "show",
                ["SELECT * FROM model"],
                [
                    "/path/to/dbt",
                    "--no-use-colors",
                    "show",
                    "--inline",
                    "SELECT * FROM model",
                    "--favor-state",
                    "--output",
                    "json",
                ],
            ),
            (
                "show",
                ["SELECT * FROM model", 10],
                [
                    "/path/to/dbt",
                    "--no-use-colors",
                    "show",
                    "--inline",
                    "SELECT * FROM model",
                    "--favor-state",
                    "--limit",
                    "10",
                    "--output",
                    "json",
                ],
            ),
        ]

        # Run each test case
        for command_name, args, expected_args in test_cases:
            mock_popen.reset_mock()

            # Call the function
            result = tools[command_name](*args)

            # Verify the command was called correctly
            mock_popen.assert_called_once()
            actual_args = mock_popen.call_args.kwargs.get("args")

            # NOTE(review): only the first 4 elements of the command line are
            # compared -- confirm that trailing arguments are intentionally
            # left unchecked here.
            num_params = 4

            self.assertEqual(actual_args[:num_params], expected_args[:num_params])

            # Verify correct working directory
            self.assertEqual(mock_popen.call_args.kwargs.get("cwd"), "/test/project")

            # Verify the output is returned correctly
            self.assertEqual(result, "command output")
114 | 
115 | 
if __name__ == "__main__":
    # Allow running this file directly with the stdlib test runner.
    unittest.main()
118 | 
```

--------------------------------------------------------------------------------
/tests/mocks/config.py:
--------------------------------------------------------------------------------

```python
  1 | from dbt_mcp.config.config import (
  2 |     Config,
  3 |     DbtCliConfig,
  4 |     DbtCodegenConfig,
  5 |     LspConfig,
  6 | )
  7 | from dbt_mcp.config.config_providers import (
  8 |     AdminApiConfig,
  9 |     DefaultAdminApiConfigProvider,
 10 |     DefaultDiscoveryConfigProvider,
 11 |     DefaultSemanticLayerConfigProvider,
 12 |     DefaultSqlConfigProvider,
 13 |     DiscoveryConfig,
 14 |     SemanticLayerConfig,
 15 |     SqlConfig,
 16 | )
 17 | from dbt_mcp.config.headers import (
 18 |     AdminApiHeadersProvider,
 19 |     DiscoveryHeadersProvider,
 20 |     SemanticLayerHeadersProvider,
 21 |     SqlHeadersProvider,
 22 | )
 23 | from dbt_mcp.config.settings import CredentialsProvider, DbtMcpSettings
 24 | from dbt_mcp.dbt_cli.binary_type import BinaryType
 25 | from dbt_mcp.oauth.token_provider import StaticTokenProvider
 26 | 
# Settings built without validation (model_construct) -- fields keep defaults.
mock_settings = DbtMcpSettings.model_construct()

# Canned SQL tool config: local server, static token, all IDs set to 1.
mock_sql_config = SqlConfig(
    url="http://localhost:8000",
    prod_environment_id=1,
    dev_environment_id=1,
    user_id=1,
    headers_provider=SqlHeadersProvider(
        token_provider=StaticTokenProvider(token="token")
    ),
)

# dbt CLI config pointing at fake paths; used by CLI tool tests.
mock_dbt_cli_config = DbtCliConfig(
    project_dir="/test/project",
    dbt_path="/path/to/dbt",
    dbt_cli_timeout=10,
    binary_type=BinaryType.DBT_CORE,
)

# Codegen config mirroring the CLI config above.
mock_dbt_codegen_config = DbtCodegenConfig(
    project_dir="/test/project",
    dbt_path="/path/to/dbt",
    dbt_cli_timeout=10,
    binary_type=BinaryType.DBT_CORE,
)

# LSP config with a fake binary path.
mock_lsp_config = LspConfig(
    project_dir="/test/project",
    lsp_path="/path/to/lsp",
)

# Discovery API config with a static-token headers provider.
mock_discovery_config = DiscoveryConfig(
    url="http://localhost:8000",
    headers_provider=DiscoveryHeadersProvider(
        token_provider=StaticTokenProvider(token="token")
    ),
    environment_id=1,
)

# Semantic Layer config with a static-token headers provider.
mock_semantic_layer_config = SemanticLayerConfig(
    host="localhost",
    token="token",
    url="http://localhost:8000",
    headers_provider=SemanticLayerHeadersProvider(
        token_provider=StaticTokenProvider(token="token")
    ),
    prod_environment_id=1,
)

# Admin API config with a static-token headers provider.
mock_admin_api_config = AdminApiConfig(
    url="http://localhost:8000",
    headers_provider=AdminApiHeadersProvider(
        token_provider=StaticTokenProvider(token="token")
    ),
    account_id=12345,
)
 83 | 
 84 | 
 85 | # Create mock config providers
 86 | class MockSqlConfigProvider(DefaultSqlConfigProvider):
 87 |     def __init__(self):
 88 |         pass  # Skip the base class __init__
 89 | 
 90 |     async def get_config(self):
 91 |         return mock_sql_config
 92 | 
 93 | 
 94 | class MockDiscoveryConfigProvider(DefaultDiscoveryConfigProvider):
 95 |     def __init__(self):
 96 |         pass  # Skip the base class __init__
 97 | 
 98 |     async def get_config(self):
 99 |         return mock_discovery_config
100 | 
101 | 
class MockSemanticLayerConfigProvider(DefaultSemanticLayerConfigProvider):
    """Provider stub that always returns the canned ``mock_semantic_layer_config``."""

    def __init__(self):
        # Deliberately bypass the base class __init__ (no real credentials).
        pass

    async def get_config(self):
        return mock_semantic_layer_config
108 | 
109 | 
class MockAdminApiConfigProvider(DefaultAdminApiConfigProvider):
    """Provider stub that always returns the canned ``mock_admin_api_config``."""

    def __init__(self):
        # Deliberately bypass the base class __init__ (no real credentials).
        pass

    async def get_config(self):
        return mock_admin_api_config
116 | 
117 | 
class MockCredentialsProvider(CredentialsProvider):
    """Credentials provider backed by a static token taken from settings."""

    def __init__(self, settings: DbtMcpSettings | None = None):
        effective_settings = settings if settings is not None else mock_settings
        super().__init__(effective_settings)
        self.token_provider = StaticTokenProvider(token=self.settings.dbt_token)

    async def get_credentials(self):
        return self.settings, self.token_provider
125 | 
126 | 
# Fully-wired Config with every provider stubbed out; shared across unit tests.
mock_config = Config(
    sql_config_provider=MockSqlConfigProvider(),
    dbt_cli_config=mock_dbt_cli_config,
    dbt_codegen_config=mock_dbt_codegen_config,
    discovery_config_provider=MockDiscoveryConfigProvider(),
    semantic_layer_config_provider=MockSemanticLayerConfigProvider(),
    admin_api_config_provider=MockAdminApiConfigProvider(),
    lsp_config=mock_lsp_config,
    disable_tools=[],
    credentials_provider=MockCredentialsProvider(),
)
138 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/oauth/token_provider.py:
--------------------------------------------------------------------------------

```python
  1 | import asyncio
  2 | import logging
  3 | from typing import Protocol
  4 | 
  5 | from authlib.integrations.requests_client import OAuth2Session
  6 | 
  7 | from dbt_mcp.oauth.client_id import OAUTH_CLIENT_ID
  8 | from dbt_mcp.oauth.context_manager import DbtPlatformContextManager
  9 | from dbt_mcp.oauth.dbt_platform import dbt_platform_context_from_token_response
 10 | from dbt_mcp.oauth.refresh_strategy import DefaultRefreshStrategy, RefreshStrategy
 11 | from dbt_mcp.oauth.token import AccessTokenResponse
 12 | 
 13 | logger = logging.getLogger(__name__)
 14 | 
 15 | 
class TokenProvider(Protocol):
    """Structural interface for anything that can supply a bearer token string."""

    def get_token(self) -> str: ...
 18 | 
 19 | 
 20 | class OAuthTokenProvider:
 21 |     """
 22 |     Token provider for OAuth access token with periodic refresh.
 23 |     """
 24 | 
    def __init__(
        self,
        access_token_response: AccessTokenResponse,
        dbt_platform_url: str,
        context_manager: DbtPlatformContextManager,
        refresh_strategy: RefreshStrategy | None = None,
    ):
        """Set up the OAuth client and refresh machinery.

        Args:
            access_token_response: Current access + refresh token pair to serve.
            dbt_platform_url: Base URL of the dbt platform instance.
            context_manager: Reads/persists the on-disk platform context.
            refresh_strategy: Controls refresh timing; defaults to
                DefaultRefreshStrategy when omitted.
        """
        self.access_token_response = access_token_response
        self.context_manager = context_manager
        self.dbt_platform_url = dbt_platform_url
        self.refresh_strategy = refresh_strategy or DefaultRefreshStrategy()
        self.token_url = f"{self.dbt_platform_url}/oauth/token"
        self.oauth_client = OAuth2Session(
            client_id=OAUTH_CLIENT_ID,
            token_endpoint=self.token_url,
        )
        # Background refresh is started lazily by the first get_token() call.
        self.refresh_started = False
 42 | 
 43 |     def _get_access_token_response(self) -> AccessTokenResponse:
 44 |         dbt_platform_context = self.context_manager.read_context()
 45 |         if not dbt_platform_context or not dbt_platform_context.decoded_access_token:
 46 |             raise ValueError("No decoded access token found in context")
 47 |         return dbt_platform_context.decoded_access_token.access_token_response
 48 | 
 49 |     def get_token(self) -> str:
 50 |         if not self.refresh_started:
 51 |             self.start_background_refresh()
 52 |             self.refresh_started = True
 53 |         return self.access_token_response.access_token
 54 | 
 55 |     def start_background_refresh(self) -> asyncio.Task[None]:
 56 |         logger.info("Starting oauth token background refresh")
 57 |         return asyncio.create_task(
 58 |             self._background_refresh_worker(), name="oauth-token-refresh"
 59 |         )
 60 | 
 61 |     async def _refresh_token(self) -> None:
 62 |         logger.info("Refreshing OAuth access token using authlib")
 63 |         token_response = self.oauth_client.refresh_token(
 64 |             url=self.token_url,
 65 |             refresh_token=self.access_token_response.refresh_token,
 66 |         )
 67 |         dbt_platform_context = dbt_platform_context_from_token_response(
 68 |             token_response, self.dbt_platform_url
 69 |         )
 70 |         self.context_manager.update_context(dbt_platform_context)
 71 |         if not dbt_platform_context.decoded_access_token:
 72 |             raise ValueError("No decoded access token found in context")
 73 |         self.access_token_response = (
 74 |             dbt_platform_context.decoded_access_token.access_token_response
 75 |         )
 76 |         logger.info("OAuth access token refreshed and context updated successfully")
 77 | 
 78 |     async def _background_refresh_worker(self) -> None:
 79 |         """Background worker that periodically refreshes tokens before expiry."""
 80 |         logger.info("Background token refresh worker started")
 81 |         while True:
 82 |             try:
 83 |                 await self.refresh_strategy.wait_until_refresh_needed(
 84 |                     self.access_token_response.expires_at
 85 |                 )
 86 |                 await self._refresh_token()
 87 |             except Exception as e:
 88 |                 logger.error(f"Error in background refresh worker: {e}")
 89 |                 await self.refresh_strategy.wait_after_error()
 90 | 
 91 | 
 92 | class StaticTokenProvider:
 93 |     """
 94 |     Token provider for tokens that aren't refreshed (e.g. service tokens and PATs)
 95 |     """
 96 | 
 97 |     def __init__(self, token: str | None = None):
 98 |         self.token = token
 99 | 
100 |     def get_token(self) -> str:
101 |         if not self.token:
102 |             raise ValueError("No token provided")
103 |         return self.token
104 | 
```

--------------------------------------------------------------------------------
/tests/unit/oauth/test_credentials_provider.py:
--------------------------------------------------------------------------------

```python
  1 | from unittest.mock import MagicMock, patch
  2 | 
  3 | import pytest
  4 | 
  5 | from dbt_mcp.config.settings import (
  6 |     AuthenticationMethod,
  7 |     CredentialsProvider,
  8 |     DbtMcpSettings,
  9 | )
 10 | 
 11 | 
class TestCredentialsProviderAuthenticationMethod:
    """Test the authentication_method field on CredentialsProvider"""

    @pytest.mark.asyncio
    async def test_authentication_method_oauth(self):
        """Test that authentication_method is set to OAUTH when using OAuth flow"""
        mock_settings = DbtMcpSettings.model_construct(
            dbt_host="cloud.getdbt.com",
            dbt_prod_env_id=123,
            dbt_account_id=456,
            dbt_token=None,  # No token means OAuth
        )

        credentials_provider = CredentialsProvider(mock_settings)

        # Mock OAuth flow - create a properly structured context
        mock_dbt_context = MagicMock()
        mock_dbt_context.account_id = 456
        mock_dbt_context.host_prefix = ""
        mock_dbt_context.user_id = 789
        mock_dbt_context.dev_environment.id = 111
        mock_dbt_context.prod_environment.id = 123
        mock_decoded_token = MagicMock()
        mock_decoded_token.access_token_response.access_token = "mock_token"
        mock_dbt_context.decoded_access_token = mock_decoded_token

        # Patch targets live in dbt_mcp.config.settings so no real browser
        # flow or network call happens during the test.
        with (
            patch(
                "dbt_mcp.config.settings.get_dbt_platform_context",
                return_value=mock_dbt_context,
            ),
            patch(
                "dbt_mcp.config.settings.get_dbt_host", return_value="cloud.getdbt.com"
            ),
            patch("dbt_mcp.config.settings.OAuthTokenProvider") as mock_token_provider,
            patch("dbt_mcp.config.settings.validate_settings"),
        ):
            mock_token_provider.return_value = MagicMock()

            settings, token_provider = await credentials_provider.get_credentials()

            assert (
                credentials_provider.authentication_method == AuthenticationMethod.OAUTH
            )
            assert token_provider is not None

    @pytest.mark.asyncio
    async def test_authentication_method_env_var(self):
        """Test that authentication_method is set to ENV_VAR when using token from env"""
        mock_settings = DbtMcpSettings.model_construct(
            dbt_host="test.dbt.com",
            dbt_prod_env_id=123,
            dbt_token="test_token",  # Token provided
        )

        credentials_provider = CredentialsProvider(mock_settings)

        with patch("dbt_mcp.config.settings.validate_settings"):
            settings, token_provider = await credentials_provider.get_credentials()

            assert (
                credentials_provider.authentication_method
                == AuthenticationMethod.ENV_VAR
            )
            assert token_provider is not None

    @pytest.mark.asyncio
    async def test_authentication_method_initially_none(self):
        """Test that authentication_method starts as None before get_credentials is called"""
        mock_settings = DbtMcpSettings.model_construct(
            dbt_token="test_token",
        )

        credentials_provider = CredentialsProvider(mock_settings)

        assert credentials_provider.authentication_method is None

    @pytest.mark.asyncio
    async def test_authentication_method_persists_after_get_credentials(self):
        """Test that authentication_method persists after get_credentials is called"""
        mock_settings = DbtMcpSettings.model_construct(
            dbt_host="test.dbt.com",
            dbt_prod_env_id=123,
            dbt_token="test_token",
        )

        credentials_provider = CredentialsProvider(mock_settings)

        with patch("dbt_mcp.config.settings.validate_settings"):
            # First call
            await credentials_provider.get_credentials()
            assert (
                credentials_provider.authentication_method
                == AuthenticationMethod.ENV_VAR
            )

            # Second call - should still be set
            await credentials_provider.get_credentials()
            assert (
                credentials_provider.authentication_method
                == AuthenticationMethod.ENV_VAR
            )
114 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/config/config.py:
--------------------------------------------------------------------------------

```python
  1 | import os
  2 | from dataclasses import dataclass
  3 | 
  4 | from dbt_mcp.config.config_providers import (
  5 |     DefaultAdminApiConfigProvider,
  6 |     DefaultDiscoveryConfigProvider,
  7 |     DefaultSemanticLayerConfigProvider,
  8 |     DefaultSqlConfigProvider,
  9 | )
 10 | from dbt_mcp.config.settings import (
 11 |     CredentialsProvider,
 12 |     DbtMcpSettings,
 13 | )
 14 | from dbt_mcp.dbt_cli.binary_type import BinaryType, detect_binary_type
 15 | from dbt_mcp.telemetry.logging import configure_logging
 16 | from dbt_mcp.tools.tool_names import ToolName
 17 | 
 18 | PACKAGE_NAME = "dbt-mcp"
 19 | 
 20 | 
@dataclass
class DbtCliConfig:
    """Settings for invoking the dbt CLI against a local project."""

    project_dir: str  # path to the dbt project directory
    dbt_path: str  # path to the dbt executable
    dbt_cli_timeout: int  # CLI timeout -- presumably seconds; confirm at call site
    binary_type: BinaryType  # flavor detected by detect_binary_type
 27 | 
 28 | 
@dataclass
class DbtCodegenConfig:
    """Settings for dbt-codegen operations; fields mirror DbtCliConfig."""

    project_dir: str  # path to the dbt project directory
    dbt_path: str  # path to the dbt executable
    dbt_cli_timeout: int  # CLI timeout -- presumably seconds; confirm at call site
    binary_type: BinaryType  # flavor detected by detect_binary_type
 35 | 
 36 | 
@dataclass
class LspConfig:
    """Settings for the dbt LSP integration."""

    project_dir: str  # dbt project the LSP server operates on
    lsp_path: str | None  # path to the LSP binary; None when not configured
 41 | 
 42 | 
@dataclass
class Config:
    """Top-level dbt-mcp configuration.

    A None field means the corresponding tool family is disabled or its
    prerequisites were not supplied (see load_config).
    """

    disable_tools: list[ToolName]
    sql_config_provider: DefaultSqlConfigProvider | None
    dbt_cli_config: DbtCliConfig | None
    dbt_codegen_config: DbtCodegenConfig | None
    discovery_config_provider: DefaultDiscoveryConfigProvider | None
    semantic_layer_config_provider: DefaultSemanticLayerConfigProvider | None
    admin_api_config_provider: DefaultAdminApiConfigProvider | None
    credentials_provider: CredentialsProvider
    lsp_config: LspConfig | None
 54 | 
 55 | 
 56 | def load_config() -> Config:
 57 |     settings = DbtMcpSettings()  # type: ignore
 58 |     configure_logging(settings.file_logging)
 59 |     credentials_provider = CredentialsProvider(settings)
 60 | 
 61 |     # Set default warn error options if not provided
 62 |     if settings.dbt_warn_error_options is None:
 63 |         warn_error_options = '{"error": ["NoNodesForSelectionCriteria"]}'
 64 |         os.environ["DBT_WARN_ERROR_OPTIONS"] = warn_error_options
 65 | 
 66 |     # Build configurations
 67 |     sql_config_provider = None
 68 |     if not settings.actual_disable_sql:
 69 |         sql_config_provider = DefaultSqlConfigProvider(
 70 |             credentials_provider=credentials_provider,
 71 |         )
 72 | 
 73 |     admin_api_config_provider = None
 74 |     if not settings.disable_admin_api:
 75 |         admin_api_config_provider = DefaultAdminApiConfigProvider(
 76 |             credentials_provider=credentials_provider,
 77 |         )
 78 | 
 79 |     dbt_cli_config = None
 80 |     if not settings.disable_dbt_cli and settings.dbt_project_dir and settings.dbt_path:
 81 |         binary_type = detect_binary_type(settings.dbt_path)
 82 |         dbt_cli_config = DbtCliConfig(
 83 |             project_dir=settings.dbt_project_dir,
 84 |             dbt_path=settings.dbt_path,
 85 |             dbt_cli_timeout=settings.dbt_cli_timeout,
 86 |             binary_type=binary_type,
 87 |         )
 88 | 
 89 |     dbt_codegen_config = None
 90 |     if (
 91 |         not settings.disable_dbt_codegen
 92 |         and settings.dbt_project_dir
 93 |         and settings.dbt_path
 94 |     ):
 95 |         binary_type = detect_binary_type(settings.dbt_path)
 96 |         dbt_codegen_config = DbtCodegenConfig(
 97 |             project_dir=settings.dbt_project_dir,
 98 |             dbt_path=settings.dbt_path,
 99 |             dbt_cli_timeout=settings.dbt_cli_timeout,
100 |             binary_type=binary_type,
101 |         )
102 | 
103 |     discovery_config_provider = None
104 |     if not settings.disable_discovery:
105 |         discovery_config_provider = DefaultDiscoveryConfigProvider(
106 |             credentials_provider=credentials_provider,
107 |         )
108 | 
109 |     semantic_layer_config_provider = None
110 |     if not settings.disable_semantic_layer:
111 |         semantic_layer_config_provider = DefaultSemanticLayerConfigProvider(
112 |             credentials_provider=credentials_provider,
113 |         )
114 | 
115 |     lsp_config = None
116 |     if not settings.disable_lsp and settings.dbt_project_dir:
117 |         lsp_config = LspConfig(
118 |             project_dir=settings.dbt_project_dir,
119 |             lsp_path=settings.dbt_lsp_path,
120 |         )
121 | 
122 |     return Config(
123 |         disable_tools=settings.disable_tools or [],
124 |         sql_config_provider=sql_config_provider,
125 |         dbt_cli_config=dbt_cli_config,
126 |         dbt_codegen_config=dbt_codegen_config,
127 |         discovery_config_provider=discovery_config_provider,
128 |         semantic_layer_config_provider=semantic_layer_config_provider,
129 |         admin_api_config_provider=admin_api_config_provider,
130 |         credentials_provider=credentials_provider,
131 |         lsp_config=lsp_config,
132 |     )
133 | 
```

--------------------------------------------------------------------------------
/tests/integration/semantic_layer/test_semantic_layer.py:
--------------------------------------------------------------------------------

```python
  1 | import pytest
  2 | from dbtsl.api.shared.query_params import GroupByParam, GroupByType
  3 | import pyarrow as pa
  4 | 
  5 | from dbt_mcp.config.config import load_config
  6 | from dbt_mcp.semantic_layer.client import (
  7 |     DefaultSemanticLayerClientProvider,
  8 |     SemanticLayerFetcher,
  9 | )
 10 | from dbt_mcp.semantic_layer.types import OrderByParam
 11 | 
 12 | config = load_config()
 13 | 
 14 | 
 15 | @pytest.fixture
 16 | def semantic_layer_fetcher() -> SemanticLayerFetcher:
 17 |     assert config.semantic_layer_config_provider is not None
 18 |     return SemanticLayerFetcher(
 19 |         config_provider=config.semantic_layer_config_provider,
 20 |         client_provider=DefaultSemanticLayerClientProvider(
 21 |             config_provider=config.semantic_layer_config_provider,
 22 |         ),
 23 |     )
 24 | 
 25 | 
async def test_semantic_layer_list_metrics(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # Smoke test: the configured environment must expose at least one metric.
    metrics = await semantic_layer_fetcher.list_metrics()
    assert len(metrics) > 0
 31 | 
 32 | 
async def test_semantic_layer_list_dimensions(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # Any metric in the environment should have at least one queryable dimension.
    metrics = await semantic_layer_fetcher.list_metrics()
    dimensions = await semantic_layer_fetcher.get_dimensions(metrics=[metrics[0].name])
    assert len(dimensions) > 0
 39 | 
 40 | 
async def test_semantic_layer_query_metrics(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # Query a single metric grouped by metric_time at its default grain.
    result = await semantic_layer_fetcher.query_metrics(
        metrics=["revenue"],
        group_by=[
            GroupByParam(
                name="metric_time",
                type=GroupByType.TIME_DIMENSION,
                grain=None,
            )
        ],
    )
    assert result is not None
 55 | 
 56 | 
async def test_semantic_layer_query_metrics_invalid_query(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # NOTE(review): despite the name, this only asserts that a result object is
    # returned (query_metrics wraps errors rather than raising) -- confirm the
    # intent is "invalid queries still produce a response".
    result = await semantic_layer_fetcher.query_metrics(
        metrics=["food_revenue"],
        group_by=[
            GroupByParam(
                name="order_id__location__location_name",
                type=GroupByType.DIMENSION,
                grain=None,
            ),
            GroupByParam(
                name="metric_time",
                type=GroupByType.TIME_DIMENSION,
                grain="MONTH",
            ),
        ],
        order_by=[
            OrderByParam(
                name="metric_time",
                descending=True,
            ),
            OrderByParam(
                name="food_revenue",
                descending=True,
            ),
        ],
        limit=5,
    )
    assert result is not None
 87 | 
 88 | 
async def test_semantic_layer_query_metrics_with_group_by_grain(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # Same query as the basic case, but with an explicit daily grain.
    result = await semantic_layer_fetcher.query_metrics(
        metrics=["revenue"],
        group_by=[
            GroupByParam(
                name="metric_time",
                type=GroupByType.TIME_DIMENSION,
                grain="day",
            )
        ],
    )
    assert result is not None
103 | 
104 | 
async def test_semantic_layer_query_metrics_with_order_by(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # Ordering by the grouped time dimension, most recent first.
    result = await semantic_layer_fetcher.query_metrics(
        metrics=["revenue"],
        group_by=[
            GroupByParam(
                name="metric_time",
                type=GroupByType.TIME_DIMENSION,
                grain=None,
            )
        ],
        order_by=[OrderByParam(name="metric_time", descending=True)],
    )
    assert result is not None
120 | 
121 | 
async def test_semantic_layer_query_metrics_with_misspellings(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # "revehue" is a deliberate typo; presumably the response text suggests the
    # close match "revenue" -- verify against the fetcher's error handling.
    result = await semantic_layer_fetcher.query_metrics(["revehue"])
    assert result.result is not None
    assert "revenue" in result.result
128 | 
129 | 
async def test_semantic_layer_get_entities(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # The chosen metric must exist in the test environment for this to pass.
    entities = await semantic_layer_fetcher.get_entities(
        metrics=["count_dbt_copilot_requests"]
    )
    assert len(entities) > 0
137 | 
138 | 
async def test_semantic_layer_query_metrics_with_csv_formatter(
    semantic_layer_fetcher: SemanticLayerFetcher,
):
    # Custom formatter: render the Arrow result table as CSV instead of the default.
    def csv_formatter(table: pa.Table) -> str:
        return table.to_pandas().to_csv(index=False)

    result = await semantic_layer_fetcher.query_metrics(
        metrics=["revenue"],
        group_by=[
            GroupByParam(
                name="metric_time",
                type=GroupByType.TIME_DIMENSION,
                grain=None,
            )
        ],
        result_formatter=csv_formatter,
    )
    assert result.result is not None
    assert "revenue" in result.result.casefold()
    # CSV format should have comma separators
    assert "," in result.result
160 | 
```

--------------------------------------------------------------------------------
/examples/aws_strands_agent/dbt_data_scientist/tools/dbt_mcp.py:
--------------------------------------------------------------------------------

```python
  1 | """dbt MCP Tool - Remote dbt MCP server connection for AWS Bedrock Agent Core."""
  2 | import os
  3 | from strands import Agent, tool
  4 | from mcp import ClientSession
  5 | from dotenv import load_dotenv
  6 | from mcp.client.streamable_http import streamablehttp_client
  7 | from strands.tools.mcp.mcp_client import MCPClient
  8 | 
# Load environment variables
load_dotenv()
# Connection settings, read once at import time; any unset variable stays None.
DBT_MCP_URL = os.environ.get("DBT_MCP_URL")
DBT_USER_ID = os.environ.get("DBT_USER_ID")
DBT_PROD_ENV_ID = os.environ.get("DBT_PROD_ENV_ID")
DBT_DEV_ENV_ID = os.environ.get("DBT_DEV_ENV_ID")
DBT_ACCOUNT_ID = os.environ.get("DBT_ACCOUNT_ID")
DBT_TOKEN = os.environ.get("DBT_TOKEN")

# System prompt for the agent fronting the dbt MCP server.
DBT_MCP_AGENT_SYSTEM_PROMPT = """
You are a dbt MCP server expert, a specialized assistant for dbt MCP server analysis and troubleshooting. Your capabilities include:

When asked to 'view features available on the dbt MCP server', or 'ask about a specific tool or function', inspect the dbt MCP server and return a result based on the available tools and functions.
"""
 23 | 
 24 | # Create MCP client once at module level
 25 | def create_dbt_mcp_client():
 26 |     """Create the dbt MCP client with proper configuration."""
 27 |     load_dotenv()
 28 |     
 29 |     if not DBT_MCP_URL:
 30 |         raise ValueError("DBT_MCP_URL environment variable is required")
 31 |     
 32 |     return MCPClient(lambda: streamablehttp_client(
 33 |         url=DBT_MCP_URL,
 34 |         headers={
 35 |             "x-dbt-user-id": DBT_USER_ID,
 36 |             "x-dbt-prod-environment-id": DBT_PROD_ENV_ID,
 37 |             "x-dbt-dev-environment-id": DBT_DEV_ENV_ID,
 38 |             "x-dbt-account-id": DBT_ACCOUNT_ID,
 39 |             "Authorization": f"token {DBT_TOKEN}",
 40 |         },
 41 |     ))
 42 | 
# Global MCP client instance
# NOTE(review): constructed at import time, so importing this module raises
# ValueError when DBT_MCP_URL is unset -- confirm that is acceptable.
dbt_mcp_client = create_dbt_mcp_client()
 45 | 
 46 | @tool
 47 | def dbt_mcp_tool(query: str) -> str:
 48 |     """
 49 |     Connects to remote dbt MCP server and executes queries.
 50 |     
 51 |     Args:
 52 |         query: The user's question about dbt MCP server functionality
 53 |         
 54 |     Returns:
 55 |         String response with dbt MCP server results
 56 |     """
 57 |     try:
 58 |         print(f"Connecting to dbt MCP server for query: {query}")
 59 |         
 60 |         with dbt_mcp_client:
 61 |             # Get available tools from MCP server
 62 |             tools = dbt_mcp_client.list_tools_sync()
 63 |             
 64 |             if not tools:
 65 |                 return "No tools available on the dbt MCP server."
 66 |             
 67 |             # If user asks to list tools, return them
 68 |             if "list" in query.lower() and ("tool" in query.lower() or "feature" in query.lower()):
 69 |                 tool_list = "\n".join([f"- {tool.name}: {tool.description}" for tool in tools])
 70 |                 return f"Available dbt MCP tools:\n{tool_list}"
 71 |             
 72 |             # For other queries, try to find and execute the most relevant tool
 73 |             # This is a simplified approach - in practice you'd want more sophisticated routing
 74 |             if tools:
 75 |                 # Try to call the first available tool as an example
 76 |                 first_tool = tools[0]
 77 |                 try:
 78 |                     result = dbt_mcp_client.call_tool_sync(first_tool.name, {})
 79 |                     return f"Executed {first_tool.name}: {result}"
 80 |                 except Exception as e:
 81 |                     return f"Error executing {first_tool.name}: {str(e)}"
 82 |             
 83 |             return f"Found {len(tools)} tools on dbt MCP server. Use 'list tools' to see them."
 84 |     
 85 |     except Exception as e:
 86 |         return f"Error connecting to dbt MCP server: {str(e)}"
 87 | 
 88 | def test_connection():
 89 |     """Test function to verify MCP connectivity."""
 90 |     print("🧪 Testing dbt MCP connection...")
 91 |     
 92 |     try:
 93 |         with dbt_mcp_client:
 94 |             tools = dbt_mcp_client.list_tools_sync()
 95 |             print(f"✅ Successfully connected to dbt MCP server!")
 96 |             print(f"📋 Found {len(tools)} available tools:")
 97 |             
 98 |             for i, tool in enumerate(tools, 1):
 99 |                 print(f"  {i}. {tool.name}: {tool.description}")
100 |             
101 |             return True
102 |             
103 |     except Exception as e:
104 |         print(f"❌ Connection failed: {e}")
105 |         return False
106 | 
# Test the connection when this module is run directly
if __name__ == "__main__":
    # Fix: `sys` was used below without ever being imported, so every
    # script run ended in NameError instead of a clean exit code.
    import sys

    print("🔌 dbt MCP Server Connection Test")
    print("=" * 40)

    # Check environment variables
    load_dotenv()
    required_vars = ["DBT_MCP_URL", "DBT_TOKEN", "DBT_USER_ID", "DBT_PROD_ENV_ID"]
    missing_vars = [var for var in required_vars if not os.environ.get(var)]

    if missing_vars:
        print(f"❌ Missing required environment variables: {', '.join(missing_vars)}")
        print("Please set these in your .env file or environment.")
        sys.exit(1)

    # Test connection
    success = test_connection()

    if success:
        print("\n🎉 MCP connection test passed!")
        print("You can now run the agent: python dbt_data_scientist/agent.py")
    else:
        print("\n💥 MCP connection test failed!")
        print("Please check your configuration and try again.")

    sys.exit(0 if success else 1)
```

--------------------------------------------------------------------------------
/src/dbt_mcp/semantic_layer/tools.py:
--------------------------------------------------------------------------------

```python
  1 | import logging
  2 | from collections.abc import Sequence
  3 | 
  4 | from dbtsl.api.shared.query_params import GroupByParam
  5 | from mcp.server.fastmcp import FastMCP
  6 | 
  7 | from dbt_mcp.config.config_providers import (
  8 |     ConfigProvider,
  9 |     SemanticLayerConfig,
 10 | )
 11 | from dbt_mcp.prompts.prompts import get_prompt
 12 | from dbt_mcp.semantic_layer.client import (
 13 |     SemanticLayerClientProvider,
 14 |     SemanticLayerFetcher,
 15 | )
 16 | from dbt_mcp.semantic_layer.types import (
 17 |     DimensionToolResponse,
 18 |     EntityToolResponse,
 19 |     GetMetricsCompiledSqlSuccess,
 20 |     MetricToolResponse,
 21 |     OrderByParam,
 22 |     QueryMetricsSuccess,
 23 | )
 24 | from dbt_mcp.tools.annotations import create_tool_annotations
 25 | from dbt_mcp.tools.definitions import ToolDefinition
 26 | from dbt_mcp.tools.register import register_tools
 27 | from dbt_mcp.tools.tool_names import ToolName
 28 | 
 29 | logger = logging.getLogger(__name__)
 30 | 
 31 | 
def create_sl_tool_definitions(
    config_provider: ConfigProvider[SemanticLayerConfig],
    client_provider: SemanticLayerClientProvider,
) -> list[ToolDefinition]:
    # Factory for the semantic-layer tool set. Each inner function closes over
    # one shared SemanticLayerFetcher; tool descriptions come from prompt files.
    semantic_layer_fetcher = SemanticLayerFetcher(
        config_provider=config_provider,
        client_provider=client_provider,
    )

    # List available metrics, optionally filtered by a search string.
    async def list_metrics(search: str | None = None) -> list[MetricToolResponse]:
        return await semantic_layer_fetcher.list_metrics(search=search)

    # Dimensions queryable for the given metrics.
    async def get_dimensions(
        metrics: list[str], search: str | None = None
    ) -> list[DimensionToolResponse]:
        return await semantic_layer_fetcher.get_dimensions(
            metrics=metrics, search=search
        )

    # Entities associated with the given metrics.
    async def get_entities(
        metrics: list[str], search: str | None = None
    ) -> list[EntityToolResponse]:
        return await semantic_layer_fetcher.get_entities(metrics=metrics, search=search)

    # Run a metrics query; the success/error union is flattened to a plain
    # string because MCP tools return text.
    async def query_metrics(
        metrics: list[str],
        group_by: list[GroupByParam] | None = None,
        order_by: list[OrderByParam] | None = None,
        where: str | None = None,
        limit: int | None = None,
    ) -> str:
        result = await semantic_layer_fetcher.query_metrics(
            metrics=metrics,
            group_by=group_by,
            order_by=order_by,
            where=where,
            limit=limit,
        )
        if isinstance(result, QueryMetricsSuccess):
            return result.result
        else:
            return result.error

    # Compile (but do not run) the SQL for a metrics query; same flattening.
    async def get_metrics_compiled_sql(
        metrics: list[str],
        group_by: list[GroupByParam] | None = None,
        order_by: list[OrderByParam] | None = None,
        where: str | None = None,
        limit: int | None = None,
    ) -> str:
        result = await semantic_layer_fetcher.get_metrics_compiled_sql(
            metrics=metrics,
            group_by=group_by,
            order_by=order_by,
            where=where,
            limit=limit,
        )
        if isinstance(result, GetMetricsCompiledSqlSuccess):
            return result.sql
        else:
            return result.error

    # All tools are read-only, non-destructive, and idempotent.
    return [
        ToolDefinition(
            description=get_prompt("semantic_layer/list_metrics"),
            fn=list_metrics,
            annotations=create_tool_annotations(
                title="List Metrics",
                read_only_hint=True,
                destructive_hint=False,
                idempotent_hint=True,
            ),
        ),
        ToolDefinition(
            description=get_prompt("semantic_layer/get_dimensions"),
            fn=get_dimensions,
            annotations=create_tool_annotations(
                title="Get Dimensions",
                read_only_hint=True,
                destructive_hint=False,
                idempotent_hint=True,
            ),
        ),
        ToolDefinition(
            description=get_prompt("semantic_layer/get_entities"),
            fn=get_entities,
            annotations=create_tool_annotations(
                title="Get Entities",
                read_only_hint=True,
                destructive_hint=False,
                idempotent_hint=True,
            ),
        ),
        ToolDefinition(
            description=get_prompt("semantic_layer/query_metrics"),
            fn=query_metrics,
            annotations=create_tool_annotations(
                title="Query Metrics",
                read_only_hint=True,
                destructive_hint=False,
                idempotent_hint=True,
            ),
        ),
        ToolDefinition(
            description=get_prompt("semantic_layer/get_metrics_compiled_sql"),
            fn=get_metrics_compiled_sql,
            annotations=create_tool_annotations(
                title="Compile SQL",
                read_only_hint=True,
                destructive_hint=False,
                idempotent_hint=True,
            ),
        ),
    ]
146 | 
147 | 
def register_sl_tools(
    dbt_mcp: FastMCP,
    config_provider: ConfigProvider[SemanticLayerConfig],
    client_provider: SemanticLayerClientProvider,
    exclude_tools: Sequence[ToolName] = (),
) -> None:
    """Register the semantic-layer tools on the given FastMCP server.

    Args:
        dbt_mcp: Server to register the tools on.
        config_provider: Supplies semantic layer connection configuration.
        client_provider: Supplies semantic layer API clients.
        exclude_tools: Tool names to skip during registration. Default is an
            immutable tuple -- a mutable ``[]`` default is a shared-state hazard.
    """
    register_tools(
        dbt_mcp,
        create_sl_tool_definitions(config_provider, client_provider),
        exclude_tools,
    )
159 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/lsp/lsp_client.py:
--------------------------------------------------------------------------------

```python
  1 | """LSP Client for dbt Fusion.
  2 | 
  3 | This module provides a high-level client interface for interacting with the
  4 | dbt Fusion LSP server, wrapping low-level JSON-RPC communication with
  5 | typed methods for dbt-specific operations.
  6 | """
  7 | 
  8 | import asyncio
  9 | import logging
 10 | from typing import Any
 11 | 
 12 | from dbt_mcp.lsp.lsp_connection import LSPConnection, LspEventName
 13 | 
 14 | logger = logging.getLogger(__name__)
 15 | 
 16 | # Default timeout for LSP operations (in seconds)
 17 | DEFAULT_LSP_TIMEOUT = 30
 18 | 
 19 | 
 20 | class LSPClient:
 21 |     """High-level client for dbt Fusion LSP operations.
 22 | 
 23 |     This class provides typed methods for dbt-specific LSP operations
 24 |     such as column lineage, model references, and more.
 25 |     """
 26 | 
 27 |     def __init__(self, lsp_connection: LSPConnection, timeout: float | None = None):
 28 |         """Initialize the dbt LSP client.
 29 | 
 30 |         Args:
 31 |             lsp_connection: The LSP connection to use
 32 |             timeout: Default timeout for LSP operations in seconds. If not specified,
 33 |                     uses DEFAULT_LSP_TIMEOUT (30 seconds).
 34 |         """
 35 |         self.lsp_connection = lsp_connection
 36 |         self.timeout = timeout if timeout is not None else DEFAULT_LSP_TIMEOUT
 37 | 
 38 |     async def compile(self, timeout: float | None = None) -> dict[str, Any]:
 39 |         """Compile the dbt project.
 40 | 
 41 |         Returns the compilation log as dictionary.
 42 |         """
 43 |         # Register for the notification BEFORE sending the command to avoid race conditions
 44 |         compile_complete_future = self.lsp_connection.wait_for_notification(
 45 |             LspEventName.compileComplete
 46 |         )
 47 | 
 48 |         async with asyncio.timeout(timeout or self.timeout):
 49 |             await self.lsp_connection.send_request(
 50 |                 "workspace/executeCommand",
 51 |                 {"command": "dbt.compileLsp", "arguments": []},
 52 |             )
 53 | 
 54 |             # wait for complation to complete
 55 |             result = await compile_complete_future
 56 | 
 57 |             if "error" in result and result["error"] is not None:
 58 |                 return {"error": result["error"]}
 59 | 
 60 |             if "log" in result and result["log"] is not None:
 61 |                 return {"log": result["log"]}
 62 | 
 63 |             return result
 64 | 
 65 |     async def get_column_lineage(
 66 |         self,
 67 |         model_id: str,
 68 |         column_name: str,
 69 |         timeout: float | None = None,
 70 |     ) -> dict[str, Any]:
 71 |         """Get column lineage information for a specific model column.
 72 | 
 73 |         Args:
 74 |             model_id: The dbt model identifier
 75 |             column_name: The column name to trace lineage for
 76 | 
 77 |         Returns:
 78 |             Dictionary containing lineage information with 'nodes' key
 79 |         """
 80 | 
 81 |         if not self.lsp_connection.state.compiled:
 82 |             await self.compile()
 83 | 
 84 |         logger.info(f"Requesting column lineage for {model_id}.{column_name}")
 85 | 
 86 |         selector = f"+column:{model_id}.{column_name.upper()}+"
 87 |         async with asyncio.timeout(timeout or self.timeout):
 88 |             result = await self.lsp_connection.send_request(
 89 |                 "workspace/executeCommand",
 90 |                 {"command": "dbt.listNodes", "arguments": [selector]},
 91 |             )
 92 |             if not result:
 93 |                 return {"error": "No result from LSP"}
 94 | 
 95 |             if "error" in result and result["error"] is not None:
 96 |                 return {"error": result["error"]}
 97 | 
 98 |             if "nodes" in result and result["nodes"] is not None:
 99 |                 return {"nodes": result["nodes"]}
100 | 
101 |             return result
102 | 
103 |     async def get_model_lineage(self, model_selector: str) -> dict[str, Any]:
104 |         nodes = []
105 |         response = await self._list_nodes(model_selector)
106 | 
107 |         if not response:
108 |             return {"error": "No result from LSP"}
109 | 
110 |         if "error" in response and response["error"] is not None:
111 |             return {"error": response["error"]}
112 | 
113 |         if "nodes" in response and response["nodes"] is not None:
114 |             for node in response["nodes"]:
115 |                 nodes.append(
116 |                     {
117 |                         "depends_on": node["depends_on"],
118 |                         "name": node["name"],
119 |                         "unique_id": node["unique_id"],
120 |                         "path": node["path"],
121 |                     }
122 |                 )
123 | 
124 |         return {"nodes": nodes}
125 | 
126 |     async def _list_nodes(
127 |         self, model_selector: str, timeout: float | None = None
128 |     ) -> dict[str, Any]:
129 |         """List nodes in the dbt project."""
130 | 
131 |         if not self.lsp_connection.state.compiled:
132 |             await self.compile()
133 | 
134 |         logger.info("Listing nodes", extra={"model_selector": model_selector})
135 |         async with asyncio.timeout(timeout or self.timeout):
136 |             result = await self.lsp_connection.send_request(
137 |                 "workspace/executeCommand",
138 |                 {"command": "dbt.listNodes", "arguments": [model_selector]},
139 |             )
140 | 
141 |             if not result:
142 |                 return {"error": "No result from LSP"}
143 | 
144 |             if "error" in result and result["error"] is not None:
145 |                 return {"error": result["error"]}
146 | 
147 |             if "nodes" in result and result["nodes"] is not None:
148 |                 return {"nodes": result["nodes"]}
149 | 
150 |             return result
151 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/config/config_providers.py:
--------------------------------------------------------------------------------

```python
  1 | from abc import ABC, abstractmethod
  2 | from dataclasses import dataclass
  3 | 
  4 | from dbt_mcp.config.headers import (
  5 |     AdminApiHeadersProvider,
  6 |     DiscoveryHeadersProvider,
  7 |     HeadersProvider,
  8 |     SemanticLayerHeadersProvider,
  9 |     SqlHeadersProvider,
 10 | )
 11 | from dbt_mcp.config.settings import CredentialsProvider
 12 | 
 13 | 
@dataclass
class SemanticLayerConfig:
    """Connection settings for the dbt Semantic Layer GraphQL API."""

    url: str  # full GraphQL endpoint URL
    host: str  # semantic layer host name
    prod_environment_id: int  # dbt production environment id
    token: str  # dbt API token
    headers_provider: HeadersProvider  # supplies auth headers per request
 21 | 
 22 | 
@dataclass
class DiscoveryConfig:
    """Connection settings for the dbt Discovery (metadata) GraphQL API."""

    url: str  # full GraphQL endpoint URL
    headers_provider: HeadersProvider  # supplies auth headers per request
    environment_id: int  # dbt environment to query
 28 | 
 29 | 
@dataclass
class AdminApiConfig:
    """Connection settings for the dbt Administrative API."""

    url: str  # base URL of the dbt platform host
    headers_provider: HeadersProvider  # supplies auth headers per request
    account_id: int  # dbt account id
    prod_environment_id: int | None = None  # production environment id, when known
 36 | 
 37 | 
@dataclass
class SqlConfig:
    """Connection settings for the remotely-hosted SQL MCP tools."""

    user_id: int  # dbt user id
    dev_environment_id: int  # development environment id
    prod_environment_id: int  # production environment id
    url: str  # remote MCP endpoint URL
    headers_provider: HeadersProvider  # supplies auth headers per request
 45 | 
 46 | 
class ConfigProvider[ConfigType](ABC):
    """Abstract provider that asynchronously resolves a service configuration."""

    @abstractmethod
    async def get_config(self) -> ConfigType: ...
 50 | 
 51 | 
 52 | class DefaultSemanticLayerConfigProvider(ConfigProvider[SemanticLayerConfig]):
 53 |     def __init__(self, credentials_provider: CredentialsProvider):
 54 |         self.credentials_provider = credentials_provider
 55 | 
 56 |     async def get_config(self) -> SemanticLayerConfig:
 57 |         settings, token_provider = await self.credentials_provider.get_credentials()
 58 |         assert (
 59 |             settings.actual_host
 60 |             and settings.actual_prod_environment_id
 61 |             and settings.dbt_token
 62 |         )
 63 |         is_local = settings.actual_host and settings.actual_host.startswith("localhost")
 64 |         if is_local:
 65 |             host = settings.actual_host
 66 |         elif settings.actual_host_prefix:
 67 |             host = (
 68 |                 f"{settings.actual_host_prefix}.semantic-layer.{settings.actual_host}"
 69 |             )
 70 |         else:
 71 |             host = f"semantic-layer.{settings.actual_host}"
 72 |         assert host is not None
 73 | 
 74 |         return SemanticLayerConfig(
 75 |             url=f"http://{host}" if is_local else f"https://{host}" + "/api/graphql",
 76 |             host=host,
 77 |             prod_environment_id=settings.actual_prod_environment_id,
 78 |             token=settings.dbt_token,
 79 |             headers_provider=SemanticLayerHeadersProvider(
 80 |                 token_provider=token_provider
 81 |             ),
 82 |         )
 83 | 
 84 | 
 85 | class DefaultDiscoveryConfigProvider(ConfigProvider[DiscoveryConfig]):
 86 |     def __init__(self, credentials_provider: CredentialsProvider):
 87 |         self.credentials_provider = credentials_provider
 88 | 
 89 |     async def get_config(self) -> DiscoveryConfig:
 90 |         settings, token_provider = await self.credentials_provider.get_credentials()
 91 |         assert (
 92 |             settings.actual_host
 93 |             and settings.actual_prod_environment_id
 94 |             and settings.dbt_token
 95 |         )
 96 |         if settings.actual_host_prefix:
 97 |             url = f"https://{settings.actual_host_prefix}.metadata.{settings.actual_host}/graphql"
 98 |         else:
 99 |             url = f"https://metadata.{settings.actual_host}/graphql"
100 | 
101 |         return DiscoveryConfig(
102 |             url=url,
103 |             headers_provider=DiscoveryHeadersProvider(token_provider=token_provider),
104 |             environment_id=settings.actual_prod_environment_id,
105 |         )
106 | 
107 | 
class DefaultAdminApiConfigProvider(ConfigProvider[AdminApiConfig]):
    """Builds an AdminApiConfig from resolved dbt platform credentials."""

    def __init__(self, credentials_provider: CredentialsProvider):
        self.credentials_provider = credentials_provider

    async def get_config(self) -> AdminApiConfig:
        """Resolve credentials and derive the Admin API base URL."""
        settings, token_provider = await self.credentials_provider.get_credentials()
        assert settings.dbt_token and settings.actual_host and settings.dbt_account_id
        # Multi-cell deployments carry a host prefix; single-cell ones do not.
        prefix = (
            f"{settings.actual_host_prefix}." if settings.actual_host_prefix else ""
        )

        return AdminApiConfig(
            url=f"https://{prefix}{settings.actual_host}",
            headers_provider=AdminApiHeadersProvider(token_provider=token_provider),
            account_id=settings.dbt_account_id,
            prod_environment_id=settings.actual_prod_environment_id,
        )
126 | 
127 | 
class DefaultSqlConfigProvider(ConfigProvider[SqlConfig]):
    """Builds a SqlConfig from resolved dbt platform credentials."""

    def __init__(self, credentials_provider: CredentialsProvider):
        self.credentials_provider = credentials_provider

    async def get_config(self) -> SqlConfig:
        """Resolve credentials and derive the remote SQL MCP endpoint."""
        settings, token_provider = await self.credentials_provider.get_credentials()
        assert (
            settings.dbt_user_id
            and settings.dbt_token
            and settings.dbt_dev_env_id
            and settings.actual_prod_environment_id
            and settings.actual_host
        )
        # Local development talks plain HTTP on a shorter path.
        if settings.actual_host.startswith("localhost"):
            scheme, path = "http://", "/v1/mcp/"
        else:
            scheme, path = "https://", "/api/ai/v1/mcp/"
        prefix = (
            f"{settings.actual_host_prefix}." if settings.actual_host_prefix else ""
        )

        return SqlConfig(
            user_id=settings.dbt_user_id,
            dev_environment_id=settings.dbt_dev_env_id,
            prod_environment_id=settings.actual_prod_environment_id,
            url=f"{scheme}{prefix}{settings.actual_host}{path}",
            headers_provider=SqlHeadersProvider(token_provider=token_provider),
        )
156 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/sql/tools.py:
--------------------------------------------------------------------------------

```python
  1 | import logging
  2 | from collections.abc import Sequence
  3 | from contextlib import AsyncExitStack
  4 | from typing import (
  5 |     Annotated,
  6 |     Any,
  7 | )
  8 | 
  9 | from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 10 | from mcp import ClientSession
 11 | from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client
 12 | from mcp.server.fastmcp import FastMCP
 13 | from mcp.server.fastmcp.tools.base import Tool as InternalTool
 14 | from mcp.server.fastmcp.utilities.func_metadata import (
 15 |     ArgModelBase,
 16 |     FuncMetadata,
 17 |     _get_typed_annotation,
 18 | )
 19 | from mcp.shared.message import SessionMessage
 20 | from mcp.types import (
 21 |     ContentBlock,
 22 |     Tool,
 23 | )
 24 | from pydantic import Field, WithJsonSchema, create_model
 25 | from pydantic.fields import FieldInfo
 26 | from pydantic_core import PydanticUndefined
 27 | 
 28 | from dbt_mcp.config.config_providers import ConfigProvider, SqlConfig
 29 | from dbt_mcp.errors import RemoteToolError
 30 | from dbt_mcp.tools.tool_names import ToolName
 31 | from dbt_mcp.tools.toolsets import Toolset, toolsets
 32 | 
 33 | logger = logging.getLogger(__name__)
 34 | 
 35 | 
 36 | # Based on this: https://github.com/modelcontextprotocol/python-sdk/blob/9ae4df85fbab97bf476ddd160b766ca4c208cd13/src/mcp/server/fastmcp/utilities/func_metadata.py#L105
 37 | def get_remote_tool_fn_metadata(tool: Tool) -> FuncMetadata:
 38 |     dynamic_pydantic_model_params: dict[str, Any] = {}
 39 |     for key in tool.inputSchema["properties"]:
 40 |         # Remote tools shouldn't have type annotations or default values
 41 |         # for their arguments. So, we set them to defaults.
 42 |         field_info = FieldInfo.from_annotated_attribute(
 43 |             annotation=_get_typed_annotation(
 44 |                 annotation=Annotated[
 45 |                     Any,
 46 |                     Field(),
 47 |                     WithJsonSchema({"title": key, "type": "string"}),
 48 |                 ],
 49 |                 globalns={},
 50 |             ),
 51 |             default=PydanticUndefined,
 52 |         )
 53 |         dynamic_pydantic_model_params[key] = (field_info.annotation, None)
 54 |     return FuncMetadata(
 55 |         arg_model=create_model(
 56 |             f"{tool.name}Arguments",
 57 |             **dynamic_pydantic_model_params,
 58 |             __base__=ArgModelBase,
 59 |         )
 60 |     )
 61 | 
 62 | 
 63 | async def _get_sql_tools(session: ClientSession) -> list[Tool]:
 64 |     try:
 65 |         sql_tool_names = {t.value for t in toolsets[Toolset.SQL]}
 66 |         return [
 67 |             t for t in (await session.list_tools()).tools if t.name in sql_tool_names
 68 |         ]
 69 |     except Exception as e:
 70 |         logger.error(f"Error getting SQL tools: {e}")
 71 |         return []
 72 | 
 73 | 
class SqlToolsManager:
    """Owns the async contexts backing remote SQL MCP sessions.

    NOTE: the exit stack is shared at class level, so every instance pools
    its contexts into the same stack and one ``close()`` tears them all down.
    """

    _stack = AsyncExitStack()

    async def get_remote_mcp_session(
        self, url: str, headers: dict[str, str]
    ) -> ClientSession:
        """Open a streamable-HTTP MCP session kept alive on the shared stack.

        Args:
            url: Remote MCP endpoint URL.
            headers: HTTP headers (auth and dbt context) for the connection.

        Returns:
            A ClientSession whose lifetime is managed by the shared stack.
        """
        streamablehttp_client_context: tuple[
            MemoryObjectReceiveStream[SessionMessage | Exception],
            MemoryObjectSendStream[SessionMessage],
            GetSessionIdCallback,
        ] = await self._stack.enter_async_context(
            streamablehttp_client(
                url=url,
                headers=headers,
            )
        )
        # Third element is the session-id callback, unused here.
        read_stream, write_stream, _ = streamablehttp_client_context
        return await self._stack.enter_async_context(
            ClientSession(read_stream, write_stream)
        )

    @classmethod
    async def close(cls) -> None:
        """Close every context opened by any instance of this class."""
        await cls._stack.aclose()
 98 | 
 99 | 
async def register_sql_tools(
    dbt_mcp: FastMCP,
    config_provider: ConfigProvider[SqlConfig],
    exclude_tools: Sequence[ToolName] = (),
) -> None:
    """
    Register SQL MCP tools.

    SQL tools are hosted remotely, so their definitions aren't found in this repo.

    Args:
        dbt_mcp: The FastMCP server to register tools on.
        config_provider: Provider for the SQL tools configuration.
        exclude_tools: Tool names to skip during registration. Defaults to
            an immutable empty tuple (avoids the mutable-default pitfall).
    """
    config = await config_provider.get_config()
    headers = {
        "x-dbt-prod-environment-id": str(config.prod_environment_id),
        "x-dbt-dev-environment-id": str(config.dev_environment_id),
        "x-dbt-user-id": str(config.user_id),
    } | config.headers_provider.get_headers()
    sql_tools_manager = SqlToolsManager()
    session = await sql_tools_manager.get_remote_mcp_session(config.url, headers)
    await session.initialize()
    sql_tools = await _get_sql_tools(session)
    logger.info(f"Loaded sql tools: {', '.join([tool.name for tool in sql_tools])}")
    # Hoisted out of the loop: the excluded-name set is loop-invariant.
    excluded_names = {tool_name.value.lower() for tool_name in exclude_tools}
    for tool in sql_tools:
        if tool.name.lower() in excluded_names:
            continue

        # Create a new function using a factory to avoid late-binding
        # closure issues (each tool needs its own bound tool_name).
        def create_tool_function(tool_name: str):
            async def tool_function(*args, **kwargs) -> Sequence[ContentBlock]:
                tool_call_result = await session.call_tool(
                    tool_name,
                    kwargs,
                )
                if tool_call_result.isError:
                    raise RemoteToolError(
                        f"Tool {tool_name} reported an error: "
                        + f"{tool_call_result.content}"
                    )
                return tool_call_result.content

            return tool_function

        dbt_mcp._tool_manager._tools[tool.name] = InternalTool(
            fn=create_tool_function(tool.name),
            title=tool.title,
            name=tool.name,
            annotations=tool.annotations,
            description=tool.description or "",
            parameters=tool.inputSchema,
            fn_metadata=get_remote_tool_fn_metadata(tool),
            is_async=True,
            context_kwarg=None,
        )
152 | 
```

--------------------------------------------------------------------------------
/tests/unit/dbt_admin/test_error_fetcher.py:
--------------------------------------------------------------------------------

```python
  1 | import json
  2 | from unittest.mock import AsyncMock, Mock
  3 | 
  4 | import pytest
  5 | 
  6 | from dbt_mcp.config.config_providers import AdminApiConfig
  7 | from dbt_mcp.dbt_admin.run_results_errors.parser import ErrorFetcher
  8 | 
  9 | 
class MockHeadersProvider:
    """Mock headers provider for testing."""

    def get_headers(self) -> dict[str, str]:
        """Return a static bearer-token header, no real credentials needed."""
        return {"Authorization": "Bearer test_token"}
 15 | 
 16 | 
@pytest.fixture
def admin_config():
    """Admin API config for testing."""
    # MockHeadersProvider stands in for a real HeadersProvider so no
    # credentials are required.
    return AdminApiConfig(
        account_id=12345,
        headers_provider=MockHeadersProvider(),
        url="https://cloud.getdbt.com",
    )
 25 | 
 26 | 
@pytest.fixture
def mock_client():
    """Base mock client - behavior configured per test."""
    # Each test attaches its own AsyncMock methods to this bare Mock.
    return Mock()
 31 | 
 32 | 
 33 | @pytest.mark.parametrize(
 34 |     "run_details,artifact_responses,expected_step_count,expected_error_messages",
 35 |     [
 36 |         # Cancelled run
 37 |         (
 38 |             {
 39 |                 "id": 300,
 40 |                 "status": 30,
 41 |                 "is_cancelled": True,
 42 |                 "finished_at": "2024-01-01T09:00:00Z",
 43 |                 "run_steps": [],
 44 |             },
 45 |             [],
 46 |             1,
 47 |             ["Job run was cancelled"],
 48 |         ),
 49 |         # Source freshness fails (doesn't stop job) + model error downstream
 50 |         (
 51 |             {
 52 |                 "id": 400,
 53 |                 "status": 20,
 54 |                 "is_cancelled": False,
 55 |                 "finished_at": "2024-01-01T10:00:00Z",
 56 |                 "run_steps": [
 57 |                     {
 58 |                         "index": 1,
 59 |                         "name": "Source freshness",
 60 |                         "status": 20,
 61 |                         "finished_at": "2024-01-01T09:30:00Z",
 62 |                     },
 63 |                     {
 64 |                         "index": 2,
 65 |                         "name": "Invoke dbt with `dbt build`",
 66 |                         "status": 20,
 67 |                         "finished_at": "2024-01-01T10:00:00Z",
 68 |                     },
 69 |                 ],
 70 |             },
 71 |             [
 72 |                 None,  # Source freshness artifact not available
 73 |                 {
 74 |                     "results": [
 75 |                         {
 76 |                             "unique_id": "model.test_model",
 77 |                             "status": "error",
 78 |                             "message": "Model compilation failed",
 79 |                             "relation_name": "analytics.test_model",
 80 |                         }
 81 |                     ],
 82 |                     "args": {"target": "prod"},
 83 |                 },
 84 |             ],
 85 |             2,
 86 |             ["Source freshness error - returning logs", "Model compilation failed"],
 87 |         ),
 88 |     ],
 89 | )
 90 | async def test_error_scenarios(
 91 |     mock_client,
 92 |     admin_config,
 93 |     run_details,
 94 |     artifact_responses,
 95 |     expected_step_count,
 96 |     expected_error_messages,
 97 | ):
 98 |     """Test various error scenarios with parametrized data."""
 99 |     # Map step_index to run_results_content
100 |     step_index_to_run_results = {}
101 |     for i, failed_step in enumerate(run_details.get("run_steps", [])):
102 |         if i < len(artifact_responses):
103 |             step_index = failed_step["index"]
104 |             step_index_to_run_results[step_index] = artifact_responses[i]
105 | 
106 |     async def mock_get_artifact(account_id, run_id, artifact_path, step=None):
107 |         run_results_content = step_index_to_run_results.get(step)
108 |         if run_results_content is None:
109 |             raise Exception("Artifact not available")
110 |         return json.dumps(run_results_content)
111 | 
112 |     mock_client.get_job_run_artifact = AsyncMock(side_effect=mock_get_artifact)
113 | 
114 |     error_fetcher = ErrorFetcher(
115 |         run_id=run_details["id"],
116 |         run_details=run_details,
117 |         client=mock_client,
118 |         admin_api_config=admin_config,
119 |     )
120 | 
121 |     result = await error_fetcher.analyze_run_errors()
122 | 
123 |     assert len(result["failed_steps"]) == expected_step_count
124 |     for i, expected_msg in enumerate(expected_error_messages):
125 |         assert expected_msg in result["failed_steps"][i]["errors"][0]["message"]
126 | 
127 | 
async def test_schema_validation_failure(mock_client, admin_config):
    """Test handling of run_results.json schema changes - should fallback to logs."""
    failed_step = {
        "index": 1,
        "name": "Invoke dbt with `dbt build`",
        "status": 20,
        "finished_at": "2024-01-01T11:00:00Z",
        "logs": "Model compilation failed due to missing table",
    }
    run_details = {
        "id": 400,
        "status": 20,
        "is_cancelled": False,
        "finished_at": "2024-01-01T11:00:00Z",
        "run_steps": [failed_step],
    }

    # Return valid JSON but with missing required fields (schema mismatch)
    # Expected schema: {"results": [...], "args": {...}, "metadata": {...}}
    mock_client.get_job_run_artifact = AsyncMock(
        return_value='{"metadata": {"some": "value"}, "invalid_field": true}'
    )

    error_fetcher = ErrorFetcher(
        run_id=400,
        run_details=run_details,
        client=mock_client,
        admin_api_config=admin_config,
    )

    result = await error_fetcher.analyze_run_errors()

    # Should fallback to logs when schema validation fails
    failed_steps = result["failed_steps"]
    assert len(failed_steps) == 1
    first_error = failed_steps[0]["errors"][0]
    assert failed_steps[0]["step_name"] == "Invoke dbt with `dbt build`"
    assert "run_results.json not available" in first_error["message"]
    assert "Model compilation failed" in first_error["truncated_logs"]
167 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/mcp/server.py:
--------------------------------------------------------------------------------

```python
  1 | import logging
  2 | import time
  3 | import uuid
  4 | from collections.abc import AsyncIterator, Callable, Sequence
  5 | from contextlib import (
  6 |     AbstractAsyncContextManager,
  7 |     asynccontextmanager,
  8 | )
  9 | from typing import Any
 10 | 
 11 | from dbtlabs_vortex.producer import shutdown
 12 | from mcp.server.fastmcp import FastMCP
 13 | from mcp.server.lowlevel.server import LifespanResultT
 14 | from mcp.types import (
 15 |     ContentBlock,
 16 |     TextContent,
 17 | )
 18 | 
 19 | from dbt_mcp.config.config import Config
 20 | from dbt_mcp.dbt_admin.tools import register_admin_api_tools
 21 | from dbt_mcp.dbt_cli.tools import register_dbt_cli_tools
 22 | from dbt_mcp.dbt_codegen.tools import register_dbt_codegen_tools
 23 | from dbt_mcp.discovery.tools import register_discovery_tools
 24 | from dbt_mcp.semantic_layer.client import DefaultSemanticLayerClientProvider
 25 | from dbt_mcp.semantic_layer.tools import register_sl_tools
 26 | from dbt_mcp.sql.tools import SqlToolsManager, register_sql_tools
 27 | from dbt_mcp.tracking.tracking import DefaultUsageTracker, ToolCalledEvent, UsageTracker
 28 | from dbt_mcp.lsp.tools import cleanup_lsp_connection, register_lsp_tools
 29 | 
 30 | logger = logging.getLogger(__name__)
 31 | 
 32 | 
 33 | class DbtMCP(FastMCP):
 34 |     def __init__(
 35 |         self,
 36 |         config: Config,
 37 |         usage_tracker: UsageTracker,
 38 |         lifespan: Callable[["DbtMCP"], AbstractAsyncContextManager[LifespanResultT]],
 39 |         *args: Any,
 40 |         **kwargs: Any,
 41 |     ) -> None:
 42 |         super().__init__(*args, **kwargs, lifespan=lifespan)
 43 |         self.usage_tracker = usage_tracker
 44 |         self.config = config
 45 | 
 46 |     async def call_tool(
 47 |         self, name: str, arguments: dict[str, Any]
 48 |     ) -> Sequence[ContentBlock] | dict[str, Any]:
 49 |         logger.info(f"Calling tool: {name} with arguments: {arguments}")
 50 |         result = None
 51 |         start_time = int(time.time() * 1000)
 52 |         try:
 53 |             result = await super().call_tool(
 54 |                 name,
 55 |                 arguments,
 56 |             )
 57 |         except Exception as e:
 58 |             end_time = int(time.time() * 1000)
 59 |             logger.error(
 60 |                 f"Error calling tool: {name} with arguments: {arguments} "
 61 |                 + f"in {end_time - start_time}ms: {e}"
 62 |             )
 63 |             await self.usage_tracker.emit_tool_called_event(
 64 |                 tool_called_event=ToolCalledEvent(
 65 |                     tool_name=name,
 66 |                     arguments=arguments,
 67 |                     start_time_ms=start_time,
 68 |                     end_time_ms=end_time,
 69 |                     error_message=str(e),
 70 |                 ),
 71 |             )
 72 |             return [
 73 |                 TextContent(
 74 |                     type="text",
 75 |                     text=str(e),
 76 |                 )
 77 |             ]
 78 |         end_time = int(time.time() * 1000)
 79 |         logger.info(f"Tool {name} called successfully in {end_time - start_time}ms")
 80 |         await self.usage_tracker.emit_tool_called_event(
 81 |             tool_called_event=ToolCalledEvent(
 82 |                 tool_name=name,
 83 |                 arguments=arguments,
 84 |                 start_time_ms=start_time,
 85 |                 end_time_ms=end_time,
 86 |                 error_message=None,
 87 |             ),
 88 |         )
 89 |         return result
 90 | 
 91 | 
 92 | @asynccontextmanager
 93 | async def app_lifespan(server: DbtMCP) -> AsyncIterator[None]:
 94 |     logger.info("Starting MCP server")
 95 |     try:
 96 |         yield
 97 |     except Exception as e:
 98 |         logger.error(f"Error in MCP server: {e}")
 99 |         raise e
100 |     finally:
101 |         logger.info("Shutting down MCP server")
102 |         try:
103 |             await SqlToolsManager.close()
104 |         except Exception:
105 |             logger.exception("Error closing SQL tools manager")
106 |         try:
107 |             await cleanup_lsp_connection()
108 |         except Exception:
109 |             logger.exception("Error cleaning up LSP connection")
110 |         try:
111 |             shutdown()
112 |         except Exception:
113 |             logger.exception("Error shutting down MCP server")
114 | 
115 | 
async def create_dbt_mcp(config: Config) -> DbtMCP:
    """Build a DbtMCP server and register every tool group enabled in config."""
    dbt_mcp = DbtMCP(
        config=config,
        usage_tracker=DefaultUsageTracker(
            credentials_provider=config.credentials_provider,
            session_id=uuid.uuid4(),
        ),
        name="dbt",
        lifespan=app_lifespan,
    )
    disabled = config.disable_tools

    if config.semantic_layer_config_provider:
        logger.info("Registering semantic layer tools")
        register_sl_tools(
            dbt_mcp,
            config_provider=config.semantic_layer_config_provider,
            client_provider=DefaultSemanticLayerClientProvider(
                config_provider=config.semantic_layer_config_provider,
            ),
            exclude_tools=disabled,
        )

    if config.discovery_config_provider:
        logger.info("Registering discovery tools")
        register_discovery_tools(dbt_mcp, config.discovery_config_provider, disabled)

    if config.dbt_cli_config:
        logger.info("Registering dbt cli tools")
        register_dbt_cli_tools(dbt_mcp, config.dbt_cli_config, disabled)

    if config.dbt_codegen_config:
        logger.info("Registering dbt codegen tools")
        register_dbt_codegen_tools(dbt_mcp, config.dbt_codegen_config, disabled)

    if config.admin_api_config_provider:
        logger.info("Registering dbt admin API tools")
        register_admin_api_tools(dbt_mcp, config.admin_api_config_provider, disabled)

    if config.sql_config_provider:
        logger.info("Registering SQL tools")
        await register_sql_tools(dbt_mcp, config.sql_config_provider, disabled)

    if config.lsp_config:
        logger.info("Registering LSP tools")
        await register_lsp_tools(dbt_mcp, config.lsp_config, disabled)

    return dbt_mcp
171 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/lsp/tools.py:
--------------------------------------------------------------------------------

```python
  1 | import functools
  2 | import inspect
  3 | import logging
  4 | from collections.abc import Callable, Sequence
  5 | from typing import Any
  6 | 
  7 | from mcp.server.fastmcp import FastMCP
  8 | from pydantic import Field
  9 | 
 10 | from dbt_mcp.config.config import LspConfig
 11 | from dbt_mcp.lsp.lsp_binary_manager import dbt_lsp_binary_info
 12 | from dbt_mcp.lsp.lsp_client import LSPClient
 13 | from dbt_mcp.lsp.lsp_connection import LSPConnection
 14 | from dbt_mcp.prompts.prompts import get_prompt
 15 | from dbt_mcp.tools.annotations import create_tool_annotations
 16 | from dbt_mcp.tools.definitions import ToolDefinition
 17 | from dbt_mcp.tools.register import register_tools
 18 | from dbt_mcp.tools.tool_names import ToolName
 19 | 
 20 | logger = logging.getLogger(__name__)
 21 | 
# Module-level LSP connection, shared by every LSP tool so that a single
# LSP subprocess is reused across tool calls. Created lazily in
# list_lsp_tools() and torn down in cleanup_lsp_connection().
_lsp_connection: LSPConnection | None = None
 24 | 
 25 | 
 26 | async def register_lsp_tools(
 27 |     server: FastMCP,
 28 |     config: LspConfig,
 29 |     exclude_tools: Sequence[ToolName] | None = None,
 30 | ) -> None:
 31 |     register_tools(
 32 |         server,
 33 |         await list_lsp_tools(config),
 34 |         exclude_tools or [],
 35 |     )
 36 | 
 37 | 
async def list_lsp_tools(config: LspConfig) -> list[ToolDefinition]:
    """Build the LSP-backed tool definitions for the MCP server.

    Lazily creates the module-level LSP connection object (the subprocess
    itself is only started on first tool invocation, inside the wrapper
    below) and returns the tool definitions that use it.

    Args:
        config: LSP configuration containing LSP settings

    Returns:
        List of tool definitions for LSP tools; an empty list when no LSP
        binary can be located.
    """
    global _lsp_connection

    # Only initialize if not already initialized
    if _lsp_connection is None:
        lsp_binary_path = dbt_lsp_binary_info(config.lsp_path)

        if not lsp_binary_path:
            # No binary means nothing to run: expose no LSP tools at all.
            logger.warning("No LSP binary path found")
            return []

        logger.info(
            f"Using LSP binary in {lsp_binary_path.path} with version {lsp_binary_path.version}"
        )

        # Connection object only; start()/initialize() happen on first use.
        _lsp_connection = LSPConnection(
            binary_path=lsp_binary_path.path,
            args=[],
            cwd=config.project_dir,
        )

    def call_with_lsp_client(func: Callable) -> Callable:
        """Wrap *func* so it receives a ready LSPClient as its first argument.

        The wrapper starts and initializes the shared LSP connection on
        first call, then injects an LSPClient bound to it.
        """

        @functools.wraps(func)
        async def wrapper(*args, **kwargs) -> Any:
            global _lsp_connection

            if _lsp_connection is None:
                return "LSP connection not initialized"

            # Lazy startup: the subprocess is launched on the first tool call.
            if not _lsp_connection.state.initialized:
                try:
                    await _lsp_connection.start()
                    await _lsp_connection.initialize()
                    logger.info("LSP connection started and initialized successfully")

                except Exception as e:
                    logger.error(f"Error starting LSP connection: {e}")
                    # Clean up failed connection
                    # NOTE(review): the failed connection is dropped without
                    # calling stop() — confirm LSPConnection cleans up its
                    # subprocess on a failed start/initialize.
                    _lsp_connection = None
                    return "Error: Failed to establish LSP connection"

            lsp_client = LSPClient(_lsp_connection)
            return await func(lsp_client, *args, **kwargs)

        # remove the lsp_client argument from the signature so MCP schema
        # generation does not expose it as a tool parameter
        wrapper.__signature__ = inspect.signature(func).replace(  # type: ignore
            parameters=[
                param
                for param in inspect.signature(func).parameters.values()
                if param.name != "lsp_client"
            ]
        )

        return wrapper

    return [
        ToolDefinition(
            fn=call_with_lsp_client(get_column_lineage),
            description=get_prompt("lsp/get_column_lineage"),
            annotations=create_tool_annotations(
                title="get_column_lineage",
                read_only_hint=False,
                destructive_hint=False,
                idempotent_hint=True,
            ),
        ),
    ]
115 | 
116 | 
async def get_column_lineage(
    lsp_client: LSPClient,
    model_id: str = Field(description=get_prompt("lsp/args/model_id")),
    column_name: str = Field(description=get_prompt("lsp/args/column_name")),
) -> dict[str, Any]:
    """Trace lineage for one column of a dbt model via the LSP.

    Args:
        lsp_client: The LSP client instance (injected by the tool wrapper).
        model_id: The dbt model identifier.
        column_name: The column name to trace lineage for.

    Returns:
        A dict with exactly one key: ``nodes`` (lineage data) on success,
        or ``error`` (human-readable message) on any failure.
    """
    try:
        response = await lsp_client.get_column_lineage(
            model_id=model_id, column_name=column_name
        )

        # Surface failures reported by the LSP itself.
        if "error" in response:
            logger.error(f"LSP error getting column lineage: {response['error']}")
            return {"error": f"LSP error: {response['error']}"}

        # Treat a missing or empty node list as "nothing found".
        nodes = response.get("nodes")
        if not nodes:
            logger.warning(f"No column lineage found for {model_id}.{column_name}")
            return {
                "error": f"No column lineage found for model {model_id} and column {column_name}"
            }

        return {"nodes": nodes}

    except TimeoutError:
        error_msg = f"Timeout waiting for column lineage (model: {model_id}, column: {column_name})"
        logger.error(error_msg)
        return {"error": error_msg}
    except Exception as e:
        error_msg = (
            f"Failed to get column lineage for {model_id}.{column_name}: {str(e)}"
        )
        logger.error(error_msg)
        return {"error": error_msg}
164 | 
165 | 
async def cleanup_lsp_connection() -> None:
    """Stop the module-level LSP connection, if any, and drop the reference.

    Errors raised while stopping are logged and swallowed; the global is
    reset to ``None`` in all cases so a fresh connection can be created
    on the next tool call.
    """
    global _lsp_connection
    if not _lsp_connection:
        return
    try:
        logger.info("Cleaning up LSP connection")
        await _lsp_connection.stop()
    except Exception as e:
        logger.error(f"Error cleaning up LSP connection: {e}")
    finally:
        _lsp_connection = None
177 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/tools/policy.py:
--------------------------------------------------------------------------------

```python
  1 | from enum import Enum
  2 | 
  3 | from pydantic.dataclasses import dataclass
  4 | 
  5 | from dbt_mcp.tools.tool_names import ToolName
  6 | 
  7 | 
class ToolBehavior(Enum):
    """Behavior of the tool.

    Classifies what kind of data a tool may return, so policies can keep
    row-level data away from LLMs (see the comment on ``tool_policies``).
    """

    # The tool can return row-level data.
    RESULT_SET = "result_set"
    # The tool only returns metadata.
    METADATA = "metadata"
 15 | 
 16 | 
@dataclass
class ToolPolicy:
    """Policy for a tool."""

    # Tool name; matches a ToolName enum value.
    name: str
    # Whether the tool can return row-level data or only metadata.
    behavior: ToolBehavior
 23 | 
 24 | 
 25 | # Defining tool policies is important for our internal usage of dbt-mcp.
 26 | # Our policies dictate that we do not send row-level data to LLMs.
 27 | tool_policies = {
 28 |     # CLI tools
 29 |     ToolName.SHOW.value: ToolPolicy(
 30 |         name=ToolName.SHOW.value, behavior=ToolBehavior.RESULT_SET
 31 |     ),
 32 |     ToolName.LIST.value: ToolPolicy(
 33 |         name=ToolName.LIST.value, behavior=ToolBehavior.METADATA
 34 |     ),
 35 |     ToolName.DOCS.value: ToolPolicy(
 36 |         name=ToolName.DOCS.value, behavior=ToolBehavior.METADATA
 37 |     ),
 38 |     # Compile tool can have result_set behavior because of macros like print_table
 39 |     ToolName.COMPILE.value: ToolPolicy(
 40 |         name=ToolName.COMPILE.value, behavior=ToolBehavior.RESULT_SET
 41 |     ),
 42 |     ToolName.TEST.value: ToolPolicy(
 43 |         name=ToolName.TEST.value, behavior=ToolBehavior.METADATA
 44 |     ),
 45 |     # Run tool can have result_set behavior because of macros like print_table
 46 |     ToolName.RUN.value: ToolPolicy(
 47 |         name=ToolName.RUN.value, behavior=ToolBehavior.RESULT_SET
 48 |     ),
 49 |     # Build tool can have result_set behavior because of macros like print_table
 50 |     ToolName.BUILD.value: ToolPolicy(
 51 |         name=ToolName.BUILD.value, behavior=ToolBehavior.RESULT_SET
 52 |     ),
 53 |     ToolName.PARSE.value: ToolPolicy(
 54 |         name=ToolName.PARSE.value, behavior=ToolBehavior.METADATA
 55 |     ),
 56 |     # Semantic Layer tools
 57 |     ToolName.LIST_METRICS.value: ToolPolicy(
 58 |         name=ToolName.LIST_METRICS.value, behavior=ToolBehavior.METADATA
 59 |     ),
 60 |     ToolName.GET_DIMENSIONS.value: ToolPolicy(
 61 |         name=ToolName.GET_DIMENSIONS.value, behavior=ToolBehavior.METADATA
 62 |     ),
 63 |     ToolName.GET_ENTITIES.value: ToolPolicy(
 64 |         name=ToolName.GET_ENTITIES.value, behavior=ToolBehavior.METADATA
 65 |     ),
 66 |     ToolName.QUERY_METRICS.value: ToolPolicy(
 67 |         name=ToolName.QUERY_METRICS.value, behavior=ToolBehavior.RESULT_SET
 68 |     ),
 69 |     ToolName.GET_METRICS_COMPILED_SQL.value: ToolPolicy(
 70 |         name=ToolName.GET_METRICS_COMPILED_SQL.value, behavior=ToolBehavior.METADATA
 71 |     ),
 72 |     # Discovery tools
 73 |     ToolName.GET_MODEL_PARENTS.value: ToolPolicy(
 74 |         name=ToolName.GET_MODEL_PARENTS.value, behavior=ToolBehavior.METADATA
 75 |     ),
 76 |     ToolName.GET_MODEL_CHILDREN.value: ToolPolicy(
 77 |         name=ToolName.GET_MODEL_CHILDREN.value, behavior=ToolBehavior.METADATA
 78 |     ),
 79 |     ToolName.GET_MODEL_DETAILS.value: ToolPolicy(
 80 |         name=ToolName.GET_MODEL_DETAILS.value, behavior=ToolBehavior.METADATA
 81 |     ),
 82 |     ToolName.GET_MODEL_HEALTH.value: ToolPolicy(
 83 |         name=ToolName.GET_MODEL_HEALTH.value, behavior=ToolBehavior.METADATA
 84 |     ),
 85 |     ToolName.GET_MART_MODELS.value: ToolPolicy(
 86 |         name=ToolName.GET_MART_MODELS.value, behavior=ToolBehavior.METADATA
 87 |     ),
 88 |     ToolName.GET_ALL_MODELS.value: ToolPolicy(
 89 |         name=ToolName.GET_ALL_MODELS.value, behavior=ToolBehavior.METADATA
 90 |     ),
 91 |     ToolName.GET_ALL_SOURCES.value: ToolPolicy(
 92 |         name=ToolName.GET_ALL_SOURCES.value, behavior=ToolBehavior.METADATA
 93 |     ),
 94 |     ToolName.GET_EXPOSURES.value: ToolPolicy(
 95 |         name=ToolName.GET_EXPOSURES.value, behavior=ToolBehavior.METADATA
 96 |     ),
 97 |     ToolName.GET_EXPOSURE_DETAILS.value: ToolPolicy(
 98 |         name=ToolName.GET_EXPOSURE_DETAILS.value, behavior=ToolBehavior.METADATA
 99 |     ),
100 |     # SQL tools
101 |     ToolName.TEXT_TO_SQL.value: ToolPolicy(
102 |         name=ToolName.TEXT_TO_SQL.value, behavior=ToolBehavior.METADATA
103 |     ),
104 |     ToolName.EXECUTE_SQL.value: ToolPolicy(
105 |         name=ToolName.EXECUTE_SQL.value, behavior=ToolBehavior.RESULT_SET
106 |     ),
107 |     # Admin API tools
108 |     ToolName.LIST_JOBS.value: ToolPolicy(
109 |         name=ToolName.LIST_JOBS.value, behavior=ToolBehavior.METADATA
110 |     ),
111 |     ToolName.GET_JOB_DETAILS.value: ToolPolicy(
112 |         name=ToolName.GET_JOB_DETAILS.value, behavior=ToolBehavior.METADATA
113 |     ),
114 |     ToolName.TRIGGER_JOB_RUN.value: ToolPolicy(
115 |         name=ToolName.TRIGGER_JOB_RUN.value, behavior=ToolBehavior.METADATA
116 |     ),
117 |     ToolName.LIST_JOBS_RUNS.value: ToolPolicy(
118 |         name=ToolName.LIST_JOBS_RUNS.value, behavior=ToolBehavior.METADATA
119 |     ),
120 |     ToolName.GET_JOB_RUN_DETAILS.value: ToolPolicy(
121 |         name=ToolName.GET_JOB_RUN_DETAILS.value, behavior=ToolBehavior.METADATA
122 |     ),
123 |     ToolName.CANCEL_JOB_RUN.value: ToolPolicy(
124 |         name=ToolName.CANCEL_JOB_RUN.value, behavior=ToolBehavior.METADATA
125 |     ),
126 |     ToolName.RETRY_JOB_RUN.value: ToolPolicy(
127 |         name=ToolName.RETRY_JOB_RUN.value, behavior=ToolBehavior.METADATA
128 |     ),
129 |     ToolName.LIST_JOB_RUN_ARTIFACTS.value: ToolPolicy(
130 |         name=ToolName.LIST_JOB_RUN_ARTIFACTS.value, behavior=ToolBehavior.METADATA
131 |     ),
132 |     ToolName.GET_JOB_RUN_ARTIFACT.value: ToolPolicy(
133 |         name=ToolName.GET_JOB_RUN_ARTIFACT.value, behavior=ToolBehavior.METADATA
134 |     ),
135 |     ToolName.GET_JOB_RUN_ERROR.value: ToolPolicy(
136 |         name=ToolName.GET_JOB_RUN_ERROR.value, behavior=ToolBehavior.METADATA
137 |     ),
138 |     # dbt-codegen tools
139 |     ToolName.GENERATE_SOURCE.value: ToolPolicy(
140 |         name=ToolName.GENERATE_SOURCE.value, behavior=ToolBehavior.METADATA
141 |     ),
142 |     ToolName.GENERATE_MODEL_YAML.value: ToolPolicy(
143 |         name=ToolName.GENERATE_MODEL_YAML.value, behavior=ToolBehavior.METADATA
144 |     ),
145 |     ToolName.GENERATE_STAGING_MODEL.value: ToolPolicy(
146 |         name=ToolName.GENERATE_STAGING_MODEL.value, behavior=ToolBehavior.METADATA
147 |     ),
148 |     # LSP tools
149 |     ToolName.GET_COLUMN_LINEAGE.value: ToolPolicy(
150 |         name=ToolName.GET_COLUMN_LINEAGE.value, behavior=ToolBehavior.METADATA
151 |     ),
152 | }
153 | 
```

--------------------------------------------------------------------------------
/src/dbt_mcp/discovery/tools.py:
--------------------------------------------------------------------------------

```python
  1 | import logging
  2 | from collections.abc import Sequence
  3 | 
  4 | from mcp.server.fastmcp import FastMCP
  5 | 
  6 | from dbt_mcp.config.config_providers import (
  7 |     ConfigProvider,
  8 |     DiscoveryConfig,
  9 | )
 10 | from dbt_mcp.discovery.client import (
 11 |     ExposuresFetcher,
 12 |     MetadataAPIClient,
 13 |     ModelsFetcher,
 14 |     SourcesFetcher,
 15 | )
 16 | from dbt_mcp.prompts.prompts import get_prompt
 17 | from dbt_mcp.tools.annotations import create_tool_annotations
 18 | from dbt_mcp.tools.definitions import ToolDefinition
 19 | from dbt_mcp.tools.register import register_tools
 20 | from dbt_mcp.tools.tool_names import ToolName
 21 | 
 22 | logger = logging.getLogger(__name__)
 23 | 
 24 | 
 25 | def create_discovery_tool_definitions(
 26 |     config_provider: ConfigProvider[DiscoveryConfig],
 27 | ) -> list[ToolDefinition]:
 28 |     api_client = MetadataAPIClient(config_provider=config_provider)
 29 |     models_fetcher = ModelsFetcher(api_client=api_client)
 30 |     exposures_fetcher = ExposuresFetcher(api_client=api_client)
 31 |     sources_fetcher = SourcesFetcher(api_client=api_client)
 32 | 
 33 |     async def get_mart_models() -> list[dict]:
 34 |         mart_models = await models_fetcher.fetch_models(
 35 |             model_filter={"modelingLayer": "marts"}
 36 |         )
 37 |         return [m for m in mart_models if m["name"] != "metricflow_time_spine"]
 38 | 
 39 |     async def get_all_models() -> list[dict]:
 40 |         return await models_fetcher.fetch_models()
 41 | 
 42 |     async def get_model_details(
 43 |         model_name: str | None = None, unique_id: str | None = None
 44 |     ) -> dict:
 45 |         return await models_fetcher.fetch_model_details(model_name, unique_id)
 46 | 
 47 |     async def get_model_parents(
 48 |         model_name: str | None = None, unique_id: str | None = None
 49 |     ) -> list[dict]:
 50 |         return await models_fetcher.fetch_model_parents(model_name, unique_id)
 51 | 
 52 |     async def get_model_children(
 53 |         model_name: str | None = None, unique_id: str | None = None
 54 |     ) -> list[dict]:
 55 |         return await models_fetcher.fetch_model_children(model_name, unique_id)
 56 | 
 57 |     async def get_model_health(
 58 |         model_name: str | None = None, unique_id: str | None = None
 59 |     ) -> list[dict]:
 60 |         return await models_fetcher.fetch_model_health(model_name, unique_id)
 61 | 
 62 |     async def get_exposures() -> list[dict]:
 63 |         return await exposures_fetcher.fetch_exposures()
 64 | 
 65 |     async def get_exposure_details(
 66 |         exposure_name: str | None = None, unique_ids: list[str] | None = None
 67 |     ) -> list[dict]:
 68 |         return await exposures_fetcher.fetch_exposure_details(exposure_name, unique_ids)
 69 | 
 70 |     async def get_all_sources(
 71 |         source_names: list[str] | None = None,
 72 |         unique_ids: list[str] | None = None,
 73 |     ) -> list[dict]:
 74 |         return await sources_fetcher.fetch_sources(source_names, unique_ids)
 75 | 
 76 |     return [
 77 |         ToolDefinition(
 78 |             description=get_prompt("discovery/get_mart_models"),
 79 |             fn=get_mart_models,
 80 |             annotations=create_tool_annotations(
 81 |                 title="Get Mart Models",
 82 |                 read_only_hint=True,
 83 |                 destructive_hint=False,
 84 |                 idempotent_hint=True,
 85 |             ),
 86 |         ),
 87 |         ToolDefinition(
 88 |             description=get_prompt("discovery/get_all_models"),
 89 |             fn=get_all_models,
 90 |             annotations=create_tool_annotations(
 91 |                 title="Get All Models",
 92 |                 read_only_hint=True,
 93 |                 destructive_hint=False,
 94 |                 idempotent_hint=True,
 95 |             ),
 96 |         ),
 97 |         ToolDefinition(
 98 |             description=get_prompt("discovery/get_model_details"),
 99 |             fn=get_model_details,
100 |             annotations=create_tool_annotations(
101 |                 title="Get Model Details",
102 |                 read_only_hint=True,
103 |                 destructive_hint=False,
104 |                 idempotent_hint=True,
105 |             ),
106 |         ),
107 |         ToolDefinition(
108 |             description=get_prompt("discovery/get_model_parents"),
109 |             fn=get_model_parents,
110 |             annotations=create_tool_annotations(
111 |                 title="Get Model Parents",
112 |                 read_only_hint=True,
113 |                 destructive_hint=False,
114 |                 idempotent_hint=True,
115 |             ),
116 |         ),
117 |         ToolDefinition(
118 |             description=get_prompt("discovery/get_model_children"),
119 |             fn=get_model_children,
120 |             annotations=create_tool_annotations(
121 |                 title="Get Model Children",
122 |                 read_only_hint=True,
123 |                 destructive_hint=False,
124 |                 idempotent_hint=True,
125 |             ),
126 |         ),
127 |         ToolDefinition(
128 |             description=get_prompt("discovery/get_model_health"),
129 |             fn=get_model_health,
130 |             annotations=create_tool_annotations(
131 |                 title="Get Model Health",
132 |                 read_only_hint=True,
133 |                 destructive_hint=False,
134 |                 idempotent_hint=True,
135 |             ),
136 |         ),
137 |         ToolDefinition(
138 |             description=get_prompt("discovery/get_exposures"),
139 |             fn=get_exposures,
140 |             annotations=create_tool_annotations(
141 |                 title="Get Exposures",
142 |                 read_only_hint=True,
143 |                 destructive_hint=False,
144 |                 idempotent_hint=True,
145 |             ),
146 |         ),
147 |         ToolDefinition(
148 |             description=get_prompt("discovery/get_exposure_details"),
149 |             fn=get_exposure_details,
150 |             annotations=create_tool_annotations(
151 |                 title="Get Exposure Details",
152 |                 read_only_hint=True,
153 |                 destructive_hint=False,
154 |                 idempotent_hint=True,
155 |             ),
156 |         ),
157 |         ToolDefinition(
158 |             description=get_prompt("discovery/get_all_sources"),
159 |             fn=get_all_sources,
160 |             annotations=create_tool_annotations(
161 |                 title="Get All Sources",
162 |                 read_only_hint=True,
163 |                 destructive_hint=False,
164 |                 idempotent_hint=True,
165 |             ),
166 |         ),
167 |     ]
168 | 
169 | 
def register_discovery_tools(
    dbt_mcp: FastMCP,
    config_provider: ConfigProvider[DiscoveryConfig],
    exclude_tools: Sequence[ToolName] = (),
) -> None:
    """Register dbt Discovery API tools on the given MCP server.

    Args:
        dbt_mcp: The FastMCP server to register the tools on.
        config_provider: Provider for the Discovery API configuration.
        exclude_tools: Tool names to skip during registration. Defaults to
            an empty tuple — an immutable default, fixing the previous
            mutable-default-argument (``[]``) antipattern.
    """
    register_tools(
        dbt_mcp,
        create_discovery_tool_definitions(config_provider),
        exclude_tools,
    )
180 | 
```
Page 2/5FirstPrevNextLast