This is page 1 of 5. Use http://codebase.md/dbt-labs/dbt-mcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .changes
│ ├── header.tpl.md
│ ├── unreleased
│ │ ├── .gitkeep
│ │ ├── Bug Fix-20251028-143835.yaml
│ │ ├── Enhancement or New Feature-20251014-175047.yaml
│ │ └── Under the Hood-20251030-151902.yaml
│ ├── v0.1.3.md
│ ├── v0.10.0.md
│ ├── v0.10.1.md
│ ├── v0.10.2.md
│ ├── v0.10.3.md
│ ├── v0.2.0.md
│ ├── v0.2.1.md
│ ├── v0.2.10.md
│ ├── v0.2.11.md
│ ├── v0.2.12.md
│ ├── v0.2.13.md
│ ├── v0.2.14.md
│ ├── v0.2.15.md
│ ├── v0.2.16.md
│ ├── v0.2.17.md
│ ├── v0.2.18.md
│ ├── v0.2.19.md
│ ├── v0.2.2.md
│ ├── v0.2.20.md
│ ├── v0.2.3.md
│ ├── v0.2.4.md
│ ├── v0.2.5.md
│ ├── v0.2.6.md
│ ├── v0.2.7.md
│ ├── v0.2.8.md
│ ├── v0.2.9.md
│ ├── v0.3.0.md
│ ├── v0.4.0.md
│ ├── v0.4.1.md
│ ├── v0.4.2.md
│ ├── v0.5.0.md
│ ├── v0.6.0.md
│ ├── v0.6.1.md
│ ├── v0.6.2.md
│ ├── v0.7.0.md
│ ├── v0.8.0.md
│ ├── v0.8.1.md
│ ├── v0.8.2.md
│ ├── v0.8.3.md
│ ├── v0.8.4.md
│ ├── v0.9.0.md
│ ├── v0.9.1.md
│ └── v1.0.0.md
├── .changie.yaml
├── .env.example
├── .github
│ ├── actions
│ │ └── setup-python
│ │ └── action.yml
│ ├── CODEOWNERS
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.yml
│ │ └── feature_request.yml
│ ├── pull_request_template.md
│ └── workflows
│ ├── changelog-check.yml
│ ├── codeowners-check.yml
│ ├── create-release-pr.yml
│ ├── release.yml
│ └── run-checks-pr.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── .task
│ └── checksum
│ └── d2
├── .tool-versions
├── .vscode
│ ├── launch.json
│ └── settings.json
├── CHANGELOG.md
├── CONTRIBUTING.md
├── docs
│ ├── d2.png
│ └── diagram.d2
├── evals
│ └── semantic_layer
│ └── test_eval_semantic_layer.py
├── examples
│ ├── .DS_Store
│ ├── aws_strands_agent
│ │ ├── __init__.py
│ │ ├── .DS_Store
│ │ ├── dbt_data_scientist
│ │ │ ├── __init__.py
│ │ │ ├── .env.example
│ │ │ ├── agent.py
│ │ │ ├── prompts.py
│ │ │ ├── quick_mcp_test.py
│ │ │ ├── test_all_tools.py
│ │ │ └── tools
│ │ │ ├── __init__.py
│ │ │ ├── dbt_compile.py
│ │ │ ├── dbt_mcp.py
│ │ │ └── dbt_model_analyzer.py
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── requirements.txt
│ ├── google_adk_agent
│ │ ├── __init__.py
│ │ ├── main.py
│ │ ├── pyproject.toml
│ │ └── README.md
│ ├── langgraph_agent
│ │ ├── __init__.py
│ │ ├── .python-version
│ │ ├── main.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── openai_agent
│ │ ├── __init__.py
│ │ ├── .gitignore
│ │ ├── .python-version
│ │ ├── main_streamable.py
│ │ ├── main.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── openai_responses
│ │ ├── __init__.py
│ │ ├── .gitignore
│ │ ├── .python-version
│ │ ├── main.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── pydantic_ai_agent
│ │ ├── __init__.py
│ │ ├── .gitignore
│ │ ├── .python-version
│ │ ├── main.py
│ │ ├── pyproject.toml
│ │ └── README.md
│ └── remote_mcp
│ ├── .python-version
│ ├── main.py
│ ├── pyproject.toml
│ ├── README.md
│ └── uv.lock
├── LICENSE
├── pyproject.toml
├── README.md
├── src
│ ├── client
│ │ ├── __init__.py
│ │ ├── main.py
│ │ └── tools.py
│ ├── dbt_mcp
│ │ ├── __init__.py
│ │ ├── .gitignore
│ │ ├── config
│ │ │ ├── config_providers.py
│ │ │ ├── config.py
│ │ │ ├── dbt_project.py
│ │ │ ├── dbt_yaml.py
│ │ │ ├── headers.py
│ │ │ ├── settings.py
│ │ │ └── transport.py
│ │ ├── dbt_admin
│ │ │ ├── __init__.py
│ │ │ ├── client.py
│ │ │ ├── constants.py
│ │ │ ├── run_results_errors
│ │ │ │ ├── __init__.py
│ │ │ │ ├── config.py
│ │ │ │ └── parser.py
│ │ │ └── tools.py
│ │ ├── dbt_cli
│ │ │ ├── binary_type.py
│ │ │ └── tools.py
│ │ ├── dbt_codegen
│ │ │ ├── __init__.py
│ │ │ └── tools.py
│ │ ├── discovery
│ │ │ ├── client.py
│ │ │ └── tools.py
│ │ ├── errors
│ │ │ ├── __init__.py
│ │ │ ├── admin_api.py
│ │ │ ├── base.py
│ │ │ ├── cli.py
│ │ │ ├── common.py
│ │ │ ├── discovery.py
│ │ │ ├── semantic_layer.py
│ │ │ └── sql.py
│ │ ├── gql
│ │ │ └── errors.py
│ │ ├── lsp
│ │ │ ├── __init__.py
│ │ │ ├── lsp_binary_manager.py
│ │ │ ├── lsp_client.py
│ │ │ ├── lsp_connection.py
│ │ │ └── tools.py
│ │ ├── main.py
│ │ ├── mcp
│ │ │ ├── create.py
│ │ │ └── server.py
│ │ ├── oauth
│ │ │ ├── client_id.py
│ │ │ ├── context_manager.py
│ │ │ ├── dbt_platform.py
│ │ │ ├── fastapi_app.py
│ │ │ ├── logging.py
│ │ │ ├── login.py
│ │ │ ├── refresh_strategy.py
│ │ │ ├── token_provider.py
│ │ │ └── token.py
│ │ ├── prompts
│ │ │ ├── __init__.py
│ │ │ ├── admin_api
│ │ │ │ ├── cancel_job_run.md
│ │ │ │ ├── get_job_details.md
│ │ │ │ ├── get_job_run_artifact.md
│ │ │ │ ├── get_job_run_details.md
│ │ │ │ ├── get_job_run_error.md
│ │ │ │ ├── list_job_run_artifacts.md
│ │ │ │ ├── list_jobs_runs.md
│ │ │ │ ├── list_jobs.md
│ │ │ │ ├── retry_job_run.md
│ │ │ │ └── trigger_job_run.md
│ │ │ ├── dbt_cli
│ │ │ │ ├── args
│ │ │ │ │ ├── full_refresh.md
│ │ │ │ │ ├── limit.md
│ │ │ │ │ ├── resource_type.md
│ │ │ │ │ ├── selectors.md
│ │ │ │ │ ├── sql_query.md
│ │ │ │ │ └── vars.md
│ │ │ │ ├── build.md
│ │ │ │ ├── compile.md
│ │ │ │ ├── docs.md
│ │ │ │ ├── list.md
│ │ │ │ ├── parse.md
│ │ │ │ ├── run.md
│ │ │ │ ├── show.md
│ │ │ │ └── test.md
│ │ │ ├── dbt_codegen
│ │ │ │ ├── args
│ │ │ │ │ ├── case_sensitive_cols.md
│ │ │ │ │ ├── database_name.md
│ │ │ │ │ ├── generate_columns.md
│ │ │ │ │ ├── include_data_types.md
│ │ │ │ │ ├── include_descriptions.md
│ │ │ │ │ ├── leading_commas.md
│ │ │ │ │ ├── materialized.md
│ │ │ │ │ ├── model_name.md
│ │ │ │ │ ├── model_names.md
│ │ │ │ │ ├── schema_name.md
│ │ │ │ │ ├── source_name.md
│ │ │ │ │ ├── table_name.md
│ │ │ │ │ ├── table_names.md
│ │ │ │ │ ├── tables.md
│ │ │ │ │ └── upstream_descriptions.md
│ │ │ │ ├── generate_model_yaml.md
│ │ │ │ ├── generate_source.md
│ │ │ │ └── generate_staging_model.md
│ │ │ ├── discovery
│ │ │ │ ├── get_all_models.md
│ │ │ │ ├── get_all_sources.md
│ │ │ │ ├── get_exposure_details.md
│ │ │ │ ├── get_exposures.md
│ │ │ │ ├── get_mart_models.md
│ │ │ │ ├── get_model_children.md
│ │ │ │ ├── get_model_details.md
│ │ │ │ ├── get_model_health.md
│ │ │ │ └── get_model_parents.md
│ │ │ ├── lsp
│ │ │ │ ├── args
│ │ │ │ │ ├── column_name.md
│ │ │ │ │ └── model_id.md
│ │ │ │ └── get_column_lineage.md
│ │ │ ├── prompts.py
│ │ │ └── semantic_layer
│ │ │ ├── get_dimensions.md
│ │ │ ├── get_entities.md
│ │ │ ├── get_metrics_compiled_sql.md
│ │ │ ├── list_metrics.md
│ │ │ └── query_metrics.md
│ │ ├── py.typed
│ │ ├── semantic_layer
│ │ │ ├── client.py
│ │ │ ├── gql
│ │ │ │ ├── gql_request.py
│ │ │ │ └── gql.py
│ │ │ ├── levenshtein.py
│ │ │ ├── tools.py
│ │ │ └── types.py
│ │ ├── sql
│ │ │ └── tools.py
│ │ ├── telemetry
│ │ │ └── logging.py
│ │ ├── tools
│ │ │ ├── annotations.py
│ │ │ ├── definitions.py
│ │ │ ├── policy.py
│ │ │ ├── register.py
│ │ │ ├── tool_names.py
│ │ │ └── toolsets.py
│ │ └── tracking
│ │ └── tracking.py
│ └── remote_mcp
│ ├── __init__.py
│ └── session.py
├── Taskfile.yml
├── tests
│ ├── __init__.py
│ ├── env_vars.py
│ ├── integration
│ │ ├── __init__.py
│ │ ├── dbt_codegen
│ │ │ ├── __init__.py
│ │ │ └── test_dbt_codegen.py
│ │ ├── discovery
│ │ │ └── test_discovery.py
│ │ ├── initialization
│ │ │ ├── __init__.py
│ │ │ └── test_initialization.py
│ │ ├── lsp
│ │ │ └── test_lsp_connection.py
│ │ ├── remote_mcp
│ │ │ └── test_remote_mcp.py
│ │ ├── remote_tools
│ │ │ └── test_remote_tools.py
│ │ ├── semantic_layer
│ │ │ └── test_semantic_layer.py
│ │ └── tracking
│ │ └── test_tracking.py
│ ├── mocks
│ │ └── config.py
│ └── unit
│ ├── __init__.py
│ ├── config
│ │ ├── __init__.py
│ │ ├── test_config.py
│ │ └── test_transport.py
│ ├── dbt_admin
│ │ ├── __init__.py
│ │ ├── test_client.py
│ │ ├── test_error_fetcher.py
│ │ └── test_tools.py
│ ├── dbt_cli
│ │ ├── __init__.py
│ │ ├── test_cli_integration.py
│ │ └── test_tools.py
│ ├── dbt_codegen
│ │ ├── __init__.py
│ │ └── test_tools.py
│ ├── discovery
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_exposures_fetcher.py
│ │ └── test_sources_fetcher.py
│ ├── lsp
│ │ ├── __init__.py
│ │ ├── test_lsp_client.py
│ │ ├── test_lsp_connection.py
│ │ └── test_lsp_tools.py
│ ├── oauth
│ │ ├── test_credentials_provider.py
│ │ ├── test_fastapi_app_pagination.py
│ │ └── test_token.py
│ ├── tools
│ │ ├── test_disable_tools.py
│ │ ├── test_tool_names.py
│ │ ├── test_tool_policies.py
│ │ └── test_toolsets.py
│ └── tracking
│ └── test_tracking.py
├── ui
│ ├── .gitignore
│ ├── assets
│ │ ├── dbt_logo BLK.svg
│ │ └── dbt_logo WHT.svg
│ ├── eslint.config.js
│ ├── index.html
│ ├── package.json
│ ├── pnpm-lock.yaml
│ ├── pnpm-workspace.yaml
│ ├── README.md
│ ├── src
│ │ ├── App.css
│ │ ├── App.tsx
│ │ ├── global.d.ts
│ │ ├── index.css
│ │ ├── main.tsx
│ │ └── vite-env.d.ts
│ ├── tsconfig.app.json
│ ├── tsconfig.json
│ ├── tsconfig.node.json
│ └── vite.config.ts
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/.changes/unreleased/.gitkeep:
--------------------------------------------------------------------------------
```
1 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/.gitignore:
--------------------------------------------------------------------------------
```
1 | ui
2 |
```
--------------------------------------------------------------------------------
/examples/langgraph_agent/.python-version:
--------------------------------------------------------------------------------
```
1 | 3.13
2 |
```
--------------------------------------------------------------------------------
/examples/openai_agent/.python-version:
--------------------------------------------------------------------------------
```
1 | 3.13
2 |
```
--------------------------------------------------------------------------------
/examples/openai_responses/.python-version:
--------------------------------------------------------------------------------
```
1 | 3.13
2 |
```
--------------------------------------------------------------------------------
/examples/pydantic_ai_agent/.python-version:
--------------------------------------------------------------------------------
```
1 | 3.13
2 |
```
--------------------------------------------------------------------------------
/examples/remote_mcp/.python-version:
--------------------------------------------------------------------------------
```
1 | 3.13
2 |
```
--------------------------------------------------------------------------------
/examples/openai_agent/.gitignore:
--------------------------------------------------------------------------------
```
1 | .envrc
```
--------------------------------------------------------------------------------
/examples/openai_responses/.gitignore:
--------------------------------------------------------------------------------
```
1 | .envrc
```
--------------------------------------------------------------------------------
/examples/pydantic_ai_agent/.gitignore:
--------------------------------------------------------------------------------
```
1 | .envrc
```
--------------------------------------------------------------------------------
/ui/.gitignore:
--------------------------------------------------------------------------------
```
1 | node_modules
2 |
```
--------------------------------------------------------------------------------
/.tool-versions:
--------------------------------------------------------------------------------
```
1 | nodejs 20.17.0
2 | uv 0.8.19
3 | task 3.43.2
4 | pnpm 10.15.1
5 |
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
1 | __pycache__/
2 | .venv/
3 | .env
4 | .mypy_cache/
5 | .pytest_cache/
6 | *.egg-info/
7 | .idea/
8 | vortex_dev_mode_output.jsonl
9 | dbt-mcp.log
10 |
```
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
```
1 | DBT_HOST=cloud.getdbt.com
2 | DBT_PROD_ENV_ID=your-production-environment-id
3 | DBT_DEV_ENV_ID=your-development-environment-id
4 | DBT_USER_ID=your-user-id
5 | DBT_TOKEN=your-service-token
6 | DBT_PROJECT_DIR=/path/to/your/dbt/project
7 | DBT_PATH=/path/to/your/dbt/executable
8 | MULTICELL_ACCOUNT_PREFIX=your-account-prefix
9 |
```
--------------------------------------------------------------------------------
/.changie.yaml:
--------------------------------------------------------------------------------
```yaml
1 | changesDir: .changes
2 | unreleasedDir: unreleased
3 | headerPath: header.tpl.md
4 | changelogPath: CHANGELOG.md
5 | versionExt: md
6 | versionFormat: '## {{.Version}} - {{.Time.Format "2006-01-02"}}'
7 | kindFormat: '### {{.Kind}}'
8 | changeFormat: '* {{.Body}}'
9 | kinds:
10 | - label: Breaking Change
11 | auto: major
12 | - label: Enhancement or New Feature
13 | auto: minor
14 | - label: Under the Hood
15 | auto: patch
16 | - label: Bug Fix
17 | auto: patch
18 | - label: Security
19 | auto: patch
20 | newlines:
21 | afterChangelogHeader: 1
22 | beforeChangelogVersion: 1
23 | endOfVersion: 1
24 | envPrefix: CHANGIE_
25 |
```
--------------------------------------------------------------------------------
/examples/aws_strands_agent/dbt_data_scientist/.env.example:
--------------------------------------------------------------------------------
```
1 | # Local Project Development Setup
2 | DBT_PROJECT_LOCATION= Path to your local dbt project directory
3 | DBT_EXECUTABLE= Path to dbt executable (/Users/username/.local/bin/dbt)
4 |
5 | # Not Required: Used for comparing fusion and core
6 | DBT_CLASSIC= Path to dbt executable (/opt/homebrew/bin/dbt)
7 |
8 | # LLM Keys
9 | GOOGLE_API_KEY= Your Google API Key
10 | ANTHROPIC_API_KEY=Your Anthropic/Claude API Key
11 | OPENAI_API_KEY=Your OpenAI API Key
12 |
13 | # ===== REQUIRED: dbt MCP Server Configuration =====
14 | DBT_MCP_URL= The URL of your dbt MCP server
15 | DBT_TOKEN= Your dbt Cloud authentication token
16 | DBT_USER_ID= Your dbt Cloud user ID (numeric)
17 | DBT_PROD_ENV_ID= Your dbt Cloud production environment ID (numeric)
18 |
19 | # ===== OPTIONAL: dbt Environment IDs =====
20 | DBT_DEV_ENV_ID= Your dbt Cloud development environment ID (numeric)
21 | DBT_ACCOUNT_ID= Your dbt Cloud account ID (numeric)
22 |
```
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
```yaml
1 | exclude: |
2 | (?x)^(
3 | .mypy_cache/
4 | | .pytest_cache/
5 | | .venv/
6 | )$
7 |
8 | repos:
9 | - repo: https://github.com/pre-commit/pre-commit-hooks
10 | rev: v5.0.0
11 | hooks:
12 | - id: pretty-format-json
13 | args: ["--autofix"]
14 | - id: check-merge-conflict
15 | - id: no-commit-to-branch
16 | args: [--branch, main]
17 | - repo: https://github.com/rhysd/actionlint
18 | rev: v1.7.3
19 | hooks:
20 | - id: actionlint
21 | - repo: https://github.com/charliermarsh/ruff-pre-commit
22 | rev: v0.9.5
23 | hooks:
24 | - id: ruff
25 | args: [--fix]
26 | exclude: examples/
27 | - id: ruff-format
28 | exclude: examples/
29 | - repo: https://github.com/astral-sh/uv-pre-commit
30 | rev: 0.8.19
31 | hooks:
32 | - id: uv-lock
33 | - repo: https://github.com/pre-commit/mirrors-mypy
34 | rev: v1.13.0
35 | hooks:
36 | - id: mypy
37 | language: system
38 | pass_filenames: false
39 | args: ["--show-error-codes", "--namespace-packages", "--exclude", "examples/", "."]
40 | exclude: examples/
41 |
42 |
```
--------------------------------------------------------------------------------
/examples/remote_mcp/README.md:
--------------------------------------------------------------------------------
```markdown
1 |
```
--------------------------------------------------------------------------------
/examples/openai_responses/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # OpenAI Responses
2 |
3 | An example of using remote dbt-mcp with OpenAI's Responses API
4 |
5 | ## Usage
6 |
7 | `uv run main.py`
8 |
```
--------------------------------------------------------------------------------
/ui/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # dbt MCP
2 |
3 | This UI enables easier configuration of dbt MCP. Check out `package.json` and `Taskfile.yml` for usage.
4 |
```
--------------------------------------------------------------------------------
/examples/langgraph_agent/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # LangGraph Agent Example
2 |
3 | This is a simple example of how to create a conversational agent with remote dbt MCP & LangGraph.
4 |
5 | ## Usage
6 |
7 | 1. Set an `ANTHROPIC_API_KEY` environment variable with your Anthropic API key.
8 | 2. Run `uv run main.py`.
9 |
```
--------------------------------------------------------------------------------
/examples/pydantic_ai_agent/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # Pydantic AI Agent
2 |
3 | An example of using Pydantic AI with the remote dbt MCP server.
4 |
5 | ## Config
6 |
7 | Set the following environment variables:
8 | - `OPENAI_API_KEY` (or the API key for any other model supported by PydanticAI)
9 | - `DBT_TOKEN`
10 | - `DBT_PROD_ENV_ID`
11 | - `DBT_HOST` (if not using the default `cloud.getdbt.com`)
12 |
13 | ## Usage
14 |
15 | `uv run main.py`
```
--------------------------------------------------------------------------------
/examples/google_adk_agent/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # Google ADK Agent for dbt MCP
2 |
3 | An example of using Google Agent Development Kit with the remote dbt MCP server.
4 |
5 | ## Config
6 |
7 | Set the following environment variables:
8 | - `GOOGLE_GENAI_API_KEY` (or the API key for any other model supported by google ADK)
9 | - `ADK_MODEL` (optional: choose a different model; default: `gemini-2.0-flash`)
10 | - `DBT_TOKEN`
11 | - `DBT_PROD_ENV_ID`
12 | - `DBT_HOST` (if not using the default `cloud.getdbt.com`)
13 | - `DBT_PROJECT_DIR` (if using dbt core)
14 |
15 | ### Usage
16 |
17 | `uv run main.py`
```
--------------------------------------------------------------------------------
/examples/openai_agent/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # OpenAI Agent
2 |
3 | Examples of using dbt-mcp with OpenAI's agent framework
4 |
5 | ## Usage
6 |
7 | ### Local MCP Server
8 |
9 | - set up the env var file as described in the README and make sure that the `MCPServerStdio` points to it
10 | - set up the env var `OPENAI_API_KEY` with your OpenAI API key
11 | - run `uv run main.py`
12 |
13 | ### MCP Streamable HTTP Server
14 |
15 | - set up the env var `OPENAI_API_KEY` with your OpenAI API key
16 | - set up the env var `DBT_TOKEN` with your dbt API token
17 | - set up the env var `DBT_PROD_ENV_ID` with your dbt production environment ID
18 | - set up the env var `DBT_HOST` with your dbt host (default is `cloud.getdbt.com`)
19 | - run `uv run main_streamable.py`
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # dbt MCP Server
2 | [](https://www.bestpractices.dev/projects/11137)
3 |
4 | This MCP (Model Context Protocol) server provides various tools to interact with dbt. You can use this MCP server to provide AI agents with context of your project in dbt Core, dbt Fusion, and dbt Platform.
5 |
6 | Read our documentation [here](https://docs.getdbt.com/docs/dbt-ai/about-mcp) to learn more. [This](https://docs.getdbt.com/blog/introducing-dbt-mcp-server) blog post provides more details for what is possible with the dbt MCP server.
7 |
8 | ## Feedback
9 |
10 | If you have comments or questions, create a GitHub Issue or join us in [the community Slack](https://www.getdbt.com/community/join-the-community) in the `#tools-dbt-mcp` channel.
11 |
12 |
13 | ## Architecture
14 |
15 | The dbt MCP server architecture allows for your agent to connect to a variety of tools.
16 |
17 | 
18 |
19 | ## Examples
20 |
21 | Commonly, you will connect the dbt MCP server to an agent product like Claude or Cursor. However, if you are interested in creating your own agent, check out [the examples directory](https://github.com/dbt-labs/dbt-mcp/tree/main/examples) for how to get started.
22 |
23 | ## Contributing
24 |
25 | Read `CONTRIBUTING.md` for instructions on how to get involved!
26 |
```
--------------------------------------------------------------------------------
/examples/aws_strands_agent/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # dbt AWS Agentcore Multi-Agent
2 |
3 | A multi-agent system built with AWS Bedrock Agent Core that provides intelligent dbt project management and analysis capabilities.
4 |
5 | ## Architecture
6 |
7 | This project implements a multi-agent architecture with three specialized tools:
8 |
9 | 1. **dbt Compile Tool** - Local dbt compilation functionality
10 | 2. **dbt Model Analyzer** - Data model analysis and recommendations
11 | 3. **dbt MCP Server Tool** - Remote dbt MCP server connection
12 |
13 | ## 📋 Prerequisites
14 |
15 | - Python 3.10+
16 | - dbt CLI installed and configured
17 | - dbt Fusion installed
18 | - AWS Agentcore setup
19 |
20 | ## 🛠️ Installation
21 |
22 | 1. **Clone the repository**:
23 | ```bash
24 | git clone <repository-url>
25 | cd dbt-aws-agent
26 | ```
27 |
28 | 2. **Create a virtual environment**:
29 | ```bash
30 | python -m venv .venv
31 | source .venv/bin/activate # On Windows: .venv\Scripts\activate
32 | ```
33 |
34 | 3. **Install dependencies**:
35 | ```bash
36 | pip install -r requirements.txt
37 | ```
38 |
39 | 4. **Set up environment variables**:
40 | ```bash
41 | cp .env.example .env
42 | # Edit .env with your configuration
43 | ```
44 |
45 | 5. **Run**:
46 | ```bash
47 | cd dbt_data_scientist
48 | python agent.py
49 | ```
50 |
51 | ## Project Structure
52 |
53 | ```
54 | dbt-aws-agent/
55 | ├── dbt_data_scientist/ # Main application package
56 | │ ├── __init__.py # Package initialization
57 | │ ├── agent.py # Main agent with Bedrock Agent Core integration
58 | │ ├── prompts.py # Agent prompts and instructions
59 | │ ├── test_all_tools.py # Comprehensive test suite
60 | │ ├── quick_mcp_test.py # Quick MCP connectivity test
61 | │ └── tools/ # Tool implementations
62 | │ ├── __init__.py
63 | │ ├── dbt_compile.py # Local dbt compilation tool
64 | │ ├── dbt_mcp.py # Remote dbt MCP server tool (translated from Google ADK)
65 | │ └── dbt_model_analyzer.py # Data model analysis tool
66 | ├── requirements.txt # Python dependencies
67 | ├── .env.example # Environment configuration template
68 | └── README.md # This documentation
69 | ```
70 |
71 | ## Tools Overview
72 |
73 | ### 1. dbt Compile Tool (`dbt_compile.py`)
74 | - **Purpose**: Local dbt project compilation and troubleshooting
75 | - **Features**:
76 | - Runs `dbt compile --log-format json` locally
77 | - Parses JSON logs for structured analysis
78 | - Provides compilation error analysis and recommendations
79 | - Routes to specialized dbt compile agent for intelligent responses
80 |
81 | ### 2. dbt Model Analyzer Tool (`dbt_model_analyzer.py`)
82 | - **Purpose**: Data model analysis and recommendations
83 | - **Features**:
84 | - Analyzes model structure and dependencies
85 | - Assesses data quality patterns and test coverage
86 | - Reviews adherence to dbt best practices
87 | - Provides optimization recommendations
88 | - Generates model documentation suggestions
89 |
90 | ### 3. dbt MCP Server Tool (`dbt_mcp.py`)
91 | - **Purpose**: Remote dbt MCP server connection using AWS Bedrock Agent Core
92 | - **Features**:
93 | - Connects to remote dbt MCP server using streamable HTTP client
94 | - Supports dbt Cloud authentication with headers
95 | - Lists available MCP tools dynamically
96 | - Executes dbt MCP tool functions
97 | - Provides intelligent query routing to appropriate tools
98 | - Built-in connection testing and error handling
99 |
100 |
101 | ### 3. Test the Setup
102 |
103 | Before running the full application, test that everything is working:
104 |
105 | ```bash
106 | # Quick MCP test
107 | python dbt_data_scientist/quick_mcp_test.py
108 |
109 | # Full test suite
110 | python dbt_data_scientist/test_all_tools.py
111 | ```
112 |
113 | ### 4. Run the Application
114 |
115 | #### For AWS Bedrock Agent Core:
116 | ```bash
117 | python -m dbt_data_scientist.agent
118 | ```
119 |
120 | #### For Local Testing:
121 | ```bash
122 | python -m dbt_data_scientist.agent
123 | ```
124 |
125 | ## Usage Examples
126 |
127 | ### dbt Compile Tool
128 | ```
129 | > "Compile my dbt project and find any issues"
130 | > "What's wrong with my models in the staging folder?"
131 | ```
132 |
133 | ### dbt Model Analyzer Tool
134 | ```
135 | > "Analyze my data modeling approach for best practices"
136 | > "Review the dependencies in my dbt project"
137 | > "Check the data quality patterns in my models"
138 | ```
139 |
140 | ### dbt MCP Server Tool
141 | ```
142 | > "List all available dbt MCP tools"
143 | > "Show me the catalog from dbt Cloud"
144 | > "Run my models in dbt Cloud"
145 | > "What tests are available in my dbt project?"
146 | ```
147 |
148 | ## Testing
149 |
150 | The project includes comprehensive testing capabilities to verify all components are working correctly.
151 |
152 | ### Quick Tests
153 |
154 | #### Test MCP Connection Only
155 | ```bash
156 | python dbt_data_scientist/quick_mcp_test.py
157 | ```
158 | - Fast, minimal test of MCP connectivity
159 | - Verifies environment variables and connection
160 | - Lists available MCP tools
161 |
162 | #### Test MCP Tool Directly
163 | ```bash
164 | python dbt_data_scientist/tools/dbt_mcp.py
165 | ```
166 | - Tests the MCP module directly
167 | - Built-in connection testing
168 | - Shows detailed error messages
169 |
170 | ### Comprehensive Testing
171 |
172 | #### Full Test Suite
173 | ```bash
174 | python dbt_data_scientist/test_all_tools.py
175 | ```
176 | - Tests all tools individually
177 | - Verifies agent initialization
178 | - Tests tool integration
179 | - Comprehensive error reporting
180 |
181 | ### What Tests Verify
182 |
183 | 1. **Environment Variables** - All required variables are set
184 | 2. **Tool Imports** - All tools can be imported successfully
185 | 3. **Agent Initialization** - Agent loads with all tools
186 | 4. **Individual Tool Testing** - Each tool executes correctly
187 | 5. **Agent Integration** - Tools work together in the agent
188 | 6. **MCP Connectivity** - Remote MCP server connection works
189 |
190 | ### Test Output Example
191 | ```
192 | 🚀 Complete Tool and Agent Test Suite
193 | ==================================================
194 | 🔧 Testing Environment Setup
195 | ------------------------------
196 | ✅ DBT_MCP_URL: https://your-mcp-server.com
197 | ✅ DBT_TOKEN: ****************
198 | ✅ DBT_USER_ID: your_user_id
199 | ✅ DBT_PROD_ENV_ID: your_env_id
200 | ✅ Environment setup complete!
201 |
202 | 📦 Testing Tool Imports
203 | ------------------------------
204 | ✅ All tools imported successfully
205 | ✅ dbt_compile is callable
206 | ✅ dbt_mcp_tool is callable
207 | ✅ dbt_model_analyzer_agent is callable
208 |
209 | ... (more tests)
210 |
211 | 🎉 All tests passed! Your agent and tools are working correctly.
212 | ```
213 |
214 | ## Key Features
215 |
216 | ### 🔄 **Intelligent Routing**
217 | The main agent automatically routes queries to the appropriate specialized tool based on keywords and context.
218 |
219 | ### 🌐 **MCP Server Integration**
220 | Seamless connection to remote dbt MCP servers with proper authentication and error handling.
221 |
222 | ### 📊 **Comprehensive Analysis**
223 | Multi-faceted analysis including compilation, modeling best practices, and data quality assessment.
224 |
225 | ### ⚡ **Async Support**
226 | Full async/await support for MCP operations while maintaining compatibility with Bedrock Agent Core.
227 |
228 | ### 🛡️ **Error Handling**
229 | Robust error handling and fallback mechanisms for all tool operations.
230 |
231 | ## Development
232 |
233 | ### Adding New Tools
234 | 1. Create a new tool file in `dbt_data_scientist/tools/`
235 | 2. Use the `@tool` decorator from strands
236 | 3. Add the tool to the main agent's tools list in `agent.py`
237 | 4. Update the routing logic in the main agent's system prompt
238 |
239 | ## Troubleshooting
240 |
241 | ### Testing First
242 |
243 | Before troubleshooting, run the test suite to identify issues:
244 |
245 | ```bash
246 | # Quick test for MCP issues
247 | python dbt_data_scientist/quick_mcp_test.py
248 |
249 | # Comprehensive test for all issues
250 | python dbt_data_scientist/test_all_tools.py
251 | ```
252 |
253 | ### Common Issues
254 |
255 | 1. **MCP Connection Failed**
256 | - Run `python dbt_data_scientist/quick_mcp_test.py` to diagnose
257 | - Verify `DBT_MCP_URL` is correct
258 | - Check authentication headers
259 | - Ensure dbt MCP server is accessible
260 | - Check network connectivity
261 |
262 | 2. **dbt Compile Errors**
263 | - Verify `DBT_PROJECT_LOCATION` path exists
264 | - Check `DBT_EXECUTABLE` is in PATH
265 | - Ensure dbt project is valid
266 | - Run `dbt compile` manually to test
267 |
268 | 3. **Environment Variable Issues**
269 | - Copy `.env.example` to `.env`
270 | - Verify all required variables are set
271 | - Check variable values are correct
272 | - Use the test suite to validate configuration
273 |
274 | 4. **Agent Initialization Issues**
275 | - Check that all tools can be imported
276 | - Verify MCP server is accessible
277 | - Ensure all dependencies are installed
278 | - Run individual tool tests
279 |
280 | ### Debug Mode
281 |
282 | For detailed debugging, you can run individual components:
283 |
284 | ```bash
285 | # Test MCP tool directly
286 | python dbt_data_scientist/tools/dbt_mcp.py
287 |
288 | # Test individual tools
289 | python -c "from dbt_data_scientist.tools import dbt_compile; print(dbt_compile('test'))"
290 | ```
291 |
292 | ## Contributing
293 |
294 | 1. Fork the repository
295 | 2. Create a feature branch
296 | 3. Make your changes
297 | 4. Add tests for new functionality
298 | 5. Submit a pull request
299 |
300 | ## License
301 |
302 | This project is licensed under the MIT License - see the LICENSE file for details.
303 |
```
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
```markdown
1 | # Contributing
2 |
3 | With [task](https://taskfile.dev/) installed, simply run `task` to see the list of available commands. For comments, questions, or requests open a GitHub issue.
4 |
5 | ## Setup
6 |
7 | 1. Clone the repository:
8 | ```shell
9 | git clone https://github.com/dbt-labs/dbt-mcp.git
10 | cd dbt-mcp
11 | ```
12 |
13 | 2. [Install uv](https://docs.astral.sh/uv/getting-started/installation/)
14 |
15 | 3. [Install Task](https://taskfile.dev/installation/)
16 |
17 | 4. Run `task install`
18 |
19 | 5. Configure environment variables:
20 | ```shell
21 | cp .env.example .env
22 | ```
23 | Then edit `.env` with your specific environment variables (see the `Configuration` section of the `README.md`).
24 |
25 | ## Testing
26 |
27 | This repo has automated tests which can be run with `task test:unit`. Additionally, there is a simple CLI tool which can be used to test by running `task client`. If you would like to test in a client like Cursor or Claude, use a configuration file like this:
28 |
29 | ```
30 | {
31 | "mcpServers": {
32 | "dbt": {
33 | "command": "<path-to-uv>",
34 | "args": [
35 | "--directory",
36 | "<path-to-this-directory>/dbt-mcp",
37 | "run",
38 | "dbt-mcp",
39 | "--env-file",
40 | "<path-to-this-directory>/dbt-mcp/.env"
41 | ]
42 | }
43 | }
44 | }
45 | ```
46 |
47 | Or, if you would like to test with Oauth, use a configuration like this:
48 |
49 | ```
50 | {
51 | "mcpServers": {
52 | "dbt": {
53 | "command": "<path-to-uv>",
54 | "args": [
55 | "--directory",
56 | "<path-to-this-directory>/dbt-mcp",
57 | "run",
58 | "dbt-mcp",
59 | ],
60 | "env": {
61 | "DBT_HOST": "<dbt-host-with-custom-subdomain>",
62 | }
63 | }
64 | }
65 | }
66 | ```
67 |
68 | For improved debugging, you can set the `DBT_MCP_SERVER_FILE_LOGGING=true` environment variable to log to a `./dbt-mcp.log` file.
69 |
70 | ## Signed Commits
71 |
72 | Before committing changes, ensure that you have set up [signed commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits).
73 | This repo requires signing on all commits for PRs.
74 |
75 | ## Changelog
76 |
77 | Every PR requires a changelog entry. [Install changie](https://changie.dev/) and run `changie new` to create a new changelog entry.
78 |
79 | ## Debugging
80 |
81 | The dbt-mcp server runs with `stdio` transport by default which does not allow for Python debugger support. For debugging with breakpoints, use `streamable-http` transport.
82 |
83 | ### Option 1: MCP Inspector Only (No Breakpoints)
84 | 1. Run `task inspector` - this starts both the server and inspector automatically
85 | 2. Open MCP Inspector UI
86 | 3. Use "STDIO" Transport Type to connect
87 | 4. Test tools interactively in the inspector UI (uses `stdio` transport, no debugger support)
88 |
89 | ### Option 2: VS Code Debugger with Breakpoints (Recommended for Debugging)
90 | 1. Set breakpoints in your code
91 | 2. Press `F5` or select "debug dbt-mcp" from the Run menu
92 | 3. Open MCP Inspector UI via `npx @modelcontextprotocol/inspector`
93 | 4. Connect to `http://localhost:8000/mcp/v1` using "Streamable HTTP" transport and "Via Proxy" connection type
94 | 5. Call tools from Inspector - your breakpoints will trigger
95 |
96 | ### Option 3: Manual Debugging with `task dev`
97 | 1. Run `task dev` - this starts the server with `streamable-http` transport on `http://localhost:8000`
98 | 2. Set breakpoints in your code
99 | 3. Attach your debugger manually (see [debugpy documentation](https://github.com/microsoft/debugpy#debugpy) for examples)
100 | 4. Open MCP Inspector via `npx @modelcontextprotocol/inspector`
101 | 5. Connect to `http://localhost:8000/mcp/v1` using "Streamable HTTP" transport and "Via Proxy" connection type
102 | 6. Call tools from Inspector - your breakpoints will trigger
103 |
104 | **Note:** `task dev` uses `streamable-http` by default. The `streamable-http` transport allows the debugger and MCP Inspector to work simultaneously without conflicts. To override, use `MCP_TRANSPORT=stdio task dev`.
105 |
106 | If you encounter any problems, you can try running `task run` to see errors in your terminal.
107 |
108 | ## Release
109 |
110 | Only people in the `CODEOWNERS` file should trigger a new release with these steps:
111 |
112 | 1. Consider these guidelines when choosing a version number:
113 | - Major
114 | - Removing a tool or toolset
115 | - Changing the behavior of existing environment variables or configurations
116 | - Minor
117 | - Changes to config system related to the function signature of the register functions (e.g. `register_discovery_tools`)
118 | - Adding optional parameters to a tool function signature
119 | - Adding a new tool or toolset
120 | - Removing or adding non-optional parameters from tool function signatures
121 | - Patch
122 | - Bug and security fixes - only major security and bug fixes will be back-ported to prior minor and major versions
123 | - Dependency updates which don’t change behavior
124 | - Minor enhancements
125 | - Editing a tool or parameter description prompt
126 | - Adding an allowed environment variable with the `DBT_MCP_` prefix
127 | 2. Trigger the [Create release PR Action](https://github.com/dbt-labs/dbt-mcp/actions/workflows/create-release-pr.yml).
128 | - If the release is NOT a pre-release, just pick if the bump should be patch, minor or major
129 | - If the release is a pre-release, set the bump and the pre-release suffix. We support alpha.N, beta.N and rc.N.
130 | - use alpha for early releases of experimental features that specific people might want to test. Significant changes can be expected between alpha and the official release.
131 |     - use beta for releases that are mostly stable but still in development. It can be used to gather feedback from a group of people on how a specific feature should work.
132 | - use rc for releases that are mostly stable and already feature complete. Only bugfixes and minor changes are expected between rc and the official release.
133 | - Picking the prerelease suffix will depend on whether the last release was the stable release or a pre-release:
134 |
135 | | Last Stable | Last Pre-release | Bump | Pre-release Suffix | Resulting Version |
136 | | ----------- | ---------------- | ----- | ------------------ | ----------------- |
137 | | 1.2.0 | - | minor | beta.1 | 1.3.0-beta.1 |
138 | | 1.2.0 | 1.3.0-beta.1 | minor | beta.2 | 1.3.0-beta.2 |
139 | | 1.2.0 | 1.3.0-beta.2 | minor | rc.1 | 1.3.0-rc.1 |
140 | | 1.2.0 | 1.3.0-rc.1 | minor | | 1.3.0 |
141 | | 1.2.0 | 1.3.0-beta.2 | minor | - | 1.3.0 |
142 | | 1.2.0 | - | major | rc.1 | 2.0.0-rc.1 |
143 | | 1.2.0 | 2.0.0-rc.1 | major | - | 2.0.0 |
144 |
145 | 3. Get this PR approved & merged in (if the resulting release name is not the one expected in the PR, just close the PR and try again from step 1)
146 | 4. This will trigger the `Release dbt-mcp` Action. On the `Summary` page of this Action a member of the `CODEOWNERS` file will have to manually approve the release. The rest of the release process is automated.
147 |
```
--------------------------------------------------------------------------------
/examples/google_adk_agent/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/examples/langgraph_agent/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/examples/openai_agent/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/examples/openai_responses/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/examples/pydantic_ai_agent/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/src/client/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/dbt_admin/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/mcp/create.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/src/remote_mcp/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/integration/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/integration/dbt_codegen/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/integration/initialization/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/unit/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/unit/config/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/unit/dbt_admin/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/unit/dbt_cli/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/unit/dbt_codegen/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/unit/discovery/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/ui/src/index.css:
--------------------------------------------------------------------------------
```css
1 |
```
--------------------------------------------------------------------------------
/examples/aws_strands_agent/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
2 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.17.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.17 - 2025-07-18
2 |
```
--------------------------------------------------------------------------------
/ui/src/global.d.ts:
--------------------------------------------------------------------------------
```typescript
1 | declare module "*.css";
2 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/lsp/args/column_name.md:
--------------------------------------------------------------------------------
```markdown
1 | The column name to trace lineage for.
2 |
```
--------------------------------------------------------------------------------
/ui/pnpm-workspace.yaml:
--------------------------------------------------------------------------------
```yaml
1 | ignoredBuiltDependencies:
2 | - esbuild
3 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/list.md:
--------------------------------------------------------------------------------
```markdown
1 | List the resources in your dbt project.
2 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/oauth/client_id.py:
--------------------------------------------------------------------------------
```python
1 | OAUTH_CLIENT_ID = "34ec61e834cdffd9dd90a32231937821"
2 |
```
--------------------------------------------------------------------------------
/tests/unit/lsp/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Unit tests for the dbt Fusion LSP integration."""
2 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.11.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.11 - 2025-07-03
2 | ### Bug Fix
3 | * fix order_by input
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.1.3.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.1.3 and before
2 | * Initial releases before using changie
```
--------------------------------------------------------------------------------
/examples/aws_strands_agent/dbt_data_scientist/__init__.py:
--------------------------------------------------------------------------------
```python
1 | from . import agent
2 | from . import prompts
3 | from . import tools
```
--------------------------------------------------------------------------------
/src/dbt_mcp/dbt_admin/run_results_errors/__init__.py:
--------------------------------------------------------------------------------
```python
1 | from .parser import ErrorFetcher
2 |
3 | __all__ = ["ErrorFetcher"]
4 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/__init__.py:
--------------------------------------------------------------------------------
```python
1 | from dbt_mcp.prompts.prompts import get_prompt # noqa: F401
2 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/table_name.md:
--------------------------------------------------------------------------------
```markdown
1 | The source table name to generate a base model for (required)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/source_name.md:
--------------------------------------------------------------------------------
```markdown
1 | The source name as defined in your sources.yml file (required)
```
--------------------------------------------------------------------------------
/ui/src/vite-env.d.ts:
--------------------------------------------------------------------------------
```typescript
1 | /// <reference types="vite/client" />
2 | declare module "*.css";
3 |
```
--------------------------------------------------------------------------------
/.changes/v0.4.1.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.4.1 - 2025-08-08
2 | ### Under the Hood
3 | * Upgrade dbt-sl-sdk
4 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/dbt_codegen/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """dbt-codegen MCP tools for automated dbt code generation."""
2 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_all_models.md:
--------------------------------------------------------------------------------
```markdown
1 | Get the name and description of all dbt models in the environment.
2 |
```
--------------------------------------------------------------------------------
/.changes/v0.8.1.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.8.1 - 2025-09-22
2 | ### Under the Hood
3 | * Create ConfigProvider ABC
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.19.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.19 - 2025-07-22
2 | ### Under the Hood
3 | * Create list of tool names
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.10.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.10 - 2025-07-03
2 | ### Enhancement or New Feature
3 | * Upgrade MCP SDK
4 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/schema_name.md:
--------------------------------------------------------------------------------
```markdown
1 | The schema name in your database that contains the source tables (required)
```
--------------------------------------------------------------------------------
/.changes/v0.2.3.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.3 - 2025-06-02
2 | ### Under the Hood
3 | * Fix release action to fetch tags
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.10.1.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.10.1 - 2025-10-02
2 | ### Bug Fix
3 | * Fix get_job_run_error truncated log output
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.12.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.12 - 2025-07-09
2 | ### Bug Fix
3 | * Catch every tool error and surface as string
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.13.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.13 - 2025-07-11
2 | ### Under the Hood
3 | * Decouple discovery tools from FastMCP
4 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/database_name.md:
--------------------------------------------------------------------------------
```markdown
1 | The database that contains your source data (optional, defaults to target database)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/case_sensitive_cols.md:
--------------------------------------------------------------------------------
```markdown
1 | Whether to quote column names to preserve case sensitivity (optional, default false)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/tables.md:
--------------------------------------------------------------------------------
```markdown
1 | List of table names to generate base models for from the specified source (required)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/docs.md:
--------------------------------------------------------------------------------
```markdown
1 | The docs command is responsible for generating your project's documentation website.
2 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/materialized.md:
--------------------------------------------------------------------------------
```markdown
1 | The materialization type for the model config block (optional, e.g., 'view', 'table')
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/include_data_types.md:
--------------------------------------------------------------------------------
```markdown
1 | Whether to include data types in the model column definitions (optional, default true)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/leading_commas.md:
--------------------------------------------------------------------------------
```markdown
1 | Whether to use leading commas instead of trailing commas in SQL (optional, default false)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/include_descriptions.md:
--------------------------------------------------------------------------------
```markdown
1 | Whether to include placeholder descriptions in the generated YAML (optional, default false)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/generate_columns.md:
--------------------------------------------------------------------------------
```markdown
1 | Whether to include column definitions in the generated source YAML (optional, default false)
```
--------------------------------------------------------------------------------
/.changes/v0.2.4.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.4 - 2025-06-03
2 | ### Bug Fix
3 | * Add the missing selector argument when running commands
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.10.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.10.0 - 2025-10-01
2 | ### Enhancement or New Feature
3 | * Add get_job_run_error to Admin API tools
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.1.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.1 - 2025-05-28
2 | ### Under the Hood
3 | * Remove hatch from tag action
4 | * Manually triggering release
5 |
```
--------------------------------------------------------------------------------
/.changes/unreleased/Bug Fix-20251028-143835.yaml:
--------------------------------------------------------------------------------
```yaml
1 | kind: Bug Fix
2 | body: Minor update to the instruction for LSP tool
3 | time: 2025-10-28T14:38:35.729371+01:00
4 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/build.md:
--------------------------------------------------------------------------------
```markdown
1 | The dbt build command will:
2 |
3 | - run models
4 | - test tests
5 | - snapshot snapshots
6 | - seed seeds
7 |
8 | In DAG order.
9 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/upstream_descriptions.md:
--------------------------------------------------------------------------------
```markdown
1 | Whether to include descriptions from upstream models for matching column names (optional, default false)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/test.md:
--------------------------------------------------------------------------------
```markdown
1 | dbt test runs data tests defined on models, sources, snapshots, and seeds and unit tests defined on SQL models.
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/table_names.md:
--------------------------------------------------------------------------------
```markdown
1 | List of specific table names to generate source definitions for (optional, generates all tables if not specified)
```
--------------------------------------------------------------------------------
/.changes/unreleased/Enhancement or New Feature-20251014-175047.yaml:
--------------------------------------------------------------------------------
```yaml
1 | kind: Enhancement or New Feature
2 | body: This adds the get all sources tool.
3 | time: 2025-10-14T17:50:47.539453+01:00
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.9.1.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.9.1 - 2025-09-30
2 | ### Under the Hood
3 | * Reorganize code and add ability to format the arrow table differently
4 |
```
--------------------------------------------------------------------------------
/.changes/unreleased/Under the Hood-20251030-151902.yaml:
--------------------------------------------------------------------------------
```yaml
1 | kind: Under the Hood
2 | body: Add version number guidelines to contributing.md
3 | time: 2025-10-30T15:19:02.963083-05:00
4 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.15.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.15 - 2025-07-16
2 | ### Under the Hood
3 | * Refactor sl tools for reusability
4 | * Update VSCode instructions in README
5 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/args/sql_query.md:
--------------------------------------------------------------------------------
```markdown
1 | This is the SQL query to run against the data platform. Do not add a limit to this query. Use the `limit` argument instead.
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/prompts.py:
--------------------------------------------------------------------------------
```python
1 | from pathlib import Path
2 |
3 |
4 | def get_prompt(name: str) -> str:
5 | return (Path(__file__).parent / f"{name}.md").read_text()
6 |
```
--------------------------------------------------------------------------------
/examples/aws_strands_agent/dbt_data_scientist/tools/__init__.py:
--------------------------------------------------------------------------------
```python
1 | from .dbt_compile import dbt_compile
2 | from .dbt_mcp import dbt_mcp_tool
3 | from .dbt_model_analyzer import dbt_model_analyzer_agent
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/args/full_refresh.md:
--------------------------------------------------------------------------------
```markdown
1 | If true, dbt will force a complete rebuild of incremental models (built from scratch) rather than processing new or modified data.
```
--------------------------------------------------------------------------------
/.changes/v0.2.16.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.16 - 2025-07-18
2 | ### Under the Hood
3 | * Adding the ability to exclude certain tools when registering
4 | * OpenAI responses example
5 |
```
--------------------------------------------------------------------------------
/ui/tsconfig.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "files": [],
3 | "references": [
4 | {
5 | "path": "./tsconfig.app.json"
6 | },
7 | {
8 | "path": "./tsconfig.node.json"
9 | }
10 | ]
11 | }
12 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/model_name.md:
--------------------------------------------------------------------------------
```markdown
1 | The name of the model to generate import CTEs for; search for the model name provided by the user both with and without a file extension (required)
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/args/model_names.md:
--------------------------------------------------------------------------------
```markdown
1 | List of model names to generate YAML documentation for; search for the model names provided by the user both with and without a file extension (required)
```
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "python.testing.pytestArgs": [
3 | "tests",
4 | "evals"
5 | ],
6 | "python.testing.pytestEnabled": true,
7 | "python.testing.unittestEnabled": false
8 | }
9 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.14.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.14 - 2025-07-14
2 | ### Enhancement or New Feature
3 | * Make dbt CLI command timeout configurable
4 | ### Bug Fix
5 | * Allow passing entities in the group by
6 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/errors/base.py:
--------------------------------------------------------------------------------
```python
1 | """Base error class for all dbt-mcp tool calls."""
2 |
3 |
4 | class ToolCallError(Exception):
5 | """Base exception for all tool call errors in dbt-mcp."""
6 |
7 | pass
8 |
```
--------------------------------------------------------------------------------
/.changes/v0.6.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.6.0 - 2025-08-22
2 | ### Under the Hood
3 | * Update docs with new tools
4 | * Using streamable http for SQL tools
5 | * Correctly handle admin API host containing protocol prefix
6 |
```
--------------------------------------------------------------------------------
/.changes/v0.9.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.9.0 - 2025-09-30
2 | ### Enhancement or New Feature
3 | * Adding the dbt codegen toolset.
4 | ### Under the Hood
5 | * Updates README with new tools
6 | * Fix .user.yml error with Fusion
7 |
```
--------------------------------------------------------------------------------
/.changes/v0.8.3.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.8.3 - 2025-09-24
2 | ### Under the Hood
3 | * Rename SemanticLayerConfig.service_token to SemanticLayerConfig.token
4 | ### Bug Fix
5 | * Fix Error handling as per native MCP error spec
6 |
```
--------------------------------------------------------------------------------
/.changes/v0.6.1.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.6.1 - 2025-08-28
2 | ### Enhancement or New Feature
3 | * Add support for --vars flag
4 | * Allow headers in AdminApiConfig
5 | ### Under the Hood
6 | * Remove redundant and outdated documentation
7 |
```
--------------------------------------------------------------------------------
/examples/openai_responses/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [project]
2 | name = "openai-responses"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 | "openai>=1.95.1",
9 | ]
10 |
```
--------------------------------------------------------------------------------
/.changes/v0.10.3.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.10.3 - 2025-10-08
2 | ### Under the Hood
3 | * Improved retry logic and post project selection screen
4 | * Avoid double counting in usage tracking proxied tools
5 | * Categorizing ToolCallErrors
6 |
```
--------------------------------------------------------------------------------
/examples/pydantic_ai_agent/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [project]
2 | name = "pydantic-ai-agent"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.10"
7 | dependencies = [
8 | "pydantic-ai>=0.8.1",
9 | ]
10 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/errors/semantic_layer.py:
--------------------------------------------------------------------------------
```python
1 | """Semantic Layer tool errors."""
2 |
3 | from dbt_mcp.errors.base import ToolCallError
4 |
5 |
6 | class SemanticLayerToolCallError(ToolCallError):
7 | """Base exception for Semantic Layer tool errors."""
8 |
9 | pass
10 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/args/limit.md:
--------------------------------------------------------------------------------
```markdown
1 | Limit the number of rows that the data platform will return. Use this in place of a `LIMIT` clause in the SQL query. If no limit is passed, use the default of 5 to prevent returning a very large result set.
```
--------------------------------------------------------------------------------
/.changes/v0.2.6.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.6 - 2025-06-16
2 | ### Under the Hood
3 | * Instructing the LLM to more likely use a selector
4 | * Instruct LLM to add limit as an argument instead of SQL
5 | * Fix use of limit in dbt show
6 | * Indicate type checking
7 |
```
--------------------------------------------------------------------------------
/.changes/v0.4.2.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.4.2 - 2025-08-13
2 | ### Enhancement or New Feature
3 | * Add default --limit to show tool
4 | ### Under the Hood
5 | * Define toolsets
6 | ### Bug Fix
7 | * Fix the prompt to ensure grain is passed even for non-time group by
8 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.8.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.8 - 2025-07-02
2 | ### Enhancement or New Feature
3 | * Raise errors if no node is selected (can also be configured)
4 | ### Bug Fix
5 | * Fix when people provide `DBT_PROJECT_DIR` as a relative path
6 | * Fix link in README
7 |
```
--------------------------------------------------------------------------------
/examples/openai_agent/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [project]
2 | name = "openai-agent"
3 | version = "0.1.0"
4 | description = "A simple example of using this MCP server with OpenAI Agents"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = ["openai-agents>=0.1.0"]
8 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.9.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.9 - 2025-07-02
2 | ### Enhancement or New Feature
3 | * Decrease amount of data retrieved when listing models
4 | ### Under the Hood
5 | * OpenAI conversational analytics example
6 | * README updates
7 | * Move Discover headers to config
8 |
```
--------------------------------------------------------------------------------
/examples/google_adk_agent/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [project]
2 | name = "google-adk-dbt-agent"
3 | version = "0.1.0"
4 | description = "Google ADK agent for dbt MCP"
5 | readme = "README.md"
6 | requires-python = ">=3.10"
7 | dependencies = [
8 | "google-adk>=0.4.0",
9 | "google-genai<=1.34.0",
10 | ]
```
--------------------------------------------------------------------------------
/.changes/v0.8.2.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.8.2 - 2025-09-23
2 | ### Enhancement or New Feature
3 | * Use `dbt --help` to identify binary type
4 | * Increase dbt CLI timeout default
5 | ### Under the Hood
6 | * Implement SemanticLayerClientProvider
7 | ### Bug Fix
8 | * Update how we identify CLIs
9 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.7.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.7 - 2025-06-30
2 | ### Under the Hood
3 | * Timeout dbt list command
4 | * Troubleshooting section in README on clients not finding uvx
5 | * Update Discovery config for simpler usage
6 | ### Bug Fix
7 | * Fixing bug when ordering SL query by a metric
8 |
```
--------------------------------------------------------------------------------
/.changes/v0.7.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.7.0 - 2025-09-09
2 | ### Enhancement or New Feature
3 | * Add tools to retrieve exposure information from Disco API
4 | ### Under the Hood
5 | * Expect string sub in oauth JWT
6 | * Using sync endpoints for oauth FastAPI server
7 | * Fix release pipeline
8 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/compile.md:
--------------------------------------------------------------------------------
```markdown
1 | dbt compile generates executable SQL from source model, test, and analysis files.
2 |
3 | The compile command is useful for visually inspecting the compiled output of model files. This is useful for validating complex jinja logic or macro usage.
4 |
```
--------------------------------------------------------------------------------
/examples/langgraph_agent/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [project]
2 | name = "langgraph-agent"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 | "langchain-mcp-adapters>=0.1.9",
9 | "langchain[anthropic]>=0.3.27",
10 | "langgraph>=0.6.4",
11 | ]
12 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_mart_models.md:
--------------------------------------------------------------------------------
```markdown
1 | Get the name and description of all mart models in the environment. A mart model is part of the presentation layer of the dbt project. It's where cleaned, transformed data is organized for consumption by end-users, like analysts, dashboards, or business tools.
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/show.md:
--------------------------------------------------------------------------------
```markdown
1 | dbt show executes an arbitrary SQL statement against the database and returns the results. It is useful for debugging and inspecting data in your dbt project. If you are adding a limit be sure to use the `limit` argument and do not add a limit to the SQL query.
```
--------------------------------------------------------------------------------
/.changes/v0.8.4.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.8.4 - 2025-09-29
2 | ### Enhancement or New Feature
3 | * Allow doc files to skip changie requirements
4 | ### Under the Hood
5 | * Upgrade @vitejs/plugin-react
6 | * Add ruff lint config to enforce Python 3.9+ coding style
7 | * Opt-out of usage tracking with standard dbt methods
8 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.18.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.18 - 2025-07-22
2 | ### Enhancement or New Feature
3 | * Move env var parsing to pydantic_settings for better validation
4 | ### Under the Hood
5 | * Add integration test for server initialization
6 | ### Bug Fix
7 | * Fix SL validation error message when no misspellings are found
8 |
```
--------------------------------------------------------------------------------
/.changes/v0.4.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.4.0 - 2025-08-08
2 | ### Enhancement or New Feature
3 | * Tool policies
4 | * Added Semantic Layer tool to get compiled sql
5 | ### Under the Hood
6 | * Fix JSON formatting in README
7 | * Document dbt Copilot credits relationship
8 | ### Bug Fix
9 | * Make model_name of get_model_details optional
10 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/errors/sql.py:
--------------------------------------------------------------------------------
```python
1 | """SQL tool errors."""
2 |
3 | from dbt_mcp.errors.base import ToolCallError
4 |
5 |
6 | class SQLToolCallError(ToolCallError):
7 | """Base exception for SQL tool errors."""
8 |
9 | pass
10 |
11 |
12 | class RemoteToolError(SQLToolCallError):
13 | """Exception raised when a remote SQL tool call fails."""
14 |
15 | pass
16 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.5.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.5 - 2025-06-06
2 | ### Under the Hood
3 | * Small improvements to improve logging and code organization.
4 | * Move `--selector` to the code instead of the prompt
5 | * Cursor deeplink setup
6 | * Fix Cursor deeplinks
7 | * Fix Cursor env var mess up
8 | ### Bug Fix
9 | * Fix Discovery API config enablement
10 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.2.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.2 - 2025-06-02
2 | ### Under the Hood
3 | * Update README to run the MCP server with uvx
4 | * Logging usage events
5 | * Improve remote tools error logging
6 | * Move create-release-tag to release Action
7 | * Update release process documentation
8 | ### Bug Fix
9 | * Fix typo in GH action to create release
10 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/parse.md:
--------------------------------------------------------------------------------
```markdown
1 | The dbt parse command parses and validates the contents of your dbt project. If your project contains Jinja or YAML syntax errors, the command will fail.
2 |
3 | It will also produce an artifact with detailed timing information, which is useful to understand parsing times for large projects.
4 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/lsp/args/model_id.md:
--------------------------------------------------------------------------------
```markdown
1 | The fully qualified dbt model identifier (e.g., "model.my_project_name.my_model").
2 |
3 | Do not use just the model name, always use the model full identifier. If you don't have the full identifier, the project name is the field `name:` in the `dbt_project.yml` file at the root of the dbt project.
```
--------------------------------------------------------------------------------
/src/dbt_mcp/errors/cli.py:
--------------------------------------------------------------------------------
```python
1 | """dbt CLI tool errors."""
2 |
3 | from dbt_mcp.errors.base import ToolCallError
4 |
5 |
6 | class CLIToolCallError(ToolCallError):
7 | """Base exception for dbt CLI tool errors."""
8 |
9 | pass
10 |
11 |
12 | class BinaryExecutionError(CLIToolCallError):
13 | """Exception raised when dbt binary execution fails."""
14 |
15 | pass
16 |
```
--------------------------------------------------------------------------------
/ui/index.html:
--------------------------------------------------------------------------------
```html
1 | <!doctype html>
2 | <html lang="en">
3 | <head>
4 | <meta charset="UTF-8" />
5 | <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6 | <title>dbt MCP</title>
7 | </head>
8 | <body>
9 | <div id="root"></div>
10 | <script type="module" src="/src/main.tsx"></script>
11 | </body>
12 | </html>
13 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/config/dbt_project.py:
--------------------------------------------------------------------------------
```python
1 | from pydantic import BaseModel, ConfigDict
2 |
3 |
4 | class DbtProjectFlags(BaseModel):
5 | model_config = ConfigDict(extra="allow")
6 | send_anonymous_usage_stats: bool | None = None
7 |
8 |
9 | class DbtProjectYaml(BaseModel):
10 | model_config = ConfigDict(extra="allow")
11 | flags: None | DbtProjectFlags = None
12 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/args/resource_type.md:
--------------------------------------------------------------------------------
```markdown
1 | It is possible to list specific resource types we want to return.
2 |
3 | We can pick any from the following and need to return a list of all the resource types to select:
4 | - metric
5 | - semantic_model
6 | - saved_query
7 | - source
8 | - analysis
9 | - model
10 | - test
11 | - unit_test
12 | - exposure
13 | - snapshot
14 | - seed
15 | - default
16 | - all
```
--------------------------------------------------------------------------------
/src/dbt_mcp/errors/common.py:
--------------------------------------------------------------------------------
```python
1 | """Common errors used across multiple tool types."""
2 |
3 | from dbt_mcp.errors.base import ToolCallError
4 |
5 |
6 | class InvalidParameterError(ToolCallError):
7 | """Exception raised when invalid or missing parameters are provided.
8 |
9 | This is a cross-cutting error used by multiple tool types.
10 | """
11 |
12 | pass
13 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/oauth/logging.py:
--------------------------------------------------------------------------------
```python
1 | import logging
2 |
3 |
4 | def disable_server_logs() -> None:
5 | # Disable uvicorn, fastapi, and related loggers
6 | loggers = (
7 | "uvicorn",
8 | "uvicorn.error",
9 | "uvicorn.access",
10 | "fastapi",
11 | )
12 |
13 | for logger_name in loggers:
14 | logging.getLogger(logger_name).disabled = True
15 |
```
--------------------------------------------------------------------------------
/.changes/header.tpl.md:
--------------------------------------------------------------------------------
```markdown
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6 | adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html),
7 | and is generated by [Changie](https://github.com/miniscruff/changie).
8 |
```
--------------------------------------------------------------------------------
/.changes/v1.0.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v1.0.0 - 2025-10-20
2 | ### Enhancement or New Feature
3 | * Introduce support for fusion LSP
4 | ### Under the Hood
5 | * Add support for Python debugger
6 | * Update pyproject.toml including development status
7 | * Add example for aws_strands_agent
8 | ### Bug Fix
9 | * Exclude Python 3.14 for now as pyarrow hasn't released wheels yet
10 |
```
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "configurations": [
3 | {
4 | "console": "integratedTerminal",
5 | "env": {
6 | "MCP_TRANSPORT": "streamable-http"
7 | },
8 | "name": "debug dbt-mcp",
9 | "program": "${workspaceFolder}/src/dbt_mcp/main.py",
10 | "request": "launch",
11 | "type": "debugpy"
12 | }
13 | ],
14 | "version": "0.2.0"
15 | }
16 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/run.md:
--------------------------------------------------------------------------------
```markdown
1 | dbt run executes compiled sql model files against the current target database. dbt connects to the target database and runs the relevant SQL required to materialize all data models using the specified materialization strategies. Models are run in the order defined by the dependency graph generated during compilation.
2 |
```
--------------------------------------------------------------------------------
/.changes/v0.2.20.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.20 - 2025-07-25
2 | ### Enhancement or New Feature
3 | * Allow for disabling CLI tools
4 | ### Under the Hood
5 | * Update codeowners
6 | * Improve DISABLE_TOOLS configuration
7 | * Remote MCP example
8 | * Add unit tests for env vars combinations
9 | * Add instructions for Claude Code in README
10 | * Add new example for OpenAI + HTTP Streamable MCP
11 |
```
--------------------------------------------------------------------------------
/tests/integration/tracking/test_tracking.py:
--------------------------------------------------------------------------------
```python
1 | import pytest
2 | from dbtlabs_vortex.producer import shutdown
3 |
4 | from dbt_mcp.config.config import load_config
5 | from dbt_mcp.mcp.server import create_dbt_mcp
6 |
7 |
8 | @pytest.mark.asyncio
9 | async def test_tracking():
10 | config = load_config()
11 | await (await create_dbt_mcp(config)).call_tool("list_metrics", {"foo": "bar"})
12 | shutdown()
13 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/errors/discovery.py:
--------------------------------------------------------------------------------
```python
1 | """Discovery/Metadata API tool errors."""
2 |
3 | from dbt_mcp.errors.base import ToolCallError
4 |
5 |
6 | class DiscoveryToolCallError(ToolCallError):
7 | """Base exception for Discovery/Metadata API tool errors."""
8 |
9 | pass
10 |
11 |
12 | class GraphQLError(DiscoveryToolCallError):
13 | """Exception raised for GraphQL API and query errors."""
14 |
15 | pass
16 |
```
--------------------------------------------------------------------------------
/examples/remote_mcp/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [project]
2 | name = "remote-mcp"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = ["mcp>=1.12.1"]
8 |
9 | [build-system]
10 | requires = ["setuptools>=61.0"]
11 | build-backend = "setuptools.build_meta"
12 |
13 | [tool.setuptools.packages.find]
14 | where = ["../../src"]
15 | include = ["remote_mcp*"]
16 |
```
--------------------------------------------------------------------------------
/.changes/v0.6.2.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.6.2 - 2025-09-08
2 | ### Enhancement or New Feature
3 | * Adding the ability to return the config.meta attribute from list metrics to give the LLM more context
4 | * Oauth initial implementation
5 | * Fix #251 - Add flag for no color + ability to detect binary type
6 | ### Under the Hood
7 | * Add docs for using the MCP server with Pydantic AI
8 | * Don't run mypy on examples
9 |
```
--------------------------------------------------------------------------------
/.changes/v0.3.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.3.0 - 2025-08-05
2 | ### Enhancement or New Feature
3 | * Add ToolAnnotations
4 | * Add alias field to GET_MODEL_DETAILS GraphQL query
5 | ### Under the Hood
6 | * Test remote tool equality
7 | * Fix initialization integration test
8 | * Refactor README
9 | * Rename Remote Tools to SQL Tools
10 | * Document Remote MCP
11 | * Improved Remote MCP instructions
12 | ### Bug Fix
13 | * Apply dbt_cli_timeout to all dbt commands
14 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/lsp/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """LSP (Language Server Protocol) integration for dbt Fusion."""
2 |
3 | from dbt_mcp.lsp.lsp_binary_manager import LspBinaryInfo
4 | from dbt_mcp.lsp.lsp_client import LSPClient
5 | from dbt_mcp.lsp.lsp_connection import (
6 | LSPConnection,
7 | LspConnectionState,
8 | LspEventName,
9 | )
10 |
11 | __all__ = [
12 | "LSPClient",
13 | "LSPConnection",
14 | "LspBinaryInfo",
15 | "LspConnectionState",
16 | "LspEventName",
17 | ]
18 |
```
--------------------------------------------------------------------------------
/.changes/v0.10.2.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.10.2 - 2025-10-08
2 | ### Enhancement or New Feature
3 | * Improved oauth error handling
4 | * Remove oauth env var feature flag. Enable oauth broadly.
5 | ### Under the Hood
6 | * Improved logging for development
7 | * Updating prompts to include examples to avoid bad parameter generation
8 | * Remove DBT_HOST prefix
9 | * Update usage tracking with new fields
10 | * Write .user.yml if it does not exist
11 | * Changed UsageTracker to a protocol
12 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_exposures.md:
--------------------------------------------------------------------------------
```markdown
1 | Get the id, name, description, and url of all exposures in the dbt environment. Exposures represent downstream applications or analyses that depend on dbt models.
2 |
3 | Returns information including:
4 | - uniqueId: The unique identifier for this exposure that can then be used to get more details about the exposure
5 | - name: The name of the exposure
6 | - description: Description of the exposure
7 | - url: URL associated with the exposure
```
--------------------------------------------------------------------------------
/src/dbt_mcp/main.py:
--------------------------------------------------------------------------------
```python
1 | import asyncio
2 | import os
3 |
4 | from dbt_mcp.config.config import load_config
5 | from dbt_mcp.config.transport import validate_transport
6 | from dbt_mcp.mcp.server import create_dbt_mcp
7 |
8 |
9 | def main() -> None:
10 | config = load_config()
11 | server = asyncio.run(create_dbt_mcp(config))
12 | transport = validate_transport(os.environ.get("MCP_TRANSPORT", "stdio"))
13 | server.run(transport=transport)
14 |
15 |
16 | if __name__ == "__main__":
17 | main()
18 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/errors/admin_api.py:
--------------------------------------------------------------------------------
```python
1 | """Admin API tool errors."""
2 |
3 | from dbt_mcp.errors.base import ToolCallError
4 |
5 |
6 | class AdminAPIToolCallError(ToolCallError):
7 | """Base exception for Admin API tool errors."""
8 |
9 | pass
10 |
11 |
12 | class AdminAPIError(AdminAPIToolCallError):
13 | """Exception raised for Admin API communication errors."""
14 |
15 | pass
16 |
17 |
18 | class ArtifactRetrievalError(AdminAPIToolCallError):
19 | """Exception raised when artifact retrieval fails."""
20 |
21 | pass
22 |
```
--------------------------------------------------------------------------------
/.github/actions/setup-python/action.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Setup Python
2 | description: Setup Python for dbt-mcp
3 |
4 | runs:
5 | using: "composite"
6 | steps:
7 | - name: Setup python
8 | id: setup-python
9 | uses: actions/setup-python@v5
10 | with:
11 | python-version: 3.13
12 |
13 | - name: Setup uv
14 | uses: astral-sh/setup-uv@v4
15 | with:
16 | enable-cache: true
17 | cache-dependency-glob: "uv.lock"
18 |
19 | - name: Install the project
20 | shell: bash
21 | run: uv sync
22 |
```
--------------------------------------------------------------------------------
/ui/vite.config.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { defineConfig } from "vite";
2 | import react from "@vitejs/plugin-react";
3 |
4 | // https://vite.dev/config/
5 | export default defineConfig({
6 | plugins: [react()],
7 | // Use relative asset URLs so the app works when served from any base path
8 | base: "./",
9 | build: {
10 | // Emit into the Python package so assets are included in sdist/wheels
11 | outDir: "../src/dbt_mcp/ui/dist",
12 | emptyOutDir: true,
13 | assetsDir: "assets",
14 | },
15 | });
16 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/semantic_layer/list_metrics.md:
--------------------------------------------------------------------------------
```markdown
1 | List all metrics from the dbt Semantic Layer.
2 |
3 | If the user is asking a data-related or business-related question,
4 | this tool should be used as a first step to get a list of metrics
5 | that can be used with other tools to answer the question.
6 |
7 | Examples:
8 | - "What are the top 5 products by revenue?"
9 | - "How many users did we have last month?"
10 |
11 | <parameters>
12 | search: Optional string used to filter metrics by name using partial matches
13 | </parameters>
```
--------------------------------------------------------------------------------
/.changes/v0.5.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.5.0 - 2025-08-20
2 | ### Enhancement or New Feature
3 | * Add support for --full-refresh flag
4 | * Adds a new tool to get model health (last run, tests, source freshness) from discovery API
5 | * Add operational/admin tools to interact with the dbt platform
6 | ### Under the Hood
7 | * LangGraph create_react_agent example
8 | * Make model_name optional for more discovery tools
9 | * Update example with OpenAI to show tool calls
10 | ### Bug Fix
11 | * Fix for timeout on Windows
12 |
```
--------------------------------------------------------------------------------
/src/client/tools.py:
--------------------------------------------------------------------------------
```python
1 | from openai.types.responses import (
2 | FunctionToolParam,
3 | )
4 |
5 | from dbt_mcp.mcp.server import DbtMCP
6 |
7 |
8 | async def get_tools(dbt_mcp: DbtMCP) -> list[FunctionToolParam]:
9 | mcp_tools = await dbt_mcp.list_tools()
10 | return [
11 | FunctionToolParam(
12 | type="function",
13 | name=t.name,
14 | description=t.description,
15 | parameters=t.inputSchema,
16 | strict=False,
17 | )
18 | for t in mcp_tools
19 | ]
20 |
```
--------------------------------------------------------------------------------
/tests/integration/initialization/test_initialization.py:
--------------------------------------------------------------------------------
```python
1 | import asyncio
2 | from unittest.mock import patch
3 |
4 | from dbt_mcp.mcp.server import create_dbt_mcp
5 | from tests.mocks.config import mock_config
6 |
7 |
8 | def test_initialization():
9 | with patch("dbt_mcp.config.config.load_config", return_value=mock_config):
10 | result = asyncio.run(create_dbt_mcp(mock_config))
11 |
12 | assert result is not None
13 | assert hasattr(result, "usage_tracker")
14 |
15 | tools = asyncio.run(result.list_tools())
16 | assert isinstance(tools, list)
17 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/tools/definitions.py:
--------------------------------------------------------------------------------
```python
1 | from collections.abc import Callable
2 | from dataclasses import dataclass
3 |
4 | from mcp.types import ToolAnnotations
5 |
6 |
7 | @dataclass
8 | class ToolDefinition:
9 | fn: Callable
10 | description: str
11 | name: str | None = None
12 | title: str | None = None
13 | annotations: ToolAnnotations | None = None
14 | # We haven't strictly defined our tool contracts yet.
15 | # So we're setting this to False by default for now.
16 | structured_output: bool | None = False
17 |
18 | def get_name(self) -> str:
19 | return self.name or self.fn.__name__
20 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/semantic_layer/gql/gql_request.py:
--------------------------------------------------------------------------------
```python
1 | import requests
2 |
3 | from dbt_mcp.config.config_providers import SemanticLayerConfig
4 | from dbt_mcp.gql.errors import raise_gql_error
5 |
6 |
7 | def submit_request(
8 | sl_config: SemanticLayerConfig,
9 | payload: dict,
10 | ) -> dict:
11 | if "variables" not in payload:
12 | payload["variables"] = {}
13 | payload["variables"]["environmentId"] = sl_config.prod_environment_id
14 | r = requests.post(
15 | sl_config.url, json=payload, headers=sl_config.headers_provider.get_headers()
16 | )
17 | result = r.json()
18 | raise_gql_error(result)
19 | return result
20 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/args/vars.md:
--------------------------------------------------------------------------------
```markdown
1 | Variables can be passed to dbt commands using the vars parameter. Variables can be accessed in dbt code using `{{ var('variable_name') }}`.
2 |
3 | Note: The vars parameter must be passed as a simple STRING with no special characters (i.e "\", "\n", etc). Do not pass in a dictionary object.
4 |
5 | Supported formats:
6 | - Single variable (curly brackets optional): `"variable_name: value"`
7 | - Multiple variables (curly brackets needed): `"{"key1": "value1", "key2": "value2"}"`
8 | - Mixed types: `"{"string_var": "hello", "number_var": 42, "boolean_var": true}"`
```
--------------------------------------------------------------------------------
/.github/workflows/codeowners-check.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: CODEOWNERS Coverage Check
2 |
3 | on:
4 | pull_request:
5 | workflow_dispatch:
6 |
7 | jobs:
8 | validate-codeowners:
9 | runs-on: ubuntu-latest
10 | permissions:
11 | contents: read
12 | pull-requests: write
13 | steps:
14 | - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
15 | - uses: mszostok/codeowners-validator@7f3f5e28c6d7b8dfae5731e54ce2272ca384592f
16 | with:
17 | github_access_token: "${{ secrets.OWNERS_VALIDATOR_GITHUB_SECRET }}"
18 | checks: "duppatterns"
19 | experimental_checks: "notowned"
20 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/config/dbt_yaml.py:
--------------------------------------------------------------------------------
```python
1 | from pathlib import Path
2 |
3 | import yaml
4 |
5 |
6 | def try_read_yaml(file_path: Path) -> dict | None:
7 | try:
8 | suffix = file_path.suffix.lower()
9 | if suffix not in {".yml", ".yaml"}:
10 | return None
11 | alternate_suffix = ".yaml" if suffix == ".yml" else ".yml"
12 | alternate_path = file_path.with_suffix(alternate_suffix)
13 | if file_path.exists():
14 | return yaml.safe_load(file_path.read_text())
15 | if alternate_path.exists():
16 | return yaml.safe_load(alternate_path.read_text())
17 | except Exception:
18 | return None
19 | return None
20 |
```
--------------------------------------------------------------------------------
/ui/tsconfig.node.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "compilerOptions": {
3 | "allowImportingTsExtensions": true,
4 | "erasableSyntaxOnly": true,
5 | "lib": [
6 | "ES2023"
7 | ],
8 | "module": "ESNext",
9 | "moduleDetection": "force",
10 | "moduleResolution": "bundler",
11 | "noEmit": true,
12 | "noFallthroughCasesInSwitch": true,
13 | "noUncheckedSideEffectImports": true,
14 | "noUnusedLocals": true,
15 | "noUnusedParameters": true,
16 | "skipLibCheck": true,
17 | "strict": true,
18 | "target": "ES2023",
19 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
20 | "verbatimModuleSyntax": true
21 | },
22 | "include": [
23 | "vite.config.ts"
24 | ]
25 | }
26 |
```
--------------------------------------------------------------------------------
/ui/eslint.config.js:
--------------------------------------------------------------------------------
```javascript
1 | import js from '@eslint/js'
2 | import globals from 'globals'
3 | import reactHooks from 'eslint-plugin-react-hooks'
4 | import reactRefresh from 'eslint-plugin-react-refresh'
5 | import tseslint from 'typescript-eslint'
6 | import { globalIgnores } from 'eslint/config'
7 |
8 | export default tseslint.config([
9 | globalIgnores(['dist']),
10 | {
11 | files: ['**/*.{ts,tsx}'],
12 | extends: [
13 | js.configs.recommended,
14 | tseslint.configs.recommended,
15 | reactHooks.configs['recommended-latest'],
16 | reactRefresh.configs.vite,
17 | ],
18 | languageOptions: {
19 | ecmaVersion: 2020,
20 | globals: globals.browser,
21 | },
22 | },
23 | ])
24 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/lsp/get_column_lineage.md:
--------------------------------------------------------------------------------
```markdown
1 | Get the column-level lineage for a specific column in a dbt model.
2 |
3 | This tool traces how a column's value is derived from upstream sources through transformations, showing the complete lineage path from source tables to the target column. It's useful for understanding data flow, debugging data quality issues, and impact analysis.
4 |
5 | The lineage includes:
6 | - Upstream columns that contribute to this column
7 | - SQL transformations applied
8 | - Intermediate models in the lineage path
9 | - Source tables and columns
10 |
11 | Use this when you need to understand where a column's data comes from or which upstream changes might affect it.
12 |
```
--------------------------------------------------------------------------------
/tests/integration/remote_mcp/test_remote_mcp.py:
--------------------------------------------------------------------------------
```python
1 | from dbt_mcp.config.config import load_config
2 | from dbt_mcp.mcp.server import create_dbt_mcp
3 | from remote_mcp.session import session_context
4 |
5 |
6 | async def test_remote_mcp_list_metrics_equals_local_mcp() -> None:
7 | async with session_context() as session:
8 | config = load_config()
9 | dbt_mcp = await create_dbt_mcp(config)
10 |
11 | remote_metrics = await session.call_tool(
12 | name="list_metrics",
13 | arguments={},
14 | )
15 | local_metrics = await dbt_mcp.call_tool(
16 | name="list_metrics",
17 | arguments={},
18 | )
19 | assert remote_metrics.content == local_metrics
20 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/gql/errors.py:
--------------------------------------------------------------------------------
```python
1 | from dbt_mcp.errors import GraphQLError
2 |
3 |
4 | def raise_gql_error(result: dict) -> None:
5 | if result.get("errors"):
6 | if len(result.get("errors", [])) > 0:
7 | error_messages = [
8 | error.get("message", "Unknown error")
9 | for error in result.get("errors", [])
10 | if isinstance(error, dict)
11 | ]
12 | if error_messages:
13 | raise GraphQLError(f"Errors calling API: {', '.join(error_messages)}")
14 | raise GraphQLError(
15 | "Unknown error calling API. "
16 | + "Check your configuration or contact support if this persists."
17 | )
18 |
```
--------------------------------------------------------------------------------
/ui/package.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "dependencies": {
3 | "react": "^19.1.1",
4 | "react-dom": "^19.1.1"
5 | },
6 | "devDependencies": {
7 | "@eslint/js": "^9.33.0",
8 | "@types/react": "^19.1.10",
9 | "@types/react-dom": "^19.1.7",
10 | "@vitejs/plugin-react": "^5.0.2",
11 | "eslint": "^9.33.0",
12 | "eslint-plugin-react-hooks": "^5.2.0",
13 | "eslint-plugin-react-refresh": "^0.4.20",
14 | "globals": "^16.3.0",
15 | "typescript": "~5.8.3",
16 | "typescript-eslint": "^8.39.1",
17 | "vite": "^7.1.11"
18 | },
19 | "name": "dbt-mcp",
20 | "private": true,
21 | "scripts": {
22 | "build": "tsc -b && vite build",
23 | "dev": "vite",
24 | "lint": "eslint .",
25 | "preview": "vite preview"
26 | },
27 | "type": "module",
28 | "version": "0.0.0"
29 | }
30 |
```
--------------------------------------------------------------------------------
/ui/tsconfig.app.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "compilerOptions": {
3 | "allowImportingTsExtensions": true,
4 | "erasableSyntaxOnly": true,
5 | "jsx": "react-jsx",
6 | "lib": [
7 | "ES2022",
8 | "DOM",
9 | "DOM.Iterable"
10 | ],
11 | "module": "ESNext",
12 | "moduleDetection": "force",
13 | "moduleResolution": "bundler",
14 | "noEmit": true,
15 | "noFallthroughCasesInSwitch": true,
16 | "noUncheckedSideEffectImports": true,
17 | "noUnusedLocals": true,
18 | "noUnusedParameters": true,
19 | "skipLibCheck": true,
20 | "strict": true,
21 | "target": "ES2022",
22 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
23 | "useDefineForClassFields": true,
24 | "verbatimModuleSyntax": true
25 | },
26 | "include": [
27 | "src"
28 | ]
29 | }
30 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/config/transport.py:
--------------------------------------------------------------------------------
```python
1 | import logging
2 | from typing import Literal
3 |
4 | logger = logging.getLogger(__name__)
5 |
6 | TransportType = Literal["stdio", "sse", "streamable-http"]
7 | VALID_TRANSPORTS: set[TransportType] = {"stdio", "sse", "streamable-http"}
8 |
9 |
10 | def validate_transport(transport: str) -> TransportType:
11 | """Validate and return the MCP transport type."""
12 | transport = transport.strip().lower()
13 |
14 | if transport not in VALID_TRANSPORTS:
15 | valid_options = ", ".join(sorted(VALID_TRANSPORTS))
16 | raise ValueError(
17 | f"Invalid MCP_TRANSPORT: '{transport}'. Must be one of: {valid_options}"
18 | )
19 |
20 | logger.debug(f"Using MCP transport: {transport}")
21 | return transport # type: ignore[return-value]
22 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/generate_model_yaml.md:
--------------------------------------------------------------------------------
```markdown
1 | Generate YAML documentation for dbt models by introspecting their compiled SQL. This tool analyzes one or more models to produce complete schema.yml entries with all column names and optionally their data types. Can intelligently inherit column descriptions from upstream models and sources for consistent documentation. Essential for maintaining comprehensive model documentation and enabling dbt's testing framework.
2 |
3 | Note: This is one of three dbt-codegen tools (generate_source, generate_model_yaml, generate_staging_model) that are available when dbt-codegen is enabled. These tools are opt-in and must be explicitly enabled in the configuration.
4 |
5 | Always return the generated YAML output to the user.
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/generate_source.md:
--------------------------------------------------------------------------------
```markdown
1 | Generate lightweight YAML for dbt sources by introspecting database schemas. This tool queries your warehouse to discover tables in a schema and produces the YAML structure for a source that can be pasted into a schema.yml file. Supports selective table generation and can optionally include column definitions with data types. Perfect for bootstrapping new sources or documenting existing database schemas in your dbt project.
2 |
3 | Note: This is one of three dbt-codegen tools (generate_source, generate_model_yaml, generate_staging_model) that are available when dbt-codegen is enabled. These tools are opt-in and must be explicitly enabled in the configuration.
4 |
5 | Always return the generated YAML output to the user.
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_codegen/generate_staging_model.md:
--------------------------------------------------------------------------------
```markdown
1 | Generate SQL for a staging model from a source table. This creates a complete SELECT statement with all columns from the source, properly aliased and formatted according to dbt best practices. Includes optional model configuration block for materialization, column name casing preferences, and comma placement style. The generated SQL serves as the foundation for your staging layer and can be directly saved as a new model file.
2 |
3 | Note: This is one of three dbt-codegen tools (generate_source, generate_model_yaml, generate_staging_model) that are available when dbt-codegen is enabled. These tools are opt-in and must be explicitly enabled in the configuration.
4 |
5 | Always return the generated SQL output to the user.
```
--------------------------------------------------------------------------------
/.changes/v0.8.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.8.0 - 2025-09-22
2 | ### Enhancement or New Feature
3 | * Allow creating pre-releases
4 | * Return compiled code in get_model_details
5 | ### Under the Hood
6 | * Handle Claude Desktop running multiple MCP server instances
7 | * Add docs for using the MCP server with google ADK and dbt-core
8 | * Add search string to SL metadata queries
9 | * Improve parameters in query_metrics examples
10 | * Reduce token usage in `get_job_run_details` response by removing debug param and unnecessary logs
11 | * Automatically refresh oauth token
12 | * Improve dbt platform context mcp.yml parsing
13 | * Add PR and issue templates
14 | * Address claude desktop re-triggering oauth on exit
15 | * Turning off caching for static files
16 | ### Bug Fix
17 | * Add account id to dbt platform context
18 |
```
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
```markdown
1 | ## Summary
2 | <!-- Provide a brief description of the changes in this PR -->
3 |
4 | ## What Changed
5 | <!-- Describe the changes made in this PR -->
6 |
7 | ## Why
8 | <!-- Explain the motivation for these changes -->
9 |
10 | ## Related Issues
11 | <!-- Link any related issues using #issue_number -->
12 | Closes #
13 | Related to #
14 |
15 |
16 | ## Checklist
17 | - [ ] I have performed a self-review of my code
18 | - [ ] I have made corresponding changes to the documentation (in https://github.com/dbt-labs/docs.getdbt.com) if required -- Mention it here
19 | - [ ] I have added tests that prove my fix is effective or that my feature works
20 | - [ ] New and existing unit tests pass locally with my changes
21 |
22 | ## Additional Notes
23 | <!-- Any additional information that would be helpful for reviewers -->
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/list_jobs.md:
--------------------------------------------------------------------------------
```markdown
1 | List all jobs in a dbt platform account with optional filtering.
2 |
3 | This tool retrieves jobs from the dbt Admin API. Jobs define the configuration for scheduled or triggered dbt runs.
4 |
5 | ## Parameters
6 |
7 | - **project_id** (optional): Filter jobs by specific project ID
8 | - **environment_id** (optional): Filter jobs by specific environment ID
9 | - **limit** (optional): Maximum number of results to return
10 | - **offset** (optional): Number of results to skip for pagination
11 |
12 | Returns a list of job objects with details like:
13 | - Job ID, name, and description
14 | - Environment and project information
15 | - Schedule configuration
16 | - Execute steps (dbt commands)
17 | - Trigger settings
18 |
19 | Use this tool to explore available jobs, understand job configurations, or find specific jobs to trigger.
20 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/dbt_admin/constants.py:
--------------------------------------------------------------------------------
```python
1 | from enum import Enum
2 |
3 |
4 | class JobRunStatus(str, Enum):
5 | """Enum for job run status values."""
6 |
7 | QUEUED = "queued"
8 | STARTING = "starting"
9 | RUNNING = "running"
10 | SUCCESS = "success"
11 | ERROR = "error"
12 | CANCELLED = "cancelled"
13 |
14 |
15 | class RunResultsStatus(str, Enum):
16 | """Enum for run_results.json status values."""
17 |
18 | SUCCESS = "success"
19 | ERROR = "error"
20 | FAIL = "fail"
21 | SKIP = "skip"
22 |
23 |
24 | STATUS_MAP = {
25 | JobRunStatus.QUEUED: 1,
26 | JobRunStatus.STARTING: 2,
27 | JobRunStatus.RUNNING: 3,
28 | JobRunStatus.SUCCESS: 10,
29 | JobRunStatus.ERROR: 20,
30 | JobRunStatus.CANCELLED: 30,
31 | }
32 |
33 | # String match in run_results_errors/parser.py to identify source freshness step
34 | # in run_details response
35 | SOURCE_FRESHNESS_STEP_NAME = "source freshness"
36 |
```
--------------------------------------------------------------------------------
/tests/unit/config/test_transport.py:
--------------------------------------------------------------------------------
```python
1 | import pytest
2 |
3 | from dbt_mcp.config.transport import validate_transport
4 |
5 |
6 | class TestValidateTransport:
7 | def test_valid_transports(self):
8 | assert validate_transport("stdio") == "stdio"
9 | assert validate_transport("sse") == "sse"
10 | assert validate_transport("streamable-http") == "streamable-http"
11 |
12 | def test_case_insensitive_and_whitespace(self):
13 | assert validate_transport(" STDIO ") == "stdio"
14 | assert validate_transport("SSE") == "sse"
15 |
16 | def test_invalid_transport_raises_error(self):
17 | with pytest.raises(ValueError) as exc_info:
18 | validate_transport("invalid")
19 |
20 | assert "Invalid MCP_TRANSPORT: 'invalid'" in str(exc_info.value)
21 | assert "sse, stdio, streamable-http" in str(exc_info.value)
22 |
```
--------------------------------------------------------------------------------
/tests/integration/remote_tools/test_remote_tools.py:
--------------------------------------------------------------------------------
```python
1 | from mcp.server.fastmcp import FastMCP
2 |
3 | from dbt_mcp.config.config import load_config
4 | from dbt_mcp.sql.tools import register_sql_tools
5 |
6 |
7 | async def test_sql_tool_execute_sql():
8 | config = load_config()
9 | dbt_mcp = FastMCP("Test")
10 | await register_sql_tools(dbt_mcp, config.sql_config)
11 | tools = await dbt_mcp.list_tools()
12 | print(tools)
13 | result = await dbt_mcp.call_tool("execute_sql", {"sql": "SELECT 1"})
14 | assert len(result) == 1
15 | assert "1" in result[0].text
16 |
17 |
18 | async def test_sql_tool_text_to_sql():
19 | config = load_config()
20 | dbt_mcp = FastMCP("Test")
21 | await register_sql_tools(dbt_mcp, config.sql_config)
22 | result = await dbt_mcp.call_tool("text_to_sql", {"text": "SELECT 1"})
23 | assert len(result) == 1
24 | assert "SELECT 1" in result[0].text
25 |
```
--------------------------------------------------------------------------------
/src/remote_mcp/session.py:
--------------------------------------------------------------------------------
```python
1 | import contextlib
2 | import os
3 | from collections.abc import AsyncGenerator
4 |
5 | from mcp import ClientSession
6 | from mcp.client.streamable_http import streamablehttp_client
7 |
8 |
9 | @contextlib.asynccontextmanager
10 | async def session_context() -> AsyncGenerator[ClientSession, None]:
11 | async with (
12 | streamablehttp_client(
13 | url=f"https://{os.environ.get('DBT_HOST')}/api/ai/v1/mcp/",
14 | headers={
15 | "Authorization": f"token {os.environ.get('DBT_TOKEN')}",
16 | "x-dbt-prod-environment-id": os.environ.get("DBT_PROD_ENV_ID", ""),
17 | },
18 | ) as (
19 | read_stream,
20 | write_stream,
21 | _,
22 | ),
23 | ClientSession(read_stream, write_stream) as session,
24 | ):
25 | await session.initialize()
26 | yield session
27 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/oauth/token.py:
--------------------------------------------------------------------------------
```python
1 | from typing import Any
2 |
3 | import jwt
4 | from jwt import PyJWKClient
5 | from pydantic import BaseModel
6 |
7 |
8 | class AccessTokenResponse(BaseModel):
9 | access_token: str
10 | refresh_token: str
11 | expires_in: int
12 | scope: str
13 | token_type: str
14 | expires_at: int
15 |
16 |
17 | class DecodedAccessToken(BaseModel):
18 | access_token_response: AccessTokenResponse
19 | decoded_claims: dict[str, Any]
20 |
21 |
22 | def fetch_jwks_and_verify_token(
23 | access_token: str, dbt_platform_url: str
24 | ) -> dict[str, Any]:
25 | jwks_url = f"{dbt_platform_url}/.well-known/jwks.json"
26 | jwks_client = PyJWKClient(jwks_url)
27 | signing_key = jwks_client.get_signing_key_from_jwt(access_token)
28 | claims = jwt.decode(
29 | access_token,
30 | signing_key.key,
31 | algorithms=["RS256"],
32 | options={"verify_aud": False},
33 | )
34 | return claims
35 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/tools/register.py:
--------------------------------------------------------------------------------
```python
1 | from collections.abc import Sequence
2 |
3 | from mcp.server.fastmcp import FastMCP
4 |
5 | from dbt_mcp.tools.definitions import ToolDefinition
6 | from dbt_mcp.tools.tool_names import ToolName
7 |
8 |
9 | def register_tools(
10 | dbt_mcp: FastMCP,
11 | tool_definitions: list[ToolDefinition],
12 | exclude_tools: Sequence[ToolName] = [],
13 | ) -> None:
14 | for tool_definition in tool_definitions:
15 | if tool_definition.get_name().lower() in [
16 | tool.value.lower() for tool in exclude_tools
17 | ]:
18 | continue
19 | dbt_mcp.tool(
20 | name=tool_definition.get_name(),
21 | title=tool_definition.title,
22 | description=tool_definition.description,
23 | annotations=tool_definition.annotations,
24 | structured_output=tool_definition.structured_output,
25 | )(tool_definition.fn)
26 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/errors/__init__.py:
--------------------------------------------------------------------------------
```python
1 | from dbt_mcp.errors.admin_api import (
2 | AdminAPIError,
3 | AdminAPIToolCallError,
4 | ArtifactRetrievalError,
5 | )
6 | from dbt_mcp.errors.base import ToolCallError
7 | from dbt_mcp.errors.cli import BinaryExecutionError, CLIToolCallError
8 | from dbt_mcp.errors.common import InvalidParameterError
9 | from dbt_mcp.errors.discovery import DiscoveryToolCallError, GraphQLError
10 | from dbt_mcp.errors.semantic_layer import SemanticLayerToolCallError
11 | from dbt_mcp.errors.sql import RemoteToolError, SQLToolCallError
12 |
13 | __all__ = [
14 | "AdminAPIError",
15 | "AdminAPIToolCallError",
16 | "ArtifactRetrievalError",
17 | "BinaryExecutionError",
18 | "CLIToolCallError",
19 | "DiscoveryToolCallError",
20 | "GraphQLError",
21 | "InvalidParameterError",
22 | "RemoteToolError",
23 | "SQLToolCallError",
24 | "SemanticLayerToolCallError",
25 | "ToolCallError",
26 | ]
27 |
```
--------------------------------------------------------------------------------
/tests/unit/discovery/conftest.py:
--------------------------------------------------------------------------------
```python
1 | from unittest.mock import Mock
2 |
3 | import pytest
4 |
5 | from dbt_mcp.discovery.client import MetadataAPIClient
6 |
7 |
8 | @pytest.fixture
9 | def mock_api_client():
10 | """
11 | Shared mock MetadataAPIClient for discovery tests.
12 |
13 | Provides a mock API client with:
14 | - A config_provider that returns environment_id = 123
15 | - An async get_config() method for compatibility with async tests
16 |
17 | Used by test_sources_fetcher.py and test_exposures_fetcher.py.
18 | """
19 | mock_client = Mock(spec=MetadataAPIClient)
20 | # Add config_provider mock that returns environment_id
21 | mock_config_provider = Mock()
22 | mock_config = Mock()
23 | mock_config.environment_id = 123
24 |
25 | # Make get_config async
26 | async def mock_get_config():
27 | return mock_config
28 |
29 | mock_config_provider.get_config = mock_get_config
30 | mock_client.config_provider = mock_config_provider
31 | return mock_client
32 |
```
--------------------------------------------------------------------------------
/ui/src/main.tsx:
--------------------------------------------------------------------------------
```typescript
1 | import { StrictMode } from "react";
2 | import { createRoot } from "react-dom/client";
3 | import "./index.css";
4 | import App from "./App.tsx";
5 |
6 | createRoot(document.getElementById("root")!).render(
7 | <StrictMode>
8 | <App />
9 | </StrictMode>
10 | );
11 |
12 | // Attempt to gracefully shut down the backend server when the window/tab closes
13 | let shutdownSent = false;
14 | const shutdownServer = () => {
15 | if (shutdownSent) return;
16 | shutdownSent = true;
17 | const url = "/shutdown";
18 | try {
19 | if ("sendBeacon" in navigator) {
20 | const body = new Blob([""], { type: "text/plain" });
21 | navigator.sendBeacon(url, body);
22 | } else {
23 | // Best-effort fallback; keepalive helps during unload
24 | fetch(url, { method: "POST", keepalive: true }).catch(() => {});
25 | }
26 | } catch {
27 | // noop
28 | }
29 | };
30 |
31 | window.addEventListener("pagehide", shutdownServer);
32 | window.addEventListener("beforeunload", shutdownServer);
33 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/semantic_layer/gql/gql.py:
--------------------------------------------------------------------------------
```python
1 | GRAPHQL_QUERIES = {
2 | "metrics": """
3 | query GetMetrics($environmentId: BigInt!, $search: String) {
4 | metricsPaginated(
5 | environmentId: $environmentId, search: $search
6 | ){
7 | items{
8 | name
9 | label
10 | description
11 | type
12 | config {
13 | meta
14 | }
15 | }
16 | }
17 | }
18 | """,
19 | "dimensions": """
20 | query GetDimensions($environmentId: BigInt!, $metrics: [MetricInput!]!, $search: String) {
21 | dimensionsPaginated(environmentId: $environmentId, metrics: $metrics, search: $search) {
22 | items {
23 | description
24 | name
25 | type
26 | queryableGranularities
27 | queryableTimeGranularities
28 | }
29 | }
30 | }
31 | """,
32 | "entities": """
33 | query GetEntities($environmentId: BigInt!, $metrics: [MetricInput!]!, $search: String) {
34 | entitiesPaginated(environmentId: $environmentId, metrics: $metrics, search: $search) {
35 | items {
36 | description
37 | name
38 | type
39 | }
40 | }
41 | }
42 | """,
43 | }
44 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_model_details.md:
--------------------------------------------------------------------------------
```markdown
1 | <instructions>
2 | Retrieves information about a specific dbt model, including compiled SQL, description, database, schema, alias, and column details.
3 |
4 | IMPORTANT: Use uniqueId when available.
5 | - Using uniqueId guarantees the correct model is retrieved
6 | - Using only model_name may return incorrect results or fail entirely
7 | - If you obtained models via get_all_models(), you should always use the uniqueId from those results
8 | </instructions>
9 |
10 | <parameters>
11 | uniqueId: The unique identifier of the model (format: "model.project_name.model_name"). STRONGLY RECOMMENDED when available.
12 | model_name: The name of the dbt model. Only use this when uniqueId is unavailable.
13 | </parameters>
14 |
15 | <examples>
16 | 1. PREFERRED METHOD - Using uniqueId (always use this when available):
17 | get_model_details(uniqueId="model.my_project.customer_orders")
18 |
19 | 2. FALLBACK METHOD - Using only model_name (only when uniqueId is unknown):
20 | get_model_details(model_name="customer_orders")
```
--------------------------------------------------------------------------------
/.changes/v0.2.0.md:
--------------------------------------------------------------------------------
```markdown
1 | ## v0.2.0 - 2025-05-28
2 | ### Enhancement or New Feature
3 | * Using `--quiet` flag to reduce context saturation of coding assistants
4 | * Add a tool `get_model_children`
5 | * Added optional uniqueId parameter to model lookup methods for more precise model identification
6 | * Enable remote tools in production
7 | * Add selector for dbt commands
8 | * Set pre-changie value to 0.1.13
9 | ### Under the Hood
10 | * Require changelog entries for each PR
11 | * Log Python version in install script
12 | * Update license to full Apache 2.0 text
13 | * Roll back installation script and instructions
14 | * Re-enable tests in CI
15 | * Refactor config for modularity
16 | * Document remote tools
17 | * Usage tracking scaffolding
18 | * Update docs to clarify service token permissions required
19 | * Increase remote tools timeout
20 | * Update release process for new versions
21 | * Point to the correct diagram in README
22 | * Install hatch in release process
23 | * Remove hatch from release process
24 | ### Bug Fix
25 | * Fix diagram according to feature set
26 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/list_jobs_runs.md:
--------------------------------------------------------------------------------
```markdown
1 | List all job runs in a dbt platform account with optional filtering.
2 |
3 | This tool retrieves runs from the dbt Admin API. Runs represent executions of dbt jobs in the dbt platform.
4 |
5 | ## Parameters
6 |
7 | - **job_id** (optional, integer): Filter runs by specific job ID
8 | - **status** (optional, string): Filter runs by status. One of: `queued`, `starting`, `running`, `success`, `error`, `cancelled`
9 | - **limit** (optional, integer): Maximum number of results to return
10 | - **offset** (optional, integer): Number of results to skip for pagination
11 | - **order_by** (optional, string): Field to order results by (e.g., "created_at", "finished_at", "id"). Use a `-` prefix for reverse ordering (e.g., "-created_at" for newest first)
12 |
13 | Returns a list of run objects with details like:
14 |
15 | - Run ID and status
16 | - Job information
17 | - Start and end times
18 | - Git branch and SHA
19 | - Artifacts and logs information
20 |
21 | Use this tool to monitor job execution, check run history, or find specific runs for debugging.
22 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/get_job_run_details.md:
--------------------------------------------------------------------------------
```markdown
1 | Get detailed information for a specific dbt job run.
2 |
3 | This tool retrieves comprehensive run information including execution details, steps, and artifacts.
4 |
5 | ## Parameters
6 |
7 | - **run_id** (required): The run ID to retrieve details for
8 |
9 | ## Returns
10 |
11 | Run object with detailed execution information including:
12 |
13 | - Run metadata (ID, status, timing information)
14 | - Job and environment details
15 | - Git branch and SHA information
16 | - Execution steps and their status
17 | - Artifacts and logs availability
18 | - Trigger information and cause
19 | - Performance metrics and timing
20 |
21 | ## Use Cases
22 |
23 | - Monitor run progress and status
24 | - Debug failed runs with detailed logs
25 | - Review run performance and timing
26 | - Check artifact generation status
27 | - Audit run execution details
28 | - Troubleshoot run failures
29 |
30 | ## Example Usage
31 |
32 | ```json
33 | {
34 | "run_id": 789
35 | }
36 | ```
37 |
44 | ## Response Information
45 |
46 | The detailed response includes timing, status, and execution context to help with monitoring and debugging dbt job runs.
47 |
```
--------------------------------------------------------------------------------
/examples/aws_strands_agent/dbt_data_scientist/prompts.py:
--------------------------------------------------------------------------------
```python
1 |
2 | """Defines the prompts in the dbt data scientist agent."""
3 |
4 | ROOT_AGENT_INSTR = """You are a senior dbt engineer. You have access to several tools.
5 | When asked to 'find a problem' or 'compile a project' on your local dbt project, call the dbt_compile tool, inspect its JSON logs,
6 | and then:
7 | 1) Summarize the problem(s) (file, node, message).
8 | 2) Recommend a concrete fix in 1-3 bullet points (e.g., correct ref(), add column, fix Jinja).
9 | 3) If no errors, say compile is clean and suggest next step (e.g., run build state:modified+).
10 | When asked about dbt platform or any mcp questions use the dbt_mcp_toolset to answer the question with the correct mcp function.
11 | If the user mentions wanting to analyze their data modeling approach, call the dbt_model_analyzer_agent.
12 | """
13 |
14 | REPAIR_HINTS = """If uncertain about columns/types, call inspect catalog().
15 | If parse is clean but tests fail, try build with --select state:modified+ and --fail-fast.
16 | Return a structured Decision: {action, reason, unified_diff?}.
17 | """
```
--------------------------------------------------------------------------------
/examples/remote_mcp/main.py:
--------------------------------------------------------------------------------
```python
1 | import asyncio
2 | import json
3 |
4 | from mcp.types import TextContent
5 |
6 | from remote_mcp.session import session_context
7 |
8 |
9 | async def main():
10 | async with session_context() as session:
11 | available_metrics = await session.call_tool(
12 | name="list_metrics",
13 | arguments={},
14 | )
15 | metrics_content = [
16 | t for t in available_metrics.content if isinstance(t, TextContent)
17 | ]
18 | metrics_names = [json.loads(m.text)["name"] for m in metrics_content]
19 | print(f"Available metrics: {', '.join(metrics_names)}\n")
20 | num_food_orders = await session.call_tool(
21 | name="query_metrics",
22 | arguments={
23 | "metrics": [
24 | "food_orders",
25 | ],
26 | },
27 | )
28 | num_food_order_content = num_food_orders.content[0]
29 | assert isinstance(num_food_order_content, TextContent)
30 | print(f"Number of food orders: {num_food_order_content.text}")
31 |
32 |
33 | if __name__ == "__main__":
34 | asyncio.run(main())
35 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_model_children.md:
--------------------------------------------------------------------------------
```markdown
1 | <instructions>
2 | Retrieves the child models (downstream dependencies) of a specific dbt model. These are the models that depend on the specified model.
3 |
4 | You can provide either a model_name or a uniqueId, if known, to identify the model. Using uniqueId is more precise and guarantees a unique match, which is especially useful when models might have the same name in different projects.
5 | </instructions>
6 |
7 | <parameters>
8 | model_name: The name of the dbt model to retrieve children for.
9 | uniqueId: The unique identifier of the model. If provided, this will be used instead of model_name for a more precise lookup. You can get the uniqueId values for all models from the get_all_models() tool.
10 | </parameters>
11 |
12 | <examples>
13 | 1. Getting children for a model by name:
14 | get_model_children(model_name="customer_orders")
15 |
16 | 2. Getting children for a model by uniqueId (more precise):
17 | get_model_children(model_name="customer_orders", uniqueId="model.my_project.customer_orders")
18 |
19 | 3. Getting children using only uniqueId:
20 | get_model_children(uniqueId="model.my_project.customer_orders")
21 | </examples>
22 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/get_job_details.md:
--------------------------------------------------------------------------------
```markdown
1 | Get detailed information for a specific dbt job.
2 |
3 | This tool retrieves comprehensive job configuration including execution settings, triggers, and scheduling information.
4 |
5 | ## Parameters
6 |
7 | - **job_id** (required): The job ID to retrieve details for
8 |
9 | ## Returns
10 |
11 | Job object with detailed configuration including:
12 |
13 | - Job metadata (ID, name, description, type)
14 | - Environment and project associations
15 | - Execute steps (dbt commands to run)
16 | - Trigger configuration (schedule, webhooks, CI)
17 | - Execution settings (timeout, threads, target)
18 | - dbt version overrides
19 | - Generate docs and sources settings
20 | - Most recent run information (if requested)
21 |
22 | ## Use Cases
23 |
24 | - Debug job configuration issues
25 | - Understand job execution settings
26 | - Review scheduling and trigger configuration
27 | - Check dbt commands and execution steps
28 | - Audit job settings across projects
29 | - Get recent run status for monitoring
30 |
31 | ## Job Types
32 |
33 | - **ci**: Continuous integration jobs
34 | - **scheduled**: Regularly scheduled jobs
35 | - **other**: Manual or API-triggered jobs
36 | - **merge**: Jobs triggered on merge events
37 |
38 | ## Example Usage
39 |
40 | ```json
41 | {
42 | "job_id": 456
43 | }
44 | ```
45 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/cancel_job_run.md:
--------------------------------------------------------------------------------
```markdown
1 | Cancel a currently running or queued dbt run.
2 |
3 | This tool allows you to stop a run that is currently executing or waiting in the queue.
4 |
5 | ## Parameters
6 |
7 | - **run_id** (required): The run ID to cancel
8 |
9 | ## Returns
10 |
11 | Updated run object showing the cancelled status and timing information.
12 |
13 | ## Run States That Can Be Cancelled
14 |
15 | - **Queued (1)**: Run is waiting to start
16 | - **Starting (2)**: Run is initializing
17 | - **Running (3)**: Run is currently executing
18 |
19 | ## Use Cases
20 |
21 | - Stop long-running jobs that are no longer needed
22 | - Cancel jobs that were triggered by mistake
23 | - Free up run slots for higher priority jobs
24 | - Stop runs that are stuck or hanging
25 | - Emergency cancellation during incidents
26 |
27 | ## Important Notes
28 |
29 | - Once cancelled, a run cannot be resumed
30 | - Partial work may have been completed before cancellation
31 | - Artifacts from cancelled runs may not be available
32 | - Use the retry functionality if you need to re-run after cancellation
33 |
34 | ## Example Usage
35 |
36 | ```json
37 | {
38 | "run_id": 789
39 | }
40 | ```
41 |
42 | ## Response
43 |
44 | Returns the updated run object with:
45 | - Status changed to cancelled (30)
46 | - Cancellation timestamp
47 | - Final execution timing
48 | - Any artifacts that were generated before cancellation
49 |
```
--------------------------------------------------------------------------------
/examples/openai_responses/main.py:
--------------------------------------------------------------------------------
```python
1 | # mypy: ignore-errors
2 |
3 | import os
4 |
5 | from openai import OpenAI
6 |
7 |
8 | def main():
9 | client = OpenAI()
10 | prod_environment_id = os.environ.get("DBT_PROD_ENV_ID", os.getenv("DBT_ENV_ID"))
11 | token = os.environ.get("DBT_TOKEN")
12 | host = os.environ.get("DBT_HOST", "cloud.getdbt.com")
13 |
14 | messages = []
15 | while True:
16 | user_message = input("User > ")
17 | messages.append({"role": "user", "content": user_message})
18 | response = client.responses.create(
19 | model="gpt-4o-mini",
20 | tools=[
21 | {
22 | "type": "mcp",
23 | "server_label": "dbt",
24 | "server_url": f"https://{host}/api/ai/v1/mcp/",
25 | "require_approval": "never",
26 | "headers": {
27 | "Authorization": f"token {token}",
28 | "x-dbt-prod-environment-id": prod_environment_id,
29 | },
30 | }, # type: ignore
31 | ],
32 | input=messages,
33 | )
34 | messages.append({"role": "assistant", "content": response.output_text})
35 | print(f"Assistant > {response.output_text}")
36 |
37 |
38 | if __name__ == "__main__":
39 | main()
40 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/semantic_layer/types.py:
--------------------------------------------------------------------------------
```python
1 | from dataclasses import dataclass
2 |
3 | from dbtsl.models.dimension import DimensionType
4 | from dbtsl.models.entity import EntityType
5 | from dbtsl.models.metric import MetricType
6 |
7 |
8 | @dataclass
9 | class OrderByParam:
10 | name: str
11 | descending: bool
12 |
13 |
14 | @dataclass
15 | class MetricToolResponse:
16 | name: str
17 | type: MetricType
18 | label: str | None = None
19 | description: str | None = None
20 | metadata: str | None = None
21 |
22 |
23 | @dataclass
24 | class DimensionToolResponse:
25 | name: str
26 | type: DimensionType
27 | description: str | None = None
28 | label: str | None = None
29 | granularities: list[str] | None = None
30 |
31 |
32 | @dataclass
33 | class EntityToolResponse:
34 | name: str
35 | type: EntityType
36 | description: str | None = None
37 |
38 |
39 | @dataclass
40 | class QueryMetricsSuccess:
41 | result: str
42 | error: None = None
43 |
44 |
45 | @dataclass
46 | class QueryMetricsError:
47 | error: str
48 | result: None = None
49 |
50 |
51 | QueryMetricsResult = QueryMetricsSuccess | QueryMetricsError
52 |
53 |
54 | @dataclass
55 | class GetMetricsCompiledSqlSuccess:
56 | sql: str
57 | error: None = None
58 |
59 |
60 | @dataclass
61 | class GetMetricsCompiledSqlError:
62 | error: str
63 | sql: None = None
64 |
65 |
66 | GetMetricsCompiledSqlResult = GetMetricsCompiledSqlSuccess | GetMetricsCompiledSqlError
67 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/telemetry/logging.py:
--------------------------------------------------------------------------------
```python
1 | from __future__ import annotations
2 |
3 | import logging
4 | from pathlib import Path
5 |
6 | LOG_FILENAME = "dbt-mcp.log"
7 |
8 |
9 | def _find_repo_root() -> Path:
10 | module_path = Path(__file__).resolve().parent
11 | home = Path.home().resolve()
12 | for candidate in [module_path, *module_path.parents]:
13 | if (
14 | (candidate / ".git").exists()
15 | or (candidate / "pyproject.toml").exists()
16 | or candidate == home
17 | ):
18 | return candidate
19 | return module_path
20 |
21 |
22 | def configure_logging(file_logging: bool) -> None:
23 | if not file_logging:
24 | return
25 |
26 | repo_root = _find_repo_root()
27 | log_path = repo_root / LOG_FILENAME
28 |
29 | root_logger = logging.getLogger()
30 | for handler in root_logger.handlers:
31 | if (
32 | isinstance(handler, logging.FileHandler)
33 | and Path(handler.baseFilename) == log_path
34 | ):
35 | return
36 |
37 | file_handler = logging.FileHandler(log_path, encoding="utf-8")
38 | file_handler.setLevel(logging.INFO)
39 | file_handler.setFormatter(
40 | logging.Formatter("%(asctime)s %(levelname)s [%(name)s] %(message)s")
41 | )
42 |
43 | root_logger.setLevel(logging.INFO)
44 | root_logger.addHandler(file_handler)
45 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_exposure_details.md:
--------------------------------------------------------------------------------
```markdown
1 | Get detailed information about one or more exposures by name or unique IDs.
2 |
3 | Parameters:
4 | - unique_ids (optional): List of unique IDs of exposures (e.g., ["exposure.project.exposure1", "exposure.project.exposure2"]) (more efficient - uses GraphQL filter)
5 |
6 | Returns a list of detailed information dictionaries, each including:
7 | - name: The name of the exposure
8 | - description: Detailed description of the exposure
9 | - exposureType: Type of exposure (application, dashboard, analysis, etc.)
10 | - maturity: Maturity level of the exposure (high, medium, low)
11 | - ownerName: Name of the exposure owner
12 | - ownerEmail: Email of the exposure owner
13 | - url: URL associated with the exposure
14 | - label: Optional label for the exposure
15 | - parents: List of parent models/sources that this exposure depends on
16 | - meta: Additional metadata associated with the exposure
17 | - freshnessStatus: Current freshness status of the exposure
18 | - uniqueId: The unique identifier for this exposure
19 |
20 | Example usage:
21 | - Get single exposure by unique ID: get_exposure_details(unique_ids=["exposure.analytics.customer_dashboard"])
22 | - Get multiple exposures by unique IDs: get_exposure_details(unique_ids=["exposure.analytics.dashboard1", "exposure.sales.report2"])
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_all_sources.md:
--------------------------------------------------------------------------------
```markdown
1 | Get the name, description, and metadata of all dbt sources in the environment. Sources represent external data tables that your dbt models build upon.
2 |
3 | Parameters (all optional):
4 | - source_names: List of specific source names to filter by (e.g., ['raw_data', 'external_api'])
5 | - unique_ids: List of specific source table IDs to filter by
6 |
7 | Note:
8 | - source_names correspond to the top-level source grouping in the source YML config
9 | - unique_ids have the form `source.{YOUR-DBT-PROJECT}.{SOURCE-NAME}.{SOURCE-TABLE}`
10 |
11 | Returns information including:
12 | - name: The table name within the source
13 | - uniqueId: The unique identifier for this source table
14 | - identifier: The underlying table identifier in the warehouse
15 | - description: Description of the source table
16 | - sourceName: The source name (e.g., 'raw_data', 'external_api')
17 | - database: Database containing the source table
18 | - schema: Schema containing the source table
19 | - resourceType: Will be 'source'
20 | - freshness: Real-time freshness status from production including:
21 | - maxLoadedAt: When the source was last loaded
22 | - maxLoadedAtTimeAgoInS: How long ago the source was loaded (in seconds)
23 | - freshnessStatus: Current freshness status (e.g., 'pass', 'warn', 'error')
24 |
25 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_model_parents.md:
--------------------------------------------------------------------------------
```markdown
1 | <instructions>
2 | Retrieves the parent models of a specific dbt model. These are the models that the specified model depends on.
3 |
4 | You can provide either a model_name or a uniqueId, if known, to identify the model. Using uniqueId is more precise and guarantees a unique match, which is especially useful when models might have the same name in different projects.
5 |
6 | Returned parents include `resourceType`, `name`, and `description`. For upstream sources, also provide `sourceName` and `uniqueId` so lineage can be linked back via `get_all_sources`.
7 | </instructions>
8 |
9 | <parameters>
10 | model_name: The name of the dbt model to retrieve parents for.
11 | uniqueId: The unique identifier of the model. If provided, this will be used instead of model_name for a more precise lookup. You can get the uniqueId values for all models from the get_all_models() tool.
12 | </parameters>
13 |
14 | <examples>
15 | 1. Getting parents for a model by name:
16 | get_model_parents(model_name="customer_orders")
17 |
18 | 2. Getting parents for a model by uniqueId (more precise):
19 | get_model_parents(model_name="customer_orders", uniqueId="model.my_project.customer_orders")
20 |
21 | 3. Getting parents using only uniqueId:
22 | get_model_parents(uniqueId="model.my_project.customer_orders")
23 | </examples>
24 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/list_job_run_artifacts.md:
--------------------------------------------------------------------------------
```markdown
1 | List all available artifacts for a completed dbt job run.
2 |
3 | This tool retrieves the list of artifact files generated during a run execution, such as manifest.json, catalog.json, and run_results.json.
4 |
5 | ## Parameters
6 |
7 | - **run_id** (required): The run ID to list artifacts for
8 |
9 | ## Returns
10 |
11 | List of artifact file paths available for download. Common artifacts include:
12 |
13 | - **manifest.json**: Complete project metadata and lineage
14 | - **catalog.json**: Documentation and column information
15 | - **run_results.json**: Execution results and timing
16 | - **sources.json**: Source freshness check results
17 | - **compiled/**: Compiled SQL files
18 | - **run/**: SQL statements executed during the run
19 |
20 | ## Artifact Availability
21 |
22 | Artifacts are only available for:
23 | - Successfully completed runs
24 | - Failed runs that progressed beyond compilation
25 | - Runs where `artifacts_saved` is true
26 |
27 | ## Use Cases
28 |
29 | - Discover available artifacts before downloading
30 | - Check if specific artifacts were generated
31 | - Audit artifact generation across runs
32 | - Integrate with external systems that consume dbt artifacts
33 | - Validate run completion and output generation
34 |
35 | ## Example Usage
36 |
37 | ```json
38 | {
39 | "run_id": 789
40 | }
41 | ```
42 |
43 | ## Next Steps
44 |
45 | Use `get_run_artifact` to download specific artifacts from this list for analysis or integration with other tools.
46 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Feature Request
2 | description: Suggest an idea for this project
3 | title: "[Feature]: "
4 | labels: ["enhancement"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thanks for taking the time to suggest a new feature!
10 | - type: textarea
11 | id: problem
12 | attributes:
13 | label: Is your feature request related to a problem?
14 | description: A clear and concise description of what the problem is.
15 | placeholder: I am having trouble...
16 | validations:
17 | required: false
18 | - type: textarea
19 | id: solution
20 | attributes:
21 | label: Describe the solution you'd like
22 | description: A clear and concise description of what you want to happen.
23 | placeholder: I would like to see...
24 | validations:
25 | required: true
26 | - type: textarea
27 | id: alternatives
28 | attributes:
29 | label: Describe alternatives you've considered
30 | description: A clear and concise description of any alternative solutions or features you've considered.
31 | placeholder: Alternative approaches could be...
32 | validations:
33 | required: false
34 | - type: textarea
35 | id: additional-context
36 | attributes:
37 | label: Additional context
38 | description: Add any other context, screenshots, or examples about the feature request here.
39 | validations:
40 | required: false
```
--------------------------------------------------------------------------------
/tests/env_vars.py:
--------------------------------------------------------------------------------
```python
1 | import os
2 | from contextlib import contextmanager
3 |
4 |
5 | @contextmanager
6 | def env_vars_context(env_vars: dict[str, str]):
7 | """Temporarily set environment variables and restore them afterward."""
8 | # Store original env vars
9 | original_env = {}
10 |
11 | # Save original and set new values
12 | for key, value in env_vars.items():
13 | if key in os.environ:
14 | original_env[key] = os.environ[key]
15 | os.environ[key] = value
16 |
17 | try:
18 | yield
19 | finally:
20 | # Restore original values
21 | for key in env_vars:
22 | if key in original_env:
23 | os.environ[key] = original_env[key]
24 | else:
25 | del os.environ[key]
26 |
27 |
28 | @contextmanager
29 | def default_env_vars_context(override_env_vars: dict[str, str] | None = None):
30 | with env_vars_context(
31 | {
32 | "DBT_HOST": "http://localhost:8000",
33 | "DBT_PROD_ENV_ID": "1234",
34 | "DBT_TOKEN": "5678",
35 | "DBT_PROJECT_DIR": "tests/fixtures/dbt_project",
36 | "DBT_PATH": "dbt",
37 | "DBT_DEV_ENV_ID": "5678",
38 | "DBT_USER_ID": "9012",
39 | "DBT_CLI_TIMEOUT": "10",
40 | "DBT_ACCOUNT_ID": "12345",
41 | "DISABLE_TOOLS": "",
42 | "DISABLE_DBT_CODEGEN": "false",
43 | }
44 | | (override_env_vars or {})
45 | ):
46 | yield
47 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/semantic_layer/get_dimensions.md:
--------------------------------------------------------------------------------
```markdown
1 | <instructions>
2 | Get the dimensions for specified metrics
3 |
4 | Dimensions are the attributes, features, or characteristics
5 | that describe or categorize data.
6 | </instructions>
7 |
8 | <examples>
9 | <example>
10 | Question: "I want to analyze revenue trends - what dimensions are available?"
11 | Thinking step-by-step:
12 | - Using list_metrics(), I find "revenue" is available
13 | - Now I can get the dimensions for this metric
14 | - The search parameter is not needed here since the user is interested in all available dimensions.
15 | Parameters:
16 | metrics=["revenue"]
17 | search=null
18 | </example>
19 |
20 | <example>
21 | Question: "Are there any time-related dimensions for my sales metrics?"
22 | Thinking step-by-step:
23 | - Using list_metrics(), I find "total_sales" and "average_order_value" are available
24 | - The user is interested in time dimensions specifically
25 | - I should use the search parameter to filter for dimensions with "time" in the name
26 | - This will narrow down the results to just time-related dimensions
27 | Parameters:
28 | metrics=["total_sales", "average_order_value"]
29 | search="time"
30 | </example>
31 | </examples>
32 |
33 | <parameters>
34 | metrics: List of metric names
35 | search: Optional string used to filter dimensions by name using partial matches (only use when absolutely necessary as some dimensions might be missed due to specific naming styles)
36 | </parameters>
```
--------------------------------------------------------------------------------
/src/dbt_mcp/config/headers.py:
--------------------------------------------------------------------------------
```python
1 | from abc import ABC, abstractmethod
2 | from typing import Protocol
3 |
4 | from dbt_mcp.oauth.token_provider import TokenProvider
5 |
6 |
7 | class HeadersProvider(Protocol):
8 | def get_headers(self) -> dict[str, str]: ...
9 |
10 |
11 | class TokenHeadersProvider(ABC):
12 | def __init__(self, token_provider: TokenProvider):
13 | self.token_provider = token_provider
14 |
15 | @abstractmethod
16 | def headers_from_token(self, token: str) -> dict[str, str]: ...
17 |
18 | def get_headers(self) -> dict[str, str]:
19 | return self.headers_from_token(self.token_provider.get_token())
20 |
21 |
22 | class AdminApiHeadersProvider(TokenHeadersProvider):
23 | def headers_from_token(self, token: str) -> dict[str, str]:
24 | return {"Authorization": f"Bearer {token}"}
25 |
26 |
27 | class DiscoveryHeadersProvider(TokenHeadersProvider):
28 | def headers_from_token(self, token: str) -> dict[str, str]:
29 | return {
30 | "Authorization": f"Bearer {token}",
31 | "Content-Type": "application/json",
32 | }
33 |
34 |
35 | class SemanticLayerHeadersProvider(TokenHeadersProvider):
36 | def headers_from_token(self, token: str) -> dict[str, str]:
37 | return {
38 | "Authorization": f"Bearer {token}",
39 | "x-dbt-partner-source": "dbt-mcp",
40 | }
41 |
42 |
43 | class SqlHeadersProvider(TokenHeadersProvider):
44 | def headers_from_token(self, token: str) -> dict[str, str]:
45 | return {"Authorization": f"Bearer {token}"}
46 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/semantic_layer/get_entities.md:
--------------------------------------------------------------------------------
```markdown
1 | <instructions>
2 | Get the entities for specified metrics
3 |
4 | Entities are real-world concepts in a business such as customers,
5 | transactions, and ad campaigns. Analysis is often focused around
6 | specific entities, such as customer churn or
7 | annual recurring revenue modeling.
8 | </instructions>
9 |
10 | <examples>
11 | <example>
12 | Question: "I want to analyze revenue - what entities are available?"
13 | Thinking step-by-step:
14 | - Using list_metrics(), I find "revenue" is available
15 | - Now I can get the entities for this metric
16 | - The search parameter is not needed here since the user is interested in all available entities.
17 | Parameters:
18 | metrics=["revenue"]
19 | search=null
20 | </example>
21 |
22 | <example>
23 | Question: "Are there any customer-related entities for my sales metrics?"
24 | Thinking step-by-step:
25 | - Using list_metrics(), I find "total_sales" and "average_order_value" are available
26 | - The user is interested in customer entities specifically
27 | - I should use the search parameter to filter for entities with "customer" in the name
28 | - This will narrow down the results to just customer-related entities
29 | Parameters:
30 | metrics=["total_sales", "average_order_value"]
31 | search="customer"
32 | </example>
33 | </examples>
34 |
35 | <parameters>
36 | metrics: List of metric names
37 | search: Optional string used to filter entities by name using partial matches (only use when absolutely necessary as some entities might be missed due to specific naming styles)
38 | </parameters>
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/retry_job_run.md:
--------------------------------------------------------------------------------
```markdown
1 | Retry a failed dbt job run from the point of failure.
2 |
3 | This tool allows you to restart a failed run, continuing from where it failed rather than starting completely over.
4 |
5 | ## Parameters
6 |
7 | - **run_id** (required): The run ID to retry
8 |
9 | ## Returns
10 |
11 | New run object for the retry attempt with a new run ID and execution details.
12 |
13 | ## Requirements for Retry
14 |
15 | - Original run must have failed (status 20)
16 | - Run must be the most recent run for the job
17 | - dbt version must support retry functionality
18 | - Run must have generated run_results.json
19 |
20 | ## Retry Behavior
21 |
22 | - Continues from the failed step
23 | - Skips successfully completed models
24 | - Uses the same configuration as the original run
25 | - Creates a new run with a new run ID
26 | - Maintains the same Git SHA and branch
27 |
28 | ## Use Cases
29 |
30 | - Recover from transient infrastructure failures
31 | - Continue after warehouse connectivity issues
32 | - Resume after temporary resource constraints
33 | - Avoid re-running expensive successful steps
34 | - Quick recovery from known, fixable issues
35 |
36 | ## Retry Not Supported Reasons
37 |
38 | If retry fails, possible reasons include:
39 | - **RETRY_UNSUPPORTED_CMD**: Command type doesn't support retry
40 | - **RETRY_UNSUPPORTED_VERSION**: dbt version too old
41 | - **RETRY_NOT_LATEST_RUN**: Not the most recent run for the job
42 | - **RETRY_NOT_FAILED_RUN**: Run didn't fail
43 | - **RETRY_NO_RUN_RESULTS**: Missing run results for retry logic
44 |
45 | ## Example Usage
46 |
47 | ```json
48 | {
49 | "run_id": 789
50 | }
51 | ```
52 |
53 | This creates a new run that continues from the failure point of run 789.
54 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/dbt_cli/args/selectors.md:
--------------------------------------------------------------------------------
```markdown
1 | A selector needs to be used when we need to select specific nodes or are asking to do actions on specific nodes. A node can be a model, a test, a seed or a snapshot. It is strongly preferred to provide a selector, especially on large projects. Always provide a selector initially.
2 |
3 | - to select all models, just do not provide a selector
4 | - to select a particular model, use the selector `<model_name>`
5 |
6 | ## Graph operators
7 |
8 | - to select a particular model and all the downstream ones (also known as children), use the selector `<model_name>+`
9 | - to select a particular model and all the upstream ones (also known as parents), use the selector `+<model_name>`
10 | - to select a particular model and all the downstream and upstream ones, use the selector `+<model_name>+`
11 | - to select the union of different selectors, separate them with a space like `selector1 selector2`
12 | - to select the intersection of different selectors, separate them with a comma like `selector1,selector2`
13 |
14 | ## Matching nodes based on parts of their name
15 |
16 | When looking to select nodes based on parts of their names, the selector needs to be `fqn:<pattern>`. The fqn is the fully qualified name, it starts with the project or package name followed by the subfolder names and finally the node name.
17 |
18 | ### Examples
19 |
20 | - to select a node from any package that contains `stg_`, we would have the selector `fqn:*stg_*`
21 | - to select a node from the current project that contains `stg_`, we would have the selector `fqn:project_name.*stg_*`
22 |
```
--------------------------------------------------------------------------------
/examples/aws_strands_agent/dbt_data_scientist/quick_mcp_test.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """Quick MCP connection test - minimal version."""
3 |
4 | import os
5 | import sys
6 | from dotenv import load_dotenv
7 |
8 | def quick_test():
9 | """Quick test of MCP connectivity."""
10 | print("🧪 Quick MCP Connection Test")
11 | print("-" * 30)
12 |
13 | # Load environment
14 | load_dotenv()
15 |
16 | # Check basic env vars
17 | url = os.environ.get("DBT_MCP_URL")
18 | token = os.environ.get("DBT_TOKEN")
19 |
20 | if not url or not token:
21 | print("❌ Missing DBT_MCP_URL or DBT_TOKEN")
22 | return False
23 |
24 | print(f"✅ URL: {url}")
25 | print(f"✅ Token: {'*' * len(token)}")
26 |
27 | try:
28 | # Import and test
29 | from tools.dbt_mcp import dbt_mcp_client
30 |
31 | print("🔌 Testing connection...")
32 | with dbt_mcp_client:
33 | tools = dbt_mcp_client.list_tools_sync()
34 | print(f"✅ Connected! Found {len(tools)} tools")
35 |
36 | if tools:
37 | print("📋 Available tools:")
38 |                 for tool in tools[:5]:  # Show first 5 tools
39 |                     print(f"   - {tool.tool_name}")
40 |                 if len(tools) > 5:
41 |                     print(f"   ... and {len(tools) - 5} more")
42 |
43 | return True
44 |
45 | except Exception as e:
46 | print(f"❌ Connection failed: {e}")
47 | return False
48 |
49 | if __name__ == "__main__":
50 | success = quick_test()
51 | if success:
52 | print("\n🎉 MCP connection is working!")
53 | else:
54 | print("\n💥 MCP connection failed!")
55 | sys.exit(0 if success else 1)
56 |
```
--------------------------------------------------------------------------------
/.github/workflows/changelog-check.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Check CHANGELOG
2 | on:
3 | pull_request:
4 | branches:
5 | - main
6 |
7 | jobs:
8 | check-changelog:
9 | permissions:
10 | contents: read
11 | pull-requests: write
12 | runs-on: ubuntu-latest
13 | if: "!startsWith(github.event.head_commit.message, 'Release: v')"
14 | steps:
15 | - name: checkout code
16 | uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
17 | with:
18 | fetch-depth: 0
19 | - name: setup python
20 | uses: ./.github/actions/setup-python
21 | id: setup-python
22 | - name: Check for changes
23 | run: |
24 | BASE=$(git merge-base HEAD origin/main)
25 | ALL_CHANGES=$(git diff --name-only "$BASE"...HEAD)
26 |
27 | # only ignore docs/ and .md files in root
28 | IGNORE_REGEX='(^docs/|^[^/]+\.md$)'
29 | NON_IGNORED_CHANGES=$(echo "$ALL_CHANGES" | grep -Ev "$IGNORE_REGEX" || true)
30 |
31 | if [ "$NON_IGNORED_CHANGES" == "" ]; then
32 | echo "No relevant changes detected. Skipping changelog check."
33 | exit 0
34 | fi
35 |
36 | CHANGIE=$(echo "$ALL_CHANGES" | grep -E "\.changes/unreleased/.*\.yaml" || true)
37 | if [ "$CHANGIE" == "" ]; then
38 | echo "No files added to '.changes/unreleased/'. Make sure you run 'changie new'."
39 | exit 1
40 | fi
41 |
42 | CHANGELOG=$(echo "$ALL_CHANGES" | grep -E "CHANGELOG\.md" || true)
43 | if [ "$CHANGELOG" != "" ]; then
44 | echo "Don't edit 'CHANGELOG.md' manually nor run 'changie merge'."
45 | exit 1
46 | fi
47 |
```
--------------------------------------------------------------------------------
/.github/workflows/run-checks-pr.yaml:
--------------------------------------------------------------------------------
```yaml
1 | name: PR pipeline
2 |
3 | on:
4 | pull_request:
5 | types: [opened, reopened, synchronize, labeled]
6 |
7 | jobs:
8 | # checks the code for styling and type errors
9 | check:
10 | name: Check styling
11 | runs-on: ubuntu-24.04
12 | permissions:
13 | contents: read
14 | pull-requests: write
15 | steps:
16 | - name: checkout code
17 | uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
18 | - name: setup python
19 | uses: ./.github/actions/setup-python
20 | id: setup-python
21 | - uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda
22 | with:
23 | version: 10
24 | - name: Install go-task
25 | run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b /usr/local/bin
26 | - name: Install dependencies
27 | run: task install
28 | - name: Run check
29 | run: task check
30 |
31 | # runs the unit tests
32 | unit-test:
33 | name: Unit test
34 | runs-on: ubuntu-24.04
35 | permissions:
36 | contents: read
37 | pull-requests: write
38 | needs:
39 | - check
40 | steps:
41 | - name: checkout code
42 | uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
43 | - name: setup python
44 | uses: ./.github/actions/setup-python
45 | id: setup-python
46 | - uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda
47 | with:
48 | version: 10
49 | - name: Install go-task
50 | run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b /usr/local/bin
51 | - name: Run tests
52 | run: task test:unit
53 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/tools/annotations.py:
--------------------------------------------------------------------------------
```python
1 | from mcp.types import ToolAnnotations
2 |
3 |
4 | def create_tool_annotations(
5 | title: str | None = None,
6 | read_only_hint: bool = False,
7 | destructive_hint: bool = True,
8 | idempotent_hint: bool = False,
9 | open_world_hint: bool = True,
10 | ) -> ToolAnnotations:
11 | """
12 | Creates tool annotations. Defaults to the most cautious option,
13 | i.e destructive, non-idempotent, and open-world.
14 | Args:
15 | - title: Human-readable title for the tool
16 | - read_only_hint: If true, the tool does not modify its environment.
17 | - destructive_hint:
18 | If true, the tool may perform destructive updates to its environment.
19 | If false, the tool performs only additive updates.
20 | This property is meaningful only when `readOnlyHint == false`.
21 | - idempotent_hint: Whether repeated calls have the same effect
22 |         If true, calling the tool repeatedly with the same arguments will have no additional effect on its environment.
23 | This property is meaningful only when `readOnlyHint == false`.
24 | - open_world_hint: Whether the tool interacts with external systems
25 | If true, this tool may interact with an "open world" of external entities.
26 | If false, the tool's domain of interaction is closed.
27 | For example, the world of a web search tool is open, whereas that of a memory tool is not.
28 | """
29 | return ToolAnnotations(
30 | title=title,
31 | readOnlyHint=read_only_hint,
32 | destructiveHint=destructive_hint,
33 | idempotentHint=idempotent_hint,
34 | openWorldHint=open_world_hint,
35 | )
36 |
```
--------------------------------------------------------------------------------
/examples/openai_agent/main.py:
--------------------------------------------------------------------------------
```python
1 | # mypy: ignore-errors
2 |
3 | import asyncio
4 | from pathlib import Path
5 |
6 | from agents import Agent, Runner, trace
7 | from agents.mcp import create_static_tool_filter
8 | from agents.mcp.server import MCPServerStdio
9 |
10 |
11 | async def main():
12 | dbt_mcp_dir = Path(__file__).parent.parent.parent
13 | async with MCPServerStdio(
14 | name="dbt",
15 | params={
16 | "command": "uvx",
17 | "args": [
18 | "--env-file",
19 | # This file should contain config described in the root README.md
20 | f"{dbt_mcp_dir}/.env",
21 | "dbt-mcp",
22 | ],
23 | },
24 | client_session_timeout_seconds=20,
25 | cache_tools_list=True,
26 | tool_filter=create_static_tool_filter(
27 | allowed_tool_names=[
28 | "list_metrics",
29 | "get_dimensions",
30 | "get_entities",
31 | "query_metrics",
32 | ],
33 | ),
34 | ) as server:
35 | agent = Agent(
36 | name="Assistant",
37 | instructions="Use the tools to answer the user's questions",
38 | mcp_servers=[server],
39 | )
40 | with trace(workflow_name="Conversation"):
41 | conversation = []
42 | result = None
43 | while True:
44 | if result:
45 | conversation = result.to_input_list()
46 | conversation.append({"role": "user", "content": input("User > ")})
47 | result = await Runner.run(agent, conversation)
48 | print(result.final_output)
49 |
50 |
51 | if __name__ == "__main__":
52 | try:
53 | asyncio.run(main())
54 | except KeyboardInterrupt:
55 | print("\nExiting.")
56 |
```
--------------------------------------------------------------------------------
/tests/unit/tools/test_disable_tools.py:
--------------------------------------------------------------------------------
```python
1 | from unittest.mock import patch
2 |
3 | from dbt_mcp.config.config import load_config
4 | from dbt_mcp.dbt_cli.binary_type import BinaryType
5 | from dbt_mcp.mcp.server import create_dbt_mcp
6 | from tests.env_vars import default_env_vars_context
7 |
8 |
9 | async def test_disable_tools():
10 |     """Test that tools named in DISABLE_TOOLS are not registered on the server."""
11 |     disable_tools = {"get_mart_models", "list_metrics"}
12 |     with (
13 |         default_env_vars_context(
14 |             override_env_vars={"DISABLE_TOOLS": ",".join(disable_tools)}
15 |         ),
16 |         patch(
17 |             "dbt_mcp.config.config.detect_binary_type", return_value=BinaryType.DBT_CORE
18 |         ),
19 |     ):
20 |         config = load_config()
21 |         dbt_mcp = await create_dbt_mcp(config)
22 | 
23 |     # Get all tools from the server
24 |     server_tools = await dbt_mcp.list_tools()
25 |     server_tool_names = {tool.name for tool in server_tools}
26 |     assert not disable_tools.intersection(server_tool_names)
27 |
28 |
29 | async def test_disable_cli_tools():
30 | disable_tools = {"build", "compile", "docs", "list"}
31 | with (
32 | default_env_vars_context(
33 | override_env_vars={"DISABLE_TOOLS": ",".join(disable_tools)}
34 | ),
35 | patch(
36 | "dbt_mcp.config.config.detect_binary_type", return_value=BinaryType.DBT_CORE
37 | ),
38 | ):
39 | config = load_config()
40 | dbt_mcp = await create_dbt_mcp(config)
41 |
42 | # Get all tools from the server
43 | server_tools = await dbt_mcp.list_tools()
44 | server_tool_names = {tool.name for tool in server_tools}
45 | assert not disable_tools.intersection(server_tool_names)
46 | assert "show" in server_tool_names
47 |
```
--------------------------------------------------------------------------------
/tests/unit/tools/test_toolsets.py:
--------------------------------------------------------------------------------
```python
1 | from unittest.mock import patch
2 |
3 | from dbt_mcp.config.config import load_config
4 | from dbt_mcp.dbt_cli.binary_type import BinaryType
5 | from dbt_mcp.lsp.lsp_binary_manager import LspBinaryInfo
6 | from dbt_mcp.mcp.server import create_dbt_mcp
7 | from dbt_mcp.tools.toolsets import proxied_tools, toolsets
8 | from tests.env_vars import default_env_vars_context
9 |
10 |
11 | async def test_toolsets_match_server_tools():
12 | """Test that the defined toolsets match the tools registered in the server."""
13 | with (
14 | default_env_vars_context(),
15 | patch(
16 | "dbt_mcp.config.config.detect_binary_type", return_value=BinaryType.DBT_CORE
17 | ),
18 | patch(
19 | "dbt_mcp.lsp.tools.dbt_lsp_binary_info",
20 | return_value=LspBinaryInfo(path="/path/to/lsp", version="1.0.0"),
21 | ),
22 | ):
23 | config = load_config()
24 | dbt_mcp = await create_dbt_mcp(config)
25 |
26 | # Get all tools from the server
27 | server_tools = await dbt_mcp.list_tools()
28 | # Manually adding SQL tools here because the server doesn't get them
29 | # in this unit test.
30 | server_tool_names = {tool.name for tool in server_tools} | {
31 | p.value for p in proxied_tools
32 | }
33 | defined_tools = set()
34 | for toolset_tools in toolsets.values():
35 | defined_tools.update({t.value for t in toolset_tools})
36 |
37 | if server_tool_names != defined_tools:
38 | raise ValueError(
39 | f"Tool name mismatch:\n"
40 | f"In server but not in enum: {server_tool_names - defined_tools}\n"
41 | f"In enum but not in server: {defined_tools - server_tool_names}"
42 | )
43 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Bug Report
2 | description: File a bug report to help us improve
3 | title: "[Bug]: "
4 | labels: ["bug"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thanks for taking the time to fill out this bug report!
10 | - type: textarea
11 | id: what-happened
12 | attributes:
13 | label: What happened?
14 | description: Also tell us, what did you expect to happen?
15 | placeholder: Tell us what you see!
16 |       # no prefilled value: the placeholder above guides the reporter
17 | validations:
18 | required: true
19 | - type: textarea
20 | id: reproduce
21 | attributes:
22 | label: Steps to Reproduce
23 | description: Please provide detailed steps to reproduce the issue
24 | placeholder: |
25 | 1. Go to '...'
26 | 2. Click on '....'
27 | 3. Scroll down to '....'
28 | 4. See error
29 | validations:
30 | required: true
31 | - type: dropdown
32 | id: deployment
33 | attributes:
34 | label: Deployment
35 | description: Are you using the local MCP server or the remote MCP server?
36 | options:
37 | - Local MCP server
38 | - Remote MCP server
39 | validations:
40 | required: true
41 | - type: textarea
42 | id: environment
43 | attributes:
44 | label: Environment
45 | description: Please provide details about your environment
46 | placeholder: |
47 | - OS: [e.g. macOS, Linux, Windows]
48 | - dbt CLI executable (for CLI tools): [e.g. dbt Core, Fusion, Cloud CLI]
49 | - MCP client: [e.g. Cursor, Claude Desktop, OpenAI SDK...]
50 | validations:
51 | required: false
52 | - type: textarea
53 | id: logs
54 | attributes:
55 | label: Relevant log output
56 | description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
57 | render: shell
58 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/trigger_job_run.md:
--------------------------------------------------------------------------------
```markdown
1 | # Trigger Job Run
2 |
3 | Trigger a dbt job run with optional parameter overrides.
4 |
5 | This tool starts a new run for a specified job with the ability to override default settings like Git branch, schema, or other execution parameters.
6 |
7 | ## Parameters
8 |
9 | - **job_id** (required): The job ID to trigger
10 | - **cause** (required): Description of why the job is being triggered
11 | - **git_branch** (optional): Override the Git branch to checkout
12 | - **git_sha** (optional): Override the Git SHA to checkout
13 | - **schema_override** (optional): Override the destination schema
14 |
15 | ## Additional Override Options
16 |
17 | The API supports additional overrides (can be added to the implementation):
18 |
19 | - **dbt_version_override**: Override the dbt version
20 | - **threads_override**: Override the number of threads
21 | - **target_name_override**: Override the target name
22 | - **generate_docs_override**: Override docs generation setting
23 | - **timeout_seconds_override**: Override the timeout
24 | - **steps_override**: Override the dbt commands to execute
25 |
26 | ## Returns
27 |
28 | Run object with information about the newly triggered run including:
29 |
30 | - Run ID and status
31 | - Job and environment information
32 | - Git branch and SHA being used
33 | - Trigger information and cause
34 | - Execution queue position
35 |
36 | ## Use Cases
37 |
38 | - Trigger ad-hoc job runs for testing
39 | - Run jobs with different Git branches for feature testing
40 | - Execute jobs with schema overrides for development
41 | - Trigger jobs via API automation or external systems
42 | - Run jobs with custom parameters for specific scenarios
43 |
44 | ## Example Usage
45 |
46 | ```json
47 | {
48 | "job_id": 456,
49 | "cause": "Manual trigger for testing"
50 | }
51 | ```
52 |
53 | ```json
54 | {
55 | "job_id": 456,
56 | "cause": "Testing feature branch",
57 | "git_branch": "feature/new-models",
58 | "schema_override": "dev_testing"
59 | }
60 | ```
61 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/admin_api/get_job_run_artifact.md:
--------------------------------------------------------------------------------
```markdown
1 | Download a specific artifact file from a dbt job run.
2 |
3 | This tool retrieves the content of a specific artifact file generated during run execution, such as manifest.json, catalog.json, or compiled SQL files.
4 |
5 | ## Parameters
6 |
7 | - **run_id** (required): The run ID containing the artifact
8 | - **artifact_path** (required): The path to the specific artifact file
9 | - **step** (optional): The step index to retrieve artifacts from (default: last step)
10 |
11 | ## Common Artifact Paths
12 |
13 | - **manifest.json**: Complete dbt project metadata, models, and lineage
14 | - **catalog.json**: Table and column documentation with statistics
15 | - **run_results.json**: Execution results, timing, and status information
16 | - **sources.json**: Source freshness check results
17 | - **compiled/[model_path].sql**: Individual compiled SQL files
18 | - **logs/dbt.log**: Complete execution logs
19 |
20 | ## Returns
21 |
22 | The artifact content in its original format:
23 | - JSON files return parsed JSON objects
24 | - SQL files return text content
25 | - Log files return text content
26 |
27 | ## Use Cases
28 |
29 | - Download manifest.json for lineage analysis
30 | - Get catalog.json for documentation systems
31 | - Retrieve run_results.json for execution monitoring
32 | - Access compiled SQL for debugging
33 | - Download logs for troubleshooting failures
34 | - Integration with external tools and systems
35 |
36 | ## Step Selection
37 |
38 | - By default, artifacts from the last step are returned
39 | - Use the `step` parameter to get artifacts from earlier steps
40 | - Step indexing starts at 1 for the first step
41 |
42 | ## Example Usage
43 |
44 | ```json
45 | {
46 | "run_id": 789,
47 | "artifact_path": "manifest.json"
48 | }
49 | ```
50 |
51 | ```json
52 | {
53 | "run_id": 789,
54 | "artifact_path": "compiled/analytics/models/staging/stg_users.sql"
55 | }
56 | ```
57 |
58 | ```json
59 | {
60 | "run_id": 789,
61 | "artifact_path": "run_results.json",
62 | "step": 2
63 | }
64 | ```
65 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/dbt_admin/run_results_errors/config.py:
--------------------------------------------------------------------------------
```python
1 | from typing import Any
2 | from pydantic import BaseModel
3 |
4 |
5 | class RunStepSchema(BaseModel):
6 | """Schema for individual "run_step" key from get_job_run_details()."""
7 |
8 | name: str
9 | status: int # 20 = error
10 | index: int
11 | finished_at: str | None = None
12 | logs: str | None = None
13 |
14 | class Config:
15 | extra = "allow"
16 |
17 |
18 | class RunDetailsSchema(BaseModel):
19 | """Schema for get_job_run_details() response."""
20 |
21 | is_cancelled: bool
22 | run_steps: list[RunStepSchema]
23 | finished_at: str | None = None
24 |
25 | class Config:
26 | extra = "allow"
27 |
28 |
29 | class RunResultSchema(BaseModel):
30 | """Schema for individual result in "results" key of run_results.json."""
31 |
32 | unique_id: str
33 | status: str # "success", "error", "fail", "skip"
34 | message: str | None = None
35 | relation_name: str | None = None
36 | compiled_code: str | None = None
37 |
38 | class Config:
39 | extra = "allow"
40 |
41 |
42 | class RunResultsArgsSchema(BaseModel):
43 | """Schema for "args" key in run_results.json."""
44 |
45 | target: str | None = None
46 |
47 | class Config:
48 | extra = "allow"
49 |
50 |
51 | class RunResultsArtifactSchema(BaseModel):
52 | """Schema for get_job_run_artifact() response (run_results.json)."""
53 |
54 | results: list[RunResultSchema]
55 | args: RunResultsArgsSchema | None = None
56 | metadata: dict[str, Any] | None = None
57 |
58 | class Config:
59 | extra = "allow"
60 |
61 |
62 | class ErrorResultSchema(BaseModel):
63 | """Schema for individual error result."""
64 |
65 | unique_id: str | None = None
66 | relation_name: str | None = None
67 | message: str
68 | compiled_code: str | None = None
69 | truncated_logs: str | None = None
70 |
71 |
72 | class ErrorStepSchema(BaseModel):
73 | """Schema for a single failed step with its errors."""
74 |
75 | target: str | None = None
76 | step_name: str | None = None
77 | finished_at: str | None = None
78 | errors: list[ErrorResultSchema]
79 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/semantic_layer/levenshtein.py:
--------------------------------------------------------------------------------
```python
1 | from dataclasses import dataclass
2 |
3 |
4 | @dataclass
5 | class Misspelling:
6 | word: str
7 | similar_words: list[str]
8 |
9 |
10 | def levenshtein(s1: str, s2: str) -> int:
11 | len_s1, len_s2 = len(s1), len(s2)
12 | dp = [[0] * (len_s2 + 1) for _ in range(len_s1 + 1)]
13 |
14 | for i in range(len_s1 + 1):
15 | dp[i][0] = i
16 | for j in range(len_s2 + 1):
17 | dp[0][j] = j
18 |
19 | for i in range(1, len_s1 + 1):
20 | for j in range(1, len_s2 + 1):
21 | cost = 0 if s1[i - 1] == s2[j - 1] else 1
22 | dp[i][j] = min(
23 | dp[i - 1][j] + 1, # Deletion
24 | dp[i][j - 1] + 1, # Insertion
25 | dp[i - 1][j - 1] + cost, # Substitution
26 | )
27 | return dp[len_s1][len_s2]
28 |
29 |
30 | def get_closest_words(
31 | target: str,
32 | words: list[str],
33 | top_k: int | None = None,
34 | threshold: int | None = None,
35 | ) -> list[str]:
36 | distances = [(word, levenshtein(target, word)) for word in words]
37 |
38 | # Filter by threshold if provided
39 | if threshold is not None:
40 | distances = [(word, dist) for word, dist in distances if dist <= threshold]
41 |
42 | # Sort by distance
43 | distances.sort(key=lambda x: x[1])
44 |
45 | # Limit by top_k if provided
46 | if top_k is not None:
47 | distances = distances[:top_k]
48 |
49 | return [word for word, _ in distances]
50 |
51 |
52 | def get_misspellings(
53 | targets: list[str],
54 | words: list[str],
55 | top_k: int | None = None,
56 | ) -> list[Misspelling]:
57 | misspellings = []
58 | for target in targets:
59 | if target not in words:
60 | misspellings.append(
61 | Misspelling(
62 | word=target,
63 | similar_words=get_closest_words(
64 | target=target,
65 | words=words,
66 | top_k=top_k,
67 | threshold=max(1, len(target) // 2),
68 | ),
69 | )
70 | )
71 | return misspellings
72 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/dbt_cli/binary_type.py:
--------------------------------------------------------------------------------
```python
1 | import subprocess
2 | from enum import Enum
3 |
4 | from dbt_mcp.errors import BinaryExecutionError
5 |
6 |
7 | class BinaryType(Enum):
8 | DBT_CORE = "dbt_core"
9 | FUSION = "fusion"
10 | DBT_CLOUD_CLI = "dbt_cloud_cli"
11 |
12 |
13 | def detect_binary_type(file_path: str) -> BinaryType:
14 |     """
15 |     Detect the type of dbt binary (dbt Core, Fusion, or dbt Cloud CLI) by running --help.
16 | 
17 |     Args:
18 |         file_path: Path to the dbt executable
19 | 
20 |     Returns:
21 |         BinaryType: The detected binary type
22 | 
23 |     Raises:
24 |         BinaryExecutionError: If the binary cannot be executed or accessed
25 |     """
26 |     try:
27 |         result = subprocess.run(
28 |             [file_path, "--help"],
29 |             check=False,
30 |             capture_output=True,
31 |             text=True,
32 |             timeout=10,
33 |         )
34 |         help_output = result.stdout
35 |     except Exception as e:
36 |         raise BinaryExecutionError(f"Cannot execute binary {file_path}: {e}")
37 | 
38 |     if not help_output:
39 |         # Default to dbt Core if no output
40 |         return BinaryType.DBT_CORE
41 | 
42 |     first_line = help_output.split("\n")[0] if help_output else ""
43 | 
44 |     # Check for dbt-fusion
45 |     if "dbt-fusion" in first_line:
46 |         return BinaryType.FUSION
47 | 
48 |     # Check for dbt Core
49 |     if "Usage: dbt [OPTIONS] COMMAND [ARGS]..." in first_line:
50 |         return BinaryType.DBT_CORE
51 | 
52 |     # Check for dbt Cloud CLI
53 |     if "The dbt Cloud CLI" in first_line:
54 |         return BinaryType.DBT_CLOUD_CLI
55 | 
56 |     # Default to dbt Core - We could move to Fusion in the future
57 |     return BinaryType.DBT_CORE
58 |
59 |
60 | def get_color_disable_flag(binary_type: BinaryType) -> str:
61 | """
62 | Get the appropriate color disable flag for the given binary type.
63 |
64 | Args:
65 | binary_type: The type of dbt binary
66 |
67 | Returns:
68 | str: The color disable flag to use
69 | """
70 | if binary_type == BinaryType.DBT_CLOUD_CLI:
71 | return "--no-color"
72 | else: # DBT_CORE or FUSION
73 | return "--no-use-colors"
74 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/discovery/get_model_health.md:
--------------------------------------------------------------------------------
```markdown
1 | <instructions>
2 | Retrieves information about the health of a dbt model, including the last time it ran, the last test execution status, and whether the upstream data for the model is fresh.
3 |
4 | IMPORTANT: Use uniqueId when available.
5 | - Using uniqueId guarantees the correct model is retrieved
6 | - Using only model_name may return incorrect results or fail entirely
7 | - If you obtained models via get_all_models(), you should always use the uniqueId from those results
8 |
9 | ASSESSING MODEL HEALTH:
10 | For all of the below, summarize whether the model is healthy, questionable, or unhealthy. Only provide more details when asked.
11 |
12 | - for the model executionInfo, if the lastRunStatus is "success" consider the model healthy
13 | - for the test executionInfo, if the lastRunStatus is "success" consider the model healthy
14 |
15 | - for the models parents:
16 | -- check the modelExecutionInfo, snapshotExecutionInfo, and seedExecutionInfo. If the lastRunStatus is "success" consider the model healthy. If the lastRunStatus is "error" consider the model unhealthy.
17 |
18 | -- if the parent node is a SourceAppliedStateNestedNode:
19 | --- If the freshnessStatus is "pass", consider the model healthy
20 | --- If the freshnessStatus is "fail", consider the model unhealthy
21 | --- If the freshnessStatus is null, consider the model health questionable
22 | --- If the freshnessStatus is "warn", consider the model health questionable
23 | </instructions>
24 |
25 | <parameters>
26 | uniqueId: The unique identifier of the model (format: "model.project_name.model_name"). STRONGLY RECOMMENDED when available.
27 | model_name: The name of the dbt model. Only use this when uniqueId is unavailable.
28 | </parameters>
29 |
30 | <examples>
31 | 1. PREFERRED METHOD - Using uniqueId (always use this when available):
32 |    get_model_health(uniqueId="model.my_project.customer_orders")
33 | 
34 | 2. FALLBACK METHOD - Using only model_name (only when uniqueId is unknown):
35 |    get_model_health(model_name="customer_orders")
36 | </examples>
```
--------------------------------------------------------------------------------
/src/dbt_mcp/tools/toolsets.py:
--------------------------------------------------------------------------------
```python
1 | from enum import Enum
2 | from typing import Literal
3 |
4 | from dbt_mcp.tools.tool_names import ToolName
5 |
6 |
7 | class Toolset(Enum):
8 | SQL = "sql"
9 | SEMANTIC_LAYER = "semantic_layer"
10 | DISCOVERY = "discovery"
11 | DBT_CLI = "dbt_cli"
12 | ADMIN_API = "admin_api"
13 | DBT_CODEGEN = "dbt_codegen"
14 | DBT_LSP = "dbt_lsp"
15 |
16 |
17 | proxied_tools: set[Literal[ToolName.TEXT_TO_SQL, ToolName.EXECUTE_SQL]] = set(
18 | [
19 | ToolName.TEXT_TO_SQL,
20 | ToolName.EXECUTE_SQL,
21 | ]
22 | )
23 |
24 | toolsets = {
25 | Toolset.SQL: {
26 | ToolName.TEXT_TO_SQL,
27 | ToolName.EXECUTE_SQL,
28 | },
29 | Toolset.SEMANTIC_LAYER: {
30 | ToolName.LIST_METRICS,
31 | ToolName.GET_DIMENSIONS,
32 | ToolName.GET_ENTITIES,
33 | ToolName.QUERY_METRICS,
34 | ToolName.GET_METRICS_COMPILED_SQL,
35 | },
36 | Toolset.DISCOVERY: {
37 | ToolName.GET_MART_MODELS,
38 | ToolName.GET_ALL_MODELS,
39 | ToolName.GET_MODEL_DETAILS,
40 | ToolName.GET_MODEL_PARENTS,
41 | ToolName.GET_MODEL_CHILDREN,
42 | ToolName.GET_MODEL_HEALTH,
43 | ToolName.GET_ALL_SOURCES,
44 | ToolName.GET_EXPOSURES,
45 | ToolName.GET_EXPOSURE_DETAILS,
46 | },
47 | Toolset.DBT_CLI: {
48 | ToolName.BUILD,
49 | ToolName.COMPILE,
50 | ToolName.DOCS,
51 | ToolName.LIST,
52 | ToolName.PARSE,
53 | ToolName.RUN,
54 | ToolName.TEST,
55 | ToolName.SHOW,
56 | },
57 | Toolset.ADMIN_API: {
58 | ToolName.LIST_JOBS,
59 | ToolName.GET_JOB_DETAILS,
60 | ToolName.TRIGGER_JOB_RUN,
61 | ToolName.LIST_JOBS_RUNS,
62 | ToolName.GET_JOB_RUN_DETAILS,
63 | ToolName.CANCEL_JOB_RUN,
64 | ToolName.RETRY_JOB_RUN,
65 | ToolName.LIST_JOB_RUN_ARTIFACTS,
66 | ToolName.GET_JOB_RUN_ARTIFACT,
67 | ToolName.GET_JOB_RUN_ERROR,
68 | },
69 | Toolset.DBT_CODEGEN: {
70 | ToolName.GENERATE_SOURCE,
71 | ToolName.GENERATE_MODEL_YAML,
72 | ToolName.GENERATE_STAGING_MODEL,
73 | },
74 | Toolset.DBT_LSP: {
75 | ToolName.GET_COLUMN_LINEAGE,
76 | },
77 | }
78 |
```
--------------------------------------------------------------------------------
/examples/aws_strands_agent/requirements.txt:
--------------------------------------------------------------------------------
```
1 | aiohappyeyeballs==2.6.1
2 | aiohttp==3.12.15
3 | aiosignal==1.4.0
4 | annotated-types==0.7.0
5 | anyio==4.10.0
6 | attrs==25.3.0
7 | autopep8==2.3.2
8 | aws-requests-auth==0.4.3
9 | beautifulsoup4==4.13.5
10 | bedrock-agentcore==0.1.4
11 | bedrock-agentcore-starter-toolkit==0.1.12
12 | boto3==1.40.34
13 | botocore==1.40.34
14 | certifi==2025.8.3
15 | chardet==5.2.0
16 | charset-normalizer==3.4.3
17 | click==8.3.0
18 | dill==0.4.0
19 | docstring_parser==0.17.0
20 | dotenv==0.9.9
21 | frozenlist==1.7.0
22 | h11==0.16.0
23 | html5lib==1.1
24 | httpcore==1.0.9
25 | httpx==0.28.1
26 | httpx-sse==0.4.1
27 | idna==3.10
28 | importlib_metadata==8.7.0
29 | Jinja2==3.1.6
30 | jmespath==1.0.1
31 | jsonschema==4.25.1
32 | jsonschema-path==0.3.4
33 | jsonschema-specifications==2025.9.1
34 | lazy-object-proxy==1.12.0
35 | lxml==6.0.2
36 | markdown-it-py==4.0.0
37 | markdownify==1.2.0
38 | MarkupSafe==3.0.2
39 | mcp==1.14.1
40 | mdurl==0.1.2
41 | mpmath==1.3.0
42 | multidict==6.6.4
43 | openapi-schema-validator==0.6.3
44 | openapi-spec-validator==0.7.2
45 | opentelemetry-api==1.37.0
46 | opentelemetry-instrumentation==0.58b0
47 | opentelemetry-instrumentation-threading==0.58b0
48 | opentelemetry-sdk==1.37.0
49 | opentelemetry-semantic-conventions==0.58b0
50 | packaging==25.0
51 | pathable==0.4.4
52 | pillow==11.3.0
53 | prance==25.4.8.0
54 | prompt_toolkit==3.0.52
55 | propcache==0.3.2
56 | py-openapi-schema-to-json-schema==0.0.3
57 | pycodestyle==2.14.0
58 | pydantic==2.11.9
59 | pydantic-settings==2.10.1
60 | pydantic_core==2.33.2
61 | Pygments==2.19.2
62 | PyJWT==2.10.1
63 | python-dateutil==2.9.0.post0
64 | python-dotenv==1.1.1
65 | python-multipart==0.0.20
66 | PyYAML==6.0.2
67 | questionary==2.1.1
68 | readabilipy==0.3.0
69 | referencing==0.36.2
70 | regex==2025.9.18
71 | requests==2.32.5
72 | rfc3339-validator==0.1.4
73 | rich==14.1.0
74 | rpds-py==0.27.1
75 | ruamel.yaml==0.18.15
76 | ruamel.yaml.clib==0.2.12
77 | s3transfer==0.14.0
78 | shellingham==1.5.4
79 | six==1.17.0
80 | slack_bolt==1.25.0
81 | slack_sdk==3.36.0
82 | sniffio==1.3.1
83 | soupsieve==2.8
84 | sse-starlette==3.0.2
85 | starlette==0.49.1
86 | strands-agents==1.9.0
87 | strands-agents-tools==0.2.8
88 | sympy==1.14.0
89 | tenacity==9.1.2
90 | toml==0.10.2
91 | typer==0.17.4
92 | typing-inspection==0.4.1
93 | typing_extensions==4.15.0
94 | urllib3==2.5.0
95 | uvicorn==0.35.0
96 | watchdog==6.0.0
97 | wcwidth==0.2.13
98 | webencodings==0.5.1
99 | wrapt==1.17.3
100 | yarl==1.20.1
101 | zipp==3.23.0
102 |
```
--------------------------------------------------------------------------------
/examples/langgraph_agent/main.py:
--------------------------------------------------------------------------------
```python
1 | # mypy: ignore-errors
2 |
3 | import asyncio
4 | import os
5 |
6 | from langchain_mcp_adapters.client import MultiServerMCPClient
7 | from langgraph.checkpoint.memory import InMemorySaver
8 | from langgraph.prebuilt import create_react_agent
9 |
10 |
11 | def print_stream_item(item):
12 | if "agent" in item:
13 | content = [
14 | part
15 | for message in item["agent"]["messages"]
16 | for part in (
17 | message.content
18 | if isinstance(message.content, list)
19 | else [message.content]
20 | )
21 | ]
22 | for c in content:
23 | if isinstance(c, str):
24 | print(f"Agent > {c}")
25 | elif "text" in c:
26 | print(f"Agent > {c['text']}")
27 | elif c["type"] == "tool_use":
28 | print(f" using tool: {c['name']}")
29 |
30 |
31 | async def main():
32 | url = f"https://{os.environ.get('DBT_HOST')}/api/ai/v1/mcp/"
33 | headers = {
34 | "x-dbt-user-id": os.environ.get("DBT_USER_ID"),
35 | "x-dbt-prod-environment-id": os.environ.get("DBT_PROD_ENV_ID"),
36 | "x-dbt-dev-environment-id": os.environ.get("DBT_DEV_ENV_ID"),
37 | "Authorization": f"token {os.environ.get('DBT_TOKEN')}",
38 | }
39 | client = MultiServerMCPClient(
40 | {
41 | "dbt": {
42 | "url": url,
43 | "headers": headers,
44 | "transport": "streamable_http",
45 | }
46 | }
47 | )
48 | tools = await client.get_tools()
49 | agent = create_react_agent(
50 | model="anthropic:claude-3-7-sonnet-latest",
51 | tools=tools,
52 | # This allows the agent to have conversational memory.
53 | checkpointer=InMemorySaver(),
54 | )
55 | # This config maintains the conversation thread.
56 | config = {"configurable": {"thread_id": "1"}}
57 | while True:
58 | user_input = input("User > ")
59 | async for item in agent.astream(
60 | {"messages": {"role": "user", "content": user_input}},
61 | config,
62 | ):
63 | print_stream_item(item)
64 |
65 |
66 | if __name__ == "__main__":
67 | asyncio.run(main())
68 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/tools/tool_names.py:
--------------------------------------------------------------------------------
```python
1 | from enum import Enum
2 |
3 |
4 | class ToolName(Enum):
5 | """Tool names available in the FastMCP server.
6 |
7 | This enum provides type safety and autocompletion for tool names.
8 | The validate_server_tools() function should be used to ensure
9 | this enum stays in sync with the actual server tools.
10 | """
11 |
12 | # dbt CLI tools
13 | BUILD = "build"
14 | COMPILE = "compile"
15 | DOCS = "docs"
16 | LIST = "list"
17 | PARSE = "parse"
18 | RUN = "run"
19 | TEST = "test"
20 | SHOW = "show"
21 |
22 | # Semantic Layer tools
23 | LIST_METRICS = "list_metrics"
24 | GET_DIMENSIONS = "get_dimensions"
25 | GET_ENTITIES = "get_entities"
26 | QUERY_METRICS = "query_metrics"
27 | GET_METRICS_COMPILED_SQL = "get_metrics_compiled_sql"
28 |
29 | # Discovery tools
30 | GET_MART_MODELS = "get_mart_models"
31 | GET_ALL_MODELS = "get_all_models"
32 | GET_MODEL_DETAILS = "get_model_details"
33 | GET_MODEL_PARENTS = "get_model_parents"
34 | GET_MODEL_CHILDREN = "get_model_children"
35 | GET_MODEL_HEALTH = "get_model_health"
36 | GET_ALL_SOURCES = "get_all_sources"
37 | GET_EXPOSURES = "get_exposures"
38 | GET_EXPOSURE_DETAILS = "get_exposure_details"
39 |
40 | # SQL tools
41 | TEXT_TO_SQL = "text_to_sql"
42 | EXECUTE_SQL = "execute_sql"
43 |
44 | # Admin API tools
45 | LIST_JOBS = "list_jobs"
46 | GET_JOB_DETAILS = "get_job_details"
47 | TRIGGER_JOB_RUN = "trigger_job_run"
48 | LIST_JOBS_RUNS = "list_jobs_runs"
49 | GET_JOB_RUN_DETAILS = "get_job_run_details"
50 | CANCEL_JOB_RUN = "cancel_job_run"
51 | RETRY_JOB_RUN = "retry_job_run"
52 | LIST_JOB_RUN_ARTIFACTS = "list_job_run_artifacts"
53 | GET_JOB_RUN_ARTIFACT = "get_job_run_artifact"
54 | GET_JOB_RUN_ERROR = "get_job_run_error"
55 |
56 | # dbt-codegen tools
57 | GENERATE_SOURCE = "generate_source"
58 | GENERATE_MODEL_YAML = "generate_model_yaml"
59 | GENERATE_STAGING_MODEL = "generate_staging_model"
60 |
61 | # dbt LSP tools
62 | GET_COLUMN_LINEAGE = "get_column_lineage"
63 |
64 | @classmethod
65 | def get_all_tool_names(cls) -> set[str]:
66 | """Returns a set of all tool names as strings."""
67 | return {member.value for member in cls}
68 |
```
--------------------------------------------------------------------------------
/src/dbt_mcp/prompts/semantic_layer/get_metrics_compiled_sql.md:
--------------------------------------------------------------------------------
```markdown
1 | <instructions>
2 | Gets compiled SQL for given metrics and dimensions/entities from the dbt Semantic Layer.
3 |
4 | This tool generates the underlying SQL that would be executed for a given metric query by the `query_metrics` tool,
5 | without actually running the query. This is useful for understanding what SQL is being
6 | generated, debugging query issues, or getting SQL to run elsewhere.
7 |
8 | To use this tool, you must first know about specific metrics, dimensions and
9 | entities to provide. You can call the list_metrics, get_dimensions,
10 | and get_entities tools to get information about which metrics, dimensions,
11 | and entities to use.
12 |
13 | When using the `group_by` parameter, ensure that the dimensions and entities
14 | you specify are valid for the given metrics. Time dimensions can include
15 | grain specifications (e.g., MONTH, DAY, YEAR).
16 |
17 | The tool will return the compiled SQL that the dbt Semantic Layer would generate
18 | to calculate the specified metrics with the given groupings.
19 |
20 | Don't call this tool if the user's question cannot be answered with the provided
21 | metrics, dimensions, and entities. Instead, clarify what metrics, dimensions,
22 | and entities are available and suggest a new question that can be answered
23 | and is approximately the same as the user's question.
24 |
25 | This tool is particularly useful when:
26 | - Users want to see the underlying SQL for a metric calculation
27 | - Debugging complex metric definitions
28 | - Understanding how grouping affects the generated SQL
29 | - Getting SQL to run in other tools or systems
30 | </instructions>
31 |
32 | Returns the compiled SQL as a string, or an error message if the compilation fails.
33 |
34 | <parameters>
35 | metrics: List of metric names (strings) to query for.
36 | group_by: Optional list of objects with name (string), type ("dimension" or "time_dimension"), and grain (string or null for time dimensions only).
37 | order_by: Optional list of objects with name (string) and descending (boolean, default false).
38 | where: Optional SQL WHERE clause (string) to filter results.
39 | limit: Optional limit (integer) for number of results.
40 | </parameters>
41 |
```