# Directory Structure
```
├── .gitignore
├── .python-version
├── CLAUDE.md
├── docs
│   ├── API_REFERENCES.md
│   ├── CLAUDE.md
│   ├── CONTRIBUTING.md
│   ├── conversations
│   │   ├── account.md
│   │   ├── apply-management-conversation.md
│   │   ├── assessment-results-conversation.md
│   │   ├── cost-estimate-conversation.md
│   │   ├── organization-entitlements-conversation.md
│   │   ├── organizations-management-conversation.md
│   │   ├── plan-management-conversation.md
│   │   ├── project-management-conversation.md
│   │   ├── runs-management-conversation.md
│   │   ├── state_management.md
│   │   ├── variables-conversation.md
│   │   └── workspace-management-conversation.md
│   ├── DEVELOPMENT.md
│   ├── FILTERING_SYSTEM.md
│   ├── models
│   │   ├── account.md
│   │   ├── apply.md
│   │   ├── assessment_result.md
│   │   ├── cost_estimate.md
│   │   ├── organization.md
│   │   ├── plan.md
│   │   ├── project.md
│   │   ├── run.md
│   │   ├── state_version_outputs.md
│   │   ├── state_versions.md
│   │   ├── variables.md
│   │   └── workspace.md
│   ├── README.md
│   └── tools
│       ├── account.md
│       ├── apply.md
│       ├── assessment_results.md
│       ├── cost_estimate.md
│       ├── organization.md
│       ├── plan.md
│       ├── project.md
│       ├── run.md
│       ├── state_version_outputs.md
│       ├── state_versions.md
│       ├── variables.md
│       └── workspace.md
├── env.example
├── LICENSE
├── mypy.ini
├── pyproject.toml
├── README.md
├── terraform_cloud_mcp
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── CLAUDE.md
│   │   └── client.py
│   ├── configs
│   │   ├── __init__.py
│   │   ├── CLAUDE.md
│   │   └── filter_configs.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── account.py
│   │   ├── applies.py
│   │   ├── assessment_results.py
│   │   ├── base.py
│   │   ├── CLAUDE.md
│   │   ├── cost_estimates.py
│   │   ├── filters.py
│   │   ├── organizations.py
│   │   ├── plans.py
│   │   ├── projects.py
│   │   ├── runs.py
│   │   ├── state_version_outputs.py
│   │   ├── state_versions.py
│   │   ├── variables.py
│   │   └── workspaces.py
│   ├── server.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── account.py
│   │   ├── applies.py
│   │   ├── assessment_results.py
│   │   ├── CLAUDE.md
│   │   ├── cost_estimates.py
│   │   ├── organizations.py
│   │   ├── plans.py
│   │   ├── projects.py
│   │   ├── runs.py
│   │   ├── state_version_outputs.py
│   │   ├── state_versions.py
│   │   ├── variables.py
│   │   └── workspaces.py
│   └── utils
│       ├── __init__.py
│       ├── CLAUDE.md
│       ├── decorators.py
│       ├── env.py
│       ├── filters.py
│       ├── payload.py
│       └── request.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
```
3.12
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# Process IDs
*.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
# Others
examples/
ISSUES.md
TASKS.md
```
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
```markdown
# Terraform Cloud MCP Documentation
This directory contains supplementary documentation for the Terraform Cloud MCP project.
## Structure
### Core Documentation
- `FILTERING_SYSTEM.md` - Audit-safe response filtering system (5-15% token reduction, 100% audit compliance)
- `DEVELOPMENT.md` - Comprehensive development guidelines and coding standards
- `API_REFERENCES.md` - Terraform Cloud API documentation links with implementation status
- `CONTRIBUTING.md` - Guidelines for contributing to the project
### Model Documentation
The `models/` directory contains documentation for each model type:
- `account.md` - Account model documentation
- `apply.md` - Apply model documentation
- `assessment_result.md` - Assessment result model documentation
- `cost_estimate.md` - Cost estimate model documentation
- `organization.md` - Organization model documentation
- `plan.md` - Plan model documentation
- `project.md` - Project model documentation
- `run.md` - Run model documentation
- `state_versions.md` - State version model documentation
- `state_version_outputs.md` - State version outputs model documentation
- `variables.md` - Variables and variable sets model documentation
- `workspace.md` - Workspace model documentation
### Tool Documentation
The `tools/` directory contains reference documentation for each tool:
- `account.md` - Account tool reference documentation
- `apply.md` - Apply management tool reference documentation
- `assessment_results.md` - Assessment results tool reference documentation
- `cost_estimate.md` - Cost estimate tools reference documentation
- `organization.md` - Organization tools reference documentation
- `plan.md` - Plan management tool reference documentation
- `project.md` - Project management tool reference documentation
- `run.md` - Run tools reference documentation
- `state_versions.md` - State version management tool reference documentation
- `state_version_outputs.md` - State version outputs tool reference documentation
- `variables.md` - Variables and variable sets tool reference documentation
- `workspace.md` - Workspace tools reference documentation
### Conversation Examples
The `conversations/` directory contains example conversations and usage scenarios:
- `account.md` - Account management conversation examples
- `apply-management-conversation.md` - Apply management conversation examples
- `assessment-results-conversation.md` - Assessment results conversation examples
- `cost-estimate-conversation.md` - Cost estimation conversation examples
- `organization-entitlements-conversation.md` - Organization entitlements conversation examples
- `organizations-management-conversation.md` - Organization management conversation examples
- `plan-management-conversation.md` - Plan management conversation examples
- `project-management-conversation.md` - Project management conversation examples
- `runs-management-conversation.md` - Run management conversation examples
- `state_management.md` - State management conversation examples
- `variables-conversation.md` - Variables management conversation examples
- `workspace-management-conversation.md` - Workspace management conversation examples
## Documentation Philosophy
1. Code docstrings contain essential information about purpose, fields, parameters, and return values
2. Tool documentation provides context, API references, and usage guidance
3. Model examples illustrate how to work with data structures
4. Development standards and patterns are consolidated in DEVELOPMENT.md
5. Conversation examples demonstrate practical application scenarios
6. This structure keeps the codebase clean while maintaining comprehensive documentation
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
[](https://mseep.ai/app/severity1-terraform-cloud-mcp)
# Terraform Cloud MCP Server
A Model Context Protocol (MCP) server that integrates AI assistants with the Terraform Cloud API, allowing you to manage your infrastructure through natural conversation. Built with Pydantic models and structured around domain-specific modules, this server is compatible with any MCP-supporting platform including Claude, Claude Code CLI, Claude Desktop, Cursor, Copilot Studio, and others.
---
## Features
- **Account Management**: Get account details for authenticated users or service accounts.
- **Workspace Management**: Create, read, update, lock/unlock workspaces, and optionally delete workspaces (with safety controls).
- **Project Management**: Create, list, update projects, and optionally delete projects; manage project tag bindings and move workspaces between projects.
- **Run Management**: Create runs, list runs, get run details, apply/discard/cancel runs.
- **Plan Management**: Retrieve plan details and JSON execution output with advanced HTTP redirect handling.
- **Apply Management**: Get apply details and recover from failed state uploads.
- **Organization Management**: List, create, update organizations, view organization entitlements, and optionally delete organizations (with safety controls).
- **Cost Estimation**: Retrieve detailed cost estimates for infrastructure changes including proposed monthly costs, prior costs, resource counts, and usage projections.
- **Assessment Results**: Retrieve health assessment details, JSON output, schema files, and logs from Terraform Cloud health assessments.
- **State Version Management**: List, retrieve, create, and download state versions; get current state for workspaces.
- **State Version Outputs**: List and retrieve specific outputs from state versions including values and sensitivity information.
- **Variables Management**: Complete workspace variable and variable set management including creation, updates, assignments, and optionally deletion (with safety controls).
### Performance Features
- **Audit-Safe Response Filtering**: Conservative token optimization (5-15% reduction) with 100% audit compliance - preserves all user accountability, security configuration, and change tracking data for comprehensive compliance scenarios.
### Safety Features
- **Destructive Operation Controls**: Delete operations are disabled by default and require explicit enablement via environment variable
- **Destructive Hints**: MCP clients receive proper destructive operation warnings for potentially dangerous tools
- **Environment-Based Safety**: Production and development environments can have different safety configurations
---
## Quick Start
### Prerequisites
- Python 3.12+
- MCP (includes FastMCP and development tools)
- `uv` package manager (recommended) or `pip`
- Terraform Cloud API token
### Environment Variables
- `TFC_TOKEN` - Terraform Cloud API token (required)
- `TFC_ADDRESS` - Terraform Cloud/Enterprise address (optional, defaults to https://app.terraform.io)
- `ENABLE_DELETE_TOOLS` - Enable/disable destructive operations (optional, defaults to false)
- `ENABLE_RAW_RESPONSE` - Return raw vs filtered responses (optional, defaults to false)
---
### Installation
```bash
# Clone the repository
git clone https://github.com/severity1/terraform-cloud-mcp.git
cd terraform-cloud-mcp
# Create virtual environment and activate it
uv venv
source .venv/bin/activate
# Install package
uv pip install .
```
### Adding to Claude Environments
#### Adding to Claude Code CLI
```bash
# Add to Claude Code with your Terraform Cloud token
claude mcp add -e TFC_TOKEN=YOUR_TF_TOKEN -e ENABLE_DELETE_TOOLS=false -s user terraform-cloud-mcp -- "terraform-cloud-mcp"
# To use a self-hosted Terraform Enterprise instance:
# claude mcp add -e TFC_TOKEN=YOUR_TF_TOKEN -e TFC_ADDRESS=https://terraform.example.com -s user terraform-cloud-mcp -- "terraform-cloud-mcp"
# To enable delete operations (use with caution):
# claude mcp add -e TFC_TOKEN=YOUR_TF_TOKEN -e ENABLE_DELETE_TOOLS=true -s user terraform-cloud-mcp -- "terraform-cloud-mcp"
```
#### Adding to Claude Desktop
Create a `claude_desktop_config.json` configuration file:
- macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
- Windows: `%APPDATA%\Claude\claude_desktop_config.json`
```json
{
  "mcpServers": {
    "terraform-cloud-mcp": {
      "command": "/path/to/uv", # Get this by running: `which uv`
      "args": [
        "--directory",
        "/path/to/your/terraform-cloud-mcp", # Full path to this project
        "run",
        "terraform-cloud-mcp"
      ],
      "env": {
        "TFC_TOKEN": "my token...", # replace with actual token
        "TFC_ADDRESS": "https://app.terraform.io", # optional, change for self-hosted TFE
        "ENABLE_DELETE_TOOLS": "false" # set to "true" to enable destructive operations
      }
    }
  }
}
```
Replace the `TFC_TOKEN` placeholder value with your actual Terraform Cloud API token.
#### Other MCP-Compatible Platforms
For other platforms (like Cursor, Copilot Studio, or Glama), follow their platform-specific instructions for adding an MCP server. Most platforms require:
1. The server path or command to start the server.
2. Environment variables for the Terraform Cloud API token (`TFC_TOKEN`).
3. Optional environment variable for self-hosted Terraform Enterprise (`TFC_ADDRESS`).
4. Optional environment variable to enable delete operations (`ENABLE_DELETE_TOOLS=true` for destructive operations).
5. Configuration to auto-start the server when needed.
---
## Available Tools
### Account Tools
- `get_account_details()`: Gets account information for the authenticated user or service account.
### Workspace Management Tools
#### List & Search
- `list_workspaces(organization, page_number, page_size, search)`: List and filter workspaces.
- `get_workspace_details(workspace_id, organization, workspace_name)`: Get detailed information about a specific workspace.
#### Create & Update
- `create_workspace(organization, name, params)`: Create a new workspace with optional parameters.
- `update_workspace(organization, workspace_name, params)`: Update an existing workspace's configuration.
#### Delete (Requires ENABLE_DELETE_TOOLS=true)
- `delete_workspace(organization, workspace_name)`: Delete a workspace and all its content.
- `safe_delete_workspace(organization, workspace_name)`: Delete only if the workspace isn't managing any resources.
**Note**: Delete operations are disabled by default for safety. Set `ENABLE_DELETE_TOOLS=true` to enable these destructive operations.
#### Lock & Unlock
- `lock_workspace(workspace_id, reason)`: Lock a workspace to prevent runs.
- `unlock_workspace(workspace_id)`: Unlock a workspace to allow runs.
- `force_unlock_workspace(workspace_id)`: Force unlock a workspace locked by another user.
<!-- Future implementation: Data Retention
- `set_data_retention_policy(workspace_id, days)`: Set a data retention policy.
- `get_data_retention_policy(workspace_id)`: Get the current data retention policy.
- `delete_data_retention_policy(workspace_id)`: Delete the data retention policy. -->
### Run Management Tools
- `create_run(workspace_id, params)`: Create and queue a Terraform run in a workspace using its ID.
- `list_runs_in_workspace(workspace_id, ...)`: List and filter runs in a specific workspace using its ID.
- `list_runs_in_organization(organization, ...)`: List and filter runs across an entire organization.
- `get_run_details(run_id)`: Get detailed information about a specific run.
- `apply_run(run_id, comment)`: Apply a run waiting for confirmation.
- `discard_run(run_id, comment)`: Discard a run waiting for confirmation.
- `cancel_run(run_id, comment)`: Cancel a run currently planning or applying.
- `force_cancel_run(run_id, comment)`: Forcefully cancel a run immediately.
- `force_execute_run(run_id)`: Forcefully execute a pending run by canceling prior runs.
### Plan Management Tools
- `get_plan_details(plan_id)`: Get detailed information about a specific plan.
- `get_plan_json_output(plan_id)`: Retrieve the JSON execution plan for a specific plan with proper redirect handling.
- `get_run_plan_json_output(run_id)`: Retrieve the JSON execution plan from a run with proper redirect handling.
- `get_plan_logs(plan_id)`: Retrieve logs from a plan operation.
### Apply Management Tools
- `get_apply_details(apply_id)`: Get detailed information about a specific apply.
- `get_errored_state(apply_id)`: Retrieve the errored state from a failed apply for recovery.
- `get_apply_logs(apply_id)`: Retrieve logs from an apply operation.
### Project Management Tools
- `create_project(organization, name, params)`: Create a new project with optional parameters.
- `update_project(project_id, params)`: Update an existing project's configuration.
- `list_projects(organization, ...)`: List and filter projects in an organization.
- `get_project_details(project_id)`: Get detailed information about a specific project.
- `delete_project(project_id)`: Delete a project (fails if it contains workspaces). **Requires ENABLE_DELETE_TOOLS=true**
- `list_project_tag_bindings(project_id)`: List tags bound to a project.
- `add_update_project_tag_bindings(project_id, tag_bindings)`: Add or update tag bindings on a project.
- `move_workspaces_to_project(project_id, workspace_ids)`: Move workspaces into a project.
### Organization Management Tools
- `get_organization_details(organization)`: Get detailed information about a specific organization.
- `get_organization_entitlements(organization)`: Show entitlement set for organization features.
- `list_organizations(page_number, page_size, query, query_email, query_name)`: List and filter organizations.
- `create_organization(name, email, params)`: Create a new organization with optional parameters.
- `update_organization(organization, params)`: Update an existing organization's settings.
- `delete_organization(organization)`: Delete an organization and all its content. **Requires ENABLE_DELETE_TOOLS=true**
### Cost Estimation Tools
- `get_cost_estimate_details(cost_estimate_id)`: Get detailed information about a specific cost estimate, including resource counts (matched and unmatched), prior monthly cost, proposed monthly cost, and delta monthly cost estimations. Use run relationships to find cost estimate IDs for specific runs.
### Assessment Results Tools
- `get_assessment_result_details(assessment_result_id)`: Get detailed information about a specific health assessment result.
- `get_assessment_json_output(assessment_result_id)`: Retrieve the JSON execution plan from an assessment result.
- `get_assessment_json_schema(assessment_result_id)`: Retrieve the JSON schema file from an assessment result.
- `get_assessment_log_output(assessment_result_id)`: Retrieve logs from an assessment result operation.
### State Version Management Tools
- `list_state_versions(organization, workspace_name, page_number, page_size, filter_status)`: List and filter state versions in a workspace.
- `get_current_state_version(workspace_id)`: Get the current state version for a workspace.
- `get_state_version(state_version_id)`: Get details for a specific state version.
- `create_state_version(workspace_id, serial, md5, params)`: Create a new state version in a workspace.
- `download_state_file(state_version_id, json_format)`: Download the raw or JSON formatted state file.
### State Version Outputs Tools
- `list_state_version_outputs(state_version_id, page_number, page_size)`: List outputs for a specific state version.
- `get_state_version_output(state_version_output_id)`: Get details for a specific state version output.
### Variables Management Tools
#### Workspace Variables
- `list_workspace_variables(workspace_id)`: List all variables (Terraform and environment) for a workspace.
- `create_workspace_variable(workspace_id, key, category, params)`: Create a new variable in a workspace.
- `update_workspace_variable(workspace_id, variable_id, params)`: Update an existing workspace variable.
- `delete_workspace_variable(workspace_id, variable_id)`: Delete a workspace variable. **Requires ENABLE_DELETE_TOOLS=true**
#### Variable Sets
- `list_variable_sets(organization, page_number, page_size)`: List variable sets in an organization.
- `get_variable_set(varset_id)`: Get details for a specific variable set.
- `create_variable_set(organization, name, params)`: Create a new variable set.
- `update_variable_set(varset_id, params)`: Update an existing variable set.
- `delete_variable_set(varset_id)`: Delete a variable set and all its variables. **Requires ENABLE_DELETE_TOOLS=true**
- `assign_variable_set_to_workspaces(varset_id, workspace_ids)`: Assign a variable set to workspaces.
- `unassign_variable_set_from_workspaces(varset_id, workspace_ids)`: Remove a variable set from workspaces.
- `assign_variable_set_to_projects(varset_id, project_ids)`: Assign a variable set to projects.
- `unassign_variable_set_from_projects(varset_id, project_ids)`: Remove a variable set from projects.
#### Variable Set Variables
- `list_variables_in_variable_set(varset_id)`: List all variables in a variable set.
- `create_variable_in_variable_set(varset_id, key, category, params)`: Create a variable in a variable set.
- `update_variable_in_variable_set(varset_id, var_id, params)`: Update a variable in a variable set.
- `delete_variable_from_variable_set(varset_id, var_id)`: Delete a variable from a variable set. **Requires ENABLE_DELETE_TOOLS=true**
**Note**: Variable management includes both Terraform input variables and environment variables. Sensitive variables have their values hidden for security. Delete operations are disabled by default and require `ENABLE_DELETE_TOOLS=true`.
---
## Development Guide
For detailed development guidance including code standards, Pydantic patterns, and contribution workflows, see our [Development Documentation](docs/DEVELOPMENT.md).
### Quick Development Setup
```bash
# Clone the repository
git clone https://github.com/severity1/terraform-cloud-mcp.git
cd terraform-cloud-mcp
# Create virtual environment and activate it
uv venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate
# Install in development mode with development dependencies
uv pip install -e .
uv pip install black mypy pydantic ruff
```
### Basic Development Commands
```bash
# Run the server in development mode
mcp dev terraform_cloud_mcp/server.py
# Run tests and quality checks
uv run -m mypy .
uv run -m ruff check .
uv run -m black .
```
For detailed information on code organization, architecture, development workflows, and code quality guidelines, refer to [docs/DEVELOPMENT.md](docs/DEVELOPMENT.md).
---
## Documentation
The codebase includes comprehensive documentation:
- **Code Comments**: Focused on explaining the "why" behind implementation decisions
- **Docstrings**: All public functions and classes include detailed docstrings
- **Implementation References**: Development documentation now references actual code examples rather than using code snippets
- **Example Files**: The `docs/` directory contains detailed examples for each domain:
  - `docs/FILTERING_SYSTEM.md`: Comprehensive guide to the audit-safe response filtering system (5-15% token reduction, 100% audit compliance)
  - `docs/DEVELOPMENT.md`: Development standards and coding guidelines with references to actual code
  - `docs/API_REFERENCES.md`: Terraform Cloud API documentation links with implementation status
  - `docs/CONTRIBUTING.md`: Guidelines for contributing to the project
  - `docs/models/`: Reference documentation for all model types
  - `docs/tools/`: Detailed reference documentation for each tool
  - `docs/conversations/`: Sample conversation flows with the API
## Troubleshooting
1. Check server logs (debug logging is enabled by default)
2. Use the MCP Inspector (http://localhost:5173) for debugging
3. Debug logging is already enabled in `server.py`:
```python
import logging
logging.basicConfig(level=logging.DEBUG)
```
---
## Contributing
Contributions are welcome! Please open an issue or pull request if you'd like to contribute to this project.
See our [Contributing Guide](docs/CONTRIBUTING.md) for detailed instructions on how to get started, code quality standards, and the pull request process.
## Disclaimer
This project is not affiliated with, associated with, or endorsed by HashiCorp or Terraform.
"Terraform" and "Terraform Cloud" are trademarks of HashiCorp.
This project merely interacts with the Terraform Cloud public API under fair use.
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/configs/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md for configs/
This file provides guidance about configuration management for the Terraform Cloud MCP implementation.
## Context Activation
This guidance activates when:
- Working in `terraform_cloud_mcp/configs/` directory
- Creating/editing configuration files (*.py)
- Implementing configuration-driven functionality
- Adding new resource type configurations
**Companion directories**: utils/ (for usage), models/ (for validation)
## Configuration Architecture
The configs directory provides centralized configuration data to maintain consistency and enable easy maintenance:
### Core Configurations
- **filter_configs.py**: Response filtering configurations for token optimization
## Filter Configuration Standards
### Resource Type Configurations
When adding new resource types, add filter configurations to `FILTER_CONFIGS` (a sketch of a new entry follows this list):
- **always_remove**: Fields always filtered regardless of operation type
- **read_remove**: Fields filtered only for READ operations (detailed views)
- **list_remove**: Fields filtered only for LIST operations (pagination views)
- **essential_relationships**: Relationships to preserve (optional)
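As a hedged illustration of these fields, a new entry might look like the sketch below. The `FilterConfig` stand-in and the resource/field names are hypothetical, not copied from the real `filter_configs.py`:
```python
from dataclasses import dataclass, field
from typing import Set

@dataclass
class FilterConfig:
    """Stand-in for the real FilterConfig model (see models/filters.py)."""
    always_remove: Set[str] = field(default_factory=set)
    read_remove: Set[str] = field(default_factory=set)
    list_remove: Set[str] = field(default_factory=set)
    essential_relationships: Set[str] = field(default_factory=set)

# Hypothetical entry for a new "example-resource" type; the filtered field
# names are illustrative only -- review real API responses before filtering.
FILTER_CONFIGS = {
    "example-resource": FilterConfig(
        always_remove={"internal-debug-info"},            # never useful to callers
        read_remove={"verbose-render-cache"},             # drop only in detailed views
        list_remove={"description", "long-annotations"},  # drop only in list views
        essential_relationships={"organization", "workspace"},  # always preserved
    )
}
```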
### Detection Pattern Standards
Update detection patterns when adding new API endpoints (see the sketch after this list):
- **PATH_PATTERNS**: Add new path patterns in specificity order (most specific first)
- **DATA_TYPE_MAP**: Add fallback mappings for response data type detection
- **RESOURCE_TYPE_MAP**: Add string-to-enum conversions for new resource types
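A minimal sketch of how these detection structures fit together, again with hypothetical endpoint patterns and plain strings standing in for the real `ResourceType` enum values:
```python
import re

# Most specific patterns first, per the specificity-ordering rule above.
PATH_PATTERNS = [
    (re.compile(r"/workspaces/[^/]+/example-resources/[^/]+$"), "example-resource"),
    (re.compile(r"/workspaces/[^/]+$"), "workspace"),
]

# Fallback mapping from JSON:API "type" values to resource types
# (RESOURCE_TYPE_MAP would convert these strings to enum members).
DATA_TYPE_MAP = {"example-resources": "example-resource"}

def detect_resource_type(path: str, data_type: str | None = None) -> str | None:
    """Resolve a resource type from the request path, then fall back to the payload type."""
    for pattern, resource_type in PATH_PATTERNS:
        if pattern.search(path):
            return resource_type
    return DATA_TYPE_MAP.get(data_type or "")
```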
### Configuration Maintenance
When implementing new tools or API endpoints:
- **Review API responses**: Check for verbose/unnecessary fields that impact token usage
- **Test filtering effectiveness**: Ensure essential operational data is preserved
- **Update configurations**: Add new resource types or modify existing filter rules
- **Validate detection**: Ensure new endpoints are correctly detected and mapped
### MCP Context Preservation Requirements
As an MCP server for Terraform Cloud, filter configurations must preserve essential context:
- **Never filter essential fields**: Resource IDs, names, status, operational URLs, permissions
- **Preserve operational context**: Status information, timing data, progress indicators
- **Maintain relationships**: Resource connections and hierarchical data
- **Keep user-relevant data**: Configuration settings, decision support information
- **Conservative READ filtering**: Detailed views need comprehensive context
- **Selective LIST filtering**: Lists can be more aggressively filtered for identification
## Implementation Guidelines
### New Resource Type Process
1. **Add to models**: Define `ResourceType` enum value in `models/filters.py`
2. **Create filter config**: Add `FilterConfig` entry in `FILTER_CONFIGS`
3. **Update detection**: Add path patterns and type mappings as needed
4. **Test filtering**: Verify correct resource detection and appropriate filtering
### Configuration Quality Standards
- **Specificity ordering**: More specific patterns before general ones in `PATH_PATTERNS`
- **Conservative filtering**: Only remove fields that don't impact functionality
- **Documentation**: Document reasoning for field removal decisions
- **Testing**: Validate filter effectiveness after configuration changes
### Maintenance Triggers
Update configurations when:
- **New API endpoints**: Add detection patterns and filter rules
- **API response changes**: Review existing filter configurations
- **Token usage optimization**: Identify new fields for filtering
- **Performance issues**: Optimize filter configurations for better efficiency
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/api/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md for api/
This file provides guidance about the Terraform Cloud API client implementation.
## Context Activation
This guidance activates when:
- Working in `terraform_cloud_mcp/api/` directory
- Creating/editing API client files (*.py)
- Implementing HTTP request handling or authentication
- Adding redirect management or response processing
**Companion directories**: tools/ (for usage), models/ (for requests), utils/ (for helpers)
## API Client Architecture
The API client provides core functionality for Terraform Cloud API integration:
- **Authentication**: Terraform Cloud API token management
- **Request handling**: Formatting, submission, and response processing
- **Error management**: Consistent error handling across all API calls
- **Redirect handling**: Custom handling for Terraform Cloud pre-signed URLs
### Core Components
- **client.py**: `api_request()` main function and `handle_redirect()` for pre-signed URLs
## Implementation Standards
### Request Function Pattern
The `api_request()` function handles all API interactions with:
- **Path/Method**: API endpoint path and HTTP method (GET, POST, PATCH, DELETE)
- **Authentication**: Automatic token management via utils/env.py
- **Parameters**: Query parameters and request body handling
- **Response types**: JSON and text response processing
### Custom Redirect Management
Custom redirect handling (not httpx automatic) provides:
- **Authentication preservation**: Maintains auth headers through redirects
- **Content-type processing**: Handles pre-signed URL responses appropriately
- **Terraform Cloud compatibility**: Required for API pre-signed URL patterns
### Response Handling Standards
- **Success responses**: 200/201 return raw API data; 204 returns `{"status": "success", "status_code": 204}`
- **Response filtering**: Automatic filtering applied to GET requests returning JSON data
- **Error handling**: HTTP status errors, network issues, JSON parsing failures
- **Security**: Never logs tokens/credentials, validates inputs, proper error redaction
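The success-response convention above, paraphrased as code (not the actual `client.py` logic):
```python
def normalize_success(status_code: int, body: dict | None) -> dict:
    """200/201 pass the API data through unchanged; 204 has no body,
    so a synthetic status object is returned instead."""
    if status_code == 204:
        return {"status": "success", "status_code": 204}
    return body or {}
```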
## Usage Standards
### Integration Requirements
- **Error handling**: Always use with `@handle_api_errors` decorator from utils/decorators.py
- **Environment management**: Token handling via `get_tfc_token()` from utils/env.py
- **Payload creation**: Convert Pydantic models using utils/payload.py utilities
- **Content downloads**: Set `accept_text=True` for redirect-based content retrieval
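A hedged sketch of a tool wired to the client as described above. The `api_request()` signature and both stubs are assumptions made so the snippet stands alone; the real implementations live in `api/client.py` and `utils/decorators.py`:
```python
import functools
from typing import Any, Callable, Dict

APIResponse = Dict[str, Any]

def handle_api_errors(func: Callable[..., Any]) -> Callable[..., Any]:
    """Stub of utils/decorators.handle_api_errors: wraps failures into an error dict."""
    @functools.wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        try:
            return await func(*args, **kwargs)
        except Exception as exc:  # the real decorator is more granular
            return {"error": str(exc)}
    return wrapper

async def api_request(path: str, method: str = "GET", **kwargs: Any) -> APIResponse:
    """Stub standing in for api/client.api_request(); the signature is assumed."""
    return {"data": {"path": path, "method": method}}

@handle_api_errors
async def get_workspace_details(workspace_id: str) -> APIResponse:
    """Example read-only tool: token handling, filtering, and error handling
    all happen inside api_request() and the decorator."""
    return await api_request(f"workspaces/{workspace_id}")

# e.g. asyncio.run(get_workspace_details("ws-abc123"))
```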
## Development Standards
### Quality Checks
- **Format**: `ruff format .`
- **Lint**: `ruff check .`
- **Type Check**: `mypy .`
- **Test**: `pytest`
### API Client Requirements
- All functions must include proper error handling and consistent response format
- Apply security guidelines for token and credential management
- Follow established patterns for request formatting and response processing
- Test with comprehensive coverage of success, error, and edge cases
## Integration Guidelines
### Tool Integration
When API client is used in tools:
- Always use with `@handle_api_errors` decorator
- Apply proper payload creation using utility functions
- Handle redirects appropriately for content downloads
- Use centralized token management
### Model Integration
When API client works with models:
- Convert Pydantic models using payload utilities
- Apply proper request validation before API calls
- Handle model validation errors appropriately
- Ensure proper type safety throughout request pipeline
### Utility Integration
When API client works with utilities:
- Use environment management for token handling
- Apply payload creation utilities for JSON:API compliance
- Use error handling decorators consistently
- Apply security practices for sensitive data
## Implementation Workflow
### API Client Enhancement Process
1. **Define request patterns**: Follow established function signatures
2. **Implement authentication**: Use centralized token management
3. **Add error handling**: Apply consistent response formats
4. **Handle redirects**: Use custom redirect management where needed
5. **Test thoroughly**: Cover success, error, network, and redirect scenarios
6. **Update documentation**: Implementation status tracking
### Quality Validation Checklist
For each API client enhancement:
- [ ] Function includes proper authentication and error handling
- [ ] Custom redirect handling applied where needed for TFC pre-signed URLs
- [ ] Security guidelines followed for token and credential management
- [ ] Quality checks passed: format, lint, type check
- [ ] Tests cover all scenarios: success, error, network issues, redirects
- [ ] Documentation updated: implementation status tracking
```
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
# Terraform Cloud MCP Development Guide
## Subtree Discovery System
This project uses Claude Code's automatic subtree discovery for context-aware guidance. Component-specific CLAUDE.md files are automatically loaded when working in their directories:
- **docs/** - Documentation standards and templates
- **terraform_cloud_mcp/tools/** - MCP tools implementation patterns
- **terraform_cloud_mcp/models/** - Pydantic models and validation
- **terraform_cloud_mcp/utils/** - Utility functions and error handling
- **terraform_cloud_mcp/api/** - API client patterns
## Working Directory Guidance
### When Working on Tool Implementation
- **Directory**: `terraform_cloud_mcp/tools/`
- **Context**: Automatic loading of tools-specific patterns
- **Focus**: MCP function signatures, error handling, API integration
### When Working on Data Models
- **Directory**: `terraform_cloud_mcp/models/`
- **Context**: Automatic loading of model validation patterns
- **Focus**: Pydantic models, validation rules, type definitions
### When Working on Utilities
- **Directory**: `terraform_cloud_mcp/utils/`
- **Context**: Automatic loading of utility function patterns
- **Focus**: Error handling, API helpers, common operations
### When Working on API Client
- **Directory**: `terraform_cloud_mcp/api/`
- **Context**: Automatic loading of API client patterns
- **Focus**: HTTP client, authentication, request handling
### When Working on Documentation
- **Directory**: `docs/`
- **Context**: Automatic loading of documentation standards
- **Focus**: Templates, formatting, structure requirements
## Development Commands
### Quality Checks
- **Format**: `ruff format .`
- **Lint**: `ruff check .`
- **Type Check**: `mypy .`
- **Test**: `pytest`
### Build Commands
- **Install**: `uv pip install -e .`
- **Dev Install**: `uv pip install -e .[dev]`
- **Requirements**: `uv pip compile requirements.in`
## Core Principles
### Decision-Making Process
1. **Use component guidance**: Let subtree discovery load relevant context
2. **Apply documented patterns**: Follow loaded component standards
3. **Avoid code inference**: Use documented criteria only
4. **Validate against patterns**: Check implementation against guidance
### Task Complexity Assessment
- **Simple** (< 2 hours): Direct implementation, minimal planning
- **Medium** (2-8 hours): Use TodoWrite for 3-5 tasks
- **Complex** (> 8 hours): Use subagents (2-6) with comprehensive TodoWrite planning
## Documentation Standards
### Required Documentation Updates
For any new implementation, always update:
- **TASKS.md**: Move features from planned to completed status
- **docs/API_REFERENCES.md**: Mark corresponding API sections as implemented
- **docs/DEVELOPMENT.md**: Add new patterns if applicable
- **docs/CONTRIBUTING.md**: Update contribution guidelines if new patterns introduced
### Implementation Standards
- **Use utility functions**: `create_api_payload()`, `query_params()`, `@handle_api_errors`
- **Apply response filtering**: Automatic filtering system in `utils/filters.py` handles all GET requests
- **Follow component patterns**: Apply standards from automatically loaded component CLAUDE.md files
- **Run quality checks**: Format, lint, type check before completion
- **Update relevant documentation**: Maintain consistency across project
### Response Filtering Implementation Context
This MCP server implements audit-safe response filtering with the following constraints:
- **Primary use case**: Audit and compliance scenarios requiring complete data integrity
- **Filtering approach**: Conservative token optimization (5-15% reduction) to preserve audit trails
- **Critical preservation requirements**:
  - User accountability data (`created-at`, `updated-at`, `version-id`)
  - Security configuration (permissions, auth policies)
  - Change tracking (status timestamps, source information)
  - Operational context (status, timing, progress indicators)
- **Decision principle**: Preserve fields when audit impact is uncertain
- **Implementation location**: `utils/filters.py` with configurations in `configs/filter_configs.py`
## Task Management
### TodoWrite Usage
Use TodoWrite for tasks with:
- Multiple distinct steps (> 2)
- Multiple file changes
- Cross-component coordination
- Medium to complex scope
### Progress Standards
- Mark todos `in_progress` before starting
- Mark `completed` immediately after finishing
- Update status in real-time
- Provide concise progress updates for major phases
## Memory System Maintenance
### Periodic Review Schedule
- **Monthly**: Review component CLAUDE.md files for outdated patterns
- **After major features**: Update decision matrices and workflow patterns
- **When adding new domains**: Verify subtree discovery optimization
- **Quality check failures**: Review and update development standards
### Maintenance Triggers
- **New implementation patterns**: Update relevant component guidance
- **API changes**: Review model and tool implementation standards
- **Development workflow changes**: Update quality check commands
- **Documentation structure changes**: Update docs/ guidance
- **Filter configuration changes**: When new API fields appear, assess preservation vs. optimization trade-offs
- **Compliance requirement changes**: Review filter configurations if audit use cases evolve
### Memory Optimization Indicators
- **High context usage**: Review for redundant information across files
- **Frequent cross-referencing**: Consider consolidating related guidance
- **Outdated patterns**: Remove deprecated implementation approaches
- **Missing decision criteria**: Add decision matrices for common scenarios
```
--------------------------------------------------------------------------------
/docs/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md for docs/
This file provides guidance about the documentation structure and standards in this repository.
## Context Activation
This guidance activates when:
- Working in `docs/` directory
- Creating/editing documentation files (*.md)
- Implementing multi-layer documentation structure
- Adding cross-references or updating documentation standards
**Companion directories**: All component directories (tools/, models/, utils/, api/)
## Documentation Architecture
### Core Files
- **DEVELOPMENT.md**: Development standards and comprehensive build/quality guidance
- **CONTRIBUTING.md**: Contributing guidelines and PR process
- **README.md**: Documentation directory overview
- **API_REFERENCES.md**: Terraform Cloud API reference links and implementation status
### Multi-Layer Structure
Documentation follows a consistent 4-layer pattern for each domain:
1. **conversations/**: Real-world usage examples and interaction patterns
2. **models/**: Pydantic model documentation with validation rules and API mappings
3. **tools/**: API tool reference with signatures, parameters, and error scenarios
4. **Code docstrings**: Implementation-level documentation with cross-references
## Documentation Standards
### Core Principles
- **Consistency**: Each domain has documentation across all 4 layers
- **Implementation-focused**: Reference actual code rather than duplicate examples
- **Cross-referenced**: All layers link to related documentation
- **API-aligned**: Include Terraform Cloud API documentation references
### Quality Requirements
- Document input parameters, return types, and error handling
- Organize examples from basic to advanced
- Use consistent anchor names (#lowercase-with-hyphens)
- Maintain valid cross-references with relative paths
## New Tool Documentation Workflow
### Implementation Steps
1. **Model Documentation**: Create `docs/models/{domain}.md` with validation rules and API mappings
2. **Tool Documentation**: Create `docs/tools/{domain}.md` with function signatures and examples
3. **Conversation Examples**: Create `docs/conversations/{domain}-conversation.md` with usage scenarios
4. **Cross-References**: Link all layers bidirectionally
5. **Integration**: Update `docs/README.md` and relevant CLAUDE.md files
### Cross-Reference Requirements
Every new tool must maintain bidirectional links across all 4 layers:
- **Code docstrings** → docs/tools/ sections
- **docs/tools/** ↔ docs/models/ (bidirectional)
- **docs/tools/** → docs/conversations/ examples
- **docs/models/** → tools that use the model
- **docs/conversations/** → specific tool and model sections
### Validation Checklist
- [ ] All markdown links use relative paths
- [ ] Cross-references use consistent anchor names (#lowercase-with-hyphens)
- [ ] Each layer references appropriate related layers
- [ ] All links are valid and accessible
## Development Standards
### Quality Checks
- **Format**: `ruff format .`
- **Lint**: `ruff check .`
- **Type Check**: `mypy .`
- **Test**: `pytest`
### Documentation Quality Standards
- Setup and quality check sequences
- Comprehensive validation process for all documentation layers
- KISS/DRY principles applied to documentation structure
- Error handling patterns documented consistently
### AI Documentation Guidelines
- Maintain consistency with existing documentation patterns
- Reference actual code implementations rather than duplicate examples
- Ensure examples include proper error handling
- Keep examples concise but comprehensive
- Follow cross-reference requirements for all layers
## Implementation Workflow
### New Documentation Creation Process
1. **Core Status Files**: Update TASKS.md and API_REFERENCES.md to reflect implementation progress
2. **Multi-Layer Documentation**: Create docs/models/, docs/tools/, and docs/conversations/ for new domains
3. **Cross-References**: Establish bidirectional links across all 4 layers
4. **Integration**: Update docs/README.md with new documentation sections
5. **Validation**: Test all links and anchor references
### Documentation Quality Checklist
For each new documentation implementation:
- [ ] All 4 documentation layers created (conversations, models, tools, code docstrings)
- [ ] Cross-references established bidirectionally between all layers
- [ ] TASKS.md and API_REFERENCES.md updated to reflect new capabilities
- [ ] All markdown links use relative paths and valid anchors
- [ ] Examples include proper error handling and follow established patterns
- [ ] Documentation follows consistent template structure
---
## Documentation Templates
### Code Docstring Template
```python
"""Tool description with clear usage context.
API endpoint: METHOD /path/to/endpoint
Args:
param_name: Description with format/constraints (e.g., "ws-xxxxxxxx")
Returns:
Description of return structure and key fields
See:
docs/tools/domain.md#tool-name for usage examples
"""
```
### Tool Reference Template
```markdown
### tool_name
**Function:** `tool_name(param1: str, param2: int = 0) -> APIResponse`
**Description:** What it does and when to use it
**Parameters:**
- param1: Description with format requirements
- param2: Description with default value
**Returns:** Return structure explanation
**Models:** [DomainModel](../models/domain.md#domainmodel)
**Examples:** [Usage Scenario](../conversations/domain-conversation.md#scenario-name)
```
### Model Documentation Template
```markdown
### ModelName
**Purpose:** What this model validates
**Used by:** [tool_name](../tools/domain.md#tool-name), [other_tool](../tools/domain.md#other-tool)
**Fields:** Field descriptions and validation rules
**API Mapping:** model_field -> "api-field-name"
```
### Conversation Example Template
```markdown
## Scenario Name
**Tools used:** [tool_name](../tools/domain.md#tool-name)
**Models:** [ModelName](../models/domain.md#modelname)
**Description:** Realistic usage scenario with expected inputs/outputs
```
```
--------------------------------------------------------------------------------
/docs/CONTRIBUTING.md:
--------------------------------------------------------------------------------
```markdown
# Contributing to Terraform Cloud MCP
Thank you for your interest in contributing to the Terraform Cloud MCP project! This document provides guidelines and instructions for contributing to this project.
## Getting Started
1. Fork the repository on GitHub
2. Clone your forked repository locally
3. Set up the development environment as described in the [Development Guide](DEVELOPMENT.md)
## Development Environment
```bash
# Clone the repository
git clone https://github.com/severity1/terraform-cloud-mcp.git
cd terraform-cloud-mcp
# Create virtual environment and activate it
uv venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate
# Install in development mode with development dependencies
uv pip install -e .
uv pip install black mypy pydantic ruff
```
## Code Quality Standards
Before submitting your contribution, please ensure your code meets our quality standards:
1. **Type Checking**: Use proper type hints everywhere
```bash
uv run -m mypy .
```
2. **Linting**: Ensure code follows our style guidelines
```bash
uv run -m ruff check .
```
3. **Formatting**: Format code with Black
```bash
uv run -m black .
```
4. **Tests**: Add tests for new functionality
```bash
uv run -m unittest discover tests
```
## Contribution Guidelines
### Pull Request Process
1. Create a feature branch for your changes
2. Make your changes following the code style guidelines in the [Development Guide](DEVELOPMENT.md)
3. Add or update tests to verify your changes
4. Update documentation, including:
   - Function docstrings
   - Example files in the appropriate `docs/` subdirectory
   - README.md if adding new features or changing APIs
5. Run all quality checks to ensure your code meets our standards
6. Commit your changes with a clear, descriptive commit message
7. Push your branch and create a pull request
### Commit Messages
Write clear, descriptive commit messages that explain *why* the change was made, not just *what* changed. For example:
```
Fix workspace creation validation for execution mode
The workspace creation API was not properly validating the execution
mode and defaulting to 'local' instead of 'remote', causing confusion
for users. This fix ensures defaults match API documentation.
```
## Extending the Server
To add new functionality to the MCP server (a condensed sketch follows this list):
1. Add model classes in the `terraform_cloud_mcp/models` directory:
   - Define enums for constrained choices
   - Create request models inheriting from `APIRequest`
   - Create a `*Params` model for function parameters
   - For examples, see:
     - `account.py` for simple request models
     - `workspaces.py` for comprehensive models with params
     - `cost_estimates.py` for models with enums and status tracking
2. Add tool functions in the `terraform_cloud_mcp/tools` directory:
   - Accept typed `params` objects instead of `**kwargs`
   - Use the `@handle_api_errors` decorator
   - Use utility functions from `utils/payload.py` for JSON:API payloads
   - Use utility functions from `utils/request.py` for parameters
   - Return `APIResponse` type
   - For examples, see:
     - `account.py` for simple GET operations
     - `workspaces.py` for full CRUD operations
     - `cost_estimates.py` for specialized retrieval operations
3. Register new tools in `terraform_cloud_mcp/server.py`:
   - Add import statements at the top
   - Use `mcp.tool()(module_name.function_name)` to register each function
   - Group related tools together with comments
4. Follow the Pydantic pattern for parameter validation and error handling
5. Ensure all functions include proper type hints and docstrings
6. Update documentation in the appropriate places (following reference-based documentation approach):
   - Add model documentation to `docs/models/` (e.g., `cost_estimate.md`) with model structure, validation rules, and references to actual implementations
   - Add tool reference documentation to `docs/tools/` (e.g., `cost_estimate.md`) following the established format:
     - Overview section explaining the tool's purpose
     - API Reference section with links to Terraform Cloud API documentation
     - Tools Reference section with function signatures, parameters, return values, and references to actual implementations
     - Notes section for important usage information
     - Common Error Scenarios section in table format
   - Add conversation examples to `docs/conversations/` (e.g., `cost-estimate-conversation.md`) showing real-world usage patterns with the API
   - Update `docs/README.md` to include new functionality
   - Update `README.md` to include new functionality
   - Update `docs/CONTRIBUTING.md` (this file) to reflect new patterns or processes
   - Update `docs/DEVELOPMENT.md` to include new development standards or patterns
7. Update existing integration files:
   - Add exports to `models/__init__.py`
   - Add imports to `tools/__init__.py`
8. Update `CLAUDE.md` files to document new functionality:
   - Update main `CLAUDE.md` if adding major new components
   - Update `docs/CLAUDE.md` with changes to documentation structure or standards
   - Update component-specific CLAUDE.md files as needed:
     - `terraform_cloud_mcp/api/CLAUDE.md` for API client changes
     - `terraform_cloud_mcp/models/CLAUDE.md` for model patterns
     - `terraform_cloud_mcp/tools/CLAUDE.md` for tool implementation patterns
     - `terraform_cloud_mcp/utils/CLAUDE.md` for utility function patterns
   - These files should include:
     - New patterns introduced
     - Additional examples for AI assistance
     - Component-specific guidelines
   - These files are critical for AI-assisted development and should document any non-obvious patterns
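A condensed, hypothetical sketch of steps 1-3 above (model, tool, registration). Names such as `WidgetParams` and `create_widget` are invented for illustration, and the stubs stand in for the real `APIRequest` base class and `@handle_api_errors` decorator:
```python
from typing import Any, Dict, Optional
from pydantic import BaseModel

APIResponse = Dict[str, Any]

# Step 1 -- models/widgets.py: request models inherit from the shared APIRequest
# base (stubbed here as a plain BaseModel) plus a *Params model for options.
class APIRequest(BaseModel):  # stand-in for models/base.APIRequest
    pass

class WidgetParams(APIRequest):
    description: Optional[str] = None
    color: Optional[str] = None

# Step 2 -- tools/widgets.py: typed params, decorator, APIResponse return type.
def handle_api_errors(func):  # stand-in for utils/decorators.handle_api_errors
    return func

@handle_api_errors
async def create_widget(
    organization: str, name: str, params: Optional[WidgetParams] = None
) -> APIResponse:
    """POST /organizations/:organization/widgets (hypothetical endpoint)."""
    payload = {"organization": organization, "name": name}
    if params:
        payload.update(params.model_dump(exclude_none=True))
    return {"data": payload}  # the real tool would call api_request() here

# Step 3 -- server.py: register the tool (conditionally, if it were destructive).
# mcp.tool()(widgets.create_widget)
```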
## Release Process
If you are a maintainer with release permissions, follow these steps for releasing a new version:
1. Update version number in:
   - `pyproject.toml`
   - `README.md` badges
   - Create release notes
2. Run quality checks:
   - `uv run -m mypy .`
   - `uv run -m ruff check .`
   - `uv run -m black --check .`
   - `uv run -m unittest discover tests`
3. Commit changes with clear message
4. Tag the release with the version number:
   - `git tag v0.x.y`
5. Push changes and tags:
   - `git push origin main --tags`
## Questions?
If you have any questions or need help, please open an issue on GitHub and we'll be happy to assist you.
Thank you for contributing!
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/tools/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md for tools/
This file provides guidance for MCP tool implementations that expose Terraform Cloud API functionality to AI assistants.
## Context Activation
This guidance activates when:
- Working in `terraform_cloud_mcp/tools/` directory
- Reading/editing tool implementation files (*.py)
- Implementing MCP functions for Terraform Cloud API
- Adding new tool domains or expanding existing tools
**Companion directories**: models/ (for validation), utils/ (for utilities), api/ (for client)
## Tool Architecture
### Directory Structure
- **__init__.py**: Tool registration and imports
- **Domain modules**: Account, workspaces, runs, plans, applies, organizations, projects, cost estimates, assessment results
### Implementation Standards
- **Consistent patterns**: All tools follow standardized implementation patterns
- **Error handling**: Use @handle_api_errors decorator for consistent error management
- **Async patterns**: All API functions are async using httpx
- **Parameter validation**: Use Pydantic models for input validation
## Tool Organization
### Operation Categories
- **CRUD**: create_*, get_*, update_*, delete_* operations
- **List**: list_* operations with filtering and pagination
- **Actions**: lock_*, unlock_*, apply_*, cancel_* state changes
- **Specialized**: Domain-specific operations
### File Organization Rules
- **Add to existing**: Tool fits domain and file has < 15 functions
- **Create new file**: New domain OR existing file ≥ 15 functions
- **Split file**: When file exceeds 20 functions, split by logical sub-domains
- **Domain boundaries**: Create new domain for ≥ 5 conceptually distinct tools
- **Naming**: Use singular form matching API domain (e.g., workspace.py)
### Registration Classification
- **Non-destructive**: get_*, list_*, create_*, update_* (basic registration)
- **Destructive**: delete_*, force_*, *_unlock affecting running processes (conditional)
- **Potentially destructive**: cancel_*, discard_* operations (case-by-case)
## Decision Matrices
### When to Create New File vs Add to Existing
| Scenario | New File | Add to Existing |
|----------|----------|-----------------|
| New API domain (≥5 conceptually distinct tools) | ✅ | ❌ |
| Existing file has ≥15 functions | ✅ | ❌ |
| Tool fits existing domain + file <15 functions | ❌ | ✅ |
| Existing file will exceed 20 functions | ✅ Split by sub-domains | ❌ |
### Tool Registration Decision Matrix
| Operation Type | Examples | Registration | Reason |
|----------------|----------|-------------|---------|
| Non-destructive | get_*, list_*, create_*, update_* | Basic | Safe operations |
| Destructive | delete_*, force_*, *_unlock | Conditional | Affects running processes |
| Potentially destructive | cancel_*, discard_* | Case-by-case | Context-dependent impact |
### Function Signature Pattern Decision
| Parameter Count | Structure | Example |
|-----------------|-----------|---------|
| 1-2 required | Direct parameters | `get_workspace(workspace_id)` |
| 3+ required | Routing + individual + params | `create_workspace(org, name, params)` |
| 5+ optional | Use params object | `update_workspace(org, name, params)` |
| <5 optional | Direct parameters | `list_workspaces(org, search, page)` |
## Implementation Requirements
### Essential Patterns
1. Use @handle_api_errors decorator for consistent error handling
2. Create corresponding Pydantic models for validation
3. Follow function signature pattern: (required_routing_params, optional_individual_params, optional_params_object)
4. Use utility functions for payload creation and parameter handling:
- `create_api_payload()` for JSON:API compliant payload creation
- `query_params()` for transforming Pydantic models to API parameters
5. Document thoroughly with API endpoint references
6. Register appropriately in server.py based on destructiveness
### Function Signature Patterns
Tool functions follow a consistent parameter structure. See `variables.py:create_workspace_variable` for the standard pattern.
**Parameter Order:**
1. **Required routing parameters** (workspace_id, organization, etc.)
2. **Required API parameters** (key, category, name, etc.)
3. **Optional params object** for additional configuration
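A minimal sketch of this ordering inside a tool module (simplified, not the shipped implementation; the `params` dict shape, the `exclude_fields` choice, and the `api_request` keyword arguments are assumptions for illustration):
```python
from typing import Optional

from ..api.client import api_request
from ..models.base import APIResponse
from ..models.variables import WorkspaceVariableCreateRequest
from ..utils.decorators import handle_api_errors
from ..utils.payload import create_api_payload


@handle_api_errors
async def create_workspace_variable(
    workspace_id: str,              # 1. required routing parameter
    key: str,                       # 2. required API parameters
    category: str,
    params: Optional[dict] = None,  # 3. optional params object (shape assumed)
) -> APIResponse:
    request = WorkspaceVariableCreateRequest(
        workspace_id=workspace_id, key=key, category=category, **(params or {})
    )
    payload = create_api_payload("vars", request, exclude_fields={"workspace_id"})
    # POST keyword arguments are assumed; api/client.py defines the real signature
    return await api_request(
        f"workspaces/{workspace_id}/vars", method="POST", data=payload
    )
```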
### Query Parameter Pattern
For list operations with filtering/pagination, use `query_params()` utility. See `workspaces.py:list_workspaces` for the standard pattern of transforming request models to API parameters.
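A simplified sketch of that flow (not the shipped implementation; `WorkspaceListRequest` and its fields are assumed here):
```python
from ..api.client import api_request
from ..models.base import APIResponse
from ..models.workspaces import WorkspaceListRequest  # model name assumed
from ..utils.decorators import handle_api_errors
from ..utils.request import query_params


@handle_api_errors
async def list_workspaces(
    organization: str, page_number: int = 1, page_size: int = 20, search: str = ""
) -> APIResponse:
    request = WorkspaceListRequest(
        organization=organization,
        page_number=page_number,
        page_size=page_size,
        search=search,
    )
    query = query_params(request)  # routing fields such as `organization` are skipped
    return await api_request(f"organizations/{organization}/workspaces", params=query)
```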
### Documentation Integration
- Reference API_REFERENCES.md for official API groupings
- Create usage examples in docs/tools/
- Link to model definitions and conversation examples
## Development Standards
### Quality Checks
- **Format**: `ruff format .`
- **Lint**: `ruff check .`
- **Type Check**: `mypy .`
- **Test**: `pytest`
### Code Style Requirements
- Use @handle_api_errors decorator for all API functions
- Follow (required_routing_params, optional_individual_params, optional_params_object) signature pattern
- Apply async patterns with httpx for all API calls
- Use Pydantic models for input validation
- Apply comprehensive testing: Happy path → Edge cases → Error cases → Integration
### Model Integration
When working with models:
- Create corresponding Pydantic models for all tool parameters
- Use model validation for input parameters
- Follow field naming conventions (snake_case for Python, kebab-case for API)
- Apply proper type hints and validation rules
### Utility Function Usage
Essential utility functions for tool implementation:
- `create_api_payload()`: For JSON:API compliant payload creation
- `query_params()`: For transforming Pydantic models to API parameters
- `@handle_api_errors`: Decorator for consistent error handling
- Request helpers: Authentication, pagination, filtering
## Implementation Workflow
### New Tool Development Process
1. **Define function signature**: Follow parameter order pattern
2. **Create Pydantic models**: For validation and type safety
3. **Implement core logic**: Using utility functions and decorators
4. **Add error handling**: Apply @handle_api_errors decorator
5. **Register in server.py**: Based on destructiveness classification
6. **Test thoroughly**: Cover happy path, edge cases, and error conditions
7. **Update documentation**: TASKS.md, API_REFERENCES.md status updates
### Quality Validation Checklist
For each tool implementation:
- [ ] Function follows Essential Patterns (decorator, models, utilities)
- [ ] Pydantic models created and validated
- [ ] Tool registered in server.py with appropriate classification
- [ ] Quality checks passed: format, lint, type check
- [ ] Documentation updated: implementation status tracking
- [ ] Tests cover all scenarios: success, edge cases, errors
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md for models/
This file provides guidance about the Pydantic data models for Terraform Cloud API request validation.
## Context Activation
This guidance activates when:
- Working in `terraform_cloud_mcp/models/` directory
- Creating/editing Pydantic model files (*.py)
- Implementing request validation models
- Adding field validation or model hierarchies
**Companion directories**: tools/ (for usage), utils/ (for integration), api/ (for requests)
## Model Architecture
The models directory provides request validation through Pydantic models with:
- **Input validation**: API request parameter validation and type safety
- **Field mapping**: Aliases for kebab-case to snake_case API compatibility
- **Response typing**: All responses typed as `APIResponse = Dict[str, Any]` (not validated)
### Core Components
- **base.py**: `APIRequest` base class, `BaseModelConfig`, and `APIResponse` type alias
- **Domain modules**: Account, workspaces, runs, plans, applies, organizations, projects, cost estimates, assessment results
### Model Categories
- **Request models**: Validate API request parameters with field aliases and direct parameter fields
- **Parameter models**: Simplified configuration objects used only for operations with complex parameter sets
- **Enums**: Constrained choices based on API documentation
## Implementation Standards
### Model Patterns
- **Request models**: Extend `APIRequest`, include required fields directly and optional fields as individual parameters
- **Parameter models**: Create `*Params` objects only when operations have complex parameter sets requiring separation
- **Direct parameter approach**: Most models now include optional fields directly rather than nested in `params` objects
- **Base models**: Create `Base*Request` when ≥3 operations share >50% of fields
- **Enums**: Use for fields with API-documented constrained choices
### Implementation Approach
Request models include all parameters directly. See `variables.py:WorkspaceVariableCreateRequest` for the direct parameter pattern where optional fields are included in the model rather than nested in params objects.
### Field Standards
- **Required**: `Field(...)` with no default (follows API documentation)
- **Optional**: `Optional[Type]` with appropriate default values
- **Aliases**: Always use `alias="kebab-case-name"` for API compatibility
- **Validation**: Apply Field constraints when API docs specify limits
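A minimal sketch applying these standards to a hypothetical model (not one of the shipped models; the field names and the 512-character limit are illustrative):
```python
from enum import Enum
from typing import Optional

from pydantic import Field

from terraform_cloud_mcp.models.base import APIRequest


class ExampleCategory(str, Enum):
    """Illustrative enum for an API-documented constrained choice."""

    TERRAFORM = "terraform"
    ENV = "env"


class ExampleVariableCreateRequest(APIRequest):
    """Hypothetical request model demonstrating the field standards."""

    workspace_id: str = Field(..., description="Required routing parameter")
    key: str = Field(..., description="Required field, no default")
    category: ExampleCategory = Field(..., description="API-documented constrained choice")
    description: Optional[str] = Field(
        None, max_length=512, description="Optional field; limit shown is illustrative"
    )
    working_directory: Optional[str] = Field(
        None,
        alias="working-directory",
        description="snake_case field with a kebab-case alias",
    )
```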
## Decision Criteria
### Model Creation Rules
- **BaseRequest**: When ≥3 operations share >50% of fields
- **Params objects**: Use sparingly - only when parameter sets are complex and need separation from routing parameters
- **Direct parameters**: Prefer including optional fields directly in request models rather than nested params
- **Separate Create/Update**: When models differ by >2 fields or validation rules
- **Enums**: When API docs specify constrained values (not examples)
## Decision Matrices
### Model Structure Decision Matrix
| Scenario | Base Model | Separate Models | Direct Parameters |
|----------|------------|-----------------|-------------------|
| ≥3 operations share >50% fields | ✅ | ❌ | ❌ |
| Operations differ by >2 fields | ❌ | ✅ | ❌ |
| <5 optional parameters | ❌ | ❌ | ✅ |
| Complex parameter validation needed | ❌ | Use params object | ❌ |
### Validation Strategy Decision Matrix
| Field Type | Validation Approach | Example |
|------------|---------------------|---------|
| API-documented constraints | Apply Field constraints | `maxLength=128` |
| Optional with API default | Use API default value | `default="latest"` |
| Truly optional in API | Use `None` default | `default=None` |
| Constrained choices in API docs | Create Enum | `ExecutionMode` |
### Parameter Object vs Direct Fields
| Criteria | Use Params Object | Use Direct Fields |
|----------|-------------------|-------------------|
| >5 optional parameters | ✅ | ❌ |
| Complex validation rules | ✅ | ❌ |
| Reuse across operations | ✅ | ❌ |
| Simple operation-specific | ❌ | ✅ |
### When to Use Params Objects vs Direct Parameters
**Use Params Objects When:**
- Tool function has >5 optional parameters requiring complex validation
- Parameters need reuse across multiple similar operations
- Separation of concerns between routing and configuration parameters is critical
**Use Direct Parameters When (Preferred):**
- Model has <5 optional parameters
- Parameters are operation-specific
- Simpler model structure improves maintainability
### Field Validation Rules
- **Validation constraints**: Apply when API docs specify limits (length, range, format)
- **Default values**: Use API defaults; None only when field truly optional in API
- **Required vs Optional**: Follow API documentation exactly
- **Field organization**: Group by domain, clear inheritance with APIRequest
## Development Standards
### Quality Checks
- **Format**: `ruff format .`
- **Lint**: `ruff check .`
- **Type Check**: `mypy .`
- **Test**: `pytest`
### Model-Specific Requirements
- All fields must have proper type hints and API-based validation
- Use `BaseModelConfig` and `APIRequest` for consistency
- Document all field aliases and constraint reasoning
- Apply comprehensive testing for validation rules
### Development Workflow
1. **Pattern implementation**: Follow model patterns and base classes
2. **API validation**: Ensure field alignment with API documentation
3. **Quality checks**: Format, lint, type check validation
4. **Alias mapping**: Verify kebab-case API compatibility
## Tool Integration
### Using Models in Tools
- Import models for parameter validation in tool functions
- Apply model validation to ensure type safety
- Use model instances for payload creation
- Leverage field aliases for API compatibility
### Payload Creation Integration
When working with utilities:
- Use `create_api_payload()` with model instances
- Apply `query_params()` for transforming model data to API parameters
- Handle model validation errors appropriately
- Ensure proper error handling for invalid inputs
### API Client Integration
When working with API client:
- Use models for request parameter validation
- Apply model serialization for API requests
- Handle validation errors before API calls
- Ensure proper type safety throughout request pipeline
## Implementation Workflow
### New Model Development Process
1. **Define model structure**: Extend `APIRequest` base class
2. **Add field validation**: Follow API documentation requirements
3. **Apply aliases**: Use kebab-case aliases for API compatibility
4. **Create enums**: For API-documented constrained choices
5. **Test validation**: Cover valid inputs, invalid inputs, edge cases
6. **Update documentation**: Implementation status tracking
### Quality Validation Checklist
For each model implementation:
- [ ] Model extends `APIRequest` base class with appropriate field aliases
- [ ] Field validation follows API documentation requirements exactly
- [ ] Parameter models created when needed for complex operations
- [ ] Enums defined for API-documented constrained choices
- [ ] Quality checks passed: format, lint, type check
- [ ] Documentation updated: implementation status tracking
- [ ] Tests cover validation scenarios: valid, invalid, edge cases
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/utils/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md for utils/
This file provides guidance about shared utility functions for consistent operations across the Terraform Cloud MCP implementation.
## Context Activation
This guidance activates when:
- Working in `terraform_cloud_mcp/utils/` directory
- Creating/editing utility functions (*.py)
- Implementing shared functionality (decorators, helpers, formatters)
- Adding error handling or payload creation utilities
**Companion directories**: tools/ (for usage), models/ (for integration), api/ (for requests)
## Utility Architecture
The utils directory provides common functionality to maintain consistency and reduce code duplication:
### Core Utilities
- **decorators.py**: `handle_api_errors` decorator for consistent error handling
- **env.py**: Centralized environment variable management (tokens, feature flags)
- **payload.py**: JSON:API payload creation and relationship management
- **request.py**: Query parameter transformation for API requests
- **filters.py**: Response filtering system for token optimization
## Implementation Standards
### Error Handling Pattern
- **@handle_api_errors decorator**: Apply to all API functions for consistent error formatting
- **Error format**: `{"error": "message"}` (never expose sensitive data)
- **Success preservation**: Decorator preserves successful responses without modification
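A sketch of the decorator in use, as a tool module would apply it (the endpoint is illustrative):
```python
from terraform_cloud_mcp.api.client import api_request
from terraform_cloud_mcp.models.base import APIResponse
from terraform_cloud_mcp.utils.decorators import handle_api_errors


@handle_api_errors
async def get_example_resource(resource_id: str) -> APIResponse:
    # A raised ValueError (e.g. failed Pydantic validation) is returned as
    # {"error": "..."}; successful responses pass through unchanged
    return await api_request(f"example-resources/{resource_id}")
```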
### Environment Management
- **get_tfc_token()**: Centralized TFC_TOKEN access (never use direct os.getenv)
- **should_enable_delete_tools()**: Safety control for destructive operations
- **Centralized access**: All environment variables managed through utils/env.py
### Payload Creation
- **create_api_payload()**: JSON:API compliant payload creation from Pydantic models
- **add_relationship()**: Standardized relationship management for JSON:API
- **query_params()**: Pydantic model to API parameter transformation for list/filter operations
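A self-contained sketch of JSON:API payload creation (the stand-in model and the organization relationship are illustrative):
```python
from pydantic import BaseModel

from terraform_cloud_mcp.utils.payload import add_relationship, create_api_payload


class ExampleProjectCreateRequest(BaseModel):  # illustrative stand-in model
    name: str


request = ExampleProjectCreateRequest(name="demo-project")
payload = create_api_payload("projects", request)
payload = add_relationship(payload, "organization", "organizations", "my-org")
# payload == {
#     "data": {
#         "type": "projects",
#         "attributes": {"name": "demo-project"},
#         "relationships": {
#             "organization": {"data": {"type": "organizations", "id": "my-org"}}
#         },
#     }
# }
```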
### Query Parameter Transformation
The `query_params()` function transforms Pydantic model fields to API parameters using consistent naming conventions:
- **Pagination**: `page_number` → `page[number]`, `page_size` → `page[size]`
- **Filters**: `filter_name` → `filter[name]`, `filter_permissions_update` → `filter[permissions][update]`
- **Search**: `search_term` → `search[term]`, `search_user` → `search[user]`
- **Query**: `query_email` → `q[email]`, `query_name` → `q[name]`
- **Direct params**: `q`, `search`, `sort` mapped directly
Usage pattern for all list operations: See `../tools/workspaces.py:list_workspaces` for the standard query parameter transformation approach.
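A concrete sketch of the transformation using a throwaway model (field names follow the conventions above):
```python
from typing import Optional

from pydantic import BaseModel

from terraform_cloud_mcp.utils.request import query_params


class ExampleListRequest(BaseModel):  # throwaway model, not a shipped one
    organization: str                 # routing field: skipped by query_params
    page_number: Optional[int] = 1
    page_size: Optional[int] = 20
    search_name: Optional[str] = None
    filter_permissions_update: Optional[bool] = None


print(
    query_params(
        ExampleListRequest(
            organization="my-org",
            page_size=50,
            search_name="prod",
            filter_permissions_update=True,
        )
    )
)
# {'page[number]': '1', 'page[size]': '50',
#  'search[name]': 'prod', 'filter[permissions][update]': 'true'}
```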
### Parameter Model Integration
Functions use direct parameters with optional params objects. See `../tools/variables.py:create_workspace_variable` for the pattern of combining direct and optional parameters with request models.
## Usage Standards
### Core Requirements
- **Error handling**: Always use `@handle_api_errors` for API functions (never duplicate logic)
- **Environment access**: Use utility functions instead of direct `os.getenv()` calls
- **Payload creation**: Use `create_api_payload()` and `add_relationship()` for JSON:API operations
- **Parameter handling**: Use `query_params()` for all list/filter operations
- **Response filtering**: Review filter configurations when implementing new tools - check for new fields to filter
### Validated Audit-Safe Filtering System
**✅ FULLY TESTED AND VERIFIED**: All 7 tool categories validated for 100% audit compliance:
- **User accountability**: `created-at`, `updated-at`, `version-id` always preserved
- **Security configuration**: All permission and auth fields preserved
- **Change tracking**: Status timestamps, source tracking, version data preserved
- **Operational context**: Status, timing, progress data for monitoring tools
- **Decision context**: Cost data, assessments, diagnostics for analysis tools
- **Conservative 5-15% token reduction**: Balanced optimization vs. complete audit capability
- **Audit-first principle**: When in doubt, preserve the field rather than filter it
### Development Guidelines
- **New utilities**: Place in appropriate module, document with type hints and examples
- **Consistency**: Maintain established patterns, consistent return types and error formats
- **Reusability**: Write utilities to be domain-agnostic and broadly applicable
## Error Handling Decision Matrix
### When to Use Decorator Only
- Standard CRUD operations with predictable API responses (200, 201, 204, 4xx, 5xx)
- Most list and get operations without complex parameter validation
- Tools that don't require business logic validation before API calls
### When to Add Custom Logic (WITH Decorator)
- Parameter validation requiring specific error messages beyond Pydantic
- API responses needing special status code interpretation
- Multi-step operations with intermediate error handling needs
- File operations requiring size/format validation
### Error Response Standards
- **Format**: `{"error": "descriptive message"}` (never expose sensitive data)
- **Success**: 200/201 return raw API response; 204 returns `{"status": "success", "status_code": 204}`
- **Consistency**: Always preserve @handle_api_errors decorator even with custom logic
## Development Standards
### Quality Checks
- **Format**: `ruff format .`
- **Lint**: `ruff check .`
- **Type Check**: `mypy .`
- **Test**: `pytest`
### Utility-Specific Requirements
- All functions must include proper error handling and consistent response format
- Apply security guidelines for sensitive data redaction
- Follow established patterns for error decoration and type safety
- Test with comprehensive quality check sequence after utility changes
### Code Style Requirements
- Type hints required for all parameters and return values
- Apply async patterns where appropriate
- Follow security practices for sensitive data handling
- Maintain consistent naming conventions
## Integration Guidelines
### Tool Integration
When utilities are used in tools:
- Apply `@handle_api_errors` decorator to all API functions
- Use `create_api_payload()` for JSON:API compliant requests
- Apply `query_params()` for all list/filter operations
- Use centralized environment variable access
### Model Integration
When utilities work with models:
- Use `create_api_payload()` with Pydantic model instances
- Apply `query_params()` for transforming model data to API parameters
- Handle model validation errors appropriately
- Ensure proper type safety throughout
### API Client Integration
When utilities work with API client:
- Use utilities for consistent request formatting
- Apply error handling decorators
- Use centralized token management
- Ensure proper response handling
## Implementation Workflow
### New Utility Development Process
1. **Define function signature**: Include proper type hints
2. **Implement core logic**: Follow established patterns
3. **Add error handling**: Apply consistent response formats
4. **Test thoroughly**: Cover success, error, and edge cases
5. **Document function**: Include usage examples and cross-references
6. **Update status**: Implementation tracking
### Quality Validation Checklist
For each utility implementation:
- [ ] Function includes proper error handling and consistent response format
- [ ] Type hints provided for all parameters and return values
- [ ] Documentation includes usage examples and patterns
- [ ] Security guidelines followed for sensitive data handling
- [ ] Quality checks passed: format, lint, type check
- [ ] Tests cover all scenarios: success, error, edge cases
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/api/__init__.py:
--------------------------------------------------------------------------------
```python
"""API client module for Terraform Cloud MCP"""
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/configs/__init__.py:
--------------------------------------------------------------------------------
```python
"""Configuration modules for Terraform Cloud MCP."""
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/__init__.py:
--------------------------------------------------------------------------------
```python
"""Terraform Cloud MCP - A MCP server for Terraform Cloud"""
from .server import main
import importlib.metadata
__version__ = importlib.metadata.version("terraform-cloud-mcp")
__all__ = ["main"]
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/utils/__init__.py:
--------------------------------------------------------------------------------
```python
"""Utility functions for Terraform Cloud MCP"""
from .payload import create_api_payload, add_relationship
from .request import query_params
__all__ = ["create_api_payload", "add_relationship", "query_params"]
```
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
```
[mypy]
#disable_error_code = no-any-return
python_version = 3.12
warn_return_any = True
warn_unused_configs = True
ignore_missing_imports = True
namespace_packages = True
explicit_package_bases = True
# These modules will be strictly typed
[mypy-api.*]
disallow_untyped_defs = True
[mypy-models.*]
disallow_untyped_defs = True
[mypy-tools.*]
disallow_untyped_defs = True
[mypy-utils.*]
disallow_untyped_defs = True
[mypy-server]
disallow_untyped_defs = True
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/tools/__init__.py:
--------------------------------------------------------------------------------
```python
"""MCP tools for Terraform Cloud"""
# Import tools for easier access
from . import account
from . import applies
from . import assessment_results
from . import cost_estimates
from . import organizations
from . import plans
from . import projects
from . import runs
from . import state_versions
from . import state_version_outputs
from . import variables
from . import workspaces
__all__ = [
"account",
"applies",
"assessment_results",
"cost_estimates",
"organizations",
"plans",
"projects",
"runs",
"state_versions",
"state_version_outputs",
"variables",
"workspaces",
]
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/utils/decorators.py:
--------------------------------------------------------------------------------
```python
"""Decorators and utility functions for Terraform Cloud MCP"""
from functools import wraps
from typing import Callable, Any, Dict, Awaitable, cast
def handle_api_errors(
func: Callable[..., Awaitable[Dict[str, Any]]],
) -> Callable[..., Awaitable[Dict[str, Any]]]:
"""Decorator to handle API errors consistently."""
@wraps(func)
async def wrapper(*args, **kwargs) -> Dict[str, Any]:
try:
result = await func(*args, **kwargs)
# Cast ensures type safety when func might return subclass of Dict
return cast(Dict[str, Any], result)
except ValueError as e:
return {"error": str(e)}
return wrapper
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/account.py:
--------------------------------------------------------------------------------
```python
"""Account models for Terraform Cloud API
This module contains models for Terraform Cloud account-related requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/account
"""
from .base import APIRequest
class AccountDetailsRequest(APIRequest):
"""Request model for getting account details.
This model is used for the GET /account/details endpoint which requires no parameters.
The endpoint returns information about the currently authenticated user or service account.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/account#show-account-details
Note:
For team and organization tokens, this endpoint returns information about
a synthetic "service user" account associated with the token.
See:
docs/models/account.md for reference
"""
pass # No parameters needed for this endpoint
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/utils/env.py:
--------------------------------------------------------------------------------
```python
"""Environment variable management for Terraform Cloud MCP"""
import os
from typing import Optional
def get_tfc_token() -> Optional[str]:
"""Get Terraform Cloud API token from environment."""
return os.getenv("TFC_TOKEN")
def get_tfc_address() -> str:
"""Get Terraform Cloud/Enterprise address from environment, with default of app.terraform.io."""
return os.getenv("TFC_ADDRESS", "https://app.terraform.io")
def should_enable_delete_tools() -> bool:
"""Check if delete tools should be enabled."""
env_value = os.getenv("ENABLE_DELETE_TOOLS", "false").lower().strip()
return env_value in ("true", "1", "yes", "on")
def should_return_raw_response() -> bool:
"""Check if raw API responses should be returned instead of filtered responses."""
env_value = os.getenv("ENABLE_RAW_RESPONSE", "false").lower().strip()
return env_value in ("true", "1", "yes", "on")
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/utils/payload.py:
--------------------------------------------------------------------------------
```python
"""JSON:API payload utilities for Terraform Cloud MCP."""
from typing import Dict, Optional, Set, Any
from pydantic import BaseModel
def create_api_payload(
resource_type: str,
model: BaseModel,
exclude_fields: Optional[Set[str]] = None,
) -> Dict[str, Any]:
"""Creates a JSON:API compliant payload from a Pydantic model."""
attributes = model.model_dump(
by_alias=True,
exclude=exclude_fields or set(),
exclude_none=True,
)
return {"data": {"type": resource_type, "attributes": attributes}}
def add_relationship(
payload: Dict[str, Any], relation_name: str, resource_type: str, resource_id: str
) -> Dict[str, Any]:
"""Adds a relationship to a JSON:API payload."""
if "relationships" not in payload["data"]:
payload["data"]["relationships"] = {}
payload["data"]["relationships"][relation_name] = {
"data": {"type": resource_type, "id": resource_id}
}
return payload
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/tools/account.py:
--------------------------------------------------------------------------------
```python
"""Account management tools for Terraform Cloud API
This module implements the /account endpoints of the Terraform Cloud API.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/account
"""
from ..api.client import api_request
from ..utils.decorators import handle_api_errors
from ..models.base import APIResponse
@handle_api_errors
async def get_account_details() -> APIResponse:
"""Get account details for a Terraform Cloud API token
This endpoint shows information about the currently authenticated user or service account,
useful for verifying identity, retrieving email address, and checking authentication status.
It returns the same type of object as the Users API, but also includes an email address,
which is hidden when viewing info about other users.
API endpoint: GET /account/details
Returns:
Raw API response with account information from Terraform Cloud
including user ID, username, email address, and authentication status
See:
docs/tools/account.md for reference documentation
"""
return await api_request("account/details")
```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
```toml
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "terraform-cloud-mcp"
version = "0.8.17"
description = "A Model Context Protocol (MCP) server that integrates Claude with the Terraform Cloud API, allowing Claude to manage your Terraform infrastructure through natural conversation."
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
"dotenv>=0.9.9",
"httpx>=0.28.1",
"fastmcp>=2.9.1",
]
packages = ["terraform_cloud_mcp", "api", "models", "tools", "utils"]
[project.scripts]
terraform-cloud-mcp = "terraform_cloud_mcp.server:main"
[tool.hatch.metadata]
allow-direct-references = true
[tool.mypy]
python_version = "3.12"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true
check_untyped_defs = true
disallow_untyped_decorators = true
no_implicit_optional = true
strict_optional = true
namespace_packages = true
explicit_package_bases = true
ignore_missing_imports = true
# Focus only on individual modules
[[tool.mypy.overrides]]
module = ["api.*", "models.*", "tools.*", "utils.*"]
disallow_untyped_defs = true
[[tool.mypy.overrides]]
module = ["server"]
disallow_untyped_defs = true
[[tool.mypy.overrides]]
module = ["mcp.*", "dotenv.*", "httpx.*"]
ignore_missing_imports = true
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/tools/cost_estimates.py:
--------------------------------------------------------------------------------
```python
"""Terraform Cloud cost estimates management tools.
This module provides tools for working with cost estimates in Terraform Cloud.
It includes functions to retrieve cost estimate details.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/cost-estimates
"""
from ..api.client import api_request
from ..models.base import APIResponse
from ..models.cost_estimates import CostEstimateRequest
from ..utils.decorators import handle_api_errors
@handle_api_errors
async def get_cost_estimate_details(cost_estimate_id: str) -> APIResponse:
"""Get details for a specific cost estimate.
Retrieves comprehensive information about a cost estimate including its current status,
resource counts, monthly cost estimations, and relationship to other resources.
API endpoint: GET /cost-estimates/{cost_estimate_id}
Args:
cost_estimate_id: The ID of the cost estimate to retrieve details for (format: "ce-xxxxxxxx")
Returns:
Cost estimate details including status, timestamps, resource counts,
and monthly cost estimations
Note:
There is no endpoint to list cost estimates. You can find the ID for a cost estimate
in the `relationships.cost-estimate` property of a run object.
See:
docs/tools/cost_estimate.md for reference documentation
"""
# Validate parameters
params = CostEstimateRequest(cost_estimate_id=cost_estimate_id)
# Make API request
return await api_request(f"cost-estimates/{params.cost_estimate_id}")
```
--------------------------------------------------------------------------------
/docs/tools/account.md:
--------------------------------------------------------------------------------
```markdown
# Account Tools
This module provides tools for retrieving information about the currently authenticated Terraform Cloud account.
## Overview
Account tools allow you to access details about the currently authenticated user or service account. This is useful for retrieving account information, verifying authentication status, and checking permissions.
## API Reference
These tools interact with the Terraform Cloud Account API:
- [Account API Documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/account)
## Tools Reference
### get_account_details
**Function:** `get_account_details() -> Dict[str, Any]`
**Description:** Retrieves information about the currently authenticated user or service account.
**Parameters:** None - uses the authentication from the API token environment variable.
**Returns:** JSON response containing comprehensive account details including:
- User ID and username
- Email address
- Two-factor authentication status
- Service account status
- User permissions and attributes
**Notes:**
- This endpoint returns additional information (email address) beyond what is returned by the standard Users API
- Useful for determining if you're authenticated as a user or a service account
- Can be used to verify 2FA compliance and permissions
- Returns the same type of object as the Users API, but with additional information
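**Usage Example:**
A minimal sketch (run inside an async context); field access follows the raw JSON:API response shape:
```python
details = await get_account_details()

attributes = details.get("data", {}).get("attributes", {})
print(attributes.get("username"))
print(attributes.get("email"))
print(attributes.get("is-service-account"))  # raw response keys use kebab-case
```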
**Common Error Scenarios:**
| Error | Cause | Solution |
|-------|-------|----------|
| 401 | Invalid or expired API token | Generate a new API token in Terraform Cloud settings |
| 403 | Token lacks sufficient permissions | Ensure the token has proper access rights |
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/state_version_outputs.py:
--------------------------------------------------------------------------------
```python
"""State version output models for Terraform Cloud API
This module contains models for Terraform Cloud state version output-related requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-version-outputs
"""
from typing import Optional
from pydantic import Field
from .base import APIRequest
class StateVersionOutputListRequest(APIRequest):
"""Request parameters for listing state version outputs.
Defines the parameters for the state version outputs listing API including pagination.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-version-outputs#list-state-version-outputs
See:
docs/models/state_version_outputs.md for reference
"""
state_version_id: str = Field(
...,
description="The ID of the state version to list outputs for",
pattern=r"^sv-[a-zA-Z0-9]{16}$", # Standard state version ID pattern
)
page_number: Optional[int] = Field(
1,
ge=1,
description="Page number to fetch",
)
page_size: Optional[int] = Field(
20,
ge=1,
le=100,
description="Number of results per page",
)
class StateVersionOutputRequest(APIRequest):
"""Request model for retrieving a specific state version output.
Used to validate the state version output ID parameter for API requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-version-outputs#show-a-state-version-output
See:
docs/models/state_version_outputs.md for reference
"""
state_version_output_id: str = Field(
...,
description="The ID of the state version output to retrieve",
pattern=r"^wsout-[a-zA-Z0-9]{16}$", # Standard state version output ID pattern
)
# Response handling is implemented through raw dictionaries
```
--------------------------------------------------------------------------------
/docs/tools/cost_estimate.md:
--------------------------------------------------------------------------------
```markdown
# Cost Estimate Tools
The cost estimate tools module provides functions for working with cost estimates in Terraform Cloud.
## Overview
Cost estimates provide insights into the financial impact of infrastructure changes, allowing you to understand the cost implications of your Terraform changes before applying them. These tools allow you to retrieve and analyze cost estimate details.
## API Reference
These tools interact with the Terraform Cloud Cost Estimation API:
- [Cost Estimation API Documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/cost-estimates)
- [Cost Estimation Concepts](https://developer.hashicorp.com/terraform/cloud-docs/cost-estimation)
## Tools Reference
### get_cost_estimate_details
**Function:** `get_cost_estimate_details(cost_estimate_id: str) -> Dict[str, Any]`
**Description:** Retrieves detailed information about a specific cost estimate by ID.
**Parameters:**
- `cost_estimate_id` (str): The ID of the cost estimate (format: "ce-xxxxxxxx")
**Returns:** JSON response containing comprehensive cost estimate details including:
- Status (pending, queued, running, errored, canceled, finished, unreachable)
- Cost projections (prior monthly cost, proposed monthly cost, delta monthly cost)
- Resource counts (total, matched, unmatched resources)
- Execution timestamps
- Related workspaces and runs
**Notes:**
- There is no endpoint to list cost estimates directly
- Cost estimate IDs are typically found in the `relationships.cost-estimate` property of run objects
- Requires "read" permission for the associated workspace
- You can extract the cost estimate ID from a run by accessing:
```
relationships = run_result.get("data", {}).get("relationships", {})
cost_estimate_data = relationships.get("cost-estimate", {}).get("data", {})
cost_estimate_id = cost_estimate_data.get("id")
```
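With the ID in hand, the details call is a single await (sketch, inside an async context):
```python
cost_estimate = await get_cost_estimate_details(cost_estimate_id)
status = cost_estimate.get("data", {}).get("attributes", {}).get("status")
```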
**Common Error Scenarios:**
| Error | Cause | Solution |
|-------|-------|----------|
| 404 | Cost estimate not found | Verify the ID exists and you have proper permissions |
| 422 | Invalid cost estimate ID format | Ensure the ID matches pattern "ce-xxxxxxxx" |
| 403 | Insufficient permissions | Verify your API token has proper access |
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/assessment_results.py:
--------------------------------------------------------------------------------
```python
"""Assessment result models for Terraform Cloud API.
This module contains models for Terraform Cloud assessment result-related requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/assessment-results
"""
from enum import Enum
from pydantic import Field
from .base import APIRequest
class AssessmentResultStatus(str, Enum):
"""Status options for assessment results in Terraform Cloud.
Defines the various states an assessment result can be in during its lifecycle:
- PENDING: Assessment has not yet started
- QUEUED: Assessment is queued for execution
- RUNNING: Assessment is currently running
- ERRORED: Assessment has encountered an error
- CANCELED: Assessment was canceled
- FINISHED: Assessment has completed successfully
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/assessment-results
See:
docs/models/assessment_result.md for reference
"""
PENDING = "pending"
QUEUED = "queued"
RUNNING = "running"
ERRORED = "errored"
CANCELED = "canceled"
FINISHED = "finished"
class AssessmentResultRequest(APIRequest):
"""Request model for retrieving assessment result details.
Used to validate the assessment result ID parameter for API requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/assessment-results#show-assessment-result
See:
docs/models/assessment_result.md for reference
"""
assessment_result_id: str = Field(
...,
# No alias needed as field name matches API parameter
description="The ID of the assessment result to retrieve",
pattern=r"^asmtres-[a-zA-Z0-9]{8,}$", # Standard assessment result ID pattern
)
class AssessmentOutputRequest(AssessmentResultRequest):
"""Request model for retrieving assessment result outputs.
Extends the base AssessmentResultRequest for specialized outputs like
JSON plan, schema, and log output.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/assessment-results#retrieve-the-json-output-from-the-assessment-execution
See:
docs/models/assessment_result.md for reference
"""
pass # Uses the same validation as the parent class
# Response handling is implemented through raw dictionaries
```
--------------------------------------------------------------------------------
/docs/conversations/account.md:
--------------------------------------------------------------------------------
```markdown
# Terraform Cloud Account Management Conversation Examples
This document shows example conversations between a user and Claude when using the account management tools.
## Getting Account Details
```
User: What's my account information in Terraform Cloud?
Claude: I'll check your account details in Terraform Cloud.
[Claude uses get_account_details tool]
Based on your Terraform Cloud account details:
Your username is "jsmith"
Your email is "[email protected]"
You have a "regular" user account (not a service account)
Your account was created on February 12, 2023
You have two-factor authentication enabled
You're associated with the "acme-corp" organization
Would you like to see more specific information about your account?
```
## Diagnosing Authentication Issues
```
User: I can't seem to access any workspaces. What's wrong?
Claude: Let me check your account details to see if there might be an authentication issue.
[Claude uses get_account_details tool]
I've checked your Terraform Cloud account, and it looks like your token doesn't have access to any workspaces. This could be due to one of these issues:
1. Your token has limited permissions - it may be scoped only to certain resources
2. You might not be a member of any organizations or teams that have workspace access
3. Your token might have expired or been revoked
Your token identifies as belonging to a service account, not a regular user account. Service accounts often have more restricted permissions.
Would you like me to help you:
1. Check which organizations you have access to?
2. Verify if your token has the correct permissions?
3. Get instructions for creating a new token with broader access?
```
## Features and Enterprise Plan Information
```
User: What features does my Terraform Cloud account have?
Claude: I'll check what features your Terraform Cloud account has access to.
[Claude uses get_account_details tool]
Based on your Terraform Cloud account information:
You're on the Business tier plan with the following features:
- Private module registry
- Team management
- Sentinel policy enforcement
- Run tasks
- Agents for private infrastructure
- Cost estimation
- SSO with SAML
Your organization ("hashicorp-demo") has the following limits:
- Up to 500 users
- Unlimited private modules
- Unlimited workspaces
- Up to 50 concurrent runs
Is there a specific feature you're interested in learning more about?
```
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/cost_estimates.py:
--------------------------------------------------------------------------------
```python
"""Cost estimate models for Terraform Cloud API
This module contains models for Terraform Cloud cost estimate-related requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/cost-estimates
"""
from enum import Enum
from typing import Optional
from pydantic import Field
from .base import APIRequest
class CostEstimateStatus(str, Enum):
"""Status options for cost estimates in Terraform Cloud.
Defines the various states a cost estimate can be in during its lifecycle:
- PENDING: Cost estimate has not yet started
- QUEUED: Cost estimate is queued for execution
- RUNNING: Cost estimate is currently running
- ERRORED: Cost estimate has encountered an error
- CANCELED: Cost estimate was canceled
- FINISHED: Cost estimate has completed successfully
- UNREACHABLE: Cost estimate is in an unreachable state
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/cost-estimates
See:
docs/models/cost_estimate.md for reference
"""
PENDING = "pending"
QUEUED = "queued"
RUNNING = "running"
ERRORED = "errored"
CANCELED = "canceled"
FINISHED = "finished"
UNREACHABLE = "unreachable"
class StatusTimestamps(APIRequest):
"""Model for cost estimate execution timestamps.
Captures the timestamps for various stages in a cost estimate's lifecycle.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/cost-estimates
See:
docs/models/cost_estimate.md for reference
"""
queued_at: Optional[str] = Field(
None,
alias="queued-at",
description="When the cost estimate was queued",
)
finished_at: Optional[str] = Field(
None,
alias="finished-at",
description="When the cost estimate execution completed",
)
class CostEstimateRequest(APIRequest):
"""Request model for retrieving a cost estimate.
Used to validate the cost estimate ID parameter for API requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/cost-estimates#show-a-cost-estimate
See:
docs/models/cost_estimate.md for reference
"""
cost_estimate_id: str = Field(
...,
# No alias needed as field name matches API parameter
description="The ID of the cost estimate to retrieve",
pattern=r"^ce-[a-zA-Z0-9]{16}$", # Standard cost estimate ID pattern
)
# Response handling is implemented through raw dictionaries
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/utils/request.py:
--------------------------------------------------------------------------------
```python
"""Request utilities for Terraform Cloud MCP."""
from typing import Dict, Any
from pydantic import BaseModel
def query_params(model: BaseModel) -> Dict[str, Any]:
"""Transform Pydantic model fields to API parameters using naming conventions."""
params = {}
routing_fields = {
"organization",
"workspace_name",
"workspace_id",
"run_id",
"plan_id",
"apply_id",
"project_id",
}
# Use model_dump for reliable field access
model_dict = model.model_dump(exclude_none=True)
for name, value in model_dict.items():
# Skip routing fields that aren't query parameters
if name in routing_fields:
continue
# Pagination parameters
if name.startswith("page_"):
field_name = name.replace("page_", "")
params[f"page[{field_name}]"] = str(value)
# Filter parameters
elif name.startswith("filter_"):
# Skip empty string filters
if value == "":
continue
# Handle nested filters (workspace and organization for state versions)
if name == "filter_workspace_name":
params["filter[workspace][name]"] = str(value)
elif name == "filter_organization_name":
params["filter[organization][name]"] = str(value)
# Handle two-level filters (permissions)
elif "_permissions_" in name:
parts = name.replace("filter_permissions_", "").split("_")
field_name = "-".join(parts)
params[f"filter[permissions][{field_name}]"] = (
"true" if value is True else str(value)
)
else:
field_name = name.replace("filter_", "").replace("_", "-")
params[f"filter[{field_name}]"] = (
"true" if value is True else str(value)
)
# Search parameters
elif name.startswith("search_"):
if value not in ("", None):
field_name = name.replace("search_", "")
params[f"search[{field_name}]"] = str(value)
# Organization query parameters
elif name.startswith("query_"):
if value not in ("", None):
field_name = name.replace("query_", "")
params[f"q[{field_name}]"] = str(value)
# Direct parameters
elif name in ("q", "search", "sort"):
# Only add non-empty string parameters
if value not in ("", None):
params[name] = str(value)
return params
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/tools/state_version_outputs.py:
--------------------------------------------------------------------------------
```python
"""Terraform Cloud state version outputs management tools.
This module provides tools for working with state version outputs in Terraform Cloud.
It includes functions to retrieve and list state version outputs.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-version-outputs
"""
from ..api.client import api_request
from ..models.base import APIResponse
from ..models.state_version_outputs import (
StateVersionOutputListRequest,
StateVersionOutputRequest,
)
from ..utils.decorators import handle_api_errors
from ..utils.request import query_params
@handle_api_errors
async def list_state_version_outputs(
state_version_id: str, page_number: int = 1, page_size: int = 20
) -> APIResponse:
"""List outputs for a state version.
Retrieves a paginated list of all outputs for a specific state version.
These outputs include name, value, and sensitivity information.
API endpoint: GET /state-versions/:state_version_id/outputs
Args:
state_version_id: The ID of the state version (format: "sv-xxxxxxxx")
page_number: The page number to return (default: 1)
page_size: The number of items per page (default: 20, max: 100)
Returns:
Paginated list of state version outputs with name, value, and sensitivity information
See:
docs/tools/state_version_outputs.md for reference documentation
"""
# Validate parameters
params = StateVersionOutputListRequest(
state_version_id=state_version_id,
page_number=page_number,
page_size=page_size,
)
# Build query parameters using utility function
query = query_params(params)
# Make API request
return await api_request(
f"state-versions/{params.state_version_id}/outputs", params=query
)
@handle_api_errors
async def get_state_version_output(state_version_output_id: str) -> APIResponse:
"""Get details for a specific state version output.
Retrieves comprehensive information about a state version output including
its name, value, type, and sensitivity information.
API endpoint: GET /state-version-outputs/:state_version_output_id
Args:
state_version_output_id: The ID of the state version output (format: "wsout-xxxxxxxx")
Returns:
State version output details including name, value, type, and sensitivity information
See:
docs/tools/state_version_outputs.md for reference documentation
"""
# Validate parameters
params = StateVersionOutputRequest(state_version_output_id=state_version_output_id)
# Make API request
return await api_request(f"state-version-outputs/{params.state_version_output_id}")
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/base.py:
--------------------------------------------------------------------------------
```python
"""Base models for Terraform Cloud MCP.
This module defines base models for Terraform Cloud API requests.
We validate requests using Pydantic models but do not validate responses.
Response structures are documented in comments for reference only.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs
"""
from enum import Enum
from typing import Any, Dict, TypeVar
from pydantic import BaseModel, ConfigDict
class BaseModelConfig(BaseModel):
"""Base model configuration for all models in the project.
Provides common configuration settings for Pydantic models including:
- populate_by_name: Allow populating models by alias name or field name
- use_enum_values: Use string values from enums instead of enum objects
- extra: Ignore extra fields in input data
See:
docs/models/base.md for reference
"""
model_config = ConfigDict(
populate_by_name=True,
use_enum_values=True,
extra="ignore",
)
class APIRequest(BaseModelConfig):
"""Base model for API requests.
All API request models should inherit from this class to ensure
consistent configuration and behavior. It inherits settings from
BaseModelConfig.
Note:
This class provides the foundation for all API requests and inherits
model configuration from BaseModelConfig.
See:
docs/models/base.md for reference
"""
pass
# Common enums used across multiple modules
class ExecutionMode(str, Enum):
"""Execution mode options for workspaces and organizations.
Defines how Terraform operations are executed:
- REMOTE: Terraform runs on Terraform Cloud's infrastructure
- LOCAL: Terraform runs on your local machine
- AGENT: Terraform runs on your own infrastructure using an agent
Reference: https://developer.hashicorp.com/terraform/cloud-docs/workspaces/settings#execution-mode
See:
docs/models/workspace.md for reference
"""
REMOTE = "remote"
LOCAL = "local"
AGENT = "agent"
class CollaboratorAuthPolicy(str, Enum):
"""Authentication policy options for organization collaborators.
Defines the authentication requirements for organization members:
- PASSWORD: Password-only authentication is allowed
- TWO_FACTOR_MANDATORY: Two-factor authentication is required for all users
Reference: https://developer.hashicorp.com/terraform/cloud-docs/users-teams-organizations/organizations#authentication
See:
docs/models/organization.md for reference
"""
PASSWORD = "password"
TWO_FACTOR_MANDATORY = "two_factor_mandatory"
# Response type for all API calls - just a dictionary with no validation
APIResponse = Dict[str, Any]
# Type variable for API requests to use with generics
ReqT = TypeVar("ReqT", bound=APIRequest)
```
--------------------------------------------------------------------------------
/docs/models/state_version_outputs.md:
--------------------------------------------------------------------------------
```markdown
# State Version Outputs Models
This document describes the data models used for state version outputs operations in Terraform Cloud.
## Overview
State version outputs models provide structure and validation for interacting with the Terraform Cloud State Version Outputs API. These models handle the output values from Terraform state files, including name, value, type, and sensitivity information.
## Models Reference
### StateVersionOutputListRequest
**Type:** Request Validation Model
**Description:** Used to validate parameters for listing outputs for a specific state version.
**Fields:**
- `state_version_id` (string, required): The ID of the state version to list outputs for
- Format: Must match pattern "sv-[a-zA-Z0-9]{16}"
- Example: "sv-BPvFFrYCqRV6qVBK"
- `page_number` (integer, optional): Page number to fetch (default: 1, minimum: 1)
- `page_size` (integer, optional): Number of results per page (default: 20, max: 100)
**Validation Rules:**
- State version ID must start with "sv-" prefix
- Must contain exactly 16 alphanumeric characters after the prefix
- Page number must be positive integer
- Page size must be between 1 and 100
### StateVersionOutputRequest
**Type:** Request Validation Model
**Description:** Used to validate state version output ID parameters in API requests.
**Fields:**
- `state_version_output_id` (string, required): The ID of the state version output to retrieve
- Format: Must match pattern "wsout-[a-zA-Z0-9]{16}"
- Example: "wsout-BPvFFrYCqRV6qVBK"
**Validation Rules:**
- State version output ID must start with "wsout-" prefix
- Must contain exactly 16 alphanumeric characters after the prefix
**Used by:**
- `get_state_version_output` tool function to validate the output ID format before making API requests
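**Validation Example:**
A short sketch showing how a malformed ID is rejected before any API call:
```python
from pydantic import ValidationError

from terraform_cloud_mcp.models.state_version_outputs import StateVersionOutputRequest

StateVersionOutputRequest(state_version_output_id="wsout-BPvFFrYCqRV6qVBK")  # valid

try:
    StateVersionOutputRequest(state_version_output_id="out-123")  # wrong prefix and length
except ValidationError as exc:
    print(exc)  # Pydantic reports the pattern mismatch
```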
## API Response Structure
While models validate requests, responses are returned as raw JSON. A typical state version output response has this structure:
```json
{
"data": {
"id": "wsout-BPvFFrYCqRV6qVBK",
"type": "state-version-outputs",
"attributes": {
"name": "vpc_id",
"sensitive": false,
"type": "string",
"value": "vpc-1234567890abcdef",
"detailed-type": "string"
},
"relationships": {
"configurable": {
"data": {
"id": "sv-BPvFFrYCqRV6qVBK",
"type": "state-versions"
}
}
}
}
}
```
**Key Attributes:**
- `name`: The name of the output as defined in Terraform configuration
- `sensitive`: Boolean indicating if the output contains sensitive information
- `type`: Simplified data type (string, number, bool, etc.)
- `value`: The actual output value (hidden for sensitive outputs)
- `detailed-type`: More specific type information for complex structures
## Related Resources
- [State Version Outputs Tools](../tools/state_version_outputs.md)
- [State Version Models](./state_versions.md)
- [Terraform Cloud API - State Version Outputs](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-version-outputs)
```
--------------------------------------------------------------------------------
/docs/tools/state_version_outputs.md:
--------------------------------------------------------------------------------
```markdown
# State Version Outputs Tools
The state version outputs tools module provides functions for working with state version outputs in Terraform Cloud.
## Overview
State version outputs represent the output values from Terraform state files, including their names, values, types, and sensitivity information. These tools allow you to retrieve and inspect the outputs generated by your Terraform configurations.
## API Reference
These tools interact with the Terraform Cloud State Version Outputs API:
- [State Version Outputs API Documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-version-outputs)
- [State Versions Concepts](https://developer.hashicorp.com/terraform/cloud-docs/workspaces/state)
## Tools Reference
### list_state_version_outputs
**Function:** `list_state_version_outputs(state_version_id: str, page_number: int = 1, page_size: int = 20) -> Dict[str, Any]`
**Description:** Retrieves a paginated list of all outputs for a specific state version.
**Parameters:**
- `state_version_id` (str): The ID of the state version (format: "sv-xxxxxxxx")
- `page_number` (int): Page number to fetch (default: 1)
- `page_size` (int): Number of results per page (default: 20, max: 100)
**Returns:** JSON response containing paginated state version outputs with name, value, type, and sensitivity information.
**Notes:**
- Outputs are populated asynchronously after state version upload
- Check the `resources-processed` property on the state version to confirm processing is complete
- Requires "read" permission for the associated workspace
- Sensitive outputs will show `sensitive: true` but hide the actual value
### get_state_version_output
**Function:** `get_state_version_output(state_version_output_id: str) -> Dict[str, Any]`
**Description:** Retrieves detailed information about a specific state version output by ID.
**Parameters:**
- `state_version_output_id` (str): The ID of the state version output (format: "wsout-xxxxxxxx")
**Returns:** JSON response containing comprehensive output details including name, value, type, and detailed type information.
**Notes:**
- Provides both simplified and detailed type information
- Sensitive outputs will not expose their values in the response
- Requires "read" permission for the associated workspace
- Output IDs are typically obtained from listing outputs first
**Common Error Scenarios:**
| Error | Cause | Solution |
|-------|-------|----------|
| 404 | Output not found | Verify the ID exists and you have proper permissions |
| 422 | Invalid output ID format | Ensure the ID matches pattern "wsout-xxxxxxxx" |
| 403 | Insufficient permissions | Verify your API token has proper workspace access |
| 202 | Processing incomplete | Wait for state version processing to complete |
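For orientation, here is a minimal usage sketch covering both tools. It assumes the functions are imported directly from `terraform_cloud_mcp.tools.state_version_outputs`, that a valid Terraform Cloud token is configured in the environment, and that the IDs shown are placeholders.
```python
import asyncio

# Assumed import path; the functions live in the state version outputs tools module.
from terraform_cloud_mcp.tools.state_version_outputs import (
    get_state_version_output,
    list_state_version_outputs,
)


async def main() -> None:
    # List outputs for a state version (placeholder ID), 50 per page.
    outputs = await list_state_version_outputs("sv-xxxxxxxxxxxxxxxx", page_size=50)
    for item in outputs.get("data", []):
        print(item["id"], item["attributes"]["name"])

    # Fetch a single output by its ID (placeholder ID).
    detail = await get_state_version_output("wsout-xxxxxxxxxxxxxxxx")
    print(detail.get("data", {}).get("attributes", {}))


asyncio.run(main())
```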
## Related Resources
- [State Version Outputs Models](../models/state_version_outputs.md)
- [State Version Tools](./state_versions.md)
- [Terraform Cloud API - State Version Outputs](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-version-outputs)
```
--------------------------------------------------------------------------------
/docs/models/account.md:
--------------------------------------------------------------------------------
```markdown
# Account Models
This document describes the data models used for account operations in Terraform Cloud.
## Overview
Account models provide structure for working with Terraform Cloud account information. These models help with accessing and validating account data retrieved from the Terraform Cloud API.
## Models Reference
### AccountDetailsRequest
**Type:** Request Validation Model
**Description:** Used to validate parameters for retrieving account details.
**Fields:** None - This model simply enforces the validation pattern for the account details endpoint.
**Usage Context:**
Used by the `get_account_details` function to enforce the standard request validation pattern; this endpoint takes no parameters.
### AccountDetails
**Type:** Response Model
**Description:** Represents the account information of the current user or service account.
**Fields:**
- `id` (string): The ID of the account (e.g., "user-12345abcd")
- `username` (string): The username of the account
- `email` (string): The email address associated with the account
- `is_service_account` (boolean): Whether this is a service account (true) or user account (false)
- `avatar_url` (string, optional): URL to the user's avatar image
- `two_factor` (TwoFactorInfo, optional): Information about two-factor authentication status
- `enabled` (boolean): Whether 2FA is enabled
- `verified` (boolean): Whether 2FA has been verified
**JSON representation:**
```json
{
"data": {
"id": "user-12345abcd",
"type": "users",
"attributes": {
"username": "admin-user",
"email": "[email protected]",
"is-service-account": false,
"avatar-url": "https://example.com/avatar.png",
"two-factor": {
"enabled": true,
"verified": true
}
}
}
}
```
**Notes:**
- Field names in JSON responses use kebab-case format (e.g., "is-service-account")
- Field names in the model use snake_case format (e.g., is_service_account)
- The `from_api_response` static method helps parse API responses into this model
- Email is only returned for the authenticated user's own account
**Usage Examples:**
```python
# Parse account details from API response
account = AccountDetails.from_api_response(api_response)
# Check if account has two-factor authentication enabled
if account.two_factor and account.two_factor.enabled:
print(f"2FA is enabled for {account.username}")
# Check if the account is a service account
if account.is_service_account:
print(f"{account.username} is a service account")
```
## API Response Structure
When retrieving account details, the Terraform Cloud API returns a response with this structure:
```json
{
"data": {
"id": "user-12345abcd",
"type": "users",
"attributes": {
"username": "admin-user",
"email": "[email protected]",
"is-service-account": false,
"avatar-url": "https://example.com/avatar.png",
"two-factor": {
"enabled": true,
"verified": true
}
}
}
}
```
## Related Resources
- [Account Tools](../tools/account.md)
- [Terraform Cloud API - Account](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/account)
```
--------------------------------------------------------------------------------
/docs/models/cost_estimate.md:
--------------------------------------------------------------------------------
```markdown
# Cost Estimate Models
This document describes the data models used for cost estimation in Terraform Cloud.
## Overview
Cost estimate models provide structure and validation for interacting with the Terraform Cloud Cost Estimation API. These models define the format of cost estimate data, status values, and request validation.
## Models Reference
### CostEstimateStatus
**Type:** Enum (string)
**Description:** Represents the possible states a cost estimate can be in during its lifecycle.
**Values:**
- `pending`: Cost estimate has not yet started
- `queued`: Cost estimate is queued for execution
- `running`: Cost estimate is currently running
- `errored`: Cost estimate has encountered an error
- `canceled`: Cost estimate was canceled
- `finished`: Cost estimate has completed successfully
- `unreachable`: Cost estimate is in an unreachable state
**Usage Context:**
Used to determine the current state of a cost estimate operation and whether it has completed successfully.
### StatusTimestamps
**Type:** Object
**Description:** Captures timing information for various stages in a cost estimate's lifecycle.
**Fields:**
- `queued_at` (string, optional): ISO8601 timestamp when the cost estimate was queued
- `finished_at` (string, optional): ISO8601 timestamp when the cost estimate execution completed
**JSON representation:**
```json
{
"queued-at": "2023-05-01T12:34:56Z",
"finished-at": "2023-05-01T12:35:30Z"
}
```
**Notes:**
- Field names in JSON responses use kebab-case format (e.g., "queued-at")
- Field names in the model use snake_case format (e.g., queued_at)
- All timestamp fields follow ISO8601 format
### CostEstimateRequest
**Type:** Request Validation Model
**Description:** Used to validate cost estimate ID parameters in API requests.
**Fields:**
- `cost_estimate_id` (string, required): The ID of the cost estimate to retrieve
- Format: Must match pattern "ce-[a-zA-Z0-9]{16}"
- Example: "ce-BPvFFrYCqRV6qVBK"
**Validation Rules:**
- Cost estimate ID must start with "ce-" prefix
- Must contain exactly 16 alphanumeric characters after the prefix
**Used by:**
- `get_cost_estimate_details` tool function to validate the cost estimate ID format before making API requests
## API Response Structure
While models validate requests, responses are returned as raw JSON. A typical cost estimate response has this structure:
```json
{
"data": [
{
"id": "ce-BPvFFrYCqRV6qVBK",
"type": "cost-estimates",
"attributes": {
"error-message": null,
"status": "finished",
"status-timestamps": {
"queued-at": "2023-05-01T12:34:56Z",
"finished-at": "2023-05-01T12:35:30Z"
},
"resources-count": 4,
"matched-resources-count": 3,
"unmatched-resources-count": 1,
"prior-monthly-cost": "0.0",
"proposed-monthly-cost": "25.488",
"delta-monthly-cost": "25.488"
},
"links": {
"self": "/api/v2/cost-estimate/ce-BPvFFrYCqRV6qVBK"
}
}
]
}
```
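As a small illustration of how callers typically consume this structure, the sketch below summarizes the monthly cost change from a response like the one above; the inline `response` is a trimmed stand-in for a real API response.
```python
# Minimal sketch: summarizing a finished cost estimate from the raw response.
response = {
    "data": [
        {
            "id": "ce-BPvFFrYCqRV6qVBK",
            "attributes": {
                "status": "finished",
                "prior-monthly-cost": "0.0",
                "proposed-monthly-cost": "25.488",
                "delta-monthly-cost": "25.488",
            },
        }
    ]
}

for estimate in response["data"]:
    attrs = estimate["attributes"]
    if attrs.get("status") == "finished":
        # Cost figures are returned as strings and use kebab-case keys.
        delta = float(attrs["delta-monthly-cost"])
        print(f"{estimate['id']}: proposed monthly cost changes by ${delta:.2f}")
```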
## Related Resources
- [Cost Estimate Tools](../tools/cost_estimate.md)
- [Terraform Cloud API - Cost Estimates](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/cost-estimates)
```
--------------------------------------------------------------------------------
/docs/tools/apply.md:
--------------------------------------------------------------------------------
```markdown
# Apply Management Tools
This module provides tools for managing applies in Terraform Cloud.
## Overview
Applies in Terraform Cloud represent the execution phase of a Terraform run after the plan has been approved. These tools allow retrieving information about applies and recovering from failed state uploads.
## API Reference
These tools interact with the Terraform Cloud Apply API:
- [Apply API Documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies)
- [Run Lifecycle](https://developer.hashicorp.com/terraform/cloud-docs/run/states)
## Tools Reference
### get_apply_details
**Function:** `get_apply_details(apply_id: str) -> Dict[str, Any]`
**Description:** Retrieves comprehensive information about an apply operation including its status, resource counts, and timestamps.
**Parameters:**
- `apply_id` (str): The ID of the apply to retrieve details for (format: "apply-xxxxxxxx")
**Returns:** JSON response containing apply details including:
- Status (queued, running, errored, canceled, finished)
- Status timestamps (queued-at, started-at, finished-at)
- Resource statistics (additions, changes, destructions)
- Log read URL
- Relationships to state versions and runs
**Notes:**
- Apply IDs can be found in the relationships of a run object
- Successful applies have a status of "finished"
- The log-read-url is a pre-signed URL to access the full apply logs
- Resource counts help track infrastructure changes
### get_apply_logs
**Function:** `get_apply_logs(apply_id: str) -> Dict[str, Any]`
**Description:** Retrieves the raw log output from a specific apply operation.
**Parameters:**
- `apply_id` (str): The ID of the apply to retrieve logs for (format: "apply-xxxxxxxx")
**Returns:** Text content of the apply logs including:
- Terraform output during apply
- Resource creation/modification details
- Error messages (if any occurred)
- Complete execution trace
**Notes:**
- Handles HTTP redirects automatically
- Useful for debugging failed applies
- Contains detailed information about each resource change
- Can be large for complex infrastructures
### get_errored_state
**Function:** `get_errored_state(apply_id: str) -> Dict[str, Any]`
**Description:** Retrieves information about a state file that failed to upload during an apply, enabling state recovery.
**Parameters:**
- `apply_id` (str): The ID of the apply with a failed state upload (format: "apply-xxxxxxxx")
**Returns:** Terraform state data that was not successfully uploaded, including:
- Complete state representation
- Resource definitions and attributes
- Terraform version information
- State metadata (serial, lineage)
**Notes:**
- Critical for recovering from failed state uploads
- Only available when a state upload has failed
- Allows administrators to manually recover state information
- Handles HTTP redirects automatically to retrieve the state data
**Common Error Scenarios:**
| Error | Cause | Solution |
|-------|-------|----------|
| 404 | Apply not found or no errored state | Verify the apply ID is correct and that it has a failed state |
| 403 | Insufficient permissions | Ensure your token has admin or state access permissions |
| 422 | Invalid apply ID format | Ensure the ID follows the format "apply-xxxxxxxx" |
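For orientation, here is a minimal usage sketch that ties these tools together. The imports mirror `terraform_cloud_mcp/tools/applies.py`; the apply ID is a placeholder and a valid Terraform Cloud token is assumed to be configured in the environment.
```python
import asyncio

from terraform_cloud_mcp.tools.applies import (
    get_apply_details,
    get_apply_logs,
    get_errored_state,
)


async def inspect_apply(apply_id: str) -> None:
    details = await get_apply_details(apply_id)
    status = details.get("data", {}).get("attributes", {}).get("status")
    print(f"{apply_id}: status={status}")

    if status == "errored":
        # Only available when a state upload actually failed for this apply.
        print(await get_errored_state(apply_id))
    else:
        # Raw log text, fetched via the apply's log-read-url.
        print(await get_apply_logs(apply_id))


asyncio.run(inspect_apply("apply-xxxxxxxxxxxxxxxx"))
```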
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/tools/applies.py:
--------------------------------------------------------------------------------
```python
"""Terraform Cloud apply management tools.
This module provides tools for working with applies in Terraform Cloud.
It includes functions to retrieve apply details, logs, and errored state information.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies
"""
from ..api.client import api_request
from ..models.base import APIResponse
from ..models.applies import (
ApplyRequest,
ApplyErroredStateRequest,
)
from ..utils.decorators import handle_api_errors
@handle_api_errors
async def get_apply_details(apply_id: str) -> APIResponse:
"""Get details for a specific apply.
Retrieves comprehensive information about an apply including its current status,
logs, resource counts, and relationship to other resources.
API endpoint: GET /applies/{apply_id}
Args:
apply_id: The ID of the apply to retrieve details for (format: "apply-xxxxxxxx")
Returns:
Apply details including status, timestamps, and resource change counts
See:
docs/tools/apply.md for reference documentation
"""
# Validate parameters
params = ApplyRequest(apply_id=apply_id)
# Make API request
return await api_request(f"applies/{params.apply_id}")
@handle_api_errors
async def get_errored_state(apply_id: str) -> APIResponse:
"""Retrieve the errored state from a failed apply.
Gets information about a state file that failed to upload during an apply,
providing access to the state data for recovery purposes.
API endpoint: GET /applies/{apply_id}/errored-state
Args:
apply_id: The ID of the apply with a failed state upload (format: "apply-xxxxxxxx")
Returns:
Information about the errored state including access details.
The redirect to the state file is automatically followed.
See:
docs/tools/apply.md for reference documentation
"""
# Validate parameters
params = ApplyErroredStateRequest(apply_id=apply_id)
# Make API request - redirect handling happens automatically in the API client
return await api_request(f"applies/{params.apply_id}/errored-state")
@handle_api_errors
async def get_apply_logs(apply_id: str) -> APIResponse:
"""Retrieve logs from an apply.
Gets the raw log output from a Terraform Cloud apply operation,
providing detailed information about resource changes and any errors.
API endpoint: Uses the log-read-url from GET /applies/{apply_id}
Args:
apply_id: The ID of the apply to retrieve logs for (format: "apply-xxxxxxxx")
Returns:
The raw logs from the apply operation. The redirect to the log file
is automatically followed.
See:
docs/tools/apply.md for reference documentation
"""
# Validate parameters using existing model
params = ApplyRequest(apply_id=apply_id)
# First get apply details to get the log URL
apply_details = await api_request(f"applies/{params.apply_id}")
# Extract log read URL
log_read_url = (
apply_details.get("data", {}).get("attributes", {}).get("log-read-url")
)
if not log_read_url:
return {"error": "No log URL available for this apply"}
# Use the enhanced api_request to fetch logs from the external URL
return await api_request(log_read_url, external_url=True, accept_text=True)
```
--------------------------------------------------------------------------------
/docs/models/assessment_result.md:
--------------------------------------------------------------------------------
```markdown
# Assessment Results Models
This document describes the Pydantic models used for validating input to the Assessment Results API.
## Overview
Assessment Results models provide validation for requests to Terraform Cloud's health assessment API endpoints. Health assessments automatically check whether deployed infrastructure matches the requirements defined in Terraform configurations.
## Models
### AssessmentResultStatus
```python
class AssessmentResultStatus(str, Enum):
"""Status options for assessment results in Terraform Cloud."""
PENDING = "pending"
QUEUED = "queued"
RUNNING = "running"
ERRORED = "errored"
CANCELED = "canceled"
FINISHED = "finished"
```
This enum defines the various states an assessment result can be in during its lifecycle:
- `PENDING`: Assessment has not yet started
- `QUEUED`: Assessment is queued for execution
- `RUNNING`: Assessment is currently running
- `ERRORED`: Assessment has encountered an error
- `CANCELED`: Assessment was canceled
- `FINISHED`: Assessment has completed successfully
### AssessmentResultRequest
```python
class AssessmentResultRequest(APIRequest):
"""Request model for retrieving assessment result details."""
assessment_result_id: str = Field(
...,
description="The ID of the assessment result to retrieve",
pattern=r"^asmtres-[a-zA-Z0-9]{8,}$",
)
```
This model validates the assessment result ID parameter for basic API requests:
- `assessment_result_id`: The ID of the assessment result to retrieve (format: "asmtres-xxxxxxxx")
- Required field (no default value)
- Must match the pattern of Terraform Cloud assessment result IDs
- No alias needed as the field name matches the API parameter name
### AssessmentOutputRequest
```python
class AssessmentOutputRequest(AssessmentResultRequest):
"""Request model for retrieving assessment result outputs."""
pass # Uses the same validation as the parent class
```
This model extends `AssessmentResultRequest` to validate requests for specialized outputs:
- Used for JSON plan output requests
- Used for provider schema requests
- Used for log output requests
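A brief validation sketch follows; the import path matches the one used by the tools module, and the IDs are illustrative only.
```python
from pydantic import ValidationError

from terraform_cloud_mcp.models.assessment_results import AssessmentResultRequest

# A well-formed ID passes validation.
request = AssessmentResultRequest(assessment_result_id="asmtres-UG5rE9L1373hMYMA")
print(request.assessment_result_id)

# A malformed ID is rejected before any API call is made.
try:
    AssessmentResultRequest(assessment_result_id="not-an-assessment-id")
except ValidationError as exc:
    print(f"rejected: {exc}")
```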
## API Response Structure
While the responses are not validated with Pydantic models, they typically follow this structure:
```json
{
"id": "asmtres-UG5rE9L1373hMYMA",
"type": "assessment-results",
"data": {
"attributes": {
"drifted": true,
"succeeded": true,
"error-msg": null,
"created-at": "2022-07-02T22:29:58+00:00"
},
"links": {
"self": "/api/v2/assessment-results/asmtres-UG5rE9L1373hMYMA/",
"json-output": "/api/v2/assessment-results/asmtres-UG5rE9L1373hMYMA/json-output",
"json-schema": "/api/v2/assessment-results/asmtres-UG5rE9L1373hMYMA/json-schema",
"log-output": "/api/v2/assessment-results/asmtres-UG5rE9L1373hMYMA/log-output"
}
}
}
```
For specialized endpoint responses like JSON output, JSON schema, and log output, the response is provided in the "content" field as raw text:
```json
{
"content": "Raw output content here..."
}
```
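Callers that need structured data can decode the string themselves. A minimal sketch, with an inline `response` standing in for the JSON output endpoint's result:
```python
import json

# Trimmed stand-in for a JSON output response; "content" holds the plan as raw text.
response = {"content": "{\"format_version\": \"1.0\", \"terraform_version\": \"1.3.0\"}"}

plan = json.loads(response["content"])
print(plan["format_version"], plan["terraform_version"])
```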
## Reference
For more detailed information, see:
- [Terraform Cloud API Documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/assessment-results)
- Tools implementation in `terraform_cloud_mcp/tools/assessment_results.py`
```
--------------------------------------------------------------------------------
/docs/tools/assessment_results.md:
--------------------------------------------------------------------------------
```markdown
# Assessment Results API Tools
This document describes the tools for interacting with Terraform Cloud Health Assessment Results.
Health assessments automatically verify that real infrastructure matches the requirements defined in Terraform configurations.
## Overview
Health assessments provide automated drift detection and continuous validation for Terraform Cloud workspaces.
The assessment results API allows you to retrieve assessment details, plans, schemas, and logs.
> **Note**: Health assessments are a premium feature available in HCP Terraform Plus and Premium editions.
## Available Tools
| Tool Name | Description |
|-----------|-------------|
| `get_assessment_result_details` | Retrieve basic information about an assessment result |
| `get_assessment_json_output` | Get the JSON plan from an assessment |
| `get_assessment_json_schema` | Get the provider schema from an assessment |
| `get_assessment_log_output` | Get logs from an assessment run |
## Tool Usage
### Get Assessment Result Details
Retrieves information about a specific health assessment result, including status and drift detection.
```python
get_assessment_result_details(assessment_result_id="asmtres-XXXXXXXXXX")
```
#### Parameters
- `assessment_result_id`: The ID of the assessment result (format: "asmtres-XXXXXXXXXX")
#### Response Example
```json
{
"id": "asmtres-UG5rE9L1373hMYMA",
"type": "assessment-results",
"data": {
"attributes": {
"drifted": true,
"succeeded": true,
"error-msg": null,
"created-at": "2022-07-02T22:29:58+00:00",
},
"links": {
"self": "/api/v2/assessment-results/asmtres-UG5rE9L1373hMYMA/",
"json-output": "/api/v2/assessment-results/asmtres-UG5rE9L1373hMYMA/json-output",
"json-schema": "/api/v2/assessment-results/asmtres-UG5rE9L1373hMYMA/json-schema",
"log-output": "/api/v2/assessment-results/asmtres-UG5rE9L1373hMYMA/log-output"
}
}
}
```
### Get Assessment JSON Output
Retrieves the JSON plan output from an assessment result.
```python
get_assessment_json_output(assessment_result_id="asmtres-XXXXXXXXXX")
```
#### Parameters
- `assessment_result_id`: The ID of the assessment result (format: "asmtres-XXXXXXXXXX")
#### Response Example
```json
{
"content": "{\n \"format_version\": \"1.0\",\n \"terraform_version\": \"1.3.0\",\n \"planned_values\": { ... },\n \"resource_changes\": [ ... ],\n \"configuration\": { ... }\n}"
}
```
### Get Assessment JSON Schema
Retrieves the provider schema from an assessment result.
```python
get_assessment_json_schema(assessment_result_id="asmtres-XXXXXXXXXX")
```
#### Parameters
- `assessment_result_id`: The ID of the assessment result (format: "asmtres-XXXXXXXXXX")
#### Response Example
```json
{
"content": "{\n \"format_version\": \"1.0\",\n \"provider_schemas\": { ... }\n}"
}
```
### Get Assessment Log Output
Retrieves the logs from an assessment run.
```python
get_assessment_log_output(assessment_result_id="asmtres-XXXXXXXXXX")
```
#### Parameters
- `assessment_result_id`: The ID of the assessment result (format: "asmtres-XXXXXXXXXX")
#### Response Example
```json
{
"content": "Terraform initiated...\nGathering resources...\nDetecting drift...\n"
}
```
## Authentication Requirements
The assessment results endpoints have specific authentication requirements:
- `get_assessment_result_details`: Accessible to users with read access to a workspace
- All other endpoints (`json-output`, `json-schema`, `log-output`): Require admin level access and cannot be accessed with organization tokens. You must use a user token or team token with admin access.
## Reference
For more detailed information, see the [official Terraform Cloud API documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/assessment-results).
```
--------------------------------------------------------------------------------
/docs/tools/plan.md:
--------------------------------------------------------------------------------
```markdown
# Plan Management Tools
This module provides tools for retrieving information about Terraform plans.
## Overview
A plan in Terraform Cloud contains information about the changes Terraform will make to your infrastructure, including resources to add, change, or delete. Plans represent the execution plan that Terraform generates during a run before any changes are applied.
## API Reference
These tools interact with the Terraform Cloud Plan API:
- [Plan API Documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans)
- [Run Workflow](https://developer.hashicorp.com/terraform/cloud-docs/run/states)
## Tools Reference
### get_plan_details
**Function:** `get_plan_details(plan_id: str) -> Dict[str, Any]`
**Description:** Retrieves comprehensive information about a specific plan including its status, resource counts, and timestamps.
**Parameters:**
- `plan_id` (str): The ID of the plan to retrieve details for (format: "plan-xxxxxxxx")
**Returns:** JSON response containing plan details including:
- Status (queued, running, errored, canceled, finished)
- Status timestamps (queued-at, started-at, finished-at)
- Resource statistics (additions, changes, destructions)
- Log read URL
- Relationships to runs and configuration versions
**Notes:**
- Plan IDs can be found in the relationships of a run object
- Successful plans have a status of "finished"
- The log-read-url is a pre-signed URL to access the full plan logs
- Resource counts are useful for understanding the impact of changes
### get_plan_json_output
**Function:** `get_plan_json_output(plan_id: str) -> Dict[str, Any]`
**Description:** Retrieves the JSON representation of a plan's execution details.
**Parameters:**
- `plan_id` (str): The ID of the plan to retrieve JSON output for (format: "plan-xxxxxxxx")
**Returns:** Machine-readable JSON format of the plan containing:
- Detailed resource changes
- Provider configurations
- Output changes
- Variable values used
- Complete planned actions in structured format
**Notes:**
- Automatically follows HTTP redirects to retrieve the plan JSON content
- Ideal for programmatic analysis of infrastructure changes
- Contains much more detailed information than the summary in plan details
- Useful for custom validation or visualization of changes
### get_run_plan_json_output
**Function:** `get_run_plan_json_output(run_id: str) -> Dict[str, Any]`
**Description:** Retrieves the JSON representation of a run's current plan execution details.
**Parameters:**
- `run_id` (str): The ID of the run to retrieve plan JSON output for (format: "run-xxxxxxxx")
**Returns:** Same JSON plan format as get_plan_json_output but referenced by run ID.
**Notes:**
- Provides the same JSON plan data as get_plan_json_output
- Useful when you only have the run ID and not the plan ID
- Automatically follows HTTP redirects to retrieve the content
- Returns the current plan for the run
### get_plan_logs
**Function:** `get_plan_logs(plan_id: str) -> Dict[str, Any]`
**Description:** Retrieves the raw log output from a Terraform plan operation.
**Parameters:**
- `plan_id` (str): The ID of the plan to retrieve logs for (format: "plan-xxxxxxxx")
**Returns:** Text content of the plan logs including:
- Terraform output during planning
- Resource change details in human-readable format
- Warnings and errors that occurred during planning
**Notes:**
- Handles HTTP redirects automatically to retrieve the log content
- Useful for troubleshooting plan failures
- Contains the same output as displayed in Terraform Cloud UI
- May include sensitive information depending on your code
**Common Error Scenarios:**
| Error | Cause | Solution |
|-------|-------|----------|
| 404 | Plan not found | Verify the plan ID is correct |
| 403 | Insufficient permissions | Ensure your token has read access to the workspace |
| 422 | Invalid plan ID format | Ensure the ID follows the format "plan-xxxxxxxx" |
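For orientation, here is a minimal usage sketch combining plan details with the JSON output. The imports mirror `terraform_cloud_mcp/tools/plans.py`; the plan ID is a placeholder and a valid Terraform Cloud token is assumed to be configured in the environment.
```python
import asyncio

from terraform_cloud_mcp.tools.plans import (
    get_plan_details,
    get_plan_json_output,
)


async def summarize_plan(plan_id: str) -> None:
    details = await get_plan_details(plan_id)
    attrs = details.get("data", {}).get("attributes", {})
    print(f"{plan_id}: status={attrs.get('status')}")

    if attrs.get("status") == "finished":
        # Machine-readable plan, suitable for custom analysis of resource changes.
        plan_json = await get_plan_json_output(plan_id)
        print(len(plan_json.get("resource_changes", [])), "resource changes")


asyncio.run(summarize_plan("plan-xxxxxxxxxxxxxxxx"))
```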
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/tools/plans.py:
--------------------------------------------------------------------------------
```python
"""Terraform Cloud plan management tools.
This module provides tools for working with plans in Terraform Cloud.
It includes functions to retrieve plan details and JSON output.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans
"""
from ..api.client import api_request
from ..models.base import APIResponse
from ..models.plans import (
PlanJsonOutputRequest,
PlanRequest,
RunPlanJsonOutputRequest,
)
from ..utils.decorators import handle_api_errors
@handle_api_errors
async def get_plan_details(plan_id: str) -> APIResponse:
"""Get details for a specific plan.
Retrieves comprehensive information about a plan including its current status,
logs, resource counts, and relationship to other resources.
API endpoint: GET /plans/{plan_id}
Args:
plan_id: The ID of the plan to retrieve details for (format: "plan-xxxxxxxx")
Returns:
Plan details including status, timestamps, and resource change counts
See:
docs/tools/plan.md for reference documentation
"""
# Validate parameters
params = PlanRequest(plan_id=plan_id)
# Make API request
return await api_request(f"plans/{params.plan_id}")
@handle_api_errors
async def get_plan_json_output(plan_id: str) -> APIResponse:
"""Retrieve the JSON execution plan.
Gets the JSON representation of a plan's execution details, providing a
machine-readable format of the planned resource changes.
API endpoint: GET /plans/{plan_id}/json-output
Args:
plan_id: The ID of the plan to retrieve JSON output for (format: "plan-xxxxxxxx")
Returns:
The complete JSON formatted plan with resource changes, metadata,
and planned actions. The redirect is automatically followed.
See:
docs/tools/plan.md for reference documentation
"""
# Validate parameters
params = PlanJsonOutputRequest(plan_id=plan_id)
# Make API request
return await api_request(f"plans/{params.plan_id}/json-output")
@handle_api_errors
async def get_run_plan_json_output(run_id: str) -> APIResponse:
"""Retrieve the JSON execution plan from a run.
Gets the JSON representation of a run's current plan execution details,
providing a machine-readable format of the planned resource changes.
API endpoint: GET /runs/{run_id}/plan/json-output
Args:
run_id: The ID of the run to retrieve plan JSON output for (format: "run-xxxxxxxx")
Returns:
The complete JSON formatted plan with resource changes, metadata,
and planned actions. The redirect is automatically followed.
See:
docs/tools/plan.md for reference documentation
"""
# Validate parameters
params = RunPlanJsonOutputRequest(run_id=run_id)
# Make API request
return await api_request(f"runs/{params.run_id}/plan/json-output")
@handle_api_errors
async def get_plan_logs(plan_id: str) -> APIResponse:
"""Retrieve logs from a plan.
Gets the raw log output from a Terraform Cloud plan operation,
providing detailed information about the execution plan.
API endpoint: Uses the log-read-url from GET /plans/{plan_id}
Args:
plan_id: The ID of the plan to retrieve logs for (format: "plan-xxxxxxxx")
Returns:
The raw logs from the plan operation. The redirect to the log file
is automatically followed.
See:
docs/tools/plan.md for reference documentation
"""
# Validate parameters using existing model
params = PlanRequest(plan_id=plan_id)
# First get plan details to get the log URL
plan_details = await api_request(f"plans/{params.plan_id}")
# Extract log read URL
log_read_url = (
plan_details.get("data", {}).get("attributes", {}).get("log-read-url")
)
if not log_read_url:
return {"error": "No log URL available for this plan"}
# Use the enhanced api_request to fetch logs from the external URL
return await api_request(log_read_url, external_url=True, accept_text=True)
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/applies.py:
--------------------------------------------------------------------------------
```python
"""Apply models for Terraform Cloud API
This module contains models for Terraform Cloud apply-related requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies
"""
from enum import Enum
from typing import Optional
from pydantic import Field
from .base import APIRequest
class ApplyStatus(str, Enum):
"""Status options for applies in Terraform Cloud.
Defines the various states an apply can be in during its lifecycle:
- PENDING: Apply has not yet started
- MANAGED_QUEUED/QUEUED: Apply is queued for execution
- RUNNING: Apply is currently running
- ERRORED: Apply has encountered an error
- CANCELED: Apply was canceled
- FINISHED: Apply has completed successfully
- UNREACHABLE: Apply is in an unreachable state
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies
See:
docs/models/apply.md for reference
"""
PENDING = "pending"
MANAGED_QUEUED = "managed_queued"
QUEUED = "queued"
RUNNING = "running"
ERRORED = "errored"
CANCELED = "canceled"
FINISHED = "finished"
UNREACHABLE = "unreachable"
class ExecutionDetails(APIRequest):
"""Model for apply execution details.
Represents execution mode specific details for an apply, including agent
information when running in agent execution mode.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies
See:
docs/models/apply.md for reference
"""
agent_id: Optional[str] = Field(
None,
alias="agent-id",
description="ID of the agent that ran the apply",
)
agent_name: Optional[str] = Field(
None,
alias="agent-name",
description="Name of the agent that ran the apply",
)
agent_pool_id: Optional[str] = Field(
None,
alias="agent-pool-id",
description="ID of the agent pool the apply ran in",
)
agent_pool_name: Optional[str] = Field(
None,
alias="agent-pool-name",
description="Name of the agent pool the apply ran in",
)
# Additional execution mode details can be added here
class StatusTimestamps(APIRequest):
"""Model for apply execution timestamps.
Captures the timestamps for various stages in an apply's lifecycle.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies
See:
docs/models/apply.md for reference
"""
queued_at: Optional[str] = Field(
None,
alias="queued-at",
description="When the apply was queued",
)
pending_at: Optional[str] = Field(
None,
alias="pending-at",
description="When the apply entered pending state",
)
started_at: Optional[str] = Field(
None,
alias="started-at",
description="When the apply execution started",
)
finished_at: Optional[str] = Field(
None,
alias="finished-at",
description="When the apply execution completed",
)
class ApplyRequest(APIRequest):
"""Request model for retrieving an apply.
Used to validate the apply ID parameter for API requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies#show-an-apply
See:
docs/models/apply.md for reference
"""
apply_id: str = Field(
...,
# No alias needed as field name matches API parameter
description="The ID of the apply to retrieve",
pattern=r"^apply-[a-zA-Z0-9]{16}$", # Standard apply ID pattern
)
class ApplyErroredStateRequest(APIRequest):
"""Request model for retrieving an apply's errored state.
Used to validate the apply ID parameter for errored state API requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies#recover-a-failed-state-upload-after-applying
See:
docs/models/apply.md for reference
"""
apply_id: str = Field(
...,
# No alias needed as field name matches API parameter
description="The ID of the apply with a failed state upload",
pattern=r"^apply-[a-zA-Z0-9]{16}$", # Standard apply ID pattern
)
# Response handling is implemented through raw dictionaries
```
--------------------------------------------------------------------------------
/docs/conversations/assessment-results-conversation.md:
--------------------------------------------------------------------------------
```markdown
# Assessment Results Conversation Example
This document demonstrates how to use the assessment results tools in a conversation with Claude.
## Retrieving Assessment Results
```
### User
I need to check if there's been any drift in my infrastructure based on the latest health assessment. The assessment result ID is asmtres-UG5rE9L1373hMYMA.
### Claude
I'll help you check for drift in your infrastructure by retrieving the latest health assessment results. Let me fetch that information for you.
[Claude uses get_assessment_result_details tool]
Here's what I found:
- The assessment was completed successfully
- Drift was detected in your infrastructure
- The assessment was created on July 2, 2022
- There are no error messages
Would you like me to retrieve the detailed JSON output to see exactly what resources have drifted?
```
```
### User
Yes, please show me the JSON output so I can see what resources have drifted.
### Claude
I'll get the detailed JSON output that shows exactly which resources have drifted.
[Claude uses get_assessment_json_output tool]
Here's a summary of the drift detected:
1. **EC2 Instance (aws_instance.web_server)**
- Current tags don't match configuration
- Security group attachments have changed
2. **S3 Bucket (aws_s3_bucket.logs)**
- Versioning configuration has changed from enabled to disabled
3. **IAM Role (aws_iam_role.lambda_execution)**
- An additional policy has been attached that wasn't in the Terraform configuration
Would you like me to help you understand how to address this drift?
```
```
### User
Yes, what are my options for addressing this drift?
### Claude
You have several options for addressing the infrastructure drift:
1. **Update Your Terraform Configuration**:
- Modify your Terraform code to match the current state
- This approach "accepts" the current state as the new desired state
- Use terraform plan to verify changes before applying
2. **Apply Your Existing Configuration**:
- Run terraform apply to revert infrastructure back to the state defined in your configuration
- This will undo manual changes and enforce your existing code
- Good for cases where the drift was accidental or unauthorized
3. **Selective Updates**:
- Import some real-world changes into your configuration and revert others
- For example, keep the new IAM policies but revert the S3 bucket versioning change
4. **Investigate Before Deciding**:
- Review the logs to understand when and possibly why these changes occurred
- Let me fetch the assessment logs for more context:
[Claude uses get_assessment_log_output tool]
Based on what I'm seeing in the logs, I recommend first updating your Terraform configuration to document the security group changes on the EC2 instance as those appear to be intentional, but reverting the S3 bucket versioning change as that appears to be accidental and could affect your compliance requirements.
Would you like me to help you draft the Terraform code updates needed to address this drift?
```
## Investigating Provider Schema
```
### User
Before updating the code, I want to check the provider schema to make sure I understand all available attributes for the S3 bucket resource.
### Claude
Good idea! Let me retrieve the provider schema from the assessment result to show you all available attributes for the S3 bucket resource.
[Claude uses get_assessment_json_schema tool]
Here are the key attributes for the AWS S3 bucket resource based on the provider schema:
**aws_s3_bucket Attributes:**
**Required:**
- bucket - Name of the bucket
**Optional:**
- acl - Canned ACL to apply
- bucket_prefix - Creates a unique bucket name with this prefix
- force_destroy - Allow deletion even if bucket not empty
- versioning - Configuration for versioning
- enabled - Whether versioning is enabled
- mfa_delete - Whether MFA is required for deletion
- website - Website configuration
- cors_rule - CORS configuration
- lifecycle_rule - Lifecycle rules
- replication_configuration - Replication configuration
- server_side_encryption_configuration - Server-side encryption
- tags - Key-value map of tags
For your issue with versioning, you'll want to focus on the versioning block. The schema confirms you need to set enabled = true to maintain versioning on your bucket.
Would you like me to help you draft the necessary Terraform code to restore the versioning configuration?
```
```
--------------------------------------------------------------------------------
/docs/conversations/apply-management-conversation.md:
--------------------------------------------------------------------------------
```markdown
# Apply Management Conversation Examples
This document provides example conversations demonstrating how to use the apply management tools in the Terraform Cloud MCP.
## Getting Apply Details
```
**User:** I'd like to check on the status of an apply in my Terraform Cloud workspace. The apply ID is apply-gH2Ns7VBP9a2QMcr
**Claude:** I'll retrieve the details for that apply for you.
[Claude uses get_apply_details tool]
Here are the details for apply-gH2Ns7VBP9a2QMcr:
Status: finished
Resources added: 3
Resources changed: 1
Resources destroyed: 0
The apply was queued at 2023-11-15T14:31:05Z, started at 2023-11-15T14:32:18Z, and finished at 2023-11-15T14:35:42Z.
The operation took about 3 minutes and 24 seconds to complete.
```
## Retrieving Apply Logs
```
**User:** Can you show me the logs for apply-gH2Ns7VBP9a2QMcr? I want to see what changes were made.
**Claude:** I'll retrieve the logs for that apply.
[Claude uses get_apply_logs tool]
Here are the logs from the apply:
The apply used Terraform v1.4.6 on linux and successfully completed the following actions:
- Created an AWS security group (sg-0a1b2c3d4e5f6g7h8) in 3 seconds
- Created an AWS instance for the web server (i-0a1b2c3d4e5f6g7h8) which took 30 seconds
- Created an S3 bucket for logs (my-example-logs-bucket) in 5 seconds
- Modified a security group rule to allow HTTPS traffic in 2 seconds
The apply completed successfully with 3 resources added and 1 resource changed, taking about 40 seconds total.
Would you like me to explain any specific part of the apply process?
```
## Working with Errored States
```
**User:** I found an apply with an error: apply-rT5Kp2VsMx3zNPqf. Can you check if there's a failed state to recover?
**Claude:** I'll check if there's an errored state for that apply that we can recover.
[Claude uses get_errored_state tool]
I found the failed state upload for apply-rT5Kp2VsMx3zNPqf. It contains state data for 12 resources.
The state version is 4, and it includes resources like:
- aws_vpc.main (vpc-0a1b2c3d)
- aws_subnet.public (2 instances)
- aws_subnet.private (2 instances)
- aws_route_table.public
- aws_route_table.private
- aws_internet_gateway.main
- aws_security_group.web
- aws_instance.app_server (2 instances)
- aws_s3_bucket.logs
This state can be recovered to prevent drift between what Terraform Cloud knows about your infrastructure and what actually exists. Would you like me to help with the recovery process?
```
## Understanding Apply Failures
```
**User:** Can you check the details of apply-xYz123AbCd and tell me why it failed?
**Claude:** I'll examine what went wrong with that apply.
[Claude uses get_apply_details tool]
[Claude uses get_apply_logs tool]
I've analyzed the failed apply. Here's what happened:
The apply failed during resource creation with error code 403 (Forbidden).
The specific error occurred while creating an AWS IAM role:
- Resource: aws_iam_role.lambda_execution_role
- Error: "AccessDenied: User is not authorized to perform iam:CreateRole"
The Terraform credentials being used don't have sufficient permissions to create IAM roles. The apply attempted to create 3 resources but failed after creating the first 2 (an S3 bucket and Lambda function), failing on the IAM role creation.
To resolve this, you'll need to either:
1. Add IAM permissions to the credentials being used
2. Have someone with appropriate permissions create the IAM role separately
3. Use a different approach that doesn't require creating new IAM roles
```
## Comparing Multiple Applies
```
**User:** Can you compare the details of two different applies for me? apply-8HqpY2tKnGbR5xVz and apply-cJm3L7sWvZ4dNfQk
**Claude:** I'll retrieve and compare the details of both applies.
[Claude uses get_apply_details tool]
[Claude uses get_apply_details tool]
Here's a comparison of the two applies:
Apply 1 (apply-8HqpY2tKnGbR5xVz):
- Status: finished
- Duration: 4m 12s
- Resources Added: 8
- Resources Changed: 3
- Resources Destroyed: 0
- Total Actions: 11
- Started: 2025-03-10T15:22:18Z
Apply 2 (apply-cJm3L7sWvZ4dNfQk):
- Status: finished
- Duration: 2m 38s
- Resources Added: 2
- Resources Changed: 5
- Resources Destroyed: 4
- Total Actions: 11
- Started: 2025-03-15T09:45:32Z
Key differences:
- Apply 2 completed faster (2m 38s vs 4m 12s)
- Apply 1 created more resources (8 vs 2)
- Apply 2 destroyed resources (4) while Apply 1 had none
- Both applies had the same total number of resource actions (11)
Would you like me to analyze any specific aspect of these applies in more detail?
```
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/__init__.py:
--------------------------------------------------------------------------------
```python
"""Data models for Terraform Cloud MCP"""
# Re-export the base models for easier access
from .base import ( # noqa: F401
BaseModelConfig,
APIRequest,
APIResponse,
ReqT,
ExecutionMode,
CollaboratorAuthPolicy,
)
# Import specific models
from .account import AccountDetailsRequest # noqa: F401
from .applies import ( # noqa: F401
ApplyStatus,
ExecutionDetails as ApplyExecutionDetails,
StatusTimestamps as ApplyStatusTimestamps,
ApplyRequest,
ApplyErroredStateRequest,
)
from .cost_estimates import ( # noqa: F401
CostEstimateStatus,
StatusTimestamps as CostEstimateStatusTimestamps,
CostEstimateRequest,
)
from .organizations import ( # noqa: F401
OrganizationDetailsRequest,
OrganizationEntitlementsRequest,
OrganizationDeleteRequest,
OrganizationListRequest,
OrganizationCreateRequest,
OrganizationUpdateRequest,
OrganizationParams,
)
from .plans import ( # noqa: F401
PlanStatus,
ExecutionDetails,
StatusTimestamps,
PlanRequest,
PlanJsonOutputRequest,
RunPlanJsonOutputRequest,
)
from .projects import ( # noqa: F401
TagBinding,
ProjectListRequest,
BaseProjectRequest,
ProjectCreateRequest,
ProjectUpdateRequest,
ProjectParams,
ProjectTagBindingRequest,
WorkspaceMoveRequest,
)
from .state_versions import ( # noqa: F401
StateVersionStatus,
StateVersionListRequest,
StateVersionRequest,
CurrentStateVersionRequest,
StateVersionCreateRequest,
StateVersionParams,
)
from .state_version_outputs import ( # noqa: F401
StateVersionOutputListRequest,
StateVersionOutputRequest,
)
from .variables import ( # noqa: F401
VariableCategory,
WorkspaceVariable,
WorkspaceVariableParams,
WorkspaceVariableCreateRequest,
WorkspaceVariableUpdateRequest,
VariableSet,
VariableSetParams,
VariableSetCreateRequest,
VariableSetUpdateRequest,
VariableSetVariable,
VariableSetAssignmentRequest,
VariableSetVariableParams,
)
from .runs import ( # noqa: F401
RunOperation,
RunStatus,
RunSource,
RunStatusGroup,
RunVariable,
RunListInWorkspaceRequest,
RunListInOrganizationRequest,
RunCreateRequest,
RunActionRequest,
RunParams,
)
from .workspaces import ( # noqa: F401
VcsRepoConfig,
WorkspaceListRequest,
WorkspaceCreateRequest,
WorkspaceUpdateRequest,
WorkspaceParams,
)
# Define __all__ to control what's imported with wildcard imports
__all__ = [
# Base models
"BaseModelConfig",
"APIRequest",
"APIResponse",
"ReqT",
# Common enums
"CollaboratorAuthPolicy",
"ExecutionMode",
# Account models
"AccountDetailsRequest",
# Apply models
"ApplyStatus",
"ApplyExecutionDetails",
"ApplyStatusTimestamps",
"ApplyRequest",
"ApplyErroredStateRequest",
# Cost estimate models
"CostEstimateStatus",
"CostEstimateStatusTimestamps",
"CostEstimateRequest",
# Organization models
"OrganizationDetailsRequest",
"OrganizationEntitlementsRequest",
"OrganizationDeleteRequest",
"OrganizationListRequest",
"OrganizationCreateRequest",
"OrganizationUpdateRequest",
"OrganizationParams",
# Plan models
"PlanStatus",
"ExecutionDetails",
"StatusTimestamps",
"PlanRequest",
"PlanJsonOutputRequest",
"RunPlanJsonOutputRequest",
# Project models
"TagBinding",
"ProjectListRequest",
"BaseProjectRequest",
"ProjectCreateRequest",
"ProjectUpdateRequest",
"ProjectParams",
"ProjectTagBindingRequest",
"WorkspaceMoveRequest",
# Run models
"RunOperation",
"RunStatus",
"RunSource",
"RunStatusGroup",
"RunVariable",
"RunListInWorkspaceRequest",
"RunListInOrganizationRequest",
"RunCreateRequest",
"RunActionRequest",
"RunParams",
# State Version models
"StateVersionStatus",
"StateVersionListRequest",
"StateVersionRequest",
"CurrentStateVersionRequest",
"StateVersionCreateRequest",
"StateVersionParams",
# State Version Output models
"StateVersionOutputListRequest",
"StateVersionOutputRequest",
# Variable models
"VariableCategory",
"WorkspaceVariable",
"WorkspaceVariableParams",
"WorkspaceVariableCreateRequest",
"WorkspaceVariableUpdateRequest",
"VariableSet",
"VariableSetParams",
"VariableSetCreateRequest",
"VariableSetUpdateRequest",
"VariableSetVariable",
"VariableSetAssignmentRequest",
"VariableSetVariableParams",
# Workspace models
"VcsRepoConfig",
"WorkspaceListRequest",
"WorkspaceCreateRequest",
"WorkspaceUpdateRequest",
"WorkspaceParams",
]
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/models/plans.py:
--------------------------------------------------------------------------------
```python
"""Plan models for Terraform Cloud API
This module contains models for Terraform Cloud plan-related requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans
"""
from enum import Enum
from typing import Optional
from pydantic import Field
from .base import APIRequest
class PlanStatus(str, Enum):
"""Status options for plans in Terraform Cloud.
Defines the various states a plan can be in during its lifecycle:
- PENDING: Plan has not yet started
- MANAGED_QUEUED/QUEUED: Plan is queued for execution
- RUNNING: Plan is currently running
- ERRORED: Plan has encountered an error
- CANCELED: Plan was canceled
- FINISHED: Plan has completed successfully
- UNREACHABLE: Plan is in an unreachable state
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans
See:
docs/models/plan.md for reference
"""
PENDING = "pending"
MANAGED_QUEUED = "managed_queued"
QUEUED = "queued"
RUNNING = "running"
ERRORED = "errored"
CANCELED = "canceled"
FINISHED = "finished"
UNREACHABLE = "unreachable"
class ExecutionDetails(APIRequest):
"""Model for plan execution details.
Represents execution mode specific details for a plan, including agent
information when running in agent execution mode.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans
See:
docs/models/plan.md for reference
"""
agent_id: Optional[str] = Field(
None,
alias="agent-id",
description="ID of the agent that ran the plan",
)
agent_name: Optional[str] = Field(
None,
alias="agent-name",
description="Name of the agent that ran the plan",
)
agent_pool_id: Optional[str] = Field(
None,
alias="agent-pool-id",
description="ID of the agent pool the plan ran in",
)
agent_pool_name: Optional[str] = Field(
None,
alias="agent-pool-name",
description="Name of the agent pool the plan ran in",
)
# Additional execution mode details can be added here
class StatusTimestamps(APIRequest):
"""Model for plan execution timestamps.
Captures the timestamps for various stages in a plan's lifecycle.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans
See:
docs/models/plan.md for reference
"""
queued_at: Optional[str] = Field(
None,
alias="queued-at",
description="When the plan was queued",
)
pending_at: Optional[str] = Field(
None,
alias="pending-at",
description="When the plan entered pending state",
)
started_at: Optional[str] = Field(
None,
alias="started-at",
description="When the plan execution started",
)
finished_at: Optional[str] = Field(
None,
alias="finished-at",
description="When the plan execution completed",
)
class PlanRequest(APIRequest):
"""Request model for retrieving a plan.
Used to validate the plan ID parameter for API requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans#show-a-plan
See:
docs/models/plan.md for reference
"""
plan_id: str = Field(
...,
# No alias needed as field name matches API parameter
description="The ID of the plan to retrieve",
pattern=r"^plan-[a-zA-Z0-9]{16}$", # Standard plan ID pattern
)
class PlanJsonOutputRequest(APIRequest):
"""Request model for retrieving a plan's JSON output.
Used to validate the plan ID parameter for JSON output API requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans#retrieve-the-json-execution-plan
See:
docs/models/plan.md for reference
"""
plan_id: str = Field(
...,
# No alias needed as field name matches API parameter
description="The ID of the plan to retrieve JSON output for",
pattern=r"^plan-[a-zA-Z0-9]{16}$", # Standard plan ID pattern
)
class RunPlanJsonOutputRequest(APIRequest):
"""Request model for retrieving a run's plan JSON output.
Used to validate the run ID parameter for JSON output API requests.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans#retrieve-the-json-execution-plan-from-a-run
See:
docs/models/plan.md for reference
"""
run_id: str = Field(
...,
# No alias needed as field name matches API parameter
description="The ID of the run to retrieve plan JSON output for",
pattern=r"^run-[a-zA-Z0-9]{16}$", # Standard run ID pattern
)
# Response handling is implemented through raw dictionaries
```
--------------------------------------------------------------------------------
/docs/models/state_versions.md:
--------------------------------------------------------------------------------
```markdown
# State Version Models
This document describes the data models used for state version operations in Terraform Cloud.
## Overview
State version models provide structure and validation for interacting with the Terraform Cloud State Versions API. These models define the format of state version data, status values, and request validation for managing point-in-time snapshots of Terraform state files.
## Models Reference
### StateVersionStatus
**Type:** Enum (string)
**Description:** Represents the possible states a state version can be in during its lifecycle.
**Values:**
- `pending`: State version has been created but the state data is not encoded within the request
- `finalized`: State version has been successfully uploaded or created with valid state attribute
- `discarded`: State version was discarded because it was superseded by a newer state version
- `backing_data_soft_deleted`: *Enterprise only* - backing files are marked for garbage collection
- `backing_data_permanently_deleted`: *Enterprise only* - backing files have been permanently deleted
**Usage Context:**
Used to filter state versions by their current status and determine if processing is complete.
### StateVersionListRequest
**Type:** Request Validation Model
**Description:** Used to validate parameters for listing state versions in a workspace.
**Fields:**
- `workspace_name` (string, required): The name of the workspace to list state versions for
- `organization` (string, required): The name of the organization that owns the workspace
- `filter_status` (StateVersionStatus, optional): Filter state versions by status
- `page_number` (integer, optional): Page number to fetch (default: 1, minimum: 1)
- `page_size` (integer, optional): Number of results per page (default: 20, max: 100)
### StateVersionRequest
**Type:** Request Validation Model
**Description:** Used to validate state version ID parameters in API requests.
**Fields:**
- `state_version_id` (string, required): The ID of the state version to retrieve
- Format: Must match pattern "sv-[a-zA-Z0-9]{16}"
- Example: "sv-BPvFFrYCqRV6qVBK"
### CurrentStateVersionRequest
**Type:** Request Validation Model
**Description:** Used to validate workspace ID parameters for current state version requests.
**Fields:**
- `workspace_id` (string, required): The ID of the workspace
- Format: Must match pattern "ws-[a-zA-Z0-9]{16}"
- Example: "ws-BPvFFrYCqRV6qVBK"
### StateVersionCreateRequest
**Type:** Request Validation Model
**Description:** Used to validate parameters for creating new state versions.
**Fields:**
- `workspace_id` (string, required): The ID of the workspace to create the state version in
- `serial` (integer, required): The serial number of this state instance (minimum: 0)
- `md5` (string, required): An MD5 hash of the raw state version (32 character hex string)
- `state` (string, optional): Base64 encoded raw state file
- `lineage` (string, optional): Lineage of the state version
- `json_state` (string, optional): Base64 encoded JSON state from "terraform show -json"
- `json_state_outputs` (string, optional): Base64 encoded JSON state outputs
- `run_id` (string, optional): The ID of the run to associate with the state version
### StateVersionParams
**Type:** Parameter Object
**Description:** Flexible parameter model for state version operations without routing fields.
**Fields:**
All fields from StateVersionCreateRequest except workspace_id, with every field optional.
**JSON representation:**
```json
{
"serial": 1,
"md5": "d41d8cd98f00b204e9800998ecf8427e",
"lineage": "871d1b4a-e579-fb7c-ffdb-f0c858a647a7",
"json-state": "H4sIAAAA...",
"json-state-outputs": "H4sIAAAA..."
}
```
**Notes:**
- Field names in JSON responses use kebab-case format (e.g., "json-state")
- Field names in the model use snake_case format (e.g., json_state)
- All encoded fields expect Base64 format
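To illustrate how the encoded fields are typically prepared, the sketch below derives `serial`, `md5`, and the Base64 `state` value from a raw state file on disk; the file path is a placeholder.
```python
import base64
import hashlib
import json

# Placeholder path to a raw Terraform state file.
with open("terraform.tfstate", "rb") as handle:
    raw_state = handle.read()

params = {
    "serial": json.loads(raw_state)["serial"],      # serial recorded inside the state file
    "md5": hashlib.md5(raw_state).hexdigest(),      # 32 character hex digest of the raw state
    "state": base64.b64encode(raw_state).decode(),  # Base64 encoded raw state file
}
print(params["serial"], params["md5"])
```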
## API Response Structure
While models validate requests, responses are returned as raw JSON. A typical state version response has this structure:
```json
{
"data": {
"id": "sv-BPvFFrYCqRV6qVBK",
"type": "state-versions",
"attributes": {
"created-at": "2023-05-01T12:34:56Z",
"size": 1024,
"hosted-state-download-url": "https://...",
"hosted-json-state-download-url": "https://...",
"modules": {...},
"providers": {...},
"resources": [...],
"serial": 1,
"state-version": 4,
"terraform-version": "1.5.0",
"vcs-commit-sha": "abc123",
"vcs-commit-url": "https://..."
},
"relationships": {
"run": {...},
"created-by": {...}
}
}
}
```
## Related Resources
- [State Version Tools](../tools/state_versions.md)
- [State Version Outputs Models](./state_version_outputs.md)
- [Terraform Cloud API - State Versions](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-versions)
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/tools/assessment_results.py:
--------------------------------------------------------------------------------
```python
"""Terraform Cloud assessment results management tools.
This module provides tools for working with health assessment results in Terraform Cloud.
It includes functions to retrieve assessment details, JSON output, schema files, and logs.
Reference: https://developer.hashicorp.com/terraform/cloud-docs/api-docs/assessment-results
"""
from ..api.client import api_request
from ..models.base import APIResponse
from ..models.assessment_results import AssessmentResultRequest, AssessmentOutputRequest
from ..utils.decorators import handle_api_errors
@handle_api_errors
async def get_assessment_result_details(assessment_result_id: str) -> APIResponse:
"""Get details for a specific assessment result.
Retrieves comprehensive information about an assessment result including its current status,
whether drift was detected, and links to related resources like JSON output and logs.
API endpoint: GET /api/v2/assessment-results/{assessment_result_id}
Args:
assessment_result_id: The ID of the assessment result to retrieve details for (format: "asmtres-xxxxxxxx")
Returns:
Assessment result details including status, timestamps, and drift detection information
See:
docs/tools/assessment_results.md for reference documentation
"""
# Validate parameters
params = AssessmentResultRequest(assessment_result_id=assessment_result_id)
# Make API request
return await api_request(f"assessment-results/{params.assessment_result_id}")
@handle_api_errors
async def get_assessment_json_output(assessment_result_id: str) -> APIResponse:
"""Retrieve the JSON execution plan from an assessment result.
Gets the JSON representation of the plan execution details from an assessment,
providing a machine-readable format of the planned resource changes.
API endpoint: GET /api/v2/assessment-results/{assessment_result_id}/json-output
Args:
assessment_result_id: The ID of the assessment result to retrieve JSON output for (format: "asmtres-xxxxxxxx")
Returns:
The complete JSON formatted plan with resource changes, metadata,
and planned actions. The redirect is automatically followed.
Note:
This endpoint requires admin level access to the workspace and cannot be accessed
with organization tokens.
See:
docs/tools/assessment_results.md for reference documentation
"""
# Validate parameters
params = AssessmentOutputRequest(assessment_result_id=assessment_result_id)
# Make API request with text acceptance since it may be a large JSON file
return await api_request(
f"assessment-results/{params.assessment_result_id}/json-output",
accept_text=True,
)
@handle_api_errors
async def get_assessment_json_schema(assessment_result_id: str) -> APIResponse:
"""Retrieve the JSON schema file from an assessment result.
Gets the JSON schema representation of the provider schema used during the assessment,
providing information about available resources and their configuration options.
API endpoint: GET /api/v2/assessment-results/{assessment_result_id}/json-schema
Args:
assessment_result_id: The ID of the assessment result to retrieve schema for (format: "asmtres-xxxxxxxx")
Returns:
The JSON schema file containing provider information. The redirect is automatically followed.
Note:
This endpoint requires admin level access to the workspace and cannot be accessed
with organization tokens.
See:
docs/tools/assessment_results.md for reference documentation
"""
# Validate parameters
params = AssessmentOutputRequest(assessment_result_id=assessment_result_id)
# Make API request with text acceptance since it may be a large JSON file
return await api_request(
f"assessment-results/{params.assessment_result_id}/json-schema",
accept_text=True,
)
@handle_api_errors
async def get_assessment_log_output(assessment_result_id: str) -> APIResponse:
"""Retrieve logs from an assessment result.
Gets the raw log output from a Terraform Cloud assessment operation,
providing detailed information about the execution and any errors.
API endpoint: GET /api/v2/assessment-results/{assessment_result_id}/log-output
Args:
assessment_result_id: The ID of the assessment result to retrieve logs for (format: "asmtres-xxxxxxxx")
Returns:
The raw logs from the assessment operation. The redirect to the log file
is automatically followed.
Note:
This endpoint requires admin level access to the workspace and cannot be accessed
with organization tokens.
See:
docs/tools/assessment_results.md for reference documentation
"""
# Validate parameters
params = AssessmentOutputRequest(assessment_result_id=assessment_result_id)
# Make API request with text acceptance for the logs
return await api_request(
f"assessment-results/{params.assessment_result_id}/log-output", accept_text=True
)
```
--------------------------------------------------------------------------------
/docs/tools/state_versions.md:
--------------------------------------------------------------------------------
```markdown
# State Version Tools
The state version tools module provides functions for working with state versions in Terraform Cloud.
## Overview
State versions represent point-in-time snapshots of Terraform state files. These tools allow you to manage the complete lifecycle of state versions, including listing, retrieving, creating, and downloading state data. This is particularly useful for state migration and workspace management.
## API Reference
These tools interact with the Terraform Cloud State Versions API:
- [State Versions API Documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-versions)
- [State Versions Concepts](https://developer.hashicorp.com/terraform/cloud-docs/workspaces/state)
## Tools Reference
### list_state_versions
**Function:** `list_state_versions(organization: str, workspace_name: str, page_number: int = 1, page_size: int = 20, filter_status: Optional[str] = None) -> Dict[str, Any]`
**Description:** Retrieves a paginated list of state versions for a workspace with optional status filtering.
**Parameters:**
- `organization` (str): The organization name
- `workspace_name` (str): The workspace name
- `page_number` (int, optional): Page number to fetch (default: 1)
- `page_size` (int, optional): Number of results per page (default: 20, max: 100)
- `filter_status` (str, optional): Filter by status: 'pending', 'finalized', or 'discarded'
**Returns:** JSON response containing paginated state versions with metadata including serial numbers, creation timestamps, and download URLs.
**Notes:**
- State versions are ordered by creation date (newest first)
- The `resources-processed` attribute indicates if asynchronous processing is complete
- Requires "read" permission for the workspace
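A usage sketch that awaits the tool function directly (organization and workspace names are illustrative):
```python
import asyncio
from terraform_cloud_mcp.tools.state_versions import list_state_versions
async def main() -> None:
    result = await list_state_versions(
        organization="my-org",
        workspace_name="production",
        page_size=50,
        filter_status="finalized",
    )
    for sv in result["data"]:
        print(sv["id"], sv["attributes"]["serial"])
asyncio.run(main())
```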
### get_current_state_version
**Function:** `get_current_state_version(workspace_id: str) -> Dict[str, Any]`
**Description:** Retrieves the current active state version for a workspace.
**Parameters:**
- `workspace_id` (str): The ID of the workspace (format: "ws-xxxxxxxx")
**Returns:** JSON response containing the current state version details including download URLs and metadata.
**Notes:**
- Returns the state version that serves as input for new runs
- May return empty if no state versions exist for the workspace
- Requires "read" permission for the workspace
### get_state_version
**Function:** `get_state_version(state_version_id: str) -> Dict[str, Any]`
**Description:** Retrieves detailed information about a specific state version by ID.
**Parameters:**
- `state_version_id` (str): The ID of the state version (format: "sv-xxxxxxxx")
**Returns:** JSON response containing comprehensive state version details including resources, modules, providers, and download URLs.
**Notes:**
- Includes detailed resource and module information if processing is complete
- Contains both raw state and JSON state download URLs (when available)
- Requires "read" permission for the associated workspace
### create_state_version
**Function:** `create_state_version(workspace_id: str, serial: int, md5: str, params: Optional[StateVersionParams] = None) -> Dict[str, Any]`
**Description:** Creates a new state version in a workspace, useful for migrating state from Terraform Community Edition.
**Parameters:**
- `workspace_id` (str): The ID of the workspace (format: "ws-xxxxxxxx")
- `serial` (int): The serial number of this state instance
- `md5` (str): An MD5 hash of the raw state version
- `params` (StateVersionParams): Additional configuration including state data and lineage
**Returns:** JSON response containing the created state version with upload URLs if state data wasn't provided.
**Notes:**
- Workspace must be locked by the user creating the state version
- Can provide state data directly or use returned upload URLs
- Requires "write" permission for the workspace
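A sketch of the typical lock → create → unlock flow, assuming the workspace lock helpers accept the workspace ID as their first argument (values are illustrative):
```python
import base64
import hashlib
from terraform_cloud_mcp.models.state_versions import StateVersionParams
from terraform_cloud_mcp.tools.state_versions import create_state_version
from terraform_cloud_mcp.tools.workspaces import lock_workspace, unlock_workspace
async def upload_state(workspace_id: str, raw_state: bytes, serial: int) -> dict:
    await lock_workspace(workspace_id)  # creation requires a locked workspace
    try:
        return await create_state_version(
            workspace_id=workspace_id,
            serial=serial,
            md5=hashlib.md5(raw_state).hexdigest(),
            params=StateVersionParams(state=base64.b64encode(raw_state).decode()),
        )
    finally:
        await unlock_workspace(workspace_id)
# Run with: asyncio.run(upload_state("ws-BPvFFrYCqRV6qVBK", open("terraform.tfstate", "rb").read(), 16))
```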
### download_state_file
**Function:** `download_state_file(state_version_id: str, json_format: bool = False) -> Dict[str, Any]`
**Description:** Downloads the raw state file content or JSON formatted state for a specific state version.
**Parameters:**
- `state_version_id` (str): The ID of the state version (format: "sv-xxxxxxxx")
- `json_format` (bool): Whether to download JSON formatted state (default: False)
**Returns:** Raw state file content or JSON formatted state content.
**Notes:**
- JSON format is only available for state versions created with Terraform 1.3 or later
- Raw format returns the actual Terraform state file
- Requires "read" permission for the associated workspace
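A short sketch downloading both formats for one state version (the ID is illustrative):
```python
import asyncio
from terraform_cloud_mcp.tools.state_versions import download_state_file
async def main() -> None:
    raw_state = await download_state_file("sv-BPvFFrYCqRV6qVBK")
    json_state = await download_state_file("sv-BPvFFrYCqRV6qVBK", json_format=True)
    print(type(raw_state), type(json_state))
asyncio.run(main())
```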
**Common Error Scenarios:**
| Error | Cause | Solution |
|-------|-------|----------|
| 404 | State version not found | Verify the ID exists and you have proper permissions |
| 422 | Invalid state version ID format | Ensure the ID matches pattern "sv-xxxxxxxx" |
| 403 | Insufficient permissions | Verify your API token has proper workspace access |
| 409 | Workspace not locked (create only) | Lock the workspace before creating state versions |
## Related Resources
- [State Version Models](../models/state_versions.md)
- [State Version Outputs Tools](./state_version_outputs.md)
- [Terraform Cloud API - State Versions](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-versions)
```
--------------------------------------------------------------------------------
/docs/models/plan.md:
--------------------------------------------------------------------------------
```markdown
# Plan Models
This document describes the data models used for plan operations in Terraform Cloud.
## Overview
Plan models provide structure and validation for interacting with the Terraform Cloud Plans API. These models define plan statuses, execution details, and request validation for retrieving plan information.
## Models Reference
### PlanStatus
**Type:** Enum (string)
**Description:** Represents the possible states a plan can be in during its lifecycle.
**Values:**
- `pending`: Plan has not yet started
- `managed_queued`: Plan is waiting for other runs in the queue
- `queued`: Plan is queued for execution
- `running`: Plan is currently executing
- `errored`: Plan encountered an error
- `canceled`: Plan was canceled
- `finished`: Plan completed successfully
**Usage Context:**
Used to determine the current state of a plan and whether it has completed successfully.
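A status-check sketch mirroring the apply models example, assuming `PlanStatus` is exported from `terraform_cloud_mcp.models` with member names matching the values above:
```python
from terraform_cloud_mcp.models import PlanStatus
# Check the status of a plan retrieved with get_plan_details
status = plan_response["data"]["attributes"]["status"]
if status == PlanStatus.FINISHED:
    print("Plan completed successfully")
elif status == PlanStatus.ERRORED:
    print("Plan encountered an error")
```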
### ExecutionDetails
**Type:** Object
**Description:** Contains information about how a plan was executed, particularly for agent-based execution.
**Fields:**
- `agent_id` (string, optional): ID of the agent that executed the plan
- `agent_name` (string, optional): Name of the agent that executed the plan
- `agent_pool_id` (string, optional): ID of the agent pool the executing agent belongs to
- `agent_pool_name` (string, optional): Name of the agent pool the executing agent belongs to
**JSON representation:**
```json
{
"execution-details": {
"agent-id": "agent-AbCdEfGhIjKlMnOp",
"agent-name": "production-agent-01",
"agent-pool-id": "apool-AbCdEfGhIjKlMnOp",
"agent-pool-name": "Production Agents"
}
}
```
### StatusTimestamps
**Type:** Object
**Description:** Captures timing information for various stages in a plan's lifecycle.
**Fields:**
- `queued_at` (string, optional): ISO8601 timestamp when the plan was queued
- `pending_at` (string, optional): ISO8601 timestamp when the plan was pending
- `started_at` (string, optional): ISO8601 timestamp when plan execution started
- `finished_at` (string, optional): ISO8601 timestamp when plan execution completed
**JSON representation:**
```json
{
"status-timestamps": {
"queued-at": "2023-09-01T12:00:00Z",
"pending-at": "2023-09-01T12:01:00Z",
"started-at": "2023-09-01T12:02:00Z",
"finished-at": "2023-09-01T12:10:00Z"
}
}
```
**Notes:**
- Field names in JSON responses use kebab-case format (e.g., "queued-at")
- Field names in the model use snake_case format (e.g., queued_at)
- All timestamp fields follow ISO8601 format
### PlanRequest
**Type:** Request Validation Model
**Description:** Used to validate plan ID parameters in API requests.
**Fields:**
- `plan_id` (string, required): The ID of the plan to retrieve
- Format: Must match pattern "plan-[a-zA-Z0-9]{16}"
- Example: "plan-AbCdEfGhIjKlMnOp"
**Validation Rules:**
- Plan ID must start with "plan-" prefix
- Must contain exactly 16 alphanumeric characters after the prefix
### PlanJsonOutputRequest
**Type:** Request Validation Model
**Description:** Used to validate parameters for retrieving the JSON output of a plan.
**Fields:**
- `plan_id` (string, required): The ID of the plan to retrieve JSON output for
- Format: Must match pattern "plan-[a-zA-Z0-9]{16}"
- Example: "plan-AbCdEfGhIjKlMnOp"
### RunPlanJsonOutputRequest
**Type:** Request Validation Model
**Description:** Used to validate parameters for retrieving the JSON output of a run's plan.
**Fields:**
- `run_id` (string, required): The ID of the run to retrieve plan JSON output for
- Format: Must match pattern "run-[a-zA-Z0-9]{16}"
- Example: "run-AbCdEfGhIjKlMnOp"
## API Response Structure
### Plan Details Response
```json
{
"data": {
"id": "plan-AbCdEfGhIjKlMnOp",
"type": "plans",
"attributes": {
"status": "finished",
"status-timestamps": {
"queued-at": "2023-09-01T12:00:00Z",
"pending-at": "2023-09-01T12:01:00Z",
"started-at": "2023-09-01T12:02:00Z",
"finished-at": "2023-09-01T12:10:00Z"
},
"resource-additions": 3,
"resource-changes": 2,
"resource-destructions": 1,
"execution-details": {
"agent-id": "agent-AbCdEfGhIjKlMnOp",
"agent-name": "production-agent-01",
"agent-pool-id": "apool-AbCdEfGhIjKlMnOp",
"agent-pool-name": "Production Agents"
}
},
"relationships": {
"run": {
"data": {
"id": "run-AbCdEfGhIjKlMnOp",
"type": "runs"
}
},
"state-versions": {
"data": {
"id": "sv-AbCdEfGhIjKlMnOp",
"type": "state-versions"
}
}
},
"links": {
"self": "/api/v2/plans/plan-AbCdEfGhIjKlMnOp",
"json-output": "/api/v2/plans/plan-AbCdEfGhIjKlMnOp/json-output"
}
}
}
```
### Plan JSON Output Response
The plan JSON output response contains a string in the `data` field that can be parsed to access detailed information about the planned resource changes:
```json
{
"data": "<JSON string containing the full plan details>"
}
```
The parsed plan data includes information about:
- Resource additions, changes, and deletions
- Output changes
- Provider configurations
- Prior state details
- Terraform version information
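A parsing sketch; the keys shown (`resource_changes`, `terraform_version`) follow Terraform's standard JSON plan format, and `output` is assumed to be the response from `get_plan_json_output`:
```python
import json
plan = json.loads(output["data"])
for change in plan.get("resource_changes", []):
    print(change["address"], change["change"]["actions"])
print("Terraform version:", plan.get("terraform_version"))
```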
## Related Resources
- [Plan Tools](../tools/plan.md)
- [Run Models](run.md)
- [Terraform Cloud API - Plans](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/plans)
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/server.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python3
"""
Terraform Cloud MCP Server
"""
import logging
from fastmcp import FastMCP
# Import environment configuration
from terraform_cloud_mcp.utils.env import should_enable_delete_tools
# Import tools and models
from terraform_cloud_mcp.tools import account
from terraform_cloud_mcp.tools import workspaces
from terraform_cloud_mcp.tools import runs
from terraform_cloud_mcp.tools import organizations
from terraform_cloud_mcp.tools import plans
from terraform_cloud_mcp.tools import applies
from terraform_cloud_mcp.tools import projects
from terraform_cloud_mcp.tools import cost_estimates
from terraform_cloud_mcp.tools import assessment_results
from terraform_cloud_mcp.tools import state_versions
from terraform_cloud_mcp.tools import state_version_outputs
from terraform_cloud_mcp.tools import variables
# Configure logging
logging.basicConfig(level=logging.DEBUG)
# Create server instance
mcp: FastMCP = FastMCP("Terraform Cloud MCP Server")
# Check if delete tools should be enabled
enable_delete_tools = should_enable_delete_tools()
# Register account management tools
mcp.tool()(account.get_account_details)
# Register workspace management tools
mcp.tool()(workspaces.list_workspaces)
mcp.tool()(workspaces.get_workspace_details)
mcp.tool()(workspaces.create_workspace)
mcp.tool()(workspaces.update_workspace)
mcp.tool(
enabled=enable_delete_tools,
annotations={"destructiveHint": True, "readOnlyHint": False},
)(workspaces.delete_workspace)
mcp.tool(
enabled=enable_delete_tools,
annotations={"destructiveHint": True, "readOnlyHint": False},
)(workspaces.safe_delete_workspace)
mcp.tool()(workspaces.lock_workspace)
mcp.tool()(workspaces.unlock_workspace)
mcp.tool()(workspaces.force_unlock_workspace)
# Register run management tools
mcp.tool()(runs.create_run)
mcp.tool()(runs.list_runs_in_workspace)
mcp.tool()(runs.list_runs_in_organization)
mcp.tool()(runs.get_run_details)
mcp.tool()(runs.apply_run)
mcp.tool()(runs.discard_run)
mcp.tool()(runs.cancel_run)
mcp.tool()(runs.force_cancel_run)
mcp.tool()(runs.force_execute_run)
# Register organization management tools
mcp.tool()(organizations.get_organization_details)
mcp.tool()(organizations.get_organization_entitlements)
mcp.tool()(organizations.list_organizations)
mcp.tool()(organizations.create_organization)
mcp.tool()(organizations.update_organization)
mcp.tool(
enabled=enable_delete_tools,
annotations={"destructiveHint": True, "readOnlyHint": False},
)(organizations.delete_organization)
# Register plan management tools
mcp.tool()(plans.get_plan_details)
mcp.tool()(plans.get_plan_json_output)
mcp.tool()(plans.get_run_plan_json_output)
mcp.tool()(plans.get_plan_logs)
# Register apply management tools
mcp.tool()(applies.get_apply_details)
mcp.tool()(applies.get_errored_state)
mcp.tool()(applies.get_apply_logs)
# Register project management tools
mcp.tool()(projects.create_project)
mcp.tool()(projects.update_project)
mcp.tool()(projects.list_projects)
mcp.tool()(projects.get_project_details)
mcp.tool(
enabled=enable_delete_tools,
annotations={"destructiveHint": True, "readOnlyHint": False},
)(projects.delete_project)
mcp.tool()(projects.list_project_tag_bindings)
mcp.tool()(projects.add_update_project_tag_bindings)
mcp.tool()(projects.move_workspaces_to_project)
# Register cost estimates tools
mcp.tool()(cost_estimates.get_cost_estimate_details)
# Register assessment results tools
mcp.tool()(assessment_results.get_assessment_result_details)
mcp.tool()(assessment_results.get_assessment_json_output)
mcp.tool()(assessment_results.get_assessment_json_schema)
mcp.tool()(assessment_results.get_assessment_log_output)
# Register state version tools
mcp.tool()(state_versions.list_state_versions)
mcp.tool()(state_versions.get_current_state_version)
mcp.tool()(state_versions.get_state_version)
mcp.tool()(state_versions.create_state_version)
mcp.tool()(state_versions.download_state_file)
# Register state version outputs tools
mcp.tool()(state_version_outputs.list_state_version_outputs)
mcp.tool()(state_version_outputs.get_state_version_output)
# Register variable management tools
# Workspace Variables
mcp.tool()(variables.list_workspace_variables)
mcp.tool()(variables.create_workspace_variable)
mcp.tool()(variables.update_workspace_variable)
mcp.tool(
enabled=enable_delete_tools,
annotations={"destructiveHint": True, "readOnlyHint": False},
)(variables.delete_workspace_variable)
# Variable Sets
mcp.tool()(variables.list_variable_sets)
mcp.tool()(variables.get_variable_set)
mcp.tool()(variables.create_variable_set)
mcp.tool()(variables.update_variable_set)
mcp.tool(
enabled=enable_delete_tools,
annotations={"destructiveHint": True, "readOnlyHint": False},
)(variables.delete_variable_set)
mcp.tool()(variables.assign_variable_set_to_workspaces)
mcp.tool()(variables.unassign_variable_set_from_workspaces)
mcp.tool()(variables.assign_variable_set_to_projects)
mcp.tool()(variables.unassign_variable_set_from_projects)
# Variable Set Variables
mcp.tool()(variables.list_variables_in_variable_set)
mcp.tool()(variables.create_variable_in_variable_set)
mcp.tool()(variables.update_variable_in_variable_set)
mcp.tool(
enabled=enable_delete_tools,
annotations={"destructiveHint": True, "readOnlyHint": False},
)(variables.delete_variable_from_variable_set)
def main() -> None:
"""Run the Terraform Cloud MCP server."""
mcp.run(transport="stdio")
if __name__ == "__main__":
main()
```
--------------------------------------------------------------------------------
/docs/tools/organization.md:
--------------------------------------------------------------------------------
```markdown
# Organization Tools
This module provides tools for managing organizations in Terraform Cloud.
## Overview
Organizations in Terraform Cloud are the top-level resource representing a group or company. Organizations contain workspaces, teams, and other shared settings. These tools allow you to create, read, update, and delete organizations, as well as view organization entitlements.
## API Reference
These tools interact with the Terraform Cloud Organizations API:
- [Organizations API Documentation](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/organizations)
- [Organizations and Teams](https://developer.hashicorp.com/terraform/cloud-docs/users-teams-organizations/organizations)
## Tools Reference
### list_organizations
**Function:** `list_organizations(page_number: int = 1, page_size: int = 20, q: Optional[str] = None, query_email: Optional[str] = None, query_name: Optional[str] = None) -> Dict[str, Any]`
**Description:** Retrieves a paginated list of organizations the current user has access to.
**Parameters:**
- `page_number` (int, optional): Page number to fetch (default: 1)
- `page_size` (int, optional): Number of results per page (default: 20)
- `q` (str, optional): Search query to filter by name and email
- `query_email` (str, optional): Search query to filter by email only
- `query_name` (str, optional): Search query to filter by name only
**Returns:** JSON response containing list of organizations with their metadata.
**Notes:**
- Returns only organizations the authenticated user has access to
- Search filters are partial matches (substrings)
- Response is paginated with links to next/previous pages
### get_organization_details
**Function:** `get_organization_details(organization: str) -> Dict[str, Any]`
**Description:** Retrieves comprehensive information about a specific organization.
**Parameters:**
- `organization` (str): The name of the organization to retrieve details for
**Returns:** JSON response containing detailed organization information including:
- Name, email, and creation timestamp
- Authentication policy settings
- Default execution mode
- Subscription and feature settings
**Notes:**
- Requires read access to the organization
- Provides essential information before working with organization resources
### get_organization_entitlements
**Function:** `get_organization_entitlements(organization: str) -> Dict[str, Any]`
**Description:** Retrieves information about available features based on the organization's subscription tier.
**Parameters:**
- `organization` (str): The name of the organization to retrieve entitlements for
**Returns:** JSON response containing feature limits and subscription information:
- Available features (e.g., cost estimation, policy enforcement)
- Resource limits (e.g., teams, private modules)
- Subscription tier information
**Notes:**
- Essential for determining which features are available to an organization
- Useful before attempting to use premium features
- Different subscription tiers have different feature sets
### create_organization
**Function:** `create_organization(name: str, email: str, params: Optional[OrganizationParams] = None) -> Dict[str, Any]`
**Description:** Creates a new organization with the specified name and email.
**Parameters:**
- `name` (str): The name for the organization (must follow naming rules)
- `email` (str): Email address for the admin contact
- `params` (OrganizationParams, optional): Additional configuration settings:
- `collaborator_auth_policy`: Authentication policy (password or two_factor_mandatory)
- `session_timeout`: Session timeout after inactivity in minutes
- `cost_estimation_enabled`: Whether to enable cost estimation
- `default_execution_mode`: Default workspace execution mode
- And many other options...
**Returns:** JSON response with the created organization details.
**Notes:**
- Organization names must follow format constraints (lowercase alphanumeric, hyphens)
- Names must be globally unique across Terraform Cloud
- Only certain users have permission to create organizations
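A usage sketch, assuming `OrganizationParams` is exported from the models package; the organization name and email are illustrative:
```python
import asyncio
from terraform_cloud_mcp.models import OrganizationParams
from terraform_cloud_mcp.tools.organizations import create_organization
async def main() -> None:
    params = OrganizationParams(
        collaborator_auth_policy="two_factor_mandatory",
        cost_estimation_enabled=True,
    )
    result = await create_organization(
        name="example-org", email="admin@example.com", params=params
    )
    print(result["data"]["id"])
asyncio.run(main())
```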
### update_organization
**Function:** `update_organization(organization: str, params: Optional[OrganizationParams] = None) -> Dict[str, Any]`
**Description:** Updates an existing organization's settings.
**Parameters:**
- `organization` (str): The name of the organization to update
- `params` (OrganizationParams, optional): Settings to update:
- `email`: Admin email address
- `session_timeout`: Session timeout after inactivity
- `collaborator_auth_policy`: Authentication policy
- And all other options available in create_organization...
**Returns:** JSON response with the updated organization details.
**Notes:**
- Only specified attributes will be updated
- Requires admin permissions on the organization
- Cannot change the organization name (immutable)
### delete_organization
**Function:** `delete_organization(organization: str) -> Dict[str, Any]`
**Description:** Permanently deletes an organization and all its content.
**Parameters:**
- `organization` (str): The name of the organization to delete
**Returns:** Success confirmation (HTTP 204 No Content) or error details.
**Notes:**
- This is an extremely destructive operation
- Will delete all workspaces, teams, configurations, and state files
- Organization names cannot be recreated after deletion
- Requires owner permissions
**Common Error Scenarios:**
| Error | Cause | Solution |
|-------|-------|----------|
| 404 | Organization not found | Verify the organization name |
| 403 | Insufficient permissions | Must be an organization owner to delete |
| 422 | Validation failure | Ensure the name format is correct |
| 409 | Organization has child resources | Delete all child resources first |
```
--------------------------------------------------------------------------------
/docs/conversations/state_management.md:
--------------------------------------------------------------------------------
```markdown
# State Management Conversation Example
This document demonstrates how to use the State Management tools in Terraform Cloud MCP.
## Scenario: Working with State Versions and Their Outputs
In this scenario, we'll show how to:
1. List state versions for a workspace
2. Get details of the current state version
3. Extract output values from the state
4. Create a new state version
5. Download a state file
### Conversation
```
**User:** I need to examine the state versions in my "production" workspace and understand the outputs in the current state.
**Claude:** I'll help you examine the state versions in your production workspace. Let me start by listing all the state versions.
[Claude uses list_state_versions tool]
I found 15 state versions in your production workspace. Here are the most recent ones:
- State version 15 created on 2024-03-15 at 14:30 UTC (status: finalized)
- State version 14 created on 2024-03-10 at 09:15 UTC (status: finalized)
- State version 13 created on 2024-03-08 at 16:45 UTC (status: finalized)
Now let me get the details of your current state version to understand what's deployed.
[Claude uses get_workspace_details tool]
[Claude uses get_current_state_version tool]
Your current state (ID: sv-abc123def456) is at serial 15 and was created with Terraform version 1.6.2. It contains 47 managed resources including VPCs, security groups, EC2 instances, and RDS databases.
Let me examine the outputs from this state version.
[Claude uses list_state_version_outputs tool]
Here are the outputs from your current state:
- vpc_id (string): vpc-1234567890abcdef
- database_endpoint (string): prod-db.abc123.us-east-1.rds.amazonaws.com
- load_balancer_dns (string): prod-alb-123456789.us-east-1.elb.amazonaws.com
- database_password (string): [SENSITIVE]
- api_key (string): [SENSITIVE]
The state includes 5 outputs total, with 2 marked as sensitive for security.
```
```
**User:** I need to create a new state version for this workspace. I have a state file exported from my local environment that I want to upload.
**Claude:** I'll help you create a new state version for your workspace using your local state file. This process requires several steps to ensure the state is properly uploaded and validated.
First, I need to lock the workspace since state version creation requires exclusive access.
[Claude uses lock_workspace tool]
The workspace is now locked with the reason "Uploading new state version from local environment".
Now I'll create the new state version using your local state file. I've processed your terraform.tfstate file and extracted the necessary information:
- Serial number: 16 (incremented from current state)
- MD5 hash: d41d8cd98f00b204e9800998ecf8427e
- Lineage: 871d1b4a-e579-fb7c-ffdb-f0c858a647a7
[Claude uses create_state_version tool]
Great! I've successfully created the new state version with ID: sv-xyz789abc123. The state has been uploaded and is now being processed by Terraform Cloud.
Let me unlock the workspace now that the upload is complete.
[Claude uses unlock_workspace tool]
The workspace is now unlocked and your new state version is active. The state contains 52 resources (5 more than the previous version) and includes the changes from your local environment.
```
```
**User:** I want to see what resources are included in one of our state versions. Can I download a specific state version and examine it?
**Claude:** Absolutely! I can help you examine the contents of a specific state version. Let me get the details of a state version and show you what resources it contains.
Let me start by getting the details of state version sv-abc123def456:
[Claude uses get_state_version tool]
Great! This state version has been fully processed and contains detailed resource information. Here's what I found:
**Resources in state (47 total):**
- aws_vpc.main
- aws_subnet.public_1a
- aws_subnet.public_1b
- aws_security_group.web
- aws_security_group.database
- aws_instance.web_server_1
- aws_instance.web_server_2
- aws_db_instance.main
- aws_load_balancer.main
- aws_route53_record.main
- And 37 more resources...
**Modules used:**
- root module: 35 resources
- module.networking: 8 resources
- module.security: 4 resources
**Providers in state:**
- hashicorp/aws: 45 resources
- hashicorp/random: 2 resources
Now let me download the actual state file so you can examine it locally:
[Claude uses download_state_file tool]
I've downloaded the raw state file successfully. The state contains all the resource configurations and current values.
I can also download it in JSON format since this state was created with Terraform 1.6.2:
[Claude uses download_state_file tool with JSON format]
Perfect! I've downloaded both the raw state file and the JSON-formatted version. The JSON format is easier to parse and examine programmatically.
The state file has been saved locally as "downloaded_state.tfstate" for your inspection. You can now examine all the resource details, dependencies, and current values managed by Terraform.
```
## Summary
In this conversation, we demonstrated how to:
1. **List state versions** - View all state versions in a workspace with their creation dates and status
2. **Examine current state** - Get details about the active state version including resource counts and Terraform version
3. **Review state outputs** - Access output values from the state, with proper handling of sensitive values
4. **Create new state versions** - Upload state from local environments with proper workspace locking
5. **Download and analyze state** - Retrieve state files in both raw and JSON formats for detailed examination
These operations help in managing Terraform state, examining infrastructure outputs, and troubleshooting state-related issues without requiring technical implementation knowledge.
```
--------------------------------------------------------------------------------
/docs/models/apply.md:
--------------------------------------------------------------------------------
```markdown
# Apply Models
This document describes the data models used for apply operations in Terraform Cloud.
## Overview
Apply models provide structure and validation for interacting with the Terraform Cloud Apply API. These models define apply statuses, execution details, and request validation for retrieving apply information and logs.
## Models Reference
### ApplyStatus
**Type:** Enum (string)
**Description:** Represents the possible states an apply can be in during its lifecycle.
**Values:**
- `pending`: Apply has not yet started
- `queued`: Apply is queued for execution
- `running`: Apply is currently executing
- `errored`: Apply encountered an error
- `canceled`: Apply was canceled
- `finished`: Apply completed successfully
- `unreachable`: Apply is in an unreachable state
**Usage Context:**
Used to determine the current state of an apply and whether it has completed successfully.
```python
from terraform_cloud_mcp.models import ApplyStatus
# Check the status of an apply
if apply_response["data"]["attributes"]["status"] == ApplyStatus.FINISHED:
print("Apply completed successfully")
elif apply_response["data"]["attributes"]["status"] == ApplyStatus.ERRORED:
print("Apply encountered an error")
```
### ApplyExecutionDetails
**Type:** Object
**Description:** Contains information about how an apply was executed, particularly for agent-based execution.
**Fields:**
- `agent_id` (string, optional): ID of the agent that executed the apply
- `agent_name` (string, optional): Name of the agent that executed the apply
- `agent_pool_id` (string, optional): ID of the agent pool the executing agent belongs to
- `agent_pool_name` (string, optional): Name of the agent pool the executing agent belongs to
**JSON representation:**
```json
{
"execution-details": {
"agent-id": "agent-AbCdEfGhIjKlMnOp",
"agent-name": "production-agent-01",
"agent-pool-id": "apool-AbCdEfGhIjKlMnOp",
"agent-pool-name": "Production Agents"
}
}
```
**Notes:**
- Field names in JSON responses use kebab-case format (e.g., "agent-id")
- Field names in the model use snake_case format (e.g., agent_id)
### ApplyStatusTimestamps
**Type:** Object
**Description:** Captures timing information for various stages in an apply's lifecycle.
**Fields:**
- `queued_at` (string, optional): ISO8601 timestamp when the apply was queued
- `started_at` (string, optional): ISO8601 timestamp when apply execution started
- `finished_at` (string, optional): ISO8601 timestamp when apply execution completed
**JSON representation:**
```json
{
"status-timestamps": {
"queued-at": "2023-09-01T12:00:00Z",
"started-at": "2023-09-01T12:01:00Z",
"finished-at": "2023-09-01T12:05:00Z"
}
}
```
**Notes:**
- Field names in JSON responses use kebab-case format (e.g., "queued-at")
- Field names in the model use snake_case format (e.g., queued_at)
- All timestamp fields follow ISO8601 format
- Can be used to calculate apply duration and queue waiting time
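For example, the apply duration can be computed from the timestamps with the standard library (`apply_response` as in the status example above):
```python
from datetime import datetime
stamps = apply_response["data"]["attributes"]["status-timestamps"]
started = datetime.fromisoformat(stamps["started-at"].replace("Z", "+00:00"))
finished = datetime.fromisoformat(stamps["finished-at"].replace("Z", "+00:00"))
print(f"Apply ran for {(finished - started).total_seconds():.0f} seconds")
```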
### ApplyRequest
**Type:** Request Validation Model
**Description:** Used to validate apply ID parameters in API requests.
**Fields:**
- `apply_id` (string, required): The ID of the apply to retrieve
- Format: Must match pattern "apply-[a-zA-Z0-9]{16}"
- Example: "apply-AbCdEfGhIjKlMnOp"
**Validation Rules:**
- Apply ID must start with "apply-" prefix
- Must contain exactly 16 alphanumeric characters after the prefix
**Used by:**
- `get_apply_details` and `get_apply_logs` tool functions to validate the apply ID format before making API requests
### ApplyErroredStateRequest
**Type:** Request Validation Model
**Description:** Used to validate parameters for retrieving errored state information.
**Fields:**
- `apply_id` (string, required): The ID of the apply with a failed state upload
- Format: Must match pattern "apply-[a-zA-Z0-9]{16}"
- Example: "apply-AbCdEfGhIjKlMnOp"
**Validation Rules:**
- Apply ID must start with "apply-" prefix
- Must contain exactly 16 alphanumeric characters after the prefix
**Used by:**
- `get_errored_state` tool function to validate the apply ID format before making API requests
## API Response Structure
### Apply Details Response
```json
{
"data": {
"id": "apply-AbCdEfGhIjKlMnOp",
"type": "applies",
"attributes": {
"status": "finished",
"status-timestamps": {
"queued-at": "2023-09-01T12:00:00Z",
"started-at": "2023-09-01T12:01:00Z",
"finished-at": "2023-09-01T12:05:00Z"
},
"log-read-url": "https://archivist.terraform.io/v1/object/apply-AbCdEfGhIjKlMnOp",
"resource-additions": 3,
"resource-changes": 2,
"resource-destructions": 1,
"execution-details": {
"agent-id": "agent-AbCdEfGhIjKlMnOp",
"agent-name": "production-agent-01",
"agent-pool-id": "apool-AbCdEfGhIjKlMnOp",
"agent-pool-name": "Production Agents"
}
},
"relationships": {
"state-versions": {
"data": [
{
"id": "sv-AbCdEfGhIjKlMnOp",
"type": "state-versions"
}
]
}
}
}
}
```
### Apply Logs Response
```json
{
"content": "Terraform v1.4.6\nApplying changes...\nAWS instance: Creating...\nAWS instance: Creation complete\nApply complete! Resources: 1 added, 0 changed, 0 destroyed."
}
```
### Errored State Response
```json
{
"terraform_state": {
"version": 4,
"terraform_version": "1.4.6",
"serial": 15,
"lineage": "12345678-90ab-cdef-1234-567890abcdef",
"resources": [
{
"mode": "managed",
"type": "aws_instance",
"name": "example",
"provider": "provider[\"registry.terraform.io/hashicorp/aws\"]",
"instances": [...]
}
]
}
}
```
## Related Resources
- [Apply Tools](../tools/apply.md)
- [Run Models](run.md)
- [Terraform Cloud API - Applies](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/applies)
```
--------------------------------------------------------------------------------
/docs/conversations/organization-entitlements-conversation.md:
--------------------------------------------------------------------------------
```markdown
# Organization Entitlements - Example Conversation
This document shows example conversations with Claude using the Terraform Cloud MCP organization entitlements feature.
## Viewing Basic Organization Entitlements
```
**User**: Can you show me the entitlements for my organization "hashicorp"?
**Claude**: I'll retrieve the entitlements for your "hashicorp" organization.
[Claude uses get_organization_entitlements tool]
Here are the entitlements for your "hashicorp" organization:
**Features:**
- Operations: True
- Private Module Registry: True
- Sentinel Policies: False
- Teams: False
- Cost Estimation: False
- SSO: False
- State Storage: True
- VCS Integrations: True
- Self-Serve Billing: True
- Configuration Designer: True
**Limits:**
- User Limit: 5
- Policy Limit: 5
- Policy Set Limit: 1
- Run Task Limit: 1
- Run Task Workspace Limit: 10
These entitlements indicate you're on a basic tier plan with access to core features like state storage, operations, and private module registry, but without premium features like SSO, teams, or Sentinel policies.
```
## Understanding Plan Features and Upgrades
```
**User**: What features would I get if I upgraded my plan?
**Claude**: Based on your current entitlements, if you upgraded your plan (likely to a Team or Enterprise tier), you would gain access to several premium features:
1. **Teams functionality** - Create and manage teams with different permission levels
2. **SSO (Single Sign-On)** - Enterprise identity management integration
3. **Sentinel Policies** - Advanced policy as code enforcement
4. **Cost Estimation** - Get cost estimates for your infrastructure changes
5. **Run Tasks** - Custom integrations that run during the plan/apply cycle
6. **Audit Logging** - Detailed activity logs for compliance
7. **Higher Limits** - Increased limits for:
- Users (beyond your current 5-user limit)
- Policy sets
- Run tasks
You might also gain access to:
- Agents - Run Terraform in your own infrastructure
- Global Run Tasks - Run tasks that apply across multiple workspaces
- Module Tests Generation - Automated test creation for modules
The exact features would depend on the specific tier you upgrade to. Would you like me to help you determine which plan would be best for your needs?
```
## Analyzing Usage Against Entitlements
```
**User**: Can you tell me which features we're actually using in our current organization?
**Claude**: To determine which features you're actually using in your organization, I'll need to check various aspects of your Terraform Cloud setup.
[Claude uses get_organization_details tool]
[Claude uses list_workspaces tool]
[Claude uses list_runs_in_organization tool]
Based on my analysis of your "hashicorp" organization:
**Features You're Currently Using:**
- State Storage: You have 12 workspaces with state files
- Remote Operations: You've run 87 Terraform operations in the last month
- Private Module Registry: You have 3 modules registered
- VCS Integration: Connected to GitHub for 75% of your workspaces
**Features Available But Not Used:**
- Configuration Designer: Available but no evidence of usage
- Self-Serve Billing: Available but limited usage patterns
**Features Limited By Your Current Plan:**
- Teams: You can't create teams (5 individual users maximum)
- Sentinel Policies: Not available on your current plan
- Cost Estimation: Not available on your current plan
- SSO: Not available on your current plan
Based on your usage patterns, you might benefit most from upgrading if you need:
1. More than 5 users (teams functionality)
2. Policy enforcement for governance
3. Cost visibility for your infrastructure
Would you like more specific recommendations based on your organization's needs?
```
## Checking User Capacity Against Limits
```
**User**: Are we close to hitting our user limit?
**Claude**: Let me check your current user count against your limit.
[Claude uses get_organization_entitlements tool]
Based on your organization entitlements:
- **User Limit**: 5 users
- **Current Users**: 4 users
- **Remaining Capacity**: 1 user (80% of limit utilized)
You are approaching your user limit with 4 out of 5 available seats used. This means you only have 1 seat remaining before you hit your limit.
If you're planning to add more team members soon, you might want to consider upgrading to a Team or Enterprise tier plan which would provide:
1. Higher or unlimited user seats
2. Teams functionality to manage user permissions more effectively
3. More granular access control options
Would you like me to help you explore upgrade options to accommodate your growing team?
```
## Comparing Against Different Organizations
```
**User**: How do our entitlements compare to our "hashicorp-staging" organization?
**Claude**: I'll compare the entitlements between your "hashicorp" and "hashicorp-staging" organizations.
[Claude uses get_organization_entitlements tool for both organizations]
Here's a comparison of entitlements between your organizations:
| Feature | hashicorp (Production) | hashicorp-staging |
|---------|------------------------|-------------------|
| **Core Features** | | |
| Operations | ✓ | ✓ |
| State Storage | ✓ | ✓ |
| Private Module Registry | ✓ | ✓ |
| VCS Integration | ✓ | ✓ |
| **Premium Features** | | |
| Teams | ✗ | ✓ |
| Sentinel Policies | ✗ | ✓ |
| Cost Estimation | ✗ | ✓ |
| SSO | ✗ | ✗ |
| **Limits** | | |
| User Limit | 5 | 25 |
| Policy Limit | 5 | 15 |
| Run Task Limit | 1 | 5 |
Interesting findings:
1. Your staging organization is actually on a higher tier plan with more premium features
2. The staging organization has significantly higher user and policy limits
3. Both organizations lack SSO, which is typically only available on Enterprise plans
This suggests your staging environment might be used for testing advanced features before adopting them in production. Would you like me to recommend how to align these environments, or explain why certain features might be important to have in production?
```
```
--------------------------------------------------------------------------------
/terraform_cloud_mcp/configs/filter_configs.py:
--------------------------------------------------------------------------------
```python
"""Filter configurations for Terraform Cloud MCP."""
from ..models.filters import FilterConfig, ResourceType
# Audit-safe filter configurations - conservative filtering to preserve compliance data
FILTER_CONFIGS = {
ResourceType.WORKSPACE: FilterConfig(
always_remove={
# Only remove statistical aggregations - not audit-relevant
"apply-duration-average",
"plan-duration-average",
"policy-check-failures",
"run-failures",
},
list_remove={
# Remove only internal system fields - preserve all user/audit tracking
"workspace-kpis-runs-count",
"unarchived-workspace-change-requests-count",
},
essential_relationships={
"organization",
"project",
"current-run",
"current-state-version",
"current-configuration-version",
},
),
ResourceType.RUN: FilterConfig(
# Preserve permissions, actions, source for audit trails
always_remove=set(), # Don't filter anything critical
list_remove=set(), # Preserve all data for audit context
essential_relationships={
"workspace",
"plan",
"apply",
"cost-estimate",
},
),
ResourceType.ORGANIZATION: FilterConfig(
always_remove={
# Remove only internal system flags - preserve auth policy for audits
"fair-run-queuing-enabled",
"send-passing-statuses-for-untriggered-speculative-plans",
},
# Preserve created-at, trial-expires-at, permissions, saml-enabled, two-factor-conformant
list_remove=set(),
),
ResourceType.PROJECT: FilterConfig(
# Preserve created-at, updated-at for audit trails
list_remove=set(),
essential_relationships={
"organization",
},
),
ResourceType.VARIABLE: FilterConfig(
# Preserve version-id and created-at for audit/change tracking
always_remove=set(),
list_remove=set(),
),
ResourceType.PLAN: FilterConfig(
# Preserve permissions for audit context
always_remove=set(),
read_remove={
"resource-drift", # Detailed drift info can be filtered in reads
},
list_remove={
# Only filter detailed execution info in lists - preserve timing for audits
"execution-details",
},
essential_relationships={
"run",
"state-versions",
},
),
ResourceType.APPLY: FilterConfig(
# Preserve permissions and status-timestamps for audit trails
always_remove=set(),
list_remove={
# Only filter detailed execution info in lists
"execution-details",
},
essential_relationships={
"run",
"state-versions",
},
),
ResourceType.STATE_VERSION: FilterConfig(
always_remove={
# Only remove VCS-specific fields that don't impact audit
"vcs-commit-sha",
"vcs-commit-url",
},
list_remove={
# Remove only internal hosted URLs - can be derived when needed
"hosted-state-download-url",
"hosted-json-state-download-url",
"hosted-state-upload-url",
},
essential_relationships={
"workspace",
"run",
"outputs",
},
),
ResourceType.COST_ESTIMATE: FilterConfig(
# Preserve status-timestamps for audit timeline
always_remove=set(),
read_remove=set(), # Preserve error messages for audit context
list_remove={
# Only remove detailed resource counts in lists
"resources-count",
},
essential_relationships={
"run",
},
),
ResourceType.ASSESSMENT: FilterConfig(
# Preserve result-count, created-at, updated-at for audit tracking
always_remove=set(),
read_remove=set(), # Preserve log URLs for audit access
list_remove=set(),
essential_relationships={
"workspace",
},
),
ResourceType.ACCOUNT: FilterConfig(
always_remove={
# Remove only personal/UI fields - preserve auth info for audits
"password", # Never should be present anyway
"avatar-url", # UI-only field
},
# Preserve is-sudo, is-site-admin, auth-method, etc. for security audits
list_remove=set(),
),
}
# Path patterns for resource type detection (order matters for specificity)
PATH_PATTERNS = [
("state-versions", ResourceType.STATE_VERSION),
("state-version-outputs", ResourceType.STATE_VERSION),
("assessment-results", ResourceType.ASSESSMENT),
("cost-estimates", ResourceType.COST_ESTIMATE),
("workspaces", ResourceType.WORKSPACE),
("runs", ResourceType.RUN),
("organizations", ResourceType.ORGANIZATION),
("projects", ResourceType.PROJECT),
("plans", ResourceType.PLAN),
("applies", ResourceType.APPLY),
("vars", ResourceType.VARIABLE),
("variables", ResourceType.VARIABLE),
("account/details", ResourceType.ACCOUNT),
("users", ResourceType.ACCOUNT),
]
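# Illustrative only: a consumer (e.g. terraform_cloud_mcp/utils/filters.py) is
# expected to scan these patterns in order and use the first fragment found in
# the request path, roughly:
#     for fragment, resource_type in PATH_PATTERNS:
#         if fragment in path:
#             return resource_type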
# Data type mapping for response-based detection
DATA_TYPE_MAP = {
"projects": ResourceType.PROJECT,
"vars": ResourceType.VARIABLE,
"runs": ResourceType.RUN,
}
# Resource type mapping for consistent filtering
RESOURCE_TYPE_MAP = {
"workspace": ResourceType.WORKSPACE,
"workspaces": ResourceType.WORKSPACE,
"run": ResourceType.RUN,
"runs": ResourceType.RUN,
"organization": ResourceType.ORGANIZATION,
"organizations": ResourceType.ORGANIZATION,
"project": ResourceType.PROJECT,
"projects": ResourceType.PROJECT,
"var": ResourceType.VARIABLE,
"vars": ResourceType.VARIABLE,
"plan": ResourceType.PLAN,
"plans": ResourceType.PLAN,
"apply": ResourceType.APPLY,
"applies": ResourceType.APPLY,
"state-version": ResourceType.STATE_VERSION,
"state-versions": ResourceType.STATE_VERSION,
"cost-estimate": ResourceType.COST_ESTIMATE,
"cost-estimates": ResourceType.COST_ESTIMATE,
"assessment-result": ResourceType.ASSESSMENT,
"assessment-results": ResourceType.ASSESSMENT,
"user": ResourceType.ACCOUNT,
"users": ResourceType.ACCOUNT,
}
```