This is page 1 of 3. Use http://codebase.md/aashari/mcp-server-atlassian-bitbucket?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .env.example
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── ci-dependabot-auto-merge.yml
│       ├── ci-dependency-check.yml
│       └── ci-semantic-release.yml
├── .gitignore
├── .gitkeep
├── .npmignore
├── .npmrc
├── .prettierrc
├── .releaserc.json
├── .trigger-ci
├── CHANGELOG.md
├── eslint.config.mjs
├── package-lock.json
├── package.json
├── README.md
├── scripts
│   ├── ensure-executable.js
│   ├── package.json
│   └── update-version.js
├── src
│   ├── cli
│   │   ├── atlassian.api.cli.ts
│   │   ├── atlassian.repositories.cli.ts
│   │   └── index.ts
│   ├── controllers
│   │   ├── atlassian.api.controller.ts
│   │   └── atlassian.repositories.content.controller.ts
│   ├── index.ts
│   ├── services
│   │   ├── vendor.atlassian.repositories.service.test.ts
│   │   ├── vendor.atlassian.repositories.service.ts
│   │   ├── vendor.atlassian.repositories.types.ts
│   │   ├── vendor.atlassian.workspaces.service.ts
│   │   ├── vendor.atlassian.workspaces.test.ts
│   │   └── vendor.atlassian.workspaces.types.ts
│   ├── tools
│   │   ├── atlassian.api.tool.ts
│   │   ├── atlassian.api.types.ts
│   │   ├── atlassian.repositories.tool.ts
│   │   └── atlassian.repositories.types.ts
│   ├── types
│   │   └── common.types.ts
│   └── utils
│       ├── bitbucket-error-detection.test.ts
│       ├── cli.test.util.ts
│       ├── config.util.test.ts
│       ├── config.util.ts
│       ├── constants.util.ts
│       ├── error-handler.util.test.ts
│       ├── error-handler.util.ts
│       ├── error.util.test.ts
│       ├── error.util.ts
│       ├── formatter.util.ts
│       ├── jest.setup.ts
│       ├── jq.util.ts
│       ├── logger.util.ts
│       ├── pagination.util.ts
│       ├── response.util.ts
│       ├── shell.util.ts
│       ├── toon.util.test.ts
│       ├── toon.util.ts
│       ├── transport.util.test.ts
│       ├── transport.util.ts
│       └── workspace.util.ts
├── STYLE_GUIDE.md
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/.gitkeep:
--------------------------------------------------------------------------------

```

```

--------------------------------------------------------------------------------
/.trigger-ci:
--------------------------------------------------------------------------------

```
# CI/CD trigger Thu Sep 18 00:41:08 WIB 2025

```

--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------

```
{
  "singleQuote": true,
  "semi": true,
  "useTabs": true,
  "tabWidth": 4,
  "printWidth": 80,
  "trailingComma": "all"
} 
```

--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------

```
# This file is for local development only
# The CI/CD workflow will create its own .npmrc files

# For npm registry
registry=https://registry.npmjs.org/

# GitHub Packages configuration removed

```

--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------

```
# Source code
src/
*.ts
!*.d.ts

# Tests
*.test.ts
*.test.js
__tests__/
coverage/
jest.config.js

# Development files
.github/
.git/
.gitignore
.eslintrc
.eslintrc.js
.eslintignore
.prettierrc
.prettierrc.js
tsconfig.json
*.tsbuildinfo

# Editor directories
.idea/
.vscode/
*.swp
*.swo

# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# CI/CD
.travis.yml

# Runtime data
.env
.env.* 
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
# Dependency directories
node_modules/
.npm

# TypeScript output
dist/
build/
*.tsbuildinfo

# Coverage directories
coverage/
.nyc_output/

# Environment variables
.env
.env.local
.env.*.local

# Log files
logs/
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# IDE files
.idea/
.vscode/
*.sublime-project
*.sublime-workspace
.project
.classpath
.settings/
.DS_Store

# Temp directories
.tmp/
temp/

# Backup files
*.bak

# Editor directories and files
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

# macOS
.DS_Store

# Misc
.yarn-integrity

```

--------------------------------------------------------------------------------
/.releaserc.json:
--------------------------------------------------------------------------------

```json
{
	"branches": ["main"],
	"plugins": [
		"@semantic-release/commit-analyzer",
		"@semantic-release/release-notes-generator",
		"@semantic-release/changelog",
		[
			"@semantic-release/exec",
			{
				"prepareCmd": "node scripts/update-version.js ${nextRelease.version} && npm run build && chmod +x dist/index.js"
			}
		],
		[
			"@semantic-release/npm",
			{
				"npmPublish": true,
				"pkgRoot": "."
			}
		],
		[
			"@semantic-release/git",
			{
				"assets": [
					"package.json",
					"CHANGELOG.md",
					"src/index.ts",
					"src/cli/index.ts"
				],
				"message": "chore(release): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}"
			}
		],
		"@semantic-release/github"
	]
}

```

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------

```
# Enable debug logging (optional)
DEBUG=false

# ============================================================================
# AUTHENTICATION - Choose ONE method below
# ============================================================================

# Method 1: Scoped API Token (RECOMMENDED - future-proof)
# Generate at: https://id.atlassian.com/manage-profile/security/api-tokens
# Token should start with "ATATT"
# Required scopes: repository, workspace, pullrequest (for PR management)
ATLASSIAN_USER_EMAIL=your.email@example.com
ATLASSIAN_API_TOKEN=your_scoped_api_token_starting_with_ATATT

# Method 2: Bitbucket App Password (LEGACY - deprecated June 2026)
# Generate at: https://bitbucket.org/account/settings/app-passwords/
# Required permissions: Workspaces (Read), Repositories (Read/Write), Pull Requests (Read/Write)
# ATLASSIAN_BITBUCKET_USERNAME=your-bitbucket-username
# ATLASSIAN_BITBUCKET_APP_PASSWORD=your-app-password

# ============================================================================
# OPTIONAL CONFIGURATION
# ============================================================================

# Default workspace for commands (optional - uses first workspace if not set)
# BITBUCKET_DEFAULT_WORKSPACE=your-main-workspace-slug

# Note: ATLASSIAN_SITE_NAME is NOT needed for Bitbucket Cloud
# Only use it if you're configuring Jira/Confluence alongside Bitbucket

```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
# Connect AI to Your Bitbucket Repositories

Transform how you work with Bitbucket by connecting Claude, Cursor AI, and other AI assistants directly to your repositories, pull requests, and code. Get instant insights, automate code reviews, and streamline your development workflow.

[![NPM Version](https://img.shields.io/npm/v/@aashari/mcp-server-atlassian-bitbucket)](https://www.npmjs.com/package/@aashari/mcp-server-atlassian-bitbucket)
[![License](https://img.shields.io/npm/l/@aashari/mcp-server-atlassian-bitbucket)](https://github.com/aashari/mcp-server-atlassian-bitbucket/blob/main/LICENSE)

**Current Version:** 2.2.0

## What You Can Do

- **Ask AI about your code**: "What's the latest commit in my main repository?"
- **Get PR insights**: "Show me all open pull requests that need review"
- **Search your codebase**: "Find all JavaScript files that use the authentication function"
- **Review code changes**: "Compare the differences between my feature branch and main"
- **Manage pull requests**: "Create a PR for my new-feature branch"
- **Automate workflows**: "Add a comment to PR #123 with the test results"

## Perfect For

- **Developers** who want AI assistance with code reviews and repository management
- **Team Leads** needing quick insights into project status and pull request activity
- **DevOps Engineers** automating repository workflows and branch management
- **Anyone** who wants to interact with Bitbucket using natural language

## Requirements

- **Node.js** 18.0.0 or higher
- **Bitbucket Cloud** account (not Bitbucket Server/Data Center)
- **Authentication credentials**: Scoped API Token (recommended) or App Password (legacy)

## Quick Start

Get up and running in 2 minutes:

### 1. Get Your Bitbucket Credentials

> **IMPORTANT**: Bitbucket App Passwords are being deprecated and will be removed by **June 2026**. We recommend using **Scoped API Tokens** for new setups.

#### Option A: Scoped API Token (Recommended - Future-Proof)

**Bitbucket is deprecating app passwords**. Use the new scoped API tokens instead:

1. Go to [Atlassian API Tokens](https://id.atlassian.com/manage-profile/security/api-tokens)
2. Click **"Create API token with scopes"**
3. Select **"Bitbucket"** as the product
4. Choose the appropriate scopes:
   - **For read-only access**: `repository`, `workspace`
   - **For full functionality**: `repository`, `workspace`, `pullrequest`
5. Copy the generated token (starts with `ATATT`)
6. Use with your Atlassian email as the username

#### Option B: App Password (Legacy - Deprecated June 2026)

Generate a Bitbucket App Password (legacy method):
1. Go to [Bitbucket App Passwords](https://bitbucket.org/account/settings/app-passwords/)
2. Click "Create app password"
3. Give it a name like "AI Assistant"
4. Select these permissions:
   - **Workspaces**: Read
   - **Repositories**: Read (and Write if you want AI to create PRs/comments)
   - **Pull Requests**: Read (and Write for PR management)

### 2. Try It Instantly

```bash
# Set your credentials (choose one method)

# Method 1: Scoped API Token (recommended - future-proof)
export ATLASSIAN_USER_EMAIL="your.email@example.com"
export ATLASSIAN_API_TOKEN="your_scoped_api_token"  # Token starting with ATATT

# OR Method 2: Legacy App Password (will be deprecated June 2026)
export ATLASSIAN_BITBUCKET_USERNAME="your_username"
export ATLASSIAN_BITBUCKET_APP_PASSWORD="your_app_password"

# List your workspaces
npx -y @aashari/mcp-server-atlassian-bitbucket get --path "/workspaces"

# List repositories in a workspace
npx -y @aashari/mcp-server-atlassian-bitbucket get --path "/repositories/your-workspace"

# Get pull requests for a repository
npx -y @aashari/mcp-server-atlassian-bitbucket get --path "/repositories/your-workspace/your-repo/pullrequests"

# Get repository details with JMESPath filtering
npx -y @aashari/mcp-server-atlassian-bitbucket get --path "/repositories/your-workspace/your-repo" --jq "{name: name, language: language}"
```

## Connect to AI Assistants

### For Claude Desktop Users

Add this to your Claude Desktop configuration file (`~/Library/Application Support/Claude/claude_desktop_config.json` on macOS, `%APPDATA%\Claude\claude_desktop_config.json` on Windows):

**Option 1: Scoped API Token (recommended - future-proof)**
```json
{
  "mcpServers": {
    "bitbucket": {
      "command": "npx",
      "args": ["-y", "@aashari/mcp-server-atlassian-bitbucket"],
      "env": {
        "ATLASSIAN_USER_EMAIL": "[email protected]",
        "ATLASSIAN_API_TOKEN": "your_scoped_api_token"
      }
    }
  }
}
```

**Option 2: Legacy App Password (will be deprecated June 2026)**
```json
{
  "mcpServers": {
    "bitbucket": {
      "command": "npx",
      "args": ["-y", "@aashari/mcp-server-atlassian-bitbucket"],
      "env": {
        "ATLASSIAN_BITBUCKET_USERNAME": "your_username",
        "ATLASSIAN_BITBUCKET_APP_PASSWORD": "your_app_password"
      }
    }
  }
}
```

Restart Claude Desktop, and you'll see the bitbucket server in the status bar.

### For Other AI Assistants

Most AI assistants support MCP. You can either:

**Option 1: Use npx (recommended - always latest version):**
Configure your AI assistant to run: `npx -y @aashari/mcp-server-atlassian-bitbucket`

**Option 2: Install globally:**
```bash
npm install -g @aashari/mcp-server-atlassian-bitbucket
```

Then configure your AI assistant to use the MCP server with STDIO transport.

**Supported AI assistants:**
- Claude Desktop (official support)
- Cursor AI
- Continue.dev
- Cline
- Any MCP-compatible client

### Alternative: Configuration File

Create `~/.mcp/configs.json` for system-wide configuration:

**Option 1: Scoped API Token (recommended - future-proof)**
```json
{
  "bitbucket": {
    "environments": {
      "ATLASSIAN_USER_EMAIL": "[email protected]",
      "ATLASSIAN_API_TOKEN": "your_scoped_api_token",
      "BITBUCKET_DEFAULT_WORKSPACE": "your_main_workspace"
    }
  }
}
```

**Option 2: Legacy App Password (will be deprecated June 2026)**
```json
{
  "bitbucket": {
    "environments": {
      "ATLASSIAN_BITBUCKET_USERNAME": "your_username",
      "ATLASSIAN_BITBUCKET_APP_PASSWORD": "your_app_password",
      "BITBUCKET_DEFAULT_WORKSPACE": "your_main_workspace"
    }
  }
}
```

**Alternative config keys:** The system also accepts `"atlassian-bitbucket"`, `"@aashari/mcp-server-atlassian-bitbucket"`, or `"mcp-server-atlassian-bitbucket"` instead of `"bitbucket"`.

## Available Tools

This MCP server provides 6 generic tools that can access any Bitbucket API endpoint:

| Tool | Description | Parameters |
|------|-------------|------------|
| `bb_get` | GET any Bitbucket API endpoint (read data) | `path`, `queryParams?`, `jq?`, `outputFormat?` |
| `bb_post` | POST to any endpoint (create resources) | `path`, `body`, `queryParams?`, `jq?`, `outputFormat?` |
| `bb_put` | PUT to any endpoint (replace resources) | `path`, `body`, `queryParams?`, `jq?`, `outputFormat?` |
| `bb_patch` | PATCH any endpoint (partial updates) | `path`, `body`, `queryParams?`, `jq?`, `outputFormat?` |
| `bb_delete` | DELETE any endpoint (remove resources) | `path`, `queryParams?`, `jq?`, `outputFormat?` |
| `bb_clone` | Clone a repository locally | `workspaceSlug?`, `repoSlug`, `targetPath` |

### Tool Parameters

All API tools support these common parameters:

- **`path`** (required): API endpoint path starting with `/` (the `/2.0` prefix is added automatically)
- **`queryParams`** (optional): Key-value pairs for query parameters (e.g., `{"pagelen": "25", "page": "2"}`)
- **`jq`** (optional): JMESPath expression to filter/transform the response - **highly recommended** to reduce token costs
- **`outputFormat`** (optional): `"toon"` (default, 30-60% fewer tokens) or `"json"`
- **`body`** (required for POST/PUT/PATCH): Request body as JSON object

### Common API Paths

All paths automatically have `/2.0` prepended. Full Bitbucket Cloud REST API 2.0 reference: https://developer.atlassian.com/cloud/bitbucket/rest/

**Workspaces & Repositories:**
- `/workspaces` - List all workspaces
- `/repositories/{workspace}` - List repos in workspace
- `/repositories/{workspace}/{repo}` - Get repo details
- `/repositories/{workspace}/{repo}/refs/branches` - List branches
- `/repositories/{workspace}/{repo}/refs/branches/{branch_name}` - Get/delete branch
- `/repositories/{workspace}/{repo}/commits` - List commits
- `/repositories/{workspace}/{repo}/commits/{commit}` - Get commit details
- `/repositories/{workspace}/{repo}/src/{commit}/{filepath}` - Get file content

**Pull Requests:**
- `/repositories/{workspace}/{repo}/pullrequests` - List PRs (GET) or create PR (POST)
- `/repositories/{workspace}/{repo}/pullrequests/{id}` - Get/update/delete PR
- `/repositories/{workspace}/{repo}/pullrequests/{id}/diff` - Get PR diff
- `/repositories/{workspace}/{repo}/pullrequests/{id}/comments` - List/add PR comments
- `/repositories/{workspace}/{repo}/pullrequests/{id}/approve` - Approve PR (POST) or remove approval (DELETE)
- `/repositories/{workspace}/{repo}/pullrequests/{id}/request-changes` - Request changes (POST)
- `/repositories/{workspace}/{repo}/pullrequests/{id}/merge` - Merge PR (POST)
- `/repositories/{workspace}/{repo}/pullrequests/{id}/decline` - Decline PR (POST)

**Comparisons:**
- `/repositories/{workspace}/{repo}/diff/{source}..{destination}` - Compare branches/commits

**Other Resources:**
- `/repositories/{workspace}/{repo}/issues` - List/manage issues
- `/repositories/{workspace}/{repo}/downloads` - List/manage downloads
- `/repositories/{workspace}/{repo}/pipelines` - Access Bitbucket Pipelines
- `/repositories/{workspace}/{repo}/deployments` - View deployments

### TOON Output Format

**What is TOON?** Token-Oriented Object Notation is a format optimized for LLMs that reduces token consumption by 30-60% compared to JSON. It uses tabular arrays and minimal syntax while preserving all data.

**Default behavior:** All tools return TOON format by default. You can override this with `outputFormat: "json"` if needed.

**Example comparison:**
```
JSON (verbose):
{
  "values": [
    {"name": "repo1", "slug": "repo-1"},
    {"name": "repo2", "slug": "repo-2"}
  ]
}

TOON (efficient):
values:
  name  | slug
  repo1 | repo-1
  repo2 | repo-2
```

Learn more: https://github.com/toon-format/toon

### JMESPath Filtering

All tools support optional JMESPath (`jq`) filtering to extract specific data and reduce token costs further:

**Important:** Always use `jq` to filter responses! Unfiltered API responses can be very large and expensive in terms of tokens.

```bash
# Get just repository names
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/myworkspace" \
  --jq "values[].name"

# Get PR titles and states (custom object shape)
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/myworkspace/myrepo/pullrequests" \
  --jq "values[].{title: title, state: state, author: author.display_name}"

# Get first result only
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/myworkspace" \
  --jq "values[0]"

# Explore schema with one item first, then filter
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/workspaces" \
  --query-params '{"pagelen": "1"}'
```

**Common JMESPath patterns:**
- `values[*].fieldName` - Extract single field from all items
- `values[*].{key1: field1, key2: field2}` - Create custom object shape
- `values[0]` - Get first item only
- `values[:5]` - Get first 5 items
- `values[?state=='OPEN']` - Filter by condition

Full JMESPath reference: https://jmespath.org

## Real-World Examples

### Explore Your Repositories

Ask your AI assistant:
- *"List all repositories in my main workspace"*
- *"Show me details about the backend-api repository"*
- *"What's the commit history for the feature-auth branch?"*
- *"Get the content of src/config.js from the main branch"*

### Manage Pull Requests

Ask your AI assistant:
- *"Show me all open pull requests that need review"*
- *"Get details about pull request #42 including the code changes"*
- *"Create a pull request from feature-login to main branch"*
- *"Add a comment to PR #15 saying the tests passed"*
- *"Approve pull request #33"*

### Work with Branches and Code

Ask your AI assistant:
- *"Compare my feature branch with the main branch"*
- *"List all branches in the user-service repository"*
- *"Show me the differences between commits abc123 and def456"*

## Advanced Usage

### Cost Optimization Tips

1. **Always use JMESPath filtering** - Extract only needed fields to minimize token usage
2. **Use pagination wisely** - Set `pagelen` query parameter to limit results (e.g., `{"pagelen": "10"}`)
3. **Explore schema first** - Fetch one item without filters to see available fields, then filter subsequent calls
4. **Leverage TOON format** - Default TOON format saves 30-60% tokens vs JSON
5. **Query parameters for filtering** - Use Bitbucket's `q` parameter for server-side filtering before results are returned

### Query Parameter Examples

```bash
# Filter PRs by state
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/workspace/repo/pullrequests" \
  --query-params '{"state": "OPEN", "pagelen": "5"}' \
  --jq "values[*].{id: id, title: title}"

# Search PRs by title
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/workspace/repo/pullrequests" \
  --query-params '{"q": "title~\"bug\""}' \
  --jq "values[*].{id: id, title: title}"

# Filter repositories by role
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/workspace" \
  --query-params '{"role": "owner", "pagelen": "10"}'

# Sort by updated date
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/workspace/repo/pullrequests" \
  --query-params '{"sort": "-updated_on"}' \
  --jq "values[*].{id: id, title: title, updated: updated_on}"
```

### Working with Large Responses

When dealing with APIs that return large payloads:

1. **Use sparse fieldsets** - Add `fields` query parameter: `{"fields": "values.name,values.slug"}`
2. **Paginate results** - Use `pagelen` and `page` parameters
3. **Filter at the source** - Use Bitbucket's `q` parameter for server-side filtering
4. **Post-process with JQ** - Further filter the response with JMESPath

Example combining all techniques:
```bash
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/workspace/repo/pullrequests" \
  --query-params '{"state": "OPEN", "pagelen": "10", "fields": "values.id,values.title,values.state"}' \
  --jq "values[*].{id: id, title: title}"
```

### Best Practices for AI Interactions

1. **Be specific with paths** - Use exact workspace/repo slugs (case-sensitive)
2. **Test with CLI first** - Verify paths and authentication before using in AI context
3. **Use descriptive JQ filters** - Extract meaningful field names for better AI understanding
4. **Enable DEBUG for troubleshooting** - See exactly what's being sent to Bitbucket API
5. **Check API limits** - Bitbucket Cloud has rate limits; use filtering to reduce calls

## CLI Commands

The CLI mirrors the MCP tools for direct terminal access. All commands return JSON output (not TOON - TOON is only for MCP mode).

### Available Commands

```bash
# Get help
npx -y @aashari/mcp-server-atlassian-bitbucket --help

# GET request
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/workspaces" \
  --jq "values[*].{name: name, slug: slug}"

# GET with query parameters
npx -y @aashari/mcp-server-atlassian-bitbucket get \
  --path "/repositories/myworkspace/myrepo/pullrequests" \
  --query-params '{"state": "OPEN", "pagelen": "10"}' \
  --jq "values[*].{id: id, title: title}"

# POST request (create a PR)
npx -y @aashari/mcp-server-atlassian-bitbucket post \
  --path "/repositories/myworkspace/myrepo/pullrequests" \
  --body '{"title": "My PR", "source": {"branch": {"name": "feature"}}, "destination": {"branch": {"name": "main"}}}' \
  --jq "{id: id, title: title}"

# POST with query parameters
npx -y @aashari/mcp-server-atlassian-bitbucket post \
  --path "/repositories/myworkspace/myrepo/pullrequests/42/comments" \
  --body '{"content": {"raw": "Looks good!"}}' \
  --query-params '{"fields": "id,content"}' \
  --jq "{id: id, content: content.raw}"

# PUT request (replace resource)
npx -y @aashari/mcp-server-atlassian-bitbucket put \
  --path "/repositories/myworkspace/myrepo" \
  --body '{"description": "Updated description", "is_private": true}'

# PATCH request (partial update)
npx -y @aashari/mcp-server-atlassian-bitbucket patch \
  --path "/repositories/myworkspace/myrepo/pullrequests/123" \
  --body '{"title": "Updated PR title"}'

# DELETE request
npx -y @aashari/mcp-server-atlassian-bitbucket delete \
  --path "/repositories/myworkspace/myrepo/refs/branches/old-branch"

# Clone repository
npx -y @aashari/mcp-server-atlassian-bitbucket clone \
  --workspace-slug myworkspace \
  --repo-slug myrepo \
  --target-path /absolute/path/to/parent/directory
```

### CLI Options

**For `get` and `delete` commands:**
- `-p, --path <path>` (required) - API endpoint path
- `-q, --query-params <json>` (optional) - Query parameters as JSON string
- `--jq <expression>` (optional) - JMESPath filter expression

**For `post`, `put`, and `patch` commands:**
- `-p, --path <path>` (required) - API endpoint path
- `-b, --body <json>` (required) - Request body as JSON string
- `-q, --query-params <json>` (optional) - Query parameters as JSON string
- `--jq <expression>` (optional) - JMESPath filter expression

**For `clone` command:**
- `--workspace-slug <slug>` (optional) - Workspace slug (uses default if not provided)
- `--repo-slug <slug>` (required) - Repository slug
- `--target-path <path>` (required) - Absolute path to parent directory where repo will be cloned

## Debugging

### Enable Debug Mode

Set the `DEBUG` environment variable to see detailed logging:

```bash
# For CLI testing
DEBUG=true npx -y @aashari/mcp-server-atlassian-bitbucket get --path "/workspaces"

# For Claude Desktop - add to config
{
  "mcpServers": {
    "bitbucket": {
      "command": "npx",
      "args": ["-y", "@aashari/mcp-server-atlassian-bitbucket"],
      "env": {
        "DEBUG": "true",
        "ATLASSIAN_USER_EMAIL": "...",
        "ATLASSIAN_API_TOKEN": "..."
      }
    }
  }
}
```

**Log files:** When running in MCP mode, logs are written to `~/.mcp/data/@aashari-mcp-server-atlassian-bitbucket.[session-id].log`

### Test with HTTP Mode

For interactive debugging, run the server in HTTP mode and use the MCP Inspector:

```bash
# Set credentials first
export ATLASSIAN_USER_EMAIL="your.email@example.com"
export ATLASSIAN_API_TOKEN="your_token"
export DEBUG=true

# Start HTTP server with MCP Inspector
npx -y @aashari/mcp-server-atlassian-bitbucket
# Then in another terminal:
PORT=3000 npm run mcp:inspect
```

This opens a visual interface to test tools and see request/response data.

### Common Issues

**Server not appearing in Claude Desktop:**
1. Check config file syntax (valid JSON)
2. Restart Claude Desktop completely
3. Check Claude Desktop logs: `~/Library/Logs/Claude/mcp*.log` (macOS)

**Tools not working:**
1. Enable DEBUG mode to see detailed errors
2. Test with CLI first to isolate MCP vs credentials issues
3. Verify API paths are correct (case-sensitive)

## Troubleshooting

### "Authentication failed" or "403 Forbidden"

1. **Choose the right authentication method**:
   - **Standard Atlassian method** (recommended): Use your Atlassian account email + API token (works with any Atlassian service)
   - **Bitbucket-specific method** (legacy): Use your Bitbucket username + App password (Bitbucket only)

2. **For Scoped API Tokens** (recommended):
   - Go to [Atlassian API Tokens](https://id.atlassian.com/manage-profile/security/api-tokens)
   - Make sure your token is still active and has the right scopes
   - Required scopes: `repository`, `workspace` (add `pullrequest` for PR management)
   - Token should start with `ATATT`

3. **For Bitbucket App Passwords** (legacy):
   - Go to [Bitbucket App Passwords](https://bitbucket.org/account/settings/app-passwords/)
   - Make sure your app password has the right permissions
   - Remember: App passwords will be deprecated by June 2026

4. **Verify your credentials**:
   ```bash
   # Test credentials with CLI
   export ATLASSIAN_USER_EMAIL="your.email@example.com"
   export ATLASSIAN_API_TOKEN="your_token"
   npx -y @aashari/mcp-server-atlassian-bitbucket get --path "/workspaces"
   ```

5. **Environment variable naming**:
   - Use `ATLASSIAN_USER_EMAIL` + `ATLASSIAN_API_TOKEN` for scoped tokens
   - Use `ATLASSIAN_BITBUCKET_USERNAME` + `ATLASSIAN_BITBUCKET_APP_PASSWORD` for app passwords
   - Don't use `ATLASSIAN_SITE_NAME` - it's not needed for Bitbucket Cloud

### "Resource not found" or "404"

1. **Check the API path**:
   - Paths are case-sensitive
   - Use workspace slug (from URL), not display name
   - Example: If your repo URL is `https://bitbucket.org/myteam/my-repo`, use `myteam` and `my-repo`

2. **Verify the resource exists**:
   ```bash
   # List workspaces to find the correct slug
   npx -y @aashari/mcp-server-atlassian-bitbucket get --path "/workspaces"
   ```

### Claude Desktop Integration Issues

1. **Restart Claude Desktop** after updating the config file
2. **Verify config file location**:
   - macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
   - Windows: `%APPDATA%\Claude\claude_desktop_config.json`

### Getting Help

If you're still having issues:
1. Run a simple test command to verify everything works
2. Check the [GitHub Issues](https://github.com/aashari/mcp-server-atlassian-bitbucket/issues) for similar problems
3. Create a new issue with your error message and setup details

## Frequently Asked Questions

### What permissions do I need?

**For Scoped API Tokens** (recommended):
- Required scopes: `repository`, `workspace`
- Add `pullrequest` for PR management

**For Bitbucket App Passwords** (legacy):
- For **read-only access**: Workspaces: Read, Repositories: Read, Pull Requests: Read
- For **full functionality**: Add "Write" permissions for Repositories and Pull Requests

### Can I use this with private repositories?

Yes! This works with both public and private repositories. You just need the appropriate permissions through your credentials.

### What AI assistants does this work with?

Any AI assistant that supports the Model Context Protocol (MCP):
- Claude Desktop
- Cursor AI
- Continue.dev
- Many others

### Is my data secure?

Yes! This tool:
- Runs entirely on your local machine
- Uses your own Bitbucket credentials
- Never sends your data to third parties
- Only accesses what you give it permission to access

## What's New

### Version 2.2.0 (December 2024)
- Modernized to MCP SDK v1.23.0 with `registerTool` API
- Added raw response logging with truncation for large API responses
- Improved debugging capabilities

### Version 2.1.0 (November 2024)
- **TOON output format** - 30-60% fewer tokens than JSON
- Token-efficient responses by default with JSON fallback option
- Significant cost reduction for LLM interactions

### Version 2.0.0 (November 2024) - Breaking Changes
- Replaced 20+ specific tools with 6 generic HTTP method tools
- Simplified architecture: ~14,000 fewer lines of code
- Future-proof: new API endpoints work without code changes
- Added optional JMESPath filtering for all responses

## Migration from v1.x

Version 2.0 represents a major architectural change. If you're upgrading from v1.x:

**Before (v1.x) - 20+ specific tools:**
```
bb_ls_workspaces, bb_get_workspace, bb_ls_repos, bb_get_repo,
bb_list_branches, bb_add_branch, bb_get_commit_history, bb_get_file,
bb_ls_prs, bb_get_pr, bb_add_pr, bb_update_pr, bb_approve_pr, bb_reject_pr,
bb_ls_pr_comments, bb_add_pr_comment, bb_diff_branches, bb_diff_commits, bb_search
```

**After (v2.0+) - 6 generic tools:**
```
bb_get, bb_post, bb_put, bb_patch, bb_delete, bb_clone
```

### Migration Examples

| v1.x Tool | v2.0+ Equivalent |
|-----------|------------------|
| `bb_ls_workspaces()` | `bb_get(path: "/workspaces")` |
| `bb_ls_repos(workspace: "myteam")` | `bb_get(path: "/repositories/myteam")` |
| `bb_get_repo(workspace: "myteam", repo: "myrepo")` | `bb_get(path: "/repositories/myteam/myrepo")` |
| `bb_list_branches(workspace: "myteam", repo: "myrepo")` | `bb_get(path: "/repositories/myteam/myrepo/refs/branches")` |
| `bb_add_branch(...)` | `bb_post(path: "/repositories/.../refs/branches", body: {...})` |
| `bb_ls_prs(workspace: "myteam", repo: "myrepo")` | `bb_get(path: "/repositories/myteam/myrepo/pullrequests")` |
| `bb_get_pr(workspace: "myteam", repo: "myrepo", id: 42)` | `bb_get(path: "/repositories/myteam/myrepo/pullrequests/42")` |
| `bb_add_pr(...)` | `bb_post(path: "/repositories/.../pullrequests", body: {...})` |
| `bb_update_pr(...)` | `bb_patch(path: "/repositories/.../pullrequests/42", body: {...})` |
| `bb_approve_pr(workspace: "myteam", repo: "myrepo", id: 42)` | `bb_post(path: "/repositories/myteam/myrepo/pullrequests/42/approve", body: {})` |
| `bb_diff_branches(...)` | `bb_get(path: "/repositories/.../diff/branch1..branch2")` |

### Key Changes
1. **All tools now require explicit paths** - more verbose but more flexible
2. **Use JMESPath filtering** - extract only what you need to reduce tokens
3. **TOON format by default** - 30-60% fewer tokens (can override with `outputFormat: "json"`)
4. **Direct Bitbucket API access** - any API endpoint works, no code changes needed for new features

## Support

Need help? Here's how to get assistance:

1. **Check the troubleshooting section above** - most common issues are covered there
2. **Visit our GitHub repository** for documentation and examples: [github.com/aashari/mcp-server-atlassian-bitbucket](https://github.com/aashari/mcp-server-atlassian-bitbucket)
3. **Report issues** at [GitHub Issues](https://github.com/aashari/mcp-server-atlassian-bitbucket/issues)
4. **Start a discussion** for feature requests or general questions

---

*Made with care for developers who want to bring AI into their Bitbucket workflow.*

```

--------------------------------------------------------------------------------
/scripts/package.json:
--------------------------------------------------------------------------------

```json
{
  "type": "module"
} 
```

--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------

```yaml
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
    versioning-strategy: auto
    labels:
      - "dependencies"
    commit-message:
      prefix: "chore"
      include: "scope"
    allow:
      - dependency-type: "direct"
    ignore:
      - dependency-name: "*"
        update-types: ["version-update:semver-patch"]
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 5
    labels:
      - "dependencies"
      - "github-actions" 
```

--------------------------------------------------------------------------------
/.github/workflows/ci-dependency-check.yml:
--------------------------------------------------------------------------------

```yaml
name: CI - Dependency Check

on:
    schedule:
        - cron: '0 5 * * 1' # Run at 5 AM UTC every Monday
    workflow_dispatch: # Allow manual triggering

jobs:
    validate:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout code
              uses: actions/checkout@v5

            - name: Setup Node.js
              uses: actions/setup-node@v5
              with:
                  node-version: '22'
                  cache: 'npm'

            - name: Install dependencies
              run: npm ci

            - name: Run npm audit
              run: npm audit

            - name: Check for outdated dependencies
              run: npm outdated || true

            - name: Run tests
              run: npm test

            - name: Run linting
              run: npm run lint

            - name: Build project
              run: npm run build

```

--------------------------------------------------------------------------------
/src/utils/jest.setup.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Jest global setup for suppressing console output during tests
 * This file is used to mock console methods to reduce noise in test output
 */

import { jest, beforeEach, afterEach, afterAll } from '@jest/globals';

// Store original console methods
const originalConsole = {
	log: console.log,
	info: console.info,
	warn: console.warn,
	error: console.error,
	debug: console.debug,
};

// Global setup to suppress console output during tests
beforeEach(() => {
	// Mock console methods to suppress output
	console.log = jest.fn();
	console.info = jest.fn();
	console.warn = jest.fn();
	console.error = jest.fn();
	console.debug = jest.fn();
});

afterEach(() => {
	// Clear mock calls after each test
	jest.clearAllMocks();
});

afterAll(() => {
	// Restore original console methods after all tests
	console.log = originalConsole.log;
	console.info = originalConsole.info;
	console.warn = originalConsole.warn;
	console.error = originalConsole.error;
	console.debug = originalConsole.debug;
});

```

--------------------------------------------------------------------------------
/eslint.config.mjs:
--------------------------------------------------------------------------------

```
import eslint from '@eslint/js';
import tseslint from 'typescript-eslint';
import prettierPlugin from 'eslint-plugin-prettier';
import eslintConfigPrettier from 'eslint-config-prettier';

export default tseslint.config(
	{
		ignores: ['node_modules/**', 'dist/**', 'examples/**'],
	},
	eslint.configs.recommended,
	...tseslint.configs.recommended,
	{
		plugins: {
			prettier: prettierPlugin,
		},
		rules: {
			'prettier/prettier': 'error',
			indent: ['error', 'tab', { SwitchCase: 1 }],
			'@typescript-eslint/no-unused-vars': [
				'error',
				{ argsIgnorePattern: '^_' },
			],
		},
		languageOptions: {
			parserOptions: {
				ecmaVersion: 'latest',
				sourceType: 'module',
			},
			globals: {
				node: 'readonly',
				jest: 'readonly',
			},
		},
	},
	// Special rules for test files
	{
		files: ['**/*.test.ts'],
		rules: {
			'@typescript-eslint/no-explicit-any': 'off',
			'@typescript-eslint/no-require-imports': 'off',
			'@typescript-eslint/no-unsafe-function-type': 'off',
			'@typescript-eslint/no-unused-vars': 'off',
		},
	},
	eslintConfigPrettier,
);

```

--------------------------------------------------------------------------------
/.github/workflows/ci-dependabot-auto-merge.yml:
--------------------------------------------------------------------------------

```yaml
name: CI - Dependabot Auto-merge

on:
    pull_request:
        branches: [main]

permissions:
    contents: write
    pull-requests: write
    checks: read

jobs:
    auto-merge-dependabot:
        runs-on: ubuntu-latest
        if: github.actor == 'dependabot[bot]'
        steps:
            - name: Checkout code
              uses: actions/checkout@v5

            - name: Setup Node.js
              uses: actions/setup-node@v5
              with:
                  node-version: '22'
                  cache: 'npm'

            - name: Install dependencies
              run: npm ci

            - name: Run tests
              run: npm test

            - name: Run linting
              run: npm run lint

            - name: Auto-approve PR
              uses: hmarr/auto-approve-action@v4
              with:
                  github-token: ${{ secrets.GITHUB_TOKEN }}

            - name: Enable auto-merge
              if: success()
              run: gh pr merge --auto --merge "$PR_URL"
              env:
                  PR_URL: ${{ github.event.pull_request.html_url }}
                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

```

--------------------------------------------------------------------------------
/src/utils/constants.util.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Application constants
 *
 * This file contains constants used throughout the application.
 * Centralizing these values makes them easier to maintain and update.
 */

/**
 * Current application version
 * This should match the version in package.json
 */
export const VERSION = '1.23.6';

/**
 * Package name with scope
 * Used for initialization and identification
 */
export const PACKAGE_NAME = '@aashari/mcp-server-atlassian-bitbucket';

/**
 * CLI command name
 * Used for binary name and CLI help text
 */
export const CLI_NAME = 'mcp-atlassian-bitbucket';

/**
 * Network timeout constants (in milliseconds)
 */
export const NETWORK_TIMEOUTS = {
	/** Default timeout for API requests (30 seconds) */
	DEFAULT_REQUEST_TIMEOUT: 30 * 1000,

	/** Timeout for large file operations like diffs (60 seconds) */
	LARGE_REQUEST_TIMEOUT: 60 * 1000,

	/** Timeout for search operations (45 seconds) */
	SEARCH_REQUEST_TIMEOUT: 45 * 1000,
} as const;

/**
 * Data limits to prevent excessive resource consumption (CWE-770)
 */
export const DATA_LIMITS = {
	/** Maximum response size in bytes (10MB) */
	MAX_RESPONSE_SIZE: 10 * 1024 * 1024,

	/** Maximum items per page for paginated requests */
	MAX_PAGE_SIZE: 100,

	/** Default page size when not specified */
	DEFAULT_PAGE_SIZE: 50,
} as const;

```
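
Below is a brief, hypothetical sketch of how these constants might be consumed by a fetch-based request helper; `fetchWithTimeout` is illustrative and not the project's actual transport implementation.

```typescript
// Illustrative sketch: enforcing NETWORK_TIMEOUTS and DATA_LIMITS around fetch.
// fetchWithTimeout is a hypothetical helper, not part of this codebase.
import { NETWORK_TIMEOUTS, DATA_LIMITS } from './constants.util.js';

async function fetchWithTimeout(url: string): Promise<string> {
	const controller = new AbortController();
	const timer = setTimeout(
		() => controller.abort(),
		NETWORK_TIMEOUTS.DEFAULT_REQUEST_TIMEOUT,
	);
	try {
		const response = await fetch(url, { signal: controller.signal });
		const body = await response.text();
		// Reject oversized payloads to limit resource consumption (CWE-770).
		if (body.length > DATA_LIMITS.MAX_RESPONSE_SIZE) {
			throw new Error('Response exceeds MAX_RESPONSE_SIZE');
		}
		return body;
	} finally {
		clearTimeout(timer);
	}
}
```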

--------------------------------------------------------------------------------
/scripts/ensure-executable.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';

// Use dynamic import meta for ESM compatibility
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const rootDir = path.resolve(__dirname, '..');
const entryPoint = path.join(rootDir, 'dist', 'index.js');

try {
	if (fs.existsSync(entryPoint)) {
		// Ensure the file is executable (cross-platform)
		const currentMode = fs.statSync(entryPoint).mode;
		// Check if executable bits are set (user, group, or other)
		// Mode constants differ slightly across platforms, checking broadly
		const isExecutable =
			currentMode & fs.constants.S_IXUSR ||
			currentMode & fs.constants.S_IXGRP ||
			currentMode & fs.constants.S_IXOTH;

		if (!isExecutable) {
			// Set permissions to 755 (rwxr-xr-x) if not executable
			fs.chmodSync(entryPoint, 0o755);
			console.log(
				`Made ${path.relative(rootDir, entryPoint)} executable`,
			);
		} else {
			// console.log(`${path.relative(rootDir, entryPoint)} is already executable`);
		}
	} else {
		// console.warn(`${path.relative(rootDir, entryPoint)} not found, skipping chmod`);
	}
} catch (err) {
	// console.warn(`Failed to set executable permissions: ${err.message}`);
	// We use '|| true' in package.json, so no need to exit here
}

```

--------------------------------------------------------------------------------
/src/cli/index.ts:
--------------------------------------------------------------------------------

```typescript
import { Command } from 'commander';
import { Logger } from '../utils/logger.util.js';
import { VERSION, CLI_NAME } from '../utils/constants.util.js';

// Import CLI modules
import atlassianApiCli from './atlassian.api.cli.js';
import atlassianRepositoriesCli from './atlassian.repositories.cli.js';

// Package description
const DESCRIPTION =
	'A Model Context Protocol (MCP) server for Atlassian Bitbucket integration';

// Create a contextualized logger for this file
const cliLogger = Logger.forContext('cli/index.ts');

// Log CLI initialization
cliLogger.debug('Bitbucket CLI module initialized');

export async function runCli(args: string[]) {
	const methodLogger = Logger.forContext('cli/index.ts', 'runCli');

	const program = new Command();

	program.name(CLI_NAME).description(DESCRIPTION).version(VERSION);

	// Register CLI commands
	atlassianApiCli.register(program);
	cliLogger.debug('API commands registered (get, post)');

	atlassianRepositoriesCli.register(program);
	cliLogger.debug('Repository commands registered (clone)');

	// Handle unknown commands
	program.on('command:*', (operands) => {
		methodLogger.error(`Unknown command: ${operands[0]}`);
		console.log('');
		program.help();
		process.exit(1);
	});

	// Parse arguments; default to help if no command provided
	await program.parseAsync(args.length ? args : ['--help'], { from: 'user' });
}

```
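
A minimal sketch of a bin entry point that could drive `runCli`; the project's real `src/index.ts` may wire this differently. Since `parseAsync` is called with `{ from: 'user' }`, only user-supplied arguments should be passed in.

```typescript
// Illustrative sketch of a CLI entry point; not the project's actual src/index.ts.
import { runCli } from './cli/index.js';

async function main(): Promise<void> {
	// runCli parses with { from: 'user' }, so strip the node binary and script path.
	await runCli(process.argv.slice(2));
}

main().catch((error) => {
	console.error(error);
	process.exit(1);
});
```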

--------------------------------------------------------------------------------
/src/types/common.types.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Common type definitions shared across controllers.
 * These types provide a standard interface for controller interactions.
 * Centralized here to ensure consistency across the codebase.
 */

/**
 * Common pagination information for API responses.
 * This is used for providing consistent pagination details internally.
 * Its formatted representation will be included directly in the content string.
 */
export interface ResponsePagination {
	/**
	 * Cursor for the next page of results, if available.
	 * This should be passed to subsequent requests to retrieve the next page.
	 */
	nextCursor?: string;

	/**
	 * Whether more results are available beyond the current page.
	 * When true, clients should use the nextCursor to retrieve more results.
	 */
	hasMore: boolean;

	/**
	 * The number of items in the current result set.
	 * This helps clients track how many items they've received.
	 */
	count?: number;

	/**
	 * The total number of items available across all pages, if known.
	 * Note: Not all APIs provide this. Check the specific API/tool documentation.
	 */
	total?: number;

	/**
	 * Page number for page-based pagination.
	 */
	page?: number;

	/**
	 * Page size for page-based pagination.
	 */
	size?: number;
}

/**
 * Common response structure for controller operations.
 * All controller methods should return this structure.
 */
export interface ControllerResponse {
	/**
	 * Formatted content to be displayed to the user.
	 * Contains a comprehensive Markdown-formatted string that includes all information:
	 * - Primary content (e.g., list items, details)
	 * - Any metadata (previously in metadata field)
	 * - Pagination information (previously in pagination field)
	 */
	content: string;

	/**
	 * Optional path to the raw API response file.
	 * When the response is truncated, this path allows AI to access the full data.
	 */
	rawResponsePath?: string | null;
}

```
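
A short, hypothetical example of a controller helper that satisfies this contract by folding pagination details into the Markdown `content` string; `formatRepoList` and its inputs are illustrative only.

```typescript
// Illustrative sketch: building a ControllerResponse with pagination folded
// into the content string. formatRepoList is a hypothetical helper.
import type {
	ControllerResponse,
	ResponsePagination,
} from './common.types.js';

function formatRepoList(
	names: string[],
	pagination: ResponsePagination,
): ControllerResponse {
	const lines = ['# Repositories', '', ...names.map((name) => `- ${name}`)];
	if (pagination.hasMore && pagination.nextCursor) {
		lines.push(
			'',
			`*More results available (cursor: ${pagination.nextCursor}).*`,
		);
	}
	return { content: lines.join('\n'), rawResponsePath: null };
}
```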

--------------------------------------------------------------------------------
/src/utils/shell.util.ts:
--------------------------------------------------------------------------------

```typescript
import { promisify } from 'util';
import { exec as callbackExec } from 'child_process';
import { Logger } from './logger.util.js';

const exec = promisify(callbackExec);
const utilLogger = Logger.forContext('utils/shell.util.ts');

/**
 * Executes a shell command.
 *
 * @param command The command string to execute.
 * @param operationDesc A brief description of the operation for logging purposes.
 * @returns A promise that resolves with the stdout of the command.
 * @throws An error if the command execution fails, including stderr.
 */
export async function executeShellCommand(
	command: string,
	operationDesc: string,
): Promise<string> {
	const methodLogger = utilLogger.forMethod('executeShellCommand');
	methodLogger.debug(`Attempting to ${operationDesc}: ${command}`);
	try {
		const { stdout, stderr } = await exec(command);
		if (stderr) {
			methodLogger.warn(`Stderr from ${operationDesc}: ${stderr}`);
			// Depending on the command, stderr might not always indicate a failure,
			// but for git clone, it usually does if stdout is empty.
			// If stdout is also present, it might be a warning.
		}
		methodLogger.info(
			`Successfully executed ${operationDesc}. Stdout: ${stdout}`,
		);
		return stdout || `Successfully ${operationDesc}.`; // Return stdout or a generic success message
	} catch (error: unknown) {
		methodLogger.error(`Failed to ${operationDesc}: ${command}`, error);

		let errorMessage = 'Unknown error during shell command execution.';
		if (error instanceof Error) {
			// Node's child_process.ExecException often has stdout and stderr properties
			const execError = error as Error & {
				stdout?: string;
				stderr?: string;
			};
			errorMessage =
				execError.stderr || execError.stdout || execError.message;
		} else if (typeof error === 'string') {
			errorMessage = error;
		}
		// Ensure a default message if somehow it's still undefined (though unlikely with above checks)
		if (!errorMessage && error) {
			errorMessage = String(error);
		}

		throw new Error(`Failed to ${operationDesc}: ${errorMessage}`);
	}
}

```
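
A brief usage sketch, assuming a caller such as the clone controller; the repository URL and target path below are placeholders.

```typescript
// Illustrative sketch: using executeShellCommand for a git clone.
// The SSH URL and target directory are placeholders.
import { executeShellCommand } from './shell.util.js';

async function cloneExample(): Promise<void> {
	const stdout = await executeShellCommand(
		'git clone git@bitbucket.org:myworkspace/myrepo.git /tmp/myrepo',
		'clone repository myworkspace/myrepo',
	);
	console.log(stdout);
}

cloneExample().catch((error) => console.error(error));
```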

--------------------------------------------------------------------------------
/src/cli/atlassian.repositories.cli.ts:
--------------------------------------------------------------------------------

```typescript
import { Command } from 'commander';
import { Logger } from '../utils/logger.util.js';
import { handleCliError } from '../utils/error.util.js';
import { handleCloneRepository } from '../controllers/atlassian.repositories.content.controller.js';

/**
 * CLI module for Bitbucket repository operations.
 * Provides the clone command. Other operations (list repos, branches, etc.)
 * are available via the generic 'get' command.
 */

// Create a contextualized logger for this file
const cliLogger = Logger.forContext('cli/atlassian.repositories.cli.ts');

// Log CLI initialization
cliLogger.debug('Bitbucket repositories CLI module initialized');

/**
 * Register Bitbucket repositories CLI commands with the Commander program
 *
 * @param program - The Commander program instance to register commands with
 */
function register(program: Command): void {
	const methodLogger = Logger.forContext(
		'cli/atlassian.repositories.cli.ts',
		'register',
	);
	methodLogger.debug('Registering Bitbucket Repositories CLI commands...');

	program
		.command('clone')
		.description(
			'Clone a Bitbucket repository to your local filesystem using SSH (preferred) or HTTPS.',
		)
		.requiredOption('-r, --repo-slug <slug>', 'Repository slug to clone.')
		.requiredOption(
			'-t, --target-path <path>',
			'Directory path where the repository will be cloned (absolute path recommended).',
		)
		.option(
			'-w, --workspace-slug <slug>',
			'Workspace slug containing the repository. Uses default workspace if not provided.',
		)
		.action(async (options) => {
			const actionLogger = cliLogger.forMethod('clone');
			try {
				actionLogger.debug(
					'Processing clone command options:',
					options,
				);

				const result = await handleCloneRepository({
					workspaceSlug: options.workspaceSlug,
					repoSlug: options.repoSlug,
					targetPath: options.targetPath,
				});

				console.log(result.content);
			} catch (error) {
				actionLogger.error('Clone operation failed:', error);
				handleCliError(error);
			}
		});

	methodLogger.debug('CLI commands registered successfully');
}

export default { register };

```

--------------------------------------------------------------------------------
/src/tools/atlassian.repositories.tool.ts:
--------------------------------------------------------------------------------

```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { Logger } from '../utils/logger.util.js';
import { formatErrorForMcpTool } from '../utils/error.util.js';
import { truncateForAI } from '../utils/formatter.util.js';
import {
	CloneRepositoryToolArgs,
	type CloneRepositoryToolArgsType,
} from './atlassian.repositories.types.js';

// Import directly from specialized controllers
import { handleCloneRepository } from '../controllers/atlassian.repositories.content.controller.js';

// Create a contextualized logger for this file
const toolLogger = Logger.forContext('tools/atlassian.repositories.tool.ts');

// Log tool initialization
toolLogger.debug('Bitbucket repositories tool initialized');

/**
 * Handler for cloning a repository.
 */
async function handleRepoClone(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'handleRepoClone',
	);
	try {
		methodLogger.debug('Cloning repository:', args);

		// Pass args directly to controller
		const result = await handleCloneRepository(
			args as CloneRepositoryToolArgsType,
		);

		methodLogger.debug('Successfully cloned repository via controller');

		return {
			content: [
				{
					type: 'text' as const,
					text: truncateForAI(result.content, result.rawResponsePath),
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to clone repository', error);
		return formatErrorForMcpTool(error);
	}
}

// Tool description
const BB_CLONE_DESCRIPTION = `Clone a Bitbucket repository to your local filesystem using SSH (preferred) or HTTPS.

Provide \`repoSlug\` and \`targetPath\` (absolute path). Clones into \`targetPath/repoSlug\`. SSH keys must be configured; falls back to HTTPS if unavailable.`;

/**
 * Register all Bitbucket repository tools with the MCP server.
 * Uses the modern registerTool API (SDK v1.22.0+) instead of deprecated tool() method.
 *
 * Branch creation is now handled by bb_post tool.
 */
function registerTools(server: McpServer) {
	const registerLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'registerTools',
	);
	registerLogger.debug('Registering Repository tools...');

	// Register the clone repository tool using modern registerTool API
	server.registerTool(
		'bb_clone',
		{
			title: 'Clone Bitbucket Repository',
			description: BB_CLONE_DESCRIPTION,
			inputSchema: CloneRepositoryToolArgs,
		},
		handleRepoClone,
	);

	registerLogger.debug('Successfully registered Repository tools');
}

export default { registerTools };

```
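
For context, a minimal sketch of how `registerTools` could be wired into an MCP server over STDIO; the actual `src/index.ts` setup (transport selection, other tool modules) may differ.

```typescript
// Illustrative sketch of server wiring; the real entry point may differ.
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import repositoriesTool from './tools/atlassian.repositories.tool.js';
import { PACKAGE_NAME, VERSION } from './utils/constants.util.js';

async function startServer(): Promise<void> {
	const server = new McpServer({ name: PACKAGE_NAME, version: VERSION });
	repositoriesTool.registerTools(server);
	await server.connect(new StdioServerTransport());
}

startServer().catch((error) => {
	console.error(error);
	process.exit(1);
});
```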

--------------------------------------------------------------------------------
/src/utils/jq.util.ts:
--------------------------------------------------------------------------------

```typescript
import jmespath from 'jmespath';
import { Logger } from './logger.util.js';
import { toToonOrJson } from './toon.util.js';

const logger = Logger.forContext('utils/jq.util.ts');

/**
 * Apply a JMESPath filter to JSON data
 *
 * @param data - The data to filter (any JSON-serializable value)
 * @param filter - JMESPath expression to apply
 * @returns Filtered data or original data if filter is empty/invalid
 *
 * @example
 * // Get single field
 * applyJqFilter(data, "name")
 *
 * // Get nested field
 * applyJqFilter(data, "links.html.href")
 *
 * // Get multiple fields as object
 * applyJqFilter(data, "{name: name, slug: slug}")
 *
 * // Array operations
 * applyJqFilter(data, "values[*].name")
 */
export function applyJqFilter(data: unknown, filter?: string): unknown {
	const methodLogger = logger.forMethod('applyJqFilter');

	// Return original data if no filter provided
	if (!filter || filter.trim() === '') {
		methodLogger.debug('No filter provided, returning original data');
		return data;
	}

	try {
		methodLogger.debug(`Applying JMESPath filter: ${filter}`);
		const result = jmespath.search(data, filter);
		methodLogger.debug('Filter applied successfully');
		return result;
	} catch (error) {
		methodLogger.error(`Invalid JMESPath expression: ${filter}`, error);
		// Return original data with error info if filter is invalid
		return {
			_jqError: `Invalid JMESPath expression: ${filter}`,
			_originalData: data,
		};
	}
}

/**
 * Convert data to JSON string for MCP response
 *
 * @param data - The data to stringify
 * @param pretty - Whether to pretty-print the JSON (default: true)
 * @returns JSON string
 */
export function toJsonString(data: unknown, pretty: boolean = true): string {
	if (pretty) {
		return JSON.stringify(data, null, 2);
	}
	return JSON.stringify(data);
}

/**
 * Convert data to output string for MCP response
 *
 * By default, converts to TOON format (Token-Oriented Object Notation)
 * for improved LLM token efficiency (30-60% fewer tokens).
 * Falls back to JSON if TOON conversion fails or if useToon is false.
 *
 * @param data - The data to convert
 * @param useToon - Whether to use TOON format (default: true)
 * @param pretty - Whether to pretty-print JSON (default: true)
 * @returns TOON formatted string (default), or JSON string
 */
export async function toOutputString(
	data: unknown,
	useToon: boolean = true,
	pretty: boolean = true,
): Promise<string> {
	const jsonString = toJsonString(data, pretty);

	// Return JSON directly if TOON is not requested
	if (!useToon) {
		return jsonString;
	}

	// Try TOON conversion with JSON fallback
	return toToonOrJson(data, jsonString);
}

```
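
A short usage sketch combining the two helpers on a Bitbucket-style response; the sample payload is made up.

```typescript
// Illustrative sketch: JMESPath filtering followed by TOON/JSON serialization.
import { applyJqFilter, toOutputString } from './jq.util.js';

async function filterExample(): Promise<void> {
	const response = {
		values: [
			{ name: 'repo1', slug: 'repo-1', is_private: true },
			{ name: 'repo2', slug: 'repo-2', is_private: false },
		],
	};
	const filtered = applyJqFilter(
		response,
		'values[*].{name: name, slug: slug}',
	);
	console.log(await toOutputString(filtered)); // TOON by default, JSON fallback
}

filterExample().catch((error) => console.error(error));
```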

--------------------------------------------------------------------------------
/.github/workflows/ci-semantic-release.yml:
--------------------------------------------------------------------------------

```yaml
name: CI - Semantic Release

# This workflow is triggered on every push to the main branch
# It analyzes commits and automatically releases a new version when needed
on:
    push:
        branches: [main]

jobs:
    release:
        name: Semantic Release
        runs-on: ubuntu-latest
        # Permissions needed for creating releases, updating issues, and publishing packages
        permissions:
            contents: write # Needed to create releases and tags
            issues: write # Needed to comment on issues
            pull-requests: write # Needed to comment on pull requests
            # packages permission removed as we're not using GitHub Packages
        steps:
            # Step 1: Check out the full Git history for proper versioning
            - name: Checkout
              uses: actions/checkout@v5
              with:
                  fetch-depth: 0 # Fetches all history for all branches and tags

            # Step 2: Setup Node.js environment
            - name: Setup Node.js
              uses: actions/setup-node@v5
              with:
                  node-version: 22 # Using Node.js 22
                  cache: 'npm' # Enable npm caching

            # Step 3: Install dependencies with clean install
            - name: Install dependencies
              run: npm ci # Clean install preserving package-lock.json

            # Step 4: Build the package
            - name: Build
              run: npm run build # Compiles TypeScript to JavaScript

            # Step 5: Ensure executable permissions
            - name: Set executable permissions
              run: chmod +x dist/index.js

            # Step 6: Run tests to ensure quality
            - name: Verify tests
              run: npm test # Runs Jest tests

            # Step 7: Configure Git identity for releases
            - name: Configure Git User
              run: |
                  git config --global user.email "github-actions[bot]@users.noreply.github.com"
                  git config --global user.name "github-actions[bot]"

            # Step 8: Run semantic-release to analyze commits and publish to npm
            - name: Semantic Release
              id: semantic
              env:
                  # Tokens needed for GitHub and npm authentication
                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For creating releases and commenting
                  NPM_TOKEN: ${{ secrets.NPM_TOKEN }} # For publishing to npm
              run: |
                  echo "Running semantic-release for version bump and npm publishing"
                  npx semantic-release

                  # Note: GitHub Packages publishing has been removed

```

--------------------------------------------------------------------------------
/src/utils/workspace.util.ts:
--------------------------------------------------------------------------------

```typescript
import { Logger } from './logger.util.js';
import { config } from './config.util.js';
import atlassianWorkspacesService from '../services/vendor.atlassian.workspaces.service.js';
import { WorkspaceMembership } from '../services/vendor.atlassian.workspaces.types.js';

const workspaceLogger = Logger.forContext('utils/workspace.util.ts');

/**
 * Cache for workspace data to avoid repeated API calls
 */
let cachedDefaultWorkspace: string | null = null;
let cachedWorkspaces: WorkspaceMembership[] | null = null;

/**
 * Get the default workspace slug
 *
 * This function follows this priority:
 * 1. Use cached value if available
 * 2. Check BITBUCKET_DEFAULT_WORKSPACE environment variable
 * 3. Fetch from API and use the first workspace in the list
 *
 * @returns {Promise<string|null>} The default workspace slug or null if not available
 */
export async function getDefaultWorkspace(): Promise<string | null> {
	const methodLogger = workspaceLogger.forMethod('getDefaultWorkspace');

	// Step 1: Return cached value if available
	if (cachedDefaultWorkspace) {
		methodLogger.debug(
			`Using cached default workspace: ${cachedDefaultWorkspace}`,
		);
		return cachedDefaultWorkspace;
	}

	// Step 2: Check environment variable
	const envWorkspace = config.get('BITBUCKET_DEFAULT_WORKSPACE');
	if (envWorkspace) {
		methodLogger.debug(
			`Using default workspace from environment: ${envWorkspace}`,
		);
		cachedDefaultWorkspace = envWorkspace;
		return envWorkspace;
	}

	// Step 3: Fetch from API
	methodLogger.debug('No default workspace configured, fetching from API...');
	try {
		const workspaces = await getWorkspaces();

		if (workspaces.length > 0) {
			const defaultWorkspace = workspaces[0].workspace.slug;
			methodLogger.debug(
				`Using first workspace from API as default: ${defaultWorkspace}`,
			);
			cachedDefaultWorkspace = defaultWorkspace;
			return defaultWorkspace;
		} else {
			methodLogger.warn('No workspaces found in the account');
			return null;
		}
	} catch (error) {
		methodLogger.error('Failed to fetch default workspace', error);
		return null;
	}
}

/**
 * Get list of workspaces from API or cache
 *
 * @returns {Promise<WorkspaceMembership[]>} Array of workspace membership objects
 */
export async function getWorkspaces(): Promise<WorkspaceMembership[]> {
	const methodLogger = workspaceLogger.forMethod('getWorkspaces');

	if (cachedWorkspaces) {
		methodLogger.debug(
			`Using ${cachedWorkspaces.length} cached workspaces`,
		);
		return cachedWorkspaces;
	}

	try {
		const result = await atlassianWorkspacesService.list({
			pagelen: 10, // Limit to first 10 workspaces
		});

		if (result.values) {
			cachedWorkspaces = result.values;
			methodLogger.debug(`Cached ${result.values.length} workspaces`);
			return result.values;
		} else {
			return [];
		}
	} catch (error) {
		methodLogger.error('Failed to fetch workspaces list', error);
		return [];
	}
}

```
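
A controller that accepts an optional `workspaceSlug` argument can fall back to this helper when the caller omits it. The sketch below is hypothetical; the function name and import path are illustrative, not code from the repository:

```typescript
import { getDefaultWorkspace } from '../utils/workspace.util.js';

// Hypothetical helper a controller could use to resolve the workspace
async function resolveWorkspace(workspaceSlug?: string): Promise<string> {
	if (workspaceSlug) {
		return workspaceSlug;
	}

	// Falls back to BITBUCKET_DEFAULT_WORKSPACE or the first workspace from the API
	const defaultWorkspace = await getDefaultWorkspace();
	if (!defaultWorkspace) {
		throw new Error(
			'No workspace provided and no default workspace could be determined',
		);
	}
	return defaultWorkspace;
}

void resolveWorkspace('myteam').then((slug) => console.log(slug));
```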

--------------------------------------------------------------------------------
/src/utils/toon.util.ts:
--------------------------------------------------------------------------------

```typescript
import { Logger } from './logger.util.js';

const logger = Logger.forContext('utils/toon.util.ts');

/**
 * TOON encode function type (dynamically imported)
 */
type ToonEncode = (input: unknown, options?: { indent?: number }) => string;

/**
 * Cached TOON encode function
 */
let toonEncode: ToonEncode | null = null;

/**
 * Load the TOON encoder dynamically (ESM module in CommonJS project)
 */
async function loadToonEncoder(): Promise<ToonEncode | null> {
	if (toonEncode) {
		return toonEncode;
	}

	try {
		const toon = await import('@toon-format/toon');
		toonEncode = toon.encode;
		logger.debug('TOON encoder loaded successfully');
		return toonEncode;
	} catch (error) {
		logger.error('Failed to load TOON encoder', error);
		return null;
	}
}

/**
 * Convert data to TOON format with JSON fallback
 *
 * Attempts to encode data as TOON (Token-Oriented Object Notation) for
 * more efficient LLM token usage. Falls back to JSON if TOON encoding fails.
 *
 * @param data - The data to convert
 * @param jsonFallback - The JSON string to return if TOON conversion fails
 * @returns TOON formatted string, or JSON fallback on error
 *
 * @example
 * const json = JSON.stringify(data, null, 2);
 * const output = await toToonOrJson(data, json);
 */
export async function toToonOrJson(
	data: unknown,
	jsonFallback: string,
): Promise<string> {
	const methodLogger = logger.forMethod('toToonOrJson');

	try {
		const encode = await loadToonEncoder();
		if (!encode) {
			methodLogger.debug(
				'TOON encoder not available, using JSON fallback',
			);
			return jsonFallback;
		}

		const toonResult = encode(data, { indent: 2 });
		methodLogger.debug('Successfully converted to TOON format');
		return toonResult;
	} catch (error) {
		methodLogger.error(
			'TOON conversion failed, using JSON fallback',
			error,
		);
		return jsonFallback;
	}
}

/**
 * Synchronous TOON conversion with JSON fallback
 *
 * Uses cached encoder if available, otherwise returns JSON fallback.
 * Prefer toToonOrJson for first-time conversion.
 *
 * @param data - The data to convert
 * @param jsonFallback - The JSON string to return if TOON is unavailable
 * @returns TOON formatted string, or JSON fallback
 */
export function toToonOrJsonSync(data: unknown, jsonFallback: string): string {
	const methodLogger = logger.forMethod('toToonOrJsonSync');

	if (!toonEncode) {
		methodLogger.debug('TOON encoder not loaded, using JSON fallback');
		return jsonFallback;
	}

	try {
		const toonResult = toonEncode(data, { indent: 2 });
		methodLogger.debug('Successfully converted to TOON format');
		return toonResult;
	} catch (error) {
		methodLogger.error(
			'TOON conversion failed, using JSON fallback',
			error,
		);
		return jsonFallback;
	}
}

/**
 * Pre-load the TOON encoder for synchronous usage later
 * Call this during server initialization
 */
export async function preloadToonEncoder(): Promise<boolean> {
	const encode = await loadToonEncoder();
	return encode !== null;
}

```
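
`toToonOrJsonSync` only succeeds if the encoder has already been loaded, so a server would typically call `preloadToonEncoder()` once during startup. A minimal sketch of that pattern, with a hypothetical bootstrap function and illustrative import path:

```typescript
import {
	preloadToonEncoder,
	toToonOrJsonSync,
} from '../utils/toon.util.js';

// Hypothetical server bootstrap step
async function initServer(): Promise<void> {
	// Load the ESM-only encoder once so later synchronous calls can use it
	const loaded = await preloadToonEncoder();
	console.log(
		`TOON encoder ${loaded ? 'ready' : 'unavailable, JSON fallback in use'}`,
	);
}

function formatResponse(data: unknown): string {
	const json = JSON.stringify(data, null, 2);
	// Uses TOON if the encoder was preloaded, otherwise returns the JSON fallback
	return toToonOrJsonSync(data, json);
}

void initServer().then(() => {
	console.log(formatResponse({ name: 'example', values: [1, 2, 3] }));
});
```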

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.workspaces.types.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';

/**
 * Types for Atlassian Bitbucket Workspaces API
 */

/**
 * Workspace type (basic object)
 */
export const WorkspaceTypeSchema = z.literal('workspace');
export type WorkspaceType = z.infer<typeof WorkspaceTypeSchema>;

/**
 * Workspace user object
 */
export const WorkspaceUserSchema = z.object({
	type: z.literal('user'),
	uuid: z.string(),
	nickname: z.string(),
	display_name: z.string(),
});

/**
 * Workspace permission type
 */
export const WorkspacePermissionSchema = z.enum([
	'owner',
	'collaborator',
	'member',
]);

/**
 * Workspace links object
 */
const LinkSchema = z.object({
	href: z.string(),
	name: z.string().optional(),
});

export const WorkspaceLinksSchema = z.object({
	avatar: LinkSchema.optional(),
	html: LinkSchema.optional(),
	members: LinkSchema.optional(),
	owners: LinkSchema.optional(),
	projects: LinkSchema.optional(),
	repositories: LinkSchema.optional(),
	snippets: LinkSchema.optional(),
	self: LinkSchema.optional(),
});
export type WorkspaceLinks = z.infer<typeof WorkspaceLinksSchema>;

/**
 * Workspace forking mode
 */
export const WorkspaceForkingModeSchema = z.enum([
	'allow_forks',
	'no_public_forks',
	'no_forks',
]);
export type WorkspaceForkingMode = z.infer<typeof WorkspaceForkingModeSchema>;

/**
 * Workspace object returned from the API
 */
export const WorkspaceSchema: z.ZodType<{
	type: WorkspaceType;
	uuid: string;
	name: string;
	slug: string;
	is_private?: boolean;
	is_privacy_enforced?: boolean;
	forking_mode?: WorkspaceForkingMode;
	created_on?: string;
	updated_on?: string;
	links: WorkspaceLinks;
}> = z.object({
	type: WorkspaceTypeSchema,
	uuid: z.string(),
	name: z.string(),
	slug: z.string(),
	is_private: z.boolean().optional(),
	is_privacy_enforced: z.boolean().optional(),
	forking_mode: WorkspaceForkingModeSchema.optional(),
	created_on: z.string().optional(),
	updated_on: z.string().optional(),
	links: WorkspaceLinksSchema,
});

/**
 * Workspace membership object
 */
export const WorkspaceMembershipSchema = z.object({
	type: z.literal('workspace_membership'),
	permission: WorkspacePermissionSchema,
	last_accessed: z.string().optional(),
	added_on: z.string().optional(),
	user: WorkspaceUserSchema,
	workspace: WorkspaceSchema,
});
export type WorkspaceMembership = z.infer<typeof WorkspaceMembershipSchema>;

/**
 * Extended workspace object with optional fields
 * @remarks Currently identical to Workspace, but allows for future extension
 */
export const WorkspaceDetailedSchema = WorkspaceSchema;
export type WorkspaceDetailed = z.infer<typeof WorkspaceDetailedSchema>;

/**
 * Parameters for listing workspaces
 */
export const ListWorkspacesParamsSchema = z.object({
	q: z.string().optional(),
	page: z.number().optional(),
	pagelen: z.number().optional(),
});
export type ListWorkspacesParams = z.infer<typeof ListWorkspacesParamsSchema>;

/**
 * API response for user permissions on workspaces
 */
export const WorkspacePermissionsResponseSchema = z.object({
	pagelen: z.number(),
	page: z.number(),
	size: z.number(),
	next: z.string().optional(),
	previous: z.string().optional(),
	values: z.array(WorkspaceMembershipSchema),
});
export type WorkspacePermissionsResponse = z.infer<
	typeof WorkspacePermissionsResponseSchema
>;

```
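
These Zod schemas can validate raw API payloads before the rest of the code touches them. A minimal sketch with a hand-written, hypothetical response object (field values and the import path are illustrative):

```typescript
import { WorkspacePermissionsResponseSchema } from '../services/vendor.atlassian.workspaces.types.js';

// Hypothetical payload shaped like GET /2.0/user/permissions/workspaces
const raw: unknown = {
	pagelen: 10,
	page: 1,
	size: 1,
	values: [
		{
			type: 'workspace_membership',
			permission: 'owner',
			user: {
				type: 'user',
				uuid: '{u-1}',
				nickname: 'alice',
				display_name: 'Alice',
			},
			workspace: {
				type: 'workspace',
				uuid: '{w-1}',
				name: 'My Team',
				slug: 'myteam',
				links: { html: { href: 'https://bitbucket.org/myteam/' } },
			},
		},
	],
};

const parsed = WorkspacePermissionsResponseSchema.safeParse(raw);
if (parsed.success) {
	console.log(parsed.data.values[0].workspace.slug); // "myteam"
} else {
	console.error(parsed.error.issues);
}
```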

--------------------------------------------------------------------------------
/src/utils/response.util.ts:
--------------------------------------------------------------------------------

```typescript
import * as fs from 'fs';
import * as path from 'path';
import * as crypto from 'crypto';
import { Logger } from './logger.util.js';
import { PACKAGE_NAME } from './constants.util.js';

// Create a contextualized logger for this file
const responseLogger = Logger.forContext('utils/response.util.ts');

/**
 * Get the project name from PACKAGE_NAME, stripping the scope prefix
 * e.g., "@aashari/mcp-server-atlassian-bitbucket" -> "mcp-server-atlassian-bitbucket"
 */
function getProjectName(): string {
	const name = PACKAGE_NAME.replace(/^@[^/]+\//, '');
	return name;
}

/**
 * Generate a unique filename with timestamp and random string
 * Format: <timestamp>-<random>.txt
 */
function generateFilename(): string {
	const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
	const randomStr = crypto.randomBytes(4).toString('hex');
	return `${timestamp}-${randomStr}.txt`;
}

/**
 * Ensure the directory exists, creating it if necessary
 */
function ensureDirectoryExists(dirPath: string): void {
	if (!fs.existsSync(dirPath)) {
		fs.mkdirSync(dirPath, { recursive: true });
		responseLogger.debug(`Created directory: ${dirPath}`);
	}
}

/**
 * Save raw API response to a file in /tmp/mcp/<project-name>/
 *
 * @param url The URL that was called
 * @param method The HTTP method used
 * @param requestBody The request body (if any)
 * @param responseData The raw response data
 * @param statusCode The HTTP status code
 * @param durationMs The request duration in milliseconds
 * @returns The path to the saved file, or null if saving failed
 */
export function saveRawResponse(
	url: string,
	method: string,
	requestBody: unknown,
	responseData: unknown,
	statusCode: number,
	durationMs: number,
): string | null {
	const methodLogger = Logger.forContext(
		'utils/response.util.ts',
		'saveRawResponse',
	);

	try {
		const projectName = getProjectName();
		const dirPath = path.join('/tmp', 'mcp', projectName);
		const filename = generateFilename();
		const filePath = path.join(dirPath, filename);

		ensureDirectoryExists(dirPath);

		// Build the content
		const content = buildResponseContent(
			url,
			method,
			requestBody,
			responseData,
			statusCode,
			durationMs,
		);

		// Write to file
		fs.writeFileSync(filePath, content, 'utf8');
		methodLogger.debug(`Saved raw response to: ${filePath}`);

		return filePath;
	} catch (error) {
		methodLogger.error('Failed to save raw response', error);
		return null;
	}
}

/**
 * Build the content string for the response file
 */
function buildResponseContent(
	url: string,
	method: string,
	requestBody: unknown,
	responseData: unknown,
	statusCode: number,
	durationMs: number,
): string {
	const timestamp = new Date().toISOString();
	const separator = '='.repeat(80);

	let content = `${separator}
RAW API RESPONSE LOG
${separator}

Timestamp: ${timestamp}
URL: ${url}
Method: ${method}
Status Code: ${statusCode}
Duration: ${durationMs.toFixed(2)}ms

${separator}
REQUEST BODY
${separator}
`;

	if (requestBody) {
		content +=
			typeof requestBody === 'string'
				? requestBody
				: JSON.stringify(requestBody, null, 2);
	} else {
		content += '(no request body)';
	}

	content += `

${separator}
RESPONSE DATA
${separator}
`;

	if (responseData !== undefined && responseData !== null) {
		content +=
			typeof responseData === 'string'
				? responseData
				: JSON.stringify(responseData, null, 2);
	} else {
		content += '(no response data)';
	}

	content += `
${separator}
`;

	return content;
}

```
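
A transport layer could call `saveRawResponse` right after a request completes, measuring the duration itself. The sketch below is hypothetical; the URL, payload, and timing code only illustrate the argument order:

```typescript
import { saveRawResponse } from '../utils/response.util.js';

// Hypothetical logging hook invoked after an API call finishes
function logApiCall(): void {
	const startedAt = performance.now();

	// ... the actual HTTP request would happen here ...
	const responseData = { values: [], pagelen: 10, page: 1, size: 0 };
	const durationMs = performance.now() - startedAt;

	const savedPath = saveRawResponse(
		'https://api.bitbucket.org/2.0/workspaces',
		'GET',
		undefined, // no request body for GET
		responseData,
		200,
		durationMs,
	);

	if (savedPath) {
		console.log(`Raw response written to ${savedPath}`);
	}
}

logApiCall();
```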

--------------------------------------------------------------------------------
/src/tools/atlassian.api.types.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';

/**
 * Output format options for API responses
 * - toon: Token-Oriented Object Notation (default, more token-efficient for LLMs)
 * - json: Standard JSON format
 */
export const OutputFormat = z
	.enum(['toon', 'json'])
	.optional()
	.describe(
		'Output format: "toon" (default, 30-60% fewer tokens) or "json". TOON is optimized for LLMs with tabular arrays and minimal syntax.',
	);

/**
 * Base schema fields shared by all API tool arguments
 * Contains path, queryParams, jq filter, and outputFormat
 */
const BaseApiToolArgs = {
	/**
	 * The API endpoint path (without base URL)
	 * Examples:
	 * - "/workspaces" - list workspaces
	 * - "/workspaces/{workspace}" - get workspace details
	 * - "/repositories/{workspace}/{repo_slug}" - get repository
	 * - "/repositories/{workspace}/{repo_slug}/pullrequests" - list PRs
	 * - "/repositories/{workspace}/{repo_slug}/pullrequests/{pull_request_id}" - get PR
	 * - "/repositories/{workspace}/{repo_slug}/commits" - get commits
	 * - "/repositories/{workspace}/{repo_slug}/src/{commit}/{path}" - get file content
	 */
	path: z
		.string()
		.min(1, 'Path is required')
		.describe(
			'The Bitbucket API endpoint path (without base URL). Must start with "/". Examples: "/workspaces", "/repositories/{workspace}/{repo_slug}", "/repositories/{workspace}/{repo_slug}/pullrequests/{id}"',
		),

	/**
	 * Optional query parameters as key-value pairs
	 */
	queryParams: z
		.record(z.string(), z.string())
		.optional()
		.describe(
			'Optional query parameters as key-value pairs. Examples: {"pagelen": "25", "page": "2", "q": "state=\\"OPEN\\"", "fields": "values.title,values.state"}',
		),

	/**
	 * Optional JMESPath expression to filter/transform the response
	 * IMPORTANT: Always use this to reduce response size and token costs
	 */
	jq: z
		.string()
		.optional()
		.describe(
			'JMESPath expression to filter/transform the response. IMPORTANT: Always use this to extract only needed fields and reduce token costs. Examples: "values[*].{name: name, slug: slug}" (extract specific fields), "values[0]" (first result), "values[*].name" (names only). See https://jmespath.org',
		),

	/**
	 * Output format for the response
	 * Defaults to TOON (token-efficient), can be set to JSON if needed
	 */
	outputFormat: OutputFormat,
};

/**
 * Body field for requests that include a request body (POST, PUT, PATCH)
 */
const bodyField = z
	.record(z.string(), z.unknown())
	.describe(
		'Request body as a JSON object. Structure depends on the endpoint. Example for PR: {"title": "My PR", "source": {"branch": {"name": "feature"}}}',
	);

/**
 * Schema for bb_get tool arguments (GET requests - no body)
 */
export const GetApiToolArgs = z.object(BaseApiToolArgs);
export type GetApiToolArgsType = z.infer<typeof GetApiToolArgs>;

/**
 * Schema for requests with body (POST, PUT, PATCH)
 */
export const RequestWithBodyArgs = z.object({
	...BaseApiToolArgs,
	body: bodyField,
});
export type RequestWithBodyArgsType = z.infer<typeof RequestWithBodyArgs>;

/**
 * Schema for bb_post tool arguments (POST requests)
 */
export const PostApiToolArgs = RequestWithBodyArgs;
export type PostApiToolArgsType = RequestWithBodyArgsType;

/**
 * Schema for bb_put tool arguments (PUT requests)
 */
export const PutApiToolArgs = RequestWithBodyArgs;
export type PutApiToolArgsType = RequestWithBodyArgsType;

/**
 * Schema for bb_patch tool arguments (PATCH requests)
 */
export const PatchApiToolArgs = RequestWithBodyArgs;
export type PatchApiToolArgsType = RequestWithBodyArgsType;

/**
 * Schema for bb_delete tool arguments (DELETE requests - no body)
 */
export const DeleteApiToolArgs = GetApiToolArgs;
export type DeleteApiToolArgsType = GetApiToolArgsType;

```
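
The exported schemas are what the MCP tool handlers use to validate incoming arguments. A minimal sketch validating hypothetical `bb_get` and `bb_post` argument objects (the paths and values are illustrative):

```typescript
import {
	GetApiToolArgs,
	PostApiToolArgs,
} from '../tools/atlassian.api.types.js';

// Hypothetical arguments for a bb_get call listing open pull requests
const getArgs = GetApiToolArgs.parse({
	path: '/repositories/myteam/project-api/pullrequests',
	queryParams: { q: 'state="OPEN"', pagelen: '25' },
	jq: 'values[*].{id: id, title: title}',
	outputFormat: 'toon',
});

// Hypothetical arguments for a bb_post call creating a pull request
const postArgs = PostApiToolArgs.parse({
	path: '/repositories/myteam/project-api/pullrequests',
	body: {
		title: 'My PR',
		source: { branch: { name: 'feature' } },
	},
});

console.log(getArgs.path, postArgs.body.title);
```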

--------------------------------------------------------------------------------
/src/utils/config.util.test.ts:
--------------------------------------------------------------------------------

```typescript
import {
	ErrorType,
	McpError,
	createApiError,
	createAuthMissingError,
	createAuthInvalidError,
	createUnexpectedError,
	ensureMcpError,
	formatErrorForMcpTool,
	formatErrorForMcpResource,
} from './error.util.js';

describe('Error Utility', () => {
	describe('McpError', () => {
		it('should create an error with the correct properties', () => {
			const error = new McpError('Test error', ErrorType.API_ERROR, 404);

			expect(error).toBeInstanceOf(Error);
			expect(error).toBeInstanceOf(McpError);
			expect(error.message).toBe('Test error');
			expect(error.type).toBe(ErrorType.API_ERROR);
			expect(error.statusCode).toBe(404);
			expect(error.name).toBe('McpError');
		});
	});

	describe('Error Factory Functions', () => {
		it('should create auth missing error', () => {
			const error = createAuthMissingError();

			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.AUTH_MISSING);
			expect(error.message).toBe(
				'Authentication credentials are missing',
			);
		});

		it('should create auth invalid error', () => {
			const error = createAuthInvalidError('Invalid token');

			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.AUTH_INVALID);
			expect(error.statusCode).toBe(401);
			expect(error.message).toBe('Invalid token');
		});

		it('should create API error', () => {
			const originalError = new Error('Original error');
			const error = createApiError('API failed', 500, originalError);

			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.API_ERROR);
			expect(error.statusCode).toBe(500);
			expect(error.message).toBe('API failed');
			expect(error.originalError).toBe(originalError);
		});

		it('should create unexpected error', () => {
			const error = createUnexpectedError();

			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.UNEXPECTED_ERROR);
			expect(error.message).toBe('An unexpected error occurred');
		});
	});

	describe('ensureMcpError', () => {
		it('should return the same error if it is already an McpError', () => {
			const originalError = createApiError('Original error');
			const error = ensureMcpError(originalError);

			expect(error).toBe(originalError);
		});

		it('should wrap a standard Error', () => {
			const originalError = new Error('Standard error');
			const error = ensureMcpError(originalError);

			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.UNEXPECTED_ERROR);
			expect(error.message).toBe('Standard error');
			expect(error.originalError).toBe(originalError);
		});

		it('should handle non-Error objects', () => {
			const error = ensureMcpError('String error');

			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.UNEXPECTED_ERROR);
			expect(error.message).toBe('String error');
		});
	});

	describe('formatErrorForMcpTool', () => {
		it('should format an error for MCP tool response', () => {
			const error = createApiError('API error');
			const response = formatErrorForMcpTool(error);

			expect(response).toHaveProperty('content');
			expect(response.content).toHaveLength(1);
			expect(response.content[0]).toHaveProperty('type', 'text');
			expect(response.content[0]).toHaveProperty(
				'text',
				'Error: API error',
			);
		});
	});

	describe('formatErrorForMcpResource', () => {
		it('should format an error for MCP resource response', () => {
			const error = createApiError('API error');
			const response = formatErrorForMcpResource(error, 'test://uri');

			expect(response).toHaveProperty('contents');
			expect(response.contents).toHaveLength(1);
			expect(response.contents[0]).toHaveProperty('uri', 'test://uri');
			expect(response.contents[0]).toHaveProperty(
				'text',
				'Error: API error',
			);
			expect(response.contents[0]).toHaveProperty(
				'mimeType',
				'text/plain',
			);
			expect(response.contents[0]).toHaveProperty(
				'description',
				'Error: API_ERROR',
			);
		});
	});
});

```

--------------------------------------------------------------------------------
/src/utils/cli.test.util.ts:
--------------------------------------------------------------------------------

```typescript
import { spawn } from 'child_process';
import { join } from 'path';

/**
 * Utility for testing CLI commands with real execution
 */
export class CliTestUtil {
	/**
	 * Executes a CLI command and returns the result
	 *
	 * @param args - CLI arguments to pass to the command
	 * @param options - Test options
	 * @returns Promise with stdout, stderr, and exit code
	 */
	static async runCommand(
		args: string[],
		options: {
			timeoutMs?: number;
			env?: Record<string, string>;
		} = {},
	): Promise<{
		stdout: string;
		stderr: string;
		exitCode: number;
	}> {
		// Default timeout of 30 seconds
		const timeoutMs = options.timeoutMs || 30000;

		// CLI execution path - points to the built CLI script
		const cliPath = join(process.cwd(), 'dist', 'index.js');

		return new Promise((resolve, reject) => {
			// Spawn the process with the given arguments
			const child = spawn('node', [cliPath, ...args], {
				env: {
					...process.env,
					...options.env,
				},
			});

			// Set up timeout handler to kill the child if it runs too long
			const timeoutId = setTimeout(() => {
				child.kill();
				reject(new Error(`CLI command timed out after ${timeoutMs}ms`));
			}, timeoutMs);

			// Capture stdout and stderr
			let stdout = '';
			let stderr = '';

			// Collect stdout data
			child.stdout.on('data', (data) => {
				stdout += data.toString();
			});

			// Collect stderr data
			child.stderr.on('data', (data) => {
				stderr += data.toString();
			});

			// Handle process completion
			child.on('close', (exitCode) => {
				clearTimeout(timeoutId);
				resolve({
					stdout,
					stderr,
					exitCode: exitCode ?? 0,
				});
			});

			// Handle process errors
			child.on('error', (err) => {
				clearTimeout(timeoutId);
				reject(err);
			});
		});
	}

	/**
	 * Validates that stdout contains expected strings/patterns
	 */
	static validateOutputContains(
		output: string,
		expectedPatterns: (string | RegExp)[],
	): void {
		for (const pattern of expectedPatterns) {
			if (typeof pattern === 'string') {
				expect(output).toContain(pattern);
			} else {
				expect(output).toMatch(pattern);
			}
		}
	}

	/**
	 * Validates Markdown output format
	 */
	static validateMarkdownOutput(output: string): void {
		// Check for Markdown heading
		expect(output).toMatch(/^#\s.+/m);

		// Check for markdown formatting elements like bold text, lists, etc.
		const markdownElements = [
			/\*\*.+\*\*/, // Bold text
			/-\s.+/, // List items
			/\|.+\|.+\|/, // Table rows
			/\[.+\]\(.+\)/, // Links
		];

		expect(markdownElements.some((pattern) => pattern.test(output))).toBe(
			true,
		);
	}

	/**
	 * Extracts and parses JSON from CLI output
	 * Handles output that may contain log lines before the JSON
	 *
	 * @param output - The CLI output string
	 * @returns Parsed JSON object or null if no valid JSON found
	 */
	static extractJsonFromOutput(
		output: string,
	): Record<string, unknown> | null {
		// Split by newlines and find lines that could be start of JSON
		const lines = output.split('\n');
		let jsonStartIndex = -1;

		// Find the first line that starts with '{' (the actual JSON output)
		for (let i = 0; i < lines.length; i++) {
			const trimmed = lines[i].trim();
			if (trimmed.startsWith('{') && !trimmed.includes('[')) {
				// This looks like start of JSON, not a log line with timestamp
				jsonStartIndex = i;
				break;
			}
		}

		if (jsonStartIndex === -1) {
			return null;
		}

		// Join from the JSON start to the end
		const jsonStr = lines.slice(jsonStartIndex).join('\n');

		try {
			return JSON.parse(jsonStr);
		} catch {
			// Try to find the matching closing brace
			let braceCount = 0;
			let endIndex = 0;
			for (let i = 0; i < jsonStr.length; i++) {
				if (jsonStr[i] === '{') braceCount++;
				if (jsonStr[i] === '}') braceCount--;
				if (braceCount === 0) {
					endIndex = i + 1;
					break;
				}
			}
			if (endIndex > 0) {
				try {
					return JSON.parse(jsonStr.substring(0, endIndex));
				} catch {
					return null;
				}
			}
			return null;
		}
	}
}

```
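
A CLI integration test would typically combine `runCommand` with one of the validators. The Jest sketch below is hypothetical: the command arguments are illustrative rather than real CLI commands from this project, and it assumes the project has been built so `dist/index.js` exists:

```typescript
import { CliTestUtil } from '../utils/cli.test.util.js';

describe('CLI smoke test (hypothetical)', () => {
	it('prints parseable JSON for a hypothetical command', async () => {
		const result = await CliTestUtil.runCommand(
			['some-command', '--output', 'json'], // hypothetical arguments
			{ timeoutMs: 60000 },
		);

		expect(result.exitCode).toBe(0);

		// Strip any log lines that precede the JSON payload
		const parsed = CliTestUtil.extractJsonFromOutput(result.stdout);
		expect(parsed).not.toBeNull();
	}, 60000);
});
```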

--------------------------------------------------------------------------------
/src/utils/bitbucket-error-detection.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, expect, test } from '@jest/globals';
import { detectErrorType, ErrorCode } from './error-handler.util.js';
import { createApiError } from './error.util.js';

describe('Bitbucket Error Detection', () => {
	describe('Classic Bitbucket error structure: { error: { message, detail } }', () => {
		test('detects not found errors', () => {
			// Create a mock Bitbucket error structure
			const bitbucketError = {
				error: {
					message: 'Repository not found',
					detail: 'The repository does not exist or you do not have access',
				},
			};
			const mcpError = createApiError('API Error', 404, bitbucketError);

			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.NOT_FOUND,
				statusCode: 404,
			});
		});

		test('detects access denied errors', () => {
			const bitbucketError = {
				error: {
					message: 'Access denied to this repository',
					detail: 'You need admin permissions to perform this action',
				},
			};
			const mcpError = createApiError('API Error', 403, bitbucketError);

			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.ACCESS_DENIED,
				statusCode: 403,
			});
		});

		test('detects validation errors', () => {
			const bitbucketError = {
				error: {
					message: 'Invalid parameter: repository name',
					detail: 'Repository name can only contain alphanumeric characters',
				},
			};
			const mcpError = createApiError('API Error', 400, bitbucketError);

			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.VALIDATION_ERROR,
				statusCode: 400,
			});
		});

		test('detects rate limit errors', () => {
			const bitbucketError = {
				error: {
					message: 'Too many requests',
					detail: 'Rate limit exceeded. Try again later.',
				},
			};
			const mcpError = createApiError('API Error', 429, bitbucketError);

			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.RATE_LIMIT_ERROR,
				statusCode: 429,
			});
		});
	});

	describe('Alternate Bitbucket error structure: { type: "error", ... }', () => {
		test('detects not found errors', () => {
			const altBitbucketError = {
				type: 'error',
				status: 404,
				message: 'Resource not found',
			};
			const mcpError = createApiError(
				'API Error',
				404,
				altBitbucketError,
			);

			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.NOT_FOUND,
				statusCode: 404,
			});
		});

		test('detects access denied errors', () => {
			const altBitbucketError = {
				type: 'error',
				status: 403,
				message: 'Forbidden',
			};
			const mcpError = createApiError(
				'API Error',
				403,
				altBitbucketError,
			);

			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.ACCESS_DENIED,
				statusCode: 403,
			});
		});
	});

	describe('Bitbucket errors array structure: { errors: [{ ... }] }', () => {
		test('detects errors from array structure', () => {
			const arrayBitbucketError = {
				errors: [
					{
						status: 400,
						code: 'INVALID_REQUEST_PARAMETER',
						title: 'Invalid parameter value',
						message: 'The parameter is not valid',
					},
				],
			};
			const mcpError = createApiError(
				'API Error',
				400,
				arrayBitbucketError,
			);

			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.VALIDATION_ERROR,
				statusCode: 400,
			});
		});
	});

	describe('Network errors in Bitbucket context', () => {
		test('detects network errors from TypeError', () => {
			const networkError = new TypeError('Failed to fetch');
			const mcpError = createApiError('Network Error', 500, networkError);

			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.NETWORK_ERROR,
				statusCode: 500,
			});
		});

		test('detects other common network error messages', () => {
			const errorMessages = [
				'network error occurred',
				'ECONNREFUSED',
				'ENOTFOUND',
				'Network request failed',
				'Failed to fetch',
			];

			errorMessages.forEach((msg) => {
				const error = new Error(msg);
				const result = detectErrorType(error);
				expect(result).toEqual({
					code: ErrorCode.NETWORK_ERROR,
					statusCode: 500,
				});
			});
		});
	});
});

```

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------

```json
{
  "name": "@aashari/mcp-server-atlassian-bitbucket",
  "version": "2.3.0",
  "description": "Node.js/TypeScript MCP server for Atlassian Bitbucket. Enables AI systems (LLMs) to interact with workspaces, repositories, and pull requests via tools (list, get, comment, search). Connects AI directly to version control workflows through the standard MCP interface.",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "type": "commonjs",
  "repository": {
    "type": "git",
    "url": "https://github.com/aashari/mcp-server-atlassian-bitbucket.git"
  },
  "bin": {
    "mcp-atlassian-bitbucket": "./dist/index.js"
  },
  "scripts": {
    "build": "tsc",
    "prepare": "npm run build && node scripts/ensure-executable.js",
    "postinstall": "node scripts/ensure-executable.js",
    "clean": "rm -rf dist coverage",
    "test": "jest",
    "test:coverage": "jest --coverage",
    "test:cli": "jest src/cli/.*\\.cli\\.test\\.ts --runInBand --testTimeout=60000",
    "lint": "eslint src --ext .ts --config eslint.config.mjs",
    "format": "prettier --write 'src/**/*.ts' 'scripts/**/*.js'",
    "publish:npm": "npm publish",
    "update:check": "npx npm-check-updates",
    "update:deps": "npx npm-check-updates -u && npm install --legacy-peer-deps",
    "update:version": "node scripts/update-version.js",
    "mcp:stdio": "TRANSPORT_MODE=stdio npm run build && node dist/index.js",
    "mcp:http": "TRANSPORT_MODE=http npm run build && node dist/index.js",
    "mcp:inspect": "TRANSPORT_MODE=http npm run build && (node dist/index.js &) && sleep 2 && npx @modelcontextprotocol/inspector http://localhost:3000/mcp",
    "dev:stdio": "npm run build && npx @modelcontextprotocol/inspector -e TRANSPORT_MODE=stdio -e DEBUG=true node dist/index.js",
    "dev:http": "DEBUG=true TRANSPORT_MODE=http npm run build && node dist/index.js",
    "dev:server": "DEBUG=true npm run build && npx @modelcontextprotocol/inspector -e DEBUG=true node dist/index.js",
    "dev:cli": "DEBUG=true npm run build && DEBUG=true node dist/index.js",
    "start:server": "npm run build && npx @modelcontextprotocol/inspector node dist/index.js",
    "start:cli": "npm run build && node dist/index.js",
    "cli": "npm run build && node dist/index.js"
  },
  "keywords": [
    "mcp",
    "typescript",
    "claude",
    "anthropic",
    "ai",
    "atlassian",
    "bitbucket",
    "repository",
    "version-control",
    "pull-request",
    "server",
    "model-context-protocol",
    "tools",
    "resources",
    "tooling",
    "ai-integration",
    "mcp-server",
    "llm",
    "ai-connector",
    "external-tools",
    "cli",
    "mcp-inspector"
  ],
  "author": "Andi Ashari",
  "license": "ISC",
  "devDependencies": {
    "@eslint/js": "^9.39.1",
    "@semantic-release/changelog": "^6.0.3",
    "@semantic-release/exec": "^7.1.0",
    "@semantic-release/git": "^10.0.1",
    "@semantic-release/github": "^12.0.2",
    "@semantic-release/npm": "^13.1.2",
    "@types/cors": "^2.8.19",
    "@types/express": "^5.0.5",
    "@types/jest": "^30.0.0",
    "@types/jmespath": "^0.15.2",
    "@types/node": "^24.10.1",
    "@typescript-eslint/eslint-plugin": "^8.48.0",
    "@typescript-eslint/parser": "^8.48.0",
    "eslint": "^9.39.1",
    "eslint-config-prettier": "^10.1.8",
    "eslint-plugin-filenames": "^1.3.2",
    "eslint-plugin-prettier": "^5.5.4",
    "jest": "^30.2.0",
    "nodemon": "^3.1.11",
    "npm-check-updates": "^19.1.2",
    "prettier": "^3.7.3",
    "semantic-release": "^25.0.2",
    "ts-jest": "^29.4.5",
    "ts-node": "^10.9.2",
    "typescript": "^5.9.3",
    "typescript-eslint": "^8.48.0"
  },
  "publishConfig": {
    "registry": "https://registry.npmjs.org/",
    "access": "public"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.23.0",
    "@toon-format/toon": "^2.0.1",
    "commander": "^14.0.2",
    "cors": "^2.8.5",
    "dotenv": "^17.2.3",
    "express": "^5.1.0",
    "jmespath": "^0.16.0",
    "zod": "^4.1.13"
  },
  "directories": {
    "example": "examples"
  },
  "jest": {
    "preset": "ts-jest",
    "testEnvironment": "node",
    "setupFilesAfterEnv": [
      "<rootDir>/src/utils/jest.setup.ts"
    ],
    "testMatch": [
      "**/src/**/*.test.ts"
    ],
    "collectCoverageFrom": [
      "src/**/*.ts",
      "!src/**/*.test.ts",
      "!src/**/*.spec.ts"
    ],
    "coveragePathIgnorePatterns": [
      "/node_modules/",
      "/dist/",
      "/coverage/"
    ],
    "coverageReporters": [
      "text",
      "lcov",
      "json-summary"
    ],
    "transform": {
      "^.+\\.tsx?$": [
        "ts-jest",
        {
          "useESM": true
        }
      ]
    },
    "moduleNameMapper": {
      "(.*)\\.(js|jsx)$": "$1"
    },
    "extensionsToTreatAsEsm": [
      ".ts"
    ],
    "moduleFileExtensions": [
      "ts",
      "tsx",
      "js",
      "jsx",
      "json",
      "node"
    ]
  },
  "engines": {
    "node": ">=18.0.0"
  }
}

```

--------------------------------------------------------------------------------
/src/utils/toon.util.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, expect, test } from '@jest/globals';
import { toToonOrJson, toToonOrJsonSync } from './toon.util.js';

/**
 * NOTE: The TOON encoder (@toon-format/toon) is an ESM-only package.
 * In Jest's CommonJS test environment, dynamic imports may not work,
 * causing TOON conversion to fall back to JSON. These tests verify:
 * 1. The fallback mechanism works correctly
 * 2. Functions return valid output (either TOON or JSON fallback)
 * 3. Error handling is robust
 *
 * TOON conversion is verified at runtime via CLI/integration tests.
 */

describe('TOON Utilities', () => {
	describe('toToonOrJson', () => {
		test('returns valid output for simple object', async () => {
			const data = { name: 'Alice', age: 30 };
			const jsonFallback = JSON.stringify(data, null, 2);

			const result = await toToonOrJson(data, jsonFallback);

			// Should return either TOON or JSON fallback
			expect(result).toBeDefined();
			expect(result.length).toBeGreaterThan(0);
			// Should contain the data values regardless of format
			expect(result).toContain('Alice');
			expect(result).toContain('30');
		});

		test('returns valid output for array of objects', async () => {
			const data = {
				users: [
					{ id: 1, name: 'Alice', role: 'admin' },
					{ id: 2, name: 'Bob', role: 'user' },
				],
			};
			const jsonFallback = JSON.stringify(data, null, 2);

			const result = await toToonOrJson(data, jsonFallback);

			expect(result).toBeDefined();
			expect(result).toContain('Alice');
			expect(result).toContain('Bob');
		});

		test('returns valid output for nested object', async () => {
			const data = {
				context: {
					task: 'Test task',
					location: 'Test location',
				},
				items: ['a', 'b', 'c'],
			};
			const jsonFallback = JSON.stringify(data, null, 2);

			const result = await toToonOrJson(data, jsonFallback);

			expect(result).toBeDefined();
			expect(result).toContain('Test task');
			expect(result).toContain('Test location');
		});

		test('handles primitive values', async () => {
			const stringData = 'hello';
			const numberData = 42;
			const boolData = true;
			const nullData = null;

			// All primitives should produce valid output
			const strResult = await toToonOrJson(stringData, '"hello"');
			const numResult = await toToonOrJson(numberData, '42');
			const boolResult = await toToonOrJson(boolData, 'true');
			const nullResult = await toToonOrJson(nullData, 'null');

			expect(strResult).toContain('hello');
			expect(numResult).toContain('42');
			expect(boolResult).toContain('true');
			expect(nullResult).toContain('null');
		});

		test('handles empty objects and arrays', async () => {
			const emptyObj = {};
			const emptyArr: unknown[] = [];

			const objResult = await toToonOrJson(emptyObj, '{}');
			const arrResult = await toToonOrJson(emptyArr, '[]');

			expect(objResult).toBeDefined();
			expect(arrResult).toBeDefined();
		});

		test('handles data containing special characters', async () => {
			const data = { message: 'Hello\nWorld', path: '/some/path' };
			const jsonFallback = JSON.stringify(data, null, 2);

			const result = await toToonOrJson(data, jsonFallback);

			expect(result).toBeDefined();
			expect(result.length).toBeGreaterThan(0);
		});
	});

	describe('toToonOrJsonSync', () => {
		test('returns JSON fallback when encoder not loaded', () => {
			const data = { name: 'Test', value: 123 };
			const jsonFallback = JSON.stringify(data, null, 2);

			// Without preloading, sync version should return fallback
			const result = toToonOrJsonSync(data, jsonFallback);

			expect(result).toBeDefined();
			expect(result).toContain('Test');
			expect(result).toContain('123');
		});

		test('handles complex data gracefully', () => {
			const data = {
				pages: [
					{ id: 1, title: 'Page One' },
					{ id: 2, title: 'Page Two' },
				],
			};
			const jsonFallback = JSON.stringify(data, null, 2);

			const result = toToonOrJsonSync(data, jsonFallback);

			expect(result).toBeDefined();
			expect(result).toContain('Page One');
			expect(result).toContain('Page Two');
		});
	});

	describe('Fallback behavior', () => {
		test('fallback JSON is valid and parseable', async () => {
			const data = {
				spaces: [
					{ id: '123', name: 'Engineering', key: 'ENG' },
					{ id: '456', name: 'Product', key: 'PROD' },
				],
			};
			const jsonFallback = JSON.stringify(data, null, 2);

			const result = await toToonOrJson(data, jsonFallback);

			// If it's JSON fallback, it should be parseable
			// If it's TOON, this will fail, but the test still passes
			// because we're just checking the result is valid
			expect(result).toBeDefined();
			expect(result.length).toBeGreaterThan(0);
		});

		test('function does not throw on edge case data', async () => {
			// Test with various edge cases (excluding undefined which JSON.stringify handles specially)
			const testCases = [
				{ data: null, fallback: 'null' },
				{ data: 0, fallback: '0' },
				{ data: '', fallback: '""' },
				{ data: [], fallback: '[]' },
				{ data: {}, fallback: '{}' },
				{ data: { deep: { nested: { value: 1 } } }, fallback: '{}' },
			];

			for (const { data, fallback } of testCases) {
				// Should not throw
				const result = await toToonOrJson(data, fallback);
				expect(result).toBeDefined();
			}
		});
	});
});

```

--------------------------------------------------------------------------------
/src/tools/atlassian.repositories.types.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';

/**
 * Repository tool types.
 */

/**
 * Base pagination arguments for all tools
 */
const PaginationArgs = {
	limit: z
		.number()
		.int()
		.positive()
		.max(100)
		.optional()
		.describe(
			'Maximum number of items to return (1-100). Controls the response size. Defaults to 25 if omitted.',
		),

	cursor: z
		.string()
		.optional()
		.describe(
			'Pagination cursor for retrieving the next set of results. Obtained from previous response when more results are available.',
		),
};

/**
 * Schema for list-repositories tool arguments
 */
export const ListRepositoriesToolArgs = z.object({
	/**
	 * Workspace slug containing the repositories
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repositories. If not provided, the system will use your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or the first workspace in your account). Example: "myteam"',
		),

	/**
	 * Optional query to filter repositories
	 */
	query: z
		.string()
		.optional()
		.describe(
			'Query string to filter repositories by name or other properties (text search). Example: "api" for repositories with "api" in the name/description. If omitted, returns all repositories.',
		),

	/**
	 * Optional sort parameter
	 */
	sort: z
		.string()
		.optional()
		.describe(
			'Field to sort results by. Common values: "name", "created_on", "updated_on". Prefix with "-" for descending order. Example: "-updated_on" for most recently updated first.',
		),

	/**
	 * Optional role filter
	 */
	role: z
		.string()
		.optional()
		.describe(
			'Filter repositories by the authenticated user\'s role. Common values: "owner", "admin", "contributor", "member". If omitted, returns repositories of all roles.',
		),

	/**
	 * Optional project key filter
	 */
	projectKey: z
		.string()
		.optional()
		.describe('Filter repositories by project key. Example: "project-api"'),

	/**
	 * Pagination arguments: limit (default: 25) and cursor
	 */
	...PaginationArgs,
});

export type ListRepositoriesToolArgsType = z.infer<
	typeof ListRepositoriesToolArgs
>;

/**
 * Schema for create-branch tool arguments.
 */
export const CreateBranchToolArgsSchema = z.object({
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or the first workspace in your account). Example: "myteam"',
		),
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe('Repository slug where the branch will be created.'),
	newBranchName: z
		.string()
		.min(1, 'New branch name is required')
		.describe('The name for the new branch.'),
	sourceBranchOrCommit: z
		.string()
		.min(1, 'Source branch or commit is required')
		.describe('The name of the branch or the commit hash to branch from.'),
});

export type CreateBranchToolArgsType = z.infer<
	typeof CreateBranchToolArgsSchema
>;

/**
 * Schema for clone-repository tool arguments.
 */
export const CloneRepositoryToolArgs = z.object({
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Bitbucket workspace slug containing the repository. If not provided, the tool will use your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or the first workspace in your account). Example: "myteam"',
		),
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository name/slug to clone. This is the short name of the repository. Example: "project-api"',
		),
	targetPath: z
		.string()
		.min(1, 'Target path is required')
		.describe(
			'Directory path where the repository will be cloned. IMPORTANT: Absolute paths are strongly recommended (e.g., "/home/user/projects" or "C:\\Users\\name\\projects"). Relative paths will be resolved relative to the server\'s working directory, which may not be what you expect. The repository will be cloned into a subdirectory at targetPath/repoSlug. Make sure you have write permissions to this location.',
		),
});

export type CloneRepositoryToolArgsType = z.infer<
	typeof CloneRepositoryToolArgs
>;

/**
 * Schema for list-branches tool arguments
 */
export const ListBranchesToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace. Example: "myteam"',
		),

	/**
	 * Repository slug to list branches from
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug to list branches from. Must be a valid repository slug in the specified workspace. Example: "project-api"',
		),

	/**
	 * Optional query to filter branches
	 */
	query: z
		.string()
		.optional()
		.describe(
			'Query string to filter branches by name or other properties (text search).',
		),

	/**
	 * Optional sort parameter
	 */
	sort: z
		.string()
		.optional()
		.describe(
			'Field to sort branches by. Common values: "name" (default), "-name", "target.date". Prefix with "-" for descending order.',
		),

	/**
	 * Pagination arguments: limit (default: 25) and cursor
	 */
	...PaginationArgs,
});

export type ListBranchesToolArgsType = z.infer<typeof ListBranchesToolArgs>;

```
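
As with the API tool schemas, these are consumed by the repository tool handlers; `safeParse` gives a structured error when a required field is missing. A minimal sketch with hypothetical argument objects (values and import path are illustrative):

```typescript
import {
	ListRepositoriesToolArgs,
	CreateBranchToolArgsSchema,
} from '../tools/atlassian.repositories.types.js';

// Valid: workspaceSlug is optional and limit is within the 1-100 range
const listArgs = ListRepositoriesToolArgs.parse({
	query: 'api',
	sort: '-updated_on',
	limit: 25,
});
console.log(listArgs.query);

// Invalid: repoSlug and newBranchName are required, so safeParse reports issues
const branchCheck = CreateBranchToolArgsSchema.safeParse({
	sourceBranchOrCommit: 'main',
});
if (!branchCheck.success) {
	console.error(branchCheck.error.issues.map((issue) => issue.message));
}
```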

--------------------------------------------------------------------------------
/scripts/update-version.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

/**
 * Script to update version numbers across the project
 * Usage: node scripts/update-version.js [version] [options]
 * Options:
 *   --dry-run   Show what changes would be made without applying them
 *   --verbose   Show detailed logging information
 *
 * If no version is provided, it will use the version from package.json
 */

import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';

// Get the directory name of the current module
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const rootDir = path.resolve(__dirname, '..');

// Parse command line arguments
const args = process.argv.slice(2);
const options = {
	dryRun: args.includes('--dry-run'),
	verbose: args.includes('--verbose'),
};

// Get the version (first non-flag argument)
let newVersion = args.find((arg) => !arg.startsWith('--'));

// Log helper function
const log = (message, verbose = false) => {
	if (!verbose || options.verbose) {
		console.log(message);
	}
};

// File paths that may contain version information
const versionFiles = [
	{
		path: path.join(rootDir, 'package.json'),
		pattern: /"version": "([^"]*)"/,
		replacement: (match, currentVersion) =>
			match.replace(currentVersion, newVersion),
	},
	{
		path: path.join(rootDir, 'src', 'utils', 'constants.util.ts'),
		pattern: /export const VERSION = ['"]([^'"]*)['"]/,
		replacement: (match, currentVersion) =>
			match.replace(currentVersion, newVersion),
	},
	// Also update the compiled JavaScript files if they exist
	{
		path: path.join(rootDir, 'dist', 'utils', 'constants.util.js'),
		pattern: /exports.VERSION = ['"]([^'"]*)['"]/,
		replacement: (match, currentVersion) =>
			match.replace(currentVersion, newVersion),
		optional: true, // Mark this file as optional
	},
	// Additional files can be added here with their patterns and replacement logic
];

/**
 * Read the version from package.json
 * @returns {string} The version from package.json
 */
function getPackageVersion() {
	try {
		const packageJsonPath = path.join(rootDir, 'package.json');
		log(`Reading version from ${packageJsonPath}`, true);

		const packageJson = JSON.parse(
			fs.readFileSync(packageJsonPath, 'utf8'),
		);

		if (!packageJson.version) {
			throw new Error('No version field found in package.json');
		}

		return packageJson.version;
	} catch (error) {
		console.error(`Error reading package.json: ${error.message}`);
		process.exit(1);
	}
}

/**
 * Validate the semantic version format
 * @param {string} version - The version to validate
 * @returns {boolean} True if valid, throws error if invalid
 */
function validateVersion(version) {
	// More comprehensive semver regex
	const semverRegex =
		/^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$/;

	if (!semverRegex.test(version)) {
		throw new Error(
			`Invalid version format: ${version}\nPlease use semantic versioning format (e.g., 1.2.3, 1.2.3-beta.1, etc.)`,
		);
	}

	return true;
}

/**
 * Update version in a specific file
 * @param {Object} fileConfig - Configuration for the file to update
 */
function updateFileVersion(fileConfig) {
	const {
		path: filePath,
		pattern,
		replacement,
		optional = false,
	} = fileConfig;

	try {
		log(`Checking ${filePath}...`, true);

		if (!fs.existsSync(filePath)) {
			if (optional) {
				log(`Optional file not found (skipping): ${filePath}`, true);
				return;
			}
			console.warn(`Warning: File not found: ${filePath}`);
			return;
		}

		// Read file content
		const fileContent = fs.readFileSync(filePath, 'utf8');
		const match = fileContent.match(pattern);

		if (!match) {
			console.warn(`Warning: Version pattern not found in ${filePath}`);
			return;
		}

		const currentVersion = match[1];
		if (currentVersion === newVersion) {
			log(
				`Version in ${path.basename(filePath)} is already ${newVersion}`,
				true,
			);
			return;
		}

		// Create new content with the updated version
		const updatedContent = fileContent.replace(pattern, replacement);

		// Write the changes or log them in dry run mode
		if (options.dryRun) {
			log(
				`Would update version in ${filePath} from ${currentVersion} to ${newVersion}`,
			);
		} else {
			// Create a backup of the original file
			fs.writeFileSync(`${filePath}.bak`, fileContent);
			log(`Backup created: ${filePath}.bak`, true);

			// Write the updated content
			fs.writeFileSync(filePath, updatedContent);
			log(
				`Updated version in ${path.basename(filePath)} from ${currentVersion} to ${newVersion}`,
			);
		}
	} catch (error) {
		if (optional) {
			log(`Error with optional file ${filePath}: ${error.message}`, true);
			return;
		}
		console.error(`Error updating ${filePath}: ${error.message}`);
		process.exit(1);
	}
}

// Main execution
try {
	// If no version specified, get from package.json
	if (!newVersion) {
		newVersion = getPackageVersion();
		log(
			`No version specified, using version from package.json: ${newVersion}`,
		);
	}

	// Validate the version format
	validateVersion(newVersion);

	// Update all configured files
	for (const fileConfig of versionFiles) {
		updateFileVersion(fileConfig);
	}

	if (options.dryRun) {
		log(`\nDry run completed. No files were modified.`);
	} else {
		log(`\nVersion successfully updated to ${newVersion}`);
	}
} catch (error) {
	console.error(`\nVersion update failed: ${error.message}`);
	process.exit(1);
}

```

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.workspaces.test.ts:
--------------------------------------------------------------------------------

```typescript
import atlassianWorkspacesService from './vendor.atlassian.workspaces.service.js';
import { getAtlassianCredentials } from '../utils/transport.util.js';
import { config } from '../utils/config.util.js';
import { McpError } from '../utils/error.util.js';

describe('Vendor Atlassian Workspaces Service', () => {
	// Load configuration and check for credentials before all tests
	beforeAll(() => {
		config.load(); // Ensure config is loaded
		const credentials = getAtlassianCredentials();
		if (!credentials) {
			console.warn(
				'Skipping Atlassian Workspaces Service tests: No credentials available',
			);
		}
	});

	// Helper function to skip tests when credentials are missing
	const skipIfNoCredentials = () => !getAtlassianCredentials();

	describe('list', () => {
		it('should return a list of workspaces (permissions)', async () => {
			if (skipIfNoCredentials()) return;

			const result = await atlassianWorkspacesService.list();

			// Verify the response structure based on WorkspacePermissionsResponse
			expect(result).toHaveProperty('values');
			expect(Array.isArray(result.values)).toBe(true);
			expect(result).toHaveProperty('pagelen'); // Bitbucket uses pagelen
			expect(result).toHaveProperty('page');
			expect(result).toHaveProperty('size');

			if (result.values.length > 0) {
				const membership = result.values[0];
				expect(membership).toHaveProperty(
					'type',
					'workspace_membership',
				);
				expect(membership).toHaveProperty('permission');
				expect(membership).toHaveProperty('user');
				expect(membership).toHaveProperty('workspace');
				expect(membership.workspace).toHaveProperty('slug');
				expect(membership.workspace).toHaveProperty('uuid');
			}
		}, 30000); // Increased timeout

		it('should support pagination with pagelen', async () => {
			if (skipIfNoCredentials()) return;

			const result = await atlassianWorkspacesService.list({
				pagelen: 1,
			});

			expect(result).toHaveProperty('pagelen');
			// Allow pagelen to be greater than requested if API enforces minimum
			expect(result.pagelen).toBeGreaterThanOrEqual(1);
			expect(result.values.length).toBeLessThanOrEqual(result.pagelen); // Items should not exceed pagelen

			if (result.size > result.pagelen) {
				// If there are more items than the page size, expect pagination links
				expect(result).toHaveProperty('next');
			}
		}, 30000);

		it('should handle query filtering if supported by the API', async () => {
			if (skipIfNoCredentials()) return;

			// First get all workspaces to find a potential query term
			const allWorkspaces = await atlassianWorkspacesService.list();

			// Skip if no workspaces available
			if (allWorkspaces.values.length === 0) {
				console.warn(
					'Skipping query filtering test: No workspaces available',
				);
				return;
			}

			// Try to search using a workspace name. Note that this might not work if
			// the API doesn't fully support the 'q' parameter for this endpoint;
			// this test primarily checks that the request doesn't fail.
			const firstWorkspace = allWorkspaces.values[0].workspace;
			try {
				const result = await atlassianWorkspacesService.list({
					q: `workspace.name="${firstWorkspace.name}"`,
				});

				// We're mostly testing that this request completes without error
				expect(result).toHaveProperty('values');

				// The result might be empty if filtering isn't supported,
				// so we don't assert on the number of results returned
			} catch (error) {
				// If filtering isn't supported, the API might return an error
				// This is acceptable, so we just log it
				console.warn(
					'Query filtering test encountered an error:',
					error instanceof Error ? error.message : String(error),
				);
			}
		}, 30000);
	});

	describe('get', () => {
		// Helper to get a valid slug for testing 'get'
		async function getFirstWorkspaceSlug(): Promise<string | null> {
			if (skipIfNoCredentials()) return null;
			try {
				const listResult = await atlassianWorkspacesService.list({
					pagelen: 1,
				});
				return listResult.values.length > 0
					? listResult.values[0].workspace.slug
					: null;
			} catch (error) {
				console.warn(
					"Could not fetch workspace list for 'get' test setup:",
					error,
				);
				return null;
			}
		}

		it('should return details for a valid workspace slug', async () => {
			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping get test: No workspace slug found.');
				return;
			}

			const result = await atlassianWorkspacesService.get(workspaceSlug);

			// Verify the response structure based on WorkspaceDetailed
			expect(result).toHaveProperty('uuid');
			expect(result).toHaveProperty('slug', workspaceSlug);
			expect(result).toHaveProperty('name');
			expect(result).toHaveProperty('type', 'workspace');
			expect(result).toHaveProperty('links');
			expect(result.links).toHaveProperty('html');
		}, 30000);

		it('should throw an McpError for an invalid workspace slug', async () => {
			if (skipIfNoCredentials()) return;

			const invalidSlug = 'this-slug-definitely-does-not-exist-12345';

			// Expect the service call to reject with an McpError (likely 404)
			await expect(
				atlassianWorkspacesService.get(invalidSlug),
			).rejects.toThrow(McpError);

			// Optionally check the status code if needed
			try {
				await atlassianWorkspacesService.get(invalidSlug);
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
			}
		}, 30000);
	});
});

```

--------------------------------------------------------------------------------
/src/utils/config.util.ts:
--------------------------------------------------------------------------------

```typescript
import fs from 'fs';
import path from 'path';
import { Logger } from './logger.util.js';
import dotenv from 'dotenv';
import os from 'os';

/**
 * Configuration loader that handles multiple sources with priority:
 * 1. Direct ENV pass (process.env)
 * 2. .env file in project root
 * 3. Global config file at $HOME/.mcp/configs.json
 */
class ConfigLoader {
	private packageName: string;
	private configLoaded: boolean = false;

	/**
	 * Create a new ConfigLoader instance
	 * @param packageName The package name to use for global config lookup
	 */
	constructor(packageName: string) {
		this.packageName = packageName;
	}

	/**
	 * Load configuration from all sources with proper priority
	 */
	load(): void {
		const methodLogger = Logger.forContext('utils/config.util.ts', 'load');
		if (this.configLoaded) {
			methodLogger.debug('Configuration already loaded, skipping');
			return;
		}

		methodLogger.debug('Loading configuration...');

		// Priority 3: Load from global config file
		this.loadFromGlobalConfig();

		// Priority 2: Load from .env file
		this.loadFromEnvFile();

		// Priority 1: Direct ENV pass is already in process.env
		// No need to do anything as it already has highest priority

		this.configLoaded = true;
		methodLogger.debug('Configuration loaded successfully');
	}

	/**
	 * Load configuration from .env file in project root
	 */
	private loadFromEnvFile(): void {
		const methodLogger = Logger.forContext(
			'utils/config.util.ts',
			'loadFromEnvFile',
		);
		try {
			// Use quiet mode to prevent dotenv from outputting to STDIO
			// which interferes with MCP's JSON-RPC communication
			const result = dotenv.config({ quiet: true });
			if (result.error) {
				methodLogger.debug('No .env file found or error reading it');
				return;
			}
			methodLogger.debug('Loaded configuration from .env file');
		} catch (error) {
			methodLogger.error('Error loading .env file', error);
		}
	}

	/**
	 * Load configuration from global config file at $HOME/.mcp/configs.json
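	 *
	 * A minimal sketch of the expected file shape (values illustrative; the env
	 * var names may be any of the Atlassian/Bitbucket variables this server reads):
	 *
	 * {
	 *   "bitbucket": {
	 *     "environments": {
	 *       "ATLASSIAN_BITBUCKET_USERNAME": "my-username",
	 *       "ATLASSIAN_BITBUCKET_APP_PASSWORD": "my-app-password"
	 *     }
	 *   }
	 * }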
	 */
	private loadFromGlobalConfig(): void {
		const methodLogger = Logger.forContext(
			'utils/config.util.ts',
			'loadFromGlobalConfig',
		);
		try {
			const homedir = os.homedir();
			const globalConfigPath = path.join(homedir, '.mcp', 'configs.json');

			if (!fs.existsSync(globalConfigPath)) {
				methodLogger.debug('Global config file not found');
				return;
			}

			const configContent = fs.readFileSync(globalConfigPath, 'utf8');
			const config = JSON.parse(configContent);

			// Determine the potential keys for the current package
			const shortKey = 'bitbucket'; // Project-specific short key
			const atlassianProductKey = 'atlassian-bitbucket'; // New supported key
			const fullPackageName = this.packageName; // e.g., '@aashari/mcp-server-atlassian-bitbucket'
			const unscopedPackageName =
				fullPackageName.split('/')[1] || fullPackageName; // e.g., 'mcp-server-atlassian-bitbucket'

			// Define the prioritized order of keys to check
			const potentialKeys = [
				shortKey,
				atlassianProductKey,
				fullPackageName,
				unscopedPackageName,
			];
			let foundConfigSection: {
				environments?: Record<string, unknown>;
			} | null = null;
			let usedKey: string | null = null;

			for (const key of potentialKeys) {
				if (
					config[key] &&
					typeof config[key] === 'object' &&
					config[key].environments
				) {
					foundConfigSection = config[key];
					usedKey = key;
					methodLogger.debug(`Found configuration using key: ${key}`);
					break; // Stop once found
				}
			}

			if (!foundConfigSection || !foundConfigSection.environments) {
				methodLogger.debug(
					`No configuration found for ${
						this.packageName
					} using keys: ${potentialKeys.join(', ')}`,
				);
				return;
			}

			const environments = foundConfigSection.environments;
			for (const [key, value] of Object.entries(environments)) {
				// Only set if not already defined in process.env
				if (process.env[key] === undefined) {
					process.env[key] = String(value);
				}
			}

			methodLogger.debug(
				`Loaded configuration from global config file using key: ${usedKey}`,
			);
		} catch (error) {
			methodLogger.error('Error loading global config file', error);
		}
	}

	/**
	 * Get a configuration value
	 * @param key The configuration key
	 * @param defaultValue The default value if the key is not found
	 * @returns The configuration value or the default value
	 */
	get(key: string, defaultValue?: string): string | undefined {
		return process.env[key] || defaultValue;
	}

	/**
	 * Get a boolean configuration value
	 * @param key The configuration key
	 * @param defaultValue The default value if the key is not found
	 * @returns The boolean configuration value or the default value
	 */
	getBoolean(key: string, defaultValue: boolean = false): boolean {
		const value = this.get(key);
		if (value === undefined) {
			return defaultValue;
		}
		return value.toLowerCase() === 'true';
	}

	/**
	 * Get a number configuration value
	 * @param key The configuration key
	 * @param defaultValue The default value if the key is not found
	 * @returns The number configuration value or the default value
	 */
	getNumber(key: string, defaultValue: number = 0): number {
		const value = this.get(key);
		if (value === undefined) {
			return defaultValue;
		}
		const parsed = parseInt(value, 10);
		return isNaN(parsed) ? defaultValue : parsed;
	}
}

// Create and export a singleton instance with the package name from package.json
export const config = new ConfigLoader(
	'@aashari/mcp-server-atlassian-bitbucket',
);

```

--------------------------------------------------------------------------------
/STYLE_GUIDE.md:
--------------------------------------------------------------------------------

```markdown
# MCP Server Style Guide

Based on the MCP SDK v1.22.0+ best practices and observed patterns, this guide ensures consistency across all MCP servers.

## Naming Conventions

| Element              | Convention                                                                                                                                    | Rationale / Examples                                                                                                                              |
| :------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------ |
| **CLI Commands**     | `verb-noun` in `kebab-case`. Use the shortest unambiguous verb (`ls`, `get`, `create`, `add`, `exec`, `search`).                              | `ls-repos`, `get-pr`, `create-comment`, `exec-command`                                                                                            |
| **CLI Options**      | `--kebab-case`. Be specific (e.g., `--workspace-slug`, not just `--slug`).                                                                    | `--project-key-or-id`, `--source-branch`                                                                                                          |
| **MCP Tool Names**   | `<namespace>_<verb>_<noun>` in `snake_case`. Use a concise 2-4 char namespace. Avoid noun repetition.                                         | `bb_ls_repos` (Bitbucket list repos), `conf_get_page` (Confluence get page), `aws_exec_command` (AWS execute command). Avoid `ip_ip_get_details`. |
| **MCP Resource Names** | `kebab-case`. Descriptive identifier for the resource type.                                                                                 | `ip-lookup`, `user-profile`, `config-data`                                                                                                        |
| **MCP Arguments**    | `camelCase`. Suffix identifiers consistently (e.g., `Id`, `Key`, `Slug`). Avoid abbreviations unless universal.                               | `workspaceSlug`, `pullRequestId`, `sourceBranch`, `pageId`.                                                                                       |
| **Boolean Args**     | Use verb prefixes for clarity (`includeXxx`, `launchBrowser`). Avoid bare adjectives (`--https`).                                             | `includeExtendedData: boolean`, `launchBrowser: boolean`                                                                                          |
| **Array Args**       | Use plural names (`spaceIds`, `labels`, `statuses`).                                                                                          | `spaceIds: string[]`, `labels: string[]`                                                                                                          |
| **Descriptions**     | **Start with an imperative verb.** Keep the first sentence concise (≤120 chars). Add 1-2 sentences detail. Mention pre-requisites/notes last. | `List available Confluence spaces. Filters by type, status, or query. Returns formatted list including ID, key, name.`                            |
| **Arg Descriptions** | Start lowercase, explain purpose clearly. Mention defaults or constraints.                                                                    | `numeric ID of the page to retrieve (e.g., "456789"). Required.`                                                                                  |
| **ID/Key Naming**    | Use consistent suffixes like `Id`, `Key`, `Slug`, `KeyOrId` where appropriate.                                                                | `pageId`, `projectKeyOrId`, `workspaceSlug`                                                                                                       |
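
As a rough illustration (a sketch reusing example names from the table above, not a tool definition from any specific server), these argument conventions map onto a Zod input shape like this:

```typescript
import { z } from 'zod';

// Illustrative only: camelCase identifiers, consistent Id/Key/Slug suffixes,
// verb-prefixed booleans, and plural array arguments.
const listReposArgs = {
    workspaceSlug: z
        .string()
        .describe('workspace slug containing the repositories (e.g., "my-team"). Required.'),
    projectKeyOrId: z
        .string()
        .optional()
        .describe('filter repositories by project key or ID (e.g., "PROJ").'),
    includeExtendedData: z
        .boolean()
        .optional()
        .describe('include extended repository metadata. Defaults to false.'),
    labels: z.array(z.string()).optional().describe('labels to filter by.'),
};
```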

## SDK Best Practices (v1.22.0+)

### Title vs Name

All registrations (`registerTool`, `registerResource`, `registerPrompt`) support both `name` and `title`:

| Field | Purpose | Example |
| :---- | :------ | :------ |
| `name` | Unique identifier for programmatic use | `bb_get` |
| `title` | Human-readable display name for UI | `Bitbucket GET Request` |

**Always provide both** - `name` for code, `title` for user interfaces.
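
For example, one plausible registration shape (assuming the `registerTool(name, config, handler)` form used elsewhere in this guide; the schema and handler body are placeholders):

```typescript
server.registerTool(
    'bb_get', // name: stable identifier for programmatic use
    {
        title: 'Bitbucket GET Request', // title: human-readable display name
        description: 'Get any Bitbucket API endpoint and return the raw JSON response.',
        inputSchema: { /* zod shape */ },
    },
    async () => ({ content: [{ type: 'text', text: '...' }] }),
);
```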

### Modern Registration APIs

Use the modern `register*` methods instead of deprecated alternatives:

| Deprecated | Modern (SDK v1.22.0+) |
| :--------- | :-------------------- |
| `server.tool()` | `server.registerTool()` |
| `server.resource()` | `server.registerResource()` |
| `server.prompt()` | `server.registerPrompt()` |

### Resource Templates

Use `ResourceTemplate` for parameterized resource URIs:

```typescript
import { ResourceTemplate } from '@modelcontextprotocol/sdk/server/mcp.js';

// Static resource - fixed URI
server.registerResource('config', 'config://app', { ... }, handler);

// Dynamic resource - parameterized URI
server.registerResource(
    'user-profile',
    new ResourceTemplate('users://{userId}/profile', { list: undefined }),
    { title: 'User Profile', description: '...' },
    async (uri, variables) => {
        const userId = variables.userId as string;
        // ...
    }
);
```

### Error Handling

Use `isError: true` for tool execution failures:

```typescript
return {
    content: [{ type: 'text', text: 'Error: Something went wrong' }],
    isError: true
};
```

Adopting this guide will make the tools more predictable and easier for both humans and AI agents to understand and use correctly.

```

--------------------------------------------------------------------------------
/src/controllers/atlassian.api.controller.ts:
--------------------------------------------------------------------------------

```typescript
import {
	fetchAtlassian,
	getAtlassianCredentials,
} from '../utils/transport.util.js';
import { Logger } from '../utils/logger.util.js';
import { handleControllerError } from '../utils/error-handler.util.js';
import { ControllerResponse } from '../types/common.types.js';
import {
	GetApiToolArgsType,
	RequestWithBodyArgsType,
} from '../tools/atlassian.api.types.js';
import { applyJqFilter, toOutputString } from '../utils/jq.util.js';
import { createAuthMissingError } from '../utils/error.util.js';

// Logger instance for this module
const logger = Logger.forContext('controllers/atlassian.api.controller.ts');

/**
 * Supported HTTP methods for API requests
 */
type HttpMethod = 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE';

/**
 * Output format type
 */
type OutputFormat = 'toon' | 'json';

/**
 * Base options for all API requests
 */
interface BaseRequestOptions {
	path: string;
	queryParams?: Record<string, string>;
	jq?: string;
	outputFormat?: OutputFormat;
}

/**
 * Options for requests that include a body (POST, PUT, PATCH)
 */
interface RequestWithBodyOptions extends BaseRequestOptions {
	body?: Record<string, unknown>;
}

/**
 * Normalizes the API path by ensuring it starts with /2.0
 * @param path - The raw path provided by the user
 * @returns Normalized path with /2.0 prefix
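 * @example
 * // normalizePath('workspaces')      => '/2.0/workspaces'
 * // normalizePath('/2.0/workspaces') => '/2.0/workspaces' (already prefixed)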
 */
function normalizePath(path: string): string {
	let normalizedPath = path;
	if (!normalizedPath.startsWith('/')) {
		normalizedPath = '/' + normalizedPath;
	}
	if (!normalizedPath.startsWith('/2.0')) {
		normalizedPath = '/2.0' + normalizedPath;
	}
	return normalizedPath;
}

/**
 * Appends query parameters to a path
 * @param path - The base path
 * @param queryParams - Optional query parameters
 * @returns Path with query string appended
 */
function appendQueryParams(
	path: string,
	queryParams?: Record<string, string>,
): string {
	if (!queryParams || Object.keys(queryParams).length === 0) {
		return path;
	}
	const queryString = new URLSearchParams(queryParams).toString();
	return path + (path.includes('?') ? '&' : '?') + queryString;
}

/**
 * Shared handler for all HTTP methods
 *
 * @param method - HTTP method (GET, POST, PUT, PATCH, DELETE)
 * @param options - Request options including path, queryParams, body (for non-GET), and jq filter
 * @returns Promise with raw JSON response (optionally filtered)
 */
async function handleRequest(
	method: HttpMethod,
	options: RequestWithBodyOptions,
): Promise<ControllerResponse> {
	const methodLogger = logger.forMethod(`handle${method}`);

	try {
		methodLogger.debug(`Making ${method} request`, {
			path: options.path,
			...(options.body && { bodyKeys: Object.keys(options.body) }),
		});

		// Get credentials
		const credentials = getAtlassianCredentials();
		if (!credentials) {
			throw createAuthMissingError();
		}

		// Normalize path and append query params
		let path = normalizePath(options.path);
		path = appendQueryParams(path, options.queryParams);

		methodLogger.debug(`${method}ing: ${path}`);

		const fetchOptions: {
			method: 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE';
			body?: Record<string, unknown>;
		} = {
			method,
		};

		// Add body for methods that support it
		if (options.body && ['POST', 'PUT', 'PATCH'].includes(method)) {
			fetchOptions.body = options.body;
		}

		const response = await fetchAtlassian<unknown>(
			credentials,
			path,
			fetchOptions,
		);
		methodLogger.debug('Successfully received response');

		// Apply JQ filter if provided, otherwise return raw data
		const result = applyJqFilter(response.data, options.jq);

		// Convert to output format (TOON by default, JSON if requested)
		const useToon = options.outputFormat !== 'json';
		const content = await toOutputString(result, useToon);

		return {
			content,
			rawResponsePath: response.rawResponsePath,
		};
	} catch (error) {
		throw handleControllerError(error, {
			entityType: 'API',
			operation: `${method} request`,
			source: `controllers/atlassian.api.controller.ts@handle${method}`,
			additionalInfo: { path: options.path },
		});
	}
}

/**
 * Generic GET request to Bitbucket API
 *
 * @param options - Options containing path, queryParams, and optional jq filter
 * @returns Promise with raw JSON response (optionally filtered)
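 *
 * @example
 * // Illustrative call: list repositories in a workspace, 25 per page
 * // (the "/2.0" prefix is added automatically by normalizePath)
 * const result = await handleGet({
 *   path: '/repositories/my-workspace',
 *   queryParams: { pagelen: '25' },
 * });
 * console.log(result.content);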
 */
export async function handleGet(
	options: GetApiToolArgsType,
): Promise<ControllerResponse> {
	return handleRequest('GET', options);
}

/**
 * Generic POST request to Bitbucket API
 *
 * @param options - Options containing path, body, queryParams, and optional jq filter
 * @returns Promise with raw JSON response (optionally filtered)
 */
export async function handlePost(
	options: RequestWithBodyArgsType,
): Promise<ControllerResponse> {
	return handleRequest('POST', options);
}

/**
 * Generic PUT request to Bitbucket API
 *
 * @param options - Options containing path, body, queryParams, and optional jq filter
 * @returns Promise with raw JSON response (optionally filtered)
 */
export async function handlePut(
	options: RequestWithBodyArgsType,
): Promise<ControllerResponse> {
	return handleRequest('PUT', options);
}

/**
 * Generic PATCH request to Bitbucket API
 *
 * @param options - Options containing path, body, queryParams, and optional jq filter
 * @returns Promise with raw JSON response (optionally filtered)
 */
export async function handlePatch(
	options: RequestWithBodyArgsType,
): Promise<ControllerResponse> {
	return handleRequest('PATCH', options);
}

/**
 * Generic DELETE request to Bitbucket API
 *
 * @param options - Options containing path, queryParams, and optional jq filter
 * @returns Promise with raw JSON response (optionally filtered)
 */
export async function handleDelete(
	options: GetApiToolArgsType,
): Promise<ControllerResponse> {
	return handleRequest('DELETE', options);
}

```

--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { Logger } from './utils/logger.util.js';
import { config } from './utils/config.util.js';
import { VERSION, PACKAGE_NAME } from './utils/constants.util.js';
import { runCli } from './cli/index.js';
import type { Request, Response } from 'express';
import express from 'express';
import cors from 'cors';

// Import tools
import atlassianApi from './tools/atlassian.api.tool.js';
import atlassianRepositories from './tools/atlassian.repositories.tool.js';

// Create a contextualized logger for this file
const indexLogger = Logger.forContext('index.ts');

// Log initialization at debug level
indexLogger.debug('Bitbucket MCP server module loaded');

let serverInstance: McpServer | null = null;
let transportInstance:
	| StreamableHTTPServerTransport
	| StdioServerTransport
	| null = null;

/**
 * Start the MCP server with the specified transport mode
 *
 * @param mode The transport mode to use (stdio or http)
 * @returns Promise that resolves to the server instance when started successfully
 */
export async function startServer(
	mode: 'stdio' | 'http' = 'stdio',
): Promise<McpServer> {
	const serverLogger = Logger.forContext('index.ts', 'startServer');

	// Load configuration
	serverLogger.info('Starting MCP server initialization...');
	config.load();

	if (config.getBoolean('DEBUG')) {
		serverLogger.debug('Debug mode enabled');
	}

	serverLogger.info(`Initializing Bitbucket MCP server v${VERSION}`);
	serverInstance = new McpServer({
		name: PACKAGE_NAME,
		version: VERSION,
	});

	// Register all tools
	serverLogger.info('Registering MCP tools...');
	atlassianApi.registerTools(serverInstance);
	atlassianRepositories.registerTools(serverInstance);
	serverLogger.info('All tools registered successfully');

	if (mode === 'stdio') {
		// STDIO Transport
		serverLogger.info('Using STDIO transport for MCP communication');
		transportInstance = new StdioServerTransport();

		try {
			await serverInstance.connect(transportInstance);
			serverLogger.info(
				'MCP server started successfully on STDIO transport',
			);
			setupGracefulShutdown();
			return serverInstance;
		} catch (err) {
			serverLogger.error(
				'Failed to start server on STDIO transport',
				err,
			);
			process.exit(1);
		}
	} else {
		// HTTP Transport with Express
		serverLogger.info(
			'Using Streamable HTTP transport for MCP communication',
		);

		const app = express();
		app.use(cors());
		app.use(express.json());

		const mcpEndpoint = '/mcp';
		serverLogger.debug(`MCP endpoint: ${mcpEndpoint}`);

		// Create transport instance
		const transport = new StreamableHTTPServerTransport({
			sessionIdGenerator: undefined,
		});

		// Connect server to transport
		await serverInstance.connect(transport);
		transportInstance = transport;

		// Handle all MCP requests
		app.all(mcpEndpoint, (req: Request, res: Response) => {
			transport
				.handleRequest(req, res, req.body)
				.catch((err: unknown) => {
					serverLogger.error('Error in transport.handleRequest', err);
					if (!res.headersSent) {
						res.status(500).json({
							error: 'Internal Server Error',
						});
					}
				});
		});

		// Health check endpoint
		app.get('/', (_req: Request, res: Response) => {
			res.send(`Bitbucket MCP Server v${VERSION} is running`);
		});

		// Start HTTP server
		const PORT = Number(process.env.PORT ?? 3000);
		await new Promise<void>((resolve) => {
			app.listen(PORT, () => {
				serverLogger.info(
					`HTTP transport listening on http://localhost:${PORT}${mcpEndpoint}`,
				);
				resolve();
			});
		});

		setupGracefulShutdown();
		return serverInstance;
	}
}

/**
 * Main entry point - this will run when executed directly
 * Determines whether to run in CLI or server mode based on command-line arguments
 */
async function main() {
	const mainLogger = Logger.forContext('index.ts', 'main');

	// Load configuration
	config.load();

	// CLI mode - if any arguments are provided
	if (process.argv.length > 2) {
		mainLogger.info('Starting in CLI mode');
		await runCli(process.argv.slice(2));
		mainLogger.info('CLI execution completed');
		return;
	}

	// Server mode - determine transport
	const transportMode = (process.env.TRANSPORT_MODE || 'stdio').toLowerCase();
	let mode: 'http' | 'stdio';

	if (transportMode === 'stdio') {
		mode = 'stdio';
	} else if (transportMode === 'http') {
		mode = 'http';
	} else {
		mainLogger.warn(
			`Unknown TRANSPORT_MODE "${transportMode}", defaulting to stdio`,
		);
		mode = 'stdio';
	}

	mainLogger.info(`Starting server with ${mode.toUpperCase()} transport`);
	await startServer(mode);
	mainLogger.info('Server is now running');
}

/**
 * Set up graceful shutdown handlers for the server
 */
function setupGracefulShutdown() {
	const shutdownLogger = Logger.forContext('index.ts', 'shutdown');

	const shutdown = async () => {
		try {
			shutdownLogger.info('Shutting down gracefully...');

			if (
				transportInstance &&
				'close' in transportInstance &&
				typeof transportInstance.close === 'function'
			) {
				await transportInstance.close();
			}

			if (serverInstance && typeof serverInstance.close === 'function') {
				await serverInstance.close();
			}

			process.exit(0);
		} catch (err) {
			shutdownLogger.error('Error during shutdown', err);
			process.exit(1);
		}
	};

	['SIGINT', 'SIGTERM'].forEach((signal) => {
		process.on(signal as NodeJS.Signals, shutdown);
	});
}

// If this file is being executed directly (not imported), run the main function
if (require.main === module) {
	main().catch((err) => {
		indexLogger.error('Unhandled error in main process', err);
		process.exit(1);
	});
}

```

--------------------------------------------------------------------------------
/src/cli/atlassian.api.cli.ts:
--------------------------------------------------------------------------------

```typescript
import { Command } from 'commander';
import { Logger } from '../utils/logger.util.js';
import { handleCliError } from '../utils/error.util.js';
import {
	handleGet,
	handlePost,
	handlePut,
	handlePatch,
	handleDelete,
} from '../controllers/atlassian.api.controller.js';

/**
 * CLI module for generic Bitbucket API access.
 * Provides commands for making GET, POST, PUT, PATCH, and DELETE requests to any Bitbucket API endpoint.
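 *
 * Example invocation (entry point via npx is illustrative; the flags match the
 * options registered below):
 *
 *   npx -y @aashari/mcp-server-atlassian-bitbucket get --path "/workspaces" --query-params '{"pagelen": "25"}'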
 */

// Create a contextualized logger for this file
const cliLogger = Logger.forContext('cli/atlassian.api.cli.ts');

// Log CLI initialization
cliLogger.debug('Bitbucket API CLI module initialized');

/**
 * Parse JSON string with error handling and basic validation
 * @param jsonString - JSON string to parse
 * @param fieldName - Name of the field for error messages
 * @returns Parsed JSON object
 */
function parseJson<T extends Record<string, unknown>>(
	jsonString: string,
	fieldName: string,
): T {
	let parsed: unknown;
	try {
		parsed = JSON.parse(jsonString);
	} catch {
		throw new Error(
			`Invalid JSON in --${fieldName}. Please provide valid JSON.`,
		);
	}

	// Validate that the parsed value is an object (not null, array, or primitive)
	if (
		parsed === null ||
		typeof parsed !== 'object' ||
		Array.isArray(parsed)
	) {
		throw new Error(
			`Invalid --${fieldName}: expected a JSON object, got ${parsed === null ? 'null' : Array.isArray(parsed) ? 'array' : typeof parsed}.`,
		);
	}

	return parsed as T;
}

/**
 * Register a read command (GET/DELETE - no body)
 * @param program - Commander program instance
 * @param name - Command name
 * @param description - Command description
 * @param handler - Controller handler function
 */
function registerReadCommand(
	program: Command,
	name: string,
	description: string,
	handler: (options: {
		path: string;
		queryParams?: Record<string, string>;
		jq?: string;
	}) => Promise<{ content: string }>,
): void {
	program
		.command(name)
		.description(description)
		.requiredOption(
			'-p, --path <path>',
			'API endpoint path (e.g., "/workspaces", "/repositories/{workspace}/{repo}").',
		)
		.option(
			'-q, --query-params <json>',
			'Query parameters as JSON string (e.g., \'{"pagelen": "25"}\').',
		)
		.option(
			'--jq <expression>',
			'JMESPath expression to filter/transform the response.',
		)
		.action(async (options) => {
			const actionLogger = cliLogger.forMethod(name);
			try {
				actionLogger.debug(`CLI ${name} called`, options);

				// Parse query params if provided
				let queryParams: Record<string, string> | undefined;
				if (options.queryParams) {
					queryParams = parseJson<Record<string, string>>(
						options.queryParams,
						'query-params',
					);
				}

				const result = await handler({
					path: options.path,
					queryParams,
					jq: options.jq,
				});

				console.log(result.content);
			} catch (error) {
				handleCliError(error);
			}
		});
}

/**
 * Register a write command (POST/PUT/PATCH - with body)
 * @param program - Commander program instance
 * @param name - Command name
 * @param description - Command description
 * @param handler - Controller handler function
 */
function registerWriteCommand(
	program: Command,
	name: string,
	description: string,
	handler: (options: {
		path: string;
		body: Record<string, unknown>;
		queryParams?: Record<string, string>;
		jq?: string;
	}) => Promise<{ content: string }>,
): void {
	program
		.command(name)
		.description(description)
		.requiredOption(
			'-p, --path <path>',
			'API endpoint path (e.g., "/repositories/{workspace}/{repo}/pullrequests").',
		)
		.requiredOption('-b, --body <json>', 'Request body as JSON string.')
		.option('-q, --query-params <json>', 'Query parameters as JSON string.')
		.option(
			'--jq <expression>',
			'JMESPath expression to filter/transform the response.',
		)
		.action(async (options) => {
			const actionLogger = cliLogger.forMethod(name);
			try {
				actionLogger.debug(`CLI ${name} called`, options);

				// Parse body
				const body = parseJson<Record<string, unknown>>(
					options.body,
					'body',
				);

				// Parse query params if provided
				let queryParams: Record<string, string> | undefined;
				if (options.queryParams) {
					queryParams = parseJson<Record<string, string>>(
						options.queryParams,
						'query-params',
					);
				}

				const result = await handler({
					path: options.path,
					body,
					queryParams,
					jq: options.jq,
				});

				console.log(result.content);
			} catch (error) {
				handleCliError(error);
			}
		});
}

/**
 * Register generic Bitbucket API CLI commands with the Commander program
 *
 * @param program - The Commander program instance to register commands with
 */
function register(program: Command): void {
	const methodLogger = Logger.forContext(
		'cli/atlassian.api.cli.ts',
		'register',
	);
	methodLogger.debug('Registering Bitbucket API CLI commands...');

	// Register GET command
	registerReadCommand(
		program,
		'get',
		'GET any Bitbucket endpoint. Returns JSON, optionally filtered with JMESPath.',
		handleGet,
	);

	// Register POST command
	registerWriteCommand(
		program,
		'post',
		'POST to any Bitbucket endpoint. Returns JSON, optionally filtered with JMESPath.',
		handlePost,
	);

	// Register PUT command
	registerWriteCommand(
		program,
		'put',
		'PUT to any Bitbucket endpoint. Returns JSON, optionally filtered with JMESPath.',
		handlePut,
	);

	// Register PATCH command
	registerWriteCommand(
		program,
		'patch',
		'PATCH any Bitbucket endpoint. Returns JSON, optionally filtered with JMESPath.',
		handlePatch,
	);

	// Register DELETE command
	registerReadCommand(
		program,
		'delete',
		'DELETE any Bitbucket endpoint. Returns JSON (if any), optionally filtered with JMESPath.',
		handleDelete,
	);

	methodLogger.debug('CLI commands registered successfully');
}

export default { register };

```

--------------------------------------------------------------------------------
/src/utils/transport.util.test.ts:
--------------------------------------------------------------------------------

```typescript
import { getAtlassianCredentials, fetchAtlassian } from './transport.util.js';
import { config } from './config.util.js';

/**
 * Generic response type for testing
 */
interface TestResponse {
	values: Array<Record<string, unknown>>;
	next?: string;
	total?: number;
}

// NOTE: We are no longer mocking fetch or logger, using real implementations instead

describe('Transport Utility', () => {
	// Load configuration before all tests
	beforeAll(() => {
		// Load configuration from all sources
		config.load();
	});

	describe('getAtlassianCredentials', () => {
		it('should return credentials when environment variables are set', () => {
			// This test will be skipped if credentials are not available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip silently - no credentials available for testing
			}

			// Check if the credentials are for standard Atlassian or Bitbucket-specific
			if (credentials.useBitbucketAuth) {
				// Verify the Bitbucket-specific credentials
				expect(credentials).toHaveProperty('bitbucketUsername');
				expect(credentials).toHaveProperty('bitbucketAppPassword');
				expect(credentials).toHaveProperty('useBitbucketAuth');

				// Verify the credentials are not empty
				expect(credentials.bitbucketUsername).toBeTruthy();
				expect(credentials.bitbucketAppPassword).toBeTruthy();
				expect(credentials.useBitbucketAuth).toBe(true);
			} else {
				// Verify the standard Atlassian credentials
				expect(credentials).toHaveProperty('userEmail');
				expect(credentials).toHaveProperty('apiToken');

				// Verify the credentials are not empty
				expect(credentials.userEmail).toBeTruthy();
				expect(credentials.apiToken).toBeTruthy();
				// Note: siteName is optional for API tokens
			}
		});

		it('should return null and log a warning when environment variables are missing', () => {
			// Store original environment variables
			const originalEnv = { ...process.env };

			// Clear relevant environment variables to simulate missing credentials
			delete process.env.ATLASSIAN_SITE_NAME;
			delete process.env.ATLASSIAN_USER_EMAIL;
			delete process.env.ATLASSIAN_API_TOKEN;
			delete process.env.ATLASSIAN_BITBUCKET_USERNAME;
			delete process.env.ATLASSIAN_BITBUCKET_APP_PASSWORD;

			// Force reload configuration
			config.load();

			// Call the function
			const credentials = getAtlassianCredentials();

			// Verify the result is null
			expect(credentials).toBeNull();

			// Restore original environment
			process.env = originalEnv;

			// Reload config with original environment
			config.load();
		});
	});

	describe('fetchAtlassian', () => {
		it('should successfully fetch data from the Atlassian API', async () => {
			// This test will be skipped if credentials are not available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip silently - no credentials available for testing
			}

			// Make a call to a real API endpoint
			// For Bitbucket, we'll use the workspaces endpoint
			const result = await fetchAtlassian<TestResponse>(
				credentials,
				'/2.0/workspaces',
				{
					method: 'GET',
					headers: {
						'Content-Type': 'application/json',
					},
				},
			);

			// Verify the response structure from real API
			expect(result.data).toHaveProperty('values');
			expect(Array.isArray(result.data.values)).toBe(true);
			// Assertions use property names from the actual API response (not previously mocked data)
			if (result.data.values.length > 0) {
				// Verify an actual workspace result
				const workspace = result.data.values[0];
				expect(workspace).toHaveProperty('uuid');
				expect(workspace).toHaveProperty('name');
				expect(workspace).toHaveProperty('slug');
			}
		}, 15000); // Increased timeout for real API call

		it('should handle API errors correctly', async () => {
			// This test will be skipped if credentials are not available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip silently - no credentials available for testing
			}

			// Call a non-existent endpoint and expect it to throw
			await expect(
				fetchAtlassian(credentials, '/2.0/non-existent-endpoint'),
			).rejects.toThrow();
		}, 15000); // Increased timeout for real API call

		it('should normalize paths that do not start with a slash', async () => {
			// This test will be skipped if credentials are not available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip silently - no credentials available for testing
			}

			// Call the function with a path that doesn't start with a slash
			const result = await fetchAtlassian<TestResponse>(
				credentials,
				'2.0/workspaces',
				{
					method: 'GET',
				},
			);

			// Verify the response structure from real API
			expect(result.data).toHaveProperty('values');
			expect(Array.isArray(result.data.values)).toBe(true);
		}, 15000); // Increased timeout for real API call

		it('should support custom request options', async () => {
			// This test will be skipped if credentials are not available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip silently - no credentials available for testing
			}

			// Custom request options with pagination
			const options = {
				method: 'GET' as const,
				headers: {
					Accept: 'application/json',
					'Content-Type': 'application/json',
				},
			};

			// Call a real endpoint with pagination parameter
			const result = await fetchAtlassian<TestResponse>(
				credentials,
				'/2.0/workspaces?pagelen=1',
				options,
			);

			// Verify the response structure from real API
			expect(result.data).toHaveProperty('values');
			expect(Array.isArray(result.data.values)).toBe(true);
			expect(result.data.values.length).toBeLessThanOrEqual(1); // Should respect pagelen=1
		}, 15000); // Increased timeout for real API call
	});
});

```

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.workspaces.service.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';
import {
	createAuthMissingError,
	createApiError,
	McpError,
} from '../utils/error.util.js';
import { Logger } from '../utils/logger.util.js';
import {
	fetchAtlassian,
	getAtlassianCredentials,
} from '../utils/transport.util.js';
import {
	WorkspaceDetailedSchema,
	WorkspacePermissionsResponseSchema,
	ListWorkspacesParamsSchema,
	type ListWorkspacesParams,
} from './vendor.atlassian.workspaces.types.js';

/**
 * Base API path for Bitbucket REST API v2
 * @see https://developer.atlassian.com/cloud/bitbucket/rest/api-group-workspaces/
 * @constant {string}
 */
const API_PATH = '/2.0';

/**
 * @namespace VendorAtlassianWorkspacesService
 * @description Service for interacting with Bitbucket Workspaces API.
 * Provides methods for listing workspaces and retrieving workspace details.
 * All methods require valid Atlassian credentials configured in the environment.
 */

// Create a contextualized logger for this file
const serviceLogger = Logger.forContext(
	'services/vendor.atlassian.workspaces.service.ts',
);

// Log service initialization
serviceLogger.debug('Bitbucket workspaces service initialized');

/**
 * List Bitbucket workspaces with optional filtering and pagination
 *
 * Retrieves a list of workspaces from Bitbucket with support for various filters
 * and pagination options.
 *
 * NOTE: The /2.0/user/permissions/workspaces endpoint does not support sorting,
 * despite the ListWorkspacesParams type including a sort parameter.
 *
 * @async
 * @memberof VendorAtlassianWorkspacesService
 * @param {ListWorkspacesParams} [params={}] - Optional parameters for customizing the request
 * @param {string} [params.q] - Filter by workspace name
 * @param {number} [params.page] - Page number
 * @param {number} [params.pagelen] - Number of items per page
 * @returns {Promise<z.infer<typeof WorkspacePermissionsResponseSchema>>} Promise containing the validated workspaces response
 * @throws {McpError} If validation fails, credentials are missing, or API request fails
 * @example
 * // List workspaces with pagination
 * const response = await list({
 *   pagelen: 10
 * });
 */
async function list(
	params: ListWorkspacesParams = {},
): Promise<z.infer<typeof WorkspacePermissionsResponseSchema>> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.workspaces.service.ts',
		'list',
	);
	methodLogger.debug('Listing Bitbucket workspaces with params:', params);

	// Validate params with Zod
	try {
		ListWorkspacesParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to list workspaces:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters for listing workspaces: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	// Build query parameters
	const queryParams = new URLSearchParams();

	// Add optional query parameters if provided
	// NOTE: Sort is intentionally not included as the /2.0/user/permissions/workspaces endpoint
	// does not support sorting on any field
	if (params.q) {
		queryParams.set('q', params.q);
	}
	if (params.pagelen) {
		queryParams.set('pagelen', params.pagelen.toString());
	}
	if (params.page) {
		queryParams.set('page', params.page.toString());
	}

	const queryString = queryParams.toString()
		? `?${queryParams.toString()}`
		: '';
	const path = `${API_PATH}/user/permissions/workspaces${queryString}`;

	methodLogger.debug(`Sending request to: ${path}`);
	try {
		const response = await fetchAtlassian(credentials, path);
		// Validate response with Zod schema
		try {
			const validatedData = WorkspacePermissionsResponseSchema.parse(
				response.data,
			);
			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					`Invalid response format from Bitbucket API for workspace list: ${error.message}`,
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to list workspaces: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Get detailed information about a specific Bitbucket workspace
 *
 * Retrieves comprehensive details about a single workspace.
 *
 * @async
 * @memberof VendorAtlassianWorkspacesService
 * @param {string} workspace - The workspace slug
 * @returns {Promise<z.infer<typeof WorkspaceDetailedSchema>>} Promise containing the validated workspace information
 * @throws {McpError} If validation fails, credentials are missing, or API request fails
 * @example
 * // Get workspace details
 * const workspace = await get('my-workspace');
 */
async function get(
	workspace: string,
): Promise<z.infer<typeof WorkspaceDetailedSchema>> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.workspaces.service.ts',
		'get',
	);
	methodLogger.debug(`Getting Bitbucket workspace with slug: ${workspace}`);

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	// Currently no query parameters for workspace details API
	const path = `${API_PATH}/workspaces/${workspace}`;

	methodLogger.debug(`Sending request to: ${path}`);
	try {
		const response = await fetchAtlassian(credentials, path);
		// Validate response with Zod schema
		try {
			const validatedData = WorkspaceDetailedSchema.parse(response.data);
			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					`Invalid response format from Bitbucket API for workspace details: ${error.message}`,
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to get workspace details: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

export default { list, get };

```

--------------------------------------------------------------------------------
/src/utils/error.util.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, expect, test } from '@jest/globals';
import {
	ErrorType,
	McpError,
	createApiError,
	createAuthMissingError,
	createAuthInvalidError,
	createUnexpectedError,
	ensureMcpError,
	formatErrorForMcpTool,
	formatErrorForMcpResource,
	getDeepOriginalError,
} from './error.util.js';

describe('Error Utilities', () => {
	describe('Error creation functions', () => {
		test('createAuthMissingError creates an error with AUTH_MISSING type', () => {
			const error = createAuthMissingError('Missing credentials');
			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.AUTH_MISSING);
			expect(error.message).toBe('Missing credentials');
			expect(error.statusCode).toBeUndefined();
		});

		test('createAuthInvalidError creates an error with AUTH_INVALID type and 401 status', () => {
			const error = createAuthInvalidError('Invalid token');
			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.AUTH_INVALID);
			expect(error.message).toBe('Invalid token');
			expect(error.statusCode).toBe(401);
		});

		test('createApiError creates an error with API_ERROR type and specified status', () => {
			const error = createApiError('Not found', 404, {
				details: 'Resource missing',
			});
			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.API_ERROR);
			expect(error.message).toBe('Not found');
			expect(error.statusCode).toBe(404);
			expect(error.originalError).toEqual({
				details: 'Resource missing',
			});
		});

		test('createUnexpectedError creates an error with UNEXPECTED_ERROR type', () => {
			const originalError = new Error('Original error');
			const error = createUnexpectedError(
				'Something went wrong',
				originalError,
			);
			expect(error).toBeInstanceOf(McpError);
			expect(error.type).toBe(ErrorType.UNEXPECTED_ERROR);
			expect(error.message).toBe('Something went wrong');
			expect(error.statusCode).toBeUndefined();
			expect(error.originalError).toBe(originalError);
		});
	});

	describe('ensureMcpError function', () => {
		test('returns the error if it is already an McpError', () => {
			const error = createApiError('API error', 500);
			expect(ensureMcpError(error)).toBe(error);
		});

		test('wraps a standard Error with McpError', () => {
			const stdError = new Error('Standard error');
			const mcpError = ensureMcpError(stdError);
			expect(mcpError).toBeInstanceOf(McpError);
			expect(mcpError.message).toBe('Standard error');
			expect(mcpError.type).toBe(ErrorType.UNEXPECTED_ERROR);
			expect(mcpError.originalError).toBe(stdError);
		});

		test('wraps a string with McpError', () => {
			const mcpError = ensureMcpError('Error message');
			expect(mcpError).toBeInstanceOf(McpError);
			expect(mcpError.message).toBe('Error message');
			expect(mcpError.type).toBe(ErrorType.UNEXPECTED_ERROR);
		});

		test('wraps other types with McpError', () => {
			const mcpError = ensureMcpError({ message: 'Object error' });
			expect(mcpError).toBeInstanceOf(McpError);
			expect(mcpError.message).toBe('[object Object]');
			expect(mcpError.type).toBe(ErrorType.UNEXPECTED_ERROR);
		});
	});

	describe('getDeepOriginalError function', () => {
		test('returns the deepest error in a chain', () => {
			const deepestError = { message: 'Root cause' };
			const level3 = createApiError('Level 3', 500, deepestError);
			const level2 = createApiError('Level 2', 500, level3);
			const level1 = createApiError('Level 1', 500, level2);

			expect(getDeepOriginalError(level1)).toEqual(deepestError);
		});

		test('handles non-McpError values', () => {
			const originalValue = 'Original error text';
			expect(getDeepOriginalError(originalValue)).toBe(originalValue);
		});

		test('stops traversing at maximum depth', () => {
			// Create a circular error chain that would cause infinite recursion
			const circular1: any = new McpError(
				'Circular 1',
				ErrorType.API_ERROR,
			);
			const circular2: any = new McpError(
				'Circular 2',
				ErrorType.API_ERROR,
			);
			circular1.originalError = circular2;
			circular2.originalError = circular1;

			// Should not cause infinite recursion
			const result = getDeepOriginalError(circular1);

			// Expect either circular1 or circular2 depending on max depth
			expect([circular1, circular2]).toContain(result);
		});
	});

	describe('formatErrorForMcpTool function', () => {
		test('formats an McpError for MCP tool response with raw error details', () => {
			const originalError = {
				code: 'NOT_FOUND',
				message: 'Repository does not exist',
			};
			const error = createApiError(
				'Resource not found',
				404,
				originalError,
			);

			const formatted = formatErrorForMcpTool(error);

			expect(formatted).toHaveProperty('content');
			expect(formatted).toHaveProperty('isError', true);
			expect(formatted.content[0].type).toBe('text');
			// Should contain the error message
			expect(formatted.content[0].text).toContain(
				'Error: Resource not found',
			);
			// Should contain HTTP status
			expect(formatted.content[0].text).toContain('HTTP Status: 404');
			// Should contain raw API response with original error details
			expect(formatted.content[0].text).toContain('Raw API Response:');
			expect(formatted.content[0].text).toContain('NOT_FOUND');
			expect(formatted.content[0].text).toContain(
				'Repository does not exist',
			);
		});

		test('formats a non-McpError for MCP tool response', () => {
			const error = new Error('Standard error');

			const formatted = formatErrorForMcpTool(error);

			expect(formatted).toHaveProperty('content');
			expect(formatted).toHaveProperty('isError', true);
			expect(formatted.content[0].type).toBe('text');
			expect(formatted.content[0].text).toContain(
				'Error: Standard error',
			);
		});

		test('extracts detailed error information from nested errors', () => {
			const deepError = {
				message: 'API quota exceeded',
				type: 'RateLimitError',
			};
			const midError = createApiError(
				'Rate limit exceeded',
				429,
				deepError,
			);
			const topError = createApiError('API error', 429, midError);

			const formatted = formatErrorForMcpTool(topError);

			expect(formatted.content[0].text).toContain('Error: API error');
			// Should include the deep error details in raw response
			expect(formatted.content[0].text).toContain('API quota exceeded');
			expect(formatted.content[0].text).toContain('RateLimitError');
		});
	});

	describe('formatErrorForMcpResource', () => {
		it('should format an error for MCP resource response', () => {
			const error = createApiError('API error');
			const response = formatErrorForMcpResource(error, 'test://uri');

			expect(response).toHaveProperty('contents');
			expect(response.contents).toHaveLength(1);
			expect(response.contents[0]).toHaveProperty('uri', 'test://uri');
			expect(response.contents[0]).toHaveProperty(
				'text',
				'Error: API error',
			);
			expect(response.contents[0]).toHaveProperty(
				'mimeType',
				'text/plain',
			);
			expect(response.contents[0]).toHaveProperty(
				'description',
				'Error: API_ERROR',
			);
		});
	});
});

```

--------------------------------------------------------------------------------
/src/utils/pagination.util.ts:
--------------------------------------------------------------------------------

```typescript
import { Logger } from './logger.util.js';
import { DATA_LIMITS } from './constants.util.js';
import { ResponsePagination } from '../types/common.types.js';

/**
 * Represents the possible pagination types.
 */
export enum PaginationType {
	CURSOR = 'cursor', // Confluence, Bitbucket (some endpoints)
	OFFSET = 'offset', // Jira
	PAGE = 'page', // Bitbucket (most endpoints)
}

/**
 * Interface representing the common structure of paginated data from APIs.
 * This union type covers properties used by offset, cursor, and page-based pagination.
 */
interface PaginationData {
	// Shared
	results?: unknown[];
	values?: unknown[];
	count?: number;
	size?: number; // Total count in Bitbucket page responses
	hasMore?: boolean;
	_links?: { next?: string }; // Confluence cursor
	// Offset-based (Jira)
	startAt?: number;
	maxResults?: number;
	total?: number;
	nextPage?: string; // Alternative next indicator for offset
	// Page-based (Bitbucket)
	page?: number;
	pagelen?: number;
	next?: string; // Bitbucket page URL
}

/**
 * Extract pagination information from API response
 * @param data The API response containing pagination information
 * @param paginationType The type of pagination mechanism used
 * @returns Object with nextCursor, hasMore, and count properties
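 *
 * @example
 * // Illustrative Bitbucket page-based response (URL shortened)
 * extractPaginationInfo(
 *   { values: [{}], page: 1, pagelen: 10, size: 25, next: 'https://api.bitbucket.org/2.0/repositories/acme?page=2' },
 *   PaginationType.PAGE,
 * );
 * // => { hasMore: true, count: 1, page: 1, size: 10, total: 25, nextCursor: '2' }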
 */
export function extractPaginationInfo<T extends Partial<PaginationData>>(
	data: T,
	paginationType: PaginationType,
): ResponsePagination | undefined {
	if (!data) {
		return undefined;
	}

	let pagination: ResponsePagination | undefined;
	const methodLogger = Logger.forContext(
		'utils/pagination.util.ts',
		'extractPaginationInfo',
	);

	switch (paginationType) {
		case PaginationType.PAGE: {
			// Bitbucket page-based pagination (page, pagelen, size, next)
			if (data.page !== undefined && data.pagelen !== undefined) {
				const hasMore = !!data.next;
				let nextCursorValue: string | undefined = undefined;

				if (hasMore) {
					try {
						// First attempt to parse the full URL if it looks like one
						if (
							typeof data.next === 'string' &&
							data.next.includes('://')
						) {
							const nextUrl = new URL(data.next);
							nextCursorValue =
								nextUrl.searchParams.get('page') || undefined;
							methodLogger.debug(
								`Successfully extracted page from URL: ${nextCursorValue}`,
							);
						} else if (data.next === 'available') {
							// Handle the 'available' placeholder used in some transformed responses
							nextCursorValue = String(Number(data.page) + 1);
							methodLogger.debug(
								`Using calculated next page from 'available': ${nextCursorValue}`,
							);
						} else if (typeof data.next === 'string') {
							// Try to use data.next directly if it's not a URL but still a string
							nextCursorValue = data.next;
							methodLogger.debug(
								`Using next value directly: ${nextCursorValue}`,
							);
						}
					} catch (e) {
						// If URL parsing fails, calculate the next page based on current page
						nextCursorValue = String(Number(data.page) + 1);
						methodLogger.debug(
							`Calculated next page after URL parsing error: ${nextCursorValue}`,
						);
						methodLogger.warn(
							`Failed to parse next URL: ${data.next}`,
							e,
						);
					}
				}

				pagination = {
					hasMore,
					count: data.values?.length ?? 0,
					page: data.page,
					size: data.pagelen,
					total: data.size,
					nextCursor: nextCursorValue, // Store next page number as cursor
				};
			}
			break;
		}

		case PaginationType.OFFSET: {
			// Jira offset-based pagination
			const countOffset = data.values?.length;
			if (
				data.startAt !== undefined &&
				data.maxResults !== undefined &&
				data.total !== undefined &&
				data.startAt + data.maxResults < data.total
			) {
				pagination = {
					hasMore: true,
					count: countOffset,
					total: data.total,
					nextCursor: String(data.startAt + data.maxResults),
				};
			} else if (data.nextPage) {
				pagination = {
					hasMore: true,
					count: countOffset,
					nextCursor: data.nextPage,
				};
			}
			break;
		}

		case PaginationType.CURSOR: {
			// Confluence cursor-based pagination
			const countCursor = data.results?.length;
			if (data._links && data._links.next) {
				const nextUrl = data._links.next;
				const cursorMatch = nextUrl.match(/cursor=([^&]+)/);
				if (cursorMatch && cursorMatch[1]) {
					pagination = {
						hasMore: true,
						count: countCursor,
						nextCursor: decodeURIComponent(cursorMatch[1]),
					};
				}
			}
			break;
		}

		default:
			methodLogger.warn(`Unknown pagination type: ${paginationType}`);
	}

	// Ensure a default pagination object if none was created but data exists
	if (!pagination && (data.results || data.values)) {
		pagination = {
			hasMore: false,
			count: data.results?.length ?? data.values?.length ?? 0,
		};
	}

	return pagination;
}

/**
 * Validates and enforces page size limits to prevent excessive data exposure (CWE-770)
 * @param requestedPageSize The requested page size from the client
 * @param contextInfo Optional context for logging (e.g., endpoint name)
 * @returns The validated page size (clamped to maximum allowed)
 */
export function validatePageSize(
	requestedPageSize?: number,
	contextInfo?: string,
): number {
	const methodLogger = Logger.forContext(
		'utils/pagination.util.ts',
		'validatePageSize',
	);

	// Use default if not specified
	if (!requestedPageSize || requestedPageSize <= 0) {
		const defaultSize = DATA_LIMITS.DEFAULT_PAGE_SIZE;
		methodLogger.debug(
			`Using default page size: ${defaultSize}${contextInfo ? ` for ${contextInfo}` : ''}`,
		);
		return defaultSize;
	}

	// Enforce maximum page size limit
	if (requestedPageSize > DATA_LIMITS.MAX_PAGE_SIZE) {
		const clampedSize = DATA_LIMITS.MAX_PAGE_SIZE;
		methodLogger.warn(
			`Page size ${requestedPageSize} exceeds maximum limit. Clamped to ${clampedSize}${contextInfo ? ` for ${contextInfo}` : ''}`,
		);
		return clampedSize;
	}

	methodLogger.debug(
		`Using requested page size: ${requestedPageSize}${contextInfo ? ` for ${contextInfo}` : ''}`,
	);
	return requestedPageSize;
}

/**
 * Validates pagination data to ensure it doesn't exceed configured limits
 * @param paginationData The pagination data to validate
 * @param contextInfo Optional context for logging
 * @returns True if data is within limits, false otherwise
 */
export function validatePaginationLimits(
	paginationData: { count?: number; size?: number; pagelen?: number },
	contextInfo?: string,
): boolean {
	const methodLogger = Logger.forContext(
		'utils/pagination.util.ts',
		'validatePaginationLimits',
	);

	// Check if the response contains more items than our maximum allowed
	const itemCount = paginationData.count ?? 0;
	const pageSize = paginationData.size ?? paginationData.pagelen ?? 0;

	if (itemCount > DATA_LIMITS.MAX_PAGE_SIZE) {
		methodLogger.warn(
			`Response contains ${itemCount} items, exceeding maximum of ${DATA_LIMITS.MAX_PAGE_SIZE}${contextInfo ? ` for ${contextInfo}` : ''}`,
		);
		return false;
	}

	if (pageSize > DATA_LIMITS.MAX_PAGE_SIZE) {
		methodLogger.warn(
			`Response page size ${pageSize} exceeds maximum of ${DATA_LIMITS.MAX_PAGE_SIZE}${contextInfo ? ` for ${contextInfo}` : ''}`,
		);
		return false;
	}

	return true;
}

```
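
A minimal usage sketch (not part of the repository) for the helpers above, assuming a hypothetical Bitbucket page-based list response. The workspace name, URL, and the requested limit of 500 are made up, and the actual clamp value depends on `DATA_LIMITS` in `constants.util.ts`:

```typescript
import {
	extractPaginationInfo,
	validatePageSize,
	PaginationType,
} from './pagination.util.js';

// Clamp an oversized client request to the configured maximum page size.
const pageSize = validatePageSize(500, 'repositories list');

// Trimmed, hypothetical Bitbucket page-based payload.
const response = {
	page: 1,
	pagelen: pageSize,
	size: 120, // total item count reported by Bitbucket
	next: 'https://api.bitbucket.org/2.0/repositories/acme?page=2',
	values: [{ full_name: 'acme/service-a' }],
};

// Yields { hasMore: true, count: 1, page: 1, size: pageSize, total: 120, nextCursor: '2' }
const pagination = extractPaginationInfo(response, PaginationType.PAGE);
```

Because `next` is a full URL here, the next page number is read from its `page` query parameter; the fallbacks in the switch only apply when `next` is the `'available'` placeholder or URL parsing fails.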

--------------------------------------------------------------------------------
/src/controllers/atlassian.repositories.content.controller.ts:
--------------------------------------------------------------------------------

```typescript
import atlassianRepositoriesService from '../services/vendor.atlassian.repositories.service.js';
import { Logger } from '../utils/logger.util.js';
import { handleControllerError } from '../utils/error-handler.util.js';
import { ControllerResponse } from '../types/common.types.js';
import { CloneRepositoryToolArgsType } from '../tools/atlassian.repositories.types.js';
import { getDefaultWorkspace } from '../utils/workspace.util.js';
import { executeShellCommand } from '../utils/shell.util.js';
import * as path from 'path';
import * as fs from 'fs/promises';
import { constants } from 'fs';

// Logger instance for this module
const logger = Logger.forContext(
	'controllers/atlassian.repositories.content.controller.ts',
);

/**
 * Clones a Bitbucket repository to the local filesystem
 * @param options Options including repository identifiers and target path
 * @returns Information about the cloned repository
 */
export async function handleCloneRepository(
	options: CloneRepositoryToolArgsType,
): Promise<ControllerResponse> {
	const methodLogger = logger.forMethod('handleCloneRepository');
	methodLogger.debug('Cloning repository with options:', options);

	try {
		// Handle optional workspaceSlug
		let { workspaceSlug } = options;
		if (!workspaceSlug) {
			methodLogger.debug(
				'No workspace provided, fetching default workspace',
			);
			const defaultWorkspace = await getDefaultWorkspace();
			if (!defaultWorkspace) {
				throw new Error(
					'No default workspace found. Please provide a workspace slug.',
				);
			}
			workspaceSlug = defaultWorkspace;
			methodLogger.debug(`Using default workspace: ${defaultWorkspace}`);
		}

		// Required parameters check
		const { repoSlug, targetPath } = options;
		if (!repoSlug) {
			throw new Error('Repository slug is required');
		}
		if (!targetPath) {
			throw new Error('Target path is required');
		}

		// Normalize and resolve the target path
		// If it's a relative path, convert it to absolute based on current working directory
		const processedTargetPath = path.isAbsolute(targetPath)
			? targetPath
			: path.resolve(process.cwd(), targetPath);

		methodLogger.debug(
			`Normalized target path: ${processedTargetPath} (original: ${targetPath})`,
		);

		// Validate directory access and permissions before proceeding
		try {
			// Check if target directory exists
			try {
				await fs.access(processedTargetPath, constants.F_OK);
				methodLogger.debug(
					`Target directory exists: ${processedTargetPath}`,
				);

				// If it exists, check if we have write permission
				try {
					await fs.access(processedTargetPath, constants.W_OK);
					methodLogger.debug(
						`Have write permission to: ${processedTargetPath}`,
					);
				} catch {
					throw new Error(
						`Permission denied: You don't have write access to the target directory: ${processedTargetPath}`,
					);
				}
			} catch {
				// Directory doesn't exist, try to create it
				methodLogger.debug(
					`Target directory doesn't exist, creating: ${processedTargetPath}`,
				);
				try {
					await fs.mkdir(processedTargetPath, { recursive: true });
					methodLogger.debug(
						`Successfully created directory: ${processedTargetPath}`,
					);
				} catch (mkdirError) {
					throw new Error(
						`Failed to create target directory ${processedTargetPath}: ${(mkdirError as Error).message}. Please ensure you have write permissions to the parent directory.`,
					);
				}
			}
		} catch (accessError) {
			methodLogger.error('Path access error:', accessError);
			throw accessError;
		}

		// Get repository details to determine clone URL
		methodLogger.debug(
			`Getting repository details for ${workspaceSlug}/${repoSlug}`,
		);
		const repoDetails = await atlassianRepositoriesService.get({
			workspace: workspaceSlug,
			repo_slug: repoSlug,
		});

		// Find SSH clone URL (preferred) or fall back to HTTPS
		let cloneUrl: string | undefined;
		let cloneProtocol: string = 'SSH'; // Default to SSH

		if (repoDetails.links?.clone) {
			// First try to find SSH clone URL
			const sshClone = repoDetails.links.clone.find(
				(link) => link.name === 'ssh',
			);

			if (sshClone) {
				cloneUrl = sshClone.href;
			} else {
				// Fall back to HTTPS if SSH is not available
				const httpsClone = repoDetails.links.clone.find(
					(link) => link.name === 'https',
				);

				if (httpsClone) {
					cloneUrl = httpsClone.href;
					cloneProtocol = 'HTTPS';
					methodLogger.warn(
						'SSH clone URL not found, falling back to HTTPS',
					);
				}
			}
		}

		if (!cloneUrl) {
			throw new Error(
				'Could not find a valid clone URL for the repository',
			);
		}

		// Determine full target directory path
		// Clone into a subdirectory named after the repo slug
		const targetDir = path.join(processedTargetPath, repoSlug);
		methodLogger.debug(`Will clone to: ${targetDir}`);

		// Check if directory already exists
		try {
			const stats = await fs.stat(targetDir);
			if (stats.isDirectory()) {
				methodLogger.warn(
					`Target directory already exists: ${targetDir}`,
				);
				return {
					content: `Target directory \`${targetDir}\` already exists. Please choose a different target path or remove the existing directory.`,
				};
			}
		} catch {
			// Error means directory doesn't exist, which is what we want
			methodLogger.debug(
				`Target directory doesn't exist, proceeding with clone`,
			);
		}

		// Execute git clone command
		methodLogger.debug(`Cloning from URL (${cloneProtocol}): ${cloneUrl}`);
		const command = `git clone ${cloneUrl} "${targetDir}"`;

		try {
			const result = await executeShellCommand(
				command,
				'cloning repository',
			);

			// Return success message with more detailed information
			return {
				content:
					`Successfully cloned repository \`${workspaceSlug}/${repoSlug}\` to \`${targetDir}\` using ${cloneProtocol}.\n\n` +
					`**Details:**\n` +
					`- **Repository**: ${workspaceSlug}/${repoSlug}\n` +
					`- **Clone Protocol**: ${cloneProtocol}\n` +
					`- **Target Location**: ${targetDir}\n\n` +
					`**Output:**\n\`\`\`\n${result}\n\`\`\`\n\n` +
					`**Note**: If this is your first time cloning with SSH, ensure your SSH keys are set up correctly.`,
			};
		} catch (cloneError) {
			// Enhanced error message with troubleshooting steps
			const errorMsg = `Failed to clone repository: ${(cloneError as Error).message}`;
			let troubleshooting = '';

			if (cloneProtocol === 'SSH') {
				troubleshooting =
					`\n\n**Troubleshooting SSH Clone Issues:**\n` +
					`1. Ensure you have SSH keys set up with Bitbucket\n` +
					`2. Check if your SSH agent is running: \`eval "$(ssh-agent -s)"; ssh-add\`\n` +
					`3. Verify connectivity: \`ssh -T git@bitbucket.org\`\n` +
					`4. Try using HTTPS instead (modify your tool call with a different repository URL)`;
			} else {
				troubleshooting =
					`\n\n**Troubleshooting HTTPS Clone Issues:**\n` +
					`1. Check your Bitbucket credentials\n` +
					`2. Ensure the target directory is writable\n` +
					`3. Try running the command manually to see detailed errors`;
			}

			throw new Error(errorMsg + troubleshooting);
		}
	} catch (error) {
		throw handleControllerError(error, {
			entityType: 'Repository',
			operation: 'clone',
			source: 'controllers/atlassian.repositories.content.controller.ts@handleCloneRepository',
			additionalInfo: options,
		});
	}
}

```
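
A hedged invocation sketch for the controller above; the workspace, repository slug, and target path are hypothetical, and `CloneRepositoryToolArgsType` (defined in `atlassian.repositories.types.ts`) may carry additional optional fields not shown here:

```typescript
import { handleCloneRepository } from './atlassian.repositories.content.controller.js';

async function cloneExample(): Promise<void> {
	const result = await handleCloneRepository({
		workspaceSlug: 'acme', // omit to fall back to the default workspace lookup
		repoSlug: 'service-a',
		targetPath: './checkouts', // relative paths resolve against process.cwd()
	});

	// result.content is a Markdown summary: protocol used, target directory, git output.
	console.log(result.content);
}
```

The controller prefers the SSH clone link and only falls back to HTTPS when no SSH link is present, so the summary also notes which protocol was used.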

--------------------------------------------------------------------------------
/src/utils/error-handler.util.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, expect, test } from '@jest/globals';
import {
	ErrorCode,
	buildErrorContext,
	detectErrorType,
	createUserFriendlyErrorMessage,
	handleControllerError,
} from './error-handler.util.js';
import { McpError, ErrorType, createApiError } from './error.util.js';

describe('Error Handler Utilities', () => {
	describe('buildErrorContext function', () => {
		test('builds a complete error context object', () => {
			const context = buildErrorContext(
				'Repository',
				'retrieving',
				'controllers/repositories.controller.ts@get',
				{ workspaceSlug: 'atlassian', repoSlug: 'bitbucket' },
				{ queryParams: { sort: 'name' } },
			);

			expect(context).toEqual({
				entityType: 'Repository',
				operation: 'retrieving',
				source: 'controllers/repositories.controller.ts@get',
				entityId: { workspaceSlug: 'atlassian', repoSlug: 'bitbucket' },
				additionalInfo: { queryParams: { sort: 'name' } },
			});
		});

		test('handles minimal required parameters', () => {
			const context = buildErrorContext(
				'Repository',
				'listing',
				'controllers/repositories.controller.ts@list',
			);

			expect(context).toEqual({
				entityType: 'Repository',
				operation: 'listing',
				source: 'controllers/repositories.controller.ts@list',
			});
			expect(context.entityId).toBeUndefined();
			expect(context.additionalInfo).toBeUndefined();
		});
	});

	describe('detectErrorType function', () => {
		test('detects network errors', () => {
			const error = new Error('network error: connection refused');
			const result = detectErrorType(error);
			expect(result).toEqual({
				code: ErrorCode.NETWORK_ERROR,
				statusCode: 500,
			});
		});

		test('detects rate limit errors', () => {
			const error = new Error('too many requests');
			const result = detectErrorType(error);
			expect(result).toEqual({
				code: ErrorCode.RATE_LIMIT_ERROR,
				statusCode: 429,
			});
		});

		test('detects not found errors', () => {
			const error = new Error('resource not found');
			const result = detectErrorType(error);
			expect(result).toEqual({
				code: ErrorCode.NOT_FOUND,
				statusCode: 404,
			});
		});

		test('detects access denied errors', () => {
			const error = new Error('insufficient permissions');
			const result = detectErrorType(error);
			expect(result).toEqual({
				code: ErrorCode.ACCESS_DENIED,
				statusCode: 403,
			});
		});

		test('detects validation errors', () => {
			const error = new Error('validation failed: invalid input');
			const result = detectErrorType(error);
			expect(result).toEqual({
				code: ErrorCode.VALIDATION_ERROR,
				statusCode: 400,
			});
		});

		test('defaults to unexpected error', () => {
			const error = new Error('something unexpected happened');
			const result = detectErrorType(error);
			expect(result).toEqual({
				code: ErrorCode.UNEXPECTED_ERROR,
				statusCode: 500,
			});
		});

		test('respects explicit status code from error', () => {
			const error = new McpError(
				'Custom error',
				ErrorType.API_ERROR,
				418,
			);
			const result = detectErrorType(error);
			expect(result.statusCode).toBe(418);
		});

		test('detects Bitbucket-specific repository not found errors', () => {
			const bitbucketError = {
				error: {
					message: 'repository not found',
				},
			};
			const mcpError = createApiError('API Error', 404, bitbucketError);
			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.NOT_FOUND,
				statusCode: 404,
			});
		});

		test('detects Bitbucket-specific permission errors', () => {
			const bitbucketError = {
				error: {
					message: 'access denied for this repository',
				},
			};
			const mcpError = createApiError('API Error', 403, bitbucketError);
			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.ACCESS_DENIED,
				statusCode: 403,
			});
		});

		test('detects Bitbucket-specific validation errors', () => {
			const bitbucketError = {
				error: {
					message: 'invalid parameter: repository name',
				},
			};
			const mcpError = createApiError('API Error', 400, bitbucketError);
			const result = detectErrorType(mcpError);
			expect(result).toEqual({
				code: ErrorCode.VALIDATION_ERROR,
				statusCode: 400,
			});
		});
	});

	describe('createUserFriendlyErrorMessage function', () => {
		test('creates NOT_FOUND message with entityId string', () => {
			const message = createUserFriendlyErrorMessage(
				ErrorCode.NOT_FOUND,
				{
					entityType: 'Repository',
					entityId: 'atlassian/bitbucket',
				},
			);
			expect(message).toContain(
				'Repository atlassian/bitbucket not found',
			);
		});

		test('creates NOT_FOUND message with entityId object', () => {
			const message = createUserFriendlyErrorMessage(
				ErrorCode.NOT_FOUND,
				{
					entityType: 'Repository',
					entityId: {
						workspaceSlug: 'atlassian',
						repoSlug: 'bitbucket',
					},
				},
			);
			expect(message).toContain(
				'Repository atlassian/bitbucket not found',
			);
		});

		test('creates ACCESS_DENIED message', () => {
			const message = createUserFriendlyErrorMessage(
				ErrorCode.ACCESS_DENIED,
				{
					entityType: 'Repository',
					entityId: 'atlassian/bitbucket',
				},
			);
			expect(message).toContain(
				'Access denied for repository atlassian/bitbucket',
			);
		});

		test('creates VALIDATION_ERROR message', () => {
			const originalMessage = 'Invalid repository name';
			const message = createUserFriendlyErrorMessage(
				ErrorCode.VALIDATION_ERROR,
				{
					entityType: 'Repository',
					operation: 'creating',
				},
				originalMessage,
			);
			expect(message).toBe(
				`${originalMessage} Error details: ${originalMessage}`,
			);
		});

		test('creates NETWORK_ERROR message', () => {
			const message = createUserFriendlyErrorMessage(
				ErrorCode.NETWORK_ERROR,
				{
					entityType: 'Repository',
					operation: 'retrieving',
				},
			);
			expect(message).toContain('Network error');
			expect(message).toContain('Bitbucket API');
		});

		test('creates RATE_LIMIT_ERROR message', () => {
			const message = createUserFriendlyErrorMessage(
				ErrorCode.RATE_LIMIT_ERROR,
			);
			expect(message).toContain('Bitbucket API rate limit exceeded');
		});

		test('includes original message for non-specific errors', () => {
			const message = createUserFriendlyErrorMessage(
				ErrorCode.UNEXPECTED_ERROR,
				{
					entityType: 'Repository',
					operation: 'processing',
				},
				'Something went wrong',
			);
			expect(message).toContain('unexpected error');
			expect(message).toContain('Something went wrong');
		});
	});

	describe('handleControllerError function', () => {
		test('throws appropriate API error with user-friendly message', () => {
			const originalError = new Error('Repository not found');
			const context = buildErrorContext(
				'Repository',
				'retrieving',
				'controllers/repositories.controller.ts@get',
				'atlassian/bitbucket',
			);

			expect(() => {
				handleControllerError(originalError, context);
			}).toThrow(McpError);

			try {
				handleControllerError(originalError, context);
			} catch (error) {
				expect(error).toBeInstanceOf(McpError);
				expect((error as McpError).type).toBe(ErrorType.API_ERROR);
				expect((error as McpError).statusCode).toBe(404);
				expect((error as McpError).message).toContain(
					'Repository atlassian/bitbucket not found',
				);
				expect((error as McpError).originalError).toBe(originalError);
			}
		});
	});
});

```

--------------------------------------------------------------------------------
/src/utils/error.util.ts:
--------------------------------------------------------------------------------

```typescript
import { Logger } from './logger.util.js';
import { formatSeparator } from './formatter.util.js';

/**
 * Error types for MCP errors
 */
export type McpErrorType =
	| 'AUTHENTICATION_REQUIRED'
	| 'NOT_FOUND'
	| 'VALIDATION_ERROR'
	| 'RATE_LIMIT_EXCEEDED'
	| 'API_ERROR'
	| 'UNEXPECTED_ERROR';

/**
 * Error types for classification
 */
export enum ErrorType {
	AUTH_MISSING = 'AUTH_MISSING',
	AUTH_INVALID = 'AUTH_INVALID',
	API_ERROR = 'API_ERROR',
	UNEXPECTED_ERROR = 'UNEXPECTED_ERROR',
}

/**
 * Custom error class with type classification
 */
export class McpError extends Error {
	type: ErrorType;
	errorType?: McpErrorType; // Mirrors ErrorType; consumed by error-handler.util.ts
	statusCode?: number;
	originalError?: unknown;

	constructor(
		message: string,
		type: ErrorType,
		statusCode?: number,
		originalError?: unknown,
	) {
		super(message);
		this.name = 'McpError';
		this.type = type;
		this.statusCode = statusCode;
		this.originalError = originalError;

		// Set errorType based on type
		switch (type) {
			case ErrorType.AUTH_MISSING:
			case ErrorType.AUTH_INVALID:
				this.errorType = 'AUTHENTICATION_REQUIRED';
				break;
			case ErrorType.API_ERROR:
				this.errorType =
					statusCode === 404
						? 'NOT_FOUND'
						: statusCode === 429
							? 'RATE_LIMIT_EXCEEDED'
							: 'API_ERROR';
				break;
			case ErrorType.UNEXPECTED_ERROR:
			default:
				this.errorType = 'UNEXPECTED_ERROR';
				break;
		}
	}
}

/**
 * Helper to unwrap nested McpErrors and return the deepest original error.
 * This is useful when an McpError contains another McpError as `originalError`
 * which in turn may wrap the vendor (Bitbucket) error text or object.
 */
export function getDeepOriginalError(error: unknown): unknown {
	if (!error) {
		return error;
	}

	let current = error;
	let depth = 0;
	const maxDepth = 10; // Guard against unbounded nesting of wrapped errors

	while (
		depth < maxDepth &&
		current instanceof Error &&
		'originalError' in current &&
		current.originalError
	) {
		current = current.originalError;
		depth++;
	}

	return current;
}

/**
 * Create an authentication missing error
 */
export function createAuthMissingError(
	message: string = 'Authentication credentials are missing',
	originalError?: unknown,
): McpError {
	return new McpError(
		message,
		ErrorType.AUTH_MISSING,
		undefined,
		originalError,
	);
}

/**
 * Create an authentication invalid error
 */
export function createAuthInvalidError(
	message: string = 'Authentication credentials are invalid',
	originalError?: unknown,
): McpError {
	return new McpError(message, ErrorType.AUTH_INVALID, 401, originalError);
}

/**
 * Create an API error
 */
export function createApiError(
	message: string,
	statusCode?: number,
	originalError?: unknown,
): McpError {
	return new McpError(
		message,
		ErrorType.API_ERROR,
		statusCode,
		originalError,
	);
}

/**
 * Create an unexpected error
 */
export function createUnexpectedError(
	message: string = 'An unexpected error occurred',
	originalError?: unknown,
): McpError {
	return new McpError(
		message,
		ErrorType.UNEXPECTED_ERROR,
		undefined,
		originalError,
	);
}

/**
 * Ensure an error is an McpError
 */
export function ensureMcpError(error: unknown): McpError {
	if (error instanceof McpError) {
		return error;
	}

	if (error instanceof Error) {
		return createUnexpectedError(error.message, error);
	}

	return createUnexpectedError(String(error));
}

/**
 * Format error for MCP tool response
 * Includes raw error details in the text content so AI can see the full context
 */
export function formatErrorForMcpTool(error: unknown): {
	content: Array<{ type: 'text'; text: string }>;
	isError: boolean;
} {
	const methodLogger = Logger.forContext(
		'utils/error.util.ts',
		'formatErrorForMcpTool',
	);
	const mcpError = ensureMcpError(error);
	methodLogger.error(`${mcpError.type} error`, mcpError);

	// Get the deep original error for additional context
	const originalError = getDeepOriginalError(mcpError.originalError);

	// Build error text with full details visible to AI
	let errorText = `Error: ${mcpError.message}`;

	// Add status code if available
	if (mcpError.statusCode) {
		errorText += `\nHTTP Status: ${mcpError.statusCode}`;
	}

	// Add raw error details if available (this is the actual Bitbucket API response)
	if (originalError && originalError !== mcpError.message) {
		if (typeof originalError === 'object') {
			errorText += `\n\nRaw API Response:\n${JSON.stringify(originalError, null, 2)}`;
		} else if (typeof originalError === 'string') {
			errorText += `\n\nRaw API Response:\n${originalError}`;
		}
	}

	return {
		content: [
			{
				type: 'text' as const,
				text: errorText,
			},
		],
		isError: true,
	};
}

/**
 * Format error for MCP resource response
 */
export function formatErrorForMcpResource(
	error: unknown,
	uri: string,
): {
	contents: Array<{
		uri: string;
		text: string;
		mimeType: string;
		description?: string;
	}>;
} {
	const methodLogger = Logger.forContext(
		'utils/error.util.ts',
		'formatErrorForMcpResource',
	);
	const mcpError = ensureMcpError(error);
	methodLogger.error(`${mcpError.type} error`, mcpError);

	return {
		contents: [
			{
				uri,
				text: `Error: ${mcpError.message}`,
				mimeType: 'text/plain',
				description: `Error: ${mcpError.type}`,
			},
		],
	};
}

/**
 * Handle error in CLI context with improved user feedback
 */
export function handleCliError(error: unknown): never {
	const methodLogger = Logger.forContext(
		'utils/error.util.ts',
		'handleCliError',
	);
	const mcpError = ensureMcpError(error);
	methodLogger.error(`${mcpError.type} error`, mcpError);

	// Get the deep original error for more context
	const originalError = getDeepOriginalError(mcpError.originalError);

	// Build a well-formatted CLI output using markdown-style helpers
	const cliLines: string[] = [];

	// Primary error headline
	cliLines.push(`❌  ${mcpError.message}`);

	// Status code (if any)
	if (mcpError.statusCode) {
		cliLines.push(`HTTP Status: ${mcpError.statusCode}`);
	}

	// Separator
	cliLines.push(formatSeparator());

	// Provide helpful context based on error type
	if (mcpError.type === ErrorType.AUTH_MISSING) {
		cliLines.push(
			'Tip: Make sure to set up your Atlassian credentials in the configuration file or environment variables:',
		);
		cliLines.push(
			'- ATLASSIAN_SITE_NAME, ATLASSIAN_USER_EMAIL, and ATLASSIAN_API_TOKEN; or',
		);
		cliLines.push(
			'- ATLASSIAN_BITBUCKET_USERNAME and ATLASSIAN_BITBUCKET_APP_PASSWORD',
		);
	} else if (mcpError.type === ErrorType.AUTH_INVALID) {
		cliLines.push(
			'Tip: Check that your Atlassian API token or app password is correct and has not expired.',
		);
		cliLines.push(
			'Also verify that the configured user has access to the requested resource.',
		);
	} else if (mcpError.type === ErrorType.API_ERROR) {
		if (mcpError.statusCode === 429) {
			cliLines.push(
				'Tip: You may have exceeded your Bitbucket API rate limits. Try again later.',
			);
		}
	}

	// Vendor error details (if available)
	if (originalError) {
		cliLines.push('Bitbucket API Error:');
		cliLines.push('```');
		if (typeof originalError === 'object' && originalError !== null) {
			// Try to extract the most useful parts of Bitbucket's error response
			const origErr = originalError as Record<string, unknown>;
			if (origErr.error && typeof origErr.error === 'object') {
				// Format {"error": {"message": "..."}} structure
				const bitbucketError = origErr.error as Record<string, unknown>;
				cliLines.push(
					`Message: ${bitbucketError.message || 'Unknown error'}`,
				);
				if (bitbucketError.detail)
					cliLines.push(`Detail: ${bitbucketError.detail}`);
			} else if (origErr.message) {
				// Simple message
				cliLines.push(`${String(origErr.message)}`);
			} else {
				// Fall back to JSON representation for anything else
				cliLines.push(JSON.stringify(originalError, null, 2));
			}
		} else {
			cliLines.push(String(originalError).trim());
		}
		cliLines.push('```');
	}

	// Display DEBUG tip
	if (!process.env.DEBUG || !process.env.DEBUG.includes('mcp:')) {
		cliLines.push(
			'For more detailed error information, run with DEBUG=mcp:* environment variable.',
		);
	}

	console.error(cliLines.join('\n'));
	process.exit(1);
}

```
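
A hedged sketch (not in the repository) of how a service call might wrap a failed Bitbucket request with `createApiError` so the raw payload survives, and how a tool handler surfaces it with `formatErrorForMcpTool`. The URL is made up, and the example assumes a runtime with a global `fetch` (Node 18+):

```typescript
import { createApiError, formatErrorForMcpTool } from './error.util.js';

async function fetchBitbucket(url: string): Promise<unknown> {
	const res = await fetch(url);
	if (!res.ok) {
		// Keep the vendor payload as originalError; the formatters above
		// include it in the text shown to the AI or CLI user.
		const body = await res.json().catch(() => res.statusText);
		throw createApiError(`Bitbucket API error (${res.status})`, res.status, body);
	}
	return res.json();
}

export async function toolHandlerExample() {
	try {
		return await fetchBitbucket(
			'https://api.bitbucket.org/2.0/repositories/acme/service-a',
		);
	} catch (err) {
		// isError is true; content[0].text carries the message, HTTP status, and raw response.
		return formatErrorForMcpTool(err);
	}
}
```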

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.repositories.types.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';

/**
 * Types for Atlassian Bitbucket Repositories API
 */

// Link href schema
const LinkSchema = z.object({
	href: z.string(),
	name: z.string().optional(),
});

/**
 * Repository SCM type
 */
export const RepositorySCMSchema = z.enum(['git', 'hg']);

/**
 * Repository fork policy
 */
export const RepositoryForkPolicySchema = z.enum([
	'allow_forks',
	'no_public_forks',
	'no_forks',
]);

/**
 * Repository links object
 */
export const RepositoryLinksSchema = z.object({
	self: LinkSchema.optional(),
	html: LinkSchema.optional(),
	avatar: LinkSchema.optional(),
	pullrequests: LinkSchema.optional(),
	commits: LinkSchema.optional(),
	forks: LinkSchema.optional(),
	watchers: LinkSchema.optional(),
	downloads: LinkSchema.optional(),
	clone: z.array(LinkSchema).optional(),
	hooks: LinkSchema.optional(),
	issues: LinkSchema.optional(),
});

/**
 * Repository owner links schema
 */
const OwnerLinksSchema = z.object({
	self: LinkSchema.optional(),
	html: LinkSchema.optional(),
	avatar: LinkSchema.optional(),
});

/**
 * Repository owner object
 */
export const RepositoryOwnerSchema = z.object({
	type: z.enum(['user', 'team']),
	username: z.string().optional(),
	display_name: z.string().optional(),
	uuid: z.string().optional(),
	links: OwnerLinksSchema.optional(),
});

/**
 * Repository branch object
 */
export const RepositoryBranchSchema = z.object({
	type: z.literal('branch'),
	name: z.string(),
});

/**
 * Repository project links schema
 */
const ProjectLinksSchema = z.object({
	self: LinkSchema.optional(),
	html: LinkSchema.optional(),
});

/**
 * Repository project object
 */
export const RepositoryProjectSchema = z.object({
	type: z.literal('project'),
	key: z.string(),
	uuid: z.string(),
	name: z.string(),
	links: ProjectLinksSchema.optional(),
});

/**
 * Repository object returned from the API
 */
export const RepositorySchema = z.object({
	type: z.literal('repository'),
	uuid: z.string(),
	full_name: z.string(),
	name: z.string(),
	description: z.string().optional(),
	is_private: z.boolean(),
	fork_policy: RepositoryForkPolicySchema.optional(),
	created_on: z.string().optional(),
	updated_on: z.string().optional(),
	size: z.number().optional(),
	language: z.string().optional(),
	has_issues: z.boolean().optional(),
	has_wiki: z.boolean().optional(),
	scm: RepositorySCMSchema,
	owner: RepositoryOwnerSchema,
	mainbranch: RepositoryBranchSchema.optional(),
	project: RepositoryProjectSchema.optional(),
	links: RepositoryLinksSchema,
});
export type Repository = z.infer<typeof RepositorySchema>;

/**
 * Parameters for listing repositories
 */
export const ListRepositoriesParamsSchema = z.object({
	workspace: z.string(),
	q: z.string().optional(),
	sort: z.string().optional(),
	page: z.number().optional(),
	pagelen: z.number().optional(),
	role: z.string().optional(),
});
export type ListRepositoriesParams = z.infer<
	typeof ListRepositoriesParamsSchema
>;

/**
 * Parameters for getting a repository by identifier
 */
export const GetRepositoryParamsSchema = z.object({
	workspace: z.string(),
	repo_slug: z.string(),
});
export type GetRepositoryParams = z.infer<typeof GetRepositoryParamsSchema>;

/**
 * API response for listing repositories
 */
export const RepositoriesResponseSchema = z.object({
	pagelen: z.number(),
	page: z.number(),
	size: z.number(),
	next: z.string().optional(),
	previous: z.string().optional(),
	values: z.array(RepositorySchema),
});
export type RepositoriesResponse = z.infer<typeof RepositoriesResponseSchema>;

// --- Commit History Types ---

/**
 * Parameters for listing commits.
 */
export const ListCommitsParamsSchema = z.object({
	workspace: z.string(),
	repo_slug: z.string(),
	include: z.string().optional(), // Branch, tag, or hash to include history from
	exclude: z.string().optional(), // Branch, tag, or hash to exclude history up to
	path: z.string().optional(), // File path to filter commits by
	page: z.number().optional(),
	pagelen: z.number().optional(),
});
export type ListCommitsParams = z.infer<typeof ListCommitsParamsSchema>;

/**
 * Commit author user links schema
 */
const CommitAuthorUserLinksSchema = z.object({
	self: LinkSchema.optional(),
	avatar: LinkSchema.optional(),
});

/**
 * Commit author user schema
 */
const CommitAuthorUserSchema = z.object({
	display_name: z.string().optional(),
	nickname: z.string().optional(),
	account_id: z.string().optional(),
	uuid: z.string().optional(),
	type: z.string(), // Usually 'user'
	links: CommitAuthorUserLinksSchema.optional(),
});

/**
 * Commit author schema
 */
export const CommitAuthorSchema = z.object({
	raw: z.string(),
	type: z.string(), // Usually 'author'
	user: CommitAuthorUserSchema.optional(),
});

/**
 * Commit links schema
 */
const CommitLinksSchema = z.object({
	self: LinkSchema.optional(),
	html: LinkSchema.optional(),
	diff: LinkSchema.optional(),
	approve: LinkSchema.optional(),
	comments: LinkSchema.optional(),
});

/**
 * Commit summary schema
 */
const CommitSummarySchema = z.object({
	raw: z.string().optional(),
	markup: z.string().optional(),
	html: z.string().optional(),
});

/**
 * Commit parent schema
 */
const CommitParentSchema = z.object({
	hash: z.string(),
	type: z.string(),
	links: z.unknown(),
});

/**
 * Represents a single commit in the history.
 */
export const CommitSchema = z.object({
	hash: z.string(),
	type: z.string(), // Usually 'commit'
	author: CommitAuthorSchema,
	date: z.string(), // ISO 8601 format date string
	message: z.string(),
	links: CommitLinksSchema,
	summary: CommitSummarySchema.optional(),
	parents: z.array(CommitParentSchema),
});
export type Commit = z.infer<typeof CommitSchema>;

/**
 * API response for listing commits (paginated).
 */
export const PaginatedCommitsSchema = z.object({
	pagelen: z.number(),
	page: z.number().optional(),
	size: z.number().optional(),
	next: z.string().optional(),
	previous: z.string().optional(),
	values: z.array(CommitSchema),
});
export type PaginatedCommits = z.infer<typeof PaginatedCommitsSchema>;

/**
 * Parameters for creating a branch.
 */
export const CreateBranchParamsSchema = z.object({
	workspace: z.string(),
	repo_slug: z.string(),
	name: z.string(), // New branch name
	target: z.object({
		hash: z.string(), // Source branch name or commit hash
	}),
});
export type CreateBranchParams = z.infer<typeof CreateBranchParamsSchema>;

/**
 * Response object when creating a branch.
 * Contains details about the newly created branch reference.
 */
export const BranchRefSchema = z.object({
	type: z.literal('branch'),
	name: z.string(),
	target: z.object({
		hash: z.string(),
		type: z.string(), // e.g., 'commit'
	}),
});
export type BranchRef = z.infer<typeof BranchRefSchema>;

/**
 * Parameters for getting a file's content from a repository.
 */
export const GetFileContentParamsSchema = z.object({
	workspace: z.string(),
	repo_slug: z.string(),
	commit: z.string(), // Branch name, tag, or commit hash
	path: z.string(), // File path within the repository
});
export type GetFileContentParams = z.infer<typeof GetFileContentParamsSchema>;

/**
 * Represents a branch target (usually a commit).
 */
export const BranchTargetSchema = z.object({
	hash: z.string(),
	type: z.string(), // Usually 'commit'
});

/**
 * Represents a branch in a Bitbucket repository.
 */
export const BranchSchema = z.object({
	name: z.string(),
	type: z.literal('branch'),
	target: BranchTargetSchema,
	merge_strategies: z.array(z.string()).optional(),
	default_merge_strategy: z.string().optional(),
	links: z.record(z.string(), z.unknown()).optional(),
});

/**
 * Parameters for listing branches in a repository.
 */
export const ListBranchesParamsSchema = z.object({
	workspace: z.string(),
	repo_slug: z.string(),
	page: z.number().optional(),
	pagelen: z.number().optional(),
	q: z.string().optional(), // Query for filtering branches
	sort: z.string().optional(), // Sort field
});
export type ListBranchesParams = z.infer<typeof ListBranchesParamsSchema>;

/**
 * API response for listing branches (paginated).
 */
export const BranchesResponseSchema = z.object({
	pagelen: z.number(),
	page: z.number().optional(),
	size: z.number().optional(),
	next: z.string().optional(),
	previous: z.string().optional(),
	values: z.array(BranchSchema),
});
export type BranchesResponse = z.infer<typeof BranchesResponseSchema>;

```
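
A hedged validation sketch using the schemas above. The payload is a hypothetical, trimmed Bitbucket list response; real responses contain many more fields, which `z.object` strips by default unless the schema is made strict or passthrough:

```typescript
import {
	RepositoriesResponseSchema,
	type RepositoriesResponse,
} from './vendor.atlassian.repositories.types.js';

const raw: unknown = {
	pagelen: 10,
	page: 1,
	size: 1,
	values: [
		{
			type: 'repository',
			uuid: '{123e4567-e89b-12d3-a456-426614174000}', // hypothetical UUID
			full_name: 'acme/service-a',
			name: 'service-a',
			is_private: true,
			scm: 'git',
			owner: { type: 'team', display_name: 'Acme' },
			links: {
				clone: [{ name: 'ssh', href: 'git@bitbucket.org:acme/service-a.git' }],
			},
		},
	],
};

// Throws a ZodError if the payload drifts from the expected shape.
const parsed: RepositoriesResponse = RepositoriesResponseSchema.parse(raw);
// parsed.values[0].full_name === 'acme/service-a'
```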