#
tokens: 26748/50000 8/59 files (page 2/3)
lines: off (toggle) GitHub
raw markdown copy
This is page 2 of 3. Use http://codebase.md/aashari/mcp-server-atlassian-bitbucket?page={x} to view the full context.

# Directory Structure

```
├── .env.example
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── ci-dependabot-auto-merge.yml
│       ├── ci-dependency-check.yml
│       └── ci-semantic-release.yml
├── .gitignore
├── .gitkeep
├── .npmignore
├── .npmrc
├── .prettierrc
├── .releaserc.json
├── .trigger-ci
├── CHANGELOG.md
├── eslint.config.mjs
├── package-lock.json
├── package.json
├── README.md
├── scripts
│   ├── ensure-executable.js
│   ├── package.json
│   └── update-version.js
├── src
│   ├── cli
│   │   ├── atlassian.api.cli.ts
│   │   ├── atlassian.repositories.cli.ts
│   │   └── index.ts
│   ├── controllers
│   │   ├── atlassian.api.controller.ts
│   │   └── atlassian.repositories.content.controller.ts
│   ├── index.ts
│   ├── services
│   │   ├── vendor.atlassian.repositories.service.test.ts
│   │   ├── vendor.atlassian.repositories.service.ts
│   │   ├── vendor.atlassian.repositories.types.ts
│   │   ├── vendor.atlassian.workspaces.service.ts
│   │   ├── vendor.atlassian.workspaces.test.ts
│   │   └── vendor.atlassian.workspaces.types.ts
│   ├── tools
│   │   ├── atlassian.api.tool.ts
│   │   ├── atlassian.api.types.ts
│   │   ├── atlassian.repositories.tool.ts
│   │   └── atlassian.repositories.types.ts
│   ├── types
│   │   └── common.types.ts
│   └── utils
│       ├── bitbucket-error-detection.test.ts
│       ├── cli.test.util.ts
│       ├── config.util.test.ts
│       ├── config.util.ts
│       ├── constants.util.ts
│       ├── error-handler.util.test.ts
│       ├── error-handler.util.ts
│       ├── error.util.test.ts
│       ├── error.util.ts
│       ├── formatter.util.ts
│       ├── jest.setup.ts
│       ├── jq.util.ts
│       ├── logger.util.ts
│       ├── pagination.util.ts
│       ├── response.util.ts
│       ├── shell.util.ts
│       ├── toon.util.test.ts
│       ├── toon.util.ts
│       ├── transport.util.test.ts
│       ├── transport.util.ts
│       └── workspace.util.ts
├── STYLE_GUIDE.md
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/src/utils/logger.util.ts:
--------------------------------------------------------------------------------

```typescript
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
import * as crypto from 'crypto';

/**
 * Produce the timestamp prefix used on every log line.
 * @returns Time of day formatted as [HH:MM:SS] (UTC, taken from the ISO string)
 */
function getTimestamp(): string {
	// ISO strings look like 2024-01-01T12:34:56.789Z; chars 11-18 are HH:MM:SS
	const timeOfDay = new Date().toISOString().slice(11, 19);
	return `[${timeOfDay}]`;
}

/**
 * Safely convert a value to a JSON string with a size cap.
 *
 * @param obj Value to stringify
 * @param maxLength Maximum length of the resulting string (default 1000)
 * @returns The JSON string (truncated with a suffix when too long);
 *          String(obj) when JSON.stringify yields undefined (undefined,
 *          functions, symbols); a placeholder when stringification throws
 *          (circular structures, misbehaving toJSON, BigInt).
 */
function safeStringify(obj: unknown, maxLength = 1000): string {
	try {
		const str = JSON.stringify(obj);
		// JSON.stringify returns undefined (not a string) for undefined,
		// functions and symbols; fall back to String() instead of letting
		// the .length access below throw and mask the actual value.
		if (str === undefined) {
			return String(obj);
		}
		if (str.length <= maxLength) {
			return str;
		}
		return `${str.substring(0, maxLength)}... (truncated, ${str.length} chars total)`;
	} catch {
		return '[Object cannot be stringified]';
	}
}

/**
 * Pick a whitelisted subset of keys from an object (for compact logging).
 * @param obj Source object
 * @param keys Keys to copy when present as own properties
 * @returns New object holding only the requested own properties
 */
function extractEssentialValues(
	obj: Record<string, unknown>,
	keys: string[],
): Record<string, unknown> {
	const subset: Record<string, unknown> = {};
	for (const key of keys) {
		// Own-property check avoids pulling values off the prototype chain
		if (Object.prototype.hasOwnProperty.call(obj, key)) {
			subset[key] = obj[key];
		}
	}
	return subset;
}

/**
 * Build the standardized source tag used as a log-message prefix:
 * [module/file.ts@function] or [module/file.ts].
 *
 * @param filePath File path (a leading 'src/' is stripped for consistency)
 * @param functionName Optional function name appended after '@'
 * @returns The bracketed source tag
 */
function formatSourcePath(filePath: string, functionName?: string): string {
	const moduleName = filePath.replace(/^src\//, '');
	if (functionName) {
		return `[${moduleName}@${functionName}]`;
	}
	return `[${moduleName}]`;
}

/**
 * Check if debug logging is enabled for a specific module
 *
 * This function parses the DEBUG environment variable to determine if a specific
 * module should have debug logging enabled. The DEBUG variable can be:
 * - 'true' or '1': Enable all debug logging
 * - Comma-separated list of modules: Enable debug only for those modules
 * - Module patterns with wildcards: e.g., 'controllers/*' enables all controllers
 *
 * Examples:
 * - DEBUG=true
 * - DEBUG=controllers/*,services/aws.sso.auth.service.ts
 * - DEBUG=transport,utils/formatter*
 *
 * Regex metacharacters in patterns (e.g. '.', '(', '+') are treated as
 * literal characters; only '*' and '?' act as wildcards.
 *
 * @param modulePath The module path to check against DEBUG patterns
 * @returns true if debug is enabled for this module, false otherwise
 */
function isDebugEnabledForModule(modulePath: string): boolean {
	const debugEnv = process.env.DEBUG;

	if (!debugEnv) {
		return false;
	}

	// If debug is set to true or 1, enable all debug logging
	if (debugEnv === 'true' || debugEnv === '1') {
		return true;
	}

	// Parse comma-separated debug patterns
	const debugPatterns = debugEnv.split(',').map((p) => p.trim());

	// Check if the module matches any pattern
	return debugPatterns.some((pattern) => {
		// Escape regex metacharacters first so patterns like
		// 'utils/formatter.util.ts' match literally (and malformed input
		// like 'foo(' cannot throw a SyntaxError from the RegExp
		// constructor), then convert the glob wildcards:
		// * matches any run of characters, ? matches a single character
		const regexPattern = pattern
			.replace(/[.+^${}()|[\]\\]/g, '\\$&') // Escape regex specials
			.replace(/\*/g, '.*') // Convert * to regex .*
			.replace(/\?/g, '.'); // Convert ? to regex .

		const regex = new RegExp(`^${regexPattern}$`);
		return (
			regex.test(modulePath) ||
			// Check for pattern matches without the 'src/' prefix
			regex.test(modulePath.replace(/^src\//, ''))
		);
	});
}

// Unique ID for this process's logging session (one log file per session)
const SESSION_ID = crypto.randomUUID();

/**
 * Resolve the package name used to label log files.
 * Prefers the name field of ./package.json (with any "@scope/" prefix
 * stripped); falls back to the PACKAGE_NAME env var or 'mcp-server'.
 */
const getPkgName = (): string => {
	const fallback = process.env.PACKAGE_NAME || 'mcp-server';
	try {
		const manifestPath = path.resolve(process.cwd(), 'package.json');
		if (!fs.existsSync(manifestPath)) {
			return fallback;
		}
		const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf8'));
		if (!manifest.name) {
			return fallback;
		}
		// Strip a leading "@scope/" so scoped packages log their bare name
		const match = manifest.name.match(/(@[\w-]+\/)?(.+)/);
		return match ? match[2] : manifest.name;
	} catch {
		// Unreadable or malformed package.json — silently use the default
		return fallback;
	}
};

// MCP logs directory setup: every session writes to ~/.mcp/data/<pkg>.<uuid>.log
const HOME_DIR = os.homedir();
const MCP_DATA_DIR = path.join(HOME_DIR, '.mcp', 'data');
const CLI_NAME = getPkgName();

// Create the log file path with session ID
const LOG_FILENAME = `${CLI_NAME}.${SESSION_ID}.log`;
const LOG_FILEPATH = path.join(MCP_DATA_DIR, LOG_FILENAME);

// Ensure the MCP data directory exists and write the session header.
// Wrapped in try/catch so an unwritable filesystem (read-only container,
// restricted home directory) degrades to console-only logging instead of
// crashing the whole process at module load time; the later per-line
// appendFileSync calls already guard their own failures the same way.
try {
	if (!fs.existsSync(MCP_DATA_DIR)) {
		fs.mkdirSync(MCP_DATA_DIR, { recursive: true });
	}

	// Write initial log header
	fs.writeFileSync(
		LOG_FILEPATH,
		`# ${CLI_NAME} Log Session\n` +
			`Session ID: ${SESSION_ID}\n` +
			`Started: ${new Date().toISOString()}\n` +
			`Process ID: ${process.pid}\n` +
			`Working Directory: ${process.cwd()}\n` +
			`Command: ${process.argv.join(' ')}\n\n` +
			`## Log Entries\n\n`,
		'utf8',
	);
} catch (err) {
	console.error(`Failed to initialize log file ${LOG_FILEPATH}: ${err}`);
}

// Logger singleton to track initialization
let isLoggerInitialized = false;

/**
 * Logger class for consistent logging across the application.
 *
 * RECOMMENDED USAGE:
 *
 * 1. Create a file-level logger using the static forContext method:
 *    ```
 *    const logger = Logger.forContext('controllers/myController.ts');
 *    ```
 *
 * 2. For method-specific logging, create a method logger:
 *    ```
 *    const methodLogger = Logger.forContext('controllers/myController.ts', 'myMethod');
 *    ```
 *
 * 3. Avoid using raw string prefixes in log messages. Instead, use contextualized loggers.
 *
 * 4. For debugging objects, use the debugResponse method to log only essential properties.
 *
 * 5. Set DEBUG environment variable to control which modules show debug logs:
 *    - DEBUG=true (enable all debug logs)
 *    - DEBUG=controllers/*,services/* (enable for specific module groups)
 *    - DEBUG=transport,utils/formatter* (enable specific modules, supports wildcards)
 */
class Logger {
	private context?: string;
	private modulePath: string;
	private static sessionId = SESSION_ID;
	private static logFilePath = LOG_FILEPATH;

	constructor(context?: string, modulePath: string = '') {
		this.context = context;
		this.modulePath = modulePath;

		// Log initialization message only once
		if (!isLoggerInitialized) {
			this.info(
				`Logger initialized with session ID: ${Logger.sessionId}`,
			);
			this.info(`Logs will be saved to: ${Logger.logFilePath}`);
			isLoggerInitialized = true;
		}
	}

	/**
	 * Create a contextualized logger for a specific file or component.
	 * This is the preferred method for creating loggers.
	 *
	 * @param filePath The file path (e.g., 'controllers/aws.sso.auth.controller.ts')
	 * @param functionName Optional function name for more specific context
	 * @returns A new Logger instance with the specified context
	 *
	 * @example
	 * // File-level logger
	 * const logger = Logger.forContext('controllers/myController.ts');
	 *
	 * // Method-level logger
	 * const methodLogger = Logger.forContext('controllers/myController.ts', 'myMethod');
	 */
	static forContext(filePath: string, functionName?: string): Logger {
		return new Logger(formatSourcePath(filePath, functionName), filePath);
	}

	/**
	 * Create a method level logger from a context logger
	 * @param method Method name
	 * @returns A new logger with the method context
	 */
	forMethod(method: string): Logger {
		return Logger.forContext(this.modulePath, method);
	}

	private _formatMessage(message: string): string {
		return this.context ? `${this.context} ${message}` : message;
	}

	private _formatArgs(args: unknown[]): unknown[] {
		// If the first argument is an object and not an Error, safely stringify it
		if (
			args.length > 0 &&
			typeof args[0] === 'object' &&
			args[0] !== null &&
			!(args[0] instanceof Error)
		) {
			args[0] = safeStringify(args[0]);
		}
		return args;
	}

	_log(
		level: 'info' | 'warn' | 'error' | 'debug',
		message: string,
		...args: unknown[]
	) {
		// Skip debug messages if not enabled for this module
		if (level === 'debug' && !isDebugEnabledForModule(this.modulePath)) {
			return;
		}

		const timestamp = getTimestamp();
		const prefix = `${timestamp} [${level.toUpperCase()}]`;
		let logMessage = `${prefix} ${this._formatMessage(message)}`;

		const formattedArgs = this._formatArgs(args);
		if (formattedArgs.length > 0) {
			// Handle errors specifically
			if (formattedArgs[0] instanceof Error) {
				const error = formattedArgs[0] as Error;
				logMessage += ` Error: ${error.message}`;
				if (error.stack) {
					logMessage += `\n${error.stack}`;
				}
				// If there are more args, add them after the error
				if (formattedArgs.length > 1) {
					logMessage += ` ${formattedArgs
						.slice(1)
						.map((arg) =>
							typeof arg === 'string' ? arg : safeStringify(arg),
						)
						.join(' ')}`;
				}
			} else {
				logMessage += ` ${formattedArgs
					.map((arg) =>
						typeof arg === 'string' ? arg : safeStringify(arg),
					)
					.join(' ')}`;
			}
		}

		// Write to log file
		try {
			fs.appendFileSync(Logger.logFilePath, `${logMessage}\n`, 'utf8');
		} catch (err) {
			// If we can't write to the log file, log the error to console
			console.error(`Failed to write to log file: ${err}`);
		}

		if (process.env.NODE_ENV === 'test') {
			console[level](logMessage);
		} else {
			console.error(logMessage);
		}
	}

	info(message: string, ...args: unknown[]) {
		this._log('info', message, ...args);
	}

	warn(message: string, ...args: unknown[]) {
		this._log('warn', message, ...args);
	}

	error(message: string, ...args: unknown[]) {
		this._log('error', message, ...args);
	}

	debug(message: string, ...args: unknown[]) {
		this._log('debug', message, ...args);
	}

	/**
	 * Log essential information about an API response
	 * @param message Log message
	 * @param response API response object
	 * @param essentialKeys Keys to extract from the response
	 */
	debugResponse(
		message: string,
		response: Record<string, unknown>,
		essentialKeys: string[],
	) {
		const essentialInfo = extractEssentialValues(response, essentialKeys);
		this.debug(message, essentialInfo);
	}

	/**
	 * Get the current session ID
	 * @returns The UUID for the current logging session
	 */
	static getSessionId(): string {
		return Logger.sessionId;
	}

	/**
	 * Get the current log file path
	 * @returns The path to the current log file
	 */
	static getLogFilePath(): string {
		return Logger.logFilePath;
	}
}

// Only export the Logger class to enforce contextual logging via Logger.forContext
export { Logger };

```

--------------------------------------------------------------------------------
/src/tools/atlassian.api.tool.ts:
--------------------------------------------------------------------------------

```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { Logger } from '../utils/logger.util.js';
import { formatErrorForMcpTool } from '../utils/error.util.js';
import { truncateForAI } from '../utils/formatter.util.js';
import {
	GetApiToolArgs,
	type GetApiToolArgsType,
	RequestWithBodyArgs,
	type RequestWithBodyArgsType,
	DeleteApiToolArgs,
} from './atlassian.api.types.js';
import {
	handleGet,
	handlePost,
	handlePut,
	handlePatch,
	handleDelete,
} from '../controllers/atlassian.api.controller.js';

// File-level logger; the module path lets DEBUG env patterns target this file
const toolLogger = Logger.forContext('tools/atlassian.api.tool.ts');

// Log tool initialization (debug level — only emitted when DEBUG matches)
toolLogger.debug('Bitbucket API tool initialized');

/**
 * Build an MCP tool handler for body-less HTTP methods (GET/DELETE).
 *
 * @param methodName HTTP method name, used for the method logger and messages
 * @param handler Controller function that performs the request
 * @returns Async MCP tool handler returning a single text content item
 */
function createReadHandler(
	methodName: string,
	handler: (
		options: GetApiToolArgsType,
	) => Promise<{ content: string; rawResponsePath?: string | null }>,
) {
	return async (args: Record<string, unknown>) => {
		const methodLogger = Logger.forContext(
			'tools/atlassian.api.tool.ts',
			methodName.toLowerCase(),
		);
		methodLogger.debug(`Making ${methodName} request with args:`, args);

		try {
			const result = await handler(args as GetApiToolArgsType);
			methodLogger.debug(
				'Successfully retrieved response from controller',
			);

			// Truncate large payloads before handing them to the AI client
			const text = truncateForAI(result.content, result.rawResponsePath);
			return {
				content: [{ type: 'text' as const, text }],
			};
		} catch (error) {
			// Errors are converted to a structured MCP error response
			methodLogger.error(`Failed to make ${methodName} request`, error);
			return formatErrorForMcpTool(error);
		}
	};
}

/**
 * Build an MCP tool handler for HTTP methods that carry a JSON body
 * (POST/PUT/PATCH).
 *
 * @param methodName HTTP method name, used for the method logger and messages
 * @param handler Controller function that performs the request
 * @returns Async MCP tool handler returning a single text content item
 */
function createWriteHandler(
	methodName: string,
	handler: (
		options: RequestWithBodyArgsType,
	) => Promise<{ content: string; rawResponsePath?: string | null }>,
) {
	return async (args: Record<string, unknown>) => {
		const methodLogger = Logger.forContext(
			'tools/atlassian.api.tool.ts',
			methodName.toLowerCase(),
		);
		// Log only the body's keys, keeping the debug line compact
		const bodyKeys = args.body ? Object.keys(args.body as object) : [];
		methodLogger.debug(`Making ${methodName} request with args:`, {
			path: args.path,
			bodyKeys,
		});

		try {
			const result = await handler(args as RequestWithBodyArgsType);
			methodLogger.debug(
				'Successfully received response from controller',
			);

			// Truncate large payloads before handing them to the AI client
			const text = truncateForAI(result.content, result.rawResponsePath);
			return {
				content: [{ type: 'text' as const, text }],
			};
		} catch (error) {
			// Errors are converted to a structured MCP error response
			methodLogger.error(`Failed to make ${methodName} request`, error);
			return formatErrorForMcpTool(error);
		}
	};
}

// Create tool handlers.
// GET/DELETE take no request body; POST/PUT/PATCH accept a JSON body.
const get = createReadHandler('GET', handleGet);
const post = createWriteHandler('POST', handlePost);
const put = createWriteHandler('PUT', handlePut);
const patch = createWriteHandler('PATCH', handlePatch);
// 'delete' is a reserved word, hence the shortened local name
const del = createReadHandler('DELETE', handleDelete);

// Tool descriptions shown to the AI client. Backtick-escaped names (\`jq\`)
// render as inline code in the tool listing; keep the escapes intact.

// Description for the read-only bb_get tool
const BB_GET_DESCRIPTION = `Read any Bitbucket data. Returns TOON format by default (30-60% fewer tokens than JSON).

**IMPORTANT - Cost Optimization:**
- ALWAYS use \`jq\` param to filter response fields. Unfiltered responses are very expensive!
- Use \`pagelen\` query param to restrict result count (e.g., \`pagelen: "5"\`)
- If unsure about available fields, first fetch ONE item with \`pagelen: "1"\` and NO jq filter to explore the schema, then use jq in subsequent calls

**Schema Discovery Pattern:**
1. First call: \`path: "/workspaces", queryParams: {"pagelen": "1"}\` (no jq) - explore available fields
2. Then use: \`jq: "values[*].{slug: slug, name: name, uuid: uuid}"\` - extract only what you need

**Output format:** TOON (default, token-efficient) or JSON (\`outputFormat: "json"\`)

**Common paths:**
- \`/workspaces\` - list workspaces
- \`/repositories/{workspace}\` - list repos in workspace
- \`/repositories/{workspace}/{repo}\` - get repo details
- \`/repositories/{workspace}/{repo}/pullrequests\` - list PRs
- \`/repositories/{workspace}/{repo}/pullrequests/{id}\` - get PR details
- \`/repositories/{workspace}/{repo}/pullrequests/{id}/comments\` - list PR comments
- \`/repositories/{workspace}/{repo}/pullrequests/{id}/diff\` - get PR diff
- \`/repositories/{workspace}/{repo}/refs/branches\` - list branches
- \`/repositories/{workspace}/{repo}/commits\` - list commits
- \`/repositories/{workspace}/{repo}/src/{commit}/{filepath}\` - get file content
- \`/repositories/{workspace}/{repo}/diff/{source}..{destination}\` - compare branches/commits

**Query params:** \`pagelen\` (page size), \`page\` (page number), \`q\` (filter), \`sort\` (order), \`fields\` (sparse response)

**Example filters (q param):** \`state="OPEN"\`, \`source.branch.name="feature"\`, \`title~"bug"\`

**JQ examples:** \`values[*].slug\`, \`values[0]\`, \`values[*].{name: name, uuid: uuid}\`

The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;

// Description for the resource-creating bb_post tool
const BB_POST_DESCRIPTION = `Create Bitbucket resources. Returns TOON format by default (token-efficient).

**IMPORTANT - Cost Optimization:**
- Use \`jq\` param to extract only needed fields from response (e.g., \`jq: "{id: id, title: title}"\`)
- Unfiltered responses include all metadata and are expensive!

**Output format:** TOON (default) or JSON (\`outputFormat: "json"\`)

**Common operations:**

1. **Create PR:** \`/repositories/{workspace}/{repo}/pullrequests\`
   body: \`{"title": "...", "source": {"branch": {"name": "feature"}}, "destination": {"branch": {"name": "main"}}}\`

2. **Add PR comment:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/comments\`
   body: \`{"content": {"raw": "Comment text"}}\`

3. **Approve PR:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/approve\`
   body: \`{}\`

4. **Request changes:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/request-changes\`
   body: \`{}\`

5. **Merge PR:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/merge\`
   body: \`{"merge_strategy": "squash"}\` (strategies: merge_commit, squash, fast_forward)

The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;

// Description for the full-replace bb_put tool
const BB_PUT_DESCRIPTION = `Replace Bitbucket resources (full update). Returns TOON format by default.

**IMPORTANT - Cost Optimization:**
- Use \`jq\` param to extract only needed fields from response
- Example: \`jq: "{uuid: uuid, name: name}"\`

**Output format:** TOON (default) or JSON (\`outputFormat: "json"\`)

**Common operations:**

1. **Update repository:** \`/repositories/{workspace}/{repo}\`
   body: \`{"description": "...", "is_private": true, "has_issues": true}\`

2. **Create/update file:** \`/repositories/{workspace}/{repo}/src\`
   Note: Use multipart form data for file uploads (complex - prefer PATCH for metadata)

3. **Update branch restriction:** \`/repositories/{workspace}/{repo}/branch-restrictions/{id}\`
   body: \`{"kind": "push", "pattern": "main", "users": [{"uuid": "..."}]}\`

The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;

// Description for the partial-update bb_patch tool
const BB_PATCH_DESCRIPTION = `Partially update Bitbucket resources. Returns TOON format by default.

**IMPORTANT - Cost Optimization:** Use \`jq\` param to filter response fields.

**Output format:** TOON (default) or JSON (\`outputFormat: "json"\`)

**Common operations:**

1. **Update PR title/description:** \`/repositories/{workspace}/{repo}/pullrequests/{id}\`
   body: \`{"title": "New title", "description": "Updated description"}\`

2. **Update PR reviewers:** \`/repositories/{workspace}/{repo}/pullrequests/{id}\`
   body: \`{"reviewers": [{"uuid": "{user-uuid}"}]}\`

3. **Update repository properties:** \`/repositories/{workspace}/{repo}\`
   body: \`{"description": "New description"}\`

4. **Update comment:** \`/repositories/{workspace}/{repo}/pullrequests/{pr_id}/comments/{comment_id}\`
   body: \`{"content": {"raw": "Updated comment"}}\`

The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;

// Description for the destructive bb_delete tool
const BB_DELETE_DESCRIPTION = `Delete Bitbucket resources. Returns TOON format by default.

**Output format:** TOON (default) or JSON (\`outputFormat: "json"\`)

**Common operations:**

1. **Delete branch:** \`/repositories/{workspace}/{repo}/refs/branches/{branch_name}\`
2. **Delete PR comment:** \`/repositories/{workspace}/{repo}/pullrequests/{pr_id}/comments/{comment_id}\`
3. **Decline PR:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/decline\`
4. **Remove PR approval:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/approve\`
5. **Delete repository:** \`/repositories/{workspace}/{repo}\` (caution: irreversible)

Note: Most DELETE endpoints return 204 No Content on success.

The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;

/**
 * Register the generic Bitbucket API tools (bb_get, bb_post, bb_put,
 * bb_patch, bb_delete) with the MCP server.
 *
 * Uses the modern registerTool API (SDK v1.22.0+) rather than the
 * deprecated tool() method. Registration order is fixed: GET, POST,
 * PUT, PATCH, DELETE.
 *
 * @param server The MCP server instance to register tools on
 */
function registerTools(server: McpServer) {
	const registerLogger = Logger.forContext(
		'tools/atlassian.api.tool.ts',
		'registerTools',
	);
	registerLogger.debug('Registering API tools...');

	// Read tool: no request body, GetApiToolArgs schema
	server.registerTool(
		'bb_get',
		{
			title: 'Bitbucket GET Request',
			description: BB_GET_DESCRIPTION,
			inputSchema: GetApiToolArgs,
		},
		get,
	);

	// Write tools: all three share the RequestWithBodyArgs schema
	server.registerTool(
		'bb_post',
		{
			title: 'Bitbucket POST Request',
			description: BB_POST_DESCRIPTION,
			inputSchema: RequestWithBodyArgs,
		},
		post,
	);
	server.registerTool(
		'bb_put',
		{
			title: 'Bitbucket PUT Request',
			description: BB_PUT_DESCRIPTION,
			inputSchema: RequestWithBodyArgs,
		},
		put,
	);
	server.registerTool(
		'bb_patch',
		{
			title: 'Bitbucket PATCH Request',
			description: BB_PATCH_DESCRIPTION,
			inputSchema: RequestWithBodyArgs,
		},
		patch,
	);

	// Delete tool: no request body, DeleteApiToolArgs schema
	server.registerTool(
		'bb_delete',
		{
			title: 'Bitbucket DELETE Request',
			description: BB_DELETE_DESCRIPTION,
			inputSchema: DeleteApiToolArgs,
		},
		del,
	);

	registerLogger.debug('Successfully registered API tools');
}

export default { registerTools };

```

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.repositories.service.test.ts:
--------------------------------------------------------------------------------

```typescript
import atlassianRepositoriesService from './vendor.atlassian.repositories.service.js';
import { getAtlassianCredentials } from '../utils/transport.util.js';
import { config } from '../utils/config.util.js';
import { McpError } from '../utils/error.util.js';
import atlassianWorkspacesService from './vendor.atlassian.workspaces.service.js';
import { Repository } from './vendor.atlassian.repositories.types.js';
import { Logger } from '../utils/logger.util.js';

// Instantiate logger for the test file
// (module path lets DEBUG env patterns enable these logs selectively)
const logger = Logger.forContext(
	'services/vendor.atlassian.repositories.service.test.ts',
);

describe('Vendor Atlassian Repositories Service', () => {
	// Load configuration and check for credentials before all tests
	beforeAll(() => {
		config.load(); // Ensure config is loaded
		const credentials = getAtlassianCredentials();
		if (!credentials) {
			console.warn(
				'Skipping Atlassian Repositories Service tests: No credentials available',
			);
		}
	});

	// Helper function to skip tests when credentials are missing
	const skipIfNoCredentials = () => !getAtlassianCredentials();

	// Helper to get a valid workspace slug for testing
	async function getFirstWorkspaceSlug(): Promise<string | null> {
		if (skipIfNoCredentials()) return null;

		try {
			const listResult = await atlassianWorkspacesService.list({
				pagelen: 1,
			});
			return listResult.values.length > 0
				? listResult.values[0].workspace.slug
				: null;
		} catch (error) {
			console.warn(
				'Could not fetch workspace list for repository tests:',
				error,
			);
			return null;
		}
	}

	describe('list', () => {
		it('should return a list of repositories for a valid workspace', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			const result = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
			});
			logger.debug('List repositories result:', result);

			// Verify the response structure based on RepositoriesResponse
			expect(result).toHaveProperty('values');
			expect(Array.isArray(result.values)).toBe(true);
			expect(result).toHaveProperty('pagelen'); // Bitbucket uses pagelen
			expect(result).toHaveProperty('page');
			expect(result).toHaveProperty('size');

			if (result.values.length > 0) {
				// Verify the structure of the first repository in the list
				verifyRepositoryStructure(result.values[0]);
			}
		}, 30000); // Increased timeout

		it('should support pagination with pagelen and page', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// Get first page with limited results
			const result = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
				pagelen: 1,
			});

			expect(result).toHaveProperty('pagelen');
			// Allow pagelen to be greater than requested if API enforces minimum
			expect(result.pagelen).toBeGreaterThanOrEqual(1);
			expect(result.values.length).toBeLessThanOrEqual(result.pagelen);

			// If there are more items than the page size, expect pagination links
			if (result.size > result.pagelen) {
				expect(result).toHaveProperty('next');

				// Test requesting page 2 if available
				// Extract page parameter from next link if available
				if (result.next) {
					const nextPageUrl = new URL(result.next);
					const pageParam = nextPageUrl.searchParams.get('page');

					if (pageParam) {
						const page2 = parseInt(pageParam, 10);
						const page2Result =
							await atlassianRepositoriesService.list({
								workspace: workspaceSlug,
								pagelen: 1,
								page: page2,
							});

						expect(page2Result).toHaveProperty('page', page2);

						// If both pages have values, verify they're different repositories
						if (
							result.values.length > 0 &&
							page2Result.values.length > 0
						) {
							expect(result.values[0].uuid).not.toBe(
								page2Result.values[0].uuid,
							);
						}
					}
				}
			}
		}, 30000);

		it('should support filtering with q parameter', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// First get all repositories to find a potential query term
			const allRepos = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
			});

			// Skip if no repositories available
			if (allRepos.values.length === 0) {
				console.warn(
					'Skipping query filtering test: No repositories available',
				);
				return;
			}

			// Use the first repo's name as a query term
			const firstRepo = allRepos.values[0];
			// Take just the first word or first few characters to make filter less restrictive
			const queryTerm = firstRepo.name.split(' ')[0];

			// Test the query filter
			try {
				const result = await atlassianRepositoriesService.list({
					workspace: workspaceSlug,
					q: `name~"${queryTerm}"`,
				});

				// Verify basic response structure
				expect(result).toHaveProperty('values');

				// All returned repos should contain the query term in their name
				if (result.values.length > 0) {
					const nameMatches = result.values.some((repo) =>
						repo.name
							.toLowerCase()
							.includes(queryTerm.toLowerCase()),
					);
					expect(nameMatches).toBe(true);
				}
			} catch (error) {
				// If filtering isn't fully supported, we just log it
				console.warn(
					'Query filtering test encountered an error:',
					error instanceof Error ? error.message : String(error),
				);
			}
		}, 30000);

		it('should support sorting with sort parameter', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// Sort order can only be verified when at least two repos exist.
			const probe = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
				pagelen: 2,
			});
			if (probe.values.length < 2) {
				console.warn(
					'Skipping sort test: Need at least 2 repositories to verify sort order',
				);
				return;
			}

			// Fetch the first page sorted by name, ascending then descending.
			const resultAsc = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
				sort: 'name',
				pagelen: 2,
			});
			const resultDesc = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
				sort: '-name',
				pagelen: 2,
			});

			// Both responses must have the standard paginated shape.
			expect(resultAsc).toHaveProperty('values');
			expect(resultDesc).toHaveProperty('values');

			// Only compare ordering when each response returned two items.
			if (resultAsc.values.length >= 2 && resultDesc.values.length >= 2) {
				// Ascending: first name sorts before (or equal to) the second.
				expect(
					resultAsc.values[0].name.localeCompare(
						resultAsc.values[1].name,
					),
				).toBeLessThanOrEqual(0);
				// Descending: first name sorts after (or equal to) the second.
				expect(
					resultDesc.values[0].name.localeCompare(
						resultDesc.values[1].name,
					),
				).toBeGreaterThanOrEqual(0);
			}
		}, 30000);

		it('should throw an error for an invalid workspace', async () => {
			if (skipIfNoCredentials()) return;

			const invalidWorkspace =
				'this-workspace-definitely-does-not-exist-12345';

			// Expect the service call to reject with an McpError (likely 404).
			// Asserting the error class here keeps this test consistent with
			// the equivalent 'get' tests, which use .rejects.toThrow(McpError).
			await expect(
				atlassianRepositoriesService.list({
					workspace: invalidWorkspace,
				}),
			).rejects.toThrow(McpError);

			// Check for specific error properties
			try {
				await atlassianRepositoriesService.list({
					workspace: invalidWorkspace,
				});
				// If the call unexpectedly resolves, fail loudly instead of
				// silently skipping the assertions in the catch block below.
				throw new Error(
					'Expected list() to reject for an invalid workspace',
				);
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
			}
		}, 30000);
	});

	describe('get', () => {
		// Helper to get a valid repo for testing 'get'.
		// Returns null (so callers can skip) when credentials are missing,
		// no workspace is available, or the workspace has no repositories.
		async function getFirstRepositoryInfo(): Promise<{
			workspace: string;
			repoSlug: string;
		} | null> {
			if (skipIfNoCredentials()) return null;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) return null;

			try {
				const listResult = await atlassianRepositoriesService.list({
					workspace: workspaceSlug,
					pagelen: 1,
				});

				if (listResult.values.length === 0) return null;

				const fullName = listResult.values[0].full_name;
				// full_name is in format "workspace/repo_slug"
				const [workspace, repoSlug] = fullName.split('/');

				return { workspace, repoSlug };
			} catch (error) {
				console.warn(
					"Could not fetch repository list for 'get' test setup:",
					error,
				);
				return null;
			}
		}

		it('should return details for a valid workspace and repo_slug', async () => {
			const repoInfo = await getFirstRepositoryInfo();
			if (!repoInfo) {
				console.warn('Skipping get test: No repository found.');
				return;
			}

			const result = await atlassianRepositoriesService.get({
				workspace: repoInfo.workspace,
				repo_slug: repoInfo.repoSlug,
			});

			// Verify the response structure based on RepositoryDetailed
			expect(result).toHaveProperty('uuid');
			expect(result).toHaveProperty(
				'full_name',
				`${repoInfo.workspace}/${repoInfo.repoSlug}`,
			);
			expect(result).toHaveProperty('name');
			expect(result).toHaveProperty('type', 'repository');
			expect(result).toHaveProperty('is_private');
			expect(result).toHaveProperty('links');
			expect(result.links).toHaveProperty('html');
			expect(result).toHaveProperty('owner');
			expect(result.owner).toHaveProperty('type');
		}, 30000);

		it('should throw an McpError for a non-existent repo_slug', async () => {
			// Guard against missing credentials before making any API call,
			// consistent with every other test in this suite.
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			const invalidRepoSlug = 'this-repo-definitely-does-not-exist-12345';

			// Expect the service call to reject with an McpError (likely 404)
			await expect(
				atlassianRepositoriesService.get({
					workspace: workspaceSlug,
					repo_slug: invalidRepoSlug,
				}),
			).rejects.toThrow(McpError);

			// Check for specific error properties
			try {
				await atlassianRepositoriesService.get({
					workspace: workspaceSlug,
					repo_slug: invalidRepoSlug,
				});
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
			}
		}, 30000);

		it('should throw an McpError for a non-existent workspace', async () => {
			if (skipIfNoCredentials()) return;

			const invalidWorkspace =
				'this-workspace-definitely-does-not-exist-12345';
			const invalidRepoSlug = 'some-repo';

			// Expect the service call to reject with an McpError (likely 404)
			await expect(
				atlassianRepositoriesService.get({
					workspace: invalidWorkspace,
					repo_slug: invalidRepoSlug,
				}),
			).rejects.toThrow(McpError);

			// Check for specific error properties
			try {
				await atlassianRepositoriesService.get({
					workspace: invalidWorkspace,
					repo_slug: invalidRepoSlug,
				});
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
			}
		}, 30000);
	});
});

// Helper asserting that a Repository object exposes the core summary fields.
function verifyRepositoryStructure(repo: Repository) {
	// Every repository summary must carry these properties.
	const requiredProps = [
		'uuid',
		'name',
		'full_name',
		'is_private',
		'links',
		'owner',
	] as const;
	for (const prop of requiredProps) {
		expect(repo).toHaveProperty(prop);
	}
	// Repository entities are tagged with a fixed type discriminator.
	expect(repo).toHaveProperty('type', 'repository');
}

```

--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------

```json
{
  "compilerOptions": {
    /* Visit https://aka.ms/tsconfig to read more about this file */

    /* Projects */
    // "incremental": true,                              /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
    // "composite": true,                                /* Enable constraints that allow a TypeScript project to be used with project references. */
    // "tsBuildInfoFile": "./.tsbuildinfo",              /* Specify the path to .tsbuildinfo incremental compilation file. */
    // "disableSourceOfProjectReferenceRedirect": true,  /* Disable preferring source files instead of declaration files when referencing composite projects. */
    // "disableSolutionSearching": true,                 /* Opt a project out of multi-project reference checking when editing. */
    // "disableReferencedProjectLoad": true,             /* Reduce the number of projects loaded automatically by TypeScript. */

    /* Language and Environment */
    "target": "ES2020",                                  /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
    "lib": ["ES2020"],                                   /* Specify a set of bundled library declaration files that describe the target runtime environment. */
    // "jsx": "preserve",                                /* Specify what JSX code is generated. */
    // "libReplacement": true,                           /* Enable lib replacement. */
    // "experimentalDecorators": true,                   /* Enable experimental support for legacy experimental decorators. */
    // "emitDecoratorMetadata": true,                    /* Emit design-type metadata for decorated declarations in source files. */
    // "jsxFactory": "",                                 /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
    // "jsxFragmentFactory": "",                         /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
    // "jsxImportSource": "",                            /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
    // "reactNamespace": "",                             /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
    // "noLib": true,                                    /* Disable including any library files, including the default lib.d.ts. */
    // "useDefineForClassFields": true,                  /* Emit ECMAScript-standard-compliant class fields. */
    // "moduleDetection": "auto",                        /* Control what method is used to detect module-format JS files. */

    /* Modules */
    "module": "NodeNext",                                /* Specify what module code is generated. */
    // "rootDir": "./",                                  /* Specify the root folder within your source files. */
    "moduleResolution": "NodeNext",                      /* Specify how TypeScript looks up a file from a given module specifier. */
    // "baseUrl": "./",                                  /* Specify the base directory to resolve non-relative module names. */
    // "paths": {},                                      /* Specify a set of entries that re-map imports to additional lookup locations. */
    // "rootDirs": [],                                   /* Allow multiple folders to be treated as one when resolving modules. */
    // "typeRoots": [],                                  /* Specify multiple folders that act like './node_modules/@types'. */
    // "types": [],                                      /* Specify type package names to be included without being referenced in a source file. */
    // "allowUmdGlobalAccess": true,                     /* Allow accessing UMD globals from modules. */
    // "moduleSuffixes": [],                             /* List of file name suffixes to search when resolving a module. */
    // "allowImportingTsExtensions": true,               /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */
    // "rewriteRelativeImportExtensions": true,          /* Rewrite '.ts', '.tsx', '.mts', and '.cts' file extensions in relative import paths to their JavaScript equivalent in output files. */
    // "resolvePackageJsonExports": true,                /* Use the package.json 'exports' field when resolving package imports. */
    // "resolvePackageJsonImports": true,                /* Use the package.json 'imports' field when resolving imports. */
    // "customConditions": [],                           /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */
    // "noUncheckedSideEffectImports": true,             /* Check side effect imports. */
    "resolveJsonModule": true,                           /* Enable importing .json files. */
    // "allowArbitraryExtensions": true,                 /* Enable importing files with any extension, provided a declaration file is present. */
    // "noResolve": true,                                /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */

    /* JavaScript Support */
    // "allowJs": true,                                  /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
    // "checkJs": true,                                  /* Enable error reporting in type-checked JavaScript files. */
    // "maxNodeModuleJsDepth": 1,                        /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */

    /* Emit */
    "declaration": true,                                 /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
    // "declarationMap": true,                           /* Create sourcemaps for d.ts files. */
    // "emitDeclarationOnly": true,                      /* Only output d.ts files and not JavaScript files. */
    // "sourceMap": true,                                /* Create source map files for emitted JavaScript files. */
    // "inlineSourceMap": true,                          /* Include sourcemap files inside the emitted JavaScript. */
    // "noEmit": true,                                   /* Disable emitting files from a compilation. */
    // "outFile": "./",                                  /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
    "outDir": "./dist",                                  /* Specify an output folder for all emitted files. */
    // "removeComments": true,                           /* Disable emitting comments. */
    // "importHelpers": true,                            /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
    // "downlevelIteration": true,                       /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
    // "sourceRoot": "",                                 /* Specify the root path for debuggers to find the reference source code. */
    // "mapRoot": "",                                    /* Specify the location where debugger should locate map files instead of generated locations. */
    // "inlineSources": true,                            /* Include source code in the sourcemaps inside the emitted JavaScript. */
    // "emitBOM": true,                                  /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
    // "newLine": "crlf",                                /* Set the newline character for emitting files. */
    // "stripInternal": true,                            /* Disable emitting declarations that have '@internal' in their JSDoc comments. */
    // "noEmitHelpers": true,                            /* Disable generating custom helper functions like '__extends' in compiled output. */
    // "noEmitOnError": true,                            /* Disable emitting files if any type checking errors are reported. */
    // "preserveConstEnums": true,                       /* Disable erasing 'const enum' declarations in generated code. */
    // "declarationDir": "./",                           /* Specify the output directory for generated declaration files. */

    /* Interop Constraints */
    "isolatedModules": true,
    // "verbatimModuleSyntax": true,                     /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */
    // "isolatedDeclarations": true,                     /* Require sufficient annotation on exports so other tools can trivially generate declaration files. */
    // "erasableSyntaxOnly": true,                       /* Do not allow runtime constructs that are not part of ECMAScript. */
    // "allowSyntheticDefaultImports": true,             /* Allow 'import x from y' when a module doesn't have a default export. */
    "esModuleInterop": true,                             /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
    // "preserveSymlinks": true,                         /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
    "forceConsistentCasingInFileNames": true,            /* Ensure that casing is correct in imports. */

    /* Type Checking */
    "strict": true,                                      /* Enable all strict type-checking options. */
    "noImplicitAny": true,                               /* Enable error reporting for expressions and declarations with an implied 'any' type. */
    "strictNullChecks": true,                            /* When type checking, take into account 'null' and 'undefined'. */
    // "strictFunctionTypes": true,                      /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
    // "strictBindCallApply": true,                      /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */
    // "strictPropertyInitialization": true,             /* Check for class properties that are declared but not set in the constructor. */
    // "strictBuiltinIteratorReturn": true,              /* Built-in iterators are instantiated with a 'TReturn' type of 'undefined' instead of 'any'. */
    // "noImplicitThis": true,                           /* Enable error reporting when 'this' is given the type 'any'. */
    // "useUnknownInCatchVariables": true,               /* Default catch clause variables as 'unknown' instead of 'any'. */
    // "alwaysStrict": true,                             /* Ensure 'use strict' is always emitted. */
    "noUnusedLocals": true,                              /* Enable error reporting when local variables aren't read. */
    "noUnusedParameters": true,                          /* Raise an error when a function parameter isn't read. */
    // "exactOptionalPropertyTypes": true,               /* Interpret optional property types as written, rather than adding 'undefined'. */
    "noImplicitReturns": true,                           /* Enable error reporting for codepaths that do not explicitly return in a function. */
    // "noFallthroughCasesInSwitch": true,               /* Enable error reporting for fallthrough cases in switch statements. */
    // "noUncheckedIndexedAccess": true,                 /* Add 'undefined' to a type when accessed using an index. */
    // "noImplicitOverride": true,                       /* Ensure overriding members in derived classes are marked with an override modifier. */
    // "noPropertyAccessFromIndexSignature": true,       /* Enforces using indexed accessors for keys declared using an indexed type. */
    // "allowUnusedLabels": true,                        /* Disable error reporting for unused labels. */
    // "allowUnreachableCode": true,                     /* Disable error reporting for unreachable code. */

    /* Completeness */
    // "skipDefaultLibCheck": true,                      /* Skip type checking .d.ts files that are included with TypeScript. */
    "skipLibCheck": true                                 /* Skip type checking all .d.ts files. */
  },
  "include": ["src/**/*"]
}

```

--------------------------------------------------------------------------------
/src/utils/transport.util.ts:
--------------------------------------------------------------------------------

```typescript
import { Logger } from './logger.util.js';
import { config } from './config.util.js';
import { NETWORK_TIMEOUTS, DATA_LIMITS } from './constants.util.js';
import {
	createAuthInvalidError,
	createApiError,
	createUnexpectedError,
	McpError,
} from './error.util.js';
import { saveRawResponse } from './response.util.js';

/**
 * Interface for Atlassian API credentials.
 *
 * Two mutually exclusive credential styles are supported. The
 * `useBitbucketAuth` flag records which style was resolved by
 * getAtlassianCredentials(), and fetchAtlassian() reads it to decide
 * which pair of fields to encode into the Basic Authorization header.
 */
export interface AtlassianCredentials {
	// Standard Atlassian credentials (email + API token)
	siteName?: string;
	userEmail?: string;
	apiToken?: string;
	// Bitbucket-specific credentials (username + app password, alternative approach)
	bitbucketUsername?: string;
	bitbucketAppPassword?: string;
	// Indicates which auth method to use (true = Bitbucket username/app password)
	useBitbucketAuth?: boolean;
}

/**
 * Interface for HTTP request options accepted by fetchAtlassian().
 */
export interface RequestOptions {
	// HTTP verb; fetchAtlassian() defaults to 'GET' when omitted
	method?: 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE';
	// Extra headers, merged over the defaults (Authorization/Content-Type/Accept)
	headers?: Record<string, string>;
	// Request body; JSON.stringify-ed before sending
	body?: unknown;
	// Per-request timeout in milliseconds; overrides the configured default
	timeout?: number;
}

/**
 * Transport response wrapper that includes the data and the path to the raw response file.
 * `rawResponsePath` is null when no raw response was saved (204, empty, or non-JSON bodies).
 */
export interface TransportResponse<T> {
	data: T;
	rawResponsePath: string | null;
}

// Create a contextualized logger for this file
const transportLogger = Logger.forContext('utils/transport.util.ts');

// Log transport utility initialization (runs once at module load)
transportLogger.debug('Transport utility initialized');

/**
 * Resolve Atlassian credentials from environment variables.
 *
 * Prefers the standard Atlassian trio (site name, user email, API token);
 * falls back to the Bitbucket-specific username/app-password pair.
 *
 * @returns AtlassianCredentials object, or null when neither credential
 *          set is configured.
 */
export function getAtlassianCredentials(): AtlassianCredentials | null {
	const methodLogger = Logger.forContext(
		'utils/transport.util.ts',
		'getAtlassianCredentials',
	);

	// Standard Atlassian credentials take precedence for consistency.
	const siteName = config.get('ATLASSIAN_SITE_NAME');
	const userEmail = config.get('ATLASSIAN_USER_EMAIL');
	const apiToken = config.get('ATLASSIAN_API_TOKEN');

	if (userEmail && apiToken) {
		methodLogger.debug('Using standard Atlassian credentials');
		return {
			siteName,
			userEmail,
			apiToken,
			useBitbucketAuth: false,
		};
	}

	// Fall back to the Bitbucket-specific credential pair.
	const bitbucketUsername = config.get('ATLASSIAN_BITBUCKET_USERNAME');
	const bitbucketAppPassword = config.get('ATLASSIAN_BITBUCKET_APP_PASSWORD');

	if (bitbucketUsername && bitbucketAppPassword) {
		methodLogger.debug('Using Bitbucket-specific credentials');
		return {
			bitbucketUsername,
			bitbucketAppPassword,
			useBitbucketAuth: true,
		};
	}

	// Neither credential set is configured.
	methodLogger.warn(
		'Missing Atlassian credentials. Please set either ATLASSIAN_SITE_NAME, ATLASSIAN_USER_EMAIL, and ATLASSIAN_API_TOKEN environment variables, or ATLASSIAN_BITBUCKET_USERNAME and ATLASSIAN_BITBUCKET_APP_PASSWORD for Bitbucket-specific auth.',
	);
	return null;
}

/**
 * Fetch data from the Bitbucket REST API (https://api.bitbucket.org).
 *
 * Builds a Basic Authorization header from the supplied credentials
 * (Bitbucket username/app-password when `useBitbucketAuth` is set,
 * otherwise Atlassian email/API-token), performs the request with a
 * configurable timeout, enforces a maximum response size, and
 * normalizes Bitbucket/Atlassian error payloads into McpError.
 *
 * @param credentials Atlassian API credentials
 * @param path API endpoint path (without base URL); a leading slash is added if missing
 * @param options Request options (method, headers, body, timeout)
 * @returns Response data wrapped with the raw-response file path
 *          (rawResponsePath is null for 204, empty, plain-text, or unparseable bodies)
 * @throws {McpError} on auth failure (401), permission/not-found/rate-limit/service
 *         errors (403/404/429/5xx), oversized responses (413), timeouts (408),
 *         network errors, and unexpected failures
 */
export async function fetchAtlassian<T>(
	credentials: AtlassianCredentials,
	path: string,
	options: RequestOptions = {},
): Promise<TransportResponse<T>> {
	const methodLogger = Logger.forContext(
		'utils/transport.util.ts',
		'fetchAtlassian',
	);

	const baseUrl = 'https://api.bitbucket.org';

	// Set up auth headers based on credential type
	let authHeader: string;

	if (credentials.useBitbucketAuth) {
		// Bitbucket API uses a different auth format
		if (
			!credentials.bitbucketUsername ||
			!credentials.bitbucketAppPassword
		) {
			throw createAuthInvalidError(
				'Missing Bitbucket username or app password',
			);
		}
		authHeader = `Basic ${Buffer.from(
			`${credentials.bitbucketUsername}:${credentials.bitbucketAppPassword}`,
		).toString('base64')}`;
	} else {
		// Standard Atlassian API (Jira, Confluence)
		if (!credentials.userEmail || !credentials.apiToken) {
			throw createAuthInvalidError('Missing Atlassian credentials');
		}
		authHeader = `Basic ${Buffer.from(
			`${credentials.userEmail}:${credentials.apiToken}`,
		).toString('base64')}`;
	}

	// Ensure path starts with a slash
	const normalizedPath = path.startsWith('/') ? path : `/${path}`;

	// Construct the full URL
	const url = `${baseUrl}${normalizedPath}`;

	// Set up authentication and headers; caller-supplied headers win on conflict
	const headers = {
		Authorization: authHeader,
		'Content-Type': 'application/json',
		Accept: 'application/json',
		...options.headers,
	};

	// Prepare request options
	const requestOptions: RequestInit = {
		method: options.method || 'GET',
		headers,
		body: options.body ? JSON.stringify(options.body) : undefined,
	};

	methodLogger.debug(`Calling Atlassian API: ${url}`);

	// Set up timeout handling with configurable values; a per-request
	// options.timeout overrides the ATLASSIAN_REQUEST_TIMEOUT config value
	const defaultTimeout = config.getNumber(
		'ATLASSIAN_REQUEST_TIMEOUT',
		NETWORK_TIMEOUTS.DEFAULT_REQUEST_TIMEOUT,
	);
	const timeoutMs = options.timeout ?? defaultTimeout;
	const controller = new AbortController();
	const timeoutId = setTimeout(() => {
		methodLogger.warn(`Request timeout after ${timeoutMs}ms: ${url}`);
		controller.abort();
	}, timeoutMs);

	// Add abort signal to request options
	requestOptions.signal = controller.signal;

	// Track API call performance
	const startTime = performance.now();

	try {
		const response = await fetch(url, requestOptions);
		clearTimeout(timeoutId);
		const endTime = performance.now();
		const requestDuration = (endTime - startTime).toFixed(2);

		// Log the raw response status and headers
		methodLogger.debug(
			`Raw response received: ${response.status} ${response.statusText}`,
			{
				url,
				status: response.status,
				statusText: response.statusText,
				headers: Object.fromEntries(response.headers.entries()),
			},
		);

		// Validate response size to prevent excessive memory usage (CWE-770).
		// NOTE(review): only enforced when the server sends Content-Length —
		// chunked responses without the header bypass this check.
		const contentLength = response.headers.get('content-length');
		if (contentLength) {
			const responseSize = parseInt(contentLength, 10);
			if (responseSize > DATA_LIMITS.MAX_RESPONSE_SIZE) {
				methodLogger.warn(
					`Response size ${responseSize} bytes exceeds limit of ${DATA_LIMITS.MAX_RESPONSE_SIZE} bytes`,
				);
				throw createApiError(
					`Response size (${Math.round(responseSize / (1024 * 1024))}MB) exceeds maximum limit of ${Math.round(DATA_LIMITS.MAX_RESPONSE_SIZE / (1024 * 1024))}MB`,
					413,
					{ responseSize, limit: DATA_LIMITS.MAX_RESPONSE_SIZE },
				);
			}
		}

		if (!response.ok) {
			const errorText = await response.text();
			methodLogger.error(
				`API error: ${response.status} ${response.statusText}`,
				errorText,
			);

			// Try to parse the error response
			let errorMessage = `${response.status} ${response.statusText}`;
			let parsedBitbucketError = null;

			try {
				if (
					errorText &&
					(errorText.startsWith('{') || errorText.startsWith('['))
				) {
					const parsedError = JSON.parse(errorText);

					// Extract specific error details from various Bitbucket API response formats
					if (
						parsedError.type === 'error' &&
						parsedError.error &&
						parsedError.error.message
					) {
						// Format: {"type":"error", "error":{"message":"...", "detail":"..."}}
						parsedBitbucketError = parsedError.error;
						errorMessage = parsedBitbucketError.message;
						if (parsedBitbucketError.detail) {
							errorMessage += ` Detail: ${parsedBitbucketError.detail}`;
						}
					} else if (parsedError.error && parsedError.error.message) {
						// Alternative error format: {"error": {"message": "..."}}
						parsedBitbucketError = parsedError.error;
						errorMessage = parsedBitbucketError.message;
					} else if (
						parsedError.errors &&
						Array.isArray(parsedError.errors) &&
						parsedError.errors.length > 0
					) {
						// Format: {"errors":[{"status":400,"code":"INVALID_REQUEST_PARAMETER","title":"..."}]}
						const atlassianError = parsedError.errors[0];
						if (atlassianError.title) {
							errorMessage = atlassianError.title;
							parsedBitbucketError = atlassianError;
						}
					} else if (parsedError.message) {
						// Format: {"message":"Some error message"}
						errorMessage = parsedError.message;
						parsedBitbucketError = parsedError;
					}
				}
			} catch (parseError) {
				methodLogger.debug(`Error parsing error response:`, parseError);
				// Fall back to the default error message
			}

			// Log the parsed error or raw error text
			methodLogger.debug(
				'Parsed Bitbucket error:',
				parsedBitbucketError || errorText,
			);

			// Use parsedBitbucketError (or errorText if parsing failed) as originalError
			const originalErrorForMcp = parsedBitbucketError || errorText;

			// Handle common Bitbucket API error status codes
			if (response.status === 401) {
				throw createAuthInvalidError(
					`Bitbucket API: Authentication failed - ${errorMessage}`,
					originalErrorForMcp,
				);
			}

			if (response.status === 403) {
				throw createApiError(
					`Bitbucket API: Permission denied - ${errorMessage}`,
					403,
					originalErrorForMcp,
				);
			}

			if (response.status === 404) {
				throw createApiError(
					`Bitbucket API: Resource not found - ${errorMessage}`,
					404,
					originalErrorForMcp,
				);
			}

			if (response.status === 429) {
				throw createApiError(
					`Bitbucket API: Rate limit exceeded - ${errorMessage}`,
					429,
					originalErrorForMcp,
				);
			}

			if (response.status >= 500) {
				throw createApiError(
					`Bitbucket API: Service error - ${errorMessage}`,
					response.status,
					originalErrorForMcp,
				);
			}

			// For other API errors, preserve the original vendor message
			throw createApiError(
				`Bitbucket API Error: ${errorMessage}`,
				response.status,
				originalErrorForMcp,
			);
		}

		// Handle 204 No Content responses (common for DELETE operations)
		if (response.status === 204) {
			methodLogger.debug('Received 204 No Content response');
			return { data: {} as T, rawResponsePath: null };
		}

		// Check if the response is expected to be plain text
		const contentType = response.headers.get('content-type') || '';
		if (contentType.includes('text/plain')) {
			// If we're expecting text (like a diff), return the raw text
			const textResponse = await response.text();
			methodLogger.debug(
				`Text response received (truncated)`,
				textResponse.substring(0, 200) + '...',
			);
			return {
				data: textResponse as unknown as T,
				rawResponsePath: null,
			};
		}

		// Handle empty responses (some endpoints return 200/201 with no body)
		const responseText = await response.text();
		if (!responseText || responseText.trim() === '') {
			methodLogger.debug('Received empty response body');
			return { data: {} as T, rawResponsePath: null };
		}

		// For JSON responses, parse the text we already read
		try {
			const responseJson = JSON.parse(responseText);
			methodLogger.debug(`Response body:`, responseJson);

			// Save raw response to file (only successful JSON bodies are saved)
			const rawResponsePath = saveRawResponse(
				url,
				requestOptions.method || 'GET',
				options.body,
				responseJson,
				response.status,
				parseFloat(requestDuration),
			);

			return { data: responseJson as T, rawResponsePath };
		} catch {
			// Not JSON after all — return the raw text instead of failing
			methodLogger.debug(
				`Could not parse response as JSON, returning raw content`,
			);
			return {
				data: responseText as unknown as T,
				rawResponsePath: null,
			};
		}
	} catch (error) {
		clearTimeout(timeoutId);
		methodLogger.error(`Request failed`, error);

		// If it's already an McpError, just rethrow it
		if (error instanceof McpError) {
			throw error;
		}

		// Handle timeout errors (AbortController fires with name 'AbortError')
		if (error instanceof Error && error.name === 'AbortError') {
			methodLogger.error(
				`Request timed out after ${timeoutMs}ms: ${url}`,
			);
			throw createApiError(
				`Request timeout: Bitbucket API did not respond within ${timeoutMs / 1000} seconds`,
				408,
				error,
			);
		}

		// Handle network errors more explicitly
		if (error instanceof TypeError) {
			// TypeError is typically a network/fetch error in this context
			const errorMessage = error.message || 'Network error occurred';
			methodLogger.debug(`Network error details: ${errorMessage}`);

			throw createApiError(
				`Network error while calling Bitbucket API: ${errorMessage}`,
				500, // This will be classified as NETWORK_ERROR by detectErrorType
				error,
			);
		}

		// Handle JSON parsing errors.
		// NOTE(review): body-parse SyntaxErrors are already caught by the inner
		// try/catch above, so this branch is defensive for other JSON usage.
		if (error instanceof SyntaxError) {
			methodLogger.debug(`JSON parsing error: ${error.message}`);

			throw createApiError(
				`Invalid response format from Bitbucket API: ${error.message}`,
				500,
				error,
			);
		}

		// Generic error handler for any other types of errors
		throw createUnexpectedError(
			`Unexpected error while calling Bitbucket API: ${error instanceof Error ? error.message : String(error)}`,
			error,
		);
	}
}

```

--------------------------------------------------------------------------------
/src/utils/formatter.util.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Standardized formatting utilities for consistent output across all CLI and Tool interfaces.
 * These functions should be used by all formatters to ensure consistent formatting.
 */

import { Logger } from './logger.util.js'; // Ensure logger is imported
import { ResponsePagination } from '../types/common.types.js';

// const formatterLogger = Logger.forContext('utils/formatter.util.ts'); // Define logger instance - Removed as unused

/**
 * Render a date as `YYYY-MM-DD HH:MM:SS UTC`.
 * @param dateString - ISO date string or Date object
 * @returns Formatted date string, 'Not available' when no input is given,
 *          or 'Invalid date' when the input cannot be parsed
 */
export function formatDate(dateString?: string | Date): string {
	if (!dateString) {
		return 'Not available';
	}

	try {
		const parsed =
			dateString instanceof Date ? dateString : new Date(dateString);

		// toISOString() always yields YYYY-MM-DDTHH:MM:SS.mmmZ; swap the 'T'
		// for a space and the millisecond suffix for ' UTC'.
		return parsed
			.toISOString()
			.replace('T', ' ')
			.replace(/\.\d+Z$/, ' UTC');
	} catch {
		// toISOString() throws on invalid dates.
		return 'Invalid date';
	}
}

/**
 * Render a URL as a markdown link.
 * @param url - URL to format
 * @param title - Link title; falls back to the URL itself when absent/empty
 * @returns Formatted markdown link, or 'Not available' when no URL is given
 */
export function formatUrl(url?: string, title?: string): string {
	if (!url) {
		return 'Not available';
	}

	return `[${title || url}](${url})`;
}

/**
 * Build the standard pagination footer appended to CLI output.
 * Contains a separator, item counts, a more-results notice with the next
 * cursor/page when available, and a retrieval timestamp.
 * @param pagination - The ResponsePagination object describing the page.
 * @returns Formatted pagination footer string for CLI.
 */
export function formatPagination(pagination: ResponsePagination): string {
	const methodLogger = Logger.forContext(
		'utils/formatter.util.ts',
		'formatPagination',
	);
	const { count = 0, hasMore, nextCursor, total, page } = pagination;

	const lines: string[] = [formatSeparator()];

	// Item counts: prefer "X of Y total" when a total is known.
	if (total !== undefined && total >= 0) {
		lines.push(`*Showing ${count} of ${total} total items.*`);
	} else if (count >= 0) {
		lines.push(`*Showing ${count} item${count !== 1 ? 's' : ''}.*`);
	}

	if (hasMore) {
		lines.push('More results are available.');
	}

	// Surface the cursor value for programmatic paging; fall back to a
	// computed next page number when only `page` is known.
	if (hasMore && nextCursor) {
		lines.push(`*Next cursor: \`${nextCursor}\`*`);
		lines.push(`*Use --page ${nextCursor} to view more.*`);
	} else if (hasMore && page !== undefined) {
		const nextPage = page + 1;
		lines.push(`*Next cursor: \`${nextPage}\`*`);
		lines.push(`*Use --page ${nextPage} to view more.*`);
	}

	lines.push(`*Information retrieved at: ${formatDate(new Date())}*`);

	const footer = lines.join('\n').trim();
	methodLogger.debug(`Formatted pagination footer: ${footer}`);
	return footer;
}

/**
 * Render a markdown heading of the requested depth.
 * @param text - Heading text
 * @param level - Desired level; clamped to the markdown-supported range 1-6
 * @returns The heading line, e.g. `## text`
 */
export function formatHeading(text: string, level: number = 1): string {
	const depth = Math.max(1, Math.min(6, level));
	return `${'#'.repeat(depth)} ${text}`;
}

/**
 * Render an object's key/value pairs as a markdown bullet list.
 * Entries whose value is `undefined` or `null` are skipped entirely.
 * @param items - Object with key-value pairs
 * @param keyFormatter - Optional function to transform keys for display
 * @returns Formatted bullet list, one `- **key**: value` line per entry
 */
export function formatBulletList(
	items: Record<string, unknown>,
	keyFormatter?: (key: string) => string,
): string {
	return Object.entries(items)
		.filter(([, value]) => value !== undefined && value !== null)
		.map(([key, value]) => {
			const label = keyFormatter ? keyFormatter(key) : key;
			return `- **${label}**: ${formatValue(value)}`;
		})
		.join('\n');
}

/**
 * Convert an arbitrary value into display text.
 * Dates are formatted, `{ url, title? }` objects and URL-like strings become
 * markdown links, ISO-looking strings are rendered as dates, and booleans
 * become Yes/No; everything else is stringified.
 * @param value - Value to format
 * @returns Formatted value
 */
function formatValue(value: unknown): string {
	if (value === undefined || value === null) {
		return 'Not available';
	}

	if (value instanceof Date) {
		return formatDate(value);
	}

	// Objects shaped like { url, title? } become markdown links.
	if (typeof value === 'object' && value !== null && 'url' in value) {
		const link = value as { url: string; title?: string };
		if (typeof link.url === 'string') {
			return formatUrl(link.url, link.title);
		}
	}

	if (typeof value === 'boolean') {
		return value ? 'Yes' : 'No';
	}

	if (typeof value === 'string') {
		if (value.startsWith('http://') || value.startsWith('https://')) {
			return formatUrl(value);
		}

		// Strings that look like ISO timestamps are rendered as dates.
		if (/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/.test(value)) {
			return formatDate(value);
		}

		return value;
	}

	return String(value);
}

/**
 * Produce the standard markdown horizontal-rule separator.
 * @returns The literal separator line `---`
 */
export function formatSeparator(): string {
	const horizontalRule = '---';
	return horizontalRule;
}

/**
 * Render an array as a sequence of formatted entries separated by blank lines.
 * @param items - Array of items to format
 * @param formatter - Function producing the text for each item (receives the
 *                    zero-based index)
 * @returns The joined entries, or 'No items.' for an empty array
 */
export function formatNumberedList<T>(
	items: T[],
	formatter: (item: T, index: number) => string,
): string {
	if (!items.length) {
		return 'No items.';
	}

	const rendered: string[] = [];
	items.forEach((item, index) => {
		rendered.push(formatter(item, index));
	});
	return rendered.join('\n\n');
}

/**
 * Format a raw unified diff for markdown display.
 *
 * Splits the diff into per-file sections — a `### <path>` heading plus a
 * fenced ```diff code block each — limiting both the number of files shown
 * and the number of lines rendered per file.
 *
 * @param {string} rawDiff - The raw diff content from the API
 * @param {number} maxFiles - Maximum number of files to display in detail (optional, default: 5)
 * @param {number} maxLinesPerFile - Maximum number of lines to display per file (optional, default: 100)
 * @returns {string} Markdown formatted diff content
 */
export function formatDiff(
	rawDiff: string,
	maxFiles: number = 5,
	maxLinesPerFile: number = 100,
): string {
	if (!rawDiff || rawDiff.trim() === '') {
		return '*No changes found in this pull request.*';
	}

	const formattedLines: string[] = [];
	let fileCount = 0;
	let inFile = false;
	let truncated = false;
	let lineCount = 0;

	for (const line of rawDiff.split('\n')) {
		// A new file is marked by a line starting with "diff --git".
		if (line.startsWith('diff --git')) {
			if (inFile) {
				// Close the previous file's code block.
				formattedLines.push('```');
				formattedLines.push('');
				// Bug fix: mark the block as closed so the post-loop
				// cleanup doesn't emit a stray closing fence when we
				// break out below on hitting the file limit.
				inFile = false;
			}

			// Stop once we've rendered the maximum number of files.
			fileCount++;
			if (fileCount > maxFiles) {
				truncated = true;
				break;
			}

			// Extract the path from "diff --git a/<path> b/<path>".
			const filePath = line.match(/diff --git a\/(.*) b\/(.*)/);
			const currentFile = filePath ? filePath[1] : 'unknown file';
			formattedLines.push(`### ${currentFile}`);
			formattedLines.push('');
			formattedLines.push('```diff');
			inFile = true;
			lineCount = 0;
		} else if (inFile) {
			lineCount++;

			// Truncate files that are too long; the remaining lines of
			// this file are skipped until the next "diff --git" header.
			if (lineCount > maxLinesPerFile) {
				formattedLines.push(
					'// ... more lines omitted for brevity ...',
				);
				formattedLines.push('```');
				formattedLines.push('');
				inFile = false;
				continue;
			}

			// Diff content (+ / - / @@ / context) is emitted verbatim;
			// the ```diff fence provides the highlighting.
			formattedLines.push(line);
		}
	}

	// Close the last code block if the diff ended mid-file.
	if (inFile) {
		formattedLines.push('```');
	}

	// Add a truncation notice if we limited the output.
	if (truncated) {
		formattedLines.push('');
		formattedLines.push(
			`*Output truncated. Only showing the first ${maxFiles} files.*`,
		);
	}

	return formattedLines.join('\n');
}

/**
 * Optimizes markdown content to address Bitbucket Cloud's rendering quirks
 *
 * IMPORTANT: This function does NOT convert between formats (unlike Jira's ADF conversion).
 * Bitbucket Cloud API natively accepts and returns markdown format. This function specifically
 * addresses documented rendering issues in Bitbucket's markdown renderer by applying targeted
 * formatting adjustments for better display in the Bitbucket UI.
 *
 * Known Bitbucket rendering issues this function fixes:
 * - List spacing and indentation (prevents items from concatenating on a single line)
 * - Code block formatting (addresses BCLOUD-20503 and similar bugs)
 * - Inline code formatting (adds proper spacing around backticks)
 * - Diff syntax preservation (maintains +/- at line starts)
 * - Excessive line break normalization
 * - Heading spacing consistency (and stripping redundant bold inside headings)
 *
 * Use this function for both:
 * - Content received FROM the Bitbucket API (to properly display in CLI/tools)
 * - Content being sent TO the Bitbucket API (to ensure proper rendering in Bitbucket UI)
 *
 * @param {string} markdown - The original markdown content
 * @returns {string} Optimized markdown with workarounds for Bitbucket rendering issues
 */
export function optimizeBitbucketMarkdown(markdown: string): string {
	const methodLogger = Logger.forContext(
		'utils/formatter.util.ts',
		'optimizeBitbucketMarkdown',
	);

	if (!markdown || markdown.trim() === '') {
		return markdown;
	}

	methodLogger.debug('Optimizing markdown for Bitbucket rendering');

	// First, extract fenced code blocks and replace them with placeholders so
	// the list/heading/inline-code transforms below can't touch their content.
	const codeBlocks: string[] = [];
	let optimized = markdown.replace(
		/```(\w*)\n([\s\S]*?)```/g,
		(_match, language, code) => {
			const placeholder = `__CODE_BLOCK_${codeBlocks.length}__`;
			codeBlocks.push(`\n\n\`\`\`${language}\n${code}\n\`\`\`\n\n`);
			return placeholder;
		},
	);

	// Fix numbered lists: ensure each item ends with a blank line so
	// Bitbucket doesn't concatenate items onto one line.
	optimized = optimized.replace(
		/^(\d+\.)\s+(.*?)$/gm,
		(_match, number, content) => {
			return `${number} ${content.trim()}\n\n`;
		},
	);

	// Fix bullet lists: normalize the marker to "-" and add item spacing,
	// preserving the original indentation for nested items.
	optimized = optimized.replace(
		/^(\s*)[-*]\s+(.*?)$/gm,
		(_match, indent, content) => {
			return `${indent}- ${content.trim()}\n\n`;
		},
	);

	// NOTE: A previous step that re-indented nested lists (4 spaces per
	// level) was removed: the extra leading spaces made Bitbucket treat
	// the lists as code blocks.

	// Fix inline code: pad `code` spans with surrounding spaces, except for
	// diff-style spans (leading + or -) where padding would break the syntax.
	optimized = optimized.replace(/`([^`]+)`/g, (_match, code) => {
		const trimmedCode = code.trim();
		const firstChar = trimmedCode.charAt(0);

		if (firstChar === '+' || firstChar === '-') {
			return `\`${trimmedCode}\``;
		}

		return ` \`${trimmedCode}\` `;
	});

	// Preserve diff lines' +/- prefixes. NOTE(review): this replacement
	// currently reproduces its input unchanged (a no-op); kept for safety.
	optimized = optimized.replace(
		/^([+-])(.*?)$/gm,
		(_match, prefix, content) => {
			return `${prefix}${content}`;
		},
	);

	// Remove excessive line breaks (more than 2 consecutive).
	optimized = optimized.replace(/\n{3,}/g, '\n\n');

	// Restore the protected code blocks. Bug fix: use a function replacer so
	// special replacement patterns ($&, $', $1, ...) occurring INSIDE user
	// code are inserted literally instead of being expanded by replace().
	codeBlocks.forEach((codeBlock, index) => {
		optimized = optimized.replace(`__CODE_BLOCK_${index}__`, () => codeBlock);
	});

	// Fix double formatting (heading + bold), which Bitbucket renders
	// incorrectly: headings are already emphasized, so strip the bold.
	optimized = optimized.replace(
		/^(#{1,6})\s+\*\*(.*?)\*\*\s*$/gm,
		(_match, hashes, content) => {
			return `\n${hashes} ${content.trim()}\n\n`;
		},
	);

	// Same fix for bold text embedded within heading text.
	optimized = optimized.replace(
		/^(#{1,6})\s+(.*?)\*\*(.*?)\*\*(.*?)$/gm,
		(_match, hashes, before, boldText, after) => {
			const cleanContent = (before + boldText + after).trim();
			return `\n${hashes} ${cleanContent}\n\n`;
		},
	);

	// Ensure remaining headings get consistent surrounding blank lines.
	optimized = optimized.replace(
		/^(#{1,6})\s+(.*?)$/gm,
		(_match, hashes, content) => {
			// Skip headings still containing ** — handled by the patterns above.
			if (content.includes('**')) {
				return _match;
			}
			return `\n${hashes} ${content.trim()}\n\n`;
		},
	);

	// Ensure the content ends with a single line break.
	optimized = optimized.trim() + '\n';

	methodLogger.debug('Markdown optimization complete');
	return optimized;
}

/**
 * Maximum character limit for AI responses (~10k tokens).
 * 1 token ≈ 4 characters, so 10k tokens ≈ 40,000 characters.
 */
const MAX_RESPONSE_CHARS = 40000;

/**
 * Cap formatted content at the AI response budget, appending guidance when cut.
 *
 * When responses exceed the token limit, the content is truncated (preferably
 * at a newline near the limit) and a footer is appended telling the AI where
 * the full raw response lives and how to refine the request.
 *
 * @param content - The formatted response content
 * @param rawResponsePath - Optional path to the raw response file in /tmp/mcp/
 * @returns Truncated content with guidance if needed, or original content if within limits
 */
export function truncateForAI(
	content: string,
	rawResponsePath?: string | null,
): string {
	if (content.length <= MAX_RESPONSE_CHARS) {
		return content;
	}

	// Prefer cutting at a newline found within the last 500 characters of
	// the budget; otherwise cut exactly at the limit.
	const windowStart = Math.max(0, MAX_RESPONSE_CHARS - 500);
	const newlineAt = content.lastIndexOf('\n', MAX_RESPONSE_CHARS);
	const cutAt = newlineAt > windowStart ? newlineAt : MAX_RESPONSE_CHARS;

	const kept = content.substring(0, cutAt);
	const percentShown = Math.round((kept.length / content.length) * 100);

	// Build the guidance footer.
	const footer: string[] = [
		'',
		formatSeparator(),
		formatHeading('Response Truncated', 2),
		'',
		`This response was truncated to ~${Math.round(kept.length / 4000)}k tokens (${percentShown}% of original ${Math.round(content.length / 1000)}k chars).`,
		'',
		'**To access the complete data:**',
	];

	if (rawResponsePath) {
		footer.push(
			`- The full raw API response is saved at: \`${rawResponsePath}\``,
		);
	}

	footer.push(
		'- Consider refining your request with more specific filters or selecting fewer fields',
		'- For paginated data, use smaller page sizes or specific identifiers',
		'- When searching, use more targeted queries to reduce result sets',
	);

	return kept + footer.join('\n');
}

```

--------------------------------------------------------------------------------
/src/utils/error-handler.util.ts:
--------------------------------------------------------------------------------

```typescript
import { createApiError } from './error.util.js';
import { Logger } from './logger.util.js';
import { getDeepOriginalError } from './error.util.js';
import { McpError } from './error.util.js';

/**
 * Standard error codes for consistent handling
 */
export enum ErrorCode {
	NOT_FOUND = 'NOT_FOUND', // Entity missing or not visible to the caller (maps to 404)
	INVALID_CURSOR = 'INVALID_CURSOR', // Bad pagination cursor/page value (maps to 400)
	ACCESS_DENIED = 'ACCESS_DENIED', // Authentication/authorization failure (maps to 401/403)
	VALIDATION_ERROR = 'VALIDATION_ERROR', // Malformed or missing input (maps to 400/422)
	UNEXPECTED_ERROR = 'UNEXPECTED_ERROR', // Fallback when no other classification matches
	NETWORK_ERROR = 'NETWORK_ERROR', // Connectivity/fetch failure reaching the API
	RATE_LIMIT_ERROR = 'RATE_LIMIT_ERROR', // API throttling (maps to 429)
	PRIVATE_IP_ERROR = 'PRIVATE_IP_ERROR', // Not returned by detectErrorType in this file
	RESERVED_RANGE_ERROR = 'RESERVED_RANGE_ERROR', // Not returned by detectErrorType in this file
}

/**
 * Context information for error handling.
 *
 * Collected by callers and threaded through the error utilities so that
 * log lines and user-facing messages can reference what was being done,
 * to which entity, and from where.
 */
export interface ErrorContext {
	/**
	 * Source of the error (e.g., file path and function)
	 */
	source?: string;

	/**
	 * Type of entity being processed (e.g., 'Repository', 'PullRequest')
	 */
	entityType?: string;

	/**
	 * Identifier of the entity being processed; either a plain string or a
	 * map of identifier parts (e.g. workspace + repo slug)
	 */
	entityId?: string | Record<string, string>;

	/**
	 * Operation being performed (e.g., 'listing', 'creating')
	 */
	operation?: string;

	/**
	 * Additional information for debugging
	 */
	additionalInfo?: Record<string, unknown>;
}

/**
 * Assemble a consistent ErrorContext object.
 * Optional fields are only attached when a truthy value was supplied.
 * @param entityType Type of entity being processed
 * @param operation Operation being performed
 * @param source Source of the error (typically file path and function)
 * @param entityId Optional identifier of the entity
 * @param additionalInfo Optional additional information for debugging
 * @returns A formatted ErrorContext object
 */
export function buildErrorContext(
	entityType: string,
	operation: string,
	source: string,
	entityId?: string | Record<string, string>,
	additionalInfo?: Record<string, unknown>,
): ErrorContext {
	const context: ErrorContext = { entityType, operation, source };
	if (entityId) {
		context.entityId = entityId;
	}
	if (additionalInfo) {
		context.additionalInfo = additionalInfo;
	}
	return context;
}

/**
 * Detect specific error types from raw errors.
 *
 * Checks run in priority order: specific message patterns first, then
 * structured Bitbucket error payloads found on `originalError`, and finally
 * generic message/status fallbacks. The order matters — earlier, more
 * specific checks must win over the broad fallbacks at the bottom.
 *
 * @param error The error to analyze
 * @param context Context information for better error detection
 * @returns Object containing the error code and status code
 */
export function detectErrorType(
	error: unknown,
	context: ErrorContext = {},
): { code: ErrorCode; statusCode: number } {
	const methodLogger = Logger.forContext(
		'utils/error-handler.util.ts',
		'detectErrorType',
	);
	methodLogger.debug(`Detecting error type`, { error, context });

	const errorMessage = error instanceof Error ? error.message : String(error);
	// An explicit statusCode property on the error (if present) feeds the
	// status-based checks below.
	const statusCode =
		error instanceof Error && 'statusCode' in error
			? (error as { statusCode: number }).statusCode
			: undefined;

	// PR ID validation error detection
	if (
		errorMessage.includes('Invalid pull request ID') ||
		errorMessage.includes('Pull request ID must be a positive integer')
	) {
		return { code: ErrorCode.VALIDATION_ERROR, statusCode: 400 };
	}

	// Network error detection
	if (
		errorMessage.includes('network error') ||
		errorMessage.includes('fetch failed') ||
		errorMessage.includes('ECONNREFUSED') ||
		errorMessage.includes('ENOTFOUND') ||
		errorMessage.includes('Failed to fetch') ||
		errorMessage.includes('Network request failed')
	) {
		return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
	}

	// Network error detection in originalError
	if (
		error instanceof Error &&
		'originalError' in error &&
		error.originalError
	) {
		// Check for TypeError in originalError (common for network issues)
		if (error.originalError instanceof TypeError) {
			return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
		}

		// Check for network error messages in originalError
		if (
			error.originalError instanceof Error &&
			(error.originalError.message.includes('fetch') ||
				error.originalError.message.includes('network') ||
				error.originalError.message.includes('ECON'))
		) {
			return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
		}
	}

	// Rate limiting detection
	if (
		errorMessage.includes('rate limit') ||
		errorMessage.includes('too many requests') ||
		statusCode === 429
	) {
		return { code: ErrorCode.RATE_LIMIT_ERROR, statusCode: 429 };
	}

	// Bitbucket-specific error detection: unwrap nested originalError chains
	// and inspect the three payload shapes Bitbucket is known to return.
	if (
		error instanceof Error &&
		'originalError' in error &&
		error.originalError
	) {
		const originalError = getDeepOriginalError(error.originalError);

		if (originalError && typeof originalError === 'object') {
			const oe = originalError as Record<string, unknown>;

			// Shape 1: {"error": {"message": ..., "detail": ...}}
			if (oe.error && typeof oe.error === 'object') {
				const bbError = oe.error as Record<string, unknown>;
				const errorMsg = String(bbError.message || '').toLowerCase();
				const errorDetail = bbError.detail
					? String(bbError.detail).toLowerCase()
					: '';

				methodLogger.debug('Found Bitbucket error structure', {
					message: errorMsg,
					detail: errorDetail,
				});

				// Repository not found / Does not exist errors
				if (
					errorMsg.includes('repository not found') ||
					errorMsg.includes('does not exist') ||
					errorMsg.includes('no such resource') ||
					errorMsg.includes('not found')
				) {
					return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
				}

				// Access and permission errors
				if (
					errorMsg.includes('access') ||
					errorMsg.includes('permission') ||
					errorMsg.includes('credentials') ||
					errorMsg.includes('unauthorized') ||
					errorMsg.includes('forbidden') ||
					errorMsg.includes('authentication')
				) {
					return { code: ErrorCode.ACCESS_DENIED, statusCode: 403 };
				}

				// Validation errors
				if (
					errorMsg.includes('invalid') ||
					(errorMsg.includes('parameter') &&
						errorMsg.includes('error')) ||
					errorMsg.includes('input') ||
					errorMsg.includes('validation') ||
					errorMsg.includes('required field') ||
					errorMsg.includes('bad request')
				) {
					return {
						code: ErrorCode.VALIDATION_ERROR,
						statusCode: 400,
					};
				}

				// Rate limiting errors
				if (
					errorMsg.includes('rate limit') ||
					errorMsg.includes('too many requests') ||
					errorMsg.includes('throttled')
				) {
					return {
						code: ErrorCode.RATE_LIMIT_ERROR,
						statusCode: 429,
					};
				}
			}

			// Shape 2: {"type": "error", "status": <number>, ...}
			if (oe.type === 'error') {
				methodLogger.debug('Found Bitbucket type:error structure', oe);

				// Check for status code if available in the error object
				if (typeof oe.status === 'number') {
					if (oe.status === 404) {
						return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
					}
					if (oe.status === 403 || oe.status === 401) {
						return {
							code: ErrorCode.ACCESS_DENIED,
							statusCode: oe.status,
						};
					}
					if (oe.status === 400) {
						return {
							code: ErrorCode.VALIDATION_ERROR,
							statusCode: 400,
						};
					}
					if (oe.status === 429) {
						return {
							code: ErrorCode.RATE_LIMIT_ERROR,
							statusCode: 429,
						};
					}
				}
			}

			// Shape 3: {"errors": [{"status": ..., "title"/"message": ...}]}
			// Only the first entry is inspected.
			if (Array.isArray(oe.errors) && oe.errors.length > 0) {
				const firstError = oe.errors[0] as Record<string, unknown>;
				methodLogger.debug(
					'Found Bitbucket errors array structure',
					firstError,
				);

				if (typeof firstError.status === 'number') {
					if (firstError.status === 404) {
						return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
					}
					if (
						firstError.status === 403 ||
						firstError.status === 401
					) {
						return {
							code: ErrorCode.ACCESS_DENIED,
							statusCode: firstError.status,
						};
					}
					if (firstError.status === 400) {
						return {
							code: ErrorCode.VALIDATION_ERROR,
							statusCode: 400,
						};
					}
					if (firstError.status === 429) {
						return {
							code: ErrorCode.RATE_LIMIT_ERROR,
							statusCode: 429,
						};
					}
				}

				// Look for error messages in the title or message fields
				if (firstError.title || firstError.message) {
					const errorText = String(
						firstError.title || firstError.message,
					).toLowerCase();
					if (errorText.includes('not found')) {
						return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
					}
					if (
						errorText.includes('access') ||
						errorText.includes('permission')
					) {
						return {
							code: ErrorCode.ACCESS_DENIED,
							statusCode: 403,
						};
					}
					if (
						errorText.includes('invalid') ||
						errorText.includes('required')
					) {
						return {
							code: ErrorCode.VALIDATION_ERROR,
							statusCode: 400,
						};
					}
					if (
						errorText.includes('rate limit') ||
						errorText.includes('too many requests')
					) {
						return {
							code: ErrorCode.RATE_LIMIT_ERROR,
							statusCode: 429,
						};
					}
				}
			}
		}
	}

	// Generic fallbacks from here down, keyed off the top-level message
	// and explicit statusCode only.

	// Not Found detection
	if (
		errorMessage.includes('not found') ||
		errorMessage.includes('does not exist') ||
		statusCode === 404
	) {
		return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
	}

	// Access Denied detection
	if (
		errorMessage.includes('access') ||
		errorMessage.includes('permission') ||
		errorMessage.includes('authorize') ||
		errorMessage.includes('authentication') ||
		statusCode === 401 ||
		statusCode === 403
	) {
		return { code: ErrorCode.ACCESS_DENIED, statusCode: statusCode || 403 };
	}

	// Invalid Cursor detection
	if (
		(errorMessage.includes('cursor') ||
			errorMessage.includes('startAt') ||
			errorMessage.includes('page')) &&
		(errorMessage.includes('invalid') || errorMessage.includes('not valid'))
	) {
		return { code: ErrorCode.INVALID_CURSOR, statusCode: 400 };
	}

	// Validation Error detection
	if (
		errorMessage.includes('validation') ||
		errorMessage.includes('invalid') ||
		errorMessage.includes('required') ||
		statusCode === 400 ||
		statusCode === 422
	) {
		return {
			code: ErrorCode.VALIDATION_ERROR,
			statusCode: statusCode || 400,
		};
	}

	// Default to unexpected error
	return {
		code: ErrorCode.UNEXPECTED_ERROR,
		statusCode: statusCode || 500,
	};
}

/**
 * Create user-friendly error messages based on error type and context.
 *
 * @param code The classified error code
 * @param context Context information (entity type/id, operation) used to
 *   tailor the message
 * @param originalMessage The original error message; appended for codes where
 *   the raw detail is useful (and used verbatim for validation errors)
 * @returns User-friendly error message
 */
export function createUserFriendlyErrorMessage(
	code: ErrorCode,
	context: ErrorContext = {},
	originalMessage?: string,
): string {
	const methodLogger = Logger.forContext(
		'utils/error-handler.util.ts',
		'createUserFriendlyErrorMessage',
	);
	const { entityType, entityId, operation } = context;

	// Format entity ID for display
	let entityIdStr = '';
	if (entityId) {
		if (typeof entityId === 'string') {
			entityIdStr = entityId;
		} else {
			// Handle object entityId (like ProjectIdentifier)
			entityIdStr = Object.values(entityId).join('/');
		}
	}

	// Determine entity display name. NOTE: this already embeds the ID when
	// one is known, so the messages below must not append entityIdStr again.
	const entity = entityType
		? `${entityType}${entityIdStr ? ` ${entityIdStr}` : ''}`
		: 'Resource';

	let message = '';

	switch (code) {
		case ErrorCode.NOT_FOUND:
			// Fix: previously the ID was appended a second time here even
			// though `entity` already contains it.
			message = `${entity} not found. Verify the ID is correct and that you have access to this ${entityType?.toLowerCase() || 'resource'}.`;

			// Bitbucket-specific guidance
			if (
				entityType === 'Repository' ||
				entityType === 'PullRequest' ||
				entityType === 'Branch'
			) {
				message += ` Make sure the workspace and ${entityType.toLowerCase()} names are spelled correctly and that you have permission to access it.`;
			}
			break;

		case ErrorCode.ACCESS_DENIED:
			// Fix: same duplicated-ID issue as NOT_FOUND above.
			message = `Access denied for ${entity.toLowerCase()}. Verify your credentials and permissions.`;

			// Bitbucket-specific guidance
			message += ` Ensure your Bitbucket API token/app password has sufficient privileges and hasn't expired. If using a workspace/repository name, check that it's spelled correctly.`;
			break;

		case ErrorCode.INVALID_CURSOR:
			message = `Invalid pagination cursor. Use the exact cursor string returned from previous results.`;

			// Bitbucket-specific guidance
			message += ` Bitbucket pagination typically uses page numbers. Check that the page number is valid and within range.`;
			break;

		case ErrorCode.VALIDATION_ERROR:
			message =
				originalMessage ||
				`Invalid data provided for ${operation || 'operation'} ${entity.toLowerCase()}.`;

			// The originalMessage already includes error details for VALIDATION_ERROR
			break;

		case ErrorCode.NETWORK_ERROR:
			message = `Network error while ${operation || 'connecting to'} the Bitbucket API. Please check your internet connection and try again.`;
			break;

		case ErrorCode.RATE_LIMIT_ERROR:
			message = `Bitbucket API rate limit exceeded. Please wait a moment and try again, or reduce the frequency of requests.`;

			// Bitbucket-specific guidance
			message += ` Bitbucket's API has rate limits per IP address and additional limits for authenticated users.`;
			break;

		default:
			message = `An unexpected error occurred while ${operation || 'processing'} ${entity.toLowerCase()}.`;
	}

	// Include original message details if available and appropriate
	if (
		originalMessage &&
		code !== ErrorCode.NOT_FOUND &&
		code !== ErrorCode.ACCESS_DENIED
	) {
		message += ` Error details: ${originalMessage}`;
	}

	methodLogger.debug(`Created user-friendly message: ${message}`, {
		code,
		context,
	});
	return message;
}

/**
 * Handle controller errors consistently: classify the error, log it with
 * full context, and rethrow it as an McpError with a user-friendly message.
 * @param error The error to handle
 * @param context Context information for better error messages
 * @returns Never returns, always throws an error
 */
export function handleControllerError(
	error: unknown,
	context: ErrorContext = {},
): never {
	const methodLogger = Logger.forContext(
		'utils/error-handler.util.ts',
		'handleControllerError',
	);

	const errorMessage = error instanceof Error ? error.message : String(error);
	const explicitStatus =
		error instanceof Error && 'statusCode' in error
			? (error as { statusCode: number }).statusCode
			: undefined;

	// Classify the error; an explicit statusCode on the error object takes
	// precedence over the detected one.
	const detection = detectErrorType(error, context);
	const finalStatusCode = explicitStatus || detection.statusCode;

	// Build a human-readable description of what was being attempted.
	const { entityType, entityId, operation } = context;
	const entityLabel = entityType || 'resource';
	const idLabel = entityId
		? typeof entityId === 'string'
			? entityId
			: JSON.stringify(entityId)
		: '';
	const verb = operation || 'processing';

	methodLogger.error(
		`Error ${verb} ${entityLabel}${
			idLabel ? `: ${idLabel}` : ''
		}: ${errorMessage}`,
		error,
	);

	// Validation errors surface their original message verbatim; every other
	// code gets the user-friendly wrapper.
	const message =
		detection.code === ErrorCode.VALIDATION_ERROR
			? errorMessage
			: createUserFriendlyErrorMessage(
					detection.code,
					context,
					errorMessage,
				);

	throw createApiError(message, finalStatusCode, error);
}

/**
 * Handles errors from CLI commands.
 * Logs the error, prints a user-facing message, and exits the process with
 * an exit code that reflects the error type.
 *
 * @param error The error to handle
 */
export function handleCliError(error: unknown): never {
	const logger = Logger.forContext(
		'utils/error-handler.util.ts',
		'handleCliError',
	);

	logger.error('CLI error:', error);

	if (error instanceof McpError) {
		console.error(`Error: ${error.message}`);

		// Map MCP error types to dedicated CLI exit codes; anything
		// unrecognized falls back to the generic failure code 1.
		const exitCodes: Record<string, number> = {
			AUTHENTICATION_REQUIRED: 2,
			NOT_FOUND: 3,
			VALIDATION_ERROR: 4,
			RATE_LIMIT_EXCEEDED: 5,
			API_ERROR: 6,
		};
		process.exit(exitCodes[error.errorType] ?? 1);
	}

	if (error instanceof Error) {
		// Standard Error objects
		console.error(`Error: ${error.message}`);
		process.exit(1);
	}

	// Unknown error types
	console.error(`Unknown error occurred: ${String(error)}`);
	process.exit(1);
}

```

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.repositories.service.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';
import {
	createAuthMissingError,
	createApiError,
	McpError,
} from '../utils/error.util.js';
import { Logger } from '../utils/logger.util.js';
import {
	fetchAtlassian,
	getAtlassianCredentials,
} from '../utils/transport.util.js';
import {
	validatePageSize,
	validatePaginationLimits,
} from '../utils/pagination.util.js';
import {
	ListRepositoriesParamsSchema,
	GetRepositoryParamsSchema,
	ListCommitsParamsSchema,
	RepositoriesResponseSchema,
	RepositorySchema,
	PaginatedCommitsSchema,
	CreateBranchParamsSchema,
	BranchRefSchema,
	GetFileContentParamsSchema,
	type ListRepositoriesParams,
	type GetRepositoryParams,
	type ListCommitsParams,
	type Repository,
	type CreateBranchParams,
	type BranchRef,
	type GetFileContentParams,
	ListBranchesParamsSchema,
	BranchesResponseSchema,
	type ListBranchesParams,
	type BranchesResponse,
} from './vendor.atlassian.repositories.types.js';

/**
 * Base API path for Bitbucket REST API v2; every request path below is
 * built relative to this prefix.
 * @see https://developer.atlassian.com/cloud/bitbucket/rest/api-group-repositories/
 * @constant {string}
 */
const API_PATH = '/2.0';

/**
 * @namespace VendorAtlassianRepositoriesService
 * @description Service for interacting with Bitbucket Repositories API.
 * Provides methods for listing repositories and retrieving repository details.
 * All methods require valid Atlassian credentials configured in the environment.
 */

// Create a contextualized logger for this file (method-level loggers are
// derived per function via Logger.forContext)
const serviceLogger = Logger.forContext(
	'services/vendor.atlassian.repositories.service.ts',
);

// Log service initialization (emitted once at module load)
serviceLogger.debug('Bitbucket repositories service initialized');

/**
 * List repositories for a workspace
 * @param {string} workspace - Workspace name or UUID
 * @param {ListRepositoriesParams} [params={}] - Optional parameters
 * @param {string} [params.q] - Query string to filter repositories
 * @param {string} [params.sort] - Property to sort by (e.g., 'name', '-created_on')
 * @param {number} [params.page] - Page number for pagination
 * @param {number} [params.pagelen] - Number of items per page
 * @returns {Promise<RepositoriesResponse>} Response containing repositories
 * @example
 * ```typescript
 * // List repositories in a workspace, filtered and sorted
 * const response = await listRepositories('myworkspace', {
 *   q: 'name~"api"',
 *   sort: 'name',
 *   pagelen: 25
 * });
 * ```
 */
async function list(
	params: ListRepositoriesParams,
): Promise<z.infer<typeof RepositoriesResponseSchema>> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'list',
	);
	methodLogger.debug('Listing Bitbucket repositories with params:', params);

	// Validate params with Zod
	try {
		ListRepositoriesParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to list repositories:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	// Construct query parameters
	const queryParams = new URLSearchParams();

	// Add optional query parameters
	if (params.q) {
		queryParams.set('q', params.q);
	}
	if (params.sort) {
		queryParams.set('sort', params.sort);
	}
	if (params.role) {
		queryParams.set('role', params.role);
	}

	// Validate and enforce page size limits (CWE-770)
	const validatedPagelen = validatePageSize(
		params.pagelen,
		'listRepositories',
	);
	queryParams.set('pagelen', validatedPagelen.toString());

	if (params.page) {
		queryParams.set('page', params.page.toString());
	}

	const queryString = queryParams.toString()
		? `?${queryParams.toString()}`
		: '';
	const path = `${API_PATH}/repositories/${params.workspace}${queryString}`;

	methodLogger.debug(`Sending request to: ${path}`);
	try {
		const response = await fetchAtlassian(credentials, path);
		// Validate response with Zod schema
		try {
			const validatedData = RepositoriesResponseSchema.parse(
				response.data,
			);

			// Validate pagination limits to prevent excessive data exposure (CWE-770)
			if (!validatePaginationLimits(validatedData, 'listRepositories')) {
				methodLogger.warn(
					'Response pagination exceeds configured limits',
				);
			}

			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					'Received invalid response format from Bitbucket API',
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to list repositories: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Get detailed information about a specific Bitbucket repository
 *
 * Retrieves comprehensive details about a single repository.
 *
 * @async
 * @memberof VendorAtlassianRepositoriesService
 * @param {GetRepositoryParams} params - Parameters for the request
 * @param {string} params.workspace - The workspace slug
 * @param {string} params.repo_slug - The repository slug
 * @returns {Promise<Repository>} Promise containing the detailed repository information
 * @throws {Error} If Atlassian credentials are missing or API request fails
 * @example
 * // Get repository details
 * const repository = await get({
 *   workspace: 'my-workspace',
 *   repo_slug: 'my-repo'
 * });
 */
async function get(params: GetRepositoryParams): Promise<Repository> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'get',
	);
	methodLogger.debug(
		`Getting Bitbucket repository: ${params.workspace}/${params.repo_slug}`,
	);

	// Validate params up front; surface Zod issues as a 400 API error
	const parsedParams = GetRepositoryParamsSchema.safeParse(params);
	if (!parsedParams.success) {
		methodLogger.error(
			'Invalid parameters provided to get repository:',
			parsedParams.error.format(),
		);
		throw createApiError(
			`Invalid parameters: ${parsedParams.error.issues.map((e) => e.message).join(', ')}`,
			400,
			parsedParams.error,
		);
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}`;

	methodLogger.debug(`Sending request to: ${path}`);
	try {
		const response = await fetchAtlassian(credentials, path);

		// The API responded; confirm the payload matches the expected schema
		// before handing it back to callers.
		const parsedBody = RepositorySchema.safeParse(response.data);
		if (!parsedBody.success) {
			// Log the detailed validation errors but give users a clear message
			methodLogger.error(
				'Bitbucket API response validation failed:',
				parsedBody.error.format(),
			);
			throw createApiError(
				`Invalid response format from Bitbucket API for repository ${params.workspace}/${params.repo_slug}`,
				500, // API responded, but with an unexpected shape
				parsedBody.error, // keep the Zod error for debugging
			);
		}
		return parsedBody.data;
	} catch (error) {
		// McpErrors (from fetchAtlassian or validation above) pass through untouched
		if (error instanceof McpError) {
			throw error;
		}

		// Anything else gets wrapped with repository context
		throw createApiError(
			`Failed to get repository details for ${params.workspace}/${params.repo_slug}: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Lists commits for a specific repository and optional revision/path.
 *
 * @param params Parameters including workspace, repo slug, and optional filters.
 * @returns Promise resolving to paginated commit data.
 * @throws {Error} If workspace or repo_slug are missing, or if credentials are not found.
 */
async function listCommits(
	params: ListCommitsParams,
): Promise<z.infer<typeof PaginatedCommitsSchema>> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'listCommits',
	);
	methodLogger.debug(
		`Listing commits for ${params.workspace}/${params.repo_slug}`,
		params,
	);

	// Validate params with Zod
	try {
		ListCommitsParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to list commits:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const queryParams = new URLSearchParams();
	if (params.include) {
		queryParams.set('include', params.include);
	}
	if (params.exclude) {
		queryParams.set('exclude', params.exclude);
	}
	if (params.path) {
		queryParams.set('path', params.path);
	}
	if (params.pagelen) {
		queryParams.set('pagelen', params.pagelen.toString());
	}
	if (params.page) {
		queryParams.set('page', params.page.toString());
	}

	const queryString = queryParams.toString()
		? `?${queryParams.toString()}`
		: '';
	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}/commits${queryString}`;

	methodLogger.debug(`Sending commit history request to: ${path}`);
	try {
		const response = await fetchAtlassian(credentials, path);
		// Validate response with Zod schema
		try {
			const validatedData = PaginatedCommitsSchema.parse(response.data);
			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					'Received invalid response format from Bitbucket API',
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to list commits: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Creates a new branch in the specified repository.
 *
 * @param params Parameters including workspace, repo slug, new branch name, and source target.
 * @returns Promise resolving to details about the newly created branch reference.
 * @throws {Error} If required parameters are missing or API request fails.
 */
async function createBranch(params: CreateBranchParams): Promise<BranchRef> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'createBranch',
	);
	methodLogger.debug(
		`Creating branch '${params.name}' from target '${params.target.hash}' in ${params.workspace}/${params.repo_slug}`,
	);

	// Reject malformed input before contacting the API
	const parsedParams = CreateBranchParamsSchema.safeParse(params);
	if (!parsedParams.success) {
		methodLogger.error(
			'Invalid parameters provided:',
			parsedParams.error.format(),
		);
		throw createApiError(
			`Invalid parameters: ${parsedParams.error.issues.map((e) => e.message).join(', ')}`,
			400,
			parsedParams.error,
		);
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}/refs/branches`;

	// Only the branch name and the source commit hash are sent to the API
	const requestBody = {
		name: params.name,
		target: { hash: params.target.hash },
	};

	methodLogger.debug(`Sending POST request to: ${path}`);
	try {
		const response = await fetchAtlassian<BranchRef>(credentials, path, {
			method: 'POST',
			body: requestBody,
		});

		// Confirm the API returned a well-formed branch reference
		const parsedBody = BranchRefSchema.safeParse(response.data);
		if (!parsedBody.success) {
			methodLogger.error(
				'Invalid response from Bitbucket API:',
				parsedBody.error.format(),
			);
			throw createApiError(
				'Received invalid response format from Bitbucket API',
				500,
				parsedBody.error,
			);
		}
		methodLogger.debug('Branch created successfully:', parsedBody.data);
		return parsedBody.data;
	} catch (error) {
		// McpErrors (from fetchAtlassian or validation above) pass through
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to create branch: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Get the content of a file from a repository.
 *
 * This retrieves the raw content of a file at the specified path from a repository at a specific commit.
 *
 * @param {GetFileContentParams} params - Parameters for the request
 * @param {string} params.workspace - The workspace slug or UUID
 * @param {string} params.repo_slug - The repository slug or UUID
 * @param {string} params.commit - The commit, branch name, or tag to get the file from
 * @param {string} params.path - The file path within the repository
 * @returns {Promise<string>} Promise containing the file content as a string
 * @throws {Error} If parameters are invalid, credentials are missing, or API request fails
 * @example
 * // Get README.md content from the main branch
 * const fileContent = await getFileContent({
 *   workspace: 'my-workspace',
 *   repo_slug: 'my-repo',
 *   commit: 'main',
 *   path: 'README.md'
 * });
 */
async function getFileContent(params: GetFileContentParams): Promise<string> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'getFileContent',
	);
	methodLogger.debug(
		`Getting file content from ${params.workspace}/${params.repo_slug}/${params.commit}/${params.path}`,
	);

	// Validate params with Zod; invalid input becomes a 400 API error
	try {
		GetFileContentParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to get file content:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	// Encode each path segment so file names containing spaces, '#', '?', '%',
	// etc. do not corrupt the request URL; '/' separators are preserved, and
	// typical alphanumeric paths are unchanged by the encoding.
	const encodedFilePath = params.path
		.split('/')
		.map((segment) => encodeURIComponent(segment))
		.join('/');
	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}/src/${params.commit}/${encodedFilePath}`;

	methodLogger.debug(`Sending request to: ${path}`);
	try {
		// Use fetchAtlassian to get the file content directly as string
		// The function already detects text/plain content type and returns it appropriately
		const response = await fetchAtlassian<string>(credentials, path);

		methodLogger.debug(
			`Successfully retrieved file content (${response.data.length} characters)`,
		);
		return response.data;
	} catch (error) {
		// McpErrors pass through untouched
		if (error instanceof McpError) {
			throw error;
		}

		// More specific error messages for common file issues
		if (error instanceof Error && error.message.includes('404')) {
			throw createApiError(
				`File not found: ${params.path} at ${params.commit}`,
				404,
				error,
			);
		}

		throw createApiError(
			`Failed to get file content: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Lists branches for a specific repository.
 *
 * @param params Parameters including workspace, repo slug, and optional filters.
 * @returns Promise resolving to paginated branches data.
 * @throws {Error} If workspace or repo_slug are missing, or if credentials are not found.
 */
async function listBranches(
	params: ListBranchesParams,
): Promise<BranchesResponse> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'listBranches',
	);
	methodLogger.debug(
		`Listing branches for ${params.workspace}/${params.repo_slug}`,
		params,
	);

	// Validate params with Zod
	try {
		ListBranchesParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to list branches:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const queryParams = new URLSearchParams();
	if (params.q) {
		queryParams.set('q', params.q);
	}
	if (params.sort) {
		queryParams.set('sort', params.sort);
	}
	if (params.pagelen) {
		queryParams.set('pagelen', params.pagelen.toString());
	}
	if (params.page) {
		queryParams.set('page', params.page.toString());
	}

	const queryString = queryParams.toString()
		? `?${queryParams.toString()}`
		: '';
	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}/refs/branches${queryString}`;

	methodLogger.debug(`Sending branches request to: ${path}`);
	try {
		const response = await fetchAtlassian(credentials, path);
		// Validate response with Zod schema
		try {
			const validatedData = BranchesResponseSchema.parse(response.data);
			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					'Received invalid response format from Bitbucket API',
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to list branches: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

// Public surface of the repositories service: list/get repositories,
// commit history, branch listing/creation, and raw file content retrieval.
export default {
	list,
	get,
	listCommits,
	createBranch,
	getFileContent,
	listBranches,
};

```
Page 2/3FirstPrevNextLast