This is page 3 of 4. Use http://codebase.md/aashari/mcp-server-atlassian-bitbucket?page={x} to view the full context.

# Directory Structure

```
├── .env.example
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── ci-dependabot-auto-merge.yml
│       ├── ci-dependency-check.yml
│       └── ci-semantic-release.yml
├── .gitignore
├── .gitkeep
├── .npmignore
├── .npmrc
├── .prettierrc
├── .releaserc.json
├── .trigger-ci
├── CHANGELOG.md
├── eslint.config.mjs
├── jest.setup.js
├── package-lock.json
├── package.json
├── README.md
├── scripts
│   ├── ensure-executable.js
│   ├── package.json
│   └── update-version.js
├── src
│   ├── cli
│   │   ├── atlassian.diff.cli.ts
│   │   ├── atlassian.pullrequests.cli.test.ts
│   │   ├── atlassian.pullrequests.cli.ts
│   │   ├── atlassian.repositories.cli.test.ts
│   │   ├── atlassian.repositories.cli.ts
│   │   ├── atlassian.search.cli.test.ts
│   │   ├── atlassian.search.cli.ts
│   │   ├── atlassian.workspaces.cli.test.ts
│   │   ├── atlassian.workspaces.cli.ts
│   │   └── index.ts
│   ├── controllers
│   │   ├── atlassian.diff.controller.ts
│   │   ├── atlassian.diff.formatter.ts
│   │   ├── atlassian.pullrequests.approve.controller.ts
│   │   ├── atlassian.pullrequests.base.controller.ts
│   │   ├── atlassian.pullrequests.comments.controller.ts
│   │   ├── atlassian.pullrequests.controller.test.ts
│   │   ├── atlassian.pullrequests.controller.ts
│   │   ├── atlassian.pullrequests.create.controller.ts
│   │   ├── atlassian.pullrequests.formatter.ts
│   │   ├── atlassian.pullrequests.get.controller.ts
│   │   ├── atlassian.pullrequests.list.controller.ts
│   │   ├── atlassian.pullrequests.reject.controller.ts
│   │   ├── atlassian.pullrequests.update.controller.ts
│   │   ├── atlassian.repositories.branch.controller.ts
│   │   ├── atlassian.repositories.commit.controller.ts
│   │   ├── atlassian.repositories.content.controller.ts
│   │   ├── atlassian.repositories.controller.test.ts
│   │   ├── atlassian.repositories.details.controller.ts
│   │   ├── atlassian.repositories.formatter.ts
│   │   ├── atlassian.repositories.list.controller.ts
│   │   ├── atlassian.search.code.controller.ts
│   │   ├── atlassian.search.content.controller.ts
│   │   ├── atlassian.search.controller.test.ts
│   │   ├── atlassian.search.controller.ts
│   │   ├── atlassian.search.formatter.ts
│   │   ├── atlassian.search.pullrequests.controller.ts
│   │   ├── atlassian.search.repositories.controller.ts
│   │   ├── atlassian.workspaces.controller.test.ts
│   │   ├── atlassian.workspaces.controller.ts
│   │   └── atlassian.workspaces.formatter.ts
│   ├── index.ts
│   ├── services
│   │   ├── vendor.atlassian.pullrequests.service.ts
│   │   ├── vendor.atlassian.pullrequests.test.ts
│   │   ├── vendor.atlassian.pullrequests.types.ts
│   │   ├── vendor.atlassian.repositories.diff.service.ts
│   │   ├── vendor.atlassian.repositories.diff.types.ts
│   │   ├── vendor.atlassian.repositories.service.test.ts
│   │   ├── vendor.atlassian.repositories.service.ts
│   │   ├── vendor.atlassian.repositories.types.ts
│   │   ├── vendor.atlassian.search.service.ts
│   │   ├── vendor.atlassian.search.types.ts
│   │   ├── vendor.atlassian.workspaces.service.ts
│   │   ├── vendor.atlassian.workspaces.test.ts
│   │   └── vendor.atlassian.workspaces.types.ts
│   ├── tools
│   │   ├── atlassian.diff.tool.ts
│   │   ├── atlassian.diff.types.ts
│   │   ├── atlassian.pullrequests.tool.ts
│   │   ├── atlassian.pullrequests.types.test.ts
│   │   ├── atlassian.pullrequests.types.ts
│   │   ├── atlassian.repositories.tool.ts
│   │   ├── atlassian.repositories.types.ts
│   │   ├── atlassian.search.tool.ts
│   │   ├── atlassian.search.types.ts
│   │   ├── atlassian.workspaces.tool.ts
│   │   └── atlassian.workspaces.types.ts
│   ├── types
│   │   └── common.types.ts
│   └── utils
│       ├── adf.util.test.ts
│       ├── adf.util.ts
│       ├── atlassian.util.ts
│       ├── bitbucket-error-detection.test.ts
│       ├── cli.test.util.ts
│       ├── config.util.test.ts
│       ├── config.util.ts
│       ├── constants.util.ts
│       ├── defaults.util.ts
│       ├── diff.util.ts
│       ├── error-handler.util.test.ts
│       ├── error-handler.util.ts
│       ├── error.util.test.ts
│       ├── error.util.ts
│       ├── formatter.util.ts
│       ├── logger.util.ts
│       ├── markdown.util.test.ts
│       ├── markdown.util.ts
│       ├── pagination.util.ts
│       ├── path.util.test.ts
│       ├── path.util.ts
│       ├── query.util.ts
│       ├── shell.util.ts
│       ├── transport.util.test.ts
│       ├── transport.util.ts
│       └── workspace.util.ts
├── STYLE_GUIDE.md
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/src/tools/atlassian.pullrequests.types.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';

/**
 * Base pagination arguments for all tools
 */
const PaginationArgs = {
	limit: z
		.number()
		.int()
		.positive()
		.max(100)
		.optional()
		.describe(
			'Maximum number of items to return (1-100). Controls the response size. Defaults to 25 if omitted.',
		),

	cursor: z
		.string()
		.optional()
		.describe(
			'Pagination cursor for retrieving the next set of results. Obtained from previous response when more results are available.',
		),
};

/**
 * Schema for list-pull-requests tool arguments
 */
export const ListPullRequestsToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or the first workspace in your account). Example: "myteam"',
		),

	/**
	 * Repository slug containing the pull requests
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug containing the pull requests. This must be a valid repository in the specified workspace. Example: "project-api"',
		),

	/**
	 * Filter by pull request state
	 */
	state: z
		.enum(['OPEN', 'MERGED', 'DECLINED', 'SUPERSEDED'])
		.optional()
		.describe(
			'Filter pull requests by state. Options: "OPEN" (active PRs), "MERGED" (completed PRs), "DECLINED" (rejected PRs), or "SUPERSEDED" (replaced PRs). If omitted, defaults to showing all states.',
		),

	/**
	 * Filter query for pull requests
	 */
	query: z
		.string()
		.optional()
		.describe(
			'Filter pull requests by title, description, or author (text search). Uses Bitbucket query syntax.',
		),

	/**
	 * Pagination parameters (limit and cursor)
	 */
	...PaginationArgs,
});

export type ListPullRequestsToolArgsType = z.infer<
	typeof ListPullRequestsToolArgs
>;

/**
 * Schema for get-pull-request tool arguments
 */
export const GetPullRequestToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace. Example: "myteam"',
		),

	/**
	 * Repository slug containing the pull request
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug containing the pull request. This must be a valid repository in the specified workspace. Example: "project-api"',
		),

	/**
	 * Pull request identifier
	 */
	prId: z
		.string()
		.min(1, 'Pull request ID is required')
		.describe(
			'Numeric ID of the pull request to retrieve as a string. Must be a valid pull request ID in the specified repository. Example: "42"',
		),

	/**
	 * Optional flag to request the full diff
	 */
	includeFullDiff: z
		.boolean()
		.optional()
		.describe(
			'Set to true to retrieve the full diff content instead of just the summary. Default: true (rich output by default)',
		)
		.default(true),

	/**
	 * Optional flag to include comments
	 */
	includeComments: z
		.boolean()
		.optional()
		.describe(
			'Set to true to retrieve comments for the pull request. Default: false. Note: Enabling this may increase response time for pull requests with many comments due to additional API calls.',
		)
		.default(false),
});

export type GetPullRequestToolArgsType = z.infer<typeof GetPullRequestToolArgs>;

/**
 * Schema for list-pr-comments tool arguments
 */
export const ListPullRequestCommentsToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace. Example: "myteam"',
		),

	/**
	 * Repository slug containing the pull request
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug containing the pull request. This must be a valid repository in the specified workspace. Example: "project-api"',
		),

	/**
	 * Pull request identifier
	 */
	prId: z
		.string()
		.min(1, 'Pull request ID is required')
		.describe(
			'Numeric ID of the pull request to retrieve comments from as a string. Must be a valid pull request ID in the specified repository. Example: "42"',
		),

	/**
	 * Pagination parameters
	 */
	...PaginationArgs,
});

export type ListPullRequestCommentsToolArgsType = z.infer<
	typeof ListPullRequestCommentsToolArgs
>;

/**
 * Schema for create-pr-comment tool arguments
 */
export const CreatePullRequestCommentToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace. Example: "myteam"',
		),

	/**
	 * Repository slug containing the pull request
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug containing the pull request. This must be a valid repository in the specified workspace. Example: "project-api"',
		),

	/**
	 * Pull request identifier
	 */
	prId: z
		.string()
		.min(1, 'Pull request ID is required')
		.describe(
			'Numeric ID of the pull request to add a comment to as a string. Must be a valid pull request ID in the specified repository. Example: "42"',
		),

	/**
	 * Comment content
	 */
	content: z
		.string()
		.min(1, 'Comment content is required')
		.describe(
			'The content of the comment to add to the pull request in Markdown format. Bitbucket Cloud natively accepts Markdown - supports headings, lists, code blocks, links, and other standard Markdown syntax.',
		),

	/**
	 * Optional inline location for the comment
	 */
	inline: z
		.object({
			path: z
				.string()
				.min(1, 'File path is required for inline comments')
				.describe('The file path to add the comment to.'),
			line: z
				.number()
				.int()
				.positive()
				.describe('The line number to add the comment to.'),
		})
		.optional()
		.describe(
			'Optional inline location for the comment. If provided, this will create a comment on a specific line in a file.',
		),

	parentId: z
		.string()
		.optional()
		.describe(
			'The ID of the parent comment to reply to. If not provided, the comment will be a top-level comment.',
		),
});

/**
 * Type for create pull request comment tool arguments (inferred from schema)
 */
export type CreatePullRequestCommentToolArgsType = z.infer<
	typeof CreatePullRequestCommentToolArgs
>;

/**
 * Arguments schema for the pull_requests_create tool
 */
export const CreatePullRequestToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace. Example: "myteam"',
		),

	/**
	 * Repository slug to create the pull request in
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug to create the pull request in. This must be a valid repository in the specified workspace. Example: "project-api"',
		),

	/**
	 * Title of the pull request
	 */
	title: z
		.string()
		.min(1, 'Pull request title is required')
		.describe('Title for the pull request. Example: "Add new feature"'),

	/**
	 * Source branch name
	 */
	sourceBranch: z
		.string()
		.min(1, 'Source branch name is required')
		.describe(
			'Source branch name (the branch containing your changes). Example: "feature/new-login"',
		),

	/**
	 * Destination branch name
	 */
	destinationBranch: z
		.string()
		.optional()
		.describe(
			'Destination branch name (the branch you want to merge into, defaults to main). Example: "develop"',
		),

	/**
	 * Description for the pull request
	 */
	description: z
		.string()
		.optional()
		.describe(
			'Optional description for the pull request in Markdown format. Supports standard Markdown syntax including headings, lists, code blocks, and links.',
		),

	/**
	 * Whether to close the source branch after merge
	 */
	closeSourceBranch: z
		.boolean()
		.optional()
		.describe(
			'Whether to close the source branch after the pull request is merged. Default: false',
		),
});

export type CreatePullRequestToolArgsType = z.infer<
	typeof CreatePullRequestToolArgs
>;

/**
 * Schema for update-pull-request tool arguments
 */
export const UpdatePullRequestToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or the first workspace in your account). Example: "myteam"',
		),

	/**
	 * Repository slug containing the pull request
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug containing the pull request. This must be a valid repository in the specified workspace. Example: "project-api"',
		),

	/**
	 * Pull request ID
	 */
	pullRequestId: z
		.number()
		.int()
		.positive()
		.describe('Pull request ID to update. Example: 123'),

	/**
	 * Updated title for the pull request
	 */
	title: z
		.string()
		.optional()
		.describe(
			'Updated title for the pull request. Example: "Updated Feature Implementation"',
		),

	/**
	 * Updated description for the pull request
	 */
	description: z
		.string()
		.optional()
		.describe(
			'Updated description for the pull request in Markdown format. Supports standard Markdown syntax including headings, lists, code blocks, and links.',
		),
});

export type UpdatePullRequestToolArgsType = z.infer<
	typeof UpdatePullRequestToolArgs
>;

/**
 * Schema for approve-pull-request tool arguments
 */
export const ApprovePullRequestToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or the first workspace in your account). Example: "myteam"',
		),

	/**
	 * Repository slug containing the pull request
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug containing the pull request. This must be a valid repository in the specified workspace. Example: "project-api"',
		),

	/**
	 * Pull request ID
	 */
	pullRequestId: z
		.number()
		.int()
		.positive()
		.describe('Pull request ID to approve. Example: 123'),
});

export type ApprovePullRequestToolArgsType = z.infer<
	typeof ApprovePullRequestToolArgs
>;

/**
 * Schema for reject-pull-request tool arguments
 */
export const RejectPullRequestToolArgs = z.object({
	/**
	 * Workspace slug containing the repository
	 */
	workspaceSlug: z
		.string()
		.optional()
		.describe(
			'Workspace slug containing the repository. If not provided, the system will use your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or the first workspace in your account). Example: "myteam"',
		),

	/**
	 * Repository slug containing the pull request
	 */
	repoSlug: z
		.string()
		.min(1, 'Repository slug is required')
		.describe(
			'Repository slug containing the pull request. This must be a valid repository in the specified workspace. Example: "project-api"',
		),

	/**
	 * Pull request ID
	 */
	pullRequestId: z
		.number()
		.int()
		.positive()
		.describe('Pull request ID to request changes on. Example: 123'),
});

export type RejectPullRequestToolArgsType = z.infer<
	typeof RejectPullRequestToolArgs
>;

```
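
As a quick illustration of how the exported Zod schemas above behave, the minimal sketch below validates a made-up argument object against `ListPullRequestsToolArgs`. The import path and the argument values are illustrative assumptions, not part of the repository.

```typescript
// Minimal sketch (not part of the repository): validating tool arguments
// with the exported Zod schema. Import path and argument values are
// illustrative assumptions.
import { ListPullRequestsToolArgs } from './atlassian.pullrequests.types.js';

const result = ListPullRequestsToolArgs.safeParse({
	repoSlug: 'project-api', // required, must be non-empty
	state: 'OPEN', // optional enum filter
	limit: 25, // optional, 1-100
});

if (result.success) {
	// result.data is typed as ListPullRequestsToolArgsType
	console.log('valid args:', result.data);
} else {
	// Zod reports which field failed, e.g. a missing repoSlug
	console.error(result.error.issues);
}
```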

--------------------------------------------------------------------------------
/src/utils/adf.util.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Utility functions for converting Atlassian Document Format (ADF) to Markdown
 *
 * NOTE: Unlike Jira, Bitbucket Cloud API natively accepts and returns Markdown format.
 * This utility only includes adfToMarkdown for potential edge cases where Bitbucket
 * might return ADF content (though this is rare).
 *
 * Functions like markdownToAdf and textToAdf (needed in the Jira project) are NOT needed
 * in the Bitbucket integration and have been removed to avoid confusion.
 */

import { Logger } from './logger.util.js';
// Placeholder for AdfDocument type if specific types are needed for Bitbucket
// For now, assuming a similar structure to Jira's AdfDocument
type AdfDocument = {
	version: number;
	type: 'doc';
	content: AdfNode[];
};

// Create a contextualized logger for this file
const adfLogger = Logger.forContext('utils/adf.util.ts');

// Log ADF utility initialization
adfLogger.debug('ADF utility initialized');

/**
 * Interface for ADF node
 */
interface AdfNode {
	type: string;
	text?: string;
	content?: AdfNode[];
	attrs?: Record<string, unknown>;
	marks?: Array<{ type: string; attrs?: Record<string, unknown> }>;
}

/**
 * Convert Atlassian Document Format (ADF) to Markdown
 *
 * @param adf - The ADF content to convert (can be string or object)
 * @returns The converted Markdown content
 */
export function adfToMarkdown(adf: unknown): string {
	const methodLogger = Logger.forContext(
		'utils/adf.util.ts',
		'adfToMarkdown',
	);

	try {
		// Handle empty or undefined input
		if (!adf) {
			return '';
		}

		// Parse ADF if it's a string
		let adfDoc: AdfDocument;
		if (typeof adf === 'string') {
			try {
				adfDoc = JSON.parse(adf);
			} catch {
				return adf; // Return as-is if not valid JSON
			}
		} else if (typeof adf === 'object') {
			adfDoc = adf as AdfDocument;
		} else {
			return String(adf);
		}

		// Check if it's a valid ADF document
		if (!adfDoc.content || !Array.isArray(adfDoc.content)) {
			return '';
		}

		// Process the document
		const markdown = processAdfContent(adfDoc.content);
		methodLogger.debug(
			`Converted ADF to Markdown, length: ${markdown.length}`,
		);
		return markdown;
	} catch (error) {
		methodLogger.error(
			'[src/utils/adf.util.ts@adfToMarkdown] Error converting ADF to Markdown:',
			error,
		);
		return '*Error converting description format*';
	}
}

/**
 * Process ADF content nodes
 */
function processAdfContent(content: AdfNode[]): string {
	if (!content || !Array.isArray(content)) {
		return '';
	}

	return content.map((node) => processAdfNode(node)).join('\n\n');
}

/**
 * Process mention node
 */
function processMention(node: AdfNode): string {
	if (!node.attrs) {
		return '';
	}

	const text = node.attrs.text || node.attrs.displayName || '';
	if (!text) {
		return '';
	}

	// Format as @username to preserve the mention format
	// Remove any existing @ symbol to avoid double @@ in the output
	const cleanText =
		typeof text === 'string' && text.startsWith('@')
			? text.substring(1)
			: text;
	return `@${cleanText}`;
}

/**
 * Process a single ADF node
 */
function processAdfNode(node: AdfNode): string {
	if (!node || !node.type) {
		return '';
	}

	switch (node.type) {
		case 'paragraph':
			return processParagraph(node);
		case 'heading':
			return processHeading(node);
		case 'bulletList':
			return processBulletList(node);
		case 'orderedList':
			return processOrderedList(node);
		case 'listItem':
			return processListItem(node);
		case 'codeBlock':
			return processCodeBlock(node);
		case 'blockquote':
			return processBlockquote(node);
		case 'rule':
			return '---';
		case 'mediaGroup':
			return processMediaGroup(node);
		case 'media':
			return processMedia(node);
		case 'table':
			return processTable(node);
		case 'text':
			return processText(node);
		case 'mention':
			return processMention(node);
		case 'inlineCard':
			return processInlineCard(node);
		case 'emoji':
			return processEmoji(node);
		case 'date':
			return processDate(node);
		case 'status':
			return processStatus(node);
		default:
			// For unknown node types, try to process content if available
			if (node.content) {
				return processAdfContent(node.content);
			}
			return '';
	}
}

/**
 * Process paragraph node
 */
function processParagraph(node: AdfNode): string {
	if (!node.content) {
		return '';
	}

	// Process each child node and join them with proper spacing
	return node.content
		.map((childNode, index) => {
			// Add a space between text nodes if needed
			const needsSpace =
				index > 0 &&
				childNode.type === 'text' &&
				node.content![index - 1].type === 'text' &&
				!childNode.text?.startsWith(' ') &&
				!node.content![index - 1].text?.endsWith(' ');

			return (needsSpace ? ' ' : '') + processAdfNode(childNode);
		})
		.join('');
}

/**
 * Process heading node
 */
function processHeading(node: AdfNode): string {
	if (!node.content || !node.attrs) {
		return '';
	}

	const level = typeof node.attrs.level === 'number' ? node.attrs.level : 1;
	const headingMarker = '#'.repeat(level);
	const content = node.content
		.map((childNode) => processAdfNode(childNode))
		.join('');

	return `${headingMarker} ${content}`;
}

/**
 * Process bullet list node
 */
function processBulletList(node: AdfNode): string {
	if (!node.content) {
		return '';
	}

	return node.content.map((item) => processAdfNode(item)).join('\n');
}

/**
 * Process ordered list node
 */
function processOrderedList(node: AdfNode): string {
	if (!node.content) {
		return '';
	}

	return node.content
		.map((item, index) => {
			const processedItem = processAdfNode(item);
			// Replace the first "- " with "1. ", "2. ", etc.
			return processedItem.replace(/^- /, `${index + 1}. `);
		})
		.join('\n');
}

/**
 * Process list item node
 */
function processListItem(node: AdfNode): string {
	if (!node.content) {
		return '';
	}

	const content = node.content
		.map((childNode) => {
			const processed = processAdfNode(childNode);
			// For nested lists, add indentation
			if (
				childNode.type === 'bulletList' ||
				childNode.type === 'orderedList'
			) {
				return processed
					.split('\n')
					.map((line) => `  ${line}`)
					.join('\n');
			}
			return processed;
		})
		.join('\n');

	return `- ${content}`;
}

/**
 * Process code block node
 */
function processCodeBlock(node: AdfNode): string {
	if (!node.content) {
		return '```\n```';
	}

	const language = node.attrs?.language || '';
	const code = node.content
		.map((childNode) => processAdfNode(childNode))
		.join('');

	return `\`\`\`${language}\n${code}\n\`\`\``;
}

/**
 * Process blockquote node
 */
function processBlockquote(node: AdfNode): string {
	if (!node.content) {
		return '';
	}

	const content = node.content
		.map((childNode) => processAdfNode(childNode))
		.join('\n\n');

	// Add > to each line
	return content
		.split('\n')
		.map((line) => `> ${line}`)
		.join('\n');
}

/**
 * Process media group node
 */
function processMediaGroup(node: AdfNode): string {
	if (!node.content) {
		return '';
	}

	return node.content
		.map((mediaNode) => {
			if (mediaNode.type === 'media' && mediaNode.attrs) {
				const { id, type } = mediaNode.attrs;
				if (type === 'file') {
					return `[Attachment: ${id}]`;
				} else if (type === 'link') {
					return `[External Link]`;
				}
			}
			return '';
		})
		.filter(Boolean)
		.join('\n');
}

/**
 * Process media node
 */
function processMedia(node: AdfNode): string {
	if (!node.attrs) {
		return '';
	}

	// Handle file attachments
	if (node.attrs.type === 'file') {
		const id = node.attrs.id || '';
		const altText = node.attrs.alt ? node.attrs.alt : `Attachment: ${id}`;
		return `![${altText}](attachment:${id})`;
	}

	// Handle external media (e.g., YouTube embeds)
	if (node.attrs.type === 'external' && node.attrs.url) {
		return `[External Media](${node.attrs.url})`;
	}

	return '';
}

/**
 * Process table node
 */
function processTable(node: AdfNode): string {
	if (!node.content) {
		return '';
	}

	const rows: string[][] = [];

	// Process table rows
	node.content.forEach((row) => {
		if (row.type === 'tableRow' && row.content) {
			const cells: string[] = [];

			row.content.forEach((cell) => {
				if (
					(cell.type === 'tableCell' ||
						cell.type === 'tableHeader') &&
					cell.content
				) {
					const cellContent = cell.content
						.map((cellNode) => processAdfNode(cellNode))
						.join('');
					cells.push(cellContent.trim());
				}
			});

			if (cells.length > 0) {
				rows.push(cells);
			}
		}
	});

	if (rows.length === 0) {
		return '';
	}

	// Create markdown table
	const columnCount = Math.max(...rows.map((row) => row.length));

	// Ensure all rows have the same number of columns
	const normalizedRows = rows.map((row) => {
		while (row.length < columnCount) {
			row.push('');
		}
		return row;
	});

	// Create header row
	const headerRow = normalizedRows[0].map((cell) => cell || '');

	// Create separator row
	const separatorRow = headerRow.map(() => '---');

	// Create content rows
	const contentRows = normalizedRows.slice(1);

	// Build the table
	const tableRows = [
		headerRow.join(' | '),
		separatorRow.join(' | '),
		...contentRows.map((row) => row.join(' | ')),
	];

	return tableRows.join('\n');
}

/**
 * Process text node
 */
function processText(node: AdfNode): string {
	if (!node.text) {
		return '';
	}

	let text = node.text;

	// Apply marks if available
	if (node.marks && node.marks.length > 0) {
		// Sort marks to ensure consistent application (process links last)
		const sortedMarks = [...node.marks].sort((a, b) => {
			if (a.type === 'link') return 1;
			if (b.type === 'link') return -1;
			return 0;
		});

		// Apply non-link marks first
		sortedMarks.forEach((mark) => {
			switch (mark.type) {
				case 'strong':
					text = `**${text}**`;
					break;
				case 'em':
					text = `*${text}*`;
					break;
				case 'code':
					text = `\`${text}\``;
					break;
				case 'strike':
					text = `~~${text}~~`;
					break;
				case 'underline':
					// Markdown doesn't support underline, use emphasis instead
					text = `_${text}_`;
					break;
				case 'textColor':
					// Ignore in Markdown (no equivalent)
					break;
				case 'superscript':
					// Some flavors of Markdown support ^superscript^
					text = `^${text}^`;
					break;
				case 'subscript':
					// Some flavors of Markdown support ~subscript~
					// but this conflicts with strikethrough
					text = `~${text}~`;
					break;
				case 'link':
					if (mark.attrs && mark.attrs.href) {
						text = `[${text}](${mark.attrs.href})`;
					}
					break;
			}
		});
	}

	return text;
}

/**
 * Process inline card node (references to Jira issues, Confluence pages, etc.)
 */
function processInlineCard(node: AdfNode): string {
	if (!node.attrs) {
		return '[Link]';
	}

	const url = (node.attrs.url as string) || '';
	// Extract the name/ID from the URL if possible
	const match = url.match(/\/([^/]+)$/);
	const name = match ? match[1] : 'Link';

	return `[${name}](${url})`;
}

/**
 * Process emoji node
 */
function processEmoji(node: AdfNode): string {
	if (!node.attrs) {
		return '';
	}

	// Return shortName if available, otherwise fallback
	return (
		(node.attrs.shortName as string) || (node.attrs.id as string) || '📝'
	);
}

/**
 * Process date node
 */
function processDate(node: AdfNode): string {
	if (!node.attrs) {
		return '';
	}

	return (node.attrs.timestamp as string) || '';
}

/**
 * Process status node (status lozenges)
 */
function processStatus(node: AdfNode): string {
	if (!node.attrs) {
		return '[Status]';
	}

	const text = (node.attrs.text as string) || 'Status';
	// Markdown doesn't support colored lozenges, so we use brackets
	return `[${text}]`;
}

/**
 * The following functions have been removed since they are not needed in Bitbucket:
 * - textToAdf (removed)
 * - markdownToAdf (removed)
 *
 * Unlike Jira, Bitbucket's API natively accepts Markdown content,
 * so there's no need to convert Markdown to ADF when sending data.
 * Instead, see formatter.util.ts and optimizeBitbucketMarkdown() for
 * Bitbucket-specific markdown handling.
 */

```
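
The conversion path can be exercised directly. The sketch below feeds a small hand-written ADF document to `adfToMarkdown`; the sample document, the expected output in the comment, and the import path are illustrative assumptions.

```typescript
// Minimal sketch (not part of the repository): converting a small ADF
// document to Markdown. The sample document is hand-written for
// illustration; the import path is an assumption.
import { adfToMarkdown } from './adf.util.js';

const sampleAdf = {
	version: 1,
	type: 'doc',
	content: [
		{
			type: 'heading',
			attrs: { level: 2 },
			content: [{ type: 'text', text: 'Release notes' }],
		},
		{
			type: 'paragraph',
			content: [
				{ type: 'text', text: 'Fixed by ' },
				{
					type: 'text',
					text: 'auth module',
					marks: [{ type: 'strong' }],
				},
			],
		},
	],
};

// Expected output (roughly): "## Release notes\n\nFixed by **auth module**"
console.log(adfToMarkdown(sampleAdf));
```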

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.repositories.service.test.ts:
--------------------------------------------------------------------------------

```typescript
import atlassianRepositoriesService from './vendor.atlassian.repositories.service.js';
import { getAtlassianCredentials } from '../utils/transport.util.js';
import { config } from '../utils/config.util.js';
import { McpError } from '../utils/error.util.js';
import atlassianWorkspacesService from './vendor.atlassian.workspaces.service.js';
import { Repository } from './vendor.atlassian.repositories.types.js';
import { Logger } from '../utils/logger.util.js';

// Instantiate logger for the test file
const logger = Logger.forContext(
	'services/vendor.atlassian.repositories.service.test.ts',
);

describe('Vendor Atlassian Repositories Service', () => {
	// Load configuration and check for credentials before all tests
	beforeAll(() => {
		config.load(); // Ensure config is loaded
		const credentials = getAtlassianCredentials();
		if (!credentials) {
			console.warn(
				'Skipping Atlassian Repositories Service tests: No credentials available',
			);
		}
	});

	// Helper function to skip tests when credentials are missing
	const skipIfNoCredentials = () => !getAtlassianCredentials();

	// Helper to get a valid workspace slug for testing
	async function getFirstWorkspaceSlug(): Promise<string | null> {
		if (skipIfNoCredentials()) return null;

		try {
			const listResult = await atlassianWorkspacesService.list({
				pagelen: 1,
			});
			return listResult.values.length > 0
				? listResult.values[0].workspace.slug
				: null;
		} catch (error) {
			console.warn(
				'Could not fetch workspace list for repository tests:',
				error,
			);
			return null;
		}
	}

	describe('list', () => {
		it('should return a list of repositories for a valid workspace', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			const result = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
			});
			logger.debug('List repositories result:', result);

			// Verify the response structure based on RepositoriesResponse
			expect(result).toHaveProperty('values');
			expect(Array.isArray(result.values)).toBe(true);
			expect(result).toHaveProperty('pagelen'); // Bitbucket uses pagelen
			expect(result).toHaveProperty('page');
			expect(result).toHaveProperty('size');

			if (result.values.length > 0) {
				// Verify the structure of the first repository in the list
				verifyRepositoryStructure(result.values[0]);
			}
		}, 30000); // Increased timeout

		it('should support pagination with pagelen and page', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// Get first page with limited results
			const result = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
				pagelen: 1,
			});

			expect(result).toHaveProperty('pagelen');
			// Allow pagelen to be greater than requested if API enforces minimum
			expect(result.pagelen).toBeGreaterThanOrEqual(1);
			expect(result.values.length).toBeLessThanOrEqual(result.pagelen);

			// If there are more items than the page size, expect pagination links
			if (result.size > result.pagelen) {
				expect(result).toHaveProperty('next');

				// Test requesting page 2 if available
				// Extract page parameter from next link if available
				if (result.next) {
					const nextPageUrl = new URL(result.next);
					const pageParam = nextPageUrl.searchParams.get('page');

					if (pageParam) {
						const page2 = parseInt(pageParam, 10);
						const page2Result =
							await atlassianRepositoriesService.list({
								workspace: workspaceSlug,
								pagelen: 1,
								page: page2,
							});

						expect(page2Result).toHaveProperty('page', page2);

						// If both pages have values, verify they're different repositories
						if (
							result.values.length > 0 &&
							page2Result.values.length > 0
						) {
							expect(result.values[0].uuid).not.toBe(
								page2Result.values[0].uuid,
							);
						}
					}
				}
			}
		}, 30000);

		it('should support filtering with q parameter', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// First get all repositories to find a potential query term
			const allRepos = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
			});

			// Skip if no repositories available
			if (allRepos.values.length === 0) {
				console.warn(
					'Skipping query filtering test: No repositories available',
				);
				return;
			}

			// Use the first repo's name as a query term
			const firstRepo = allRepos.values[0];
			// Take just the first word or first few characters to make filter less restrictive
			const queryTerm = firstRepo.name.split(' ')[0];

			// Test the query filter
			try {
				const result = await atlassianRepositoriesService.list({
					workspace: workspaceSlug,
					q: `name~"${queryTerm}"`,
				});

				// Verify basic response structure
				expect(result).toHaveProperty('values');

				// All returned repos should contain the query term in their name
				if (result.values.length > 0) {
					const nameMatches = result.values.some((repo) =>
						repo.name
							.toLowerCase()
							.includes(queryTerm.toLowerCase()),
					);
					expect(nameMatches).toBe(true);
				}
			} catch (error) {
				// If filtering isn't fully supported, we just log it
				console.warn(
					'Query filtering test encountered an error:',
					error instanceof Error ? error.message : String(error),
				);
			}
		}, 30000);

		it('should support sorting with sort parameter', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// Skip this test if fewer than 2 repositories (can't verify sort order)
			const checkResult = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
				pagelen: 2,
			});

			if (checkResult.values.length < 2) {
				console.warn(
					'Skipping sort test: Need at least 2 repositories to verify sort order',
				);
				return;
			}

			// Test sorting by name ascending
			const resultAsc = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
				sort: 'name',
				pagelen: 2,
			});

			// Test sorting by name descending
			const resultDesc = await atlassianRepositoriesService.list({
				workspace: workspaceSlug,
				sort: '-name',
				pagelen: 2,
			});

			// Verify basic response structure
			expect(resultAsc).toHaveProperty('values');
			expect(resultDesc).toHaveProperty('values');

			// Ensure both responses have at least 2 items to compare
			if (resultAsc.values.length >= 2 && resultDesc.values.length >= 2) {
				// For ascending order, first item should come before second alphabetically
				const ascNameComparison =
					resultAsc.values[0].name.localeCompare(
						resultAsc.values[1].name,
					);
				// For descending order, first item should come after second alphabetically
				const descNameComparison =
					resultDesc.values[0].name.localeCompare(
						resultDesc.values[1].name,
					);

				// Ascending should be ≤ 0 (first before or equal to second)
				expect(ascNameComparison).toBeLessThanOrEqual(0);
				// Descending should be ≥ 0 (first after or equal to second)
				expect(descNameComparison).toBeGreaterThanOrEqual(0);
			}
		}, 30000);

		it('should throw an error for an invalid workspace', async () => {
			if (skipIfNoCredentials()) return;

			const invalidWorkspace =
				'this-workspace-definitely-does-not-exist-12345';

			// Expect the service call to reject with an McpError (likely 404)
			await expect(
				atlassianRepositoriesService.list({
					workspace: invalidWorkspace,
				}),
			).rejects.toThrow();

			// Check for specific error properties
			try {
				await atlassianRepositoriesService.list({
					workspace: invalidWorkspace,
				});
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
			}
		}, 30000);
	});

	describe('get', () => {
		// Helper to get a valid repo for testing 'get'
		async function getFirstRepositoryInfo(): Promise<{
			workspace: string;
			repoSlug: string;
		} | null> {
			if (skipIfNoCredentials()) return null;

			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) return null;

			try {
				const listResult = await atlassianRepositoriesService.list({
					workspace: workspaceSlug,
					pagelen: 1,
				});

				if (listResult.values.length === 0) return null;

				const fullName = listResult.values[0].full_name;
				// full_name is in format "workspace/repo_slug"
				const [workspace, repoSlug] = fullName.split('/');

				return { workspace, repoSlug };
			} catch (error) {
				console.warn(
					"Could not fetch repository list for 'get' test setup:",
					error,
				);
				return null;
			}
		}

		it('should return details for a valid workspace and repo_slug', async () => {
			const repoInfo = await getFirstRepositoryInfo();
			if (!repoInfo) {
				console.warn('Skipping get test: No repository found.');
				return;
			}

			const result = await atlassianRepositoriesService.get({
				workspace: repoInfo.workspace,
				repo_slug: repoInfo.repoSlug,
			});

			// Verify the response structure based on RepositoryDetailed
			expect(result).toHaveProperty('uuid');
			expect(result).toHaveProperty(
				'full_name',
				`${repoInfo.workspace}/${repoInfo.repoSlug}`,
			);
			expect(result).toHaveProperty('name');
			expect(result).toHaveProperty('type', 'repository');
			expect(result).toHaveProperty('is_private');
			expect(result).toHaveProperty('links');
			expect(result.links).toHaveProperty('html');
			expect(result).toHaveProperty('owner');
			expect(result.owner).toHaveProperty('type');
		}, 30000);

		it('should throw an McpError for a non-existent repo_slug', async () => {
			const workspaceSlug = await getFirstWorkspaceSlug();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			const invalidRepoSlug = 'this-repo-definitely-does-not-exist-12345';

			// Expect the service call to reject with an McpError (likely 404)
			await expect(
				atlassianRepositoriesService.get({
					workspace: workspaceSlug,
					repo_slug: invalidRepoSlug,
				}),
			).rejects.toThrow(McpError);

			// Check for specific error properties
			try {
				await atlassianRepositoriesService.get({
					workspace: workspaceSlug,
					repo_slug: invalidRepoSlug,
				});
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
			}
		}, 30000);

		it('should throw an McpError for a non-existent workspace', async () => {
			if (skipIfNoCredentials()) return;

			const invalidWorkspace =
				'this-workspace-definitely-does-not-exist-12345';
			const invalidRepoSlug = 'some-repo';

			// Expect the service call to reject with an McpError (likely 404)
			await expect(
				atlassianRepositoriesService.get({
					workspace: invalidWorkspace,
					repo_slug: invalidRepoSlug,
				}),
			).rejects.toThrow(McpError);

			// Check for specific error properties
			try {
				await atlassianRepositoriesService.get({
					workspace: invalidWorkspace,
					repo_slug: invalidRepoSlug,
				});
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
			}
		}, 30000);
	});
});

// Helper function to verify the Repository structure
function verifyRepositoryStructure(repo: Repository) {
	expect(repo).toHaveProperty('uuid');
	expect(repo).toHaveProperty('name');
	expect(repo).toHaveProperty('full_name');
	expect(repo).toHaveProperty('is_private');
	expect(repo).toHaveProperty('links');
	expect(repo).toHaveProperty('owner');
	expect(repo).toHaveProperty('type', 'repository');
}

```

--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------

```json
{
  "compilerOptions": {
    /* Visit https://aka.ms/tsconfig to read more about this file */

    /* Projects */
    // "incremental": true,                              /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
    // "composite": true,                                /* Enable constraints that allow a TypeScript project to be used with project references. */
    // "tsBuildInfoFile": "./.tsbuildinfo",              /* Specify the path to .tsbuildinfo incremental compilation file. */
    // "disableSourceOfProjectReferenceRedirect": true,  /* Disable preferring source files instead of declaration files when referencing composite projects. */
    // "disableSolutionSearching": true,                 /* Opt a project out of multi-project reference checking when editing. */
    // "disableReferencedProjectLoad": true,             /* Reduce the number of projects loaded automatically by TypeScript. */

    /* Language and Environment */
    "target": "ES2020",                                  /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
    "lib": ["ES2020"],                                   /* Specify a set of bundled library declaration files that describe the target runtime environment. */
    // "jsx": "preserve",                                /* Specify what JSX code is generated. */
    // "libReplacement": true,                           /* Enable lib replacement. */
    // "experimentalDecorators": true,                   /* Enable experimental support for legacy experimental decorators. */
    // "emitDecoratorMetadata": true,                    /* Emit design-type metadata for decorated declarations in source files. */
    // "jsxFactory": "",                                 /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
    // "jsxFragmentFactory": "",                         /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
    // "jsxImportSource": "",                            /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
    // "reactNamespace": "",                             /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
    // "noLib": true,                                    /* Disable including any library files, including the default lib.d.ts. */
    // "useDefineForClassFields": true,                  /* Emit ECMAScript-standard-compliant class fields. */
    // "moduleDetection": "auto",                        /* Control what method is used to detect module-format JS files. */

    /* Modules */
    "module": "NodeNext",                                /* Specify what module code is generated. */
    // "rootDir": "./",                                  /* Specify the root folder within your source files. */
    "moduleResolution": "NodeNext",                      /* Specify how TypeScript looks up a file from a given module specifier. */
    // "baseUrl": "./",                                  /* Specify the base directory to resolve non-relative module names. */
    // "paths": {},                                      /* Specify a set of entries that re-map imports to additional lookup locations. */
    // "rootDirs": [],                                   /* Allow multiple folders to be treated as one when resolving modules. */
    // "typeRoots": [],                                  /* Specify multiple folders that act like './node_modules/@types'. */
    // "types": [],                                      /* Specify type package names to be included without being referenced in a source file. */
    // "allowUmdGlobalAccess": true,                     /* Allow accessing UMD globals from modules. */
    // "moduleSuffixes": [],                             /* List of file name suffixes to search when resolving a module. */
    // "allowImportingTsExtensions": true,               /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */
    // "rewriteRelativeImportExtensions": true,          /* Rewrite '.ts', '.tsx', '.mts', and '.cts' file extensions in relative import paths to their JavaScript equivalent in output files. */
    // "resolvePackageJsonExports": true,                /* Use the package.json 'exports' field when resolving package imports. */
    // "resolvePackageJsonImports": true,                /* Use the package.json 'imports' field when resolving imports. */
    // "customConditions": [],                           /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */
    // "noUncheckedSideEffectImports": true,             /* Check side effect imports. */
    "resolveJsonModule": true,                           /* Enable importing .json files. */
    // "allowArbitraryExtensions": true,                 /* Enable importing files with any extension, provided a declaration file is present. */
    // "noResolve": true,                                /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */

    /* JavaScript Support */
    // "allowJs": true,                                  /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
    // "checkJs": true,                                  /* Enable error reporting in type-checked JavaScript files. */
    // "maxNodeModuleJsDepth": 1,                        /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */

    /* Emit */
    "declaration": true,                                 /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
    // "declarationMap": true,                           /* Create sourcemaps for d.ts files. */
    // "emitDeclarationOnly": true,                      /* Only output d.ts files and not JavaScript files. */
    // "sourceMap": true,                                /* Create source map files for emitted JavaScript files. */
    // "inlineSourceMap": true,                          /* Include sourcemap files inside the emitted JavaScript. */
    // "noEmit": true,                                   /* Disable emitting files from a compilation. */
    // "outFile": "./",                                  /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
    "outDir": "./dist",                                  /* Specify an output folder for all emitted files. */
    // "removeComments": true,                           /* Disable emitting comments. */
    // "importHelpers": true,                            /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
    // "downlevelIteration": true,                       /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
    // "sourceRoot": "",                                 /* Specify the root path for debuggers to find the reference source code. */
    // "mapRoot": "",                                    /* Specify the location where debugger should locate map files instead of generated locations. */
    // "inlineSources": true,                            /* Include source code in the sourcemaps inside the emitted JavaScript. */
    // "emitBOM": true,                                  /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
    // "newLine": "crlf",                                /* Set the newline character for emitting files. */
    // "stripInternal": true,                            /* Disable emitting declarations that have '@internal' in their JSDoc comments. */
    // "noEmitHelpers": true,                            /* Disable generating custom helper functions like '__extends' in compiled output. */
    // "noEmitOnError": true,                            /* Disable emitting files if any type checking errors are reported. */
    // "preserveConstEnums": true,                       /* Disable erasing 'const enum' declarations in generated code. */
    // "declarationDir": "./",                           /* Specify the output directory for generated declaration files. */

    /* Interop Constraints */
    "isolatedModules": true,
    // "verbatimModuleSyntax": true,                     /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */
    // "isolatedDeclarations": true,                     /* Require sufficient annotation on exports so other tools can trivially generate declaration files. */
    // "erasableSyntaxOnly": true,                       /* Do not allow runtime constructs that are not part of ECMAScript. */
    // "allowSyntheticDefaultImports": true,             /* Allow 'import x from y' when a module doesn't have a default export. */
    "esModuleInterop": true,                             /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
    // "preserveSymlinks": true,                         /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
    "forceConsistentCasingInFileNames": true,            /* Ensure that casing is correct in imports. */

    /* Type Checking */
    "strict": true,                                      /* Enable all strict type-checking options. */
    "noImplicitAny": true,                               /* Enable error reporting for expressions and declarations with an implied 'any' type. */
    "strictNullChecks": true,                            /* When type checking, take into account 'null' and 'undefined'. */
    // "strictFunctionTypes": true,                      /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
    // "strictBindCallApply": true,                      /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */
    // "strictPropertyInitialization": true,             /* Check for class properties that are declared but not set in the constructor. */
    // "strictBuiltinIteratorReturn": true,              /* Built-in iterators are instantiated with a 'TReturn' type of 'undefined' instead of 'any'. */
    // "noImplicitThis": true,                           /* Enable error reporting when 'this' is given the type 'any'. */
    // "useUnknownInCatchVariables": true,               /* Default catch clause variables as 'unknown' instead of 'any'. */
    // "alwaysStrict": true,                             /* Ensure 'use strict' is always emitted. */
    "noUnusedLocals": true,                              /* Enable error reporting when local variables aren't read. */
    "noUnusedParameters": true,                          /* Raise an error when a function parameter isn't read. */
    // "exactOptionalPropertyTypes": true,               /* Interpret optional property types as written, rather than adding 'undefined'. */
    "noImplicitReturns": true,                           /* Enable error reporting for codepaths that do not explicitly return in a function. */
    // "noFallthroughCasesInSwitch": true,               /* Enable error reporting for fallthrough cases in switch statements. */
    // "noUncheckedIndexedAccess": true,                 /* Add 'undefined' to a type when accessed using an index. */
    // "noImplicitOverride": true,                       /* Ensure overriding members in derived classes are marked with an override modifier. */
    // "noPropertyAccessFromIndexSignature": true,       /* Enforces using indexed accessors for keys declared using an indexed type. */
    // "allowUnusedLabels": true,                        /* Disable error reporting for unused labels. */
    // "allowUnreachableCode": true,                     /* Disable error reporting for unreachable code. */

    /* Completeness */
    // "skipDefaultLibCheck": true,                      /* Skip type checking .d.ts files that are included with TypeScript. */
    "skipLibCheck": true                                 /* Skip type checking all .d.ts files. */
  },
  "include": ["src/**/*"]
}

```
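
One practical consequence of the `NodeNext` module settings above is visible throughout the source files on this page: relative imports must reference the emitted `.js` extension even inside `.ts` files (e.g. `'../utils/logger.util.js'`). A minimal sketch, assuming the package is configured as an ES module (which this page does not show directly); the file name in the example is illustrative.

```typescript
// Under "module": "NodeNext" in an ESM package, relative specifiers must
// include the emitted extension, matching the import style used in the
// files above. The example file name is an assumption.
import { Logger } from '../utils/logger.util.js'; // correct: .js, not .ts
// import { Logger } from '../utils/logger.util'; // rejected under NodeNext ESM resolution

const log = Logger.forContext('examples/nodenext-imports.ts');
log.debug('NodeNext resolution requires explicit .js extensions');
```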

--------------------------------------------------------------------------------
/src/tools/atlassian.repositories.tool.ts:
--------------------------------------------------------------------------------

```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { Logger } from '../utils/logger.util.js';
import { formatErrorForMcpTool } from '../utils/error.util.js';
import {
	ListRepositoriesToolArgs,
	type ListRepositoriesToolArgsType,
	GetRepositoryToolArgs,
	type GetRepositoryToolArgsType,
	GetCommitHistoryToolArgs,
	type GetCommitHistoryToolArgsType,
	CreateBranchToolArgsSchema,
	type CreateBranchToolArgsType,
	CloneRepositoryToolArgs,
	type CloneRepositoryToolArgsType,
	GetFileContentToolArgs,
	type GetFileContentToolArgsType,
	ListBranchesToolArgs,
	type ListBranchesToolArgsType,
} from './atlassian.repositories.types.js';

// Import directly from specialized controllers
import { handleRepositoriesList } from '../controllers/atlassian.repositories.list.controller.js';
import { handleRepositoryDetails } from '../controllers/atlassian.repositories.details.controller.js';
import { handleCommitHistory } from '../controllers/atlassian.repositories.commit.controller.js';
import {
	handleCreateBranch,
	handleListBranches,
} from '../controllers/atlassian.repositories.branch.controller.js';
import {
	handleCloneRepository,
	handleGetFileContent,
} from '../controllers/atlassian.repositories.content.controller.js';

// Create a contextualized logger for this file
const toolLogger = Logger.forContext('tools/atlassian.repositories.tool.ts');

// Log tool initialization
toolLogger.debug('Bitbucket repositories tool initialized');

/**
 * MCP Tool: List Bitbucket Repositories
 *
 * Lists Bitbucket repositories within a workspace with optional filtering.
 * Returns a formatted markdown response with repository details.
 *
 * @param args - Tool arguments for filtering repositories
 * @returns MCP response with formatted repositories list
 * @throws Will return error message if repository listing fails
 */
async function listRepositories(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'listRepositories',
	);
	methodLogger.debug('Listing Bitbucket repositories with filters:', args);

	try {
		// Pass args directly to controller without any logic
		const result = await handleRepositoriesList(
			args as ListRepositoriesToolArgsType,
		);

		methodLogger.debug(
			'Successfully retrieved repositories from controller',
		);

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to list repositories', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: Get Bitbucket Repository Details
 *
 * Retrieves detailed information about a specific Bitbucket repository.
 * Returns a formatted markdown response with repository metadata.
 *
 * @param args - Tool arguments containing the workspace and repository slug
 * @returns MCP response with formatted repository details
 * @throws Will return error message if repository retrieval fails
 */
async function getRepository(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'getRepository',
	);
	methodLogger.debug('Getting repository details:', args);

	try {
		// Pass args directly to controller
		const result = await handleRepositoryDetails(
			args as GetRepositoryToolArgsType,
		);

		methodLogger.debug(
			'Successfully retrieved repository details from controller',
		);

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to get repository details', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: Get Bitbucket Commit History
 *
 * Retrieves the commit history for a specific repository.
 *
 * @param args Tool arguments including workspace/repo slugs and optional filters.
 * @returns MCP response with formatted commit history.
 * @throws Will return error message if history retrieval fails.
 */
async function handleGetCommitHistory(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'handleGetCommitHistory',
	);
	methodLogger.debug('Getting commit history with args:', args);

	try {
		// Pass args directly to controller
		const result = await handleCommitHistory(
			args as GetCommitHistoryToolArgsType,
		);

		methodLogger.debug(
			'Successfully retrieved commit history from controller',
		);

		return {
			content: [{ type: 'text' as const, text: result.content }],
		};
	} catch (error) {
		methodLogger.error('Failed to get commit history', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * Handler for adding a new branch.
 */
async function handleAddBranch(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'handleAddBranch',
	);
	try {
		methodLogger.debug('Creating new branch:', args);

		// Pass args directly to controller
		const result = await handleCreateBranch(
			args as CreateBranchToolArgsType,
		);

		methodLogger.debug('Successfully created branch via controller');

		return {
			content: [{ type: 'text' as const, text: result.content }],
		};
	} catch (error) {
		methodLogger.error('Failed to create branch', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * Handler for cloning a repository.
 */
async function handleRepoClone(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'handleRepoClone',
	);
	try {
		methodLogger.debug('Cloning repository:', args);

		// Pass args directly to controller
		const result = await handleCloneRepository(
			args as CloneRepositoryToolArgsType,
		);

		methodLogger.debug('Successfully cloned repository via controller');

		return {
			content: [{ type: 'text' as const, text: result.content }],
		};
	} catch (error) {
		methodLogger.error('Failed to clone repository', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * Handler for getting file content.
 */
async function getFileContent(args: Record<string, unknown>) {
	const methodLogger = toolLogger.forMethod('getFileContent');
	try {
		methodLogger.debug('Getting file content:', args);

		// Map tool args to controller args
		const typedArgs = args as GetFileContentToolArgsType;
		const result = await handleGetFileContent({
			workspaceSlug: typedArgs.workspaceSlug,
			repoSlug: typedArgs.repoSlug,
			path: typedArgs.filePath,
			ref: typedArgs.revision,
		});

		methodLogger.debug(
			'Successfully retrieved file content via controller',
		);

		return {
			content: [{ type: 'text' as const, text: result.content }],
		};
	} catch (error) {
		methodLogger.error('Failed to get file content', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: List Branches in a Bitbucket Repository
 *
 * Lists branches within a specific repository with optional filtering.
 * Returns a formatted markdown response with branch details.
 *
 * @param args - Tool arguments for identifying the repository and filtering branches
 * @returns MCP response with formatted branches list
 * @throws Will return error message if branch listing fails
 */
async function listBranches(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'listBranches',
	);
	methodLogger.debug('Listing branches with filters:', args);

	try {
		// Pass args directly to controller
		const result = await handleListBranches(
			args as ListBranchesToolArgsType,
		);

		methodLogger.debug('Successfully retrieved branches from controller');

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to list branches', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * Register all Bitbucket repository tools with the MCP server.
 */
function registerTools(server: McpServer) {
	const registerLogger = Logger.forContext(
		'tools/atlassian.repositories.tool.ts',
		'registerTools',
	);
	registerLogger.debug('Registering Repository tools...');

	// Register the list repositories tool
	server.tool(
		'bb_ls_repos',
		`Lists repositories within a workspace. If \`workspaceSlug\` is not provided, uses your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or the first workspace in your account). Filters repositories by the user's \`role\`, project key (\`projectKey\`), or a \`query\` string (searches name/description). Supports sorting via \`sort\` and pagination via \`limit\` and \`cursor\`. Pagination details are included at the end of the text content. Returns a formatted Markdown list with comprehensive details. Requires Bitbucket credentials.`,
		ListRepositoriesToolArgs.shape,
		listRepositories,
	);
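
	// Illustrative invocation with hypothetical values:
	// bb_ls_repos({ workspaceSlug: "my-team", query: "api", limit: 10 })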

	// Register the get repository details tool
	server.tool(
		'bb_get_repo',
		`Retrieves detailed information for a specific repository identified by \`workspaceSlug\` and \`repoSlug\`. Returns comprehensive repository details as formatted Markdown, including owner, main branch, comment/task counts, recent pull requests, and relevant links. Requires Bitbucket credentials.`,
		GetRepositoryToolArgs.shape,
		getRepository,
	);

	// Register the get commit history tool
	server.tool(
		'bb_get_commit_history',
		`Retrieves the commit history for a repository identified by \`workspaceSlug\` and \`repoSlug\`. Supports pagination via \`limit\` (number of commits per page) and \`cursor\` (which acts as the page number for this endpoint). Optionally filters history starting from a specific branch, tag, or commit hash using \`revision\`, or shows only commits affecting a specific file using \`path\`. Returns the commit history as formatted Markdown, including commit hash, author, date, and message. Pagination details are included at the end of the text content. Requires Bitbucket credentials to be configured.`,
		GetCommitHistoryToolArgs.shape,
		handleGetCommitHistory,
	);
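
	// Illustrative invocation with hypothetical values:
	// bb_get_commit_history({ workspaceSlug: "my-team", repoSlug: "project-api", revision: "main", limit: 10 })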

	// Add the new branch tool
	server.tool(
		'bb_add_branch',
		`Creates a new branch in a specified Bitbucket repository. Requires the workspace slug (\`workspaceSlug\`), repository slug (\`repoSlug\`), the desired new branch name (\`newBranchName\`), and the source branch or commit hash (\`sourceBranchOrCommit\`) to branch from. Requires repository write permissions. Returns a success message.`,
		CreateBranchToolArgsSchema.shape,
		handleAddBranch,
	);
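
	// Illustrative invocation with hypothetical values:
	// bb_add_branch({ workspaceSlug: "my-team", repoSlug: "project-api", newBranchName: "feature/new-thing", sourceBranchOrCommit: "main" })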

	// Register the clone repository tool
	server.tool(
		'bb_clone_repo',
		`Clones a Bitbucket repository to your local filesystem using SSH (preferred) or HTTPS. Requires Bitbucket credentials and proper SSH key setup for optimal usage.

**Parameters:**
- \`workspaceSlug\`: The Bitbucket workspace containing the repository (optional - will use default if not provided)
- \`repoSlug\`: The repository name to clone (required)
- \`targetPath\`: Parent directory where repository will be cloned (required)

**Path Handling:**
- Absolute paths are strongly recommended (e.g., "/home/user/projects" or "C:\\Users\\name\\projects")
- Relative paths (e.g., "./my-repos" or "../downloads") will be resolved relative to the server's working directory, which may not be what you expect
- The repository will be cloned into a subdirectory at \`targetPath/repoSlug\`
- Make sure you have write permissions to the target directory

**SSH Requirements:**
- SSH keys must be properly configured for Bitbucket
- SSH agent should be running with your keys added
- Will automatically fall back to HTTPS if SSH is unavailable

**Example Usage:**
\`\`\`
// Clone a repository to a specific absolute path
bb_clone_repo({repoSlug: "my-project", targetPath: "/home/user/projects"})

// Specify the workspace and use a relative path (less reliable)
bb_clone_repo({workspaceSlug: "my-team", repoSlug: "api-service", targetPath: "./downloads"})
\`\`\`

**Returns:** Success message with clone details or an error message with troubleshooting steps.`,
		CloneRepositoryToolArgs.shape,
		handleRepoClone,
	);

	// Register the get file content tool
	server.tool(
		'bb_get_file',
		`Retrieves the content of a file from a Bitbucket repository identified by \`workspaceSlug\` and \`repoSlug\`. Specify the file to retrieve using the \`filePath\` parameter. Optionally, you can specify a \`revision\` (branch name, tag, or commit hash) to retrieve the file from - if omitted, the repository's default branch is used. Returns the raw content of the file as text. Requires Bitbucket credentials.`,
		GetFileContentToolArgs.shape,
		getFileContent,
	);
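
	// Illustrative invocation with hypothetical values:
	// bb_get_file({ workspaceSlug: "my-team", repoSlug: "project-api", filePath: "README.md", revision: "main" })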

	// Register the list branches tool
	server.tool(
		'bb_list_branches',
		`Lists branches in a repository identified by \`workspaceSlug\` and \`repoSlug\`. Filters branches by an optional text \`query\` and supports custom \`sort\` order. Provides pagination via \`limit\` and \`cursor\`. Pagination details are included at the end of the text content. Returns branch details as Markdown with each branch's name, latest commit, and default merge strategy. Requires Bitbucket credentials.`,
		ListBranchesToolArgs.shape,
		listBranches,
	);
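
	// Illustrative invocation with hypothetical values:
	// bb_list_branches({ workspaceSlug: "my-team", repoSlug: "project-api", query: "release" })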

	registerLogger.debug('Successfully registered Repository tools');
}

export default { registerTools };

```

--------------------------------------------------------------------------------
/src/cli/atlassian.repositories.cli.test.ts:
--------------------------------------------------------------------------------

```typescript
import { CliTestUtil } from '../utils/cli.test.util.js';
import { getAtlassianCredentials } from '../utils/transport.util.js';
import { config } from '../utils/config.util.js';

describe('Atlassian Repositories CLI Commands', () => {
	// Load configuration and check for credentials before all tests
	beforeAll(() => {
		// Load configuration from all sources
		config.load();

		// Log warning if credentials aren't available
		const credentials = getAtlassianCredentials();
		if (!credentials) {
			console.warn(
				'Skipping Atlassian Repositories CLI tests: No credentials available',
			);
		}
	});

	// Helper function to skip tests when credentials are missing
	const skipIfNoCredentials = () => {
		const credentials = getAtlassianCredentials();
		if (!credentials) {
			return true;
		}
		return false;
	};

	// Helper to get a valid workspace slug for testing
	async function getWorkspaceSlug(): Promise<string | null> {
		// First, get a list of workspaces
		const workspacesResult = await CliTestUtil.runCommand([
			'ls-workspaces',
		]);

		// Skip if no workspaces are available
		if (
			workspacesResult.stdout.includes('No Bitbucket workspaces found.')
		) {
			return null; // Skip silently for this helper function
		}

		// Extract a workspace slug from the output
		const slugMatch = workspacesResult.stdout.match(
			/\*\*Slug\*\*:\s+([^\n]+)/,
		);
		if (!slugMatch || !slugMatch[1]) {
			return null; // Skip silently for this helper function
		}

		return slugMatch[1].trim();
	}

	describe('ls-repos command', () => {
		// Test listing repositories for a workspace
		it('should list repositories in a workspace', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get a valid workspace
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			// Run the CLI command
			const result = await CliTestUtil.runCommand([
				'ls-repos',
				'--workspace-slug',
				workspaceSlug,
			]);

			// Check command exit code
			expect(result.exitCode).toBe(0);

			// Verify the output format if there are repositories
			if (!result.stdout.includes('No repositories found')) {
				// Validate expected Markdown structure
				CliTestUtil.validateOutputContains(result.stdout, [
					'# Bitbucket Repositories',
					'**Name**',
					'**Full Name**',
					'**Owner**',
				]);

				// Validate Markdown formatting
				CliTestUtil.validateMarkdownOutput(result.stdout);
			}
		}, 30000); // Increased timeout for API call

		// Test with pagination
		it('should support pagination with --limit flag', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get a valid workspace
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			// Run the CLI command with limit
			const result = await CliTestUtil.runCommand([
				'ls-repos',
				'--workspace-slug',
				workspaceSlug,
				'--limit',
				'1',
			]);

			// Check command exit code
			expect(result.exitCode).toBe(0);

			// If there are multiple repositories, pagination section should be present
			if (
				!result.stdout.includes('No repositories found') &&
				result.stdout.includes('items remaining')
			) {
				CliTestUtil.validateOutputContains(result.stdout, [
					'Pagination',
					'Next cursor:',
				]);
			}
		}, 30000); // Increased timeout for API call

		// Test with query filtering
		it('should support filtering with --query parameter', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get a valid workspace
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			// Use a common term that might be in repository names
			const query = 'api';

			// Run the CLI command with query
			const result = await CliTestUtil.runCommand([
				'ls-repos',
				'--workspace-slug',
				workspaceSlug,
				'--query',
				query,
			]);

			// Check command exit code
			expect(result.exitCode).toBe(0);

			// Output might contain filtered results or no matches, both are valid
			if (result.stdout.includes('No repositories found')) {
				// Valid case - no repositories match the query
				CliTestUtil.validateOutputContains(result.stdout, [
					'No repositories found',
				]);
			} else {
				// Valid case - some repositories match, check formatting
				CliTestUtil.validateMarkdownOutput(result.stdout);
			}
		}, 30000); // Increased timeout for API call

		// Test with role filtering (if supported by the API)
		it('should support filtering by --role', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get a valid workspace
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			// Test one role - we pick 'contributor' as it's most likely to have results
			const result = await CliTestUtil.runCommand([
				'ls-repos',
				'--workspace-slug',
				workspaceSlug,
				'--role',
				'contributor',
			]);

			// Check command exit code
			expect(result.exitCode).toBe(0);

			// Output might contain filtered results or no matches, both are valid
			if (result.stdout.includes('No repositories found')) {
				// Valid case - no repositories match the role filter
				CliTestUtil.validateOutputContains(result.stdout, [
					'No repositories found',
				]);
			} else {
				// Valid case - some repositories match the role, check formatting
				CliTestUtil.validateMarkdownOutput(result.stdout);
			}
		}, 30000); // Increased timeout for API call

		// Test with sort parameter
		it('should support sorting with --sort parameter', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get a valid workspace
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			// Test sorting by name (alphabetical)
			const result = await CliTestUtil.runCommand([
				'ls-repos',
				'--workspace-slug',
				workspaceSlug,
				'--sort',
				'name',
			]);

			// Check command exit code
			expect(result.exitCode).toBe(0);

			// Sorting doesn't affect whether items are returned
			if (!result.stdout.includes('No repositories found')) {
				// Validate Markdown formatting
				CliTestUtil.validateMarkdownOutput(result.stdout);
			}
		}, 30000); // Increased timeout for API call

		// Test without workspace parameter (now optional)
		it('should use default workspace when workspace is not provided', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Run command without workspace parameter
			const result = await CliTestUtil.runCommand(['ls-repos']);

			// Should succeed with exit code 0 (using default workspace)
			expect(result.exitCode).toBe(0);

			// Output should contain either repositories or "No repositories found"
			const hasRepos = !result.stdout.includes('No repositories found');

			if (hasRepos) {
				// Validate expected Markdown structure if repos are found
				CliTestUtil.validateOutputContains(result.stdout, [
					'# Bitbucket Repositories',
				]);
			} else {
				// No repositories were found but command should still succeed
				CliTestUtil.validateOutputContains(result.stdout, [
					'No repositories found',
				]);
			}
		}, 15000);

		// Test with invalid parameter value
		it('should handle invalid limit values properly', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get a valid workspace
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			// Run with non-numeric limit
			const result = await CliTestUtil.runCommand([
				'ls-repos',
				'--workspace-slug',
				workspaceSlug,
				'--limit',
				'invalid',
			]);

			// This might either return an error (non-zero exit code) or handle it gracefully (zero exit code)
			// Both behaviors are acceptable, we just need to check that the command completes
			if (result.exitCode !== 0) {
				expect(result.stderr).toContain('error');
			} else {
				// Command completed without error, the implementation should handle it gracefully
				expect(result.exitCode).toBe(0);
			}
		}, 30000);
	});

	describe('get-repo command', () => {
		// Helper to get a valid repository for testing
		async function getRepositorySlug(
			workspaceSlug: string,
		): Promise<string | null> {
			// Get repositories for this workspace
			const reposResult = await CliTestUtil.runCommand([
				'ls-repos',
				'--workspace-slug',
				workspaceSlug,
			]);

			// Skip if no repositories are available
			if (reposResult.stdout.includes('No repositories found')) {
				return null; // Skip silently for this helper function
			}

			// Extract a repository slug from the output
			const repoMatch = reposResult.stdout.match(
				/\*\*Name\*\*:\s+([^\n]+)/,
			);
			if (!repoMatch || !repoMatch[1]) {
				console.warn(
					'Skipping test: Could not extract repository slug',
				);
				return null;
			}

			return repoMatch[1].trim();
		}

		// Test to fetch a specific repository
		it('should retrieve repository details', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get valid workspace and repository slugs
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			const repoSlug = await getRepositorySlug(workspaceSlug);
			if (!repoSlug) {
				return; // Skip if no valid repository found
			}

			// Run the get-repo command
			const result = await CliTestUtil.runCommand([
				'get-repo',
				'--workspace-slug',
				workspaceSlug,
				'--repo-slug',
				repoSlug,
			]);

			// Instead of expecting a success, check if the command ran
			// If access is unavailable, just note it and skip the test validation
			if (result.exitCode !== 0) {
				console.warn(
					'Skipping test validation: Could not retrieve repository details',
				);
				return;
			}

			// Verify the output structure and content
			CliTestUtil.validateOutputContains(result.stdout, [
				`# Repository: ${repoSlug}`,
				'## Basic Information',
				'**Name**',
				'**Full Name**',
				'**UUID**',
				'## Owner',
				'## Links',
			]);

			// Validate Markdown formatting
			CliTestUtil.validateMarkdownOutput(result.stdout);
		}, 30000); // Increased timeout for API calls

		// Test with missing workspace parameter
		it('should use default workspace when workspace is not provided', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Run command without the workspace parameter
			const result = await CliTestUtil.runCommand([
				'get-repo',
				'--repo-slug',
				'some-repo',
			]);

			// Now that workspace is optional, we should get a different error
			// (repository not found), but not a missing parameter error
			expect(result.exitCode).not.toBe(0);

			// Should NOT indicate missing required option for workspace
			expect(result.stderr).not.toContain('workspace-slug');
		}, 15000);

		// Test with missing repository parameter
		it('should fail when repository is not provided', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get a valid workspace
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			// Run command without the repository parameter
			const result = await CliTestUtil.runCommand([
				'get-repo',
				'--workspace-slug',
				workspaceSlug,
			]);

			// Should fail with non-zero exit code
			expect(result.exitCode).not.toBe(0);

			// Should indicate missing required option
			expect(result.stderr).toContain('required option');
		}, 15000);

		// Test with invalid repository slug
		it('should handle invalid repository slugs gracefully', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Get a valid workspace
			const workspaceSlug = await getWorkspaceSlug();
			if (!workspaceSlug) {
				return; // Skip if no valid workspace found
			}

			// Use a deliberately invalid repository slug
			const invalidSlug = 'invalid-repository-slug-that-does-not-exist';

			// Run command with invalid repository slug
			const result = await CliTestUtil.runCommand([
				'get-repo',
				'--workspace-slug',
				workspaceSlug,
				'--repo-slug',
				invalidSlug,
			]);

			// Should fail with non-zero exit code
			expect(result.exitCode).not.toBe(0);

			// Should contain error information
			expect(result.stderr).toContain('error');
		}, 30000);

		// Test with invalid workspace slug but valid repository format
		it('should handle invalid workspace slugs gracefully', async () => {
			if (skipIfNoCredentials()) {
				return;
			}

			// Use deliberately invalid workspace and repository slugs
			const invalidWorkspace = 'invalid-workspace-that-does-not-exist';
			const someRepo = 'some-repo';

			// Run command with invalid workspace slug
			const result = await CliTestUtil.runCommand([
				'get-repo',
				'--workspace-slug',
				invalidWorkspace,
				'--repo-slug',
				someRepo,
			]);

			// Should fail with non-zero exit code
			expect(result.exitCode).not.toBe(0);

			// Should contain error information
			expect(result.stderr).toContain('error');
		}, 30000);
	});
});

```

--------------------------------------------------------------------------------
/src/utils/formatter.util.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Standardized formatting utilities for consistent output across all CLI and Tool interfaces.
 * All formatters should use these helpers to keep output uniform.
 */

import { Logger } from './logger.util.js'; // Ensure logger is imported
import { ResponsePagination } from '../types/common.types.js';

// const formatterLogger = Logger.forContext('utils/formatter.util.ts'); // Define logger instance - Removed as unused

/**
 * Format a date in a standardized way: YYYY-MM-DD HH:MM:SS UTC
 * @param dateString - ISO date string or Date object
 * @returns Formatted date string
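 * @example
 * // Illustrative: an ISO timestamp such as "2024-01-15T10:30:00.000Z"
 * // is rendered as "2024-01-15 10:30:00 UTC".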
 */
export function formatDate(dateString?: string | Date): string {
	if (!dateString) {
		return 'Not available';
	}

	try {
		const date =
			typeof dateString === 'string' ? new Date(dateString) : dateString;

		// Format: YYYY-MM-DD HH:MM:SS UTC
		return date
			.toISOString()
			.replace('T', ' ')
			.replace(/\.\d+Z$/, ' UTC');
	} catch {
		return 'Invalid date';
	}
}

/**
 * Format a URL as a markdown link
 * @param url - URL to format
 * @param title - Link title
 * @returns Formatted markdown link
 */
export function formatUrl(url?: string, title?: string): string {
	if (!url) {
		return 'Not available';
	}

	const linkTitle = title || url;
	return `[${linkTitle}](${url})`;
}

/**
 * Format pagination information in a standardized way for CLI output.
 * Includes separator, item counts, availability message, next page instructions, and timestamp.
 * @param pagination - The ResponsePagination object containing pagination details.
 * @returns Formatted pagination footer string for CLI.
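 * @example
 * // Illustrative: a pagination object like { count: 25, total: 100, hasMore: true, nextCursor: '2' }
 * // yields a separator line, a "Showing 25 of 100 total items." line, the next-cursor hint,
 * // and a retrieval timestamp.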
 */
export function formatPagination(pagination: ResponsePagination): string {
	const methodLogger = Logger.forContext(
		'utils/formatter.util.ts',
		'formatPagination',
	);
	const parts: string[] = [formatSeparator()]; // Start with separator

	const { count = 0, hasMore, nextCursor, total, page } = pagination;

	// Showing count and potentially total
	if (total !== undefined && total >= 0) {
		parts.push(`*Showing ${count} of ${total} total items.*`);
	} else if (count >= 0) {
		parts.push(`*Showing ${count} item${count !== 1 ? 's' : ''}.*`);
	}

	// More results availability
	if (hasMore) {
		parts.push('More results are available.');
	}

	// Include the actual cursor value for programmatic use
	if (hasMore && nextCursor) {
		parts.push(`*Next cursor: \`${nextCursor}\`*`);
		// Assuming nextCursor holds the next page number for Bitbucket
		parts.push(`*Use --cursor ${nextCursor} to view more.*`);
	} else if (hasMore && page !== undefined) {
		// Fallback if nextCursor wasn't parsed but page exists
		const nextPage = page + 1;
		parts.push(`*Next cursor: \`${nextPage}\`*`);
		parts.push(`*Use --cursor ${nextPage} to view more.*`);
	}

	// Add standard timestamp
	parts.push(`*Information retrieved at: ${formatDate(new Date())}*`);

	const result = parts.join('\n').trim(); // Join with newline
	methodLogger.debug(`Formatted pagination footer: ${result}`);
	return result;
}

/**
 * Format a heading with consistent style
 * @param text - Heading text
 * @param level - Heading level (1-6)
 * @returns Formatted heading
 */
export function formatHeading(text: string, level: number = 1): string {
	const validLevel = Math.min(Math.max(level, 1), 6);
	const prefix = '#'.repeat(validLevel);
	return `${prefix} ${text}`;
}

/**
 * Format a list of key-value pairs as a bullet list
 * @param items - Object with key-value pairs
 * @param keyFormatter - Optional function to format keys
 * @returns Formatted bullet list
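 * @example
 * // Illustrative: { Name: 'api-service', URL: 'https://example.org' }
 * // becomes "- **Name**: api-service" and "- **URL**: [https://example.org](https://example.org)".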
 */
export function formatBulletList(
	items: Record<string, unknown>,
	keyFormatter?: (key: string) => string,
): string {
	const lines: string[] = [];

	for (const [key, value] of Object.entries(items)) {
		if (value === undefined || value === null) {
			continue;
		}

		const formattedKey = keyFormatter ? keyFormatter(key) : key;
		const formattedValue = formatValue(value);
		lines.push(`- **${formattedKey}**: ${formattedValue}`);
	}

	return lines.join('\n');
}

/**
 * Format a value based on its type
 * @param value - Value to format
 * @returns Formatted value
 */
function formatValue(value: unknown): string {
	if (value === undefined || value === null) {
		return 'Not available';
	}

	if (value instanceof Date) {
		return formatDate(value);
	}

	// Handle URL objects with url and title properties
	if (typeof value === 'object' && value !== null && 'url' in value) {
		const urlObj = value as { url: string; title?: string };
		if (typeof urlObj.url === 'string') {
			return formatUrl(urlObj.url, urlObj.title);
		}
	}

	if (typeof value === 'string') {
		// Check if it's a URL
		if (value.startsWith('http://') || value.startsWith('https://')) {
			return formatUrl(value);
		}

		// Check if it might be a date
		if (/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/.test(value)) {
			return formatDate(value);
		}

		return value;
	}

	if (typeof value === 'boolean') {
		return value ? 'Yes' : 'No';
	}

	return String(value);
}

/**
 * Format a separator line
 * @returns Separator line
 */
export function formatSeparator(): string {
	return '---';
}

/**
 * Format a numbered list of items
 * @param items - Array of items to format
 * @param formatter - Function to format each item
 * @returns Formatted numbered list
 */
export function formatNumberedList<T>(
	items: T[],
	formatter: (item: T, index: number) => string,
): string {
	if (items.length === 0) {
		return 'No items.';
	}

	return items.map((item, index) => formatter(item, index)).join('\n\n');
}

/**
 * Format a raw diff output for display
 *
 * Parses and formats a raw unified diff string into a Markdown
 * formatted display with proper code block syntax highlighting.
 *
 * @param {string} rawDiff - The raw diff content from the API
 * @param {number} maxFiles - Maximum number of files to display in detail (optional, default: 5)
 * @param {number} maxLinesPerFile - Maximum number of lines to display per file (optional, default: 100)
 * @returns {string} Markdown formatted diff content
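 * @example
 * // Illustrative: each file in the raw diff becomes a "### <path>" heading followed by a
 * // fenced diff block; output is truncated after maxFiles files and maxLinesPerFile lines per file.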
 */
export function formatDiff(
	rawDiff: string,
	maxFiles: number = 5,
	maxLinesPerFile: number = 100,
): string {
	if (!rawDiff || rawDiff.trim() === '') {
		return '*No changes found in this pull request.*';
	}

	const lines = rawDiff.split('\n');
	const formattedLines: string[] = [];
	let currentFile = '';
	let fileCount = 0;
	let inFile = false;
	let truncated = false;
	let lineCount = 0;

	for (const line of lines) {
		// New file is marked by a line starting with "diff --git"
		if (line.startsWith('diff --git')) {
			if (inFile) {
				// Close previous file code block
				formattedLines.push('```');
				formattedLines.push('');
			}

			// Only process up to maxFiles
			fileCount++;
			if (fileCount > maxFiles) {
				truncated = true;
				break;
			}

			// Extract filename
			const filePath = line.match(/diff --git a\/(.*) b\/(.*)/);
			currentFile = filePath ? filePath[1] : 'unknown file';
			formattedLines.push(`### ${currentFile}`);
			formattedLines.push('');
			formattedLines.push('```diff');
			inFile = true;
			lineCount = 0;
		} else if (inFile) {
			lineCount++;

			// Truncate files that are too long
			if (lineCount > maxLinesPerFile) {
				formattedLines.push(
					'// ... more lines omitted for brevity ...',
				);
				formattedLines.push('```');
				formattedLines.push('');
				inFile = false;
				continue;
			}

			// Pass diff lines through unchanged (+ additions, - deletions,
			// @@ hunk headers, and context lines); the surrounding diff code
			// fence provides the syntax highlighting.
			formattedLines.push(line);
		}
	}

	// Close the last code block if necessary
	if (inFile) {
		formattedLines.push('```');
	}

	// Add truncation notice if we limited the output
	if (truncated) {
		formattedLines.push('');
		formattedLines.push(
			`*Output truncated. Only showing the first ${maxFiles} files.*`,
		);
	}

	return formattedLines.join('\n');
}

/**
 * Optimizes markdown content to address Bitbucket Cloud's rendering quirks
 *
 * IMPORTANT: This function does NOT convert between formats (unlike Jira's ADF conversion).
 * Bitbucket Cloud API natively accepts and returns markdown format. This function specifically
 * addresses documented rendering issues in Bitbucket's markdown renderer by applying targeted
 * formatting adjustments for better display in the Bitbucket UI.
 *
 * Known Bitbucket rendering issues this function fixes:
 * - List spacing and indentation (prevents items from concatenating on a single line)
 * - Code block formatting (addresses BCLOUD-20503 and similar bugs)
 * - Nested list indentation (ensures proper hierarchy display)
 * - Inline code formatting (adds proper spacing around backticks)
 * - Diff syntax preservation (maintains +/- at line starts)
 * - Excessive line break normalization
 * - Heading spacing consistency
 *
 * Use this function for both:
 * - Content received FROM the Bitbucket API (to properly display in CLI/tools)
 * - Content being sent TO the Bitbucket API (to ensure proper rendering in Bitbucket UI)
 *
 * @param {string} markdown - The original markdown content
 * @returns {string} Optimized markdown with workarounds for Bitbucket rendering issues
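 * @example
 * // Illustrative: a heading like "# **Release Notes**" is rewritten as "# Release Notes",
 * // list items gain blank lines between them, and inline code such as `value` is padded
 * // with surrounding spaces so Bitbucket renders it correctly.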
 */
export function optimizeBitbucketMarkdown(markdown: string): string {
	const methodLogger = Logger.forContext(
		'utils/formatter.util.ts',
		'optimizeBitbucketMarkdown',
	);

	if (!markdown || markdown.trim() === '') {
		return markdown;
	}

	methodLogger.debug('Optimizing markdown for Bitbucket rendering');

	// First, let's extract code blocks to protect them from other transformations
	const codeBlocks: string[] = [];
	let optimized = markdown.replace(
		/```(\w*)\n([\s\S]*?)```/g,
		(_match, language, code) => {
			// Store the code block and replace with a placeholder
			const placeholder = `__CODE_BLOCK_${codeBlocks.length}__`;
			codeBlocks.push(`\n\n\`\`\`${language}\n${code}\n\`\`\`\n\n`);
			return placeholder;
		},
	);

	// Fix numbered lists with proper spacing
	// Match numbered lists (1. Item) and ensure proper spacing between items
	optimized = optimized.replace(
		/^(\d+\.)\s+(.*?)$/gm,
		(_match, number, content) => {
			// Keep the list item and ensure it ends with double line breaks if it doesn't already
			return `${number} ${content.trim()}\n\n`;
		},
	);

	// Fix bullet lists with proper spacing
	optimized = optimized.replace(
		/^(\s*)[-*]\s+(.*?)$/gm,
		(_match, indent, content) => {
			// Ensure proper indentation and spacing for bullet lists
			return `${indent}- ${content.trim()}\n\n`;
		},
	);

	// Ensure nested lists have proper indentation
	// Matches lines that are part of nested lists and ensures proper indentation
	// REMOVED: This step added excessive leading spaces causing Bitbucket to treat lists as code blocks
	// optimized = optimized.replace(
	// 	/^(\s+)[-*]\s+(.*?)$/gm,
	// 	(_match, indent, content) => {
	// 		// For nested items, ensure proper indentation (4 spaces per level)
	// 		const indentLevel = Math.ceil(indent.length / 2);
	// 		const properIndent = '    '.repeat(indentLevel);
	// 		return `${properIndent}- ${content.trim()}\n\n`;
	// 	},
	// );

	// Fix inline code formatting - ensure it has spaces around it for rendering
	optimized = optimized.replace(/`([^`]+)`/g, (_match, code) => {
		// Ensure inline code is properly formatted with spaces before and after
		// but avoid adding spaces within diff lines (+ or - prefixed)
		const trimmedCode = code.trim();
		const firstChar = trimmedCode.charAt(0);

		// Don't add spaces if it's part of a diff line
		if (firstChar === '+' || firstChar === '-') {
			return `\`${trimmedCode}\``;
		}

		return ` \`${trimmedCode}\` `;
	});

	// Ensure diff lines are properly preserved
	// This helps with preserving + and - prefixes in diff code blocks
	optimized = optimized.replace(
		/^([+-])(.*?)$/gm,
		(_match, prefix, content) => {
			return `${prefix}${content}`;
		},
	);

	// Remove excessive line breaks (more than 2 consecutive)
	optimized = optimized.replace(/\n{3,}/g, '\n\n');

	// Restore code blocks
	codeBlocks.forEach((codeBlock, index) => {
		optimized = optimized.replace(`__CODE_BLOCK_${index}__`, codeBlock);
	});

	// Fix double formatting issues (heading + bold) which Bitbucket renders incorrectly
	// Remove bold formatting from headings as headings are already emphasized
	optimized = optimized.replace(
		/^(#{1,6})\s+\*\*(.*?)\*\*\s*$/gm,
		(_match, hashes, content) => {
			return `\n${hashes} ${content.trim()}\n\n`;
		},
	);

	// Fix bold text within headings (alternative pattern)
	optimized = optimized.replace(
		/^(#{1,6})\s+(.*?)\*\*(.*?)\*\*(.*?)$/gm,
		(_match, hashes, before, boldText, after) => {
			// Combine text without bold formatting since heading already provides emphasis
			const cleanContent = (before + boldText + after).trim();
			return `\n${hashes} ${cleanContent}\n\n`;
		},
	);

	// Ensure headings have proper spacing (for headings without bold issues)
	optimized = optimized.replace(
		/^(#{1,6})\s+(.*?)$/gm,
		(_match, hashes, content) => {
			// Skip if already processed by bold removal above
			if (content.includes('**')) {
				return _match; // Leave as-is, will be handled by bold removal patterns
			}
			return `\n${hashes} ${content.trim()}\n\n`;
		},
	);

	// Ensure the content ends with a single line break
	optimized = optimized.trim() + '\n';

	methodLogger.debug('Markdown optimization complete');
	return optimized;
}

```

--------------------------------------------------------------------------------
/src/controllers/atlassian.pullrequests.formatter.ts:
--------------------------------------------------------------------------------

```typescript
import {
	PullRequest,
	PullRequestsResponse,
	DiffstatResponse,
	PullRequestComment,
} from '../services/vendor.atlassian.pullrequests.types.js';
import {
	formatHeading,
	formatBulletList,
	formatUrl,
	formatSeparator,
	formatNumberedList,
	formatDiff,
	formatDate,
	optimizeBitbucketMarkdown,
} from '../utils/formatter.util.js';

// Define the extended type here as well for clarity
interface PullRequestCommentWithSnippet extends PullRequestComment {
	codeSnippet?: string;
}

/**
 * Format a list of pull requests for display
 * @param pullRequestsData - Raw pull requests data from the API
 * @returns Formatted string with pull requests information in markdown format
 */
export function formatPullRequestsList(
	pullRequestsData: PullRequestsResponse,
): string {
	const pullRequests = pullRequestsData.values || [];

	if (pullRequests.length === 0) {
		return 'No pull requests found matching your criteria.';
	}

	const lines: string[] = [formatHeading('Bitbucket Pull Requests', 1), ''];

	// Format each pull request with its details
	const formattedList = formatNumberedList(pullRequests, (pr, _index) => {
		const itemLines: string[] = [];
		itemLines.push(formatHeading(`#${pr.id}: ${pr.title}`, 2));

		// Prepare the description (truncated if too long)
		let description = 'No description provided';
		if (pr.summary?.raw && pr.summary.raw.trim() !== '') {
			description = pr.summary.raw;
		} else if (
			pr.summary?.markup &&
			pr.summary.markup.trim() !== '' &&
			pr.summary.markup !== 'markdown'
		) {
			description = pr.summary.markup;
		}

		if (description.length > 150) {
			description = description.substring(0, 150) + '...';
		}

		// Basic information
		const properties: Record<string, unknown> = {
			ID: pr.id,
			State: pr.state,
			Author: pr.author?.display_name || pr.author?.nickname || 'Unknown',
			Created: formatDate(pr.created_on),
			Updated: formatDate(pr.updated_on),
			'Source Branch': pr.source?.branch?.name || 'Unknown',
			'Destination Branch': pr.destination?.branch?.name || 'Unknown',
			Description: description,
			URL: pr.links?.html?.href
				? formatUrl(pr.links.html.href, `PR #${pr.id}`)
				: 'N/A',
		};

		// Format as a bullet list
		itemLines.push(formatBulletList(properties, (key) => key));

		return itemLines.join('\n');
	});

	lines.push(formattedList);

	// Add standard footer with timestamp
	lines.push('\n\n' + formatSeparator());
	lines.push(`*Information retrieved at: ${formatDate(new Date())}*`);

	return lines.join('\n');
}

/**
 * Format detailed pull request information for display
 * @param pullRequest - Raw pull request data from the API
 * @param diffstat - Optional diffstat data from the API
 * @param rawDiff - Optional raw diff content from the API
 * @param comments - Optional comments data from the API
 * @returns Formatted string with pull request details in markdown format
 */
export function formatPullRequestDetails(
	pullRequest: PullRequest,
	diffstat?: DiffstatResponse | null,
	rawDiff?: string | null,
	comments?: PullRequestCommentWithSnippet[] | null,
): string {
	const lines: string[] = [
		formatHeading(
			`Pull Request #${pullRequest.id}: ${pullRequest.title}`,
			1,
		),
		'',
		formatHeading('Basic Information', 2),
	];

	// Format basic information as a bullet list
	const basicProperties: Record<string, unknown> = {
		State: pullRequest.state,
		Repository: pullRequest.destination.repository.full_name,
		Source: pullRequest.source.branch.name,
		Destination: pullRequest.destination.branch.name,
		Author: pullRequest.author?.display_name,
		Created: formatDate(pullRequest.created_on),
		Updated: formatDate(pullRequest.updated_on),
		'Comment Count': pullRequest.comment_count ?? 0,
		'Task Count': pullRequest.task_count ?? 0,
	};

	lines.push(formatBulletList(basicProperties, (key) => key));

	// Reviewers
	if (pullRequest.reviewers && pullRequest.reviewers.length > 0) {
		lines.push('');
		lines.push(formatHeading('Reviewers', 2));
		const reviewerLines: string[] = [];
		pullRequest.reviewers.forEach((reviewer) => {
			reviewerLines.push(`- ${reviewer.display_name}`);
		});
		lines.push(reviewerLines.join('\n'));
	}

	// Summary or rendered content for description if available
	if (pullRequest.summary?.raw) {
		lines.push('');
		lines.push(formatHeading('Description', 2));
		// Optimize the markdown content for better rendering
		lines.push(optimizeBitbucketMarkdown(pullRequest.summary.raw));
	} else if (pullRequest.rendered?.description?.raw) {
		lines.push('');
		lines.push(formatHeading('Description', 2));
		// Optimize the markdown content for better rendering
		lines.push(
			optimizeBitbucketMarkdown(pullRequest.rendered.description.raw),
		);
	}

	// File Changes Summary from Diffstat
	if (diffstat && diffstat.values && diffstat.values.length > 0) {
		lines.push('');
		lines.push(formatHeading('File Changes', 2));

		// Calculate summary statistics
		const totalFiles = diffstat.values.length;
		let totalAdditions = 0;
		let totalDeletions = 0;

		diffstat.values.forEach((file) => {
			if (file.lines_added) totalAdditions += file.lines_added;
			if (file.lines_removed) totalDeletions += file.lines_removed;
		});

		// Add summary line
		lines.push(
			`${totalFiles} file${totalFiles !== 1 ? 's' : ''} changed with ${totalAdditions} insertion${totalAdditions !== 1 ? 's' : ''} and ${totalDeletions} deletion${totalDeletions !== 1 ? 's' : ''}`,
		);

		// Add file list (limited to 10 files for brevity)
		const maxFilesToShow = 10;
		if (totalFiles > 0) {
			lines.push('');
			diffstat.values.slice(0, maxFilesToShow).forEach((file) => {
				const changes = [];
				if (file.lines_added) changes.push(`+${file.lines_added}`);
				if (file.lines_removed) changes.push(`-${file.lines_removed}`);
				const changeStr =
					changes.length > 0 ? ` (${changes.join(', ')})` : '';
				lines.push(
					`- \`${file.old?.path || file.new?.path}\`${changeStr}`,
				);
			});

			if (totalFiles > maxFilesToShow) {
				lines.push(
					`- ... and ${totalFiles - maxFilesToShow} more files`,
				);
			}
		}
	}

	// Detailed Diff Content
	if (rawDiff) {
		lines.push('');
		lines.push(formatHeading('Code Changes (Full Diff)', 2));
		lines.push(formatDiff(rawDiff));
	}

	// Comments Section (when included)
	if (comments && comments.length > 0) {
		lines.push('');
		lines.push(formatHeading('Comments', 2));

		// Group comments by parent (to handle threads)
		const topLevelComments: PullRequestCommentWithSnippet[] = [];
		const childComments: {
			[parentId: number]: PullRequestCommentWithSnippet[];
		} = {};

		// First pass: organize comments by parent
		comments.forEach((comment) => {
			if (comment.parent) {
				const parentId = comment.parent.id;
				if (!childComments[parentId]) {
					childComments[parentId] = [];
				}
				childComments[parentId].push(comment);
			} else {
				topLevelComments.push(comment);
			}
		});

		// Format each top-level comment and its replies (limit to 5 comments for conciseness)
		const maxCommentsToShow = 5;
		const commentsToShow = topLevelComments.slice(0, maxCommentsToShow);

		commentsToShow.forEach((comment, index) => {
			formatComment(comment, lines);

			// Add replies if any exist (limit to 3 replies per comment for conciseness)
			const replies = childComments[comment.id] || [];
			if (replies.length > 0) {
				lines.push('');
				lines.push('**Replies:**');

				const maxRepliesToShow = 3;
				const repliesToShow = replies.slice(0, maxRepliesToShow);

				repliesToShow.forEach((reply) => {
					lines.push('');
					lines.push(
						`> **${reply.user.display_name || 'Unknown User'}** (${formatDate(reply.created_on)})`,
					);
					// Optimize the markdown content for replies as well
					const optimizedReplyContent = optimizeBitbucketMarkdown(
						reply.content.raw,
					);
					lines.push(
						`> ${optimizedReplyContent.replace(/\n/g, '\n> ')}`,
					);
				});

				// Show message if more replies were omitted
				if (replies.length > maxRepliesToShow) {
					lines.push('');
					lines.push(
						`> *...and ${replies.length - maxRepliesToShow} more replies*`,
					);
				}
			}

			if (index < commentsToShow.length - 1) {
				lines.push('');
				lines.push(formatSeparator());
			}
		});

		// Show message if more comments were omitted
		if (topLevelComments.length > maxCommentsToShow) {
			lines.push('');
			lines.push(
				`*...and ${topLevelComments.length - maxCommentsToShow} more comments*`,
			);
		}

		// Add link to view all comments if available
		if (pullRequest.links?.comments?.href) {
			lines.push('');
			lines.push(
				`[View all comments in browser](${pullRequest.links.comments.href})`,
			);
		}
	} else if (comments && comments.length === 0) {
		lines.push('');
		lines.push(formatHeading('Comments', 2));
		lines.push('*No comments found on this pull request.*');
	}

	// Links
	lines.push('');
	lines.push(formatHeading('Links', 2));

	const links: string[] = [];

	if (pullRequest.links.html?.href) {
		links.push(
			`- ${formatUrl(pullRequest.links.html.href, 'View in Browser')}`,
		);
	}
	if (pullRequest.links.commits?.href) {
		links.push(`- ${formatUrl(pullRequest.links.commits.href, 'Commits')}`);
	}
	if (pullRequest.links.comments?.href) {
		links.push(
			`- ${formatUrl(pullRequest.links.comments.href, 'Comments')}`,
		);
	}
	if (pullRequest.links.diff?.href) {
		links.push(`- ${formatUrl(pullRequest.links.diff.href, 'Diff')}`);
	}

	lines.push(links.join('\n'));

	// Add standard footer with timestamp
	lines.push('\n\n' + formatSeparator());
	lines.push(`*Information retrieved at: ${formatDate(new Date())}*`);

	return lines.join('\n');
}

/**
 * Format pull request comments for display, including code snippets for inline comments.
 * @param comments - Array of comment objects, potentially enhanced with code snippets.
 * @param prId - The ID of the pull request to include in the title.
 * @returns Formatted string with pull request comments in markdown format.
 */
export function formatPullRequestComments(
	comments: PullRequestCommentWithSnippet[], // Accept the array of enhanced comments directly
	prId: string,
): string {
	const lines: string[] = [];

	lines.push(formatHeading(`Comments on Pull Request #${prId}`, 1));
	lines.push('');

	if (!comments || comments.length === 0) {
		lines.push('*No comments found on this pull request.*');
		lines.push('\n\n' + formatSeparator());
		lines.push(`*Information retrieved at: ${formatDate(new Date())}*`);
		return lines.join('\n');
	}

	// Group comments by parent (to handle threads)
	const topLevelComments: PullRequestCommentWithSnippet[] = [];
	const childComments: {
		[parentId: number]: PullRequestCommentWithSnippet[]; // Use enhanced type here too
	} = {};

	// First pass: organize comments by parent
	comments.forEach((comment) => {
		if (comment.parent) {
			const parentId = comment.parent.id;
			if (!childComments[parentId]) {
				childComments[parentId] = [];
			}
			childComments[parentId].push(comment);
		} else {
			topLevelComments.push(comment);
		}
	});

	// Format each top-level comment and its replies
	topLevelComments.forEach((comment, index) => {
		formatComment(comment, lines); // Pass the enhanced comment object

		// Add replies if any exist
		const replies = childComments[comment.id] || [];
		if (replies.length > 0) {
			lines.push('');
			lines.push('**Replies:**');

			replies.forEach((reply) => {
				lines.push('');
				lines.push(
					`> **${reply.user.display_name || 'Unknown User'}** (${formatDate(reply.created_on)})`,
				);
				// Optimize the markdown content for replies as well
				const optimizedReplyContent = optimizeBitbucketMarkdown(
					reply.content.raw,
				);
				lines.push(`> ${optimizedReplyContent.replace(/\n/g, '\n> ')}`);
			});
		}

		if (index < topLevelComments.length - 1) {
			lines.push('');
			lines.push(formatSeparator());
		}
	});

	lines.push('\n\n' + formatSeparator());
	lines.push(`*Information retrieved at: ${formatDate(new Date())}*`);

	return lines.join('\n');
}

/**
 * Helper function to format a single comment, including code snippet if available.
 * @param comment - The comment object (potentially with codeSnippet).
 * @param lines - Array of string lines to append to.
 */
function formatComment(
	comment: PullRequestCommentWithSnippet, // Use the enhanced type
	lines: string[],
): void {
	const author = comment.user.display_name || 'Unknown User';
	const headerText = comment.deleted
		? `[DELETED] Comment by ${author}`
		: `Comment by ${author}`;

	lines.push(formatHeading(headerText, 3));
	lines.push(`*Posted on ${formatDate(comment.created_on)}*`);

	if (comment.updated_on && comment.updated_on !== comment.created_on) {
		lines.push(`*Updated on ${formatDate(comment.updated_on)}*`);
	}

	// If it's an inline comment, show file, line info, and snippet
	if (comment.inline) {
		const fileInfo = `File: \`${comment.inline.path}\``;
		let lineInfo = '';

		if (
			comment.inline.from !== undefined &&
			comment.inline.to !== undefined
		) {
			lineInfo = `(changed line ${comment.inline.from} -> ${comment.inline.to})`; // Slightly clearer wording
		} else if (comment.inline.to !== undefined) {
			lineInfo = `(line ${comment.inline.to})`;
		}

		lines.push(`**Inline Comment: ${fileInfo}** ${lineInfo}`);

		// Add the code snippet if it exists
		if (comment.codeSnippet) {
			lines.push('');
			lines.push('```diff'); // Use diff language for syntax highlighting
			lines.push(comment.codeSnippet);
			lines.push('```');
		} else if (comment.links?.code?.href) {
			// Fallback link if snippet fetch failed or wasn't applicable
			lines.push(
				`[View code context in browser](${comment.links.code.href})`,
			);
		}
	}

	lines.push('');
	// Show specific message for deleted comments, otherwise show optimized raw content
	lines.push(
		comment.deleted
			? '*This comment has been deleted.*'
			: optimizeBitbucketMarkdown(comment.content.raw) ||
					'*No content provided.*',
	);

	// Add link to view the comment itself in browser if available
	if (comment.links?.html?.href) {
		lines.push('');
		lines.push(
			`[View full comment thread in browser](${comment.links.html.href})`,
		); // Clarify link purpose
	}
}

```

--------------------------------------------------------------------------------
/src/cli/atlassian.repositories.cli.ts:
--------------------------------------------------------------------------------

```typescript
import { Command } from 'commander';
import { Logger } from '../utils/logger.util.js';
import { handleCliError } from '../utils/error.util.js';
// Import directly from specialized controllers
import { handleRepositoriesList } from '../controllers/atlassian.repositories.list.controller.js';
import { handleRepositoryDetails } from '../controllers/atlassian.repositories.details.controller.js';
import { handleCommitHistory } from '../controllers/atlassian.repositories.commit.controller.js';
import {
	handleCreateBranch,
	handleListBranches,
} from '../controllers/atlassian.repositories.branch.controller.js';
import {
	handleCloneRepository,
	handleGetFileContent,
} from '../controllers/atlassian.repositories.content.controller.js';

/**
 * CLI module for managing Bitbucket repositories.
 * Provides commands for listing repositories and retrieving repository details.
 * All commands require valid Atlassian credentials.
 */

// Create a contextualized logger for this file
const cliLogger = Logger.forContext('cli/atlassian.repositories.cli.ts');

// Log CLI initialization
cliLogger.debug('Bitbucket repositories CLI module initialized');

/**
 * Register Bitbucket repositories CLI commands with the Commander program
 *
 * @param program - The Commander program instance to register commands with
 * @throws Error if command registration fails
 */
function register(program: Command): void {
	const methodLogger = Logger.forContext(
		'cli/atlassian.repositories.cli.ts',
		'register',
	);
	methodLogger.debug('Registering Bitbucket Repositories CLI commands...');

	registerListRepositoriesCommand(program);
	registerGetRepositoryCommand(program);
	registerGetCommitHistoryCommand(program);
	registerAddBranchCommand(program);
	registerCloneRepositoryCommand(program);
	registerGetFileCommand(program);
	registerListBranchesCommand(program);

	methodLogger.debug('CLI commands registered successfully');
}

/**
 * Register the command for listing Bitbucket repositories in a workspace
 *
 * @param program - The Commander program instance
 */
function registerListRepositoriesCommand(program: Command): void {
	program
		.command('ls-repos')
		.description(
			'List repositories in a Bitbucket workspace, with filtering and pagination.',
		)
		.option(
			'-w, --workspace-slug <slug>',
			'Workspace slug containing the repositories. If not provided, uses your default workspace (configured via BITBUCKET_DEFAULT_WORKSPACE or first workspace in your account). Example: "myteam"',
		)
		.option(
			'-q, --query <string>',
			'Filter repositories by this query string. Searches repository name and description.',
		)
		.option(
			'-p, --project-key <key>',
			'Filter repositories belonging to the specified project key. Example: "PROJ"',
		)
		.option(
			'-r, --role <string>',
			'Filter repositories where the authenticated user has the specified role or higher. Valid roles: `owner`, `admin`, `contributor`, `member`. Note: `member` typically includes all accessible repositories.',
		)
		.option(
			'-s, --sort <string>',
			'Sort repositories by this field. Examples: "name", "-updated_on" (default), "size".',
		)
		.option(
			'-l, --limit <number>',
			'Maximum number of items to return (1-100). Defaults to 25 if omitted.',
		)
		.option(
			'-c, --cursor <string>',
			'Pagination cursor for retrieving the next set of results.',
		)
		.action(async (options) => {
			const actionLogger = cliLogger.forMethod('ls-repos');
			try {
				actionLogger.debug('CLI ls-repos called', options);

				// Map CLI options to controller options - keep only type conversions
				const controllerOptions = {
					workspaceSlug: options.workspaceSlug,
					query: options.query,
					projectKey: options.projectKey,
					role: options.role,
					sort: options.sort,
					limit: options.limit
						? parseInt(options.limit, 10)
						: undefined,
					cursor: options.cursor,
				};

				// Call controller directly
				const result = await handleRepositoriesList(controllerOptions);

				// Output result content
				console.log(result.content);
			} catch (error) {
				handleCliError(error);
			}
		});
}

/**
 * Register the command for retrieving a specific Bitbucket repository
 * @param program - The Commander program instance
 */
function registerGetRepositoryCommand(program: Command): void {
	program
		.command('get-repo')
		.description(
			'Get detailed information about a specific Bitbucket repository.',
		)
		.option(
			'-w, --workspace-slug <slug>',
			'Workspace slug containing the repository. If not provided, uses your default workspace (either configured via BITBUCKET_DEFAULT_WORKSPACE or first workspace in your account). Example: "myteam"',
		)
		.requiredOption(
			'-r, --repo-slug <slug>',
			'Repository slug to retrieve. Must be a valid repository in the workspace. Example: "project-api"',
		)
		.action(async (options) => {
			const actionLogger = Logger.forContext(
				'cli/atlassian.repositories.cli.ts',
				'get-repo',
			);
			try {
				actionLogger.debug(
					`Fetching repository: ${options.workspaceSlug}/${options.repoSlug}`,
				);

				const result = await handleRepositoryDetails({
					workspaceSlug: options.workspaceSlug,
					repoSlug: options.repoSlug,
				});

				console.log(result.content);
			} catch (error) {
				actionLogger.error('Operation failed:', error);
				handleCliError(error);
			}
		});
}

/**
 * Register the command for retrieving commit history from a repository
 * @param program - The Commander program instance
 */
function registerGetCommitHistoryCommand(program: Command): void {
	program
		.command('get-commit-history')
		.description('Get commit history for a Bitbucket repository.')
		.option(
			'-w, --workspace-slug <slug>',
			'Workspace slug containing the repository. If not provided, uses your default workspace. Example: "myteam"',
		)
		.requiredOption(
			'-r, --repo-slug <slug>',
			'Repository slug to get commit history from. Example: "project-api"',
		)
		.option(
			'-v, --revision <branch-or-tag>',
			'Filter commits by a specific branch, tag, or commit hash.',
		)
		.option(
			'--path <file-path>',
			'Filter commits to those that affect this specific file path.',
		)
		.option(
			'-l, --limit <number>',
			'Maximum number of commits to return (1-100). Defaults to 25 if omitted.',
		)
		.option(
			'-c, --cursor <string>',
			'Pagination cursor for retrieving the next set of results.',
		)
		.action(async (options) => {
			const actionLogger = Logger.forContext(
				'cli/atlassian.repositories.cli.ts',
				'get-commit-history',
			);
			try {
				actionLogger.debug('Processing command options:', options);

				// Map CLI options to controller params - keep only type conversions
				const requestOptions = {
					workspaceSlug: options.workspaceSlug,
					repoSlug: options.repoSlug,
					revision: options.revision,
					path: options.path,
					limit: options.limit
						? parseInt(options.limit, 10)
						: undefined,
					cursor: options.cursor,
				};

				actionLogger.debug(
					'Fetching commit history with options:',
					requestOptions,
				);
				const result = await handleCommitHistory(requestOptions);
				actionLogger.debug('Successfully retrieved commit history');

				console.log(result.content);
			} catch (error) {
				actionLogger.error('Operation failed:', error);
				handleCliError(error);
			}
		});
}

/**
 * Register the command for adding a branch to a repository
 * @param program - The Commander program instance
 */
function registerAddBranchCommand(program: Command): void {
	program
		.command('add-branch')
		.description('Add a new branch in a Bitbucket repository.')
		.requiredOption(
			'-w, --workspace-slug <slug>',
			'Workspace slug containing the repository.',
		)
		.requiredOption(
			'-r, --repo-slug <slug>',
			'Repository slug where the branch will be created.',
		)
		.requiredOption(
			'-n, --new-branch-name <name>',
			'The name for the new branch.',
		)
		.requiredOption(
			'-s, --source-branch-or-commit <target>',
			'The name of the existing branch or a full commit hash to branch from.',
		)
		.action(async (options) => {
			const actionLogger = Logger.forContext(
				'cli/atlassian.repositories.cli.ts',
				'add-branch',
			);
			try {
				actionLogger.debug('Processing command options:', options);

				// Map CLI options to controller params
				const requestOptions = {
					workspaceSlug: options.workspaceSlug,
					repoSlug: options.repoSlug,
					newBranchName: options.newBranchName,
					sourceBranchOrCommit: options.sourceBranchOrCommit,
				};

				actionLogger.debug(
					'Creating branch with options:',
					requestOptions,
				);
				const result = await handleCreateBranch(requestOptions);
				actionLogger.debug('Successfully created branch');

				console.log(result.content);
			} catch (error) {
				actionLogger.error('Operation failed:', error);
				handleCliError(error);
			}
		});
}

/**
 * Register the command for cloning a Bitbucket repository.
 *
 * @param program - The Commander program instance
 */
function registerCloneRepositoryCommand(program: Command): void {
	program
		.command('clone')
		.description(
			'Clone a Bitbucket repository to your local filesystem using SSH (preferred) or HTTPS. ' +
				'The repository will be cloned into a subdirectory at targetPath/repoSlug. ' +
				'Requires Bitbucket credentials and proper SSH key setup for optimal usage.',
		)
		.requiredOption(
			'-w, --workspace-slug <slug>',
			'Workspace slug containing the repository. Example: "myteam"',
		)
		.requiredOption(
			'-r, --repo-slug <slug>',
			'Repository slug to clone. Example: "project-api"',
		)
		.requiredOption(
			'-t, --target-path <path>',
			'Directory path where the repository will be cloned. Absolute paths are strongly recommended. Example: "/home/user/projects"',
		)
		.action(async (options) => {
			const actionLogger = Logger.forContext(
				'cli/atlassian.repositories.cli.ts',
				'clone',
			);
			try {
				actionLogger.debug(
					'Processing clone command options:',
					options,
				);

				// Map CLI options to controller params (already correct case)
				const controllerOptions = {
					workspaceSlug: options.workspaceSlug,
					repoSlug: options.repoSlug,
					targetPath: options.targetPath,
				};

				actionLogger.debug(
					'Initiating repository clone with options:',
					controllerOptions,
				);
				const result = await handleCloneRepository(controllerOptions);
				actionLogger.info('Clone operation initiated successfully.');

				console.log(result.content);
			} catch (error) {
				actionLogger.error('Clone operation failed:', error);
				handleCliError(error);
			}
		});
}

/**
 * Register the command for getting a file from a Bitbucket repository
 *
 * @param program - The Commander program instance
 */
function registerGetFileCommand(program: Command): void {
	program
		.command('get-file')
		.description('Get the content of a file from a Bitbucket repository.')
		.requiredOption(
			'-w, --workspace-slug <slug>',
			'Workspace slug containing the repository. Must be a valid workspace slug from your Bitbucket account. Example: "myteam"',
		)
		.requiredOption(
			'-r, --repo-slug <slug>',
			'Repository slug to get the file from. Must be a valid repository slug in the specified workspace. Example: "project-api"',
		)
		.requiredOption(
			'-f, --file-path <path>',
			'Path to the file in the repository. Example: "README.md" or "src/main.js"',
		)
		.option(
			'-v, --revision <branch-tag-or-commit>',
			'Branch name, tag, or commit hash to retrieve the file from. If omitted, the default branch is used.',
		)
		.action(async (options) => {
			const actionLogger = Logger.forContext(
				'cli/atlassian.repositories.cli.ts',
				'get-file',
			);
			try {
				actionLogger.debug(
					`Fetching file: ${options.workspaceSlug}/${options.repoSlug}/${options.filePath}`,
					options.revision ? { revision: options.revision } : {},
				);

				const result = await handleGetFileContent({
					workspaceSlug: options.workspaceSlug,
					repoSlug: options.repoSlug,
					path: options.filePath,
					ref: options.revision,
				});

				console.log(result.content);
			} catch (error) {
				actionLogger.error('Operation failed:', error);
				handleCliError(error);
			}
		});
}

/**
 * Register the command for listing branches in a repository
 * @param program - The Commander program instance
 */
function registerListBranchesCommand(program: Command): void {
	program
		.command('list-branches')
		.description('List branches in a Bitbucket repository.')
		.option(
			'-w, --workspace-slug <slug>',
			'Workspace slug containing the repository. If not provided, uses your default workspace. Example: "myteam"',
		)
		.requiredOption(
			'-r, --repo-slug <slug>',
			'Repository slug to list branches from. Example: "project-api"',
		)
		.option(
			'-q, --query <string>',
			'Filter branches by name or other properties (text search).',
		)
		.option(
			'-s, --sort <string>',
			'Sort branches by this field. Examples: "name" (default), "-name", "target.date".',
		)
		.option(
			'-l, --limit <number>',
			'Maximum number of branches to return (1-100). Defaults to 25 if omitted.',
		)
		.option(
			'-c, --cursor <string>',
			'Pagination cursor for retrieving the next set of results.',
		)
		.action(async (options) => {
			const actionLogger = Logger.forContext(
				'cli/atlassian.repositories.cli.ts',
				'list-branches',
			);
			try {
				actionLogger.debug('Processing command options:', options);

				// Map CLI options to controller params - keep only type conversions
				const params = {
					workspaceSlug: options.workspaceSlug,
					repoSlug: options.repoSlug,
					query: options.query,
					sort: options.sort,
					limit: options.limit
						? parseInt(options.limit, 10)
						: undefined,
					cursor: options.cursor,
				};

				actionLogger.debug(
					'Fetching branches with parameters:',
					params,
				);
				const result = await handleListBranches(params);
				actionLogger.debug('Successfully retrieved branches');

				console.log(result.content);
			} catch (error) {
				actionLogger.error('Operation failed:', error);
				handleCliError(error);
			}
		});
}
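
// Example invocations (illustrative; the binary name, workspace, and repository
// slugs below are assumptions, adjust them to your installation):
//   mcp-atlassian-bitbucket ls-repos --workspace-slug myteam --limit 10
//   mcp-atlassian-bitbucket get-commit-history -w myteam -r project-api --path src/index.ts
//   mcp-atlassian-bitbucket add-branch -w myteam -r project-api -n feature/docs -s main
//   mcp-atlassian-bitbucket get-file -w myteam -r project-api -f README.md -v main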

export default { register };

```

--------------------------------------------------------------------------------
/src/controllers/atlassian.repositories.controller.test.ts:
--------------------------------------------------------------------------------

```typescript
import { handleRepositoriesList } from './atlassian.repositories.list.controller.js';
import { handleRepositoryDetails } from './atlassian.repositories.details.controller.js';
import { getAtlassianCredentials } from '../utils/transport.util.js';
import { config } from '../utils/config.util.js';
import { McpError } from '../utils/error.util.js';
import atlassianWorkspacesController from './atlassian.workspaces.controller.js';

describe('Atlassian Repositories Controller', () => {
	// Load configuration and check for credentials before all tests
	beforeAll(() => {
		config.load(); // Ensure config is loaded
		const credentials = getAtlassianCredentials();
		if (!credentials) {
			console.warn(
				'Skipping Atlassian Repositories Controller tests: No credentials available',
			);
		}
	});

	// Helper function to skip tests when credentials are missing
	const skipIfNoCredentials = () => !getAtlassianCredentials();

	describe('list', () => {
		// Helper to get a valid workspace slug for testing
		async function getFirstWorkspaceSlugForController(): Promise<
			string | null
		> {
			if (skipIfNoCredentials()) return null;

			try {
				const listResult = await atlassianWorkspacesController.list({
					limit: 1,
				});

				if (listResult.content === 'No Bitbucket workspaces found.')
					return null;

				// Extract slug from Markdown content
				const slugMatch = listResult.content.match(
					/\*\*Slug\*\*:\s+([^\s\n]+)/,
				);
				return slugMatch ? slugMatch[1] : null;
			} catch (error) {
				console.warn(
					"Could not fetch workspace list for controller 'list' test setup:",
					error,
				);
				return null;
			}
		}

		it('should return a formatted list of repositories in Markdown', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlugForController();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			const result = await handleRepositoriesList({
				workspaceSlug,
			});

			// Verify the response structure
			expect(result).toHaveProperty('content');
			expect(typeof result.content).toBe('string');

			// Basic Markdown content checks
			if (result.content !== 'No repositories found in this workspace.') {
				expect(result.content).toMatch(/^# Bitbucket Repositories/m);
				expect(result.content).toContain('**Name**');
				expect(result.content).toContain('**Full Name**');
				expect(result.content).toContain('**Updated**');
			}

			// Check for pagination information in the content string
			expect(result.content).toMatch(
				/---[\s\S]*\*Showing \d+ (of \d+ total items|\S+ items?)[\s\S]*\*/,
			);
		}, 30000);

		it('should handle pagination options (limit/cursor)', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlugForController();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// Fetch first page with limit 1
			const result1 = await handleRepositoriesList({
				workspaceSlug,
				limit: 1,
			});

			// Extract pagination info from content instead of accessing pagination object
			const countMatch = result1.content.match(
				/\*Showing (\d+) items?\.\*/,
			);
			const count = countMatch ? parseInt(countMatch[1], 10) : 0;
			expect(count).toBeLessThanOrEqual(1);

			// Extract cursor from content
			const cursorMatch = result1.content.match(
				/\*Next cursor: `([^`]+)`\*/,
			);
			const nextCursor = cursorMatch ? cursorMatch[1] : null;

			// Check if pagination indicates more results
			const hasMoreResults = result1.content.includes(
				'More results are available.',
			);

			// If there's a next page, fetch it
			if (hasMoreResults && nextCursor) {
				const result2 = await handleRepositoriesList({
					workspaceSlug,
					limit: 1,
					cursor: nextCursor,
				});
				expect(result2.content).toMatch(
					/---[\s\S]*\*Showing \d+ (of \d+ total items|\S+ items?)[\s\S]*\*/,
				);

				// Ensure content is different (or handle case where only 1 repo exists)
				if (
					result1.content !==
						'No repositories found in this workspace.' &&
					result2.content !==
						'No repositories found in this workspace.' &&
					count > 0
				) {
					// Only compare if we actually have multiple repositories
					expect(result1.content).not.toEqual(result2.content);
				}
			} else {
				console.warn(
					'Skipping cursor part of pagination test: Only one page of repositories found.',
				);
			}
		}, 30000);

		it('should handle filtering options (query)', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlugForController();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// First get all repositories to find a valid query term
			const allResult = await handleRepositoriesList({
				workspaceSlug,
			});

			if (
				allResult.content === 'No repositories found in this workspace.'
			) {
				console.warn('Skipping filtering test: No repositories found.');
				return;
			}

			// Extract a repository name from the first result to use as a query
			const repoNameMatch = allResult.content.match(
				/\*\*Name\*\*:\s+([^\n]+)/,
			);
			if (!repoNameMatch || !repoNameMatch[1]) {
				console.warn(
					'Skipping filtering test: Could not extract repository name.',
				);
				return;
			}

			// Use part of the repo name as a query term
			const queryTerm = repoNameMatch[1].trim().split(' ')[0];

			// Query with the extracted term
			const filteredResult = await handleRepositoriesList({
				workspaceSlug,
				query: queryTerm,
			});

			// The result should be a valid response
			expect(filteredResult).toHaveProperty('content');
			expect(typeof filteredResult.content).toBe('string');

			// We can't guarantee matches (query might not match anything), but response should be valid
			if (
				filteredResult.content !==
				'No repositories found in this workspace.'
			) {
				expect(filteredResult.content).toMatch(
					/^# Bitbucket Repositories/m,
				);
			}
		}, 30000);

		it('should handle sorting options', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlugForController();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// Request with explicit sort by name
			const sortedResult = await handleRepositoriesList({
				workspaceSlug,
				sort: 'name',
			});

			// The result should be a valid response
			expect(sortedResult).toHaveProperty('content');
			expect(typeof sortedResult.content).toBe('string');

			// We can't verify the exact sort order in the Markdown output easily,
			// but we can verify the response is valid
			if (
				sortedResult.content !==
				'No repositories found in this workspace.'
			) {
				expect(sortedResult.content).toMatch(
					/^# Bitbucket Repositories/m,
				);
			}
		}, 30000);

		it('should handle role filtering if supported', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlugForController();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// Try filtering by role
			try {
				const filteredResult = await handleRepositoriesList({
					workspaceSlug,
					role: 'owner', // Most likely role to have some results
				});

				// The result should be a valid response
				expect(filteredResult).toHaveProperty('content');
				expect(typeof filteredResult.content).toBe('string');

				// We can't guarantee matches, but response should be valid
				if (
					filteredResult.content !==
					'No repositories found in this workspace.'
				) {
					expect(filteredResult.content).toMatch(
						/^# Bitbucket Repositories/m,
					);
				}
			} catch (error) {
				// If role filtering isn't supported, log and continue
				console.warn(
					'Role filtering test encountered an error:',
					error,
				);
			}
		}, 30000);

		it('should handle empty result scenario', async () => {
			if (skipIfNoCredentials()) return;

			const workspaceSlug = await getFirstWorkspaceSlugForController();
			if (!workspaceSlug) {
				console.warn('Skipping test: No workspace slug found.');
				return;
			}

			// Use an extremely unlikely query to get empty results
			const noMatchQuery = 'thisstringwillnotmatchanyrepository12345xyz';

			const emptyResult = await handleRepositoriesList({
				workspaceSlug,
				query: noMatchQuery,
			});

			// Should return a specific "no results" message
			expect(emptyResult.content).toContain(
				'No repositories found matching your criteria.',
			);
		}, 30000);

		it('should throw an McpError for an invalid workspace slug', async () => {
			if (skipIfNoCredentials()) return;

			const invalidWorkspaceSlug =
				'this-workspace-definitely-does-not-exist-12345';

			// Expect the controller call to reject with an McpError
			await expect(
				handleRepositoriesList({
					workspaceSlug: invalidWorkspaceSlug,
				}),
			).rejects.toThrow(McpError);

			// Check the status code via the error handler's behavior
			try {
				await handleRepositoriesList({
					workspaceSlug: invalidWorkspaceSlug,
				});
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
				expect((e as McpError).message).toContain('not found');
			}
		}, 30000);
	});

	describe('get', () => {
		// Helper to get valid repo identifiers for testing
		async function getRepositoryIdentifier(): Promise<{
			workspaceSlug: string;
			repoSlug: string;
		} | null> {
			if (skipIfNoCredentials()) return null;

			try {
				const listWorkspacesResult =
					await atlassianWorkspacesController.list({
						limit: 1,
					});

				if (
					listWorkspacesResult.content ===
					'No Bitbucket workspaces found.'
				) {
					return null;
				}

				// Extract workspace slug
				const workspaceMatch = listWorkspacesResult.content.match(
					/\*\*Slug\*\*:\s+([^\s\n]+)/,
				);
				const workspaceSlug = workspaceMatch ? workspaceMatch[1] : null;

				if (!workspaceSlug) return null;

				// Get a repository from this workspace
				const listReposResult = await handleRepositoriesList({
					workspaceSlug,
					limit: 1,
				});

				if (
					listReposResult.content ===
					'No repositories found in this workspace.'
				) {
					return null;
				}

				// Extract repo slug - this may need adjustment based on actual Markdown format
				const repoSlugMatch = listReposResult.content.match(
					/\*\*Slug\*\*:\s+([^\s\n]+)/,
				);
				const repoSlug = repoSlugMatch ? repoSlugMatch[1] : null;

				if (!repoSlug) return null;

				return { workspaceSlug, repoSlug };
			} catch (error) {
				console.warn(
					'Could not fetch repository identifier for test:',
					error,
				);
				return null;
			}
		}

		it('should return formatted repository details in Markdown', async () => {
			if (skipIfNoCredentials()) return;

			const repoIdentifier = await getRepositoryIdentifier();
			if (!repoIdentifier) {
				console.warn('Skipping test: No repository identifier found.');
				return;
			}

			const result = await handleRepositoryDetails(repoIdentifier);

			// Verify the response structure
			expect(result).toHaveProperty('content');
			expect(typeof result.content).toBe('string');

			// Basic Markdown content checks
			expect(result.content).toMatch(/^# Repository:/m);
			expect(result.content).toContain('## Basic Information');
			expect(result.content).toContain('## Links');

			// Should contain the recent pull requests section (even if there are no PRs,
			// the section heading should be present, and there might be a "no pull requests found" message)
			expect(result.content).toContain('## Recent Pull Requests');

			// The URL to view all PRs should be present
			expect(result.content).toContain(
				'View all pull requests in Bitbucket',
			);
		}, 30000);

		it('should throw an McpError for a non-existent repository slug', async () => {
			if (skipIfNoCredentials()) return;

			// First get a valid workspace slug
			const listWorkspacesResult =
				await atlassianWorkspacesController.list({
					limit: 1,
				});

			if (
				listWorkspacesResult.content ===
				'No Bitbucket workspaces found.'
			) {
				console.warn('Skipping test: No workspaces available.');
				return;
			}

			// Extract workspace slug
			const workspaceMatch = listWorkspacesResult.content.match(
				/\*\*Slug\*\*:\s+([^\s\n]+)/,
			);
			const workspaceSlug = workspaceMatch ? workspaceMatch[1] : null;

			if (!workspaceSlug) {
				console.warn(
					'Skipping test: Could not extract workspace slug.',
				);
				return;
			}

			const invalidRepoSlug = 'this-repo-definitely-does-not-exist-12345';

			// Expect the controller call to reject with an McpError
			await expect(
				handleRepositoryDetails({
					workspaceSlug,
					repoSlug: invalidRepoSlug,
				}),
			).rejects.toThrow(McpError);

			// Check the status code via the error handler's behavior
			try {
				await handleRepositoryDetails({
					workspaceSlug,
					repoSlug: invalidRepoSlug,
				});
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
				expect((e as McpError).message).toContain('not found');
			}
		}, 30000);

		it('should throw an McpError for a non-existent workspace slug', async () => {
			if (skipIfNoCredentials()) return;

			const invalidWorkspaceSlug =
				'this-workspace-definitely-does-not-exist-12345';
			const someRepoSlug = 'some-repo';

			// Expect the controller call to reject with an McpError
			await expect(
				handleRepositoryDetails({
					workspaceSlug: invalidWorkspaceSlug,
					repoSlug: someRepoSlug,
				}),
			).rejects.toThrow(McpError);

			// Check the status code via the error handler's behavior
			try {
				await handleRepositoryDetails({
					workspaceSlug: invalidWorkspaceSlug,
					repoSlug: someRepoSlug,
				});
			} catch (e) {
				expect(e).toBeInstanceOf(McpError);
				expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
				expect((e as McpError).message).toContain('not found');
			}
		}, 30000);
	});
});

```

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.pullrequests.test.ts:
--------------------------------------------------------------------------------

```typescript
import atlassianPullRequestsService from './vendor.atlassian.pullrequests.service.js';
import atlassianWorkspacesService from './vendor.atlassian.workspaces.service.js';
import atlassianRepositoriesService from './vendor.atlassian.repositories.service.js';
import { getAtlassianCredentials } from '../utils/transport.util.js';
import { config } from '../utils/config.util.js';

describe('Vendor Atlassian Pull Requests Service', () => {
	// Variables to store valid test data
	let validWorkspace: string | null = null;
	let validRepo: string | null = null;
	let validPrId: string | null = null;

	// Load configuration and skip all tests if Atlassian credentials are not available
	beforeAll(async () => {
		// Load configuration from all sources
		config.load();

		const credentials = getAtlassianCredentials();
		if (!credentials) {
			console.warn(
				'Skipping Atlassian Pull Requests tests: No credentials available',
			);
			return;
		}

		// Try to find a valid workspace, repository, and PR for tests
		try {
			// Get available workspaces
			const workspaces = await atlassianWorkspacesService.list();
			if (workspaces.values.length > 0) {
				validWorkspace = workspaces.values[0].workspace.slug;

				// Find repositories in this workspace
				const repositories = await atlassianRepositoriesService.list({
					workspace: validWorkspace,
				});

				if (repositories && repositories.values.length > 0) {
					validRepo = repositories.values[0].name.toLowerCase();

					// Try to find a PR in this repository
					try {
						const pullRequests =
							await atlassianPullRequestsService.list({
								workspace: validWorkspace,
								repo_slug: validRepo,
								pagelen: 1,
							});

						if (pullRequests.values.length > 0) {
							validPrId = String(pullRequests.values[0].id);
							console.log(
								`Found valid PR for testing: ${validWorkspace}/${validRepo}/${validPrId}`,
							);
						}
					} catch (error) {
						console.warn(
							'Could not find a valid PR for testing:',
							error,
						);
					}
				}
			}
		} catch (error) {
			console.warn('Error setting up test data:', error);
		}
	}, 30000); // <--- Increased timeout for beforeAll hook

	describe('listPullRequests', () => {
		it('should return a list of pull requests', async () => {
			// Check if credentials are available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip this test if no credentials
			}

			// First get available workspaces
			const workspaces = await atlassianWorkspacesService.list();

			// Skip if no workspaces are available
			if (workspaces.values.length === 0) {
				console.warn('Skipping test: No workspaces available');
				return;
			}

			// Get the first workspace
			const workspace = workspaces.values[0].workspace.slug;
			console.log(`Using workspace: ${workspace}`);

			// Find repositories in this workspace
			let repositories;
			try {
				repositories = await atlassianRepositoriesService.list({
					workspace,
				});
			} catch (error) {
				console.warn(
					`Error fetching repositories for workspace ${workspace}: ${error instanceof Error ? error.message : String(error)}`,
				);
				return; // Skip this test if repositories can't be fetched
			}

			// Skip if no repositories are available
			if (!repositories || repositories.values.length === 0) {
				console.warn(
					`Skipping test: No repositories found in workspace ${workspace}`,
				);
				return;
			}

			// Get the first repository
			const repo_slug = repositories.values[0].name.toLowerCase();
			console.log(`Using repository: ${workspace}/${repo_slug}`);

			try {
				// Call the function with the real API
				const result = await atlassianPullRequestsService.list({
					workspace,
					repo_slug,
				});

				// Verify the response structure
				expect(result).toHaveProperty('values');
				expect(Array.isArray(result.values)).toBe(true);
				expect(result).toHaveProperty('pagelen');
				expect(result).toHaveProperty('page');
				expect(result).toHaveProperty('size');

				// If pull requests are returned, verify their structure
				if (result.values.length > 0) {
					const pullRequest = result.values[0];
					expect(pullRequest).toHaveProperty('type', 'pullrequest');
					expect(pullRequest).toHaveProperty('id');
					expect(pullRequest).toHaveProperty('title');
					expect(pullRequest).toHaveProperty('state');
					expect(pullRequest).toHaveProperty('author');
					expect(pullRequest).toHaveProperty('source');
					expect(pullRequest).toHaveProperty('destination');
					expect(pullRequest).toHaveProperty('links');
				} else {
					console.log(
						`Repository ${workspace}/${repo_slug} doesn't have any pull requests`,
					);
				}
			} catch (error) {
				// Allow test to pass if repository doesn't exist or has no PRs
				if (
					error instanceof Error &&
					(error.message.includes('Not Found') ||
						error.message.includes('No such repository'))
				) {
					console.warn(
						`Repository ${workspace}/${repo_slug} not found or no access to pull requests. Skipping test.`,
					);
					return;
				}
				throw error; // Re-throw if it's some other error
			}
		}, 15000); // Increase timeout for API call

		it('should support pagination', async () => {
			// Check if credentials are available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip this test if no credentials
			}

			// First get available workspaces
			const workspaces = await atlassianWorkspacesService.list();

			// Skip if no workspaces are available
			if (workspaces.values.length === 0) {
				console.warn('Skipping test: No workspaces available');
				return;
			}

			// Get the first workspace
			const workspace = workspaces.values[0].workspace.slug;

			// Find repositories in this workspace
			let repositories;
			try {
				repositories = await atlassianRepositoriesService.list({
					workspace,
				});
			} catch (error) {
				console.warn(
					`Error fetching repositories for workspace ${workspace}: ${error instanceof Error ? error.message : String(error)}`,
				);
				return; // Skip this test if repositories can't be fetched
			}

			// Skip if no repositories are available
			if (!repositories || repositories.values.length === 0) {
				console.warn(
					`Skipping test: No repositories found in workspace ${workspace}`,
				);
				return;
			}

			// Get the first repository
			const repo_slug = repositories.values[0].name.toLowerCase();

			try {
				// Call the function with the real API and limit results
				const result = await atlassianPullRequestsService.list({
					workspace,
					repo_slug,
					pagelen: 2,
				});

				// Verify the pagination parameters
				expect(result).toHaveProperty('pagelen', 2);
				expect(result.values.length).toBeLessThanOrEqual(2);
				console.log(
					`Found ${result.values.length} pull requests with pagination`,
				);
			} catch (error) {
				// Allow test to pass if repository doesn't exist or has no PRs
				if (
					error instanceof Error &&
					(error.message.includes('Not Found') ||
						error.message.includes('No such repository'))
				) {
					console.warn(
						`Repository ${workspace}/${repo_slug} not found or no access to pull requests. Skipping test.`,
					);
					return;
				}
				throw error; // Re-throw if it's some other error
			}
		}, 30000); // Increase timeout for API call

		it('should filter by state', async () => {
			// Check if credentials are available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip this test if no credentials
			}

			// First get available workspaces
			const workspaces = await atlassianWorkspacesService.list();

			// Skip if no workspaces are available
			if (workspaces.values.length === 0) {
				console.warn('Skipping test: No workspaces available');
				return;
			}

			// Get the first workspace
			const workspace = workspaces.values[0].workspace.slug;

			// Find repositories in this workspace
			let repositories;
			try {
				repositories = await atlassianRepositoriesService.list({
					workspace,
				});
			} catch (error) {
				console.warn(
					`Error fetching repositories for workspace ${workspace}: ${error instanceof Error ? error.message : String(error)}`,
				);
				return; // Skip this test if repositories can't be fetched
			}

			// Skip if no repositories are available
			if (!repositories || repositories.values.length === 0) {
				console.warn(
					`Skipping test: No repositories found in workspace ${workspace}`,
				);
				return;
			}

			// Get the first repository
			const repo_slug = repositories.values[0].name.toLowerCase();

			try {
				// Call the function with the real API and filter by state
				const result = await atlassianPullRequestsService.list({
					workspace,
					repo_slug,
					state: ['OPEN', 'MERGED'],
				});

				// Verify the states are as expected
				expect(result).toHaveProperty('values');

				// If pull requests are returned, verify they have the correct state
				if (result.values.length > 0) {
					result.values.forEach((pr) => {
						expect(['OPEN', 'MERGED']).toContain(pr.state);
					});
					console.log(
						`Found ${result.values.length} pull requests with states OPEN or MERGED`,
					);
				} else {
					console.log(
						`No pull requests found with states OPEN or MERGED`,
					);
				}
			} catch (error) {
				// Allow test to pass if repository doesn't exist or has no PRs
				if (
					error instanceof Error &&
					(error.message.includes('Not Found') ||
						error.message.includes('No such repository'))
				) {
					console.warn(
						`Repository ${workspace}/${repo_slug} not found or no access to pull requests. Skipping test.`,
					);
					return;
				}
				throw error; // Re-throw if it's some other error
			}
		}, 30000); // Increase timeout for API call
	});

	describe('getPullRequest', () => {
		it('should return details for a valid pull request ID', async () => {
			// Check if credentials are available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip this test if no credentials
			}

			// First get available workspaces
			const workspaces = await atlassianWorkspacesService.list();

			// Skip if no workspaces are available
			if (workspaces.values.length === 0) {
				console.warn('Skipping test: No workspaces available');
				return;
			}

			// Get the first workspace
			const workspace = workspaces.values[0].workspace.slug;

			// Find repositories in this workspace
			let repositories;
			try {
				repositories = await atlassianRepositoriesService.list({
					workspace,
				});
			} catch (error) {
				console.warn(
					`Error fetching repositories for workspace ${workspace}: ${error instanceof Error ? error.message : String(error)}`,
				);
				return; // Skip this test if repositories can't be fetched
			}

			// Skip if no repositories are available
			if (!repositories || repositories.values.length === 0) {
				console.warn(
					`Skipping test: No repositories found in workspace ${workspace}`,
				);
				return;
			}

			// Get the first repository
			const repo_slug = repositories.values[0].name.toLowerCase();

			try {
				// First, check if we can get a list of PRs to find a valid ID
				const prs = await atlassianPullRequestsService.list({
					workspace,
					repo_slug,
				});

				// Skip if no pull requests are available
				if (!prs.values.length) {
					console.warn(
						`Skipping test: No pull requests found in repository ${workspace}/${repo_slug}`,
					);
					return;
				}

				// Use the first PR's ID
				const prId = prs.values[0].id;
				console.log(`Testing pull request ID: ${prId}`);

				// Get the specific pull request
				const result = await atlassianPullRequestsService.get({
					workspace,
					repo_slug,
					pull_request_id: prId,
				});

				// Verify the response contains expected fields
				expect(result).toHaveProperty('id', prId);
				expect(result).toHaveProperty('type', 'pullrequest');
				expect(result).toHaveProperty('title');
				expect(result).toHaveProperty('state');
				expect(result).toHaveProperty('author');
				expect(result).toHaveProperty('source');
				expect(result).toHaveProperty('destination');
				expect(result).toHaveProperty('links');
			} catch (error) {
				// Allow test to pass if repository or PR doesn't exist
				if (
					error instanceof Error &&
					(error.message.includes('Not Found') ||
						error.message.includes('No such repository') ||
						error.message.includes('Pull request not found'))
				) {
					console.warn(
						`Repository ${workspace}/${repo_slug} or its pull requests not found. Skipping test.`,
					);
					return;
				}
				throw error; // Re-throw if it's some other error
			}
		}, 15000); // Increase timeout for API call

		it('should handle invalid pull request IDs', async () => {
			// Check if credentials are available
			const credentials = getAtlassianCredentials();
			if (!credentials) {
				return; // Skip this test if no credentials
			}

			// First get available workspaces
			const workspaces = await atlassianWorkspacesService.list();

			// Skip if no workspaces are available
			if (workspaces.values.length === 0) {
				console.warn('Skipping test: No workspaces available');
				return;
			}

			// Get the first workspace
			const workspace = workspaces.values[0].workspace.slug;

			// Find repositories in this workspace
			let repositories;
			try {
				repositories = await atlassianRepositoriesService.list({
					workspace,
				});
			} catch (error) {
				console.warn(
					`Error fetching repositories for workspace ${workspace}: ${error instanceof Error ? error.message : String(error)}`,
				);
				return; // Skip this test if repositories can't be fetched
			}

			// Skip if no repositories are available
			if (!repositories || repositories.values.length === 0) {
				console.warn(
					`Skipping test: No repositories found in workspace ${workspace}`,
				);
				return;
			}

			// Get the first repository
			const repo_slug = repositories.values[0].name.toLowerCase();

			try {
				// Use an invalid pull request ID (very large number unlikely to exist)
				const invalidId = 999999;
				console.log(`Testing invalid pull request ID: ${invalidId}`);

				// Call the function with the real API and expect it to throw
				await expect(
					atlassianPullRequestsService.get({
						workspace,
						repo_slug,
						pull_request_id: invalidId,
					}),
				).rejects.toThrow();
			} catch (error) {
				// If repo doesn't exist, just skip the test
				if (
					error instanceof Error &&
					(error.message.includes('Not Found') ||
						error.message.includes('No such repository'))
				) {
					console.warn(
						`Repository ${workspace}/${repo_slug} not found. Skipping test.`,
					);
					return;
				}
				// Otherwise, we should have caught the expected rejection
			}
		}, 15000); // Increase timeout for API call
	});

	// Note: addComment test suite has been removed to avoid creating comments on real PRs during tests
});

```

--------------------------------------------------------------------------------
/src/utils/error-handler.util.ts:
--------------------------------------------------------------------------------

```typescript
import { createApiError } from './error.util.js';
import { Logger } from './logger.util.js';
import { getDeepOriginalError } from './error.util.js';
import { McpError } from './error.util.js';

/**
 * Standard error codes for consistent handling
 */
export enum ErrorCode {
	NOT_FOUND = 'NOT_FOUND',
	INVALID_CURSOR = 'INVALID_CURSOR',
	ACCESS_DENIED = 'ACCESS_DENIED',
	VALIDATION_ERROR = 'VALIDATION_ERROR',
	UNEXPECTED_ERROR = 'UNEXPECTED_ERROR',
	NETWORK_ERROR = 'NETWORK_ERROR',
	RATE_LIMIT_ERROR = 'RATE_LIMIT_ERROR',
	PRIVATE_IP_ERROR = 'PRIVATE_IP_ERROR',
	RESERVED_RANGE_ERROR = 'RESERVED_RANGE_ERROR',
}

/**
 * Context information for error handling
 */
export interface ErrorContext {
	/**
	 * Source of the error (e.g., file path and function)
	 */
	source?: string;

	/**
	 * Type of entity being processed (e.g., 'Repository', 'PullRequest')
	 */
	entityType?: string;

	/**
	 * Identifier of the entity being processed
	 */
	entityId?: string | Record<string, string>;

	/**
	 * Operation being performed (e.g., 'listing', 'creating')
	 */
	operation?: string;

	/**
	 * Additional information for debugging
	 */
	additionalInfo?: Record<string, unknown>;
}

/**
 * Helper function to create a consistent error context object
 * @param entityType Type of entity being processed
 * @param operation Operation being performed
 * @param source Source of the error (typically file path and function)
 * @param entityId Optional identifier of the entity
 * @param additionalInfo Optional additional information for debugging
 * @returns A formatted ErrorContext object
 */
export function buildErrorContext(
	entityType: string,
	operation: string,
	source: string,
	entityId?: string | Record<string, string>,
	additionalInfo?: Record<string, unknown>,
): ErrorContext {
	return {
		entityType,
		operation,
		source,
		...(entityId && { entityId }),
		...(additionalInfo && { additionalInfo }),
	};
}
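
// Illustrative usage (hypothetical call site and identifiers):
//   const context = buildErrorContext(
//       'Repository',
//       'retrieving',
//       'controllers/atlassian.repositories.details.controller.ts@handleRepositoryDetails',
//       { workspaceSlug: 'myteam', repoSlug: 'project-api' },
//   );
//   // -> { entityType: 'Repository', operation: 'retrieving', source: '...', entityId: { ... } }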

/**
 * Detect specific error types from raw errors
 * @param error The error to analyze
 * @param context Context information for better error detection
 * @returns Object containing the error code and status code
 */
export function detectErrorType(
	error: unknown,
	context: ErrorContext = {},
): { code: ErrorCode; statusCode: number } {
	const methodLogger = Logger.forContext(
		'utils/error-handler.util.ts',
		'detectErrorType',
	);
	methodLogger.debug(`Detecting error type`, { error, context });

	const errorMessage = error instanceof Error ? error.message : String(error);
	const statusCode =
		error instanceof Error && 'statusCode' in error
			? (error as { statusCode: number }).statusCode
			: undefined;

	// PR ID validation error detection
	if (
		errorMessage.includes('Invalid pull request ID') ||
		errorMessage.includes('Pull request ID must be a positive integer')
	) {
		return { code: ErrorCode.VALIDATION_ERROR, statusCode: 400 };
	}

	// Network error detection
	if (
		errorMessage.includes('network error') ||
		errorMessage.includes('fetch failed') ||
		errorMessage.includes('ECONNREFUSED') ||
		errorMessage.includes('ENOTFOUND') ||
		errorMessage.includes('Failed to fetch') ||
		errorMessage.includes('Network request failed')
	) {
		return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
	}

	// Network error detection in originalError
	if (
		error instanceof Error &&
		'originalError' in error &&
		error.originalError
	) {
		// Check for TypeError in originalError (common for network issues)
		if (error.originalError instanceof TypeError) {
			return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
		}

		// Check for network error messages in originalError
		if (
			error.originalError instanceof Error &&
			(error.originalError.message.includes('fetch') ||
				error.originalError.message.includes('network') ||
				error.originalError.message.includes('ECON'))
		) {
			return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
		}
	}

	// Rate limiting detection
	if (
		errorMessage.includes('rate limit') ||
		errorMessage.includes('too many requests') ||
		statusCode === 429
	) {
		return { code: ErrorCode.RATE_LIMIT_ERROR, statusCode: 429 };
	}

	// Bitbucket-specific error detection
	if (
		error instanceof Error &&
		'originalError' in error &&
		error.originalError
	) {
		const originalError = getDeepOriginalError(error.originalError);

		if (originalError && typeof originalError === 'object') {
			const oe = originalError as Record<string, unknown>;

			// Check for Bitbucket API error structure
			if (oe.error && typeof oe.error === 'object') {
				const bbError = oe.error as Record<string, unknown>;
				const errorMsg = String(bbError.message || '').toLowerCase();
				const errorDetail = bbError.detail
					? String(bbError.detail).toLowerCase()
					: '';

				methodLogger.debug('Found Bitbucket error structure', {
					message: errorMsg,
					detail: errorDetail,
				});

				// Repository not found / Does not exist errors
				if (
					errorMsg.includes('repository not found') ||
					errorMsg.includes('does not exist') ||
					errorMsg.includes('no such resource') ||
					errorMsg.includes('not found')
				) {
					return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
				}

				// Access and permission errors
				if (
					errorMsg.includes('access') ||
					errorMsg.includes('permission') ||
					errorMsg.includes('credentials') ||
					errorMsg.includes('unauthorized') ||
					errorMsg.includes('forbidden') ||
					errorMsg.includes('authentication')
				) {
					return { code: ErrorCode.ACCESS_DENIED, statusCode: 403 };
				}

				// Validation errors
				if (
					errorMsg.includes('invalid') ||
					(errorMsg.includes('parameter') &&
						errorMsg.includes('error')) ||
					errorMsg.includes('input') ||
					errorMsg.includes('validation') ||
					errorMsg.includes('required field') ||
					errorMsg.includes('bad request')
				) {
					return {
						code: ErrorCode.VALIDATION_ERROR,
						statusCode: 400,
					};
				}

				// Rate limiting errors
				if (
					errorMsg.includes('rate limit') ||
					errorMsg.includes('too many requests') ||
					errorMsg.includes('throttled')
				) {
					return {
						code: ErrorCode.RATE_LIMIT_ERROR,
						statusCode: 429,
					};
				}
			}

			// Check for alternate Bitbucket error structure: {"type": "error", ...}
			if (oe.type === 'error') {
				methodLogger.debug('Found Bitbucket type:error structure', oe);

				// Check for status code if available in the error object
				if (typeof oe.status === 'number') {
					if (oe.status === 404) {
						return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
					}
					if (oe.status === 403 || oe.status === 401) {
						return {
							code: ErrorCode.ACCESS_DENIED,
							statusCode: oe.status,
						};
					}
					if (oe.status === 400) {
						return {
							code: ErrorCode.VALIDATION_ERROR,
							statusCode: 400,
						};
					}
					if (oe.status === 429) {
						return {
							code: ErrorCode.RATE_LIMIT_ERROR,
							statusCode: 429,
						};
					}
				}
			}

			// Check for Bitbucket error structure: {"errors": [{...}]}
			if (Array.isArray(oe.errors) && oe.errors.length > 0) {
				const firstError = oe.errors[0] as Record<string, unknown>;
				methodLogger.debug(
					'Found Bitbucket errors array structure',
					firstError,
				);

				if (typeof firstError.status === 'number') {
					if (firstError.status === 404) {
						return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
					}
					if (
						firstError.status === 403 ||
						firstError.status === 401
					) {
						return {
							code: ErrorCode.ACCESS_DENIED,
							statusCode: firstError.status,
						};
					}
					if (firstError.status === 400) {
						return {
							code: ErrorCode.VALIDATION_ERROR,
							statusCode: 400,
						};
					}
					if (firstError.status === 429) {
						return {
							code: ErrorCode.RATE_LIMIT_ERROR,
							statusCode: 429,
						};
					}
				}

				// Look for error messages in the title or message fields
				if (firstError.title || firstError.message) {
					const errorText = String(
						firstError.title || firstError.message,
					).toLowerCase();
					if (errorText.includes('not found')) {
						return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
					}
					if (
						errorText.includes('access') ||
						errorText.includes('permission')
					) {
						return {
							code: ErrorCode.ACCESS_DENIED,
							statusCode: 403,
						};
					}
					if (
						errorText.includes('invalid') ||
						errorText.includes('required')
					) {
						return {
							code: ErrorCode.VALIDATION_ERROR,
							statusCode: 400,
						};
					}
					if (
						errorText.includes('rate limit') ||
						errorText.includes('too many requests')
					) {
						return {
							code: ErrorCode.RATE_LIMIT_ERROR,
							statusCode: 429,
						};
					}
				}
			}
		}
	}

	// Not Found detection
	if (
		errorMessage.includes('not found') ||
		errorMessage.includes('does not exist') ||
		statusCode === 404
	) {
		return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
	}

	// Access Denied detection
	if (
		errorMessage.includes('access') ||
		errorMessage.includes('permission') ||
		errorMessage.includes('authorize') ||
		errorMessage.includes('authentication') ||
		statusCode === 401 ||
		statusCode === 403
	) {
		return { code: ErrorCode.ACCESS_DENIED, statusCode: statusCode || 403 };
	}

	// Invalid Cursor detection
	if (
		(errorMessage.includes('cursor') ||
			errorMessage.includes('startAt') ||
			errorMessage.includes('page')) &&
		(errorMessage.includes('invalid') || errorMessage.includes('not valid'))
	) {
		return { code: ErrorCode.INVALID_CURSOR, statusCode: 400 };
	}

	// Validation Error detection
	if (
		errorMessage.includes('validation') ||
		errorMessage.includes('invalid') ||
		errorMessage.includes('required') ||
		statusCode === 400 ||
		statusCode === 422
	) {
		return {
			code: ErrorCode.VALIDATION_ERROR,
			statusCode: statusCode || 400,
		};
	}

	// Default to unexpected error
	return {
		code: ErrorCode.UNEXPECTED_ERROR,
		statusCode: statusCode || 500,
	};
}
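
// Sketch of the fallback detection above (illustrative inputs):
//   detectErrorType(new Error('Repository does not exist'));
//   // -> { code: ErrorCode.NOT_FOUND, statusCode: 404 }
//   detectErrorType(new Error('rate limit exceeded'));
//   // -> { code: ErrorCode.RATE_LIMIT_ERROR, statusCode: 429 }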

/**
 * Create user-friendly error messages based on error type and context
 * @param code The error code
 * @param context Context information for better error messages
 * @param originalMessage The original error message
 * @returns User-friendly error message
 */
export function createUserFriendlyErrorMessage(
	code: ErrorCode,
	context: ErrorContext = {},
	originalMessage?: string,
): string {
	const methodLogger = Logger.forContext(
		'utils/error-handler.util.ts',
		'createUserFriendlyErrorMessage',
	);
	const { entityType, entityId, operation } = context;

	// Format entity ID for display
	let entityIdStr = '';
	if (entityId) {
		if (typeof entityId === 'string') {
			entityIdStr = entityId;
		} else {
			// Handle object entityId (like ProjectIdentifier)
			entityIdStr = Object.values(entityId).join('/');
		}
	}

	// Determine entity display name
	const entity = entityType
		? `${entityType}${entityIdStr ? ` ${entityIdStr}` : ''}`
		: `Resource${entityIdStr ? ` ${entityIdStr}` : ''}`;

	let message = '';

	switch (code) {
		case ErrorCode.NOT_FOUND:
			// 'entity' already includes the identifier, so avoid repeating it here
			message = `${entity} not found. Verify the ID is correct and that you have access to this ${entityType?.toLowerCase() || 'resource'}.`;

			// Bitbucket-specific guidance
			if (
				entityType === 'Repository' ||
				entityType === 'PullRequest' ||
				entityType === 'Branch'
			) {
				message += ` Make sure the workspace and ${entityType.toLowerCase()} names are spelled correctly and that you have permission to access it.`;
			}
			break;

		case ErrorCode.ACCESS_DENIED:
			message = `Access denied for ${entity.toLowerCase()}. Verify your credentials and permissions.`;

			// Bitbucket-specific guidance
			message += ` Ensure your Bitbucket API token/app password has sufficient privileges and hasn't expired. If using a workspace/repository name, check that it's spelled correctly.`;
			break;

		case ErrorCode.INVALID_CURSOR:
			message = `Invalid pagination cursor. Use the exact cursor string returned from previous results.`;

			// Bitbucket-specific guidance
			message += ` Bitbucket pagination typically uses page numbers. Check that the page number is valid and within range.`;
			break;

		case ErrorCode.VALIDATION_ERROR:
			message =
				originalMessage ||
				`Invalid data provided for ${operation || 'operation'} ${entity.toLowerCase()}.`;

			// The originalMessage already includes error details for VALIDATION_ERROR
			break;

		case ErrorCode.NETWORK_ERROR:
			message = `Network error while ${operation || 'connecting to'} the Bitbucket API. Please check your internet connection and try again.`;
			break;

		case ErrorCode.RATE_LIMIT_ERROR:
			message = `Bitbucket API rate limit exceeded. Please wait a moment and try again, or reduce the frequency of requests.`;

			// Bitbucket-specific guidance
			message += ` Bitbucket's API has rate limits per IP address and additional limits for authenticated users.`;
			break;

		default:
			message = `An unexpected error occurred while ${operation || 'processing'} ${entity.toLowerCase()}.`;
	}

	// Include original message details if available and appropriate
	if (
		originalMessage &&
		code !== ErrorCode.NOT_FOUND &&
		code !== ErrorCode.ACCESS_DENIED
	) {
		message += ` Error details: ${originalMessage}`;
	}

	methodLogger.debug(`Created user-friendly message: ${message}`, {
		code,
		context,
	});
	return message;
}

/**
 * Handle controller errors consistently
 * @param error The error to handle
 * @param context Context information for better error messages
 * @returns Never returns, always throws an error
 */
export function handleControllerError(
	error: unknown,
	context: ErrorContext = {},
): never {
	const methodLogger = Logger.forContext(
		'utils/error-handler.util.ts',
		'handleControllerError',
	);

	// Extract error details
	const errorMessage = error instanceof Error ? error.message : String(error);
	const statusCode =
		error instanceof Error && 'statusCode' in error
			? (error as { statusCode: number }).statusCode
			: undefined;

	// Detect error type using utility
	const { code, statusCode: detectedStatus } = detectErrorType(
		error,
		context,
	);

	// Combine detected status with explicit status
	const finalStatusCode = statusCode || detectedStatus;

	// Format entity information for logging
	const { entityType, entityId, operation } = context;
	const entity = entityType || 'resource';
	const entityIdStr = entityId
		? typeof entityId === 'string'
			? entityId
			: JSON.stringify(entityId)
		: '';
	const actionStr = operation || 'processing';

	// Log detailed error information
	methodLogger.error(
		`Error ${actionStr} ${entity}${
			entityIdStr ? `: ${entityIdStr}` : ''
		}: ${errorMessage}`,
		error,
	);

	// Create user-friendly error message for the response
	const message =
		code === ErrorCode.VALIDATION_ERROR
			? errorMessage
			: createUserFriendlyErrorMessage(code, context, errorMessage);

	// Throw an appropriate API error with the user-friendly message
	throw createApiError(message, finalStatusCode, error);
}
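
// Typical call pattern in a controller catch block (a sketch; the entity,
// operation, and source strings are assumptions):
//   try {
//       // ... call the service layer ...
//   } catch (error) {
//       handleControllerError(
//           error,
//           buildErrorContext(
//               'Repository',
//               'listing',
//               'controllers/atlassian.repositories.list.controller.ts@handleRepositoriesList',
//           ),
//       );
//   }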

/**
 * Handles errors from CLI commands
 * Logs the error and exits the process with appropriate exit code
 *
 * @param error The error to handle
 */
export function handleCliError(error: unknown): never {
	const logger = Logger.forContext(
		'utils/error-handler.util.ts',
		'handleCliError',
	);

	logger.error('CLI error:', error);

	// Process different error types
	if (error instanceof McpError) {
		// Format user-friendly error message for MCP errors
		console.error(`Error: ${error.message}`);

		// Use specific exit codes based on error type
		switch (error.errorType) {
			case 'AUTHENTICATION_REQUIRED':
				process.exit(2);
				break; // Not strictly needed after process.exit but added for clarity
			case 'NOT_FOUND':
				process.exit(3);
				break;
			case 'VALIDATION_ERROR':
				process.exit(4);
				break;
			case 'RATE_LIMIT_EXCEEDED':
				process.exit(5);
				break;
			case 'API_ERROR':
				process.exit(6);
				break;
			default:
				process.exit(1);
				break;
		}
	} else if (error instanceof Error) {
		// Standard Error objects
		console.error(`Error: ${error.message}`);
		process.exit(1);
	} else {
		// Unknown error types
		console.error(`Unknown error occurred: ${String(error)}`);
		process.exit(1);
	}
}

```

--------------------------------------------------------------------------------
/src/tools/atlassian.pullrequests.tool.ts:
--------------------------------------------------------------------------------

```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { z } from 'zod';
import { Logger } from '../utils/logger.util.js';
import { formatErrorForMcpTool } from '../utils/error.util.js';
import {
	ListPullRequestsToolArgs,
	type ListPullRequestsToolArgsType,
	GetPullRequestToolArgs,
	type GetPullRequestToolArgsType,
	ListPullRequestCommentsToolArgs,
	type ListPullRequestCommentsToolArgsType,
	CreatePullRequestCommentToolArgs,
	type CreatePullRequestCommentToolArgsType,
	CreatePullRequestToolArgs,
	type CreatePullRequestToolArgsType,
	UpdatePullRequestToolArgs,
	type UpdatePullRequestToolArgsType,
	ApprovePullRequestToolArgs,
	type ApprovePullRequestToolArgsType,
	RejectPullRequestToolArgs,
	type RejectPullRequestToolArgsType,
} from './atlassian.pullrequests.types.js';
import atlassianPullRequestsController from '../controllers/atlassian.pullrequests.controller.js';

// Create a contextualized logger for this file
const toolLogger = Logger.forContext('tools/atlassian.pullrequests.tool.ts');

// Log tool initialization
toolLogger.debug('Bitbucket pull requests tool initialized');

/**
 * MCP Tool: List Bitbucket Pull Requests
 *
 * Lists pull requests for a specific repository with optional filtering.
 * Returns a formatted markdown response with pull request details.
 *
 * @param args - Tool arguments for filtering pull requests
 * @returns MCP response with formatted pull requests list
 * @throws Will return error message if pull request listing fails
 */
async function listPullRequests(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'listPullRequests',
	);
	methodLogger.debug('Listing Bitbucket pull requests with filters:', args);

	try {
		// Pass args directly to controller without any logic
		const result = await atlassianPullRequestsController.list(
			args as ListPullRequestsToolArgsType,
		);

		methodLogger.debug(
			'Successfully retrieved pull requests from controller',
		);

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to list pull requests', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: Get Bitbucket Pull Request Details
 *
 * Retrieves detailed information about a specific pull request.
 * Returns a formatted markdown response with pull request details.
 *
 * @param args - Tool arguments containing the workspace, repository, and pull request identifiers
 * @returns MCP response with formatted pull request details
 * @throws Will return error message if pull request retrieval fails
 */
async function getPullRequest(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'getPullRequest',
	);
	methodLogger.debug('Getting Bitbucket pull request details:', args);

	try {
		// Pass args directly to controller
		const result = await atlassianPullRequestsController.get(
			args as GetPullRequestToolArgsType,
		);

		methodLogger.debug(
			'Successfully retrieved pull request details from controller',
		);

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to get pull request details', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: List Bitbucket Pull Request Comments
 *
 * Lists comments for a specific pull request, including general comments and inline code comments.
 * Returns a formatted markdown response with comment details.
 *
 * @param args - Tool arguments containing workspace, repository, and PR identifiers
 * @returns MCP response with formatted pull request comments
 * @throws Will return error message if comment retrieval fails
 */
async function listPullRequestComments(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'listPullRequestComments',
	);
	methodLogger.debug('Listing pull request comments:', args);

	try {
		// Pass args directly to controller
		const result = await atlassianPullRequestsController.listComments(
			args as ListPullRequestCommentsToolArgsType,
		);

		methodLogger.debug(
			'Successfully retrieved pull request comments from controller',
		);

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to get pull request comments', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: Add Bitbucket Pull Request Comment
 *
 * Adds a comment to a specific pull request, with support for general and inline comments.
 * Returns a success message as markdown.
 *
 * @param args - Tool arguments containing workspace, repository, PR ID, and comment content
 * @returns MCP response with formatted success message
 * @throws Will return error message if comment creation fails
 */
async function addPullRequestComment(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'addPullRequestComment',
	);
	methodLogger.debug('Adding pull request comment:', {
		...args,
		content: args.content
			? `(length: ${(args.content as string).length})`
			: '(none)',
	});

	try {
		// Pass args directly to controller
		const result = await atlassianPullRequestsController.addComment(
			args as CreatePullRequestCommentToolArgsType,
		);

		methodLogger.debug(
			'Successfully added pull request comment via controller',
		);

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to add pull request comment', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: Create Bitbucket Pull Request
 *
 * Creates a new pull request between two branches in a Bitbucket repository.
 * Returns a formatted markdown response with the newly created pull request details.
 *
 * @param args - Tool arguments containing workspace, repository, source branch, destination branch, and title
 * @returns MCP response with formatted pull request details
 * @throws Will return error message if pull request creation fails
 */
async function addPullRequest(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'addPullRequest',
	);
	methodLogger.debug('Creating new pull request:', {
		...args,
		description: args.description
			? `(length: ${(args.description as string).length})`
			: '(none)',
	});

	try {
		// Pass args directly to controller
		const result = await atlassianPullRequestsController.add(
			args as CreatePullRequestToolArgsType,
		);

		methodLogger.debug('Successfully created pull request via controller');

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to create pull request', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: Update Bitbucket Pull Request
 *
 * Updates an existing pull request's title and/or description.
 * Returns a formatted markdown response with updated pull request details.
 *
 * @param args - Tool arguments for updating pull request
 * @returns MCP response with formatted updated pull request details
 * @throws Will return error message if pull request update fails
 */
async function updatePullRequest(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'updatePullRequest',
	);
	methodLogger.debug('Updating pull request:', {
		...args,
		description: args.description
			? `(length: ${(args.description as string).length})`
			: '(unchanged)',
	});

	try {
		// Pass args directly to controller
		const result = await atlassianPullRequestsController.update(
			args as UpdatePullRequestToolArgsType,
		);

		methodLogger.debug('Successfully updated pull request via controller');

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to update pull request', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: Approve Bitbucket Pull Request
 *
 * Approves a pull request, marking it as approved by the current user.
 * Returns a formatted markdown response with approval confirmation.
 *
 * @param args - Tool arguments for approving pull request
 * @returns MCP response with formatted approval confirmation
 * @throws Will return error message if pull request approval fails
 */
async function approvePullRequest(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'approvePullRequest',
	);
	methodLogger.debug('Approving pull request:', args);

	try {
		// Pass args directly to controller
		const result = await atlassianPullRequestsController.approve(
			args as ApprovePullRequestToolArgsType,
		);

		methodLogger.debug('Successfully approved pull request via controller');

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to approve pull request', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * MCP Tool: Request Changes on Bitbucket Pull Request
 *
 * Requests changes on a pull request, marking it as requiring changes by the current user.
 * Returns a formatted markdown response with rejection confirmation.
 *
 * @param args - Tool arguments for requesting changes on pull request
 * @returns MCP response with formatted rejection confirmation
 * @throws Will return error message if pull request rejection fails
 */
async function rejectPullRequest(args: Record<string, unknown>) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'rejectPullRequest',
	);
	methodLogger.debug('Requesting changes on pull request:', args);

	try {
		// Pass args directly to controller
		const result = await atlassianPullRequestsController.reject(
			args as RejectPullRequestToolArgsType,
		);

		methodLogger.debug(
			'Successfully requested changes on pull request via controller',
		);

		return {
			content: [
				{
					type: 'text' as const,
					text: result.content,
				},
			],
		};
	} catch (error) {
		methodLogger.error('Failed to request changes on pull request', error);
		return formatErrorForMcpTool(error);
	}
}

/**
 * Register Atlassian Pull Requests MCP Tools
 *
 * Registers the pull requests-related tools with the MCP server.
 * Each tool is registered with its schema, description, and handler function.
 *
 * @param server - The MCP server instance to register tools with
 */
function registerTools(server: McpServer) {
	const methodLogger = Logger.forContext(
		'tools/atlassian.pullrequests.tool.ts',
		'registerTools',
	);
	methodLogger.debug('Registering Atlassian Pull Requests tools...');

	// Register the list pull requests tool
	server.tool(
		'bb_ls_prs',
		`Lists pull requests within a repository (\`repoSlug\`). If \`workspaceSlug\` is not provided, the system will use your default workspace. Filters by \`state\` (OPEN, MERGED, DECLINED, SUPERSEDED) and supports text search via \`query\`. Supports pagination via \`limit\` and \`cursor\`. Pagination details are included at the end of the text content. Returns a formatted Markdown list with each PR's title, status, author, reviewers, and creation date. Requires Bitbucket credentials to be configured.`,
		ListPullRequestsToolArgs.shape,
		listPullRequests,
	);

	// Register the get pull request tool
	server.tool(
		'bb_get_pr',
		`Retrieves detailed information about a specific pull request identified by \`prId\` within a repository (\`repoSlug\`). If \`workspaceSlug\` is not provided, the system will use your default workspace. Includes PR details, status, reviewers, and diff statistics. Set \`includeFullDiff\` to true (default) for the complete code changes. Set \`includeComments\` to true to also retrieve comments (default: false; Note: Enabling this may increase response time for pull requests with many comments). Returns rich information as formatted Markdown, including PR summary, code changes, and optionally comments. Requires Bitbucket credentials to be configured.`,
		GetPullRequestToolArgs.shape,
		getPullRequest,
	);

	// Register the list pull request comments tool
	server.tool(
		'bb_ls_pr_comments',
		`Lists comments on a specific pull request identified by \`prId\` within a repository (\`repoSlug\`). If \`workspaceSlug\` is not provided, the system will use your default workspace. Retrieves both general PR comments and inline code comments, indicating their location if applicable. Supports pagination via \`limit\` and \`cursor\`. Pagination details are included at the end of the text content. Returns a formatted Markdown list with each comment's author, timestamp, content, and location for inline comments. Requires Bitbucket credentials to be configured.`,
		ListPullRequestCommentsToolArgs.shape,
		listPullRequestComments,
	);

	// Register the add pull request comment tool
	server.tool(
		'bb_add_pr_comment',
		`Adds a comment to a specific pull request identified by \`prId\` within a repository (\`repoSlug\`). If \`workspaceSlug\` is not provided, the system will use your default workspace. The \`content\` parameter accepts Markdown-formatted text for the comment body. To reply to an existing comment, provide its ID in the \`parentId\` parameter. For inline code comments, provide both \`inline.path\` (file path) and \`inline.line\` (line number). Returns a success message as formatted Markdown. Requires Bitbucket credentials with write permissions to be configured.`,
		CreatePullRequestCommentToolArgs.shape,
		addPullRequestComment,
	);

	// Register the create pull request tool
	// Note: Using prTitle instead of title to avoid MCP SDK conflict
	const createPrSchema = z.object({
		workspaceSlug: CreatePullRequestToolArgs.shape.workspaceSlug,
		repoSlug: CreatePullRequestToolArgs.shape.repoSlug,
		prTitle: CreatePullRequestToolArgs.shape.title, // Renamed from 'title' to 'prTitle'
		sourceBranch: CreatePullRequestToolArgs.shape.sourceBranch,
		destinationBranch: CreatePullRequestToolArgs.shape.destinationBranch,
		description: CreatePullRequestToolArgs.shape.description,
		closeSourceBranch: CreatePullRequestToolArgs.shape.closeSourceBranch,
	});
	server.tool(
		'bb_add_pr',
		`Creates a new pull request in a repository (\`repoSlug\`). If \`workspaceSlug\` is not provided, the system will use your default workspace. Required parameters include \`prTitle\` (the PR title), \`sourceBranch\` (branch with changes), and optionally \`destinationBranch\` (target branch, defaults to main/master). The \`description\` parameter accepts Markdown-formatted text for the PR description. Set \`closeSourceBranch\` to true to automatically delete the source branch after merging. Returns the newly created pull request details as formatted Markdown. Requires Bitbucket credentials with write permissions to be configured.`,
		createPrSchema.shape,
		async (args: Record<string, unknown>) => {
			// Map prTitle back to title for the controller
			const mappedArgs = { ...args, title: args.prTitle };
			delete (mappedArgs as Record<string, unknown>).prTitle;
			return addPullRequest(mappedArgs);
		},
	);

	// Register the update pull request tool
	// Note: Using prTitle instead of title to avoid MCP SDK conflict
	const updatePrSchema = z.object({
		workspaceSlug: UpdatePullRequestToolArgs.shape.workspaceSlug,
		repoSlug: UpdatePullRequestToolArgs.shape.repoSlug,
		pullRequestId: UpdatePullRequestToolArgs.shape.pullRequestId,
		prTitle: UpdatePullRequestToolArgs.shape.title, // Renamed from 'title' to 'prTitle'
		description: UpdatePullRequestToolArgs.shape.description,
	});
	server.tool(
		'bb_update_pr',
		`Updates an existing pull request in a repository (\`repoSlug\`) identified by \`pullRequestId\`. If \`workspaceSlug\` is not provided, the system will use your default workspace. You can update the \`prTitle\` (the PR title) and/or \`description\` fields. At least one field must be provided. The \`description\` parameter accepts Markdown-formatted text. Returns the updated pull request details as formatted Markdown. Requires Bitbucket credentials with write permissions to be configured.`,
		updatePrSchema.shape,
		async (args: Record<string, unknown>) => {
			// Map prTitle back to title for the controller
			const mappedArgs = { ...args, title: args.prTitle };
			delete (mappedArgs as Record<string, unknown>).prTitle;
			return updatePullRequest(mappedArgs);
		},
	);

	// Register the approve pull request tool
	server.tool(
		'bb_approve_pr',
		`Approves a pull request in a repository (\`repoSlug\`) identified by \`pullRequestId\`. If \`workspaceSlug\` is not provided, the system will use your default workspace. This marks the pull request as approved by the current user, indicating that the changes are ready for merge (pending any other required approvals or checks). Returns an approval confirmation as formatted Markdown. Requires Bitbucket credentials with appropriate permissions to be configured.`,
		ApprovePullRequestToolArgs.shape,
		approvePullRequest,
	);

	// Register the reject pull request tool
	server.tool(
		'bb_reject_pr',
		`Requests changes on a pull request in a repository (\`repoSlug\`) identified by \`pullRequestId\`. If \`workspaceSlug\` is not provided, the system will use your default workspace. This marks the pull request as requiring changes by the current user, indicating that the author should address feedback before the pull request can be merged. Returns a rejection confirmation as formatted Markdown. Requires Bitbucket credentials with appropriate permissions to be configured.`,
		RejectPullRequestToolArgs.shape,
		rejectPullRequest,
	);

	methodLogger.debug('Successfully registered Pull Requests tools');
}

export default { registerTools };

```
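
For orientation, a minimal sketch of how this module's `registerTools` export might be wired into an MCP server at startup. The server name and version are placeholders, the constructor call follows the `@modelcontextprotocol/sdk` import already used in this file, and the project's real entry point may differ:

```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import pullRequestsTool from './atlassian.pullrequests.tool.js';

// Placeholder server metadata; real values live in the project's entry point
const server = new McpServer({
	name: 'mcp-server-atlassian-bitbucket',
	version: '0.0.0',
});

// Registers bb_ls_prs, bb_get_pr, bb_ls_pr_comments, bb_add_pr_comment,
// bb_add_pr, bb_update_pr, bb_approve_pr, and bb_reject_pr with the server
pullRequestsTool.registerTools(server);
```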

--------------------------------------------------------------------------------
/src/services/vendor.atlassian.repositories.service.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';
import {
	createAuthMissingError,
	createApiError,
	McpError,
} from '../utils/error.util.js';
import { Logger } from '../utils/logger.util.js';
import {
	fetchAtlassian,
	getAtlassianCredentials,
} from '../utils/transport.util.js';
import {
	validatePageSize,
	validatePaginationLimits,
} from '../utils/pagination.util.js';
import {
	ListRepositoriesParamsSchema,
	GetRepositoryParamsSchema,
	ListCommitsParamsSchema,
	RepositoriesResponseSchema,
	RepositorySchema,
	PaginatedCommitsSchema,
	CreateBranchParamsSchema,
	BranchRefSchema,
	GetFileContentParamsSchema,
	type ListRepositoriesParams,
	type GetRepositoryParams,
	type ListCommitsParams,
	type Repository,
	type CreateBranchParams,
	type BranchRef,
	type GetFileContentParams,
	ListBranchesParamsSchema,
	BranchesResponseSchema,
	type ListBranchesParams,
	type BranchesResponse,
} from './vendor.atlassian.repositories.types.js';

/**
 * Base API path for Bitbucket REST API v2
 * @see https://developer.atlassian.com/cloud/bitbucket/rest/api-group-repositories/
 * @constant {string}
 */
const API_PATH = '/2.0';

/**
 * @namespace VendorAtlassianRepositoriesService
 * @description Service for interacting with Bitbucket Repositories API.
 * Provides methods for listing repositories, retrieving repository details,
 * listing commits and branches, creating branches, and retrieving file content.
 * All methods require valid Atlassian credentials configured in the environment.
 */

// Create a contextualized logger for this file
const serviceLogger = Logger.forContext(
	'services/vendor.atlassian.repositories.service.ts',
);

// Log service initialization
serviceLogger.debug('Bitbucket repositories service initialized');

/**
 * List repositories for a workspace
 * @param {ListRepositoriesParams} params - Parameters for the request
 * @param {string} params.workspace - Workspace slug or UUID
 * @param {string} [params.q] - Query string to filter repositories
 * @param {string} [params.sort] - Property to sort by (e.g., 'name', '-created_on')
 * @param {string} [params.role] - Filter repositories by the authenticated user's role
 * @param {number} [params.page] - Page number for pagination
 * @param {number} [params.pagelen] - Number of items per page
 * @returns {Promise<RepositoriesResponse>} Response containing repositories
 * @example
 * ```typescript
 * // List repositories in a workspace, filtered and sorted
 * const response = await list({
 *   workspace: 'myworkspace',
 *   q: 'name~"api"',
 *   sort: 'name',
 *   pagelen: 25
 * });
 * ```
 */
async function list(
	params: ListRepositoriesParams,
): Promise<z.infer<typeof RepositoriesResponseSchema>> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'list',
	);
	methodLogger.debug('Listing Bitbucket repositories with params:', params);

	// Validate params with Zod
	try {
		ListRepositoriesParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to list repositories:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	// Construct query parameters
	const queryParams = new URLSearchParams();

	// Add optional query parameters
	if (params.q) {
		queryParams.set('q', params.q);
	}
	if (params.sort) {
		queryParams.set('sort', params.sort);
	}
	if (params.role) {
		queryParams.set('role', params.role);
	}

	// Validate and enforce page size limits (CWE-770)
	const validatedPagelen = validatePageSize(
		params.pagelen,
		'listRepositories',
	);
	queryParams.set('pagelen', validatedPagelen.toString());

	if (params.page) {
		queryParams.set('page', params.page.toString());
	}

	const queryString = queryParams.toString()
		? `?${queryParams.toString()}`
		: '';
	const path = `${API_PATH}/repositories/${params.workspace}${queryString}`;

	methodLogger.debug(`Sending request to: ${path}`);
	try {
		const rawData = await fetchAtlassian(credentials, path);
		// Validate response with Zod schema
		try {
			const validatedData = RepositoriesResponseSchema.parse(rawData);

			// Validate pagination limits to prevent excessive data exposure (CWE-770)
			if (!validatePaginationLimits(validatedData, 'listRepositories')) {
				methodLogger.warn(
					'Response pagination exceeds configured limits',
				);
			}

			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					'Received invalid response format from Bitbucket API',
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to list repositories: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Get detailed information about a specific Bitbucket repository
 *
 * Retrieves comprehensive details about a single repository.
 *
 * @async
 * @memberof VendorAtlassianRepositoriesService
 * @param {GetRepositoryParams} params - Parameters for the request
 * @param {string} params.workspace - The workspace slug
 * @param {string} params.repo_slug - The repository slug
 * @returns {Promise<Repository>} Promise containing the detailed repository information
 * @throws {Error} If Atlassian credentials are missing or API request fails
 * @example
 * // Get repository details
 * const repository = await get({
 *   workspace: 'my-workspace',
 *   repo_slug: 'my-repo'
 * });
 */
async function get(params: GetRepositoryParams): Promise<Repository> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'get',
	);
	methodLogger.debug(
		`Getting Bitbucket repository: ${params.workspace}/${params.repo_slug}`,
	);

	// Validate params with Zod
	try {
		GetRepositoryParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to get repository:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}`;

	methodLogger.debug(`Sending request to: ${path}`);
	try {
		const rawData = await fetchAtlassian(credentials, path);

		// Validate response with Zod schema
		try {
			const validatedData = RepositorySchema.parse(rawData);
			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				// Log the detailed formatting errors but provide a clear message to users
				methodLogger.error(
					'Bitbucket API response validation failed:',
					error.format(),
				);

				// Create API error with appropriate context for validation failures
				throw createApiError(
					`Invalid response format from Bitbucket API for repository ${params.workspace}/${params.repo_slug}`,
					500, // Internal server error since the API responded but with unexpected format
					error, // Include the Zod error as originalError for better debugging
				);
			}
			throw error; // Re-throw any other errors
		}
	} catch (error) {
		// If it's already an McpError (from fetchAtlassian or Zod validation), just rethrow it
		if (error instanceof McpError) {
			throw error;
		}

		// Otherwise, wrap in a standard API error with context
		throw createApiError(
			`Failed to get repository details for ${params.workspace}/${params.repo_slug}: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Lists commits for a specific repository and optional revision/path.
 *
 * @param params Parameters including workspace, repo slug, and optional filters.
 * @returns Promise resolving to paginated commit data.
 * @throws {Error} If workspace or repo_slug are missing, or if credentials are not found.
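 * @example
 * // Illustrative usage (placeholder values, not taken from the codebase):
 * const commits = await listCommits({
 *   workspace: 'my-workspace',
 *   repo_slug: 'my-repo',
 *   include: 'main',
 *   pagelen: 10
 * });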
 */
async function listCommits(
	params: ListCommitsParams,
): Promise<z.infer<typeof PaginatedCommitsSchema>> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'listCommits',
	);
	methodLogger.debug(
		`Listing commits for ${params.workspace}/${params.repo_slug}`,
		params,
	);

	// Validate params with Zod
	try {
		ListCommitsParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to list commits:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const queryParams = new URLSearchParams();
	if (params.include) {
		queryParams.set('include', params.include);
	}
	if (params.exclude) {
		queryParams.set('exclude', params.exclude);
	}
	if (params.path) {
		queryParams.set('path', params.path);
	}
	if (params.pagelen) {
		queryParams.set('pagelen', params.pagelen.toString());
	}
	if (params.page) {
		queryParams.set('page', params.page.toString());
	}

	const queryString = queryParams.toString()
		? `?${queryParams.toString()}`
		: '';
	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}/commits${queryString}`;

	methodLogger.debug(`Sending commit history request to: ${path}`);
	try {
		const rawData = await fetchAtlassian(credentials, path);
		// Validate response with Zod schema
		try {
			const validatedData = PaginatedCommitsSchema.parse(rawData);
			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					'Received invalid response format from Bitbucket API',
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to list commits: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Creates a new branch in the specified repository.
 *
 * @param params Parameters including workspace, repo slug, new branch name, and source target.
 * @returns Promise resolving to details about the newly created branch reference.
 * @throws {Error} If required parameters are missing or API request fails.
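 * @example
 * // Illustrative usage (branch name and target hash are placeholders):
 * const branchRef = await createBranch({
 *   workspace: 'my-workspace',
 *   repo_slug: 'my-repo',
 *   name: 'feature/my-branch',
 *   target: { hash: 'a1b2c3d4e5f6' }
 * });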
 */
async function createBranch(params: CreateBranchParams): Promise<BranchRef> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'createBranch',
	);
	methodLogger.debug(
		`Creating branch '${params.name}' from target '${params.target.hash}' in ${params.workspace}/${params.repo_slug}`,
	);

	// Validate params with Zod
	try {
		CreateBranchParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error('Invalid parameters provided:', error.format());
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}/refs/branches`;

	const requestBody = {
		name: params.name,
		target: {
			hash: params.target.hash,
		},
	};

	methodLogger.debug(`Sending POST request to: ${path}`);
	try {
		const rawData = await fetchAtlassian<BranchRef>(credentials, path, {
			method: 'POST',
			body: requestBody,
		});

		// Validate response with Zod schema
		try {
			const validatedData = BranchRefSchema.parse(rawData);
			methodLogger.debug('Branch created successfully:', validatedData);
			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					'Received invalid response format from Bitbucket API',
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to create branch: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Get the content of a file from a repository.
 *
 * This retrieves the raw content of a file at the specified path from a repository at a specific commit.
 *
 * @param {GetFileContentParams} params - Parameters for the request
 * @param {string} params.workspace - The workspace slug or UUID
 * @param {string} params.repo_slug - The repository slug or UUID
 * @param {string} params.commit - The commit, branch name, or tag to get the file from
 * @param {string} params.path - The file path within the repository
 * @returns {Promise<string>} Promise containing the file content as a string
 * @throws {Error} If parameters are invalid, credentials are missing, or API request fails
 * @example
 * // Get README.md content from the main branch
 * const fileContent = await getFileContent({
 *   workspace: 'my-workspace',
 *   repo_slug: 'my-repo',
 *   commit: 'main',
 *   path: 'README.md'
 * });
 */
async function getFileContent(params: GetFileContentParams): Promise<string> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'getFileContent',
	);
	methodLogger.debug(
		`Getting file content from ${params.workspace}/${params.repo_slug}/${params.commit}/${params.path}`,
	);

	// Validate params with Zod
	try {
		GetFileContentParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to get file content:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}/src/${params.commit}/${params.path}`;

	methodLogger.debug(`Sending request to: ${path}`);
	try {
		// Use fetchAtlassian to get the file content directly as string
		// The function already detects text/plain content type and returns it appropriately
		const fileContent = await fetchAtlassian<string>(credentials, path);

		methodLogger.debug(
			`Successfully retrieved file content (${fileContent.length} characters)`,
		);
		return fileContent;
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}

		// More specific error messages for common file issues
		if (error instanceof Error && error.message.includes('404')) {
			throw createApiError(
				`File not found: ${params.path} at ${params.commit}`,
				404,
				error,
			);
		}

		throw createApiError(
			`Failed to get file content: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

/**
 * Lists branches for a specific repository.
 *
 * @param params Parameters including workspace, repo slug, and optional filters.
 * @returns Promise resolving to paginated branches data.
 * @throws {Error} If workspace or repo_slug are missing, or if credentials are not found.
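 * @example
 * // Illustrative usage (query and sort values are placeholders):
 * const branches = await listBranches({
 *   workspace: 'my-workspace',
 *   repo_slug: 'my-repo',
 *   q: 'name ~ "feature"',
 *   sort: 'name',
 *   pagelen: 25
 * });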
 */
async function listBranches(
	params: ListBranchesParams,
): Promise<BranchesResponse> {
	const methodLogger = Logger.forContext(
		'services/vendor.atlassian.repositories.service.ts',
		'listBranches',
	);
	methodLogger.debug(
		`Listing branches for ${params.workspace}/${params.repo_slug}`,
		params,
	);

	// Validate params with Zod
	try {
		ListBranchesParamsSchema.parse(params);
	} catch (error) {
		if (error instanceof z.ZodError) {
			methodLogger.error(
				'Invalid parameters provided to list branches:',
				error.format(),
			);
			throw createApiError(
				`Invalid parameters: ${error.issues.map((e) => e.message).join(', ')}`,
				400,
				error,
			);
		}
		throw error;
	}

	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw createAuthMissingError(
			'Atlassian credentials are required for this operation',
		);
	}

	const queryParams = new URLSearchParams();
	if (params.q) {
		queryParams.set('q', params.q);
	}
	if (params.sort) {
		queryParams.set('sort', params.sort);
	}
	if (params.pagelen) {
		queryParams.set('pagelen', params.pagelen.toString());
	}
	if (params.page) {
		queryParams.set('page', params.page.toString());
	}

	const queryString = queryParams.toString()
		? `?${queryParams.toString()}`
		: '';
	const path = `${API_PATH}/repositories/${params.workspace}/${params.repo_slug}/refs/branches${queryString}`;

	methodLogger.debug(`Sending branches request to: ${path}`);
	try {
		const rawData = await fetchAtlassian(credentials, path);
		// Validate response with Zod schema
		try {
			const validatedData = BranchesResponseSchema.parse(rawData);
			return validatedData;
		} catch (error) {
			if (error instanceof z.ZodError) {
				methodLogger.error(
					'Invalid response from Bitbucket API:',
					error.format(),
				);
				throw createApiError(
					'Received invalid response format from Bitbucket API',
					500,
					error,
				);
			}
			throw error;
		}
	} catch (error) {
		if (error instanceof McpError) {
			throw error;
		}
		throw createApiError(
			`Failed to list branches: ${error instanceof Error ? error.message : String(error)}`,
			500,
			error,
		);
	}
}

export default {
	list,
	get,
	listCommits,
	createBranch,
	getFileContent,
	listBranches,
};

```