This is page 2 of 3. Use http://codebase.md/aashari/mcp-server-atlassian-bitbucket?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .env.example
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── ci-dependabot-auto-merge.yml
│       ├── ci-dependency-check.yml
│       └── ci-semantic-release.yml
├── .gitignore
├── .gitkeep
├── .npmignore
├── .npmrc
├── .prettierrc
├── .releaserc.json
├── .trigger-ci
├── CHANGELOG.md
├── eslint.config.mjs
├── package-lock.json
├── package.json
├── README.md
├── scripts
│   ├── ensure-executable.js
│   ├── package.json
│   └── update-version.js
├── src
│   ├── cli
│   │   ├── atlassian.api.cli.ts
│   │   ├── atlassian.repositories.cli.ts
│   │   └── index.ts
│   ├── controllers
│   │   ├── atlassian.api.controller.ts
│   │   └── atlassian.repositories.content.controller.ts
│   ├── index.ts
│   ├── services
│   │   ├── vendor.atlassian.repositories.service.test.ts
│   │   ├── vendor.atlassian.repositories.service.ts
│   │   ├── vendor.atlassian.repositories.types.ts
│   │   ├── vendor.atlassian.workspaces.service.ts
│   │   ├── vendor.atlassian.workspaces.test.ts
│   │   └── vendor.atlassian.workspaces.types.ts
│   ├── tools
│   │   ├── atlassian.api.tool.ts
│   │   ├── atlassian.api.types.ts
│   │   ├── atlassian.repositories.tool.ts
│   │   └── atlassian.repositories.types.ts
│   ├── types
│   │   └── common.types.ts
│   └── utils
│       ├── bitbucket-error-detection.test.ts
│       ├── cli.test.util.ts
│       ├── config.util.test.ts
│       ├── config.util.ts
│       ├── constants.util.ts
│       ├── error-handler.util.test.ts
│       ├── error-handler.util.ts
│       ├── error.util.test.ts
│       ├── error.util.ts
│       ├── formatter.util.ts
│       ├── jest.setup.ts
│       ├── jq.util.ts
│       ├── logger.util.ts
│       ├── pagination.util.ts
│       ├── response.util.ts
│       ├── shell.util.ts
│       ├── toon.util.test.ts
│       ├── toon.util.ts
│       ├── transport.util.test.ts
│       ├── transport.util.ts
│       └── workspace.util.ts
├── STYLE_GUIDE.md
└── tsconfig.json
```
# Files
--------------------------------------------------------------------------------
/src/services/vendor.atlassian.workspaces.service.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod';
2 | import {
3 | createAuthMissingError,
4 | createApiError,
5 | McpError,
6 | } from '../utils/error.util.js';
7 | import { Logger } from '../utils/logger.util.js';
8 | import {
9 | fetchAtlassian,
10 | getAtlassianCredentials,
11 | } from '../utils/transport.util.js';
12 | import {
13 | WorkspaceDetailedSchema,
14 | WorkspacePermissionsResponseSchema,
15 | ListWorkspacesParamsSchema,
16 | type ListWorkspacesParams,
17 | } from './vendor.atlassian.workspaces.types.js';
18 |
19 | /**
20 | * Base API path for Bitbucket REST API v2
21 | * @see https://developer.atlassian.com/cloud/bitbucket/rest/api-group-workspaces/
22 | * @constant {string}
23 | */
24 | const API_PATH = '/2.0';
25 |
26 | /**
27 | * @namespace VendorAtlassianWorkspacesService
28 | * @description Service for interacting with Bitbucket Workspaces API.
29 | * Provides methods for listing workspaces and retrieving workspace details.
30 | * All methods require valid Atlassian credentials configured in the environment.
31 | */
32 |
33 | // Create a contextualized logger for this file
34 | const serviceLogger = Logger.forContext(
35 | 'services/vendor.atlassian.workspaces.service.ts',
36 | );
37 |
38 | // Log service initialization
39 | serviceLogger.debug('Bitbucket workspaces service initialized');
40 |
41 | /**
42 | * List Bitbucket workspaces with optional filtering and pagination
43 | *
44 | * Retrieves a list of workspaces from Bitbucket with support for various filters
45 | * and pagination options.
46 | *
47 | * NOTE: The /2.0/user/permissions/workspaces endpoint does not support sorting,
48 | * despite the ListWorkspacesParams type including a sort parameter.
49 | *
50 | * @async
51 | * @memberof VendorAtlassianWorkspacesService
52 | * @param {ListWorkspacesParams} [params={}] - Optional parameters for customizing the request
53 | * @param {string} [params.q] - Filter by workspace name
54 | * @param {number} [params.page] - Page number
55 | * @param {number} [params.pagelen] - Number of items per page
56 | * @returns {Promise<z.infer<typeof WorkspacePermissionsResponseSchema>>} Promise containing the validated workspaces response
57 | * @throws {McpError} If validation fails, credentials are missing, or API request fails
58 | * @example
59 | * // List workspaces with pagination
60 | * const response = await list({
61 | * pagelen: 10
62 | * });
63 | */
64 | async function list(
65 | params: ListWorkspacesParams = {},
66 | ): Promise<z.infer<typeof WorkspacePermissionsResponseSchema>> {
67 | const methodLogger = Logger.forContext(
68 | 'services/vendor.atlassian.workspaces.service.ts',
69 | 'list',
70 | );
71 | methodLogger.debug('Listing Bitbucket workspaces with params:', params);
72 |
73 | // Validate params with Zod
74 | try {
75 | ListWorkspacesParamsSchema.parse(params);
76 | } catch (error) {
77 | if (error instanceof z.ZodError) {
78 | methodLogger.error(
79 | 'Invalid parameters provided to list workspaces:',
80 | error.format(),
81 | );
82 | throw createApiError(
83 | `Invalid parameters for listing workspaces: ${error.issues.map((e) => e.message).join(', ')}`,
84 | 400,
85 | error,
86 | );
87 | }
88 | throw error;
89 | }
90 |
91 | const credentials = getAtlassianCredentials();
92 | if (!credentials) {
93 | throw createAuthMissingError(
94 | 'Atlassian credentials are required for this operation',
95 | );
96 | }
97 |
98 | // Build query parameters
99 | const queryParams = new URLSearchParams();
100 |
101 | // Add optional query parameters if provided
102 | // NOTE: Sort is intentionally not included as the /2.0/user/permissions/workspaces endpoint
103 | // does not support sorting on any field
104 | if (params.q) {
105 | queryParams.set('q', params.q);
106 | }
107 | if (params.pagelen) {
108 | queryParams.set('pagelen', params.pagelen.toString());
109 | }
110 | if (params.page) {
111 | queryParams.set('page', params.page.toString());
112 | }
113 |
114 | const queryString = queryParams.toString()
115 | ? `?${queryParams.toString()}`
116 | : '';
117 | const path = `${API_PATH}/user/permissions/workspaces${queryString}`;
118 |
119 | methodLogger.debug(`Sending request to: ${path}`);
120 | try {
121 | const response = await fetchAtlassian(credentials, path);
122 | // Validate response with Zod schema
123 | try {
124 | const validatedData = WorkspacePermissionsResponseSchema.parse(
125 | response.data,
126 | );
127 | return validatedData;
128 | } catch (error) {
129 | if (error instanceof z.ZodError) {
130 | methodLogger.error(
131 | 'Invalid response from Bitbucket API:',
132 | error.format(),
133 | );
134 | throw createApiError(
135 | `Invalid response format from Bitbucket API for workspace list: ${error.message}`,
136 | 500,
137 | error,
138 | );
139 | }
140 | throw error;
141 | }
142 | } catch (error) {
143 | if (error instanceof McpError) {
144 | throw error;
145 | }
146 | throw createApiError(
147 | `Failed to list workspaces: ${error instanceof Error ? error.message : String(error)}`,
148 | 500,
149 | error,
150 | );
151 | }
152 | }
153 |
154 | /**
155 | * Get detailed information about a specific Bitbucket workspace
156 | *
157 | * Retrieves comprehensive details about a single workspace.
158 | *
159 | * @async
160 | * @memberof VendorAtlassianWorkspacesService
161 | * @param {string} workspace - The workspace slug
162 | * @returns {Promise<z.infer<typeof WorkspaceDetailedSchema>>} Promise containing the validated workspace information
163 | * @throws {McpError} If validation fails, credentials are missing, or API request fails
164 | * @example
165 | * // Get workspace details
166 | * const workspace = await get('my-workspace');
167 | */
168 | async function get(
169 | workspace: string,
170 | ): Promise<z.infer<typeof WorkspaceDetailedSchema>> {
171 | const methodLogger = Logger.forContext(
172 | 'services/vendor.atlassian.workspaces.service.ts',
173 | 'get',
174 | );
175 | methodLogger.debug(`Getting Bitbucket workspace with slug: ${workspace}`);
176 |
177 | const credentials = getAtlassianCredentials();
178 | if (!credentials) {
179 | throw createAuthMissingError(
180 | 'Atlassian credentials are required for this operation',
181 | );
182 | }
183 |
184 | // Currently no query parameters for workspace details API
185 | const path = `${API_PATH}/workspaces/${workspace}`;
186 |
187 | methodLogger.debug(`Sending request to: ${path}`);
188 | try {
189 | const response = await fetchAtlassian(credentials, path);
190 | // Validate response with Zod schema
191 | try {
192 | const validatedData = WorkspaceDetailedSchema.parse(response.data);
193 | return validatedData;
194 | } catch (error) {
195 | if (error instanceof z.ZodError) {
196 | methodLogger.error(
197 | 'Invalid response from Bitbucket API:',
198 | error.format(),
199 | );
200 | throw createApiError(
201 | `Invalid response format from Bitbucket API for workspace details: ${error.message}`,
202 | 500,
203 | error,
204 | );
205 | }
206 | throw error;
207 | }
208 | } catch (error) {
209 | if (error instanceof McpError) {
210 | throw error;
211 | }
212 | throw createApiError(
213 | `Failed to get workspace details: ${error instanceof Error ? error.message : String(error)}`,
214 | 500,
215 | error,
216 | );
217 | }
218 | }
219 |
220 | export default { list, get };
221 |
```
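A minimal consumer sketch for the service above (editor-added, not part of the repository). It assumes Atlassian credentials are already configured in the environment, as `getAtlassianCredentials()` requires, and that the validated list response follows Bitbucket's standard paginated shape with a `values` array of workspace permission entries; the `my-workspace` slug is a placeholder.

```typescript
// Hypothetical consumer of the workspaces service; assumes env credentials and
// a standard Bitbucket paginated response (a `values` array).
import workspacesService from './vendor.atlassian.workspaces.service.js';

async function showWorkspaces(): Promise<void> {
	// List up to 10 workspaces the authenticated user has a permission on.
	const response = await workspacesService.list({ pagelen: 10 });
	console.log(`Fetched ${response.values.length} workspace permission entries`);

	// Fetch full details for a single workspace by its slug (placeholder slug).
	const workspace = await workspacesService.get('my-workspace');
	console.log(workspace);
}

showWorkspaces().catch((error) => {
	console.error('Workspace lookup failed:', error);
});
```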
--------------------------------------------------------------------------------
/src/utils/error.util.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, expect, test } from '@jest/globals';
2 | import {
3 | ErrorType,
4 | McpError,
5 | createApiError,
6 | createAuthMissingError,
7 | createAuthInvalidError,
8 | createUnexpectedError,
9 | ensureMcpError,
10 | formatErrorForMcpTool,
11 | formatErrorForMcpResource,
12 | getDeepOriginalError,
13 | } from './error.util.js';
14 |
15 | describe('Error Utilities', () => {
16 | describe('Error creation functions', () => {
17 | test('createAuthMissingError creates an error with AUTH_MISSING type', () => {
18 | const error = createAuthMissingError('Missing credentials');
19 | expect(error).toBeInstanceOf(McpError);
20 | expect(error.type).toBe(ErrorType.AUTH_MISSING);
21 | expect(error.message).toBe('Missing credentials');
22 | expect(error.statusCode).toBeUndefined();
23 | });
24 |
25 | test('createAuthInvalidError creates an error with AUTH_INVALID type and 401 status', () => {
26 | const error = createAuthInvalidError('Invalid token');
27 | expect(error).toBeInstanceOf(McpError);
28 | expect(error.type).toBe(ErrorType.AUTH_INVALID);
29 | expect(error.message).toBe('Invalid token');
30 | expect(error.statusCode).toBe(401);
31 | });
32 |
33 | test('createApiError creates an error with API_ERROR type and specified status', () => {
34 | const error = createApiError('Not found', 404, {
35 | details: 'Resource missing',
36 | });
37 | expect(error).toBeInstanceOf(McpError);
38 | expect(error.type).toBe(ErrorType.API_ERROR);
39 | expect(error.message).toBe('Not found');
40 | expect(error.statusCode).toBe(404);
41 | expect(error.originalError).toEqual({
42 | details: 'Resource missing',
43 | });
44 | });
45 |
46 | test('createUnexpectedError creates an error with UNEXPECTED_ERROR type', () => {
47 | const originalError = new Error('Original error');
48 | const error = createUnexpectedError(
49 | 'Something went wrong',
50 | originalError,
51 | );
52 | expect(error).toBeInstanceOf(McpError);
53 | expect(error.type).toBe(ErrorType.UNEXPECTED_ERROR);
54 | expect(error.message).toBe('Something went wrong');
55 | expect(error.statusCode).toBeUndefined();
56 | expect(error.originalError).toBe(originalError);
57 | });
58 | });
59 |
60 | describe('ensureMcpError function', () => {
61 | test('returns the error if it is already an McpError', () => {
62 | const error = createApiError('API error', 500);
63 | expect(ensureMcpError(error)).toBe(error);
64 | });
65 |
66 | test('wraps a standard Error with McpError', () => {
67 | const stdError = new Error('Standard error');
68 | const mcpError = ensureMcpError(stdError);
69 | expect(mcpError).toBeInstanceOf(McpError);
70 | expect(mcpError.message).toBe('Standard error');
71 | expect(mcpError.type).toBe(ErrorType.UNEXPECTED_ERROR);
72 | expect(mcpError.originalError).toBe(stdError);
73 | });
74 |
75 | test('wraps a string with McpError', () => {
76 | const mcpError = ensureMcpError('Error message');
77 | expect(mcpError).toBeInstanceOf(McpError);
78 | expect(mcpError.message).toBe('Error message');
79 | expect(mcpError.type).toBe(ErrorType.UNEXPECTED_ERROR);
80 | });
81 |
82 | test('wraps other types with McpError', () => {
83 | const mcpError = ensureMcpError({ message: 'Object error' });
84 | expect(mcpError).toBeInstanceOf(McpError);
85 | expect(mcpError.message).toBe('[object Object]');
86 | expect(mcpError.type).toBe(ErrorType.UNEXPECTED_ERROR);
87 | });
88 | });
89 |
90 | describe('getDeepOriginalError function', () => {
91 | test('returns the deepest error in a chain', () => {
92 | const deepestError = { message: 'Root cause' };
93 | const level3 = createApiError('Level 3', 500, deepestError);
94 | const level2 = createApiError('Level 2', 500, level3);
95 | const level1 = createApiError('Level 1', 500, level2);
96 |
97 | expect(getDeepOriginalError(level1)).toEqual(deepestError);
98 | });
99 |
100 | test('handles non-McpError values', () => {
101 | const originalValue = 'Original error text';
102 | expect(getDeepOriginalError(originalValue)).toBe(originalValue);
103 | });
104 |
105 | test('stops traversing at maximum depth', () => {
106 | // Create a circular error chain that would cause infinite recursion
107 | const circular1: any = new McpError(
108 | 'Circular 1',
109 | ErrorType.API_ERROR,
110 | );
111 | const circular2: any = new McpError(
112 | 'Circular 2',
113 | ErrorType.API_ERROR,
114 | );
115 | circular1.originalError = circular2;
116 | circular2.originalError = circular1;
117 |
118 | // Should not cause infinite recursion
119 | const result = getDeepOriginalError(circular1);
120 |
121 | // Expect either circular1 or circular2 depending on max depth
122 | expect([circular1, circular2]).toContain(result);
123 | });
124 | });
125 |
126 | describe('formatErrorForMcpTool function', () => {
127 | test('formats an McpError for MCP tool response with raw error details', () => {
128 | const originalError = {
129 | code: 'NOT_FOUND',
130 | message: 'Repository does not exist',
131 | };
132 | const error = createApiError(
133 | 'Resource not found',
134 | 404,
135 | originalError,
136 | );
137 |
138 | const formatted = formatErrorForMcpTool(error);
139 |
140 | expect(formatted).toHaveProperty('content');
141 | expect(formatted).toHaveProperty('isError', true);
142 | expect(formatted.content[0].type).toBe('text');
143 | // Should contain the error message
144 | expect(formatted.content[0].text).toContain(
145 | 'Error: Resource not found',
146 | );
147 | // Should contain HTTP status
148 | expect(formatted.content[0].text).toContain('HTTP Status: 404');
149 | // Should contain raw API response with original error details
150 | expect(formatted.content[0].text).toContain('Raw API Response:');
151 | expect(formatted.content[0].text).toContain('NOT_FOUND');
152 | expect(formatted.content[0].text).toContain(
153 | 'Repository does not exist',
154 | );
155 | });
156 |
157 | test('formats a non-McpError for MCP tool response', () => {
158 | const error = new Error('Standard error');
159 |
160 | const formatted = formatErrorForMcpTool(error);
161 |
162 | expect(formatted).toHaveProperty('content');
163 | expect(formatted).toHaveProperty('isError', true);
164 | expect(formatted.content[0].type).toBe('text');
165 | expect(formatted.content[0].text).toContain(
166 | 'Error: Standard error',
167 | );
168 | });
169 |
170 | test('extracts detailed error information from nested errors', () => {
171 | const deepError = {
172 | message: 'API quota exceeded',
173 | type: 'RateLimitError',
174 | };
175 | const midError = createApiError(
176 | 'Rate limit exceeded',
177 | 429,
178 | deepError,
179 | );
180 | const topError = createApiError('API error', 429, midError);
181 |
182 | const formatted = formatErrorForMcpTool(topError);
183 |
184 | expect(formatted.content[0].text).toContain('Error: API error');
185 | // Should include the deep error details in raw response
186 | expect(formatted.content[0].text).toContain('API quota exceeded');
187 | expect(formatted.content[0].text).toContain('RateLimitError');
188 | });
189 | });
190 |
191 | describe('formatErrorForMcpResource', () => {
192 | it('should format an error for MCP resource response', () => {
193 | const error = createApiError('API error');
194 | const response = formatErrorForMcpResource(error, 'test://uri');
195 |
196 | expect(response).toHaveProperty('contents');
197 | expect(response.contents).toHaveLength(1);
198 | expect(response.contents[0]).toHaveProperty('uri', 'test://uri');
199 | expect(response.contents[0]).toHaveProperty(
200 | 'text',
201 | 'Error: API error',
202 | );
203 | expect(response.contents[0]).toHaveProperty(
204 | 'mimeType',
205 | 'text/plain',
206 | );
207 | expect(response.contents[0]).toHaveProperty(
208 | 'description',
209 | 'Error: API_ERROR',
210 | );
211 | });
212 | });
213 | });
214 |
```
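For orientation, the following standalone snippet (an editor-added sketch, not repository code) reproduces the behaviour these tests assert against `error.util.ts`: wrapping arbitrary values into `McpError` and unwrapping a nested error chain back to the original vendor payload.

```typescript
// Demonstrates the error-chain helpers exercised by the tests above.
import {
	createApiError,
	ensureMcpError,
	getDeepOriginalError,
} from './error.util.js';

// Hand-written stand-in for a raw Bitbucket error body.
const vendorPayload = { error: { message: 'Repository does not exist' } };

// Build a two-level chain: an outer API error wrapping an inner one, which in
// turn wraps the raw vendor payload.
const inner = createApiError('Bitbucket API request failed', 404, vendorPayload);
const outer = createApiError('Failed to get repository', 404, inner);

// Unwraps to the deepest non-McpError value: the vendor payload object.
console.log(getDeepOriginalError(outer));

// Non-Error values are normalised into an McpError with UNEXPECTED_ERROR type.
const wrapped = ensureMcpError('plain string failure');
console.log(wrapped.type, wrapped.message);
```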
--------------------------------------------------------------------------------
/src/utils/pagination.util.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Logger } from './logger.util.js';
2 | import { DATA_LIMITS } from './constants.util.js';
3 | import { ResponsePagination } from '../types/common.types.js';
4 |
5 | /**
6 | * Represents the possible pagination types.
7 | */
8 | export enum PaginationType {
9 | CURSOR = 'cursor', // Confluence, Bitbucket (some endpoints)
10 | OFFSET = 'offset', // Jira
11 | PAGE = 'page', // Bitbucket (most endpoints)
12 | }
13 |
14 | /**
15 | * Interface representing the common structure of paginated data from APIs.
16 | * This union type covers properties used by offset, cursor, and page-based pagination.
17 | */
18 | interface PaginationData {
19 | // Shared
20 | results?: unknown[];
21 | values?: unknown[];
22 | count?: number;
23 | size?: number; // Total count in Bitbucket page responses
24 | hasMore?: boolean;
25 | _links?: { next?: string }; // Confluence cursor
26 | // Offset-based (Jira)
27 | startAt?: number;
28 | maxResults?: number;
29 | total?: number;
30 | nextPage?: string; // Alternative next indicator for offset
31 | // Page-based (Bitbucket)
32 | page?: number;
33 | pagelen?: number;
34 | next?: string; // Bitbucket page URL
35 | }
36 |
37 | /**
38 | * Extract pagination information from API response
39 | * @param data The API response containing pagination information
40 | * @param paginationType The type of pagination mechanism used
41 | * @returns ResponsePagination info (hasMore, count, nextCursor, plus page/size/total where available), or undefined if none can be derived
42 | */
43 | export function extractPaginationInfo<T extends Partial<PaginationData>>(
44 | data: T,
45 | paginationType: PaginationType,
46 | ): ResponsePagination | undefined {
47 | if (!data) {
48 | return undefined;
49 | }
50 |
51 | let pagination: ResponsePagination | undefined;
52 | const methodLogger = Logger.forContext(
53 | 'utils/pagination.util.ts',
54 | 'extractPaginationInfo',
55 | );
56 |
57 | switch (paginationType) {
58 | case PaginationType.PAGE: {
59 | // Bitbucket page-based pagination (page, pagelen, size, next)
60 | if (data.page !== undefined && data.pagelen !== undefined) {
61 | const hasMore = !!data.next;
62 | let nextCursorValue: string | undefined = undefined;
63 |
64 | if (hasMore) {
65 | try {
66 | // First attempt to parse the full URL if it looks like one
67 | if (
68 | typeof data.next === 'string' &&
69 | data.next.includes('://')
70 | ) {
71 | const nextUrl = new URL(data.next);
72 | nextCursorValue =
73 | nextUrl.searchParams.get('page') || undefined;
74 | methodLogger.debug(
75 | `Successfully extracted page from URL: ${nextCursorValue}`,
76 | );
77 | } else if (data.next === 'available') {
78 | // Handle the 'available' placeholder used in some transformedResponses
79 | nextCursorValue = String(Number(data.page) + 1);
80 | methodLogger.debug(
81 | `Using calculated next page from 'available': ${nextCursorValue}`,
82 | );
83 | } else if (typeof data.next === 'string') {
84 | // Try to use data.next directly if it's not a URL but still a string
85 | nextCursorValue = data.next;
86 | methodLogger.debug(
87 | `Using next value directly: ${nextCursorValue}`,
88 | );
89 | }
90 | } catch (e) {
91 | // If URL parsing fails, calculate the next page based on current page
92 | nextCursorValue = String(Number(data.page) + 1);
93 | methodLogger.debug(
94 | `Calculated next page after URL parsing error: ${nextCursorValue}`,
95 | );
96 | methodLogger.warn(
97 | `Failed to parse next URL: ${data.next}`,
98 | e,
99 | );
100 | }
101 | }
102 |
103 | pagination = {
104 | hasMore,
105 | count: data.values?.length ?? 0,
106 | page: data.page,
107 | size: data.pagelen,
108 | total: data.size,
109 | nextCursor: nextCursorValue, // Store next page number as cursor
110 | };
111 | }
112 | break;
113 | }
114 |
115 | case PaginationType.OFFSET: {
116 | // Jira offset-based pagination
117 | const countOffset = data.values?.length;
118 | if (
119 | data.startAt !== undefined &&
120 | data.maxResults !== undefined &&
121 | data.total !== undefined &&
122 | data.startAt + data.maxResults < data.total
123 | ) {
124 | pagination = {
125 | hasMore: true,
126 | count: countOffset,
127 | total: data.total,
128 | nextCursor: String(data.startAt + data.maxResults),
129 | };
130 | } else if (data.nextPage) {
131 | pagination = {
132 | hasMore: true,
133 | count: countOffset,
134 | nextCursor: data.nextPage,
135 | };
136 | }
137 | break;
138 | }
139 |
140 | case PaginationType.CURSOR: {
141 | // Confluence cursor-based pagination
142 | const countCursor = data.results?.length;
143 | if (data._links && data._links.next) {
144 | const nextUrl = data._links.next;
145 | const cursorMatch = nextUrl.match(/cursor=([^&]+)/);
146 | if (cursorMatch && cursorMatch[1]) {
147 | pagination = {
148 | hasMore: true,
149 | count: countCursor,
150 | nextCursor: decodeURIComponent(cursorMatch[1]),
151 | };
152 | }
153 | }
154 | break;
155 | }
156 |
157 | default:
158 | methodLogger.warn(`Unknown pagination type: ${paginationType}`);
159 | }
160 |
161 | // Ensure a default pagination object if none was created but data exists
162 | if (!pagination && (data.results || data.values)) {
163 | pagination = {
164 | hasMore: false,
165 | count: data.results?.length ?? data.values?.length ?? 0,
166 | };
167 | }
168 |
169 | return pagination;
170 | }
171 |
172 | /**
173 | * Validates and enforces page size limits to prevent excessive data exposure (CWE-770)
174 | * @param requestedPageSize The requested page size from the client
175 | * @param contextInfo Optional context for logging (e.g., endpoint name)
176 | * @returns The validated page size (clamped to maximum allowed)
177 | */
178 | export function validatePageSize(
179 | requestedPageSize?: number,
180 | contextInfo?: string,
181 | ): number {
182 | const methodLogger = Logger.forContext(
183 | 'utils/pagination.util.ts',
184 | 'validatePageSize',
185 | );
186 |
187 | // Use default if not specified
188 | if (!requestedPageSize || requestedPageSize <= 0) {
189 | const defaultSize = DATA_LIMITS.DEFAULT_PAGE_SIZE;
190 | methodLogger.debug(
191 | `Using default page size: ${defaultSize}${contextInfo ? ` for ${contextInfo}` : ''}`,
192 | );
193 | return defaultSize;
194 | }
195 |
196 | // Enforce maximum page size limit
197 | if (requestedPageSize > DATA_LIMITS.MAX_PAGE_SIZE) {
198 | const clampedSize = DATA_LIMITS.MAX_PAGE_SIZE;
199 | methodLogger.warn(
200 | `Page size ${requestedPageSize} exceeds maximum limit. Clamped to ${clampedSize}${contextInfo ? ` for ${contextInfo}` : ''}`,
201 | );
202 | return clampedSize;
203 | }
204 |
205 | methodLogger.debug(
206 | `Using requested page size: ${requestedPageSize}${contextInfo ? ` for ${contextInfo}` : ''}`,
207 | );
208 | return requestedPageSize;
209 | }
210 |
211 | /**
212 | * Validates pagination data to ensure it doesn't exceed configured limits
213 | * @param paginationData The pagination data to validate
214 | * @param contextInfo Optional context for logging
215 | * @returns True if data is within limits, false otherwise
216 | */
217 | export function validatePaginationLimits(
218 | paginationData: { count?: number; size?: number; pagelen?: number },
219 | contextInfo?: string,
220 | ): boolean {
221 | const methodLogger = Logger.forContext(
222 | 'utils/pagination.util.ts',
223 | 'validatePaginationLimits',
224 | );
225 |
226 | // Check if the response contains more items than our maximum allowed
227 | const itemCount = paginationData.count ?? 0;
228 | const pageSize = paginationData.size ?? paginationData.pagelen ?? 0;
229 |
230 | if (itemCount > DATA_LIMITS.MAX_PAGE_SIZE) {
231 | methodLogger.warn(
232 | `Response contains ${itemCount} items, exceeding maximum of ${DATA_LIMITS.MAX_PAGE_SIZE}${contextInfo ? ` for ${contextInfo}` : ''}`,
233 | );
234 | return false;
235 | }
236 |
237 | if (pageSize > DATA_LIMITS.MAX_PAGE_SIZE) {
238 | methodLogger.warn(
239 | `Response page size ${pageSize} exceeds maximum of ${DATA_LIMITS.MAX_PAGE_SIZE}${contextInfo ? ` for ${contextInfo}` : ''}`,
240 | );
241 | return false;
242 | }
243 |
244 | return true;
245 | }
246 |
```
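A hedged usage sketch for the helpers above. The response object is hand-written to mirror a Bitbucket page-based listing (`values`/`page`/`pagelen`/`size`/`next`); the clamping threshold comes from `DATA_LIMITS.MAX_PAGE_SIZE` in `constants.util.ts`, whose exact value is not shown in this section.

```typescript
// Extract pagination info from a Bitbucket-style page-based response and clamp
// an oversized requested page size to the configured maximum.
import {
	extractPaginationInfo,
	PaginationType,
	validatePageSize,
} from './pagination.util.js';

const bitbucketResponse = {
	values: [{ name: 'repo-a' }, { name: 'repo-b' }],
	page: 1,
	pagelen: 2,
	size: 10,
	next: 'https://api.bitbucket.org/2.0/repositories/acme?page=2',
};

const pagination = extractPaginationInfo(bitbucketResponse, PaginationType.PAGE);
// Expected shape: { hasMore: true, count: 2, page: 1, size: 2, total: 10, nextCursor: '2' }
console.log(pagination);

// A request for 500 items is clamped to DATA_LIMITS.MAX_PAGE_SIZE.
const pageSize = validatePageSize(500, 'repositories list');
console.log(pageSize);
```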
--------------------------------------------------------------------------------
/src/controllers/atlassian.repositories.content.controller.ts:
--------------------------------------------------------------------------------
```typescript
1 | import atlassianRepositoriesService from '../services/vendor.atlassian.repositories.service.js';
2 | import { Logger } from '../utils/logger.util.js';
3 | import { handleControllerError } from '../utils/error-handler.util.js';
4 | import { ControllerResponse } from '../types/common.types.js';
5 | import { CloneRepositoryToolArgsType } from '../tools/atlassian.repositories.types.js';
6 | import { getDefaultWorkspace } from '../utils/workspace.util.js';
7 | import { executeShellCommand } from '../utils/shell.util.js';
8 | import * as path from 'path';
9 | import * as fs from 'fs/promises';
10 | import { constants } from 'fs';
11 |
12 | // Logger instance for this module
13 | const logger = Logger.forContext(
14 | 'controllers/atlassian.repositories.content.controller.ts',
15 | );
16 |
17 | /**
18 | * Clones a Bitbucket repository to the local filesystem
19 | * @param options Options including repository identifiers and target path
20 | * @returns Information about the cloned repository
21 | */
22 | export async function handleCloneRepository(
23 | options: CloneRepositoryToolArgsType,
24 | ): Promise<ControllerResponse> {
25 | const methodLogger = logger.forMethod('handleCloneRepository');
26 | methodLogger.debug('Cloning repository with options:', options);
27 |
28 | try {
29 | // Handle optional workspaceSlug
30 | let { workspaceSlug } = options;
31 | if (!workspaceSlug) {
32 | methodLogger.debug(
33 | 'No workspace provided, fetching default workspace',
34 | );
35 | const defaultWorkspace = await getDefaultWorkspace();
36 | if (!defaultWorkspace) {
37 | throw new Error(
38 | 'No default workspace found. Please provide a workspace slug.',
39 | );
40 | }
41 | workspaceSlug = defaultWorkspace;
42 | methodLogger.debug(`Using default workspace: ${defaultWorkspace}`);
43 | }
44 |
45 | // Required parameters check
46 | const { repoSlug, targetPath } = options;
47 | if (!repoSlug) {
48 | throw new Error('Repository slug is required');
49 | }
50 | if (!targetPath) {
51 | throw new Error('Target path is required');
52 | }
53 |
54 | // Normalize and resolve the target path
55 | // If it's a relative path, convert it to absolute based on current working directory
56 | const processedTargetPath = path.isAbsolute(targetPath)
57 | ? targetPath
58 | : path.resolve(process.cwd(), targetPath);
59 |
60 | methodLogger.debug(
61 | `Normalized target path: ${processedTargetPath} (original: ${targetPath})`,
62 | );
63 |
64 | // Validate directory access and permissions before proceeding
65 | try {
66 | // Check if target directory exists
67 | try {
68 | await fs.access(processedTargetPath, constants.F_OK);
69 | methodLogger.debug(
70 | `Target directory exists: ${processedTargetPath}`,
71 | );
72 |
73 | // If it exists, check if we have write permission
74 | try {
75 | await fs.access(processedTargetPath, constants.W_OK);
76 | methodLogger.debug(
77 | `Have write permission to: ${processedTargetPath}`,
78 | );
79 | } catch {
80 | throw new Error(
81 | `Permission denied: You don't have write access to the target directory: ${processedTargetPath}`,
82 | );
83 | }
84 | } catch {
85 | // Directory doesn't exist, try to create it
86 | methodLogger.debug(
87 | `Target directory doesn't exist, creating: ${processedTargetPath}`,
88 | );
89 | try {
90 | await fs.mkdir(processedTargetPath, { recursive: true });
91 | methodLogger.debug(
92 | `Successfully created directory: ${processedTargetPath}`,
93 | );
94 | } catch (mkdirError) {
95 | throw new Error(
96 | `Failed to create target directory ${processedTargetPath}: ${(mkdirError as Error).message}. Please ensure you have write permissions to the parent directory.`,
97 | );
98 | }
99 | }
100 | } catch (accessError) {
101 | methodLogger.error('Path access error:', accessError);
102 | throw accessError;
103 | }
104 |
105 | // Get repository details to determine clone URL
106 | methodLogger.debug(
107 | `Getting repository details for ${workspaceSlug}/${repoSlug}`,
108 | );
109 | const repoDetails = await atlassianRepositoriesService.get({
110 | workspace: workspaceSlug,
111 | repo_slug: repoSlug,
112 | });
113 |
114 | // Find SSH clone URL (preferred) or fall back to HTTPS
115 | let cloneUrl: string | undefined;
116 | let cloneProtocol: string = 'SSH'; // Default to SSH
117 |
118 | if (repoDetails.links?.clone) {
119 | // First try to find SSH clone URL
120 | const sshClone = repoDetails.links.clone.find(
121 | (link) => link.name === 'ssh',
122 | );
123 |
124 | if (sshClone) {
125 | cloneUrl = sshClone.href;
126 | } else {
127 | // Fall back to HTTPS if SSH is not available
128 | const httpsClone = repoDetails.links.clone.find(
129 | (link) => link.name === 'https',
130 | );
131 |
132 | if (httpsClone) {
133 | cloneUrl = httpsClone.href;
134 | cloneProtocol = 'HTTPS';
135 | methodLogger.warn(
136 | 'SSH clone URL not found, falling back to HTTPS',
137 | );
138 | }
139 | }
140 | }
141 |
142 | if (!cloneUrl) {
143 | throw new Error(
144 | 'Could not find a valid clone URL for the repository',
145 | );
146 | }
147 |
148 | // Determine full target directory path
149 | // Clone into a subdirectory named after the repo slug
150 | const targetDir = path.join(processedTargetPath, repoSlug);
151 | methodLogger.debug(`Will clone to: ${targetDir}`);
152 |
153 | // Check if directory already exists
154 | try {
155 | const stats = await fs.stat(targetDir);
156 | if (stats.isDirectory()) {
157 | methodLogger.warn(
158 | `Target directory already exists: ${targetDir}`,
159 | );
160 | return {
161 | content: `Target directory \`${targetDir}\` already exists. Please choose a different target path or remove the existing directory.`,
162 | };
163 | }
164 | } catch {
165 | // Error means directory doesn't exist, which is what we want
166 | methodLogger.debug(
167 | `Target directory doesn't exist, proceeding with clone`,
168 | );
169 | }
170 |
171 | // Execute git clone command
172 | methodLogger.debug(`Cloning from URL (${cloneProtocol}): ${cloneUrl}`);
173 | const command = `git clone ${cloneUrl} "${targetDir}"`;
174 |
175 | try {
176 | const result = await executeShellCommand(
177 | command,
178 | 'cloning repository',
179 | );
180 |
181 | // Return success message with more detailed information
182 | return {
183 | content:
184 | `Successfully cloned repository \`${workspaceSlug}/${repoSlug}\` to \`${targetDir}\` using ${cloneProtocol}.\n\n` +
185 | `**Details:**\n` +
186 | `- **Repository**: ${workspaceSlug}/${repoSlug}\n` +
187 | `- **Clone Protocol**: ${cloneProtocol}\n` +
188 | `- **Target Location**: ${targetDir}\n\n` +
189 | `**Output:**\n\`\`\`\n${result}\n\`\`\`\n\n` +
190 | `**Note**: If this is your first time cloning with SSH, ensure your SSH keys are set up correctly.`,
191 | };
192 | } catch (cloneError) {
193 | // Enhanced error message with troubleshooting steps
194 | const errorMsg = `Failed to clone repository: ${(cloneError as Error).message}`;
195 | let troubleshooting = '';
196 |
197 | if (cloneProtocol === 'SSH') {
198 | troubleshooting =
199 | `\n\n**Troubleshooting SSH Clone Issues:**\n` +
200 | `1. Ensure you have SSH keys set up with Bitbucket\n` +
201 | `2. Check if your SSH agent is running: \`eval "$(ssh-agent -s)"; ssh-add\`\n` +
202 | `3. Verify connectivity: \`ssh -T git@bitbucket.org\`\n` +
203 | `4. Try using HTTPS instead (modify your tool call with a different repository URL)`;
204 | } else {
205 | troubleshooting =
206 | `\n\n**Troubleshooting HTTPS Clone Issues:**\n` +
207 | `1. Check your Bitbucket credentials\n` +
208 | `2. Ensure the target directory is writable\n` +
209 | `3. Try running the command manually to see detailed errors`;
210 | }
211 |
212 | throw new Error(errorMsg + troubleshooting);
213 | }
214 | } catch (error) {
215 | throw handleControllerError(error, {
216 | entityType: 'Repository',
217 | operation: 'clone',
218 | source: 'controllers/atlassian.repositories.content.controller.ts@handleCloneRepository',
219 | additionalInfo: options,
220 | });
221 | }
222 | }
223 |
```
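A brief sketch of how this handler might be invoked directly, for example from a CLI command. The argument shape is inferred from the destructuring in the controller above (optional `workspaceSlug`, plus `repoSlug` and `targetPath`); the actual `CloneRepositoryToolArgsType` lives in `tools/atlassian.repositories.types.ts` and is not shown in this section, and the slug and path values are placeholders.

```typescript
// Hypothetical direct call to the clone handler; argument shape inferred from
// the controller, values are placeholders.
import { handleCloneRepository } from './atlassian.repositories.content.controller.js';

async function cloneExample(): Promise<void> {
	const result = await handleCloneRepository({
		// workspaceSlug omitted: the controller falls back to getDefaultWorkspace().
		repoSlug: 'my-repo',
		targetPath: './checkouts',
	});

	// The controller returns a Markdown summary in `content`.
	console.log(result.content);
}

cloneExample().catch((error) => console.error(error));
```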
--------------------------------------------------------------------------------
/src/utils/error-handler.util.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, expect, test } from '@jest/globals';
2 | import {
3 | ErrorCode,
4 | buildErrorContext,
5 | detectErrorType,
6 | createUserFriendlyErrorMessage,
7 | handleControllerError,
8 | } from './error-handler.util.js';
9 | import { McpError, ErrorType, createApiError } from './error.util.js';
10 |
11 | describe('Error Handler Utilities', () => {
12 | describe('buildErrorContext function', () => {
13 | test('builds a complete error context object', () => {
14 | const context = buildErrorContext(
15 | 'Repository',
16 | 'retrieving',
17 | 'controllers/repositories.controller.ts@get',
18 | { workspaceSlug: 'atlassian', repoSlug: 'bitbucket' },
19 | { queryParams: { sort: 'name' } },
20 | );
21 |
22 | expect(context).toEqual({
23 | entityType: 'Repository',
24 | operation: 'retrieving',
25 | source: 'controllers/repositories.controller.ts@get',
26 | entityId: { workspaceSlug: 'atlassian', repoSlug: 'bitbucket' },
27 | additionalInfo: { queryParams: { sort: 'name' } },
28 | });
29 | });
30 |
31 | test('handles minimal required parameters', () => {
32 | const context = buildErrorContext(
33 | 'Repository',
34 | 'listing',
35 | 'controllers/repositories.controller.ts@list',
36 | );
37 |
38 | expect(context).toEqual({
39 | entityType: 'Repository',
40 | operation: 'listing',
41 | source: 'controllers/repositories.controller.ts@list',
42 | });
43 | expect(context.entityId).toBeUndefined();
44 | expect(context.additionalInfo).toBeUndefined();
45 | });
46 | });
47 |
48 | describe('detectErrorType function', () => {
49 | test('detects network errors', () => {
50 | const error = new Error('network error: connection refused');
51 | const result = detectErrorType(error);
52 | expect(result).toEqual({
53 | code: ErrorCode.NETWORK_ERROR,
54 | statusCode: 500,
55 | });
56 | });
57 |
58 | test('detects rate limit errors', () => {
59 | const error = new Error('too many requests');
60 | const result = detectErrorType(error);
61 | expect(result).toEqual({
62 | code: ErrorCode.RATE_LIMIT_ERROR,
63 | statusCode: 429,
64 | });
65 | });
66 |
67 | test('detects not found errors', () => {
68 | const error = new Error('resource not found');
69 | const result = detectErrorType(error);
70 | expect(result).toEqual({
71 | code: ErrorCode.NOT_FOUND,
72 | statusCode: 404,
73 | });
74 | });
75 |
76 | test('detects access denied errors', () => {
77 | const error = new Error('insufficient permissions');
78 | const result = detectErrorType(error);
79 | expect(result).toEqual({
80 | code: ErrorCode.ACCESS_DENIED,
81 | statusCode: 403,
82 | });
83 | });
84 |
85 | test('detects validation errors', () => {
86 | const error = new Error('validation failed: invalid input');
87 | const result = detectErrorType(error);
88 | expect(result).toEqual({
89 | code: ErrorCode.VALIDATION_ERROR,
90 | statusCode: 400,
91 | });
92 | });
93 |
94 | test('defaults to unexpected error', () => {
95 | const error = new Error('something unexpected happened');
96 | const result = detectErrorType(error);
97 | expect(result).toEqual({
98 | code: ErrorCode.UNEXPECTED_ERROR,
99 | statusCode: 500,
100 | });
101 | });
102 |
103 | test('respects explicit status code from error', () => {
104 | const error = new McpError(
105 | 'Custom error',
106 | ErrorType.API_ERROR,
107 | 418,
108 | );
109 | const result = detectErrorType(error);
110 | expect(result.statusCode).toBe(418);
111 | });
112 |
113 | test('detects Bitbucket-specific repository not found errors', () => {
114 | const bitbucketError = {
115 | error: {
116 | message: 'repository not found',
117 | },
118 | };
119 | const mcpError = createApiError('API Error', 404, bitbucketError);
120 | const result = detectErrorType(mcpError);
121 | expect(result).toEqual({
122 | code: ErrorCode.NOT_FOUND,
123 | statusCode: 404,
124 | });
125 | });
126 |
127 | test('detects Bitbucket-specific permission errors', () => {
128 | const bitbucketError = {
129 | error: {
130 | message: 'access denied for this repository',
131 | },
132 | };
133 | const mcpError = createApiError('API Error', 403, bitbucketError);
134 | const result = detectErrorType(mcpError);
135 | expect(result).toEqual({
136 | code: ErrorCode.ACCESS_DENIED,
137 | statusCode: 403,
138 | });
139 | });
140 |
141 | test('detects Bitbucket-specific validation errors', () => {
142 | const bitbucketError = {
143 | error: {
144 | message: 'invalid parameter: repository name',
145 | },
146 | };
147 | const mcpError = createApiError('API Error', 400, bitbucketError);
148 | const result = detectErrorType(mcpError);
149 | expect(result).toEqual({
150 | code: ErrorCode.VALIDATION_ERROR,
151 | statusCode: 400,
152 | });
153 | });
154 | });
155 |
156 | describe('createUserFriendlyErrorMessage function', () => {
157 | test('creates NOT_FOUND message with entityId string', () => {
158 | const message = createUserFriendlyErrorMessage(
159 | ErrorCode.NOT_FOUND,
160 | {
161 | entityType: 'Repository',
162 | entityId: 'atlassian/bitbucket',
163 | },
164 | );
165 | expect(message).toContain(
166 | 'Repository atlassian/bitbucket not found',
167 | );
168 | });
169 |
170 | test('creates NOT_FOUND message with entityId object', () => {
171 | const message = createUserFriendlyErrorMessage(
172 | ErrorCode.NOT_FOUND,
173 | {
174 | entityType: 'Repository',
175 | entityId: {
176 | workspaceSlug: 'atlassian',
177 | repoSlug: 'bitbucket',
178 | },
179 | },
180 | );
181 | expect(message).toContain(
182 | 'Repository atlassian/bitbucket not found',
183 | );
184 | });
185 |
186 | test('creates ACCESS_DENIED message', () => {
187 | const message = createUserFriendlyErrorMessage(
188 | ErrorCode.ACCESS_DENIED,
189 | {
190 | entityType: 'Repository',
191 | entityId: 'atlassian/bitbucket',
192 | },
193 | );
194 | expect(message).toContain(
195 | 'Access denied for repository atlassian/bitbucket',
196 | );
197 | });
198 |
199 | test('creates VALIDATION_ERROR message', () => {
200 | const originalMessage = 'Invalid repository name';
201 | const message = createUserFriendlyErrorMessage(
202 | ErrorCode.VALIDATION_ERROR,
203 | {
204 | entityType: 'Repository',
205 | operation: 'creating',
206 | },
207 | originalMessage,
208 | );
209 | expect(message).toBe(
210 | `${originalMessage} Error details: ${originalMessage}`,
211 | );
212 | });
213 |
214 | test('creates NETWORK_ERROR message', () => {
215 | const message = createUserFriendlyErrorMessage(
216 | ErrorCode.NETWORK_ERROR,
217 | {
218 | entityType: 'Repository',
219 | operation: 'retrieving',
220 | },
221 | );
222 | expect(message).toContain('Network error');
223 | expect(message).toContain('Bitbucket API');
224 | });
225 |
226 | test('creates RATE_LIMIT_ERROR message', () => {
227 | const message = createUserFriendlyErrorMessage(
228 | ErrorCode.RATE_LIMIT_ERROR,
229 | );
230 | expect(message).toContain('Bitbucket API rate limit exceeded');
231 | });
232 |
233 | test('includes original message for non-specific errors', () => {
234 | const message = createUserFriendlyErrorMessage(
235 | ErrorCode.UNEXPECTED_ERROR,
236 | {
237 | entityType: 'Repository',
238 | operation: 'processing',
239 | },
240 | 'Something went wrong',
241 | );
242 | expect(message).toContain('unexpected error');
243 | expect(message).toContain('Something went wrong');
244 | });
245 | });
246 |
247 | describe('handleControllerError function', () => {
248 | test('throws appropriate API error with user-friendly message', () => {
249 | const originalError = new Error('Repository not found');
250 | const context = buildErrorContext(
251 | 'Repository',
252 | 'retrieving',
253 | 'controllers/repositories.controller.ts@get',
254 | 'atlassian/bitbucket',
255 | );
256 |
257 | expect(() => {
258 | handleControllerError(originalError, context);
259 | }).toThrow(McpError);
260 |
261 | try {
262 | handleControllerError(originalError, context);
263 | } catch (error) {
264 | expect(error).toBeInstanceOf(McpError);
265 | expect((error as McpError).type).toBe(ErrorType.API_ERROR);
266 | expect((error as McpError).statusCode).toBe(404);
267 | expect((error as McpError).message).toContain(
268 | 'Repository atlassian/bitbucket not found',
269 | );
270 | expect((error as McpError).originalError).toBe(originalError);
271 | }
272 | });
273 | });
274 | });
275 |
```
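The controller-side pattern these tests exercise looks roughly like the sketch below (editor-added, with a placeholder in place of a real service call): wrap the failure, build an error context, and let `handleControllerError` convert it into a user-facing `McpError`, which the controller rethrows.

```typescript
// Typical controller usage of the error-handler utilities tested above.
import {
	buildErrorContext,
	handleControllerError,
} from './error-handler.util.js';

async function getRepositoryDetails(
	workspaceSlug: string,
	repoSlug: string,
): Promise<string> {
	try {
		// Placeholder for a real service call, e.g. a repositories service lookup.
		throw new Error('Repository not found');
	} catch (error) {
		throw handleControllerError(
			error,
			buildErrorContext(
				'Repository',
				'retrieving',
				'controllers/repositories.controller.ts@get',
				{ workspaceSlug, repoSlug },
			),
		);
	}
}
```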
--------------------------------------------------------------------------------
/src/utils/error.util.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Logger } from './logger.util.js';
2 | import { formatSeparator } from './formatter.util.js';
3 |
4 | /**
5 | * Error types for MCP errors
6 | */
7 | export type McpErrorType =
8 | | 'AUTHENTICATION_REQUIRED'
9 | | 'NOT_FOUND'
10 | | 'VALIDATION_ERROR'
11 | | 'RATE_LIMIT_EXCEEDED'
12 | | 'API_ERROR'
13 | | 'UNEXPECTED_ERROR';
14 |
15 | /**
16 | * Error types for classification
17 | */
18 | export enum ErrorType {
19 | AUTH_MISSING = 'AUTH_MISSING',
20 | AUTH_INVALID = 'AUTH_INVALID',
21 | API_ERROR = 'API_ERROR',
22 | UNEXPECTED_ERROR = 'UNEXPECTED_ERROR',
23 | }
24 |
25 | /**
26 | * Custom error class with type classification
27 | */
28 | export class McpError extends Error {
29 | type: ErrorType;
30 | errorType?: McpErrorType; // Add errorType property used by error-handler.util.ts
31 | statusCode?: number;
32 | originalError?: unknown;
33 |
34 | constructor(
35 | message: string,
36 | type: ErrorType,
37 | statusCode?: number,
38 | originalError?: unknown,
39 | ) {
40 | super(message);
41 | this.name = 'McpError';
42 | this.type = type;
43 | this.statusCode = statusCode;
44 | this.originalError = originalError;
45 |
46 | // Set errorType based on type
47 | switch (type) {
48 | case ErrorType.AUTH_MISSING:
49 | case ErrorType.AUTH_INVALID:
50 | this.errorType = 'AUTHENTICATION_REQUIRED';
51 | break;
52 | case ErrorType.API_ERROR:
53 | this.errorType =
54 | statusCode === 404
55 | ? 'NOT_FOUND'
56 | : statusCode === 429
57 | ? 'RATE_LIMIT_EXCEEDED'
58 | : 'API_ERROR';
59 | break;
60 | case ErrorType.UNEXPECTED_ERROR:
61 | default:
62 | this.errorType = 'UNEXPECTED_ERROR';
63 | break;
64 | }
65 | }
66 | }
67 |
68 | /**
69 | * Helper to unwrap nested McpErrors and return the deepest original error.
70 | * This is useful when an McpError contains another McpError as `originalError`
71 | * which in turn may wrap the vendor (Bitbucket) error text or object.
72 | */
73 | export function getDeepOriginalError(error: unknown): unknown {
74 | if (!error) {
75 | return error;
76 | }
77 |
78 | let current = error;
79 | let depth = 0;
80 | const maxDepth = 10; // Prevent infinite recursion
81 |
82 | while (
83 | depth < maxDepth &&
84 | current instanceof Error &&
85 | 'originalError' in current &&
86 | current.originalError
87 | ) {
88 | current = current.originalError;
89 | depth++;
90 | }
91 |
92 | return current;
93 | }
94 |
95 | /**
96 | * Create an authentication missing error
97 | */
98 | export function createAuthMissingError(
99 | message: string = 'Authentication credentials are missing',
100 | originalError?: unknown,
101 | ): McpError {
102 | return new McpError(
103 | message,
104 | ErrorType.AUTH_MISSING,
105 | undefined,
106 | originalError,
107 | );
108 | }
109 |
110 | /**
111 | * Create an authentication invalid error
112 | */
113 | export function createAuthInvalidError(
114 | message: string = 'Authentication credentials are invalid',
115 | originalError?: unknown,
116 | ): McpError {
117 | return new McpError(message, ErrorType.AUTH_INVALID, 401, originalError);
118 | }
119 |
120 | /**
121 | * Create an API error
122 | */
123 | export function createApiError(
124 | message: string,
125 | statusCode?: number,
126 | originalError?: unknown,
127 | ): McpError {
128 | return new McpError(
129 | message,
130 | ErrorType.API_ERROR,
131 | statusCode,
132 | originalError,
133 | );
134 | }
135 |
136 | /**
137 | * Create an unexpected error
138 | */
139 | export function createUnexpectedError(
140 | message: string = 'An unexpected error occurred',
141 | originalError?: unknown,
142 | ): McpError {
143 | return new McpError(
144 | message,
145 | ErrorType.UNEXPECTED_ERROR,
146 | undefined,
147 | originalError,
148 | );
149 | }
150 |
151 | /**
152 | * Ensure an error is an McpError
153 | */
154 | export function ensureMcpError(error: unknown): McpError {
155 | if (error instanceof McpError) {
156 | return error;
157 | }
158 |
159 | if (error instanceof Error) {
160 | return createUnexpectedError(error.message, error);
161 | }
162 |
163 | return createUnexpectedError(String(error));
164 | }
165 |
166 | /**
167 | * Format error for MCP tool response
168 | * Includes raw error details in the text content so AI can see the full context
169 | */
170 | export function formatErrorForMcpTool(error: unknown): {
171 | content: Array<{ type: 'text'; text: string }>;
172 | isError: boolean;
173 | } {
174 | const methodLogger = Logger.forContext(
175 | 'utils/error.util.ts',
176 | 'formatErrorForMcpTool',
177 | );
178 | const mcpError = ensureMcpError(error);
179 | methodLogger.error(`${mcpError.type} error`, mcpError);
180 |
181 | // Get the deep original error for additional context
182 | const originalError = getDeepOriginalError(mcpError.originalError);
183 |
184 | // Build error text with full details visible to AI
185 | let errorText = `Error: ${mcpError.message}`;
186 |
187 | // Add status code if available
188 | if (mcpError.statusCode) {
189 | errorText += `\nHTTP Status: ${mcpError.statusCode}`;
190 | }
191 |
192 | // Add raw error details if available (this is the actual Bitbucket API response)
193 | if (originalError && originalError !== mcpError.message) {
194 | if (typeof originalError === 'object') {
195 | errorText += `\n\nRaw API Response:\n${JSON.stringify(originalError, null, 2)}`;
196 | } else if (typeof originalError === 'string') {
197 | errorText += `\n\nRaw API Response:\n${originalError}`;
198 | }
199 | }
200 |
201 | return {
202 | content: [
203 | {
204 | type: 'text' as const,
205 | text: errorText,
206 | },
207 | ],
208 | isError: true,
209 | };
210 | }
211 |
212 | /**
213 | * Format error for MCP resource response
214 | */
215 | export function formatErrorForMcpResource(
216 | error: unknown,
217 | uri: string,
218 | ): {
219 | contents: Array<{
220 | uri: string;
221 | text: string;
222 | mimeType: string;
223 | description?: string;
224 | }>;
225 | } {
226 | const methodLogger = Logger.forContext(
227 | 'utils/error.util.ts',
228 | 'formatErrorForMcpResource',
229 | );
230 | const mcpError = ensureMcpError(error);
231 | methodLogger.error(`${mcpError.type} error`, mcpError);
232 |
233 | return {
234 | contents: [
235 | {
236 | uri,
237 | text: `Error: ${mcpError.message}`,
238 | mimeType: 'text/plain',
239 | description: `Error: ${mcpError.type}`,
240 | },
241 | ],
242 | };
243 | }
244 |
245 | /**
246 | * Handle error in CLI context with improved user feedback
247 | */
248 | export function handleCliError(error: unknown): never {
249 | const methodLogger = Logger.forContext(
250 | 'utils/error.util.ts',
251 | 'handleCliError',
252 | );
253 | const mcpError = ensureMcpError(error);
254 | methodLogger.error(`${mcpError.type} error`, mcpError);
255 |
256 | // Get the deep original error for more context
257 | const originalError = getDeepOriginalError(mcpError.originalError);
258 |
259 | // Build a well-formatted CLI output using markdown-style helpers
260 | const cliLines: string[] = [];
261 |
262 | // Primary error headline
263 | cliLines.push(`❌ ${mcpError.message}`);
264 |
265 | // Status code (if any)
266 | if (mcpError.statusCode) {
267 | cliLines.push(`HTTP Status: ${mcpError.statusCode}`);
268 | }
269 |
270 | // Separator
271 | cliLines.push(formatSeparator());
272 |
273 | // Provide helpful context based on error type
274 | if (mcpError.type === ErrorType.AUTH_MISSING) {
275 | cliLines.push(
276 | 'Tip: Make sure to set up your Atlassian credentials in the configuration file or environment variables:',
277 | );
278 | cliLines.push(
279 | '- ATLASSIAN_SITE_NAME, ATLASSIAN_USER_EMAIL, and ATLASSIAN_API_TOKEN; or',
280 | );
281 | cliLines.push(
282 | '- ATLASSIAN_BITBUCKET_USERNAME and ATLASSIAN_BITBUCKET_APP_PASSWORD',
283 | );
284 | } else if (mcpError.type === ErrorType.AUTH_INVALID) {
285 | cliLines.push(
286 | 'Tip: Check that your Atlassian API token or app password is correct and has not expired.',
287 | );
288 | cliLines.push(
289 | 'Also verify that the configured user has access to the requested resource.',
290 | );
291 | } else if (mcpError.type === ErrorType.API_ERROR) {
292 | if (mcpError.statusCode === 429) {
293 | cliLines.push(
294 | 'Tip: You may have exceeded your Bitbucket API rate limits. Try again later.',
295 | );
296 | }
297 | }
298 |
299 | // Vendor error details (if available)
300 | if (originalError) {
301 | cliLines.push('Bitbucket API Error:');
302 | cliLines.push('```');
303 | if (typeof originalError === 'object' && originalError !== null) {
304 | // Try to extract the most useful parts of Bitbucket's error response
305 | const origErr = originalError as Record<string, unknown>;
306 | if (origErr.error && typeof origErr.error === 'object') {
307 | // Format {"error": {"message": "..."}} structure
308 | const bitbucketError = origErr.error as Record<string, unknown>;
309 | cliLines.push(
310 | `Message: ${bitbucketError.message || 'Unknown error'}`,
311 | );
312 | if (bitbucketError.detail)
313 | cliLines.push(`Detail: ${bitbucketError.detail}`);
314 | } else if (origErr.message) {
315 | // Simple message
316 | cliLines.push(`${String(origErr.message)}`);
317 | } else {
318 | // Fall back to JSON representation for anything else
319 | cliLines.push(JSON.stringify(originalError, null, 2));
320 | }
321 | } else {
322 | cliLines.push(String(originalError).trim());
323 | }
324 | cliLines.push('```');
325 | }
326 |
327 | // Display DEBUG tip
328 | if (!process.env.DEBUG || !process.env.DEBUG.includes('mcp:')) {
329 | cliLines.push(
330 | 'For more detailed error information, run with DEBUG=mcp:* environment variable.',
331 | );
332 | }
333 |
334 | console.error(cliLines.join('\n'));
335 | process.exit(1);
336 | }
337 |
```
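A short sketch (editor-added) of how a tool handler might turn a failed Bitbucket call into an MCP tool response using the helpers above. The payload object is a hand-written stand-in for a raw Bitbucket error body, not a captured response.

```typescript
// Wrap a vendor error body in an McpError, then format it for an MCP tool response.
import { createApiError, formatErrorForMcpTool } from './error.util.js';

const bitbucketBody = {
	type: 'error',
	error: { message: 'Repository not found' },
};

const error = createApiError('Resource not found', 404, bitbucketBody);

const toolResponse = formatErrorForMcpTool(error);
// toolResponse.isError === true, and content[0].text contains the message,
// "HTTP Status: 404", and the raw API response rendered as JSON.
console.log(toolResponse.content[0].text);
```

The same error chain also feeds `handleCliError`, which prints a CLI-friendly version of the vendor details and exits.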
--------------------------------------------------------------------------------
/src/services/vendor.atlassian.repositories.types.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod';
2 |
3 | /**
4 | * Types for Atlassian Bitbucket Repositories API
5 | */
6 |
7 | // Link href schema
8 | const LinkSchema = z.object({
9 | href: z.string(),
10 | name: z.string().optional(),
11 | });
12 |
13 | /**
14 | * Repository SCM type
15 | */
16 | export const RepositorySCMSchema = z.enum(['git', 'hg']);
17 |
18 | /**
19 | * Repository fork policy
20 | */
21 | export const RepositoryForkPolicySchema = z.enum([
22 | 'allow_forks',
23 | 'no_public_forks',
24 | 'no_forks',
25 | ]);
26 |
27 | /**
28 | * Repository links object
29 | */
30 | export const RepositoryLinksSchema = z.object({
31 | self: LinkSchema.optional(),
32 | html: LinkSchema.optional(),
33 | avatar: LinkSchema.optional(),
34 | pullrequests: LinkSchema.optional(),
35 | commits: LinkSchema.optional(),
36 | forks: LinkSchema.optional(),
37 | watchers: LinkSchema.optional(),
38 | downloads: LinkSchema.optional(),
39 | clone: z.array(LinkSchema).optional(),
40 | hooks: LinkSchema.optional(),
41 | issues: LinkSchema.optional(),
42 | });
43 |
44 | /**
45 | * Repository owner links schema
46 | */
47 | const OwnerLinksSchema = z.object({
48 | self: LinkSchema.optional(),
49 | html: LinkSchema.optional(),
50 | avatar: LinkSchema.optional(),
51 | });
52 |
53 | /**
54 | * Repository owner object
55 | */
56 | export const RepositoryOwnerSchema = z.object({
57 | type: z.enum(['user', 'team']),
58 | username: z.string().optional(),
59 | display_name: z.string().optional(),
60 | uuid: z.string().optional(),
61 | links: OwnerLinksSchema.optional(),
62 | });
63 |
64 | /**
65 | * Repository branch object
66 | */
67 | export const RepositoryBranchSchema = z.object({
68 | type: z.literal('branch'),
69 | name: z.string(),
70 | });
71 |
72 | /**
73 | * Repository project links schema
74 | */
75 | const ProjectLinksSchema = z.object({
76 | self: LinkSchema.optional(),
77 | html: LinkSchema.optional(),
78 | });
79 |
80 | /**
81 | * Repository project object
82 | */
83 | export const RepositoryProjectSchema = z.object({
84 | type: z.literal('project'),
85 | key: z.string(),
86 | uuid: z.string(),
87 | name: z.string(),
88 | links: ProjectLinksSchema.optional(),
89 | });
90 |
91 | /**
92 | * Repository object returned from the API
93 | */
94 | export const RepositorySchema = z.object({
95 | type: z.literal('repository'),
96 | uuid: z.string(),
97 | full_name: z.string(),
98 | name: z.string(),
99 | description: z.string().optional(),
100 | is_private: z.boolean(),
101 | fork_policy: RepositoryForkPolicySchema.optional(),
102 | created_on: z.string().optional(),
103 | updated_on: z.string().optional(),
104 | size: z.number().optional(),
105 | language: z.string().optional(),
106 | has_issues: z.boolean().optional(),
107 | has_wiki: z.boolean().optional(),
108 | scm: RepositorySCMSchema,
109 | owner: RepositoryOwnerSchema,
110 | mainbranch: RepositoryBranchSchema.optional(),
111 | project: RepositoryProjectSchema.optional(),
112 | links: RepositoryLinksSchema,
113 | });
114 | export type Repository = z.infer<typeof RepositorySchema>;
115 |
116 | /**
117 | * Parameters for listing repositories
118 | */
119 | export const ListRepositoriesParamsSchema = z.object({
120 | workspace: z.string(),
121 | q: z.string().optional(),
122 | sort: z.string().optional(),
123 | page: z.number().optional(),
124 | pagelen: z.number().optional(),
125 | role: z.string().optional(),
126 | });
127 | export type ListRepositoriesParams = z.infer<
128 | typeof ListRepositoriesParamsSchema
129 | >;
130 |
131 | /**
132 | * Parameters for getting a repository by identifier
133 | */
134 | export const GetRepositoryParamsSchema = z.object({
135 | workspace: z.string(),
136 | repo_slug: z.string(),
137 | });
138 | export type GetRepositoryParams = z.infer<typeof GetRepositoryParamsSchema>;
139 |
140 | /**
141 | * API response for listing repositories
142 | */
143 | export const RepositoriesResponseSchema = z.object({
144 | pagelen: z.number(),
145 | page: z.number(),
146 | size: z.number(),
147 | next: z.string().optional(),
148 | previous: z.string().optional(),
149 | values: z.array(RepositorySchema),
150 | });
151 | export type RepositoriesResponse = z.infer<typeof RepositoriesResponseSchema>;
152 |
153 | // --- Commit History Types ---
154 |
155 | /**
156 | * Parameters for listing commits.
157 | */
158 | export const ListCommitsParamsSchema = z.object({
159 | workspace: z.string(),
160 | repo_slug: z.string(),
161 | include: z.string().optional(), // Branch, tag, or hash to include history from
162 | exclude: z.string().optional(), // Branch, tag, or hash to exclude history up to
163 | path: z.string().optional(), // File path to filter commits by
164 | page: z.number().optional(),
165 | pagelen: z.number().optional(),
166 | });
167 | export type ListCommitsParams = z.infer<typeof ListCommitsParamsSchema>;
168 |
169 | /**
170 | * Commit author user links schema
171 | */
172 | const CommitAuthorUserLinksSchema = z.object({
173 | self: LinkSchema.optional(),
174 | avatar: LinkSchema.optional(),
175 | });
176 |
177 | /**
178 | * Commit author user schema
179 | */
180 | const CommitAuthorUserSchema = z.object({
181 | display_name: z.string().optional(),
182 | nickname: z.string().optional(),
183 | account_id: z.string().optional(),
184 | uuid: z.string().optional(),
185 | type: z.string(), // Usually 'user'
186 | links: CommitAuthorUserLinksSchema.optional(),
187 | });
188 |
189 | /**
190 | * Commit author schema
191 | */
192 | export const CommitAuthorSchema = z.object({
193 | raw: z.string(),
194 | type: z.string(), // Usually 'author'
195 | user: CommitAuthorUserSchema.optional(),
196 | });
197 |
198 | /**
199 | * Commit links schema
200 | */
201 | const CommitLinksSchema = z.object({
202 | self: LinkSchema.optional(),
203 | html: LinkSchema.optional(),
204 | diff: LinkSchema.optional(),
205 | approve: LinkSchema.optional(),
206 | comments: LinkSchema.optional(),
207 | });
208 |
209 | /**
210 | * Commit summary schema
211 | */
212 | const CommitSummarySchema = z.object({
213 | raw: z.string().optional(),
214 | markup: z.string().optional(),
215 | html: z.string().optional(),
216 | });
217 |
218 | /**
219 | * Commit parent schema
220 | */
221 | const CommitParentSchema = z.object({
222 | hash: z.string(),
223 | type: z.string(),
224 | links: z.unknown(),
225 | });
226 |
227 | /**
228 | * Represents a single commit in the history.
229 | */
230 | export const CommitSchema = z.object({
231 | hash: z.string(),
232 | type: z.string(), // Usually 'commit'
233 | author: CommitAuthorSchema,
234 | date: z.string(), // ISO 8601 format date string
235 | message: z.string(),
236 | links: CommitLinksSchema,
237 | summary: CommitSummarySchema.optional(),
238 | parents: z.array(CommitParentSchema),
239 | });
240 | export type Commit = z.infer<typeof CommitSchema>;
241 |
242 | /**
243 | * API response for listing commits (paginated).
244 | */
245 | export const PaginatedCommitsSchema = z.object({
246 | pagelen: z.number(),
247 | page: z.number().optional(),
248 | size: z.number().optional(),
249 | next: z.string().optional(),
250 | previous: z.string().optional(),
251 | values: z.array(CommitSchema),
252 | });
253 | export type PaginatedCommits = z.infer<typeof PaginatedCommitsSchema>;
254 |
255 | /**
256 | * Parameters for creating a branch.
257 | */
258 | export const CreateBranchParamsSchema = z.object({
259 | workspace: z.string(),
260 | repo_slug: z.string(),
261 | name: z.string(), // New branch name
262 | target: z.object({
263 | hash: z.string(), // Source branch name or commit hash
264 | }),
265 | });
266 | export type CreateBranchParams = z.infer<typeof CreateBranchParamsSchema>;
267 |
268 | /**
269 | * Response object when creating a branch.
270 | * Contains details about the newly created branch reference.
271 | */
272 | export const BranchRefSchema = z.object({
273 | type: z.literal('branch'),
274 | name: z.string(),
275 | target: z.object({
276 | hash: z.string(),
277 | type: z.string(), // e.g., 'commit'
278 | }),
279 | });
280 | export type BranchRef = z.infer<typeof BranchRefSchema>;
281 |
282 | /**
283 | * Parameters for getting a file's content from a repository.
284 | */
285 | export const GetFileContentParamsSchema = z.object({
286 | workspace: z.string(),
287 | repo_slug: z.string(),
288 | commit: z.string(), // Branch name, tag, or commit hash
289 | path: z.string(), // File path within the repository
290 | });
291 | export type GetFileContentParams = z.infer<typeof GetFileContentParamsSchema>;
292 |
293 | /**
294 | * Represents a branch target (usually a commit).
295 | */
296 | export const BranchTargetSchema = z.object({
297 | hash: z.string(),
298 | type: z.string(), // Usually 'commit'
299 | });
300 |
301 | /**
302 | * Represents a branch in a Bitbucket repository.
303 | */
304 | export const BranchSchema = z.object({
305 | name: z.string(),
306 | type: z.literal('branch'),
307 | target: BranchTargetSchema,
308 | merge_strategies: z.array(z.string()).optional(),
309 | default_merge_strategy: z.string().optional(),
310 | links: z.record(z.string(), z.unknown()).optional(),
311 | });
312 |
313 | /**
314 | * Parameters for listing branches in a repository.
315 | */
316 | export const ListBranchesParamsSchema = z.object({
317 | workspace: z.string(),
318 | repo_slug: z.string(),
319 | page: z.number().optional(),
320 | pagelen: z.number().optional(),
321 | q: z.string().optional(), // Query for filtering branches
322 | sort: z.string().optional(), // Sort field
323 | });
324 | export type ListBranchesParams = z.infer<typeof ListBranchesParamsSchema>;
325 |
326 | /**
327 | * API response for listing branches (paginated).
328 | */
329 | export const BranchesResponseSchema = z.object({
330 | pagelen: z.number(),
331 | page: z.number().optional(),
332 | size: z.number().optional(),
333 | next: z.string().optional(),
334 | previous: z.string().optional(),
335 | values: z.array(BranchSchema),
336 | });
337 | export type BranchesResponse = z.infer<typeof BranchesResponseSchema>;
338 |
```
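Illustrative usage (not part of the repository): a minimal sketch of how the schemas above can validate tool input and API payloads. The workspace slug and payload below are hypothetical placeholders.

```typescript
import {
	ListRepositoriesParamsSchema,
	RepositoriesResponseSchema,
} from './vendor.atlassian.repositories.types.js';

// Validate caller-supplied parameters before building a request.
const params = ListRepositoriesParamsSchema.parse({
	workspace: 'my-workspace', // hypothetical slug
	pagelen: 5,
	sort: '-updated_on',
});
console.log(params.workspace);

// In practice this would be the parsed JSON body of GET /2.0/repositories/{workspace}.
const payload: unknown = {};

// safeParse() reports schema violations instead of throwing.
const parsed = RepositoriesResponseSchema.safeParse(payload);
if (parsed.success) {
	for (const repo of parsed.data.values) {
		console.log(repo.full_name, repo.mainbranch?.name);
	}
} else {
	console.error(parsed.error.issues);
}
```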
--------------------------------------------------------------------------------
/src/utils/logger.util.ts:
--------------------------------------------------------------------------------
```typescript
1 | import * as fs from 'fs';
2 | import * as path from 'path';
3 | import * as os from 'os';
4 | import * as crypto from 'crypto';
5 |
6 | /**
7 | * Format a timestamp for logging
8 | * @returns Formatted timestamp [HH:MM:SS]
9 | */
10 | function getTimestamp(): string {
11 | const now = new Date();
12 | return `[${now.toISOString().split('T')[1].split('.')[0]}]`;
13 | }
14 |
15 | /**
16 | * Safely convert object to string with size limits
17 | * @param obj Object to stringify
18 | * @param maxLength Maximum length of the resulting string
19 | * @returns Safely stringified object
20 | */
21 | function safeStringify(obj: unknown, maxLength = 1000): string {
22 | try {
23 | const str = JSON.stringify(obj);
24 | if (str.length <= maxLength) {
25 | return str;
26 | }
27 | return `${str.substring(0, maxLength)}... (truncated, ${str.length} chars total)`;
28 | } catch {
29 | return '[Object cannot be stringified]';
30 | }
31 | }
32 |
33 | /**
34 | * Extract essential values from larger objects for logging
35 | * @param obj The object to extract values from
36 | * @param keys Keys to extract (if available)
37 | * @returns Object containing only the specified keys
38 | */
39 | function extractEssentialValues(
40 | obj: Record<string, unknown>,
41 | keys: string[],
42 | ): Record<string, unknown> {
43 | const result: Record<string, unknown> = {};
44 | keys.forEach((key) => {
45 | if (Object.prototype.hasOwnProperty.call(obj, key)) {
46 | result[key] = obj[key];
47 | }
48 | });
49 | return result;
50 | }
51 |
52 | /**
53 | * Format source path consistently using the standardized format:
54 | * [module/file.ts@function] or [module/file.ts]
55 | *
56 | * @param filePath File path (with or without src/ prefix)
57 | * @param functionName Optional function name
58 | * @returns Formatted source path according to standard pattern
59 | */
60 | function formatSourcePath(filePath: string, functionName?: string): string {
61 | // Always strip 'src/' prefix for consistency
62 | const normalizedPath = filePath.replace(/^src\//, '');
63 |
64 | return functionName
65 | ? `[${normalizedPath}@${functionName}]`
66 | : `[${normalizedPath}]`;
67 | }
68 |
69 | /**
70 | * Check if debug logging is enabled for a specific module
71 | *
72 | * This function parses the DEBUG environment variable to determine if a specific
73 | * module should have debug logging enabled. The DEBUG variable can be:
74 | * - 'true' or '1': Enable all debug logging
75 | * - Comma-separated list of modules: Enable debug only for those modules
76 | * - Module patterns with wildcards: e.g., 'controllers/*' enables all controllers
77 | *
78 | * Examples:
79 | * - DEBUG=true
80 | * - DEBUG=controllers/*,services/aws.sso.auth.service.ts
81 | * - DEBUG=transport,utils/formatter*
82 | *
83 | * @param modulePath The module path to check against DEBUG patterns
84 | * @returns true if debug is enabled for this module, false otherwise
85 | */
86 | function isDebugEnabledForModule(modulePath: string): boolean {
87 | const debugEnv = process.env.DEBUG;
88 |
89 | if (!debugEnv) {
90 | return false;
91 | }
92 |
93 | // If debug is set to true or 1, enable all debug logging
94 | if (debugEnv === 'true' || debugEnv === '1') {
95 | return true;
96 | }
97 |
98 | // Parse comma-separated debug patterns
99 | const debugPatterns = debugEnv.split(',').map((p) => p.trim());
100 |
101 | // Check if the module matches any pattern
102 | return debugPatterns.some((pattern) => {
103 | // Convert glob-like patterns to regex
104 | // '*' and '**' both become '.*' (match any characters, including
105 | // across path segments); '?' matches any single character
106 | const regexPattern = pattern
107 | .replace(/\*/g, '.*') // Convert * to regex .*
108 | .replace(/\?/g, '.'); // Convert ? to regex .
109 |
110 | const regex = new RegExp(`^${regexPattern}$`);
111 | return (
112 | regex.test(modulePath) ||
113 | // Check for pattern matches without the 'src/' prefix
114 | regex.test(modulePath.replace(/^src\//, ''))
115 | );
116 | });
117 | }
118 |
119 | // Generate a unique session ID for this process
120 | const SESSION_ID = crypto.randomUUID();
121 |
122 | // Resolve the package name from package.json, falling back to the PACKAGE_NAME env var or 'mcp-server'
123 | const getPkgName = (): string => {
124 | try {
125 | // Try to get it from package.json first if available
126 | const packageJsonPath = path.resolve(process.cwd(), 'package.json');
127 | if (fs.existsSync(packageJsonPath)) {
128 | const packageJson = JSON.parse(
129 | fs.readFileSync(packageJsonPath, 'utf8'),
130 | );
131 | if (packageJson.name) {
132 | // Extract the last part of the name if it's scoped
133 | const match = packageJson.name.match(/(@[\w-]+\/)?(.+)/);
134 | return match ? match[2] : packageJson.name;
135 | }
136 | }
137 | } catch {
138 | // Silently fail and use default
139 | }
140 |
141 | // Fallback to environment variable or default
142 | return process.env.PACKAGE_NAME || 'mcp-server';
143 | };
144 |
145 | // MCP logs directory setup
146 | const HOME_DIR = os.homedir();
147 | const MCP_DATA_DIR = path.join(HOME_DIR, '.mcp', 'data');
148 | const CLI_NAME = getPkgName();
149 |
150 | // Ensure the MCP data directory exists
151 | if (!fs.existsSync(MCP_DATA_DIR)) {
152 | fs.mkdirSync(MCP_DATA_DIR, { recursive: true });
153 | }
154 |
155 | // Create the log file path with session ID
156 | const LOG_FILENAME = `${CLI_NAME}.${SESSION_ID}.log`;
157 | const LOG_FILEPATH = path.join(MCP_DATA_DIR, LOG_FILENAME);
158 |
159 | // Write initial log header
160 | fs.writeFileSync(
161 | LOG_FILEPATH,
162 | `# ${CLI_NAME} Log Session\n` +
163 | `Session ID: ${SESSION_ID}\n` +
164 | `Started: ${new Date().toISOString()}\n` +
165 | `Process ID: ${process.pid}\n` +
166 | `Working Directory: ${process.cwd()}\n` +
167 | `Command: ${process.argv.join(' ')}\n\n` +
168 | `## Log Entries\n\n`,
169 | 'utf8',
170 | );
171 |
172 | // Logger singleton to track initialization
173 | let isLoggerInitialized = false;
174 |
175 | /**
176 | * Logger class for consistent logging across the application.
177 | *
178 | * RECOMMENDED USAGE:
179 | *
180 | * 1. Create a file-level logger using the static forContext method:
181 | * ```
182 | * const logger = Logger.forContext('controllers/myController.ts');
183 | * ```
184 | *
185 | * 2. For method-specific logging, create a method logger:
186 | * ```
187 | * const methodLogger = Logger.forContext('controllers/myController.ts', 'myMethod');
188 | * ```
189 | *
190 | * 3. Avoid using raw string prefixes in log messages. Instead, use contextualized loggers.
191 | *
192 | * 4. For debugging objects, use the debugResponse method to log only essential properties.
193 | *
194 | * 5. Set DEBUG environment variable to control which modules show debug logs:
195 | * - DEBUG=true (enable all debug logs)
196 | * - DEBUG=controllers/*,services/* (enable for specific module groups)
197 | * - DEBUG=transport,utils/formatter* (enable specific modules, supports wildcards)
198 | */
199 | class Logger {
200 | private context?: string;
201 | private modulePath: string;
202 | private static sessionId = SESSION_ID;
203 | private static logFilePath = LOG_FILEPATH;
204 |
205 | constructor(context?: string, modulePath: string = '') {
206 | this.context = context;
207 | this.modulePath = modulePath;
208 |
209 | // Log initialization message only once
210 | if (!isLoggerInitialized) {
211 | this.info(
212 | `Logger initialized with session ID: ${Logger.sessionId}`,
213 | );
214 | this.info(`Logs will be saved to: ${Logger.logFilePath}`);
215 | isLoggerInitialized = true;
216 | }
217 | }
218 |
219 | /**
220 | * Create a contextualized logger for a specific file or component.
221 | * This is the preferred method for creating loggers.
222 | *
223 | * @param filePath The file path (e.g., 'controllers/aws.sso.auth.controller.ts')
224 | * @param functionName Optional function name for more specific context
225 | * @returns A new Logger instance with the specified context
226 | *
227 | * @example
228 | * // File-level logger
229 | * const logger = Logger.forContext('controllers/myController.ts');
230 | *
231 | * // Method-level logger
232 | * const methodLogger = Logger.forContext('controllers/myController.ts', 'myMethod');
233 | */
234 | static forContext(filePath: string, functionName?: string): Logger {
235 | return new Logger(formatSourcePath(filePath, functionName), filePath);
236 | }
237 |
238 | /**
239 | * Create a method level logger from a context logger
240 | * @param method Method name
241 | * @returns A new logger with the method context
242 | */
243 | forMethod(method: string): Logger {
244 | return Logger.forContext(this.modulePath, method);
245 | }
246 |
247 | private _formatMessage(message: string): string {
248 | return this.context ? `${this.context} ${message}` : message;
249 | }
250 |
251 | private _formatArgs(args: unknown[]): unknown[] {
252 | // If the first argument is an object and not an Error, safely stringify it
253 | if (
254 | args.length > 0 &&
255 | typeof args[0] === 'object' &&
256 | args[0] !== null &&
257 | !(args[0] instanceof Error)
258 | ) {
259 | args[0] = safeStringify(args[0]);
260 | }
261 | return args;
262 | }
263 |
264 | _log(
265 | level: 'info' | 'warn' | 'error' | 'debug',
266 | message: string,
267 | ...args: unknown[]
268 | ) {
269 | // Skip debug messages if not enabled for this module
270 | if (level === 'debug' && !isDebugEnabledForModule(this.modulePath)) {
271 | return;
272 | }
273 |
274 | const timestamp = getTimestamp();
275 | const prefix = `${timestamp} [${level.toUpperCase()}]`;
276 | let logMessage = `${prefix} ${this._formatMessage(message)}`;
277 |
278 | const formattedArgs = this._formatArgs(args);
279 | if (formattedArgs.length > 0) {
280 | // Handle errors specifically
281 | if (formattedArgs[0] instanceof Error) {
282 | const error = formattedArgs[0] as Error;
283 | logMessage += ` Error: ${error.message}`;
284 | if (error.stack) {
285 | logMessage += `\n${error.stack}`;
286 | }
287 | // If there are more args, add them after the error
288 | if (formattedArgs.length > 1) {
289 | logMessage += ` ${formattedArgs
290 | .slice(1)
291 | .map((arg) =>
292 | typeof arg === 'string' ? arg : safeStringify(arg),
293 | )
294 | .join(' ')}`;
295 | }
296 | } else {
297 | logMessage += ` ${formattedArgs
298 | .map((arg) =>
299 | typeof arg === 'string' ? arg : safeStringify(arg),
300 | )
301 | .join(' ')}`;
302 | }
303 | }
304 |
305 | // Write to log file
306 | try {
307 | fs.appendFileSync(Logger.logFilePath, `${logMessage}\n`, 'utf8');
308 | } catch (err) {
309 | // If we can't write to the log file, log the error to console
310 | console.error(`Failed to write to log file: ${err}`);
311 | }
312 |
313 | if (process.env.NODE_ENV === 'test') {
314 | console[level](logMessage);
315 | } else {
316 | console.error(logMessage);
317 | }
318 | }
319 |
320 | info(message: string, ...args: unknown[]) {
321 | this._log('info', message, ...args);
322 | }
323 |
324 | warn(message: string, ...args: unknown[]) {
325 | this._log('warn', message, ...args);
326 | }
327 |
328 | error(message: string, ...args: unknown[]) {
329 | this._log('error', message, ...args);
330 | }
331 |
332 | debug(message: string, ...args: unknown[]) {
333 | this._log('debug', message, ...args);
334 | }
335 |
336 | /**
337 | * Log essential information about an API response
338 | * @param message Log message
339 | * @param response API response object
340 | * @param essentialKeys Keys to extract from the response
341 | */
342 | debugResponse(
343 | message: string,
344 | response: Record<string, unknown>,
345 | essentialKeys: string[],
346 | ) {
347 | const essentialInfo = extractEssentialValues(response, essentialKeys);
348 | this.debug(message, essentialInfo);
349 | }
350 |
351 | /**
352 | * Get the current session ID
353 | * @returns The UUID for the current logging session
354 | */
355 | static getSessionId(): string {
356 | return Logger.sessionId;
357 | }
358 |
359 | /**
360 | * Get the current log file path
361 | * @returns The path to the current log file
362 | */
363 | static getLogFilePath(): string {
364 | return Logger.logFilePath;
365 | }
366 | }
367 |
368 | // Only export the Logger class to enforce contextual logging via Logger.forContext
369 | export { Logger };
370 |
```
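Illustrative usage (not part of the repository): how callers typically obtain contextual loggers from the class above. The file path and arguments are examples only.

```typescript
import { Logger } from './logger.util.js';

// File-level logger; messages are prefixed with [controllers/example.controller.ts]
const logger = Logger.forContext('controllers/example.controller.ts');
logger.info('Controller initialized');

// Method-level logger adds @handleRequest to the prefix.
const methodLogger = logger.forMethod('handleRequest');
methodLogger.debug('Incoming args:', { id: 123 });

// The debug() call above only produces output when DEBUG matches this module, e.g.:
//   DEBUG=true
//   DEBUG=controllers/*
//   DEBUG=controllers/example.controller.ts
```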
--------------------------------------------------------------------------------
/src/tools/atlassian.api.tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2 | import { Logger } from '../utils/logger.util.js';
3 | import { formatErrorForMcpTool } from '../utils/error.util.js';
4 | import { truncateForAI } from '../utils/formatter.util.js';
5 | import {
6 | GetApiToolArgs,
7 | type GetApiToolArgsType,
8 | RequestWithBodyArgs,
9 | type RequestWithBodyArgsType,
10 | DeleteApiToolArgs,
11 | } from './atlassian.api.types.js';
12 | import {
13 | handleGet,
14 | handlePost,
15 | handlePut,
16 | handlePatch,
17 | handleDelete,
18 | } from '../controllers/atlassian.api.controller.js';
19 |
20 | // Create a contextualized logger for this file
21 | const toolLogger = Logger.forContext('tools/atlassian.api.tool.ts');
22 |
23 | // Log tool initialization
24 | toolLogger.debug('Bitbucket API tool initialized');
25 |
26 | /**
27 | * Creates an MCP tool handler for GET/DELETE requests (no body)
28 | *
29 | * @param methodName - Name of the HTTP method for logging
30 | * @param handler - Controller handler function
31 | * @returns MCP tool handler function
32 | */
33 | function createReadHandler(
34 | methodName: string,
35 | handler: (
36 | options: GetApiToolArgsType,
37 | ) => Promise<{ content: string; rawResponsePath?: string | null }>,
38 | ) {
39 | return async (args: Record<string, unknown>) => {
40 | const methodLogger = Logger.forContext(
41 | 'tools/atlassian.api.tool.ts',
42 | methodName.toLowerCase(),
43 | );
44 | methodLogger.debug(`Making ${methodName} request with args:`, args);
45 |
46 | try {
47 | const result = await handler(args as GetApiToolArgsType);
48 |
49 | methodLogger.debug(
50 | 'Successfully retrieved response from controller',
51 | );
52 |
53 | return {
54 | content: [
55 | {
56 | type: 'text' as const,
57 | text: truncateForAI(
58 | result.content,
59 | result.rawResponsePath,
60 | ),
61 | },
62 | ],
63 | };
64 | } catch (error) {
65 | methodLogger.error(`Failed to make ${methodName} request`, error);
66 | return formatErrorForMcpTool(error);
67 | }
68 | };
69 | }
70 |
71 | /**
72 | * Creates an MCP tool handler for POST/PUT/PATCH requests (with body)
73 | *
74 | * @param methodName - Name of the HTTP method for logging
75 | * @param handler - Controller handler function
76 | * @returns MCP tool handler function
77 | */
78 | function createWriteHandler(
79 | methodName: string,
80 | handler: (
81 | options: RequestWithBodyArgsType,
82 | ) => Promise<{ content: string; rawResponsePath?: string | null }>,
83 | ) {
84 | return async (args: Record<string, unknown>) => {
85 | const methodLogger = Logger.forContext(
86 | 'tools/atlassian.api.tool.ts',
87 | methodName.toLowerCase(),
88 | );
89 | methodLogger.debug(`Making ${methodName} request with args:`, {
90 | path: args.path,
91 | bodyKeys: args.body ? Object.keys(args.body as object) : [],
92 | });
93 |
94 | try {
95 | const result = await handler(args as RequestWithBodyArgsType);
96 |
97 | methodLogger.debug(
98 | 'Successfully received response from controller',
99 | );
100 |
101 | return {
102 | content: [
103 | {
104 | type: 'text' as const,
105 | text: truncateForAI(
106 | result.content,
107 | result.rawResponsePath,
108 | ),
109 | },
110 | ],
111 | };
112 | } catch (error) {
113 | methodLogger.error(`Failed to make ${methodName} request`, error);
114 | return formatErrorForMcpTool(error);
115 | }
116 | };
117 | }
118 |
119 | // Create tool handlers
120 | const get = createReadHandler('GET', handleGet);
121 | const post = createWriteHandler('POST', handlePost);
122 | const put = createWriteHandler('PUT', handlePut);
123 | const patch = createWriteHandler('PATCH', handlePatch);
124 | const del = createReadHandler('DELETE', handleDelete);
125 |
126 | // Tool descriptions
127 | const BB_GET_DESCRIPTION = `Read any Bitbucket data. Returns TOON format by default (30-60% fewer tokens than JSON).
128 |
129 | **IMPORTANT - Cost Optimization:**
130 | - ALWAYS use \`jq\` param to filter response fields. Unfiltered responses are very expensive!
131 | - Use \`pagelen\` query param to restrict result count (e.g., \`pagelen: "5"\`)
132 | - If unsure about available fields, first fetch ONE item with \`pagelen: "1"\` and NO jq filter to explore the schema, then use jq in subsequent calls
133 |
134 | **Schema Discovery Pattern:**
135 | 1. First call: \`path: "/workspaces", queryParams: {"pagelen": "1"}\` (no jq) - explore available fields
136 | 2. Then use: \`jq: "values[*].{slug: slug, name: name, uuid: uuid}"\` - extract only what you need
137 |
138 | **Output format:** TOON (default, token-efficient) or JSON (\`outputFormat: "json"\`)
139 |
140 | **Common paths:**
141 | - \`/workspaces\` - list workspaces
142 | - \`/repositories/{workspace}\` - list repos in workspace
143 | - \`/repositories/{workspace}/{repo}\` - get repo details
144 | - \`/repositories/{workspace}/{repo}/pullrequests\` - list PRs
145 | - \`/repositories/{workspace}/{repo}/pullrequests/{id}\` - get PR details
146 | - \`/repositories/{workspace}/{repo}/pullrequests/{id}/comments\` - list PR comments
147 | - \`/repositories/{workspace}/{repo}/pullrequests/{id}/diff\` - get PR diff
148 | - \`/repositories/{workspace}/{repo}/refs/branches\` - list branches
149 | - \`/repositories/{workspace}/{repo}/commits\` - list commits
150 | - \`/repositories/{workspace}/{repo}/src/{commit}/{filepath}\` - get file content
151 | - \`/repositories/{workspace}/{repo}/diff/{source}..{destination}\` - compare branches/commits
152 |
153 | **Query params:** \`pagelen\` (page size), \`page\` (page number), \`q\` (filter), \`sort\` (order), \`fields\` (sparse response)
154 |
155 | **Example filters (q param):** \`state="OPEN"\`, \`source.branch.name="feature"\`, \`title~"bug"\`
156 |
157 | **JQ examples:** \`values[*].slug\`, \`values[0]\`, \`values[*].{name: name, uuid: uuid}\`
158 |
159 | The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;
160 |
161 | const BB_POST_DESCRIPTION = `Create Bitbucket resources. Returns TOON format by default (token-efficient).
162 |
163 | **IMPORTANT - Cost Optimization:**
164 | - Use \`jq\` param to extract only needed fields from response (e.g., \`jq: "{id: id, title: title}"\`)
165 | - Unfiltered responses include all metadata and are expensive!
166 |
167 | **Output format:** TOON (default) or JSON (\`outputFormat: "json"\`)
168 |
169 | **Common operations:**
170 |
171 | 1. **Create PR:** \`/repositories/{workspace}/{repo}/pullrequests\`
172 | body: \`{"title": "...", "source": {"branch": {"name": "feature"}}, "destination": {"branch": {"name": "main"}}}\`
173 |
174 | 2. **Add PR comment:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/comments\`
175 | body: \`{"content": {"raw": "Comment text"}}\`
176 |
177 | 3. **Approve PR:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/approve\`
178 | body: \`{}\`
179 |
180 | 4. **Request changes:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/request-changes\`
181 | body: \`{}\`
182 |
183 | 5. **Merge PR:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/merge\`
184 | body: \`{"merge_strategy": "squash"}\` (strategies: merge_commit, squash, fast_forward)
185 |
186 | The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;
187 |
188 | const BB_PUT_DESCRIPTION = `Replace Bitbucket resources (full update). Returns TOON format by default.
189 |
190 | **IMPORTANT - Cost Optimization:**
191 | - Use \`jq\` param to extract only needed fields from response
192 | - Example: \`jq: "{uuid: uuid, name: name}"\`
193 |
194 | **Output format:** TOON (default) or JSON (\`outputFormat: "json"\`)
195 |
196 | **Common operations:**
197 |
198 | 1. **Update repository:** \`/repositories/{workspace}/{repo}\`
199 | body: \`{"description": "...", "is_private": true, "has_issues": true}\`
200 |
201 | 2. **Create/update file:** \`/repositories/{workspace}/{repo}/src\`
202 | Note: Use multipart form data for file uploads (complex - prefer PATCH for metadata)
203 |
204 | 3. **Update branch restriction:** \`/repositories/{workspace}/{repo}/branch-restrictions/{id}\`
205 | body: \`{"kind": "push", "pattern": "main", "users": [{"uuid": "..."}]}\`
206 |
207 | The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;
208 |
209 | const BB_PATCH_DESCRIPTION = `Partially update Bitbucket resources. Returns TOON format by default.
210 |
211 | **IMPORTANT - Cost Optimization:** Use \`jq\` param to filter response fields.
212 |
213 | **Output format:** TOON (default) or JSON (\`outputFormat: "json"\`)
214 |
215 | **Common operations:**
216 |
217 | 1. **Update PR title/description:** \`/repositories/{workspace}/{repo}/pullrequests/{id}\`
218 | body: \`{"title": "New title", "description": "Updated description"}\`
219 |
220 | 2. **Update PR reviewers:** \`/repositories/{workspace}/{repo}/pullrequests/{id}\`
221 | body: \`{"reviewers": [{"uuid": "{user-uuid}"}]}\`
222 |
223 | 3. **Update repository properties:** \`/repositories/{workspace}/{repo}\`
224 | body: \`{"description": "New description"}\`
225 |
226 | 4. **Update comment:** \`/repositories/{workspace}/{repo}/pullrequests/{pr_id}/comments/{comment_id}\`
227 | body: \`{"content": {"raw": "Updated comment"}}\`
228 |
229 | The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;
230 |
231 | const BB_DELETE_DESCRIPTION = `Delete Bitbucket resources. Returns TOON format by default.
232 |
233 | **Output format:** TOON (default) or JSON (\`outputFormat: "json"\`)
234 |
235 | **Common operations:**
236 |
237 | 1. **Delete branch:** \`/repositories/{workspace}/{repo}/refs/branches/{branch_name}\`
238 | 2. **Delete PR comment:** \`/repositories/{workspace}/{repo}/pullrequests/{pr_id}/comments/{comment_id}\`
239 | 3. **Decline PR:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/decline\`
240 | 4. **Remove PR approval:** \`/repositories/{workspace}/{repo}/pullrequests/{id}/approve\`
241 | 5. **Delete repository:** \`/repositories/{workspace}/{repo}\` (caution: irreversible)
242 |
243 | Note: Most DELETE endpoints return 204 No Content on success.
244 |
245 | The \`/2.0\` prefix is added automatically. API reference: https://developer.atlassian.com/cloud/bitbucket/rest/`;
246 |
247 | /**
248 | * Register generic Bitbucket API tools with the MCP server.
249 | * Uses the modern registerTool API (SDK v1.22.0+) instead of deprecated tool() method.
250 | */
251 | function registerTools(server: McpServer) {
252 | const registerLogger = Logger.forContext(
253 | 'tools/atlassian.api.tool.ts',
254 | 'registerTools',
255 | );
256 | registerLogger.debug('Registering API tools...');
257 |
258 | // Register the GET tool using modern registerTool API
259 | server.registerTool(
260 | 'bb_get',
261 | {
262 | title: 'Bitbucket GET Request',
263 | description: BB_GET_DESCRIPTION,
264 | inputSchema: GetApiToolArgs,
265 | },
266 | get,
267 | );
268 |
269 | // Register the POST tool using modern registerTool API
270 | server.registerTool(
271 | 'bb_post',
272 | {
273 | title: 'Bitbucket POST Request',
274 | description: BB_POST_DESCRIPTION,
275 | inputSchema: RequestWithBodyArgs,
276 | },
277 | post,
278 | );
279 |
280 | // Register the PUT tool using modern registerTool API
281 | server.registerTool(
282 | 'bb_put',
283 | {
284 | title: 'Bitbucket PUT Request',
285 | description: BB_PUT_DESCRIPTION,
286 | inputSchema: RequestWithBodyArgs,
287 | },
288 | put,
289 | );
290 |
291 | // Register the PATCH tool using modern registerTool API
292 | server.registerTool(
293 | 'bb_patch',
294 | {
295 | title: 'Bitbucket PATCH Request',
296 | description: BB_PATCH_DESCRIPTION,
297 | inputSchema: RequestWithBodyArgs,
298 | },
299 | patch,
300 | );
301 |
302 | // Register the DELETE tool using modern registerTool API
303 | server.registerTool(
304 | 'bb_delete',
305 | {
306 | title: 'Bitbucket DELETE Request',
307 | description: BB_DELETE_DESCRIPTION,
308 | inputSchema: DeleteApiToolArgs,
309 | },
310 | del,
311 | );
312 |
313 | registerLogger.debug('Successfully registered API tools');
314 | }
315 |
316 | export default { registerTools };
317 |
```
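Illustrative wiring (not part of this file): a minimal sketch of registering these tools on an MCP server over stdio, assuming the standard MCP SDK stdio transport. The real entry point lives in src/index.ts and may differ in detail.

```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import apiTools from './atlassian.api.tool.js';

async function main() {
	const server = new McpServer({
		name: 'mcp-server-atlassian-bitbucket', // hypothetical server metadata
		version: '0.0.0',
	});

	// Registers bb_get, bb_post, bb_put, bb_patch and bb_delete.
	apiTools.registerTools(server);

	await server.connect(new StdioServerTransport());
}

main().catch((error) => {
	console.error('Failed to start MCP server:', error);
	process.exit(1);
});
```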
--------------------------------------------------------------------------------
/src/services/vendor.atlassian.repositories.service.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import atlassianRepositoriesService from './vendor.atlassian.repositories.service.js';
2 | import { getAtlassianCredentials } from '../utils/transport.util.js';
3 | import { config } from '../utils/config.util.js';
4 | import { McpError } from '../utils/error.util.js';
5 | import atlassianWorkspacesService from './vendor.atlassian.workspaces.service.js';
6 | import { Repository } from './vendor.atlassian.repositories.types.js';
7 | import { Logger } from '../utils/logger.util.js';
8 |
9 | // Instantiate logger for the test file
10 | const logger = Logger.forContext(
11 | 'services/vendor.atlassian.repositories.service.test.ts',
12 | );
13 |
14 | describe('Vendor Atlassian Repositories Service', () => {
15 | // Load configuration and check for credentials before all tests
16 | beforeAll(() => {
17 | config.load(); // Ensure config is loaded
18 | const credentials = getAtlassianCredentials();
19 | if (!credentials) {
20 | console.warn(
21 | 'Skipping Atlassian Repositories Service tests: No credentials available',
22 | );
23 | }
24 | });
25 |
26 | // Helper function to skip tests when credentials are missing
27 | const skipIfNoCredentials = () => !getAtlassianCredentials();
28 |
29 | // Helper to get a valid workspace slug for testing
30 | async function getFirstWorkspaceSlug(): Promise<string | null> {
31 | if (skipIfNoCredentials()) return null;
32 |
33 | try {
34 | const listResult = await atlassianWorkspacesService.list({
35 | pagelen: 1,
36 | });
37 | return listResult.values.length > 0
38 | ? listResult.values[0].workspace.slug
39 | : null;
40 | } catch (error) {
41 | console.warn(
42 | 'Could not fetch workspace list for repository tests:',
43 | error,
44 | );
45 | return null;
46 | }
47 | }
48 |
49 | describe('list', () => {
50 | it('should return a list of repositories for a valid workspace', async () => {
51 | if (skipIfNoCredentials()) return;
52 |
53 | const workspaceSlug = await getFirstWorkspaceSlug();
54 | if (!workspaceSlug) {
55 | console.warn('Skipping test: No workspace slug found.');
56 | return;
57 | }
58 |
59 | const result = await atlassianRepositoriesService.list({
60 | workspace: workspaceSlug,
61 | });
62 | logger.debug('List repositories result:', result);
63 |
64 | // Verify the response structure based on RepositoriesResponse
65 | expect(result).toHaveProperty('values');
66 | expect(Array.isArray(result.values)).toBe(true);
67 | expect(result).toHaveProperty('pagelen'); // Bitbucket uses pagelen
68 | expect(result).toHaveProperty('page');
69 | expect(result).toHaveProperty('size');
70 |
71 | if (result.values.length > 0) {
72 | // Verify the structure of the first repository in the list
73 | verifyRepositoryStructure(result.values[0]);
74 | }
75 | }, 30000); // Increased timeout
76 |
77 | it('should support pagination with pagelen and page', async () => {
78 | if (skipIfNoCredentials()) return;
79 |
80 | const workspaceSlug = await getFirstWorkspaceSlug();
81 | if (!workspaceSlug) {
82 | console.warn('Skipping test: No workspace slug found.');
83 | return;
84 | }
85 |
86 | // Get first page with limited results
87 | const result = await atlassianRepositoriesService.list({
88 | workspace: workspaceSlug,
89 | pagelen: 1,
90 | });
91 |
92 | expect(result).toHaveProperty('pagelen');
93 | // Allow pagelen to be greater than requested if API enforces minimum
94 | expect(result.pagelen).toBeGreaterThanOrEqual(1);
95 | expect(result.values.length).toBeLessThanOrEqual(result.pagelen);
96 |
97 | // If there are more items than the page size, expect pagination links
98 | if (result.size > result.pagelen) {
99 | expect(result).toHaveProperty('next');
100 |
101 | // Test requesting page 2 if available
102 | // Extract page parameter from next link if available
103 | if (result.next) {
104 | const nextPageUrl = new URL(result.next);
105 | const pageParam = nextPageUrl.searchParams.get('page');
106 |
107 | if (pageParam) {
108 | const page2 = parseInt(pageParam, 10);
109 | const page2Result =
110 | await atlassianRepositoriesService.list({
111 | workspace: workspaceSlug,
112 | pagelen: 1,
113 | page: page2,
114 | });
115 |
116 | expect(page2Result).toHaveProperty('page', page2);
117 |
118 | // If both pages have values, verify they're different repositories
119 | if (
120 | result.values.length > 0 &&
121 | page2Result.values.length > 0
122 | ) {
123 | expect(result.values[0].uuid).not.toBe(
124 | page2Result.values[0].uuid,
125 | );
126 | }
127 | }
128 | }
129 | }
130 | }, 30000);
131 |
132 | it('should support filtering with q parameter', async () => {
133 | if (skipIfNoCredentials()) return;
134 |
135 | const workspaceSlug = await getFirstWorkspaceSlug();
136 | if (!workspaceSlug) {
137 | console.warn('Skipping test: No workspace slug found.');
138 | return;
139 | }
140 |
141 | // First get all repositories to find a potential query term
142 | const allRepos = await atlassianRepositoriesService.list({
143 | workspace: workspaceSlug,
144 | });
145 |
146 | // Skip if no repositories available
147 | if (allRepos.values.length === 0) {
148 | console.warn(
149 | 'Skipping query filtering test: No repositories available',
150 | );
151 | return;
152 | }
153 |
154 | // Use the first repo's name as a query term
155 | const firstRepo = allRepos.values[0];
156 | // Take just the first word or first few characters to make filter less restrictive
157 | const queryTerm = firstRepo.name.split(' ')[0];
158 |
159 | // Test the query filter
160 | try {
161 | const result = await atlassianRepositoriesService.list({
162 | workspace: workspaceSlug,
163 | q: `name~"${queryTerm}"`,
164 | });
165 |
166 | // Verify basic response structure
167 | expect(result).toHaveProperty('values');
168 |
169 | // At least one returned repo should contain the query term in its name
170 | if (result.values.length > 0) {
171 | const nameMatches = result.values.some((repo) =>
172 | repo.name
173 | .toLowerCase()
174 | .includes(queryTerm.toLowerCase()),
175 | );
176 | expect(nameMatches).toBe(true);
177 | }
178 | } catch (error) {
179 | // If filtering isn't fully supported, we just log it
180 | console.warn(
181 | 'Query filtering test encountered an error:',
182 | error instanceof Error ? error.message : String(error),
183 | );
184 | }
185 | }, 30000);
186 |
187 | it('should support sorting with sort parameter', async () => {
188 | if (skipIfNoCredentials()) return;
189 |
190 | const workspaceSlug = await getFirstWorkspaceSlug();
191 | if (!workspaceSlug) {
192 | console.warn('Skipping test: No workspace slug found.');
193 | return;
194 | }
195 |
196 | // Skip this test if fewer than 2 repositories (can't verify sort order)
197 | const checkResult = await atlassianRepositoriesService.list({
198 | workspace: workspaceSlug,
199 | pagelen: 2,
200 | });
201 |
202 | if (checkResult.values.length < 2) {
203 | console.warn(
204 | 'Skipping sort test: Need at least 2 repositories to verify sort order',
205 | );
206 | return;
207 | }
208 |
209 | // Test sorting by name ascending
210 | const resultAsc = await atlassianRepositoriesService.list({
211 | workspace: workspaceSlug,
212 | sort: 'name',
213 | pagelen: 2,
214 | });
215 |
216 | // Test sorting by name descending
217 | const resultDesc = await atlassianRepositoriesService.list({
218 | workspace: workspaceSlug,
219 | sort: '-name',
220 | pagelen: 2,
221 | });
222 |
223 | // Verify basic response structure
224 | expect(resultAsc).toHaveProperty('values');
225 | expect(resultDesc).toHaveProperty('values');
226 |
227 | // Ensure both responses have at least 2 items to compare
228 | if (resultAsc.values.length >= 2 && resultDesc.values.length >= 2) {
229 | // For ascending order, first item should come before second alphabetically
230 | const ascNameComparison =
231 | resultAsc.values[0].name.localeCompare(
232 | resultAsc.values[1].name,
233 | );
234 | // For descending order, first item should come after second alphabetically
235 | const descNameComparison =
236 | resultDesc.values[0].name.localeCompare(
237 | resultDesc.values[1].name,
238 | );
239 |
240 | // Ascending should be ≤ 0 (first before or equal to second)
241 | expect(ascNameComparison).toBeLessThanOrEqual(0);
242 | // Descending should be ≥ 0 (first after or equal to second)
243 | expect(descNameComparison).toBeGreaterThanOrEqual(0);
244 | }
245 | }, 30000);
246 |
247 | it('should throw an error for an invalid workspace', async () => {
248 | if (skipIfNoCredentials()) return;
249 |
250 | const invalidWorkspace =
251 | 'this-workspace-definitely-does-not-exist-12345';
252 |
253 | // Expect the service call to reject with an McpError (likely 404)
254 | await expect(
255 | atlassianRepositoriesService.list({
256 | workspace: invalidWorkspace,
257 | }),
258 | ).rejects.toThrow();
259 |
260 | // Check for specific error properties
261 | try {
262 | await atlassianRepositoriesService.list({
263 | workspace: invalidWorkspace,
264 | });
265 | } catch (e) {
266 | expect(e).toBeInstanceOf(McpError);
267 | expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
268 | }
269 | }, 30000);
270 | });
271 |
272 | describe('get', () => {
273 | // Helper to get a valid repo for testing 'get'
274 | async function getFirstRepositoryInfo(): Promise<{
275 | workspace: string;
276 | repoSlug: string;
277 | } | null> {
278 | if (skipIfNoCredentials()) return null;
279 |
280 | const workspaceSlug = await getFirstWorkspaceSlug();
281 | if (!workspaceSlug) return null;
282 |
283 | try {
284 | const listResult = await atlassianRepositoriesService.list({
285 | workspace: workspaceSlug,
286 | pagelen: 1,
287 | });
288 |
289 | if (listResult.values.length === 0) return null;
290 |
291 | const fullName = listResult.values[0].full_name;
292 | // full_name is in format "workspace/repo_slug"
293 | const [workspace, repoSlug] = fullName.split('/');
294 |
295 | return { workspace, repoSlug };
296 | } catch (error) {
297 | console.warn(
298 | "Could not fetch repository list for 'get' test setup:",
299 | error,
300 | );
301 | return null;
302 | }
303 | }
304 |
305 | it('should return details for a valid workspace and repo_slug', async () => {
306 | const repoInfo = await getFirstRepositoryInfo();
307 | if (!repoInfo) {
308 | console.warn('Skipping get test: No repository found.');
309 | return;
310 | }
311 |
312 | const result = await atlassianRepositoriesService.get({
313 | workspace: repoInfo.workspace,
314 | repo_slug: repoInfo.repoSlug,
315 | });
316 |
317 | // Verify the response structure based on RepositoryDetailed
318 | expect(result).toHaveProperty('uuid');
319 | expect(result).toHaveProperty(
320 | 'full_name',
321 | `${repoInfo.workspace}/${repoInfo.repoSlug}`,
322 | );
323 | expect(result).toHaveProperty('name');
324 | expect(result).toHaveProperty('type', 'repository');
325 | expect(result).toHaveProperty('is_private');
326 | expect(result).toHaveProperty('links');
327 | expect(result.links).toHaveProperty('html');
328 | expect(result).toHaveProperty('owner');
329 | expect(result.owner).toHaveProperty('type');
330 | }, 30000);
331 |
332 | it('should throw an McpError for a non-existent repo_slug', async () => {
333 | const workspaceSlug = await getFirstWorkspaceSlug();
334 | if (!workspaceSlug) {
335 | console.warn('Skipping test: No workspace slug found.');
336 | return;
337 | }
338 |
339 | const invalidRepoSlug = 'this-repo-definitely-does-not-exist-12345';
340 |
341 | // Expect the service call to reject with an McpError (likely 404)
342 | await expect(
343 | atlassianRepositoriesService.get({
344 | workspace: workspaceSlug,
345 | repo_slug: invalidRepoSlug,
346 | }),
347 | ).rejects.toThrow(McpError);
348 |
349 | // Check for specific error properties
350 | try {
351 | await atlassianRepositoriesService.get({
352 | workspace: workspaceSlug,
353 | repo_slug: invalidRepoSlug,
354 | });
355 | } catch (e) {
356 | expect(e).toBeInstanceOf(McpError);
357 | expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
358 | }
359 | }, 30000);
360 |
361 | it('should throw an McpError for a non-existent workspace', async () => {
362 | if (skipIfNoCredentials()) return;
363 |
364 | const invalidWorkspace =
365 | 'this-workspace-definitely-does-not-exist-12345';
366 | const invalidRepoSlug = 'some-repo';
367 |
368 | // Expect the service call to reject with an McpError (likely 404)
369 | await expect(
370 | atlassianRepositoriesService.get({
371 | workspace: invalidWorkspace,
372 | repo_slug: invalidRepoSlug,
373 | }),
374 | ).rejects.toThrow(McpError);
375 |
376 | // Check for specific error properties
377 | try {
378 | await atlassianRepositoriesService.get({
379 | workspace: invalidWorkspace,
380 | repo_slug: invalidRepoSlug,
381 | });
382 | } catch (e) {
383 | expect(e).toBeInstanceOf(McpError);
384 | expect((e as McpError).statusCode).toBe(404); // Expecting Not Found
385 | }
386 | }, 30000);
387 | });
388 | });
389 |
390 | // Helper function to verify the Repository structure
391 | function verifyRepositoryStructure(repo: Repository) {
392 | expect(repo).toHaveProperty('uuid');
393 | expect(repo).toHaveProperty('name');
394 | expect(repo).toHaveProperty('full_name');
395 | expect(repo).toHaveProperty('is_private');
396 | expect(repo).toHaveProperty('links');
397 | expect(repo).toHaveProperty('owner');
398 | expect(repo).toHaveProperty('type', 'repository');
399 | }
400 |
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "compilerOptions": {
3 | /* Visit https://aka.ms/tsconfig to read more about this file */
4 |
5 | /* Projects */
6 | // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
7 | // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
8 | // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */
9 | // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */
10 | // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
11 | // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
12 |
13 | /* Language and Environment */
14 | "target": "ES2020", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
15 | "lib": ["ES2020"], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
16 | // "jsx": "preserve", /* Specify what JSX code is generated. */
17 | // "libReplacement": true, /* Enable lib replacement. */
18 | // "experimentalDecorators": true, /* Enable experimental support for legacy experimental decorators. */
19 | // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
20 | // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
21 | // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
22 | // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
23 | // "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
24 | // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
25 | // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
26 | // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */
27 |
28 | /* Modules */
29 | "module": "NodeNext", /* Specify what module code is generated. */
30 | // "rootDir": "./", /* Specify the root folder within your source files. */
31 | "moduleResolution": "NodeNext", /* Specify how TypeScript looks up a file from a given module specifier. */
32 | // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
33 | // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
34 | // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
35 | // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
36 | // "types": [], /* Specify type package names to be included without being referenced in a source file. */
37 | // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
38 | // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */
39 | // "allowImportingTsExtensions": true, /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */
40 | // "rewriteRelativeImportExtensions": true, /* Rewrite '.ts', '.tsx', '.mts', and '.cts' file extensions in relative import paths to their JavaScript equivalent in output files. */
41 | // "resolvePackageJsonExports": true, /* Use the package.json 'exports' field when resolving package imports. */
42 | // "resolvePackageJsonImports": true, /* Use the package.json 'imports' field when resolving imports. */
43 | // "customConditions": [], /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */
44 | // "noUncheckedSideEffectImports": true, /* Check side effect imports. */
45 | "resolveJsonModule": true, /* Enable importing .json files. */
46 | // "allowArbitraryExtensions": true, /* Enable importing files with any extension, provided a declaration file is present. */
47 | // "noResolve": true, /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */
48 |
49 | /* JavaScript Support */
50 | // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
51 | // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
52 | // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */
53 |
54 | /* Emit */
55 | "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
56 | // "declarationMap": true, /* Create sourcemaps for d.ts files. */
57 | // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
58 | // "sourceMap": true, /* Create source map files for emitted JavaScript files. */
59 | // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
60 | // "noEmit": true, /* Disable emitting files from a compilation. */
61 | // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
62 | "outDir": "./dist", /* Specify an output folder for all emitted files. */
63 | // "removeComments": true, /* Disable emitting comments. */
64 | // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
65 | // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
66 | // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
67 | // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
68 | // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
69 | // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
70 | // "newLine": "crlf", /* Set the newline character for emitting files. */
71 | // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */
72 | // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */
73 | // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
74 | // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */
75 | // "declarationDir": "./", /* Specify the output directory for generated declaration files. */
76 |
77 | /* Interop Constraints */
78 | "isolatedModules": true,
79 | // "verbatimModuleSyntax": true, /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */
80 | // "isolatedDeclarations": true, /* Require sufficient annotation on exports so other tools can trivially generate declaration files. */
81 | // "erasableSyntaxOnly": true, /* Do not allow runtime constructs that are not part of ECMAScript. */
82 | // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
83 | "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
84 | // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
85 | "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */
86 |
87 | /* Type Checking */
88 | "strict": true, /* Enable all strict type-checking options. */
89 | "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */
90 | "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */
91 | // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
92 | // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */
93 | // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */
94 | // "strictBuiltinIteratorReturn": true, /* Built-in iterators are instantiated with a 'TReturn' type of 'undefined' instead of 'any'. */
95 | // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */
96 | // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */
97 | // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */
98 | "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */
99 | "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */
100 | // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */
101 | "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */
102 | // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */
103 | // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */
104 | // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */
105 | // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */
106 | // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
107 | // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */
108 |
109 | /* Completeness */
110 | // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
111 | "skipLibCheck": true /* Skip type checking all .d.ts files. */
112 | },
113 | "include": ["src/**/*"]
114 | }
115 |
```
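A note on the module settings above: with `module` and `moduleResolution` set to `NodeNext`, relative imports in the TypeScript sources must spell out the emitted `.js` extension, which is why files across this repo import `./logger.util.js` and the like. A minimal illustration (hypothetical file):

```typescript
// Under NodeNext resolution the specifier names the emitted output file,
// and TypeScript maps it back to the .ts source at compile time.
import { Logger } from './logger.util.js';

// Omitting the extension is a compile error under NodeNext:
// import { Logger } from './logger.util';

const logger = Logger.forContext('utils/example.util.ts');
logger.info('NodeNext-style import resolved');
```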
--------------------------------------------------------------------------------
/src/utils/transport.util.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Logger } from './logger.util.js';
2 | import { config } from './config.util.js';
3 | import { NETWORK_TIMEOUTS, DATA_LIMITS } from './constants.util.js';
4 | import {
5 | createAuthInvalidError,
6 | createApiError,
7 | createUnexpectedError,
8 | McpError,
9 | } from './error.util.js';
10 | import { saveRawResponse } from './response.util.js';
11 |
12 | /**
13 | * Interface for Atlassian API credentials
14 | */
15 | export interface AtlassianCredentials {
16 | // Standard Atlassian credentials
17 | siteName?: string;
18 | userEmail?: string;
19 | apiToken?: string;
20 | // Bitbucket-specific credentials (alternative approach)
21 | bitbucketUsername?: string;
22 | bitbucketAppPassword?: string;
23 | // Indicates which auth method to use
24 | useBitbucketAuth?: boolean;
25 | }
26 |
27 | /**
28 | * Interface for HTTP request options
29 | */
30 | export interface RequestOptions {
31 | method?: 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE';
32 | headers?: Record<string, string>;
33 | body?: unknown;
34 | timeout?: number;
35 | }
36 |
37 | /**
38 | * Transport response wrapper that includes the data and the path to the raw response file
39 | */
40 | export interface TransportResponse<T> {
41 | data: T;
42 | rawResponsePath: string | null;
43 | }
44 |
45 | // Create a contextualized logger for this file
46 | const transportLogger = Logger.forContext('utils/transport.util.ts');
47 |
48 | // Log transport utility initialization
49 | transportLogger.debug('Transport utility initialized');
50 |
51 | /**
52 | * Get Atlassian credentials from environment variables
53 | * @returns AtlassianCredentials object or null if credentials are missing
54 | */
55 | export function getAtlassianCredentials(): AtlassianCredentials | null {
56 | const methodLogger = Logger.forContext(
57 | 'utils/transport.util.ts',
58 | 'getAtlassianCredentials',
59 | );
60 |
61 | // First try standard Atlassian credentials (preferred for consistency)
62 | const siteName = config.get('ATLASSIAN_SITE_NAME');
63 | const userEmail = config.get('ATLASSIAN_USER_EMAIL');
64 | const apiToken = config.get('ATLASSIAN_API_TOKEN');
65 |
66 | // If standard credentials are available, use them
67 | if (userEmail && apiToken) {
68 | methodLogger.debug('Using standard Atlassian credentials');
69 | return {
70 | siteName,
71 | userEmail,
72 | apiToken,
73 | useBitbucketAuth: false,
74 | };
75 | }
76 |
77 | // If standard credentials are not available, try Bitbucket-specific credentials
78 | const bitbucketUsername = config.get('ATLASSIAN_BITBUCKET_USERNAME');
79 | const bitbucketAppPassword = config.get('ATLASSIAN_BITBUCKET_APP_PASSWORD');
80 |
81 | if (bitbucketUsername && bitbucketAppPassword) {
82 | methodLogger.debug('Using Bitbucket-specific credentials');
83 | return {
84 | bitbucketUsername,
85 | bitbucketAppPassword,
86 | useBitbucketAuth: true,
87 | };
88 | }
89 |
90 | // If neither set of credentials is available, return null
91 | methodLogger.warn(
92 | 'Missing Atlassian credentials. Please set either ATLASSIAN_SITE_NAME, ATLASSIAN_USER_EMAIL, and ATLASSIAN_API_TOKEN environment variables, or ATLASSIAN_BITBUCKET_USERNAME and ATLASSIAN_BITBUCKET_APP_PASSWORD for Bitbucket-specific auth.',
93 | );
94 | return null;
95 | }
96 |
97 | /**
98 | * Fetch data from Atlassian API
99 | * @param credentials Atlassian API credentials
100 | * @param path API endpoint path (without base URL)
101 | * @param options Request options
102 | * @returns Response data wrapped with raw response path
103 | */
104 | export async function fetchAtlassian<T>(
105 | credentials: AtlassianCredentials,
106 | path: string,
107 | options: RequestOptions = {},
108 | ): Promise<TransportResponse<T>> {
109 | const methodLogger = Logger.forContext(
110 | 'utils/transport.util.ts',
111 | 'fetchAtlassian',
112 | );
113 |
114 | const baseUrl = 'https://api.bitbucket.org';
115 |
116 | // Set up auth headers based on credential type
117 | let authHeader: string;
118 |
119 | if (credentials.useBitbucketAuth) {
120 | // Bitbucket API uses a different auth format
121 | if (
122 | !credentials.bitbucketUsername ||
123 | !credentials.bitbucketAppPassword
124 | ) {
125 | throw createAuthInvalidError(
126 | 'Missing Bitbucket username or app password',
127 | );
128 | }
129 | authHeader = `Basic ${Buffer.from(
130 | `${credentials.bitbucketUsername}:${credentials.bitbucketAppPassword}`,
131 | ).toString('base64')}`;
132 | } else {
133 | // Standard Atlassian API (Jira, Confluence)
134 | if (!credentials.userEmail || !credentials.apiToken) {
135 | throw createAuthInvalidError('Missing Atlassian credentials');
136 | }
137 | authHeader = `Basic ${Buffer.from(
138 | `${credentials.userEmail}:${credentials.apiToken}`,
139 | ).toString('base64')}`;
140 | }
141 |
142 | // Ensure path starts with a slash
143 | const normalizedPath = path.startsWith('/') ? path : `/${path}`;
144 |
145 | // Construct the full URL
146 | const url = `${baseUrl}${normalizedPath}`;
147 |
148 | // Set up authentication and headers
149 | const headers = {
150 | Authorization: authHeader,
151 | 'Content-Type': 'application/json',
152 | Accept: 'application/json',
153 | ...options.headers,
154 | };
155 |
156 | // Prepare request options
157 | const requestOptions: RequestInit = {
158 | method: options.method || 'GET',
159 | headers,
160 | body: options.body ? JSON.stringify(options.body) : undefined,
161 | };
162 |
163 | methodLogger.debug(`Calling Atlassian API: ${url}`);
164 |
165 | // Set up timeout handling with configurable values
166 | const defaultTimeout = config.getNumber(
167 | 'ATLASSIAN_REQUEST_TIMEOUT',
168 | NETWORK_TIMEOUTS.DEFAULT_REQUEST_TIMEOUT,
169 | );
170 | const timeoutMs = options.timeout ?? defaultTimeout;
171 | const controller = new AbortController();
172 | const timeoutId = setTimeout(() => {
173 | methodLogger.warn(`Request timeout after ${timeoutMs}ms: ${url}`);
174 | controller.abort();
175 | }, timeoutMs);
176 |
177 | // Add abort signal to request options
178 | requestOptions.signal = controller.signal;
179 |
180 | // Track API call performance
181 | const startTime = performance.now();
182 |
183 | try {
184 | const response = await fetch(url, requestOptions);
185 | clearTimeout(timeoutId);
186 | const endTime = performance.now();
187 | const requestDuration = (endTime - startTime).toFixed(2);
188 |
189 | // Log the raw response status and headers
190 | methodLogger.debug(
191 | `Raw response received: ${response.status} ${response.statusText}`,
192 | {
193 | url,
194 | status: response.status,
195 | statusText: response.statusText,
196 | headers: Object.fromEntries(response.headers.entries()),
197 | },
198 | );
199 |
200 | // Validate response size to prevent excessive memory usage (CWE-770)
201 | const contentLength = response.headers.get('content-length');
202 | if (contentLength) {
203 | const responseSize = parseInt(contentLength, 10);
204 | if (responseSize > DATA_LIMITS.MAX_RESPONSE_SIZE) {
205 | methodLogger.warn(
206 | `Response size ${responseSize} bytes exceeds limit of ${DATA_LIMITS.MAX_RESPONSE_SIZE} bytes`,
207 | );
208 | throw createApiError(
209 | `Response size (${Math.round(responseSize / (1024 * 1024))}MB) exceeds maximum limit of ${Math.round(DATA_LIMITS.MAX_RESPONSE_SIZE / (1024 * 1024))}MB`,
210 | 413,
211 | { responseSize, limit: DATA_LIMITS.MAX_RESPONSE_SIZE },
212 | );
213 | }
214 | }
215 |
216 | if (!response.ok) {
217 | const errorText = await response.text();
218 | methodLogger.error(
219 | `API error: ${response.status} ${response.statusText}`,
220 | errorText,
221 | );
222 |
223 | // Try to parse the error response
224 | let errorMessage = `${response.status} ${response.statusText}`;
225 | let parsedBitbucketError = null;
226 |
227 | try {
228 | if (
229 | errorText &&
230 | (errorText.startsWith('{') || errorText.startsWith('['))
231 | ) {
232 | const parsedError = JSON.parse(errorText);
233 |
234 | // Extract specific error details from various Bitbucket API response formats
235 | if (
236 | parsedError.type === 'error' &&
237 | parsedError.error &&
238 | parsedError.error.message
239 | ) {
240 | // Format: {"type":"error", "error":{"message":"...", "detail":"..."}}
241 | parsedBitbucketError = parsedError.error;
242 | errorMessage = parsedBitbucketError.message;
243 | if (parsedBitbucketError.detail) {
244 | errorMessage += ` Detail: ${parsedBitbucketError.detail}`;
245 | }
246 | } else if (parsedError.error && parsedError.error.message) {
247 | // Alternative error format: {"error": {"message": "..."}}
248 | parsedBitbucketError = parsedError.error;
249 | errorMessage = parsedBitbucketError.message;
250 | } else if (
251 | parsedError.errors &&
252 | Array.isArray(parsedError.errors) &&
253 | parsedError.errors.length > 0
254 | ) {
255 | // Format: {"errors":[{"status":400,"code":"INVALID_REQUEST_PARAMETER","title":"..."}]}
256 | const atlassianError = parsedError.errors[0];
257 | if (atlassianError.title) {
258 | errorMessage = atlassianError.title;
259 | parsedBitbucketError = atlassianError;
260 | }
261 | } else if (parsedError.message) {
262 | // Format: {"message":"Some error message"}
263 | errorMessage = parsedError.message;
264 | parsedBitbucketError = parsedError;
265 | }
266 | }
267 | } catch (parseError) {
268 | methodLogger.debug(`Error parsing error response:`, parseError);
269 | // Fall back to the default error message
270 | }
271 |
272 | // Log the parsed error or raw error text
273 | methodLogger.debug(
274 | 'Parsed Bitbucket error:',
275 | parsedBitbucketError || errorText,
276 | );
277 |
278 | // Use parsedBitbucketError (or errorText if parsing failed) as originalError
279 | const originalErrorForMcp = parsedBitbucketError || errorText;
280 |
281 | // Handle common Bitbucket API error status codes
282 | if (response.status === 401) {
283 | throw createAuthInvalidError(
284 | `Bitbucket API: Authentication failed - ${errorMessage}`,
285 | originalErrorForMcp,
286 | );
287 | }
288 |
289 | if (response.status === 403) {
290 | throw createApiError(
291 | `Bitbucket API: Permission denied - ${errorMessage}`,
292 | 403,
293 | originalErrorForMcp,
294 | );
295 | }
296 |
297 | if (response.status === 404) {
298 | throw createApiError(
299 | `Bitbucket API: Resource not found - ${errorMessage}`,
300 | 404,
301 | originalErrorForMcp,
302 | );
303 | }
304 |
305 | if (response.status === 429) {
306 | throw createApiError(
307 | `Bitbucket API: Rate limit exceeded - ${errorMessage}`,
308 | 429,
309 | originalErrorForMcp,
310 | );
311 | }
312 |
313 | if (response.status >= 500) {
314 | throw createApiError(
315 | `Bitbucket API: Service error - ${errorMessage}`,
316 | response.status,
317 | originalErrorForMcp,
318 | );
319 | }
320 |
321 | // For other API errors, preserve the original vendor message
322 | throw createApiError(
323 | `Bitbucket API Error: ${errorMessage}`,
324 | response.status,
325 | originalErrorForMcp,
326 | );
327 | }
328 |
329 | // Handle 204 No Content responses (common for DELETE operations)
330 | if (response.status === 204) {
331 | methodLogger.debug('Received 204 No Content response');
332 | return { data: {} as T, rawResponsePath: null };
333 | }
334 |
335 | // Check if the response is expected to be plain text
336 | const contentType = response.headers.get('content-type') || '';
337 | if (contentType.includes('text/plain')) {
338 | // If we're expecting text (like a diff), return the raw text
339 | const textResponse = await response.text();
340 | methodLogger.debug(
341 | `Text response received (truncated)`,
342 | textResponse.substring(0, 200) + '...',
343 | );
344 | return {
345 | data: textResponse as unknown as T,
346 | rawResponsePath: null,
347 | };
348 | }
349 |
350 | // Handle empty responses (some endpoints return 200/201 with no body)
351 | const responseText = await response.text();
352 | if (!responseText || responseText.trim() === '') {
353 | methodLogger.debug('Received empty response body');
354 | return { data: {} as T, rawResponsePath: null };
355 | }
356 |
357 | // For JSON responses, parse the text we already read
358 | try {
359 | const responseJson = JSON.parse(responseText);
360 | methodLogger.debug(`Response body:`, responseJson);
361 |
362 | // Save raw response to file
363 | const rawResponsePath = saveRawResponse(
364 | url,
365 | requestOptions.method || 'GET',
366 | options.body,
367 | responseJson,
368 | response.status,
369 | parseFloat(requestDuration),
370 | );
371 |
372 | return { data: responseJson as T, rawResponsePath };
373 | } catch {
374 | methodLogger.debug(
375 | `Could not parse response as JSON, returning raw content`,
376 | );
377 | return {
378 | data: responseText as unknown as T,
379 | rawResponsePath: null,
380 | };
381 | }
382 | } catch (error) {
383 | clearTimeout(timeoutId);
384 | methodLogger.error(`Request failed`, error);
385 |
386 | // If it's already an McpError, just rethrow it
387 | if (error instanceof McpError) {
388 | throw error;
389 | }
390 |
391 | // Handle timeout errors
392 | if (error instanceof Error && error.name === 'AbortError') {
393 | methodLogger.error(
394 | `Request timed out after ${timeoutMs}ms: ${url}`,
395 | );
396 | throw createApiError(
397 | `Request timeout: Bitbucket API did not respond within ${timeoutMs / 1000} seconds`,
398 | 408,
399 | error,
400 | );
401 | }
402 |
403 | // Handle network errors more explicitly
404 | if (error instanceof TypeError) {
405 | // TypeError is typically a network/fetch error in this context
406 | const errorMessage = error.message || 'Network error occurred';
407 | methodLogger.debug(`Network error details: ${errorMessage}`);
408 |
409 | throw createApiError(
410 | `Network error while calling Bitbucket API: ${errorMessage}`,
411 | 500, // This will be classified as NETWORK_ERROR by detectErrorType
412 | error,
413 | );
414 | }
415 |
416 | // Handle JSON parsing errors
417 | if (error instanceof SyntaxError) {
418 | methodLogger.debug(`JSON parsing error: ${error.message}`);
419 |
420 | throw createApiError(
421 | `Invalid response format from Bitbucket API: ${error.message}`,
422 | 500,
423 | error,
424 | );
425 | }
426 |
427 | // Generic error handler for any other types of errors
428 | throw createUnexpectedError(
429 | `Unexpected error while calling Bitbucket API: ${error instanceof Error ? error.message : String(error)}`,
430 | error,
431 | );
432 | }
433 | }
434 |
```
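A minimal usage sketch for the transport layer above (not part of the repository source): the `CurrentUser` interface, the `showCurrentUser` function, and the choice of the `/2.0/user` endpoint are assumptions made for illustration; only `getAtlassianCredentials`, `fetchAtlassian`, and their return shapes come from `transport.util.ts`.

```typescript
import {
	getAtlassianCredentials,
	fetchAtlassian,
} from '../utils/transport.util.js';

// Hypothetical minimal shape for the Bitbucket current-user payload.
interface CurrentUser {
	uuid: string;
	display_name: string;
}

export async function showCurrentUser(): Promise<void> {
	// Returns null (and logs a warning) when neither credential pair is configured.
	const credentials = getAtlassianCredentials();
	if (!credentials) {
		throw new Error('Atlassian credentials are not configured');
	}

	// fetchAtlassian resolves to { data, rawResponsePath }; failures surface as McpError.
	const { data, rawResponsePath } = await fetchAtlassian<CurrentUser>(
		credentials,
		'/2.0/user',
		{ method: 'GET', timeout: 10_000 },
	);

	console.log(`Authenticated as ${data.display_name} (${data.uuid})`);
	if (rawResponsePath) {
		console.log(`Raw response saved to ${rawResponsePath}`);
	}
}
```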
--------------------------------------------------------------------------------
/src/utils/formatter.util.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Standardized formatting utilities for consistent output across all CLI and Tool interfaces.
3 | * These functions should be used by all formatters to ensure consistent formatting.
4 | */
5 |
6 | import { Logger } from './logger.util.js'; // Ensure logger is imported
7 | import { ResponsePagination } from '../types/common.types.js';
8 |
9 | // const formatterLogger = Logger.forContext('utils/formatter.util.ts'); // Define logger instance - Removed as unused
10 |
11 | /**
12 | * Format a date in a standardized way: YYYY-MM-DD HH:MM:SS UTC
13 | * @param dateString - ISO date string or Date object
14 | * @returns Formatted date string
15 | */
16 | export function formatDate(dateString?: string | Date): string {
17 | if (!dateString) {
18 | return 'Not available';
19 | }
20 |
21 | try {
22 | const date =
23 | typeof dateString === 'string' ? new Date(dateString) : dateString;
24 |
25 | // Format: YYYY-MM-DD HH:MM:SS UTC
26 | return date
27 | .toISOString()
28 | .replace('T', ' ')
29 | .replace(/\.\d+Z$/, ' UTC');
30 | } catch {
31 | return 'Invalid date';
32 | }
33 | }
34 |
35 | /**
36 | * Format a URL as a markdown link
37 | * @param url - URL to format
38 | * @param title - Link title
39 | * @returns Formatted markdown link
40 | */
41 | export function formatUrl(url?: string, title?: string): string {
42 | if (!url) {
43 | return 'Not available';
44 | }
45 |
46 | const linkTitle = title || url;
47 | return `[${linkTitle}](${url})`;
48 | }
49 |
50 | /**
51 | * Format pagination information in a standardized way for CLI output.
52 | * Includes separator, item counts, availability message, next page instructions, and timestamp.
53 | * @param pagination - The ResponsePagination object containing pagination details.
54 | * @returns Formatted pagination footer string for CLI.
55 | */
56 | export function formatPagination(pagination: ResponsePagination): string {
57 | const methodLogger = Logger.forContext(
58 | 'utils/formatter.util.ts',
59 | 'formatPagination',
60 | );
61 | const parts: string[] = [formatSeparator()]; // Start with separator
62 |
63 | const { count = 0, hasMore, nextCursor, total, page } = pagination;
64 |
65 | // Showing count and potentially total
66 | if (total !== undefined && total >= 0) {
67 | parts.push(`*Showing ${count} of ${total} total items.*`);
68 | } else if (count >= 0) {
69 | parts.push(`*Showing ${count} item${count !== 1 ? 's' : ''}.*`);
70 | }
71 |
72 | // More results availability
73 | if (hasMore) {
74 | parts.push('More results are available.');
75 | }
76 |
77 | // Include the actual cursor value for programmatic use
78 | if (hasMore && nextCursor) {
79 | parts.push(`*Next cursor: \`${nextCursor}\`*`);
80 | // Assuming nextCursor holds the next page number for Bitbucket
81 | parts.push(`*Use --page ${nextCursor} to view more.*`);
82 | } else if (hasMore && page !== undefined) {
83 | // Fallback if nextCursor wasn't parsed but page exists
84 | const nextPage = page + 1;
85 | parts.push(`*Next cursor: \`${nextPage}\`*`);
86 | parts.push(`*Use --page ${nextPage} to view more.*`);
87 | }
88 |
89 | // Add standard timestamp
90 | parts.push(`*Information retrieved at: ${formatDate(new Date())}*`);
91 |
92 | const result = parts.join('\n').trim(); // Join with newline
93 | methodLogger.debug(`Formatted pagination footer: ${result}`);
94 | return result;
95 | }
96 |
97 | /**
98 | * Format a heading with consistent style
99 | * @param text - Heading text
100 | * @param level - Heading level (1-6)
101 | * @returns Formatted heading
102 | */
103 | export function formatHeading(text: string, level: number = 1): string {
104 | const validLevel = Math.min(Math.max(level, 1), 6);
105 | const prefix = '#'.repeat(validLevel);
106 | return `${prefix} ${text}`;
107 | }
108 |
109 | /**
110 | * Format a list of key-value pairs as a bullet list
111 | * @param items - Object with key-value pairs
112 | * @param keyFormatter - Optional function to format keys
113 | * @returns Formatted bullet list
114 | */
115 | export function formatBulletList(
116 | items: Record<string, unknown>,
117 | keyFormatter?: (key: string) => string,
118 | ): string {
119 | const lines: string[] = [];
120 |
121 | for (const [key, value] of Object.entries(items)) {
122 | if (value === undefined || value === null) {
123 | continue;
124 | }
125 |
126 | const formattedKey = keyFormatter ? keyFormatter(key) : key;
127 | const formattedValue = formatValue(value);
128 | lines.push(`- **${formattedKey}**: ${formattedValue}`);
129 | }
130 |
131 | return lines.join('\n');
132 | }
133 |
134 | /**
135 | * Format a value based on its type
136 | * @param value - Value to format
137 | * @returns Formatted value
138 | */
139 | function formatValue(value: unknown): string {
140 | if (value === undefined || value === null) {
141 | return 'Not available';
142 | }
143 |
144 | if (value instanceof Date) {
145 | return formatDate(value);
146 | }
147 |
148 | // Handle URL objects with url and title properties
149 | if (typeof value === 'object' && value !== null && 'url' in value) {
150 | const urlObj = value as { url: string; title?: string };
151 | if (typeof urlObj.url === 'string') {
152 | return formatUrl(urlObj.url, urlObj.title);
153 | }
154 | }
155 |
156 | if (typeof value === 'string') {
157 | // Check if it's a URL
158 | if (value.startsWith('http://') || value.startsWith('https://')) {
159 | return formatUrl(value);
160 | }
161 |
162 | // Check if it might be a date
163 | if (/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/.test(value)) {
164 | return formatDate(value);
165 | }
166 |
167 | return value;
168 | }
169 |
170 | if (typeof value === 'boolean') {
171 | return value ? 'Yes' : 'No';
172 | }
173 |
174 | return String(value);
175 | }
176 |
177 | /**
178 | * Format a separator line
179 | * @returns Separator line
180 | */
181 | export function formatSeparator(): string {
182 | return '---';
183 | }
184 |
185 | /**
186 | * Format a numbered list of items
187 | * @param items - Array of items to format
188 | * @param formatter - Function to format each item
189 | * @returns Formatted numbered list
190 | */
191 | export function formatNumberedList<T>(
192 | items: T[],
193 | formatter: (item: T, index: number) => string,
194 | ): string {
195 | if (items.length === 0) {
196 | return 'No items.';
197 | }
198 |
199 | return items.map((item, index) => formatter(item, index)).join('\n\n');
200 | }
201 |
202 | /**
203 | * Format a raw diff output for display
204 | *
205 | * Parses and formats a raw unified diff string into a Markdown
206 | * formatted display with proper code block syntax highlighting.
207 | *
208 | * @param {string} rawDiff - The raw diff content from the API
209 | * @param {number} maxFiles - Maximum number of files to display in detail (optional, default: 5)
210 | * @param {number} maxLinesPerFile - Maximum number of lines to display per file (optional, default: 100)
211 | * @returns {string} Markdown formatted diff content
212 | */
213 | export function formatDiff(
214 | rawDiff: string,
215 | maxFiles: number = 5,
216 | maxLinesPerFile: number = 100,
217 | ): string {
218 | if (!rawDiff || rawDiff.trim() === '') {
219 | return '*No changes found in this pull request.*';
220 | }
221 |
222 | const lines = rawDiff.split('\n');
223 | const formattedLines: string[] = [];
224 | let currentFile = '';
225 | let fileCount = 0;
226 | let inFile = false;
227 | let truncated = false;
228 | let lineCount = 0;
229 |
230 | for (const line of lines) {
231 | // New file is marked by a line starting with "diff --git"
232 | if (line.startsWith('diff --git')) {
233 | if (inFile) {
234 | // Close previous file code block
235 | formattedLines.push('```');
236 | formattedLines.push('');
237 | }
238 |
239 | // Only process up to maxFiles
240 | fileCount++;
241 | if (fileCount > maxFiles) {
242 | truncated = true;
243 | break;
244 | }
245 |
246 | // Extract filename
247 | const filePath = line.match(/diff --git a\/(.*) b\/(.*)/);
248 | currentFile = filePath ? filePath[1] : 'unknown file';
249 | formattedLines.push(`### ${currentFile}`);
250 | formattedLines.push('');
251 | formattedLines.push('```diff');
252 | inFile = true;
253 | lineCount = 0;
254 | } else if (inFile) {
255 | lineCount++;
256 |
257 | // Truncate files that are too long
258 | if (lineCount > maxLinesPerFile) {
259 | formattedLines.push(
260 | '// ... more lines omitted for brevity ...',
261 | );
262 | formattedLines.push('```');
263 | formattedLines.push('');
264 | inFile = false;
265 | continue;
266 | }
267 |
268 | // Format diff lines with appropriate highlighting
269 | if (line.startsWith('+')) {
270 | formattedLines.push(line);
271 | } else if (line.startsWith('-')) {
272 | formattedLines.push(line);
273 | } else if (line.startsWith('@@')) {
274 | // Change section header
275 | formattedLines.push(line);
276 | } else {
277 | // Context line
278 | formattedLines.push(line);
279 | }
280 | }
281 | }
282 |
283 | // Close the last code block if necessary
284 | if (inFile) {
285 | formattedLines.push('```');
286 | }
287 |
288 | // Add truncation notice if we limited the output
289 | if (truncated) {
290 | formattedLines.push('');
291 | formattedLines.push(
292 | `*Output truncated. Only showing the first ${maxFiles} files.*`,
293 | );
294 | }
295 |
296 | return formattedLines.join('\n');
297 | }
298 |
299 | /**
300 | * Optimizes markdown content to address Bitbucket Cloud's rendering quirks
301 | *
302 | * IMPORTANT: This function does NOT convert between formats (unlike Jira's ADF conversion).
303 | * Bitbucket Cloud API natively accepts and returns markdown format. This function specifically
304 | * addresses documented rendering issues in Bitbucket's markdown renderer by applying targeted
305 | * formatting adjustments for better display in the Bitbucket UI.
306 | *
307 | * Known Bitbucket rendering issues this function fixes:
308 | * - List spacing and indentation (prevents items from concatenating on a single line)
309 | * - Code block formatting (addresses BCLOUD-20503 and similar bugs)
310 | * - Nested list indentation (ensures proper hierarchy display)
311 | * - Inline code formatting (adds proper spacing around backticks)
312 | * - Diff syntax preservation (maintains +/- at line starts)
313 | * - Excessive line break normalization
314 | * - Heading spacing consistency
315 | *
316 | * Use this function for both:
317 | * - Content received FROM the Bitbucket API (to properly display in CLI/tools)
318 | * - Content being sent TO the Bitbucket API (to ensure proper rendering in Bitbucket UI)
319 | *
320 | * @param {string} markdown - The original markdown content
321 | * @returns {string} Optimized markdown with workarounds for Bitbucket rendering issues
322 | */
323 | export function optimizeBitbucketMarkdown(markdown: string): string {
324 | const methodLogger = Logger.forContext(
325 | 'utils/formatter.util.ts',
326 | 'optimizeBitbucketMarkdown',
327 | );
328 |
329 | if (!markdown || markdown.trim() === '') {
330 | return markdown;
331 | }
332 |
333 | methodLogger.debug('Optimizing markdown for Bitbucket rendering');
334 |
335 | // First, let's extract code blocks to protect them from other transformations
336 | const codeBlocks: string[] = [];
337 | let optimized = markdown.replace(
338 | /```(\w*)\n([\s\S]*?)```/g,
339 | (_match, language, code) => {
340 | // Store the code block and replace with a placeholder
341 | const placeholder = `__CODE_BLOCK_${codeBlocks.length}__`;
342 | codeBlocks.push(`\n\n\`\`\`${language}\n${code}\n\`\`\`\n\n`);
343 | return placeholder;
344 | },
345 | );
346 |
347 | // Fix numbered lists with proper spacing
348 | // Match numbered lists (1. Item) and ensure proper spacing between items
349 | optimized = optimized.replace(
350 | /^(\d+\.)\s+(.*?)$/gm,
351 | (_match, number, content) => {
352 | // Keep the list item and ensure it ends with double line breaks if it doesn't already
353 | return `${number} ${content.trim()}\n\n`;
354 | },
355 | );
356 |
357 | // Fix bullet lists with proper spacing
358 | optimized = optimized.replace(
359 | /^(\s*)[-*]\s+(.*?)$/gm,
360 | (_match, indent, content) => {
361 | // Ensure proper indentation and spacing for bullet lists
362 | return `${indent}- ${content.trim()}\n\n`;
363 | },
364 | );
365 |
366 | // Ensure nested lists have proper indentation
367 | // Matches lines that are part of nested lists and ensures proper indentation
368 | // REMOVED: This step added excessive leading spaces causing Bitbucket to treat lists as code blocks
369 | // optimized = optimized.replace(
370 | // /^(\s+)[-*]\s+(.*?)$/gm,
371 | // (_match, indent, content) => {
372 | // // For nested items, ensure proper indentation (4 spaces per level)
373 | // const indentLevel = Math.ceil(indent.length / 2);
374 | // const properIndent = ' '.repeat(indentLevel);
375 | // return `${properIndent}- ${content.trim()}\n\n`;
376 | // },
377 | // );
378 |
379 | // Fix inline code formatting - ensure it has spaces around it for rendering
380 | optimized = optimized.replace(/`([^`]+)`/g, (_match, code) => {
381 | // Ensure inline code is properly formatted with spaces before and after
382 | // but avoid adding spaces within diff lines (+ or - prefixed)
383 | const trimmedCode = code.trim();
384 | const firstChar = trimmedCode.charAt(0);
385 |
386 | // Don't add spaces if it's part of a diff line
387 | if (firstChar === '+' || firstChar === '-') {
388 | return `\`${trimmedCode}\``;
389 | }
390 |
391 | return ` \`${trimmedCode}\` `;
392 | });
393 |
394 | // Ensure diff lines are properly preserved
395 | // This helps with preserving + and - prefixes in diff code blocks
396 | optimized = optimized.replace(
397 | /^([+-])(.*?)$/gm,
398 | (_match, prefix, content) => {
399 | return `${prefix}${content}`;
400 | },
401 | );
402 |
403 | // Remove excessive line breaks (more than 2 consecutive)
404 | optimized = optimized.replace(/\n{3,}/g, '\n\n');
405 |
406 | // Restore code blocks
407 | codeBlocks.forEach((codeBlock, index) => {
408 | optimized = optimized.replace(`__CODE_BLOCK_${index}__`, codeBlock);
409 | });
410 |
411 | // Fix double formatting issues (heading + bold) which Bitbucket renders incorrectly
412 | // Remove bold formatting from headings as headings are already emphasized
413 | optimized = optimized.replace(
414 | /^(#{1,6})\s+\*\*(.*?)\*\*\s*$/gm,
415 | (_match, hashes, content) => {
416 | return `\n${hashes} ${content.trim()}\n\n`;
417 | },
418 | );
419 |
420 | // Fix bold text within headings (alternative pattern)
421 | optimized = optimized.replace(
422 | /^(#{1,6})\s+(.*?)\*\*(.*?)\*\*(.*?)$/gm,
423 | (_match, hashes, before, boldText, after) => {
424 | // Combine text without bold formatting since heading already provides emphasis
425 | const cleanContent = (before + boldText + after).trim();
426 | return `\n${hashes} ${cleanContent}\n\n`;
427 | },
428 | );
429 |
430 | // Ensure headings have proper spacing (for headings without bold issues)
431 | optimized = optimized.replace(
432 | /^(#{1,6})\s+(.*?)$/gm,
433 | (_match, hashes, content) => {
434 | // Skip if already processed by bold removal above
435 | if (content.includes('**')) {
436 | return _match; // Leave as-is, will be handled by bold removal patterns
437 | }
438 | return `\n${hashes} ${content.trim()}\n\n`;
439 | },
440 | );
441 |
442 | // Ensure the content ends with a single line break
443 | optimized = optimized.trim() + '\n';
444 |
445 | methodLogger.debug('Markdown optimization complete');
446 | return optimized;
447 | }
448 |
449 | /**
450 | * Maximum character limit for AI responses (~10k tokens)
451 | * 1 token ≈ 4 characters, so 10k tokens ≈ 40,000 characters
452 | */
453 | const MAX_RESPONSE_CHARS = 40000;
454 |
455 | /**
456 | * Truncate content for AI consumption and add guidance if truncated
457 | *
458 | * When responses exceed the token limit, this function truncates the content
459 | * and appends guidance for the AI to either access the full response from
460 | * the raw log file or refine the request with better filtering.
461 | *
462 | * @param content - The formatted response content
463 | * @param rawResponsePath - Optional path to the raw response file in /tmp/mcp/
464 | * @returns Truncated content with guidance if needed, or original content if within limits
465 | */
466 | export function truncateForAI(
467 | content: string,
468 | rawResponsePath?: string | null,
469 | ): string {
470 | if (content.length <= MAX_RESPONSE_CHARS) {
471 | return content;
472 | }
473 |
474 | // Truncate at a reasonable boundary (try to find a newline near the limit)
475 | let truncateAt = MAX_RESPONSE_CHARS;
476 | const searchStart = Math.max(0, MAX_RESPONSE_CHARS - 500);
477 | const lastNewline = content.lastIndexOf('\n', MAX_RESPONSE_CHARS);
478 | if (lastNewline > searchStart) {
479 | truncateAt = lastNewline;
480 | }
481 |
482 | const truncatedContent = content.substring(0, truncateAt);
483 | const originalSize = content.length;
484 | const truncatedSize = truncatedContent.length;
485 | const percentShown = Math.round((truncatedSize / originalSize) * 100);
486 |
487 | // Build guidance section
488 | const guidance: string[] = [
489 | '',
490 | formatSeparator(),
491 | formatHeading('Response Truncated', 2),
492 | '',
493 | `This response was truncated to ~${Math.round(truncatedSize / 4000)}k tokens (${percentShown}% of original ${Math.round(originalSize / 1000)}k chars).`,
494 | '',
495 | '**To access the complete data:**',
496 | ];
497 |
498 | if (rawResponsePath) {
499 | guidance.push(
500 | `- The full raw API response is saved at: \`${rawResponsePath}\``,
501 | );
502 | }
503 |
504 | guidance.push(
505 | '- Consider refining your request with more specific filters or selecting fewer fields',
506 | '- For paginated data, use smaller page sizes or specific identifiers',
507 | '- When searching, use more targeted queries to reduce result sets',
508 | );
509 |
510 | return truncatedContent + guidance.join('\n');
511 | }
512 |
```
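A minimal sketch, not taken from the repository, of how these helpers are typically composed: one function builds a Markdown listing for CLI/Tool output and runs it through `truncateForAI`, and a second prepares a comment body for the Bitbucket API with `optimizeBitbucketMarkdown`. The repository data, pagination values, and function names are invented for the example; the pagination object assumes only the fields that `formatPagination` destructures from `ResponsePagination`.

```typescript
import {
	formatHeading,
	formatBulletList,
	formatSeparator,
	formatPagination,
	optimizeBitbucketMarkdown,
	truncateForAI,
} from '../utils/formatter.util.js';
import { ResponsePagination } from '../types/common.types.js';

// Hypothetical listing formatter for CLI/Tool output.
export function formatRepositoryListing(): string {
	// Field names mirror what formatPagination() reads from ResponsePagination.
	const pagination: ResponsePagination = {
		count: 2,
		hasMore: true,
		nextCursor: '2',
	};

	const lines: string[] = [
		formatHeading('Repositories', 1),
		'',
		formatBulletList({
			Workspace: 'example-workspace',
			Updated: '2024-01-15T10:30:00Z', // rendered via formatDate()
			Link: {
				url: 'https://bitbucket.org/example-workspace',
				title: 'Open in Bitbucket',
			},
			Private: true, // rendered as "Yes"
		}),
		'',
		formatSeparator(),
		formatPagination(pagination),
	];

	// No-op below ~40k characters; longer output gains a truncation notice.
	return truncateForAI(lines.join('\n'));
}

// Hypothetical helper for content posted back to Bitbucket,
// e.g. a pull request comment body.
export function prepareCommentBody(markdown: string): string {
	return optimizeBitbucketMarkdown(markdown);
}
```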
--------------------------------------------------------------------------------
/src/utils/error-handler.util.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { createApiError } from './error.util.js';
2 | import { Logger } from './logger.util.js';
3 | import { getDeepOriginalError } from './error.util.js';
4 | import { McpError } from './error.util.js';
5 |
6 | /**
7 | * Standard error codes for consistent handling
8 | */
9 | export enum ErrorCode {
10 | NOT_FOUND = 'NOT_FOUND',
11 | INVALID_CURSOR = 'INVALID_CURSOR',
12 | ACCESS_DENIED = 'ACCESS_DENIED',
13 | VALIDATION_ERROR = 'VALIDATION_ERROR',
14 | UNEXPECTED_ERROR = 'UNEXPECTED_ERROR',
15 | NETWORK_ERROR = 'NETWORK_ERROR',
16 | RATE_LIMIT_ERROR = 'RATE_LIMIT_ERROR',
17 | PRIVATE_IP_ERROR = 'PRIVATE_IP_ERROR',
18 | RESERVED_RANGE_ERROR = 'RESERVED_RANGE_ERROR',
19 | }
20 |
21 | /**
22 | * Context information for error handling
23 | */
24 | export interface ErrorContext {
25 | /**
26 | * Source of the error (e.g., file path and function)
27 | */
28 | source?: string;
29 |
30 | /**
31 | * Type of entity being processed (e.g., 'Repository', 'PullRequest')
32 | */
33 | entityType?: string;
34 |
35 | /**
36 | * Identifier of the entity being processed
37 | */
38 | entityId?: string | Record<string, string>;
39 |
40 | /**
41 | * Operation being performed (e.g., 'listing', 'creating')
42 | */
43 | operation?: string;
44 |
45 | /**
46 | * Additional information for debugging
47 | */
48 | additionalInfo?: Record<string, unknown>;
49 | }
50 |
51 | /**
52 | * Helper function to create a consistent error context object
53 | * @param entityType Type of entity being processed
54 | * @param operation Operation being performed
55 | * @param source Source of the error (typically file path and function)
56 | * @param entityId Optional identifier of the entity
57 | * @param additionalInfo Optional additional information for debugging
58 | * @returns A formatted ErrorContext object
59 | */
60 | export function buildErrorContext(
61 | entityType: string,
62 | operation: string,
63 | source: string,
64 | entityId?: string | Record<string, string>,
65 | additionalInfo?: Record<string, unknown>,
66 | ): ErrorContext {
67 | return {
68 | entityType,
69 | operation,
70 | source,
71 | ...(entityId && { entityId }),
72 | ...(additionalInfo && { additionalInfo }),
73 | };
74 | }
75 |
76 | /**
77 | * Detect specific error types from raw errors
78 | * @param error The error to analyze
79 | * @param context Context information for better error detection
80 | * @returns Object containing the error code and status code
81 | */
82 | export function detectErrorType(
83 | error: unknown,
84 | context: ErrorContext = {},
85 | ): { code: ErrorCode; statusCode: number } {
86 | const methodLogger = Logger.forContext(
87 | 'utils/error-handler.util.ts',
88 | 'detectErrorType',
89 | );
90 | methodLogger.debug(`Detecting error type`, { error, context });
91 |
92 | const errorMessage = error instanceof Error ? error.message : String(error);
93 | const statusCode =
94 | error instanceof Error && 'statusCode' in error
95 | ? (error as { statusCode: number }).statusCode
96 | : undefined;
97 |
98 | // PR ID validation error detection
99 | if (
100 | errorMessage.includes('Invalid pull request ID') ||
101 | errorMessage.includes('Pull request ID must be a positive integer')
102 | ) {
103 | return { code: ErrorCode.VALIDATION_ERROR, statusCode: 400 };
104 | }
105 |
106 | // Network error detection
107 | if (
108 | errorMessage.includes('network error') ||
109 | errorMessage.includes('fetch failed') ||
110 | errorMessage.includes('ECONNREFUSED') ||
111 | errorMessage.includes('ENOTFOUND') ||
112 | errorMessage.includes('Failed to fetch') ||
113 | errorMessage.includes('Network request failed')
114 | ) {
115 | return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
116 | }
117 |
118 | // Network error detection in originalError
119 | if (
120 | error instanceof Error &&
121 | 'originalError' in error &&
122 | error.originalError
123 | ) {
124 | // Check for TypeError in originalError (common for network issues)
125 | if (error.originalError instanceof TypeError) {
126 | return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
127 | }
128 |
129 | // Check for network error messages in originalError
130 | if (
131 | error.originalError instanceof Error &&
132 | (error.originalError.message.includes('fetch') ||
133 | error.originalError.message.includes('network') ||
134 | error.originalError.message.includes('ECON'))
135 | ) {
136 | return { code: ErrorCode.NETWORK_ERROR, statusCode: 500 };
137 | }
138 | }
139 |
140 | // Rate limiting detection
141 | if (
142 | errorMessage.includes('rate limit') ||
143 | errorMessage.includes('too many requests') ||
144 | statusCode === 429
145 | ) {
146 | return { code: ErrorCode.RATE_LIMIT_ERROR, statusCode: 429 };
147 | }
148 |
149 | // Bitbucket-specific error detection
150 | if (
151 | error instanceof Error &&
152 | 'originalError' in error &&
153 | error.originalError
154 | ) {
155 | const originalError = getDeepOriginalError(error.originalError);
156 |
157 | if (originalError && typeof originalError === 'object') {
158 | const oe = originalError as Record<string, unknown>;
159 |
160 | // Check for Bitbucket API error structure
161 | if (oe.error && typeof oe.error === 'object') {
162 | const bbError = oe.error as Record<string, unknown>;
163 | const errorMsg = String(bbError.message || '').toLowerCase();
164 | const errorDetail = bbError.detail
165 | ? String(bbError.detail).toLowerCase()
166 | : '';
167 |
168 | methodLogger.debug('Found Bitbucket error structure', {
169 | message: errorMsg,
170 | detail: errorDetail,
171 | });
172 |
173 | // Repository not found / Does not exist errors
174 | if (
175 | errorMsg.includes('repository not found') ||
176 | errorMsg.includes('does not exist') ||
177 | errorMsg.includes('no such resource') ||
178 | errorMsg.includes('not found')
179 | ) {
180 | return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
181 | }
182 |
183 | // Access and permission errors
184 | if (
185 | errorMsg.includes('access') ||
186 | errorMsg.includes('permission') ||
187 | errorMsg.includes('credentials') ||
188 | errorMsg.includes('unauthorized') ||
189 | errorMsg.includes('forbidden') ||
190 | errorMsg.includes('authentication')
191 | ) {
192 | return { code: ErrorCode.ACCESS_DENIED, statusCode: 403 };
193 | }
194 |
195 | // Validation errors
196 | if (
197 | errorMsg.includes('invalid') ||
198 | (errorMsg.includes('parameter') &&
199 | errorMsg.includes('error')) ||
200 | errorMsg.includes('input') ||
201 | errorMsg.includes('validation') ||
202 | errorMsg.includes('required field') ||
203 | errorMsg.includes('bad request')
204 | ) {
205 | return {
206 | code: ErrorCode.VALIDATION_ERROR,
207 | statusCode: 400,
208 | };
209 | }
210 |
211 | // Rate limiting errors
212 | if (
213 | errorMsg.includes('rate limit') ||
214 | errorMsg.includes('too many requests') ||
215 | errorMsg.includes('throttled')
216 | ) {
217 | return {
218 | code: ErrorCode.RATE_LIMIT_ERROR,
219 | statusCode: 429,
220 | };
221 | }
222 | }
223 |
224 | // Check for alternate Bitbucket error structure: {"type": "error", ...}
225 | if (oe.type === 'error') {
226 | methodLogger.debug('Found Bitbucket type:error structure', oe);
227 |
228 | // Check for status code if available in the error object
229 | if (typeof oe.status === 'number') {
230 | if (oe.status === 404) {
231 | return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
232 | }
233 | if (oe.status === 403 || oe.status === 401) {
234 | return {
235 | code: ErrorCode.ACCESS_DENIED,
236 | statusCode: oe.status,
237 | };
238 | }
239 | if (oe.status === 400) {
240 | return {
241 | code: ErrorCode.VALIDATION_ERROR,
242 | statusCode: 400,
243 | };
244 | }
245 | if (oe.status === 429) {
246 | return {
247 | code: ErrorCode.RATE_LIMIT_ERROR,
248 | statusCode: 429,
249 | };
250 | }
251 | }
252 | }
253 |
254 | // Check for Bitbucket error structure: {"errors": [{...}]}
255 | if (Array.isArray(oe.errors) && oe.errors.length > 0) {
256 | const firstError = oe.errors[0] as Record<string, unknown>;
257 | methodLogger.debug(
258 | 'Found Bitbucket errors array structure',
259 | firstError,
260 | );
261 |
262 | if (typeof firstError.status === 'number') {
263 | if (firstError.status === 404) {
264 | return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
265 | }
266 | if (
267 | firstError.status === 403 ||
268 | firstError.status === 401
269 | ) {
270 | return {
271 | code: ErrorCode.ACCESS_DENIED,
272 | statusCode: firstError.status,
273 | };
274 | }
275 | if (firstError.status === 400) {
276 | return {
277 | code: ErrorCode.VALIDATION_ERROR,
278 | statusCode: 400,
279 | };
280 | }
281 | if (firstError.status === 429) {
282 | return {
283 | code: ErrorCode.RATE_LIMIT_ERROR,
284 | statusCode: 429,
285 | };
286 | }
287 | }
288 |
289 | // Look for error messages in the title or message fields
290 | if (firstError.title || firstError.message) {
291 | const errorText = String(
292 | firstError.title || firstError.message,
293 | ).toLowerCase();
294 | if (errorText.includes('not found')) {
295 | return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
296 | }
297 | if (
298 | errorText.includes('access') ||
299 | errorText.includes('permission')
300 | ) {
301 | return {
302 | code: ErrorCode.ACCESS_DENIED,
303 | statusCode: 403,
304 | };
305 | }
306 | if (
307 | errorText.includes('invalid') ||
308 | errorText.includes('required')
309 | ) {
310 | return {
311 | code: ErrorCode.VALIDATION_ERROR,
312 | statusCode: 400,
313 | };
314 | }
315 | if (
316 | errorText.includes('rate limit') ||
317 | errorText.includes('too many requests')
318 | ) {
319 | return {
320 | code: ErrorCode.RATE_LIMIT_ERROR,
321 | statusCode: 429,
322 | };
323 | }
324 | }
325 | }
326 | }
327 | }
328 |
329 | // Not Found detection
330 | if (
331 | errorMessage.includes('not found') ||
332 | errorMessage.includes('does not exist') ||
333 | statusCode === 404
334 | ) {
335 | return { code: ErrorCode.NOT_FOUND, statusCode: 404 };
336 | }
337 |
338 | // Access Denied detection
339 | if (
340 | errorMessage.includes('access') ||
341 | errorMessage.includes('permission') ||
342 | errorMessage.includes('authorize') ||
343 | errorMessage.includes('authentication') ||
344 | statusCode === 401 ||
345 | statusCode === 403
346 | ) {
347 | return { code: ErrorCode.ACCESS_DENIED, statusCode: statusCode || 403 };
348 | }
349 |
350 | // Invalid Cursor detection
351 | if (
352 | (errorMessage.includes('cursor') ||
353 | errorMessage.includes('startAt') ||
354 | errorMessage.includes('page')) &&
355 | (errorMessage.includes('invalid') || errorMessage.includes('not valid'))
356 | ) {
357 | return { code: ErrorCode.INVALID_CURSOR, statusCode: 400 };
358 | }
359 |
360 | // Validation Error detection
361 | if (
362 | errorMessage.includes('validation') ||
363 | errorMessage.includes('invalid') ||
364 | errorMessage.includes('required') ||
365 | statusCode === 400 ||
366 | statusCode === 422
367 | ) {
368 | return {
369 | code: ErrorCode.VALIDATION_ERROR,
370 | statusCode: statusCode || 400,
371 | };
372 | }
373 |
374 | // Default to unexpected error
375 | return {
376 | code: ErrorCode.UNEXPECTED_ERROR,
377 | statusCode: statusCode || 500,
378 | };
379 | }
380 |
381 | /**
382 | * Create user-friendly error messages based on error type and context
383 | * @param code The error code
384 | * @param context Context information for better error messages
385 | * @param originalMessage The original error message
386 | * @returns User-friendly error message
387 | */
388 | export function createUserFriendlyErrorMessage(
389 | code: ErrorCode,
390 | context: ErrorContext = {},
391 | originalMessage?: string,
392 | ): string {
393 | const methodLogger = Logger.forContext(
394 | 'utils/error-handler.util.ts',
395 | 'createUserFriendlyErrorMessage',
396 | );
397 | const { entityType, entityId, operation } = context;
398 |
399 | // Format entity ID for display
400 | let entityIdStr = '';
401 | if (entityId) {
402 | if (typeof entityId === 'string') {
403 | entityIdStr = entityId;
404 | } else {
405 | // Handle object entityId (like ProjectIdentifier)
406 | entityIdStr = Object.values(entityId).join('/');
407 | }
408 | }
409 |
410 | // Determine entity display name
411 | const entity = entityType
412 | ? `${entityType}${entityIdStr ? ` ${entityIdStr}` : ''}`
413 | : 'Resource';
414 |
415 | let message = '';
416 |
417 | switch (code) {
418 | case ErrorCode.NOT_FOUND:
419 | message = `${entity} not found${entityIdStr ? `: ${entityIdStr}` : ''}. Verify the ID is correct and that you have access to this ${entityType?.toLowerCase() || 'resource'}.`;
420 |
421 | // Bitbucket-specific guidance
422 | if (
423 | entityType === 'Repository' ||
424 | entityType === 'PullRequest' ||
425 | entityType === 'Branch'
426 | ) {
427 | message += ` Make sure the workspace and ${entityType.toLowerCase()} names are spelled correctly and that you have permission to access it.`;
428 | }
429 | break;
430 |
431 | case ErrorCode.ACCESS_DENIED:
432 | message = `Access denied for ${entity.toLowerCase()}${entityIdStr ? ` ${entityIdStr}` : ''}. Verify your credentials and permissions.`;
433 |
434 | // Bitbucket-specific guidance
435 | message += ` Ensure your Bitbucket API token/app password has sufficient privileges and hasn't expired. If using a workspace/repository name, check that it's spelled correctly.`;
436 | break;
437 |
438 | case ErrorCode.INVALID_CURSOR:
439 | message = `Invalid pagination cursor. Use the exact cursor string returned from previous results.`;
440 |
441 | // Bitbucket-specific guidance
442 | message += ` Bitbucket pagination typically uses page numbers. Check that the page number is valid and within range.`;
443 | break;
444 |
445 | case ErrorCode.VALIDATION_ERROR:
446 | message =
447 | originalMessage ||
448 | `Invalid data provided for ${operation || 'operation'} ${entity.toLowerCase()}.`;
449 |
450 | // The originalMessage already includes error details for VALIDATION_ERROR
451 | break;
452 |
453 | case ErrorCode.NETWORK_ERROR:
454 | message = `Network error while ${operation || 'connecting to'} the Bitbucket API. Please check your internet connection and try again.`;
455 | break;
456 |
457 | case ErrorCode.RATE_LIMIT_ERROR:
458 | message = `Bitbucket API rate limit exceeded. Please wait a moment and try again, or reduce the frequency of requests.`;
459 |
460 | // Bitbucket-specific guidance
461 | message += ` Bitbucket's API has rate limits per IP address and additional limits for authenticated users.`;
462 | break;
463 |
464 | default:
465 | message = `An unexpected error occurred while ${operation || 'processing'} ${entity.toLowerCase()}.`;
466 | }
467 |
468 | // Include original message details if available and appropriate
469 | if (
470 | originalMessage &&
471 | code !== ErrorCode.NOT_FOUND &&
472 | code !== ErrorCode.ACCESS_DENIED
473 | ) {
474 | message += ` Error details: ${originalMessage}`;
475 | }
476 |
477 | methodLogger.debug(`Created user-friendly message: ${message}`, {
478 | code,
479 | context,
480 | });
481 | return message;
482 | }
483 |
484 | /**
485 | * Handle controller errors consistently
486 | * @param error The error to handle
487 | * @param context Context information for better error messages
488 | * @returns Never returns, always throws an error
489 | */
490 | export function handleControllerError(
491 | error: unknown,
492 | context: ErrorContext = {},
493 | ): never {
494 | const methodLogger = Logger.forContext(
495 | 'utils/error-handler.util.ts',
496 | 'handleControllerError',
497 | );
498 |
499 | // Extract error details
500 | const errorMessage = error instanceof Error ? error.message : String(error);
501 | const statusCode =
502 | error instanceof Error && 'statusCode' in error
503 | ? (error as { statusCode: number }).statusCode
504 | : undefined;
505 |
506 | // Detect error type using utility
507 | const { code, statusCode: detectedStatus } = detectErrorType(
508 | error,
509 | context,
510 | );
511 |
512 | // Combine detected status with explicit status
513 | const finalStatusCode = statusCode || detectedStatus;
514 |
515 | // Format entity information for logging
516 | const { entityType, entityId, operation } = context;
517 | const entity = entityType || 'resource';
518 | const entityIdStr = entityId
519 | ? typeof entityId === 'string'
520 | ? entityId
521 | : JSON.stringify(entityId)
522 | : '';
523 | const actionStr = operation || 'processing';
524 |
525 | // Log detailed error information
526 | methodLogger.error(
527 | `Error ${actionStr} ${entity}${
528 | entityIdStr ? `: ${entityIdStr}` : ''
529 | }: ${errorMessage}`,
530 | error,
531 | );
532 |
533 | // Create user-friendly error message for the response
534 | const message =
535 | code === ErrorCode.VALIDATION_ERROR
536 | ? errorMessage
537 | : createUserFriendlyErrorMessage(code, context, errorMessage);
538 |
539 | // Throw an appropriate API error with the user-friendly message
540 | throw createApiError(message, finalStatusCode, error);
541 | }
542 |
543 | /**
544 | * Handles errors from CLI commands
545 | * Logs the error and exits the process with appropriate exit code
546 | *
547 | * @param error The error to handle
548 | */
549 | export function handleCliError(error: unknown): never {
550 | const logger = Logger.forContext(
551 | 'utils/error-handler.util.ts',
552 | 'handleCliError',
553 | );
554 |
555 | logger.error('CLI error:', error);
556 |
557 | // Process different error types
558 | if (error instanceof McpError) {
559 | // Format user-friendly error message for MCP errors
560 | console.error(`Error: ${error.message}`);
561 |
562 | // Use specific exit codes based on error type
563 | switch (error.errorType) {
564 | case 'AUTHENTICATION_REQUIRED':
565 | process.exit(2);
566 | break; // Not strictly needed after process.exit but added for clarity
567 | case 'NOT_FOUND':
568 | process.exit(3);
569 | break;
570 | case 'VALIDATION_ERROR':
571 | process.exit(4);
572 | break;
573 | case 'RATE_LIMIT_EXCEEDED':
574 | process.exit(5);
575 | break;
576 | case 'API_ERROR':
577 | process.exit(6);
578 | break;
579 | default:
580 | process.exit(1);
581 | break;
582 | }
583 | } else if (error instanceof Error) {
584 | // Standard Error objects
585 | console.error(`Error: ${error.message}`);
586 | process.exit(1);
587 | } else {
588 | // Unknown error types
589 | console.error(`Unknown error occurred: ${String(error)}`);
590 | process.exit(1);
591 | }
592 | }
593 |
```
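A minimal controller-style sketch, not part of the repository, showing the intended pattern around these helpers: build an `ErrorContext` with `buildErrorContext` and funnel any failure through `handleControllerError`, which classifies it via `detectErrorType`, produces a user-friendly message, and rethrows an `McpError`. The `getRepositoryDetails` stub, the `source` path, and the identifiers are invented for the example. A CLI entry point would instead catch the rethrown error and pass it to `handleCliError`, which maps error types to process exit codes.

```typescript
import {
	buildErrorContext,
	handleControllerError,
} from '../utils/error-handler.util.js';

// Hypothetical stand-in for a vendor service call; it always fails here so the
// error-handling path is exercised. The "not found" wording is what
// detectErrorType() keys on to classify this as NOT_FOUND (404).
async function getRepositoryDetails(
	workspace: string,
	repoSlug: string,
): Promise<{ full_name: string }> {
	throw new Error(`Repository not found: ${workspace}/${repoSlug}`);
}

export async function getRepository(
	workspace: string,
	repoSlug: string,
): Promise<{ content: string }> {
	try {
		const repo = await getRepositoryDetails(workspace, repoSlug);
		return { content: `# ${repo.full_name}` };
	} catch (error) {
		// Never returns: logs the failure, builds a user-friendly message for the
		// detected error code, and rethrows it as an McpError via createApiError.
		throw handleControllerError(
			error,
			buildErrorContext(
				'Repository', // entityType
				'retrieving', // operation
				'controllers/example.controller.ts@getRepository', // source (hypothetical)
				{ workspace, repoSlug }, // entityId
			),
		);
	}
}
```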