# sonarqube-mcp-server — codebase dump (page 8 of 11)

This is page 8 of 11. Use http://codebase.md/sapientpants/sonarqube-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .adr-dir
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── commands
│   │   ├── analyze-and-fix-github-issue.md
│   │   ├── fix-sonarqube-issues.md
│   │   ├── implement-github-issue.md
│   │   ├── release.md
│   │   ├── spec-feature.md
│   │   └── update-dependencies.md
│   ├── hooks
│   │   └── block-git-no-verify.ts
│   └── settings.json
├── .dockerignore
├── .github
│   ├── actionlint.yaml
│   ├── changeset.yml
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   ├── pull_request_template.md
│   ├── scripts
│   │   ├── determine-artifact.sh
│   │   └── version-and-release.js
│   ├── workflows
│   │   ├── codeql.yml
│   │   ├── main.yml
│   │   ├── pr.yml
│   │   ├── publish.yml
│   │   ├── reusable-docker.yml
│   │   ├── reusable-security.yml
│   │   └── reusable-validate.yml
│   └── WORKFLOWS.md
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .markdownlint.yaml
├── .markdownlintignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── .trivyignore
├── .yaml-lint.yml
├── .yamllintignore
├── CHANGELOG.md
├── changes.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── COMPATIBILITY.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── architecture
│   │   └── decisions
│   │       ├── 0001-record-architecture-decisions.md
│   │       ├── 0002-use-node-js-with-typescript.md
│   │       ├── 0003-adopt-model-context-protocol-for-sonarqube-integration.md
│   │       ├── 0004-use-sonarqube-web-api-client-for-all-sonarqube-interactions.md
│   │       ├── 0005-domain-driven-design-of-sonarqube-modules.md
│   │       ├── 0006-expose-sonarqube-features-as-mcp-tools.md
│   │       ├── 0007-support-multiple-authentication-methods-for-sonarqube.md
│   │       ├── 0008-use-environment-variables-for-configuration.md
│   │       ├── 0009-file-based-logging-to-avoid-stdio-conflicts.md
│   │       ├── 0010-use-stdio-transport-for-mcp-communication.md
│   │       ├── 0011-docker-containerization-for-deployment.md
│   │       ├── 0012-add-elicitation-support-for-interactive-user-input.md
│   │       ├── 0014-current-security-model-and-future-oauth2-considerations.md
│   │       ├── 0015-transport-architecture-refactoring.md
│   │       ├── 0016-http-transport-with-oauth-2-0-metadata-endpoints.md
│   │       ├── 0017-comprehensive-audit-logging-system.md
│   │       ├── 0018-add-comprehensive-monitoring-and-observability.md
│   │       ├── 0019-simplify-to-stdio-only-transport-for-mcp-gateway-deployment.md
│   │       ├── 0020-testing-framework-and-strategy-vitest-with-property-based-testing.md
│   │       ├── 0021-code-quality-toolchain-eslint-prettier-strict-typescript.md
│   │       ├── 0022-package-manager-choice-pnpm.md
│   │       ├── 0023-release-management-with-changesets.md
│   │       ├── 0024-ci-cd-platform-github-actions.md
│   │       ├── 0025-container-and-security-scanning-strategy.md
│   │       ├── 0026-circuit-breaker-pattern-with-opossum.md
│   │       ├── 0027-docker-image-publishing-strategy-ghcr-to-docker-hub.md
│   │       └── 0028-session-based-http-transport-with-server-sent-events.md
│   ├── architecture.md
│   ├── security.md
│   └── troubleshooting.md
├── eslint.config.js
├── examples
│   └── http-client.ts
├── jest.config.js
├── LICENSE
├── LICENSES.md
├── osv-scanner.toml
├── package.json
├── pnpm-lock.yaml
├── README.md
├── scripts
│   ├── actionlint.sh
│   ├── ci-local.sh
│   ├── load-test.sh
│   ├── README.md
│   ├── run-all-tests.sh
│   ├── scan-container.sh
│   ├── security-scan.sh
│   ├── setup.sh
│   ├── test-monitoring-integration.sh
│   └── validate-docs.sh
├── SECURITY.md
├── sonar-project.properties
├── src
│   ├── __tests__
│   │   ├── additional-coverage.test.ts
│   │   ├── advanced-index.test.ts
│   │   ├── assign-issue.test.ts
│   │   ├── auth-methods.test.ts
│   │   ├── boolean-string-transform.test.ts
│   │   ├── components.test.ts
│   │   ├── config
│   │   │   └── service-accounts.test.ts
│   │   ├── dependency-injection.test.ts
│   │   ├── direct-handlers.test.ts
│   │   ├── direct-lambdas.test.ts
│   │   ├── direct-schema-validation.test.ts
│   │   ├── domains
│   │   │   ├── components-domain-full.test.ts
│   │   │   ├── components-domain.test.ts
│   │   │   ├── hotspots-domain.test.ts
│   │   │   └── source-code-domain.test.ts
│   │   ├── environment-validation.test.ts
│   │   ├── error-handler.test.ts
│   │   ├── error-handling.test.ts
│   │   ├── errors.test.ts
│   │   ├── function-tests.test.ts
│   │   ├── handlers
│   │   │   ├── components-handler-integration.test.ts
│   │   │   └── projects-authorization.test.ts
│   │   ├── handlers.test.ts
│   │   ├── handlers.test.ts.skip
│   │   ├── index.test.ts
│   │   ├── issue-resolution-elicitation.test.ts
│   │   ├── issue-resolution.test.ts
│   │   ├── issue-transitions.test.ts
│   │   ├── issues-enhanced-search.test.ts
│   │   ├── issues-new-parameters.test.ts
│   │   ├── json-array-transform.test.ts
│   │   ├── lambda-functions.test.ts
│   │   ├── lambda-handlers.test.ts.skip
│   │   ├── logger.test.ts
│   │   ├── mapping-functions.test.ts
│   │   ├── mocked-environment.test.ts
│   │   ├── null-to-undefined.test.ts
│   │   ├── parameter-transformations-advanced.test.ts
│   │   ├── parameter-transformations.test.ts
│   │   ├── protocol-version.test.ts
│   │   ├── pull-request-transform.test.ts
│   │   ├── quality-gates.test.ts
│   │   ├── schema-parameter-transforms.test.ts
│   │   ├── schema-transformation-mocks.test.ts
│   │   ├── schema-transforms.test.ts
│   │   ├── schema-validators.test.ts
│   │   ├── schemas
│   │   │   ├── components-schema.test.ts
│   │   │   ├── hotspots-tools-schema.test.ts
│   │   │   └── issues-schema.test.ts
│   │   ├── sonarqube-elicitation.test.ts
│   │   ├── sonarqube.test.ts
│   │   ├── source-code.test.ts
│   │   ├── standalone-handlers.test.ts
│   │   ├── string-to-number-transform.test.ts
│   │   ├── tool-handler-lambdas.test.ts
│   │   ├── tool-handlers.test.ts
│   │   ├── tool-registration-schema.test.ts
│   │   ├── tool-registration-transforms.test.ts
│   │   ├── transformation-util.test.ts
│   │   ├── transports
│   │   │   ├── base.test.ts
│   │   │   ├── factory.test.ts
│   │   │   ├── http.test.ts
│   │   │   ├── session-manager.test.ts
│   │   │   └── stdio.test.ts
│   │   ├── utils
│   │   │   ├── retry.test.ts
│   │   │   └── transforms.test.ts
│   │   ├── zod-boolean-transform.test.ts
│   │   ├── zod-schema-transforms.test.ts
│   │   └── zod-transforms.test.ts
│   ├── config
│   │   ├── service-accounts.ts
│   │   └── versions.ts
│   ├── domains
│   │   ├── base.ts
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── errors.ts
│   ├── handlers
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── index.ts
│   ├── monitoring
│   │   ├── __tests__
│   │   │   └── circuit-breaker.test.ts
│   │   ├── circuit-breaker.ts
│   │   ├── health.ts
│   │   └── metrics.ts
│   ├── schemas
│   │   ├── common.ts
│   │   ├── components.ts
│   │   ├── hotspots-tools.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── sonarqube.ts
│   ├── transports
│   │   ├── base.ts
│   │   ├── factory.ts
│   │   ├── http.ts
│   │   ├── index.ts
│   │   ├── session-manager.ts
│   │   └── stdio.ts
│   ├── types
│   │   ├── common.ts
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   └── utils
│       ├── __tests__
│       │   ├── elicitation.test.ts
│       │   ├── pattern-matcher.test.ts
│       │   └── structured-response.test.ts
│       ├── client-factory.ts
│       ├── elicitation.ts
│       ├── error-handler.ts
│       ├── logger.ts
│       ├── parameter-mappers.ts
│       ├── pattern-matcher.ts
│       ├── retry.ts
│       ├── structured-response.ts
│       └── transforms.ts
├── test-http-transport.sh
├── tmp
│   └── .gitkeep
├── tsconfig.build.json
├── tsconfig.json
├── vitest.config.d.ts
├── vitest.config.js
├── vitest.config.js.map
└── vitest.config.ts
```

# Files

--------------------------------------------------------------------------------
/src/__tests__/errors.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
  2 | import {
  3 |   SonarQubeAPIError,
  4 |   SonarQubeErrorType,
  5 |   transformError,
  6 |   withErrorHandling,
  7 |   formatErrorForMCP,
  8 | } from '../errors.js';
  9 | import {
 10 |   SonarQubeError as SonarQubeClientError,
 11 |   AuthenticationError,
 12 |   AuthorizationError,
 13 |   NotFoundError,
 14 |   RateLimitError,
 15 |   NetworkError,
 16 |   ServerError,
 17 |   ValidationError,
 18 | } from 'sonarqube-web-api-client';
 19 | 
 20 | // Mock the logger
 21 | vi.mock('../utils/logger.js', () => ({
 22 |   createLogger: () => ({
 23 |     info: vi.fn(),
 24 |     error: vi.fn(),
 25 |     debug: vi.fn(),
 26 |     warn: vi.fn(),
 27 |   }),
 28 | }));
 29 | 
 30 | describe('Error Handling', () => {
 31 |   describe('SonarQubeAPIError', () => {
 32 |     it('should create error with all properties', () => {
 33 |       const error = new SonarQubeAPIError('Test error', SonarQubeErrorType.AUTHENTICATION_FAILED, {
 34 |         operation: 'test-operation',
 35 |         statusCode: 401,
 36 |         context: { key: 'value' },
 37 |         solution: 'Test solution',
 38 |       });
 39 | 
 40 |       expect(error.message).toBe('Test error');
 41 |       expect(error.type).toBe(SonarQubeErrorType.AUTHENTICATION_FAILED);
 42 |       expect(error.operation).toBe('test-operation');
 43 |       expect(error.statusCode).toBe(401);
 44 |       expect(error.context).toEqual({ key: 'value' });
 45 |       expect(error.solution).toBe('Test solution');
 46 |     });
 47 | 
 48 |     it('should format error as string with all details', () => {
 49 |       const error = new SonarQubeAPIError('Test error', SonarQubeErrorType.AUTHENTICATION_FAILED, {
 50 |         operation: 'test-operation',
 51 |         statusCode: 401,
 52 |         context: { key: 'value' },
 53 |         solution: 'Test solution',
 54 |       });
 55 | 
 56 |       const result = error.toString();
 57 |       expect(result).toContain('Error: Test error');
 58 |       expect(result).toContain('Operation: test-operation');
 59 |       expect(result).toContain('Status Code: 401');
 60 |       expect(result).toContain('Solution: Test solution');
 61 |       expect(result).toContain('Context:');
 62 |       expect(result).toContain('"key": "value"');
 63 |     });
 64 | 
 65 |     it('should format error without optional fields', () => {
 66 |       const error = new SonarQubeAPIError('Test error', SonarQubeErrorType.UNKNOWN_ERROR);
 67 |       const result = error.toString();
 68 |       expect(result).toBe('Error: Test error');
 69 |     });
 70 |   });
 71 | 
 72 |   describe('transformError', () => {
 73 |     it('should return existing SonarQubeAPIError unchanged', () => {
 74 |       const originalError = new SonarQubeAPIError(
 75 |         'Original error',
 76 |         SonarQubeErrorType.AUTHENTICATION_FAILED
 77 |       );
 78 |       const result = transformError(originalError, 'test-operation');
 79 |       expect(result).toBe(originalError);
 80 |     });
 81 | 
 82 |     it('should transform AuthenticationError', () => {
 83 |       const clientError = new AuthenticationError('Auth failed');
 84 |       const result = transformError(clientError, 'test-operation');
 85 | 
 86 |       expect(result).toBeInstanceOf(SonarQubeAPIError);
 87 |       expect(result.type).toBe(SonarQubeErrorType.AUTHENTICATION_FAILED);
 88 |       expect(result.message).toBe('Auth failed');
 89 |       expect(result.solution).toContain('check your SONARQUBE_TOKEN');
 90 |     });
 91 | 
 92 |     it('should transform AuthorizationError', () => {
 93 |       const clientError = new AuthorizationError('Access denied');
 94 |       const result = transformError(clientError, 'test-operation');
 95 | 
 96 |       expect(result.type).toBe(SonarQubeErrorType.AUTHORIZATION_FAILED);
 97 |       expect(result.solution).toContain('required permissions');
 98 |     });
 99 | 
100 |     it('should transform NotFoundError', () => {
101 |       const clientError = new NotFoundError('Not found');
102 |       const result = transformError(clientError, 'test-operation');
103 | 
104 |       expect(result.type).toBe(SonarQubeErrorType.RESOURCE_NOT_FOUND);
105 |       expect(result.solution).toContain('Verify the project key');
106 |     });
107 | 
108 |     it('should transform RateLimitError', () => {
109 |       const clientError = new RateLimitError('Rate limited');
110 |       const result = transformError(clientError, 'test-operation');
111 | 
112 |       expect(result.type).toBe(SonarQubeErrorType.RATE_LIMITED);
113 |       expect(result.solution).toContain('wait before retrying');
114 |     });
115 | 
116 |     it('should transform NetworkError', () => {
117 |       const clientError = new NetworkError('Network error');
118 |       const result = transformError(clientError, 'test-operation');
119 | 
120 |       expect(result.type).toBe(SonarQubeErrorType.NETWORK_ERROR);
121 |       expect(result.solution).toContain('Check your network connection');
122 |     });
123 | 
124 |     it('should transform ServerError', () => {
125 |       const clientError = new ServerError('Server error', 500);
126 |       const result = transformError(clientError, 'test-operation');
127 | 
128 |       expect(result.type).toBe(SonarQubeErrorType.SERVER_ERROR);
129 |       expect(result.solution).toContain('server is experiencing issues');
130 |     });
131 | 
132 |     it('should transform ValidationError', () => {
133 |       const clientError = new ValidationError('Validation error');
134 |       const result = transformError(clientError, 'test-operation');
135 | 
136 |       expect(result.type).toBe(SonarQubeErrorType.VALIDATION_ERROR);
137 |       expect(result.solution).toContain('check your request parameters');
138 |     });
139 | 
140 |     it('should transform unknown SonarQubeClientError', () => {
141 |       class UnknownError extends SonarQubeClientError {
142 |         constructor(message: string) {
143 |           super(message, 'UNKNOWN');
144 |         }
145 |       }
146 |       const clientError = new UnknownError('Unknown error');
147 |       const result = transformError(clientError, 'test-operation');
148 | 
149 |       expect(result.type).toBe(SonarQubeErrorType.UNKNOWN_ERROR);
150 |     });
151 | 
152 |     it('should transform generic Error', () => {
153 |       const error = new Error('Generic error');
154 |       const result = transformError(error, 'test-operation');
155 | 
156 |       expect(result.type).toBe(SonarQubeErrorType.UNKNOWN_ERROR);
157 |       expect(result.message).toBe('Generic error');
158 |     });
159 | 
160 |     it('should transform non-Error values', () => {
161 |       const result = transformError('String error', 'test-operation');
162 |       expect(result.type).toBe(SonarQubeErrorType.UNKNOWN_ERROR);
163 |       expect(result.message).toBe('String error');
164 |     });
165 |   });
166 | 
167 |   describe('withErrorHandling', () => {
168 |     beforeEach(() => {
169 |       vi.clearAllMocks();
170 |       vi.useFakeTimers();
171 |     });
172 | 
173 |     afterEach(() => {
174 |       vi.useRealTimers();
175 |     });
176 | 
177 |     it('should return successful result without retry', async () => {
178 |       const apiCall = vi.fn<() => Promise<string>>().mockResolvedValue('success');
179 |       const result = await withErrorHandling('test-operation', apiCall);
180 | 
181 |       expect(result).toBe('success');
182 |       expect(apiCall).toHaveBeenCalledTimes(1);
183 |     });
184 | 
185 |     it('should retry on rate limit error', async () => {
186 |       const apiCall = vi
187 |         .fn<() => Promise<string>>()
188 |         .mockRejectedValueOnce(new RateLimitError('Rate limited'))
189 |         .mockRejectedValueOnce(new RateLimitError('Rate limited'))
190 |         .mockResolvedValue('success');
191 | 
192 |       const promise = withErrorHandling('test-operation', apiCall);
193 | 
194 |       // Fast-forward through retry delays
195 |       await vi.advanceTimersByTimeAsync(1000);
196 |       await vi.advanceTimersByTimeAsync(2000);
197 | 
198 |       const result = await promise;
199 | 
200 |       expect(result).toBe('success');
201 |       expect(apiCall).toHaveBeenCalledTimes(3);
202 |     });
203 | 
204 |     it('should retry on network error', async () => {
205 |       const apiCall = vi
206 |         .fn<() => Promise<string>>()
207 |         .mockRejectedValueOnce(new NetworkError('Network error'))
208 |         .mockResolvedValue('success');
209 | 
210 |       const promise = withErrorHandling('test-operation', apiCall);
211 | 
212 |       await vi.advanceTimersByTimeAsync(1000);
213 | 
214 |       const result = await promise;
215 | 
216 |       expect(result).toBe('success');
217 |       expect(apiCall).toHaveBeenCalledTimes(2);
218 |     });
219 | 
220 |     it('should retry on server error', async () => {
221 |       const apiCall = vi
222 |         .fn<() => Promise<string>>()
223 |         .mockRejectedValueOnce(new ServerError('Server error', 500))
224 |         .mockResolvedValue('success');
225 | 
226 |       const promise = withErrorHandling('test-operation', apiCall);
227 | 
228 |       await vi.advanceTimersByTimeAsync(1000);
229 | 
230 |       const result = await promise;
231 | 
232 |       expect(result).toBe('success');
233 |       expect(apiCall).toHaveBeenCalledTimes(2);
234 |     });
235 | 
236 |     it('should not retry on authentication error', async () => {
237 |       const apiCall = vi
238 |         .fn<() => Promise<string>>()
239 |         .mockRejectedValue(new AuthenticationError('Auth failed'));
240 | 
241 |       await expect(withErrorHandling('test-operation', apiCall)).rejects.toThrow(SonarQubeAPIError);
242 | 
243 |       expect(apiCall).toHaveBeenCalledTimes(1);
244 |     });
245 | 
246 |     it('should respect max retries', async () => {
247 |       const apiCall = vi
248 |         .fn<() => Promise<string>>()
249 |         .mockRejectedValue(new RateLimitError('Rate limited'));
250 | 
251 |       // Run the test with real timers since fake timers are problematic with async/await
252 |       vi.useRealTimers();
253 | 
254 |       const promise = withErrorHandling('test-operation', apiCall, {
255 |         maxRetries: 3,
256 |         initialDelay: 1,
257 |         maxDelay: 10,
258 |       });
259 | 
260 |       await expect(promise).rejects.toThrow(SonarQubeAPIError);
261 |       expect(apiCall).toHaveBeenCalledTimes(4); // Initial + 3 retries
262 | 
263 |       // Restore fake timers for other tests
264 |       vi.useFakeTimers();
265 |     });
266 | 
267 |     it('should use exponential backoff', async () => {
268 |       // Track delays used
269 |       let delays: number[] = [];
270 | 
271 |       vi.useRealTimers();
272 | 
273 |       // Mock setTimeout to capture delays
274 |       const originalSetTimeout = global.setTimeout;
275 |       global.setTimeout = vi.fn((fn: () => void, delay?: number) => {
276 |         if (delay !== undefined) delays.push(delay);
277 |         return originalSetTimeout(fn, 0); // Execute immediately
278 |       }) as unknown as typeof global.setTimeout;
279 | 
280 |       const apiCall = vi
281 |         .fn<() => Promise<string>>()
282 |         .mockRejectedValue(new RateLimitError('Rate limited'));
283 | 
284 |       await expect(
285 |         withErrorHandling('test-operation', apiCall, {
286 |           maxRetries: 3,
287 |           initialDelay: 1000,
288 |           maxDelay: 10000,
289 |         })
290 |       ).rejects.toThrow();
291 | 
292 |       // Verify exponential backoff pattern
293 |       expect(delays).toEqual([1000, 2000, 4000]);
294 |       expect(apiCall).toHaveBeenCalledTimes(4);
295 | 
296 |       // Restore
297 |       global.setTimeout = originalSetTimeout;
298 |       vi.useFakeTimers();
299 |     });
300 | 
301 |     it('should respect max delay', async () => {
302 |       // Track delays used
303 |       let delays: number[] = [];
304 | 
305 |       vi.useRealTimers();
306 | 
307 |       // Mock setTimeout to capture delays
308 |       const originalSetTimeout = global.setTimeout;
309 |       global.setTimeout = vi.fn((fn: () => void, delay?: number) => {
310 |         if (delay !== undefined) delays.push(delay);
311 |         return originalSetTimeout(fn, 0); // Execute immediately
312 |       }) as unknown as typeof global.setTimeout;
313 | 
314 |       const apiCall = vi
315 |         .fn<() => Promise<string>>()
316 |         .mockRejectedValue(new RateLimitError('Rate limited'));
317 | 
318 |       await expect(
319 |         withErrorHandling('test-operation', apiCall, {
320 |           maxRetries: 3,
321 |           initialDelay: 1000,
322 |           maxDelay: 2000,
323 |         })
324 |       ).rejects.toThrow();
325 | 
326 |       // Verify delays are capped at maxDelay
327 |       expect(delays).toEqual([1000, 2000, 2000]); // 2nd and 3rd retry capped at 2000ms
328 |       expect(apiCall).toHaveBeenCalledTimes(4);
329 | 
330 |       // Restore
331 |       global.setTimeout = originalSetTimeout;
332 |       vi.useFakeTimers();
333 |     });
334 | 
335 |     it('should pass through non-SonarQubeClientError unchanged', async () => {
336 |       const customError = new Error('Custom error');
337 |       const apiCall = vi.fn<() => Promise<string>>().mockRejectedValue(customError);
338 | 
339 |       await expect(withErrorHandling('test-operation', apiCall)).rejects.toThrow(customError);
340 |       expect(apiCall).toHaveBeenCalledTimes(1);
341 |     });
342 |   });
343 | 
344 |   describe('formatErrorForMCP', () => {
345 |     it('should format authentication error', () => {
346 |       const error = new SonarQubeAPIError('Auth failed', SonarQubeErrorType.AUTHENTICATION_FAILED);
347 |       const result = formatErrorForMCP(error);
348 | 
349 |       expect(result.code).toBe(-32001);
350 |       expect(result.message).toContain('Auth failed');
351 |     });
352 | 
353 |     it('should format authorization error', () => {
354 |       const error = new SonarQubeAPIError('Access denied', SonarQubeErrorType.AUTHORIZATION_FAILED);
355 |       const result = formatErrorForMCP(error);
356 | 
357 |       expect(result.code).toBe(-32002);
358 |     });
359 | 
360 |     it('should format resource not found error', () => {
361 |       const error = new SonarQubeAPIError('Not found', SonarQubeErrorType.RESOURCE_NOT_FOUND);
362 |       const result = formatErrorForMCP(error);
363 | 
364 |       expect(result.code).toBe(-32003);
365 |     });
366 | 
367 |     it('should format rate limit error', () => {
368 |       const error = new SonarQubeAPIError('Rate limited', SonarQubeErrorType.RATE_LIMITED);
369 |       const result = formatErrorForMCP(error);
370 | 
371 |       expect(result.code).toBe(-32004);
372 |     });
373 | 
374 |     it('should format network error', () => {
375 |       const error = new SonarQubeAPIError('Network error', SonarQubeErrorType.NETWORK_ERROR);
376 |       const result = formatErrorForMCP(error);
377 | 
378 |       expect(result.code).toBe(-32005);
379 |     });
380 | 
381 |     it('should format configuration error', () => {
382 |       const error = new SonarQubeAPIError('Config error', SonarQubeErrorType.CONFIGURATION_ERROR);
383 |       const result = formatErrorForMCP(error);
384 | 
385 |       expect(result.code).toBe(-32006);
386 |     });
387 | 
388 |     it('should format validation error', () => {
389 |       const error = new SonarQubeAPIError('Validation error', SonarQubeErrorType.VALIDATION_ERROR);
390 |       const result = formatErrorForMCP(error);
391 | 
392 |       expect(result.code).toBe(-32007);
393 |     });
394 | 
395 |     it('should format server error', () => {
396 |       const error = new SonarQubeAPIError('Server error', SonarQubeErrorType.SERVER_ERROR);
397 |       const result = formatErrorForMCP(error);
398 | 
399 |       expect(result.code).toBe(-32008);
400 |     });
401 | 
402 |     it('should format unknown error', () => {
403 |       const error = new SonarQubeAPIError('Unknown error', SonarQubeErrorType.UNKNOWN_ERROR);
404 |       const result = formatErrorForMCP(error);
405 | 
406 |       expect(result.code).toBe(-32000);
407 |     });
408 | 
409 |     it('should include full error details in message', () => {
410 |       const error = new SonarQubeAPIError('Test error', SonarQubeErrorType.AUTHENTICATION_FAILED, {
411 |         operation: 'test-op',
412 |         solution: 'Test solution',
413 |       });
414 |       const result = formatErrorForMCP(error);
415 | 
416 |       expect(result.message).toContain('Test error');
417 |       expect(result.message).toContain('test-op');
418 |       expect(result.message).toContain('Test solution');
419 |     });
420 |   });
421 | });
422 | 
```

--------------------------------------------------------------------------------
/src/__tests__/issue-resolution-elicitation.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, vi } from 'vitest';
  2 | import type { Mocked } from 'vitest';
  3 | import {
  4 |   handleMarkIssueFalsePositive,
  5 |   handleMarkIssueWontFix,
  6 |   handleMarkIssuesFalsePositive,
  7 |   handleMarkIssuesWontFix,
  8 |   setElicitationManager,
  9 | } from '../handlers/issues.js';
 10 | import { ElicitationManager } from '../utils/elicitation.js';
 11 | // Mock environment variables
 12 | process.env.SONARQUBE_TOKEN = 'test-token';
 13 | process.env.SONARQUBE_URL = 'http://localhost:9000';
 14 | process.env.SONARQUBE_ORGANIZATION = 'test-org';
 15 | describe('Issue Resolution with Elicitation', () => {
 16 |   let mockElicitationManager: Mocked<ElicitationManager>;
 17 |   let mockClient: any;
 18 |   beforeEach(() => {
 19 |     vi.clearAllMocks();
 20 |     // Create mock elicitation manager
 21 |     mockElicitationManager = {
 22 |       isEnabled: vi.fn(),
 23 |       collectResolutionComment: vi.fn(),
 24 |       confirmBulkOperation: vi.fn(),
 25 |       setServer: vi.fn(),
 26 |       getOptions: vi.fn(),
 27 |       updateOptions: vi.fn(),
 28 |       collectAuthentication: vi.fn(),
 29 |       disambiguateSelection: vi.fn(),
 30 |     } as unknown as Mocked<ElicitationManager>;
 31 |     // Set the mock manager
 32 |     setElicitationManager(mockElicitationManager);
 33 |     // Create mock client
 34 |     mockClient = {
 35 |       markIssueFalsePositive: vi.fn(),
 36 |       markIssueWontFix: vi.fn(),
 37 |       markIssuesFalsePositive: vi.fn(),
 38 |       markIssuesWontFix: vi.fn(),
 39 |     };
 40 |   });
 41 |   describe('handleMarkIssueFalsePositive with elicitation', () => {
 42 |     it('should collect comment via elicitation when enabled and no comment provided', async () => {
 43 |       mockElicitationManager.isEnabled.mockReturnValue(true);
 44 |       mockElicitationManager.collectResolutionComment.mockResolvedValue({
 45 |         action: 'accept',
 46 |         content: { comment: 'Elicited comment for false positive' },
 47 |       });
 48 |       const mockResponse = {
 49 |         issue: { key: 'ISSUE-123', status: 'RESOLVED', resolution: 'FALSE-POSITIVE' },
 50 |         components: [],
 51 |         rules: [],
 52 |         users: [],
 53 |       };
 54 |       mockClient.markIssueFalsePositive.mockResolvedValue(mockResponse);
 55 |       const result = await handleMarkIssueFalsePositive({ issueKey: 'ISSUE-123' }, mockClient);
 56 |       expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
 57 |       expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalledWith(
 58 |         'ISSUE-123',
 59 |         'false positive'
 60 |       );
 61 |       expect(mockClient.markIssueFalsePositive).toHaveBeenCalledWith({
 62 |         issueKey: 'ISSUE-123',
 63 |         comment: 'Elicited comment for false positive',
 64 |       });
 65 |       const responseData = JSON.parse(result.content[0]!.text as string);
 66 |       expect(responseData.message).toBe('Issue ISSUE-123 marked as false positive');
 67 |     });
 68 |     it('should not collect comment when elicitation is disabled', async () => {
 69 |       mockElicitationManager.isEnabled.mockReturnValue(false);
 70 |       const mockResponse = {
 71 |         issue: { key: 'ISSUE-123', status: 'RESOLVED', resolution: 'FALSE-POSITIVE' },
 72 |         components: [],
 73 |         rules: [],
 74 |         users: [],
 75 |       };
 76 |       mockClient.markIssueFalsePositive.mockResolvedValue(mockResponse);
 77 |       await handleMarkIssueFalsePositive({ issueKey: 'ISSUE-123' }, mockClient);
 78 |       expect(mockElicitationManager.collectResolutionComment).not.toHaveBeenCalled();
 79 |       expect(mockClient.markIssueFalsePositive).toHaveBeenCalledWith({
 80 |         issueKey: 'ISSUE-123',
 81 |       });
 82 |     });
 83 |     it('should not collect comment when comment already provided', async () => {
 84 |       mockElicitationManager.isEnabled.mockReturnValue(true);
 85 |       const mockResponse = {
 86 |         issue: { key: 'ISSUE-123', status: 'RESOLVED', resolution: 'FALSE-POSITIVE' },
 87 |         components: [],
 88 |         rules: [],
 89 |         users: [],
 90 |       };
 91 |       mockClient.markIssueFalsePositive.mockResolvedValue(mockResponse);
 92 |       await handleMarkIssueFalsePositive(
 93 |         { issueKey: 'ISSUE-123', comment: 'Existing comment' },
 94 |         mockClient
 95 |       );
 96 |       expect(mockElicitationManager.collectResolutionComment).not.toHaveBeenCalled();
 97 |       expect(mockClient.markIssueFalsePositive).toHaveBeenCalledWith({
 98 |         issueKey: 'ISSUE-123',
 99 |         comment: 'Existing comment',
100 |       });
101 |     });
102 |     it('should handle elicitation cancellation', async () => {
103 |       mockElicitationManager.isEnabled.mockReturnValue(true);
104 |       mockElicitationManager.collectResolutionComment.mockResolvedValue({
105 |         action: 'cancel',
106 |       });
107 |       const result = await handleMarkIssueFalsePositive({ issueKey: 'ISSUE-123' }, mockClient);
108 |       expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalled();
109 |       expect(mockClient.markIssueFalsePositive).not.toHaveBeenCalled();
110 |       const responseData = JSON.parse(result.content[0]!.text as string);
111 |       expect(responseData.message).toBe('Operation cancelled by user');
112 |       expect(responseData.issueKey).toBe('ISSUE-123');
113 |     });
114 |     it('should handle elicitation rejection', async () => {
115 |       mockElicitationManager.isEnabled.mockReturnValue(true);
116 |       mockElicitationManager.collectResolutionComment.mockResolvedValue({
117 |         action: 'reject',
118 |       });
119 |       const result = await handleMarkIssueFalsePositive({ issueKey: 'ISSUE-123' }, mockClient);
120 |       expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalled();
121 |       expect(mockClient.markIssueFalsePositive).not.toHaveBeenCalled();
122 |       const responseData = JSON.parse(result.content[0]!.text as string);
123 |       expect(responseData.message).toBe('Operation cancelled by user');
124 |     });
125 |   });
126 |   describe('handleMarkIssueWontFix with elicitation', () => {
127 |     it('should collect comment via elicitation when enabled and no comment provided', async () => {
128 |       mockElicitationManager.isEnabled.mockReturnValue(true);
129 |       mockElicitationManager.collectResolutionComment.mockResolvedValue({
130 |         action: 'accept',
131 |         content: { comment: "Elicited comment for won't fix" },
132 |       });
133 |       const mockResponse = {
134 |         issue: { key: 'ISSUE-456', status: 'RESOLVED', resolution: 'WONTFIX' },
135 |         components: [],
136 |         rules: [],
137 |         users: [],
138 |       };
139 |       mockClient.markIssueWontFix.mockResolvedValue(mockResponse);
140 |       const result = await handleMarkIssueWontFix({ issueKey: 'ISSUE-456' }, mockClient);
141 |       expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
142 |       expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalledWith(
143 |         'ISSUE-456',
144 |         "won't fix"
145 |       );
146 |       expect(mockClient.markIssueWontFix).toHaveBeenCalledWith({
147 |         issueKey: 'ISSUE-456',
148 |         comment: "Elicited comment for won't fix",
149 |       });
150 |       const responseData = JSON.parse(result.content[0]!.text as string);
151 |       expect(responseData.message).toBe("Issue ISSUE-456 marked as won't fix");
152 |     });
153 |     it('should handle elicitation cancellation', async () => {
154 |       mockElicitationManager.isEnabled.mockReturnValue(true);
155 |       mockElicitationManager.collectResolutionComment.mockResolvedValue({
156 |         action: 'cancel',
157 |       });
158 |       const result = await handleMarkIssueWontFix({ issueKey: 'ISSUE-456' }, mockClient);
159 |       expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalled();
160 |       expect(mockClient.markIssueWontFix).not.toHaveBeenCalled();
161 |       const responseData = JSON.parse(result.content[0]!.text as string);
162 |       expect(responseData.message).toBe('Operation cancelled by user');
163 |       expect(responseData.issueKey).toBe('ISSUE-456');
164 |     });
165 |   });
166 |   describe('handleMarkIssuesFalsePositive with elicitation', () => {
167 |     it('should request confirmation for bulk operations when enabled', async () => {
168 |       mockElicitationManager.isEnabled.mockReturnValue(true);
169 |       mockElicitationManager.confirmBulkOperation.mockResolvedValue({
170 |         action: 'accept',
171 |         content: { confirm: true, comment: 'Bulk operation comment' },
172 |       });
173 |       const mockResponses = [
174 |         { issue: { key: 'ISSUE-123' }, components: [], rules: [], users: [] },
175 |         { issue: { key: 'ISSUE-124' }, components: [], rules: [], users: [] },
176 |       ];
177 |       mockClient.markIssuesFalsePositive.mockResolvedValue(mockResponses);
178 |       const result = await handleMarkIssuesFalsePositive(
179 |         { issueKeys: ['ISSUE-123', 'ISSUE-124'] },
180 |         mockClient
181 |       );
182 |       expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
183 |       expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalledWith(
184 |         'mark as false positive',
185 |         2,
186 |         ['ISSUE-123', 'ISSUE-124']
187 |       );
188 |       expect(mockClient.markIssuesFalsePositive).toHaveBeenCalledWith({
189 |         issueKeys: ['ISSUE-123', 'ISSUE-124'],
190 |         comment: 'Bulk operation comment',
191 |       });
192 |       const responseData = JSON.parse(result.content[0]!.text as string);
193 |       expect(responseData.message).toBe('2 issues marked as false positive');
194 |     });
195 |     it('should not request confirmation when elicitation is disabled', async () => {
196 |       mockElicitationManager.isEnabled.mockReturnValue(false);
197 |       const mockResponses = [
198 |         { issue: { key: 'ISSUE-123' }, components: [], rules: [], users: [] },
199 |         { issue: { key: 'ISSUE-124' }, components: [], rules: [], users: [] },
200 |       ];
201 |       mockClient.markIssuesFalsePositive.mockResolvedValue(mockResponses);
202 |       await handleMarkIssuesFalsePositive({ issueKeys: ['ISSUE-123', 'ISSUE-124'] }, mockClient);
203 |       expect(mockElicitationManager.confirmBulkOperation).not.toHaveBeenCalled();
204 |       expect(mockClient.markIssuesFalsePositive).toHaveBeenCalledWith({
205 |         issueKeys: ['ISSUE-123', 'ISSUE-124'],
206 |       });
207 |     });
208 |     it('should handle bulk operation rejection', async () => {
209 |       mockElicitationManager.isEnabled.mockReturnValue(true);
210 |       mockElicitationManager.confirmBulkOperation.mockResolvedValue({
211 |         action: 'reject',
212 |       });
213 |       const result = await handleMarkIssuesFalsePositive(
214 |         { issueKeys: ['ISSUE-123', 'ISSUE-124'] },
215 |         mockClient
216 |       );
217 |       expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalled();
218 |       expect(mockClient.markIssuesFalsePositive).not.toHaveBeenCalled();
219 |       const responseData = JSON.parse(result.content[0]!.text as string);
220 |       expect(responseData.message).toBe('Bulk operation cancelled by user');
221 |       expect(responseData.issueCount).toBe(2);
222 |     });
223 |     it('should handle bulk operation cancellation', async () => {
224 |       mockElicitationManager.isEnabled.mockReturnValue(true);
225 |       mockElicitationManager.confirmBulkOperation.mockResolvedValue({
226 |         action: 'cancel',
227 |       });
228 |       const result = await handleMarkIssuesFalsePositive(
229 |         { issueKeys: ['ISSUE-123', 'ISSUE-124'] },
230 |         mockClient
231 |       );
232 |       expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalled();
233 |       expect(mockClient.markIssuesFalsePositive).not.toHaveBeenCalled();
234 |       const responseData = JSON.parse(result.content[0]!.text as string);
235 |       expect(responseData.message).toBe('Bulk operation cancelled by user');
236 |       expect(responseData.issueCount).toBe(2);
237 |     });
238 |     it('should not override existing comment with elicited comment', async () => {
239 |       mockElicitationManager.isEnabled.mockReturnValue(true);
240 |       mockElicitationManager.confirmBulkOperation.mockResolvedValue({
241 |         action: 'accept',
242 |         content: { confirm: true, comment: 'Elicited comment' },
243 |       });
244 |       const mockResponses = [{ issue: { key: 'ISSUE-123' }, components: [], rules: [], users: [] }];
245 |       mockClient.markIssuesFalsePositive.mockResolvedValue(mockResponses);
246 |       await handleMarkIssuesFalsePositive(
247 |         { issueKeys: ['ISSUE-123'], comment: 'Existing comment' },
248 |         mockClient
249 |       );
250 |       expect(mockClient.markIssuesFalsePositive).toHaveBeenCalledWith({
251 |         issueKeys: ['ISSUE-123'],
252 |         comment: 'Existing comment', // Should keep existing comment
253 |       });
254 |     });
255 |   });
256 |   describe('handleMarkIssuesWontFix with elicitation', () => {
257 |     it('should request confirmation for bulk operations when enabled', async () => {
258 |       mockElicitationManager.isEnabled.mockReturnValue(true);
259 |       mockElicitationManager.confirmBulkOperation.mockResolvedValue({
260 |         action: 'accept',
261 |         content: { confirm: true, comment: "Bulk won't fix comment" },
262 |       });
263 |       const mockResponses = [
264 |         { issue: { key: 'ISSUE-456' }, components: [], rules: [], users: [] },
265 |         { issue: { key: 'ISSUE-457' }, components: [], rules: [], users: [] },
266 |       ];
267 |       mockClient.markIssuesWontFix.mockResolvedValue(mockResponses);
268 |       const result = await handleMarkIssuesWontFix(
269 |         { issueKeys: ['ISSUE-456', 'ISSUE-457'] },
270 |         mockClient
271 |       );
272 |       expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
273 |       expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalledWith(
274 |         "mark as won't fix",
275 |         2,
276 |         ['ISSUE-456', 'ISSUE-457']
277 |       );
278 |       expect(mockClient.markIssuesWontFix).toHaveBeenCalledWith({
279 |         issueKeys: ['ISSUE-456', 'ISSUE-457'],
280 |         comment: "Bulk won't fix comment",
281 |       });
282 |       const responseData = JSON.parse(result.content[0]!.text as string);
283 |       expect(responseData.message).toBe("2 issues marked as won't fix");
284 |     });
285 |     it('should handle bulk operation rejection', async () => {
286 |       mockElicitationManager.isEnabled.mockReturnValue(true);
287 |       mockElicitationManager.confirmBulkOperation.mockResolvedValue({
288 |         action: 'reject',
289 |       });
290 |       const result = await handleMarkIssuesWontFix(
291 |         { issueKeys: ['ISSUE-456', 'ISSUE-457'] },
292 |         mockClient
293 |       );
294 |       expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalled();
295 |       expect(mockClient.markIssuesWontFix).not.toHaveBeenCalled();
296 |       const responseData = JSON.parse(result.content[0]!.text as string);
297 |       expect(responseData.message).toBe('Bulk operation cancelled by user');
298 |       expect(responseData.issueCount).toBe(2);
299 |     });
300 |     it('should handle bulk operation cancellation', async () => {
301 |       mockElicitationManager.isEnabled.mockReturnValue(true);
302 |       mockElicitationManager.confirmBulkOperation.mockResolvedValue({
303 |         action: 'cancel',
304 |       });
305 |       const result = await handleMarkIssuesWontFix(
306 |         { issueKeys: ['ISSUE-456', 'ISSUE-457'] },
307 |         mockClient
308 |       );
309 |       expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalled();
310 |       expect(mockClient.markIssuesWontFix).not.toHaveBeenCalled();
311 |       const responseData = JSON.parse(result.content[0]!.text as string);
312 |       expect(responseData.message).toBe('Bulk operation cancelled by user');
313 |       expect(responseData.issueCount).toBe(2);
314 |     });
315 |   });
316 | });
317 | 
```

--------------------------------------------------------------------------------
/docs/architecture/decisions/0026-circuit-breaker-pattern-with-opossum.md:
--------------------------------------------------------------------------------

```markdown
  1 | # 26. Circuit Breaker Pattern with opossum
  2 | 
  3 | Date: 2025-10-11
  4 | 
  5 | ## Status
  6 | 
  7 | Accepted
  8 | 
  9 | ## Context
 10 | 
 11 | The SonarQube MCP Server integrates with external services (SonarQube API, SonarCloud API) that may experience temporary outages, slow responses, or intermittent failures. Without protective mechanisms, cascading failures can occur:
 12 | 
 13 | - **Cascading Failures**: Failed external calls can cause the entire service to become unresponsive
 14 | - **Resource Exhaustion**: Hanging requests consume threads, memory, and connections
 15 | - **Slow Response Times**: Timeouts accumulate, degrading user experience
 16 | - **No Fault Isolation**: Failures in one service affect the entire application
 17 | - **Poor Observability**: Difficult to track and diagnose external service issues
 18 | 
 19 | Traditional retry mechanisms alone don't solve these problems:
 20 | 
 21 | - Simple retries amplify load on failing services
 22 | - Exponential backoff helps but doesn't prevent cascading failures
 23 | - No mechanism to "fail fast" when a service is known to be down
 24 | 
 25 | The Circuit Breaker pattern addresses these issues by:
 26 | 
 27 | 1. Monitoring external service health
 28 | 2. Failing fast when a service is unhealthy
 29 | 3. Automatically recovering when the service becomes healthy
 30 | 4. Providing observability into service health
 31 | 
 32 | Library options considered:
 33 | 
 34 | - **opossum**: Battle-tested Node.js library, rich features, good TypeScript support
 35 | - **cockatiel**: Modern, TypeScript-first, but less mature ecosystem
 36 | - **brakes**: Simpler but less actively maintained
 37 | - **Custom implementation**: Full control but significant development and testing effort
 38 | 
 39 | ## Decision
 40 | 
 41 | We will use **opossum** as the circuit breaker library, wrapped in a factory pattern for consistent configuration and monitoring integration.
 42 | 
 43 | ### Core Architecture
 44 | 
 45 | **CircuitBreakerFactory** (`src/monitoring/circuit-breaker.ts`):
 46 | 
 47 | - Singleton pattern for managing circuit breakers
 48 | - Consistent configuration across all breakers
 49 | - Integrated metrics and logging
 50 | - Type-safe wrapper functions
 51 | 
 52 | ### Configuration
 53 | 
 54 | **Default Circuit Breaker Settings**:
 55 | 
 56 | ```typescript
 57 | {
 58 |   timeout: 10000,                    // 10 seconds - fail if request exceeds
 59 |   errorThresholdPercentage: 50,      // Open circuit if 50% of requests fail
 60 |   resetTimeout: 30000,                // 30 seconds - try again after this period
 61 |   rollingCountTimeout: 10000,         // 10 second rolling window for stats
 62 |   rollingCountBuckets: 10,            // 10 buckets (1 second each)
 63 |   volumeThreshold: 5                  // Minimum 5 requests before triggering
 64 | }
 65 | ```
 66 | 
 67 | **Circuit States**:
 68 | 
 69 | 1. **CLOSED** (normal): Requests pass through
 70 | 2. **OPEN** (failing): Requests immediately rejected (fail fast)
 71 | 3. **HALF_OPEN** (testing): Allow one request to test if service recovered
 72 | 
 73 | ### Usage Patterns
 74 | 
 75 | #### 1. Factory Pattern (Recommended)
 76 | 
 77 | ```typescript
 78 | import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';
 79 | 
 80 | const breaker = CircuitBreakerFactory.getBreaker(
 81 |   'sonarqube-api',
 82 |   async (projectKey: string) => {
 83 |     return await sonarqubeClient.getProject(projectKey);
 84 |   },
 85 |   {
 86 |     timeout: 15000, // Custom timeout for this operation
 87 |     volumeThreshold: 10,
 88 |   }
 89 | );
 90 | 
 91 | // Use the breaker
 92 | const result = await breaker.fire('my-project-key');
 93 | ```
 94 | 
 95 | #### 2. Function Wrapper Pattern
 96 | 
 97 | ```typescript
 98 | import { wrapWithCircuitBreaker } from './monitoring/circuit-breaker.js';
 99 | 
100 | const getProjectWithCircuitBreaker = wrapWithCircuitBreaker(
101 |   'get-project',
102 |   async (projectKey: string) => {
103 |     return await sonarqubeClient.getProject(projectKey);
104 |   }
105 | );
106 | 
107 | // Use the wrapped function
108 | const result = await getProjectWithCircuitBreaker('my-project-key');
109 | ```
110 | 
111 | #### 3. Decorator Pattern (Method-level)
112 | 
113 | ```typescript
114 | import { withCircuitBreaker } from './monitoring/circuit-breaker.js';
115 | 
116 | class SonarQubeService {
117 |   @withCircuitBreaker('sonarqube-service', { timeout: 15000 })
118 |   async getProject(projectKey: string) {
119 |     return await this.client.getProject(projectKey);
120 |   }
121 | }
122 | ```
123 | 
124 | ### Event-Driven Monitoring
125 | 
126 | Circuit breakers emit events for observability:
127 | 
128 | **State Change Events**:
129 | 
130 | - `open`: Circuit opened due to failure threshold
131 | - `close`: Circuit closed (recovered)
132 | - `halfOpen`: Circuit testing recovery
133 | 
134 | **Request Events**:
135 | 
136 | - `success`: Request succeeded
137 | - `failure`: Request failed
138 | - `timeout`: Request timed out
139 | - `reject`: Request rejected (circuit open)
140 | 
141 | **Metrics Integration**:
142 | 
143 | ```typescript
144 | breaker.on('open', () => {
145 |   updateCircuitBreakerMetrics(name, 'open');
146 |   logger.warn('Circuit breaker opened', { name });
147 | });
148 | 
149 | breaker.on('failure', (error) => {
150 |   trackCircuitBreakerFailure(name);
151 |   logger.debug('Request failed', { name, error: error.message });
152 | });
153 | ```
154 | 
155 | ### Error Filtering
156 | 
157 | Custom error filtering for selective circuit breaking:
158 | 
159 | ```typescript
160 | const breaker = CircuitBreakerFactory.getBreaker('sonarqube-api', fetchFunction, {
161 |   errorFilter: (error) => {
162 |     // Don't count 404s as failures
163 |     if (error.message.includes('404')) return false;
164 | 
165 |     // Don't count authentication errors
166 |     if (error.message.includes('401')) return false;
167 | 
168 |     // Count all other errors
169 |     return true;
170 |   },
171 | });
172 | ```
173 | 
174 | ## Consequences
175 | 
176 | ### Positive
177 | 
178 | - **Cascading Failure Prevention**: Failed services don't bring down the entire application
179 | - **Fail Fast**: Immediate rejection when service is down (no waiting for timeouts)
180 | - **Automatic Recovery**: Circuit automatically tests and recovers when service is healthy
181 | - **Resource Protection**: Prevents resource exhaustion from hanging requests
182 | - **Observability**: Rich metrics and events for monitoring external service health
183 | - **Consistent Configuration**: Factory pattern ensures uniform settings
184 | - **Type Safety**: TypeScript generics provide type-safe circuit breaker calls
185 | - **Flexible Usage**: Multiple patterns (factory, wrapper, decorator) for different use cases
186 | - **Metrics Integration**: Built-in integration with monitoring system
187 | - **Battle-Tested**: opossum is production-proven with years of usage
188 | - **Selective Breaking**: Error filtering allows fine-grained control
189 | 
190 | ### Negative
191 | 
192 | - **Complexity**: Adds another layer of abstraction and configuration
193 | - **False Positives**: Circuit may open due to temporary network blips
194 | - **Configuration Overhead**: Need to tune parameters for each service
195 | - **Delayed Recovery**: The `resetTimeout` period means recovery is delayed even if the service becomes healthy immediately
196 | - **Testing Complexity**: Need to test circuit breaker behavior in unit/integration tests
197 | - **Dependency**: Adds opossum as a runtime dependency
198 | - **State Management**: Circuit breaker state is in-memory (not shared across instances)
199 | 
200 | ### Neutral
201 | 
202 | - **Performance Overhead**: Minimal overhead for healthy services (< 1ms)
203 | - **Memory Usage**: Small memory footprint for state tracking
204 | - **Learning Curve**: Team needs to understand circuit breaker pattern
205 | - **Error Handling**: Need to handle circuit breaker exceptions separately from service errors
206 | 
207 | ## Implementation
208 | 
209 | ### Installation
210 | 
211 | ```bash
212 | pnpm add opossum
213 | pnpm add -D @types/opossum
214 | ```
215 | 
216 | ### Basic Usage Example
217 | 
218 | ```typescript
219 | import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';
220 | 
221 | // Create a circuit breaker for SonarQube API calls
222 | const breaker = CircuitBreakerFactory.getBreaker(
223 |   'sonarqube-issues-search',
224 |   async (projectKey: string, severity: string) => {
225 |     // This function will be protected by the circuit breaker
226 |     const response = await fetch(
227 |       `${baseUrl}/api/issues/search?projectKeys=${projectKey}&severities=${severity}`,
228 |       { signal: AbortSignal.timeout(10000) }
229 |     );
230 | 
231 |     if (!response.ok) {
232 |       throw new Error(`SonarQube API returned ${response.status}`);
233 |     }
234 | 
235 |     return response.json();
236 |   },
237 |   {
238 |     timeout: 15000, // 15 second timeout
239 |     errorThresholdPercentage: 40, // Open at 40% failure rate
240 |     volumeThreshold: 10, // Need 10 requests before circuit can open
241 |   }
242 | );
243 | 
244 | // Use the circuit breaker
245 | try {
246 |   const issues = await breaker.fire('my-project', 'CRITICAL');
247 |   console.log('Found issues:', issues.total);
248 | } catch (error) {
249 |   if (error.message.includes('breaker is open')) {
250 |     // Circuit is open - service is known to be failing
251 |     console.error('SonarQube API is currently unavailable');
252 |   } else {
253 |     // Individual request failed
254 |     console.error('Request failed:', error.message);
255 |   }
256 | }
257 | ```
258 | 
259 | ### Advanced Configuration
260 | 
261 | ```typescript
262 | const breaker = CircuitBreakerFactory.getBreaker('sonarqube-quality-gates', fetchQualityGate, {
263 |   // Timing
264 |   timeout: 20000, // 20 second timeout
265 |   resetTimeout: 60000, // Try recovery after 60 seconds
266 | 
267 |   // Failure thresholds
268 |   errorThresholdPercentage: 30, // Open at 30% failure rate
269 |   volumeThreshold: 20, // Need 20 requests minimum
270 | 
271 |   // Rolling window
272 |   rollingCountTimeout: 20000, // 20 second rolling window
273 |   rollingCountBuckets: 20, // 20 buckets (1 second each)
274 | 
275 |   // Error filtering
276 |   errorFilter: (error: Error) => {
277 |     // Don't count 404 as failures
278 |     return !error.message.includes('404');
279 |   },
280 | });
281 | ```
282 | 
283 | ### Monitoring Circuit Breaker Health
284 | 
285 | ```typescript
286 | import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';
287 | 
288 | // Get statistics for a specific circuit breaker
289 | const stats = CircuitBreakerFactory.getStats('sonarqube-api');
290 | 
291 | if (stats) {
292 |   console.log({
293 |     successCount: stats.successes,
294 |     failureCount: stats.failures,
295 |     rejectedCount: stats.rejects,
296 |     timeoutCount: stats.timeouts,
297 |     averageResponseTime: stats.latencyMean,
298 |     percentiles: {
299 |       p50: stats.percentiles[50],
300 |       p95: stats.percentiles[95],
301 |       p99: stats.percentiles[99],
302 |     },
303 |   });
304 | }
305 | 
306 | // Get all circuit breakers
307 | const allBreakers = CircuitBreakerFactory.getAllBreakers();
308 | for (const [name, breaker] of allBreakers) {
309 |   console.log(`${name}: ${breaker.opened ? 'OPEN' : 'CLOSED'}`);
310 | }
311 | ```
312 | 
313 | ### Testing with Circuit Breakers
314 | 
315 | ```typescript
316 | import { describe, it, expect, beforeEach } from 'vitest';
317 | import { CircuitBreakerFactory } from './circuit-breaker.js';
318 | 
319 | describe('Circuit Breaker', () => {
320 |   beforeEach(() => {
321 |     // Reset circuit breakers between tests
322 |     CircuitBreakerFactory.reset();
323 |   });
324 | 
325 |   it('should open circuit after failure threshold', async () => {
326 |     const failingFunction = async () => {
327 |       throw new Error('Service unavailable');
328 |     };
329 | 
330 |     const breaker = CircuitBreakerFactory.getBreaker('test-service', failingFunction, {
331 |       errorThresholdPercentage: 50,
332 |       volumeThreshold: 3,
333 |       timeout: 1000,
334 |     });
335 | 
336 |     // Trigger failures to open circuit
337 |     for (let i = 0; i < 5; i++) {
338 |       try {
339 |         await breaker.fire();
340 |       } catch (error) {
341 |         // Expected to fail
342 |       }
343 |     }
344 | 
345 |     // Circuit should now be open
346 |     expect(breaker.opened).toBe(true);
347 | 
348 |     // Next request should be rejected immediately
349 |     await expect(breaker.fire()).rejects.toThrow('breaker is open');
350 |   });
351 | });
352 | ```
353 | 
354 | ### Graceful Shutdown
355 | 
356 | ```typescript
357 | import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';
358 | 
359 | // On application shutdown
360 | process.on('SIGTERM', () => {
361 |   console.log('Shutting down circuit breakers...');
362 |   CircuitBreakerFactory.shutdown();
363 |   console.log('Circuit breakers shut down');
364 |   process.exit(0);
365 | });
366 | ```
367 | 
368 | ## State Machine Diagram
369 | 
370 | ```
371 |                      ┌────────────────────────┐
372 |                      │                        │
373 |                      │       CLOSED           │
374 |                      │  (Normal Operation)    │
375 |                      │                        │
376 |                      └───────────┬────────────┘
377 |                                  │
378 |                                  │ Failure threshold
379 |                                  │ exceeded
380 |                                  ↓
381 |                      ┌────────────────────────┐
382 |                      │                        │
383 |                      │        OPEN            │
384 |                      │   (Fail Fast Mode)     │
385 |                      │                        │
386 |                      └───────────┬────────────┘
387 |                                  │
388 |                                  │ Reset timeout
389 |                                  │ elapsed
390 |                                  ↓
391 |                      ┌────────────────────────┐
392 |                      │                        │
393 |                      │      HALF_OPEN         │
394 |                      │   (Testing Recovery)   │
395 |                      │                        │
396 |                      └───┬───────────┬────────┘
397 |                          │           │
398 |                          │ Success   │ Failure
399 |                          ↓           ↓
400 |                        CLOSED       OPEN
401 | ```
402 | 
403 | ## Examples
404 | 
405 | ### Example 1: SonarQube API Integration
406 | 
407 | ```typescript
408 | import { wrapWithCircuitBreaker } from './monitoring/circuit-breaker.js';
409 | import { SonarQubeClient } from './sonarqube-client.js';
410 | 
411 | const client = new SonarQubeClient(config);
412 | 
413 | // Wrap API calls with circuit breaker
414 | export const searchIssues = wrapWithCircuitBreaker(
415 |   'sonarqube.searchIssues',
416 |   async (params: IssueSearchParams) => {
417 |     return await client.issues.search(params);
418 |   },
419 |   {
420 |     timeout: 15000,
421 |     errorThresholdPercentage: 40,
422 |   }
423 | );
424 | 
425 | export const getProject = wrapWithCircuitBreaker(
426 |   'sonarqube.getProject',
427 |   async (key: string) => {
428 |     return await client.projects.get(key);
429 |   },
430 |   {
431 |     timeout: 10000,
432 |     errorThresholdPercentage: 50,
433 |   }
434 | );
435 | ```
436 | 
437 | ### Example 2: Handling Circuit Breaker States
438 | 
439 | ```typescript
440 | import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';
441 | 
442 | async function fetchWithFallback(projectKey: string) {
443 |   const breaker = CircuitBreakerFactory.getBreaker('sonarqube-api', async (key: string) => {
444 |     return await sonarqubeClient.getProject(key);
445 |   });
446 | 
447 |   try {
448 |     return await breaker.fire(projectKey);
449 |   } catch (error) {
450 |     if (error.message.includes('breaker is open')) {
451 |       // Circuit is open - return cached data or default
452 |       console.warn('Circuit breaker is open, using fallback');
453 |       return getCachedProject(projectKey);
454 |     }
455 | 
456 |     // Other error - propagate
457 |     throw error;
458 |   }
459 | }
460 | ```
461 | 
462 | ### Example 3: Custom Error Handling
463 | 
464 | ```typescript
465 | const breaker = CircuitBreakerFactory.getBreaker('sonarqube-with-filter', fetchData, {
466 |   errorFilter: (error: Error) => {
467 |     // Don't count 404 (not found) as a failure
468 |     if (error.message.includes('404')) {
469 |       return false;
470 |     }
471 | 
472 |     // Don't count 401/403 (auth errors) as failures
473 |     if (error.message.includes('401') || error.message.includes('403')) {
474 |       return false;
475 |     }
476 | 
477 |     // Count 500-level errors and timeouts
478 |     return true;
479 |   },
480 | });
481 | ```
482 | 
483 | ## References
484 | 
485 | - opossum Documentation: https://nodeshift.dev/opossum/
486 | - opossum GitHub: https://github.com/nodeshift/opossum
487 | - Circuit Breaker Pattern: https://martinfowler.com/bliki/CircuitBreaker.html
488 | - Implementation: src/monitoring/circuit-breaker.ts
489 | - Tests: src/monitoring/__tests__/circuit-breaker.test.ts
490 | - Related ADR: ADR-0018 (Comprehensive Monitoring and Observability)
491 | - Metrics Integration: src/monitoring/metrics.ts
492 | 
```

--------------------------------------------------------------------------------
/docs/architecture/decisions/0024-ci-cd-platform-github-actions.md:
--------------------------------------------------------------------------------

```markdown
  1 | # 24. CI/CD Platform: GitHub Actions
  2 | 
  3 | Date: 2025-10-11
  4 | 
  5 | ## Status
  6 | 
  7 | Accepted
  8 | 
  9 | ## Context
 10 | 
 11 | The SonarQube MCP Server requires a robust CI/CD platform to automate testing, building, security scanning, and releasing. The platform must:
 12 | 
 13 | - Provide fast feedback on pull requests (< 5 minutes for validation)
 14 | - Run comprehensive quality checks before merging
 15 | - Automate semantic versioning and releases
 16 | - Support multi-platform Docker image builds
 17 | - Publish to multiple registries (NPM, GitHub Packages, Docker Hub, GHCR)
 18 | - Integrate with security scanning tools
 19 | - Generate supply chain security attestations (SLSA provenance)
 20 | - Prevent concurrent releases (avoid race conditions)
 21 | - Support reusable workflows to reduce duplication
 22 | 
 23 | Platform options considered:
 24 | 
 25 | - **GitHub Actions**: Native GitHub integration, generous free tier for open source, mature ecosystem
 26 | - **CircleCI**: Good parallelization but costs for private repos, less GitHub integration
 27 | - **Travis CI**: Declining support, slower builds
 28 | - **Jenkins**: Self-hosted, more complex setup and maintenance
 29 | - **GitLab CI**: Requires GitLab hosting, less integration with GitHub ecosystem
 30 | 
 31 | ## Decision
 32 | 
 33 | We will use **GitHub Actions** as the exclusive CI/CD platform for this project.
 34 | 
 35 | ### Workflow Architecture
 36 | 
 37 | The CI/CD pipeline consists of 7 workflow files organized into 3 categories:
 38 | 
 39 | #### 1. Primary Workflows (User-Facing)
 40 | 
 41 | **`.github/workflows/main.yml`** - Main Branch Release Pipeline
 42 | 
 43 | - **Trigger**: Push to `main` branch
 44 | - **Purpose**: Automated releases after merge
 45 | - **Jobs**:
 46 |   1. `validate`: Run all quality checks (reusable workflow)
 47 |   2. `security`: Run security scans (reusable workflow)
 48 |   3. `build`: Build TypeScript, version bump, create tag
 49 |   4. `docker`: Build multi-platform Docker image to GHCR
 50 |   5. `npm`: Package NPM tarball with attestations
 51 |   6. `create-release`: Create GitHub release with artifacts
 52 | - **Concurrency**: `cancel-in-progress: false` (ensures releases complete)
 53 | - **Permissions**: Elevated (write to releases, packages, security events)
 54 | 
 55 | **`.github/workflows/pr.yml`** - Pull Request Validation
 56 | 
 57 | - **Trigger**: Pull request open/sync to any branch
 58 | - **Purpose**: Fast feedback on code quality
 59 | - **Jobs**:
 60 |   1. `validate`: Run all quality checks (reusable workflow)
 61 |   2. `security`: Run security scans (reusable workflow)
 62 | - **Concurrency**: `cancel-in-progress: true` (cancel outdated runs on new push)
 63 | - **Permissions**: Read-only (security)
 64 | 
 65 | **`.github/workflows/publish.yml`** - Multi-Registry Publishing
 66 | 
 67 | - **Trigger**: GitHub release created
 68 | - **Purpose**: Publish artifacts to public registries
 69 | - **Jobs**:
 70 |   1. `npm`: Publish to NPM with provenance
 71 |   2. `github-packages`: Publish to GitHub Packages
 72 |   3. `docker`: Copy GHCR image to Docker Hub (multi-platform manifest)
 73 | - **Concurrency**: `cancel-in-progress: false` (ensures publish completes)
 74 | - **Permissions**: Read-only (uses secrets for publishing)
 75 | 
 76 | **`.github/workflows/codeql.yml`** - CodeQL Security Analysis
 77 | 
 78 | - **Trigger**: Push to `main`, pull requests, schedule (weekly)
 79 | - **Purpose**: SAST (Static Application Security Testing)
 80 | - **Language**: JavaScript/TypeScript
 81 | - **Permissions**: Security events write
 82 | 
 83 | #### 2. Reusable Workflows (Composable Building Blocks)
 84 | 
 85 | **`.github/workflows/reusable-validate.yml`** - Quality Validation Suite
 86 | 
 87 | - **Inputs**: `pnpm-version` (default: 10.17.0)
 88 | - **Secrets**: `SONAR_TOKEN` (for SonarCloud)
 89 | - **Jobs**: Runs in parallel:
 90 |   1. Dependency audit (critical vulnerabilities only)
 91 |   2. Type checking (`pnpm typecheck`)
 92 |   3. Linting (`pnpm lint`, workflows, markdown, YAML)
 93 |   4. Format checking (`pnpm format`)
 94 |   5. Tests with coverage (`pnpm test`, 80% threshold)
 95 |   6. SonarCloud analysis
 96 | - **Strategy**: Fail-fast disabled (shows all errors)
 97 | 
 98 | **`.github/workflows/reusable-security.yml`** - Security Scanning Suite
 99 | 
100 | - **Inputs**: `pnpm-version` (default: 10.17.0)
101 | - **Jobs**: Runs in parallel:
102 |   1. CodeQL analysis (JavaScript/TypeScript SAST)
103 |   2. OSV-Scanner (vulnerability detection)
104 |   3. Build validation (ensures code compiles)
105 | - **Permissions**: Security events write
106 | 
107 | **`.github/workflows/reusable-docker.yml`** - Multi-Platform Docker Build
108 | 
109 | - **Inputs**:
110 |   - `platforms`: Target platforms (default: `linux/amd64,linux/arm64`)
111 |   - `save-artifact`: Save image as artifact (true/false)
112 |   - `artifact-name`: Name for saved artifact
113 |   - `image-name`: Docker image name
114 |   - `version`: Image version tag
115 |   - `tag_sha`: Git commit SHA for tagging
116 |   - `build_artifact`: Build artifact name to download
117 | - **Outputs**: Image tags and digests
118 | - **Features**:
119 |   - Uses Docker Buildx for multi-platform builds
120 |   - QEMU for ARM64 emulation
121 |   - Caches layers in GitHub Actions cache
122 |   - Generates SBOM and SLSA attestations
123 |   - Pushes to GHCR with multiple tags
124 | 
125 | ### Key Architectural Patterns
126 | 
127 | #### 1. Reusable Workflow Pattern
128 | 
129 | Benefits:
130 | 
131 | - **DRY Principle**: Define once, use in multiple workflows
132 | - **Consistency**: Same validation in PR and main workflows
133 | - **Maintainability**: Update validation logic in one place
134 | - **Testability**: Can test reusable workflows in isolation
135 | 
136 | Example usage:
137 | 
138 | ```yaml
139 | jobs:
140 |   validate:
141 |     uses: ./.github/workflows/reusable-validate.yml
142 |     secrets:
143 |       SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
144 | ```
145 | 
146 | #### 2. Parallel Execution Strategy
147 | 
148 | All quality checks run in parallel to minimize CI time:
149 | 
150 | - Type checking, linting, testing run concurrently
151 | - Security scans run in parallel with validation
152 | - Total validation time: ~3-4 minutes (vs ~10-15 sequential)
153 | 
154 | #### 3. Unified Build Artifact
155 | 
156 | The main workflow builds once and shares artifacts:
157 | 
158 | ```yaml
159 | build:
160 |   outputs:
161 |     artifact-name: dist-${{ github.sha }}
162 |   steps:
163 |     - name: Build TypeScript
164 |       run: pnpm build
165 |     - name: Upload build artifact
166 |       uses: actions/upload-artifact@v4
167 | 
168 | docker:
169 |   needs: [build]
170 |   steps:
171 |     - name: Download build artifact
172 |       uses: actions/download-artifact@v4
173 |       with:
174 |         name: ${{ needs.build.outputs.artifact-name }}
175 | ```
176 | 
177 | Benefits:
178 | 
179 | - Consistent artifacts across jobs
180 | - Faster pipeline (build once, use multiple times)
181 | - Reduced risk of build inconsistencies
182 | 
183 | #### 4. Supply Chain Security
184 | 
185 | Every release includes:
186 | 
187 | - **SLSA Build Provenance**: Attestations for all artifacts
188 | - **SBOM**: Software Bill of Materials (CycloneDX format)
189 | - **Signature Verification**: GitHub attestations for provenance
190 | 
191 | ```yaml
192 | - name: Generate attestations
193 |   uses: actions/attest-build-provenance@v2
194 |   with:
195 |     subject-path: |
196 |       dist/**/*.js
197 |       sbom.cdx.json
198 |       *.tgz
199 | ```
200 | 
201 | #### 5. Permission Model
202 | 
203 | **Principle of Least Privilege**:
204 | 
205 | - PR workflows: Read-only (security events only)
206 | - Main workflow: Write permissions for releases and packages
207 | - Publish workflow: No write to GitHub, uses external secrets
208 | 
209 | Example:
210 | 
211 | ```yaml
212 | permissions:
213 |   contents: write # Create releases and tags
214 |   id-token: write # Generate SLSA attestations
215 |   attestations: write # Attach attestations
216 |   security-events: write # Upload security scan results
217 |   actions: read # Access workflow artifacts
218 |   packages: write # Push Docker images to GHCR
219 | ```
220 | 
221 | #### 6. Concurrency Control
222 | 
223 | **Main workflow** (`cancel-in-progress: false`):
224 | 
225 | ```yaml
226 | concurrency:
227 |   group: ${{ github.workflow }}-${{ github.ref }}
228 |   cancel-in-progress: false # Let releases complete
229 | ```
230 | 
231 | **PR workflow** (`cancel-in-progress: true`):
232 | 
233 | ```yaml
234 | concurrency:
235 |   group: ${{ github.workflow }}-${{ github.head_ref }}
236 |   cancel-in-progress: true # Cancel outdated validations
237 | ```
238 | 
239 | This prevents:
240 | 
241 | - Race conditions during version bumps and releases
242 | - Wasted CI time on outdated PR pushes
243 | - Conflicting Git commits to main branch
244 | 
245 | #### 7. Workflow Skipping
246 | 
247 | Version bump commits include `[skip actions]` to prevent recursion:
248 | 
249 | ```bash
250 | git commit -m "chore(release): v$VERSION [skip actions]"
251 | ```
252 | 
253 | This prevents the main workflow from re-running after version commits.
254 | 
255 | ## Consequences
256 | 
257 | ### Positive
258 | 
259 | - **Native GitHub Integration**: Seamless integration with GitHub features (releases, packages, security)
260 | - **Free for Open Source**: No cost for public repositories
261 | - **Parallel Execution**: 3-4 minute validation vs 10-15 sequential
262 | - **Reusable Workflows**: DRY principle applied to CI/CD
263 | - **Supply Chain Security**: Built-in attestation and SBOM generation
264 | - **Multi-Platform Builds**: Docker Buildx support for ARM64 and AMD64
265 | - **Artifact Sharing**: Build once, use in multiple jobs
266 | - **Concurrency Control**: Prevents race conditions and wasted runs
267 | - **Security Scanning**: Integrated CodeQL, OSV-Scanner, Trivy
268 | - **Rich Ecosystem**: Large marketplace of actions
269 | - **Matrix Builds**: Support for testing multiple versions (if needed)
270 | 
271 | ### Negative
272 | 
273 | - **Vendor Lock-in**: Heavily tied to GitHub platform
274 | - **Learning Curve**: YAML syntax and workflow composition can be complex
275 | - **Debugging Difficulty**: Cannot run workflows locally (need act or similar)
276 | - **Rate Limits**: API rate limits for artifacts and packages
277 | - **Build Time**: Slower than some alternatives (CircleCI, Buildkite)
278 | - **Secret Management**: Limited secret organization (no folders/namespaces)
279 | - **Workflow File Size**: Large workflows can be hard to navigate
280 | - **Action Versioning**: Need to maintain action versions across workflows
281 | 
282 | ### Neutral
283 | 
284 | - **YAML Configuration**: Human-readable but verbose
285 | - **Marketplace Quality**: Third-party actions vary in quality and maintenance
286 | - **Caching Strategy**: Need to carefully design cache keys
287 | - **Artifact Retention**: Default 90 days, costs for long-term storage
288 | 
289 | ## Implementation
290 | 
291 | ### Setup Requirements
292 | 
293 | 1. **Repository Secrets** (Settings → Secrets → Actions):
294 |    - `RELEASE_TOKEN`: Personal Access Token with repo write
295 |    - `SONAR_TOKEN`: SonarCloud authentication token
296 |    - `NPM_TOKEN`: NPM registry publish token
297 |    - `DOCKERHUB_USERNAME`: Docker Hub username
298 |    - `DOCKERHUB_TOKEN`: Docker Hub access token
299 | 
300 | 2. **Repository Variables** (Settings → Variables → Actions):
301 |    - `ENABLE_DOCKER_RELEASE`: Set to 'true' to enable Docker releases
302 |    - `ENABLE_NPM_RELEASE`: Set to 'true' to enable NPM releases
303 | 
304 | 3. **Branch Protection Rules** (Settings → Branches → main):
305 |    - Require status checks: `validate`, `security`
306 |    - Require branches to be up to date before merging
307 |    - Require linear history
308 | 
309 | ### Common Workflow Patterns
310 | 
311 | **Installing pnpm consistently**:
312 | 
313 | ```yaml
314 | - name: Install pnpm
315 |   uses: pnpm/action-setup@v4
316 |   with:
317 |     version: 10.17.0
318 |     run_install: false
319 |     standalone: true
320 | 
321 | - name: Setup Node.js
322 |   uses: actions/setup-node@v4
323 |   with:
324 |     node-version: 22
325 |     cache: pnpm
326 | 
327 | - name: Install dependencies
328 |   run: pnpm install --frozen-lockfile
329 | ```
330 | 
331 | **Running validation checks**:
332 | 
333 | ```yaml
334 | - name: Audit dependencies
335 |   run: pnpm audit --audit-level critical
336 | 
337 | - name: Type check
338 |   run: pnpm typecheck
339 | 
340 | - name: Lint
341 |   run: pnpm lint
342 | 
343 | - name: Test
344 |   run: pnpm test
345 | ```
346 | 
347 | **Conditional job execution**:
348 | 
349 | ```yaml
350 | docker:
351 |   needs: [build]
352 |   if: vars.ENABLE_DOCKER_RELEASE == 'true' && needs.build.outputs.changed == 'true'
353 | ```
354 | 
355 | ### Workflow Validation
356 | 
357 | Lint workflow files locally:
358 | 
359 | ```bash
360 | pnpm lint:workflows  # Uses actionlint
361 | ```
362 | 
363 | ### Monitoring and Debugging
364 | 
365 | **Check workflow status**:
366 | 
367 | ```bash
368 | gh run list --limit 10
369 | gh run view <run-id>
370 | gh run watch <run-id>
371 | ```
372 | 
373 | **View logs**:
374 | 
375 | ```bash
376 | gh run view <run-id> --log
377 | gh run view <run-id> --log-failed  # Only failed steps
378 | ```
379 | 
380 | **Re-run workflow**:
381 | 
382 | ```bash
383 | gh run rerun <run-id>
384 | gh run rerun <run-id> --failed  # Only failed jobs
385 | ```
386 | 
387 | ## Examples
388 | 
389 | ### Example 1: Pull Request Flow
390 | 
391 | Developer opens PR:
392 | 
393 | ```
394 | PR opened → pr.yml triggers
395 | ├─ validate job (reusable-validate.yml)
396 | │  ├─ audit (parallel)
397 | │  ├─ typecheck (parallel)
398 | │  ├─ lint (parallel)
399 | │  ├─ format (parallel)
400 | │  ├─ test (parallel)
401 | │  └─ sonarcloud (parallel)
402 | └─ security job (reusable-security.yml)
403 |    ├─ codeql (parallel)
404 |    ├─ osv-scanner (parallel)
405 |    └─ build-check (parallel)
406 | 
407 | Total time: ~3-4 minutes
408 | Status: ✅ All checks passed
409 | ```
410 | 
411 | ### Example 2: Main Branch Release Flow
412 | 
413 | PR merged to main:
414 | 
415 | ```
416 | Push to main → main.yml triggers
417 | ├─ validate job (reusable) → ✅
418 | ├─ security job (reusable) → ✅
419 | ├─ build job
420 | │  ├─ Version packages (changeset)
421 | │  ├─ Commit version bump [skip actions]
422 | │  ├─ Create tag v1.11.0
423 | │  ├─ Build TypeScript
424 | │  └─ Upload artifact dist-abc1234
425 | ├─ docker job (needs: build)
426 | │  ├─ Download artifact dist-abc1234
427 | │  ├─ Build linux/amd64,linux/arm64
428 | │  └─ Push to ghcr.io with attestations
429 | ├─ npm job (needs: build)
430 | │  ├─ Download artifact dist-abc1234
431 | │  ├─ Create NPM package tarball
432 | │  └─ Upload artifact with attestations
433 | └─ create-release job (needs: build, docker, npm)
434 |    ├─ Generate SBOM
435 |    ├─ Create tar.gz and zip archives
436 |    ├─ Generate attestations for all artifacts
437 |    └─ Create GitHub release v1.11.0
438 | 
439 | Release created → publish.yml triggers
440 | ├─ npm job → Publish to NPM ✅
441 | ├─ github-packages job → Publish to GitHub Packages ✅
442 | └─ docker job → Copy GHCR to Docker Hub ✅
443 | 
444 | Total time: ~10-12 minutes
445 | Result: Version 1.11.0 published to all registries
446 | ```
447 | 
448 | ### Example 3: Security Scanning
449 | 
450 | Push triggers security scanning:
451 | 
452 | ```
453 | reusable-security.yml
454 | ├─ CodeQL
455 | │  ├─ Initialize CodeQL database
456 | │  ├─ Autobuild TypeScript
457 | │  ├─ Analyze for security vulnerabilities
458 | │  └─ Upload results to Security tab
459 | ├─ OSV-Scanner
460 | │  ├─ Scan pnpm-lock.yaml for vulnerabilities
461 | │  ├─ Generate SARIF report
462 | │  └─ Upload to Security tab
463 | └─ Build Check
464 |    ├─ Install dependencies
465 |    ├─ Build TypeScript
466 |    └─ Verify no build errors
467 | 
468 | Results visible in: Security → Code scanning alerts
469 | ```
470 | 
471 | ## Workflow Diagram
472 | 
473 | ```
474 | Pull Request         Main Branch                Release Created
475 |      │                   │                            │
476 |      ├─→ pr.yml         ├─→ main.yml                ├─→ publish.yml
477 |      │   │               │   │                        │   │
478 |      │   ├─ validate     │   ├─ validate             │   ├─ npm → NPM
479 |      │   └─ security     │   ├─ security             │   ├─ github-packages
480 |      │                   │   ├─ build                │   └─ docker → Docker Hub
481 |      │                   │   │  ├─ version           │
482 |      │                   │   │  ├─ build             │
483 |      │                   │   │  └─ tag               │
484 |      │                   │   ├─ docker → GHCR        │
485 |      │                   │   ├─ npm                  │
486 |      │                   │   └─ create-release       │
487 |      │                   │       └─ trigger ─────────┘
488 |      │                   │
489 |      ↓                   ↓
490 | Status checks      GitHub Release
491 | on PR             with artifacts
492 | ```
493 | 
494 | ## References
495 | 
496 | - GitHub Actions Documentation: https://docs.github.com/en/actions
497 | - Reusable Workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows
498 | - SLSA Attestations: https://slsa.dev/
499 | - Docker Buildx: https://docs.docker.com/buildx/
500 | - Workflow Files: `.github/workflows/`
501 | - Workflow Linting: `pnpm lint:workflows` (actionlint)
502 | - SonarCloud: https://sonarcloud.io/project/overview?id=sonarqube-mcp-server
503 | 
```

--------------------------------------------------------------------------------
/src/domains/issues.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import type {
  2 |   IssuesParams,
  3 |   SonarQubeIssuesResult,
  4 |   SonarQubeIssue,
  5 |   SonarQubeRule,
  6 |   SonarQubeIssueComment,
  7 |   MarkIssueFalsePositiveParams,
  8 |   MarkIssueWontFixParams,
  9 |   BulkIssueMarkParams,
 10 |   AddCommentToIssueParams,
 11 |   AssignIssueParams,
 12 |   ConfirmIssueParams,
 13 |   UnconfirmIssueParams,
 14 |   ResolveIssueParams,
 15 |   ReopenIssueParams,
 16 |   DoTransitionResponse,
 17 | } from '../types/index.js';
 18 | import { BaseDomain } from './base.js';
 19 | 
 20 | // Type aliases for sonarqube-web-api-client enums (not exported by the library)
 21 | type OwaspTop10Category = 'a1' | 'a2' | 'a3' | 'a4' | 'a5' | 'a6' | 'a7' | 'a8' | 'a9' | 'a10';
 22 | type OwaspTop10v2021Category = 'a1' | 'a2' | 'a3' | 'a4' | 'a5' | 'a6' | 'a7' | 'a8' | 'a9' | 'a10';
 23 | type SansTop25Category = 'insecure-interaction' | 'risky-resource' | 'porous-defenses';
 24 | type IssueFacet =
 25 |   | 'severities'
 26 |   | 'statuses'
 27 |   | 'resolutions'
 28 |   | 'rules'
 29 |   | 'tags'
 30 |   | 'types'
 31 |   | 'author'
 32 |   | 'authors'
 33 |   | 'assignees'
 34 |   | 'assigned_to_me'
 35 |   | 'languages'
 36 |   | 'projects'
 37 |   | 'directories'
 38 |   | 'files'
 39 |   | 'cwe'
 40 |   | 'createdAt'
 41 |   | 'owaspTop10'
 42 |   | 'owaspTop10-2021'
 43 |   | 'owaspAsvs-4.0'
 44 |   | 'owaspMobileTop10-2024'
 45 |   | 'pciDss-3.2'
 46 |   | 'pciDss-4.0'
 47 |   | 'sansTop25'
 48 |   | 'sonarsourceSecurity'
 49 |   | 'stig-ASD_V5R3'
 50 |   | 'casa'
 51 |   | 'codeVariants'
 52 |   | 'cleanCodeAttributeCategories'
 53 |   | 'impactSeverities'
 54 |   | 'impactSoftwareQualities'
 55 |   | 'issueStatuses'
 56 |   | 'prioritizedRule'
 57 |   | 'scopes';
 58 | 
 59 | /**
 60 |  * Domain module for issues-related operations
 61 |  */
 62 | export class IssuesDomain extends BaseDomain {
 63 |   /**
 64 |    * Gets issues for a project in SonarQube
 65 |    * @param params Parameters including project key, severity, pagination and organization
 66 |    * @returns Promise with the list of issues
 67 |    */
 68 |   async getIssues(params: IssuesParams): Promise<SonarQubeIssuesResult> {
 69 |     const { page, pageSize } = params;
 70 |     const builder = this.webApiClient.issues.search();
 71 | 
 72 |     // Apply all filters using helper methods
 73 |     this.applyComponentFilters(builder, params);
 74 |     this.applyIssueFilters(builder, params);
 75 |     this.applyDateAndAssignmentFilters(builder, params);
 76 |     this.applySecurityAndMetadataFilters(builder, params);
 77 | 
 78 |     // Add pagination
 79 |     if (page !== undefined) {
 80 |       builder.page(page);
 81 |     }
 82 |     if (pageSize !== undefined) {
 83 |       builder.pageSize(pageSize);
 84 |     }
 85 | 
 86 |     const response = await builder.execute();
 87 | 
 88 |     // Transform to our interface
 89 |     return {
 90 |       issues: response.issues as SonarQubeIssue[],
 91 |       components: (response.components ?? []).map((comp) => ({
 92 |         key: comp.key,
 93 |         name: comp.name,
 94 |         qualifier: comp.qualifier,
 95 |         enabled: comp.enabled,
 96 |         longName: comp.longName,
 97 |         path: comp.path,
 98 |       })),
 99 |       rules: (response.rules ?? []) as SonarQubeRule[],
100 |       users: response.users,
101 |       facets: response.facets,
102 |       paging: response.paging ?? { pageIndex: 1, pageSize: 100, total: 0 },
103 |     };
104 |   }
105 | 
106 |   /**
107 |    * Apply component-related filters to the issues search builder
108 |    * @param builder The search builder
109 |    * @param params The issues parameters
110 |    */
111 |   private applyComponentFilters(
112 |     builder: ReturnType<typeof this.webApiClient.issues.search>,
113 |     params: IssuesParams
114 |   ): void {
115 |     // Component filters
116 |     if (params.projectKey) {
117 |       builder.withProjects([params.projectKey]);
118 |     }
119 |     if (params.projects) {
120 |       builder.withProjects(params.projects);
121 |     }
122 |     if (params.componentKeys) {
123 |       builder.withComponents(params.componentKeys);
124 |     }
125 |     if (params.components) {
126 |       builder.withComponents(params.components);
127 |     }
128 |     if (params.onComponentOnly) {
129 |       builder.onComponentOnly();
130 |     }
131 |     if (params.directories) {
132 |       builder.withDirectories(params.directories);
133 |     }
134 |     if (params.files) {
135 |       builder.withFiles(params.files);
136 |     }
137 |     if (params.scopes) {
138 |       builder.withScopes(params.scopes);
139 |     }
140 | 
141 |     // Branch and PR
142 |     if (params.branch) {
143 |       builder.onBranch(params.branch);
144 |     }
145 |     if (params.pullRequest) {
146 |       builder.onPullRequest(params.pullRequest);
147 |     }
148 |   }
149 | 
150 |   /**
151 |    * Apply issue-related filters to the search builder
152 |    * @param builder The search builder
153 |    * @param params The issues parameters
154 |    */
155 |   private applyIssueFilters(
156 |     builder: ReturnType<typeof this.webApiClient.issues.search>,
157 |     params: IssuesParams
158 |   ): void {
159 |     // Issue filters
160 |     if (params.issues) {
161 |       builder.withIssues(params.issues);
162 |     }
163 |     if (params.severities) {
164 |       builder.withSeverities(params.severities);
165 |     }
166 |     if (params.statuses) {
167 |       builder.withStatuses(params.statuses);
168 |     }
169 |     if (params.resolutions) {
170 |       builder.withResolutions(params.resolutions);
171 |     }
172 |     if (params.resolved !== undefined) {
173 |       if (params.resolved) {
174 |         builder.onlyResolved();
175 |       } else {
176 |         builder.onlyUnresolved();
177 |       }
178 |     }
179 |     if (params.types) {
180 |       builder.withTypes(params.types);
181 |     }
182 | 
183 |     // Clean Code taxonomy
184 |     if (params.cleanCodeAttributeCategories) {
185 |       builder.withCleanCodeAttributeCategories(params.cleanCodeAttributeCategories);
186 |     }
187 |     if (params.impactSeverities) {
188 |       builder.withImpactSeverities(params.impactSeverities);
189 |     }
190 |     if (params.impactSoftwareQualities) {
191 |       builder.withImpactSoftwareQualities(params.impactSoftwareQualities);
192 |     }
193 |     if (params.issueStatuses) {
194 |       builder.withIssueStatuses(params.issueStatuses);
195 |     }
196 | 
197 |     // Rules and tags
198 |     if (params.rules) {
199 |       builder.withRules(params.rules);
200 |     }
201 |     if (params.tags) {
202 |       builder.withTags(params.tags);
203 |     }
204 |   }
205 | 
206 |   /**
207 |    * Apply date and assignment filters to the search builder
208 |    * @param builder The search builder
209 |    * @param params The issues parameters
210 |    */
211 |   private applyDateAndAssignmentFilters(
212 |     builder: ReturnType<typeof this.webApiClient.issues.search>,
213 |     params: IssuesParams
214 |   ): void {
215 |     // Date filters
216 |     if (params.createdAfter) {
217 |       builder.createdAfter(params.createdAfter);
218 |     }
219 |     if (params.createdBefore) {
220 |       builder.createdBefore(params.createdBefore);
221 |     }
222 |     if (params.createdAt) {
223 |       builder.createdAt(params.createdAt);
224 |     }
225 |     if (params.createdInLast) {
226 |       builder.createdInLast(params.createdInLast);
227 |     }
228 | 
229 |     // Assignment
230 |     if (params.assigned !== undefined) {
231 |       if (params.assigned) {
232 |         builder.onlyAssigned();
233 |       } else {
234 |         builder.onlyUnassigned();
235 |       }
236 |     }
237 |     if (params.assignees) {
238 |       builder.assignedToAny(params.assignees);
239 |     }
240 |     if (params.author) {
241 |       builder.byAuthor(params.author);
242 |     }
243 |     if (params.authors) {
244 |       builder.byAuthors(params.authors);
245 |     }
246 |   }
247 | 
248 |   /**
249 |    * Apply security standards and metadata filters to the search builder
250 |    * @param builder The search builder
251 |    * @param params The issues parameters
252 |    */
253 |   private applySecurityAndMetadataFilters(
254 |     builder: ReturnType<typeof this.webApiClient.issues.search>,
255 |     params: IssuesParams
256 |   ): void {
257 |     // Security standards
258 |     if (params.cwe) {
259 |       builder.withCwe(params.cwe);
260 |     }
261 |     if (params.owaspTop10) {
262 |       builder.withOwaspTop10(params.owaspTop10 as OwaspTop10Category[]);
263 |     }
264 |     if (params.owaspTop10v2021) {
265 |       builder.withOwaspTop10v2021(params.owaspTop10v2021 as OwaspTop10v2021Category[]);
266 |     }
267 |     if (params.sansTop25) {
268 |       // NOTE: withSansTop25 is deprecated since SonarQube 10.0, but kept for backward compatibility
269 |       builder.withSansTop25(params.sansTop25 as SansTop25Category[]);
270 |     }
271 |     if (params.sonarsourceSecurity) {
272 |       builder.withSonarSourceSecurity(params.sonarsourceSecurity);
273 |     }
274 |     if (params.sonarsourceSecurityCategory) {
275 |       builder.withSonarSourceSecurityNew(params.sonarsourceSecurityCategory);
276 |     }
277 | 
278 |     // Languages
279 |     if (params.languages) {
280 |       builder.withLanguages(params.languages);
281 |     }
282 | 
283 |     // Facets
284 |     if (params.facets) {
285 |       builder.withFacets(params.facets as IssueFacet[]);
286 |     }
287 |     if (params.facetMode) {
288 |       builder.withFacetMode(params.facetMode);
289 |     }
290 | 
291 |     // New code
292 |     if (params.sinceLeakPeriod) {
293 |       builder.sinceLeakPeriod();
294 |     }
295 |     if (params.inNewCodePeriod) {
296 |       builder.inNewCodePeriod();
297 |     }
298 | 
299 |     // Sorting
300 |     if (params.s) {
301 |       builder.sortBy(params.s, params.asc);
302 |     }
303 | 
304 |     // Additional fields
305 |     if (params.additionalFields) {
306 |       builder.withAdditionalFields(params.additionalFields);
307 |     }
308 | 
309 |     // Deprecated parameters
310 |     // Note: hotspots parameter is deprecated and not supported by the current API
311 |     if (params.severity) {
312 |       builder.withSeverities([params.severity]);
313 |     }
314 |   }
315 | 
316 |   /**
317 |    * Mark an issue as false positive
318 |    * @param params Parameters including issue key and optional comment
319 |    * @returns Promise with the updated issue and related data
320 |    */
321 |   async markIssueFalsePositive(
322 |     params: MarkIssueFalsePositiveParams
323 |   ): Promise<DoTransitionResponse> {
324 |     const request = {
325 |       issue: params.issueKey,
326 |       transition: 'falsepositive' as const,
327 |     };
328 | 
329 |     // Add comment if provided (using separate API call if needed)
330 |     if (params.comment) {
331 |       // First add the comment, then perform the transition
332 |       await this.webApiClient.issues.addComment({
333 |         issue: params.issueKey,
334 |         text: params.comment,
335 |       });
336 |     }
337 | 
338 |     return this.webApiClient.issues.doTransition(request);
339 |   }
340 | 
341 |   /**
342 |    * Mark an issue as won't fix
343 |    * @param params Parameters including issue key and optional comment
344 |    * @returns Promise with the updated issue and related data
345 |    */
346 |   async markIssueWontFix(params: MarkIssueWontFixParams): Promise<DoTransitionResponse> {
347 |     const request = {
348 |       issue: params.issueKey,
349 |       transition: 'wontfix' as const,
350 |     };
351 | 
352 |     // Add comment if provided (using separate API call if needed)
353 |     if (params.comment) {
354 |       // First add the comment, then perform the transition
355 |       await this.webApiClient.issues.addComment({
356 |         issue: params.issueKey,
357 |         text: params.comment,
358 |       });
359 |     }
360 | 
361 |     return this.webApiClient.issues.doTransition(request);
362 |   }
363 | 
364 |   /**
365 |    * Mark multiple issues as false positive
366 |    * @param params Parameters including issue keys and optional comment
367 |    * @returns Promise with array of updated issues and related data
368 |    */
369 |   async markIssuesFalsePositive(params: BulkIssueMarkParams): Promise<DoTransitionResponse[]> {
370 |     return Promise.all(
371 |       params.issueKeys.map((issueKey) => {
372 |         const requestParams: MarkIssueFalsePositiveParams = {
373 |           issueKey,
374 |           ...(params.comment && { comment: params.comment }),
375 |         };
376 |         return this.markIssueFalsePositive(requestParams);
377 |       })
378 |     );
379 |   }
380 | 
381 |   /**
382 |    * Mark multiple issues as won't fix
383 |    * @param params Parameters including issue keys and optional comment
384 |    * @returns Promise with array of updated issues and related data
385 |    */
386 |   async markIssuesWontFix(params: BulkIssueMarkParams): Promise<DoTransitionResponse[]> {
387 |     const results: DoTransitionResponse[] = [];
388 | 
389 |     for (const issueKey of params.issueKeys) {
390 |       const requestParams: MarkIssueWontFixParams = {
391 |         issueKey,
392 |         ...(params.comment && { comment: params.comment }),
393 |       };
394 |       const result = await this.markIssueWontFix(requestParams);
395 |       results.push(result);
396 |     }
397 | 
398 |     return results;
399 |   }
400 | 
401 |   /**
402 |    * Add a comment to an issue
403 |    * @param params Parameters including issue key and comment text
404 |    * @returns Promise with the created comment details
405 |    */
406 |   async addCommentToIssue(params: AddCommentToIssueParams): Promise<SonarQubeIssueComment> {
407 |     const response = await this.webApiClient.issues.addComment({
408 |       issue: params.issueKey,
409 |       text: params.text,
410 |     });
411 | 
412 |     // The API returns the full issue with comments, so we need to extract the latest comment
413 |     const issue = response.issue as SonarQubeIssue;
414 |     const comments = issue.comments || [];
415 | 
416 |     // Sort comments by timestamp to ensure chronological order
417 |     comments.sort((a, b) => new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime());
418 | 
419 |     // The newly added comment should now be the last one
420 |     const newComment = comments.at(-1);
421 |     if (!newComment) {
422 |       throw new Error('Failed to retrieve the newly added comment');
423 |     }
424 | 
425 |     return newComment;
426 |   }
427 | 
428 |   /**
429 |    * Assign an issue to a user
430 |    * @param params Assignment parameters
431 |    * @returns The updated issue details
432 |    */
433 |   async assignIssue(params: AssignIssueParams): Promise<SonarQubeIssue> {
434 |     // Call the assign API
435 |     const assignRequest: {
436 |       issue: string;
437 |       assignee?: string;
438 |     } = {
439 |       issue: params.issueKey,
440 |       ...(params.assignee && { assignee: params.assignee }),
441 |     };
442 |     await this.webApiClient.issues.assign(assignRequest);
443 | 
444 |     // Fetch and return the updated issue using the same search as getIssues
445 |     const searchBuilder = this.webApiClient.issues.search();
446 |     searchBuilder.withIssues([params.issueKey]);
447 |     searchBuilder.withAdditionalFields(['_all']);
448 | 
449 |     const response = await searchBuilder.execute();
450 | 
451 |     if (!response.issues || response.issues.length === 0) {
452 |       throw new Error(`Issue ${params.issueKey} not found after assignment`);
453 |     }
454 | 
455 |     return response.issues[0] as SonarQubeIssue;
456 |   }
457 | 
458 |   /**
459 |    * Confirm an issue
460 |    * @param params Parameters including issue key and optional comment
461 |    * @returns Promise with the updated issue and related data
462 |    */
463 |   async confirmIssue(params: ConfirmIssueParams): Promise<DoTransitionResponse> {
464 |     const request = {
465 |       issue: params.issueKey,
466 |       transition: 'confirm' as const,
467 |     };
468 | 
469 |     if (params.comment) {
470 |       await this.webApiClient.issues.addComment({
471 |         issue: params.issueKey,
472 |         text: params.comment,
473 |       });
474 |     }
475 | 
476 |     return this.webApiClient.issues.doTransition(request);
477 |   }
478 | 
479 |   /**
480 |    * Unconfirm an issue
481 |    * @param params Parameters including issue key and optional comment
482 |    * @returns Promise with the updated issue and related data
483 |    */
484 |   async unconfirmIssue(params: UnconfirmIssueParams): Promise<DoTransitionResponse> {
485 |     const request = {
486 |       issue: params.issueKey,
487 |       transition: 'unconfirm' as const,
488 |     };
489 | 
490 |     if (params.comment) {
491 |       await this.webApiClient.issues.addComment({
492 |         issue: params.issueKey,
493 |         text: params.comment,
494 |       });
495 |     }
496 | 
497 |     return this.webApiClient.issues.doTransition(request);
498 |   }
499 | 
500 |   /**
501 |    * Resolve an issue
502 |    * @param params Parameters including issue key and optional comment
503 |    * @returns Promise with the updated issue and related data
504 |    */
505 |   async resolveIssue(params: ResolveIssueParams): Promise<DoTransitionResponse> {
506 |     const request = {
507 |       issue: params.issueKey,
508 |       transition: 'resolve' as const,
509 |     };
510 | 
511 |     if (params.comment) {
512 |       await this.webApiClient.issues.addComment({
513 |         issue: params.issueKey,
514 |         text: params.comment,
515 |       });
516 |     }
517 | 
518 |     return this.webApiClient.issues.doTransition(request);
519 |   }
520 | 
521 |   /**
522 |    * Reopen an issue
523 |    * @param params Parameters including issue key and optional comment
524 |    * @returns Promise with the updated issue and related data
525 |    */
526 |   async reopenIssue(params: ReopenIssueParams): Promise<DoTransitionResponse> {
527 |     const request = {
528 |       issue: params.issueKey,
529 |       transition: 'reopen' as const,
530 |     };
531 | 
532 |     if (params.comment) {
533 |       await this.webApiClient.issues.addComment({
534 |         issue: params.issueKey,
535 |         text: params.comment,
536 |       });
537 |     }
538 | 
539 |     return this.webApiClient.issues.doTransition(request);
540 |   }
541 | }
542 | 
```

--------------------------------------------------------------------------------
/src/__tests__/transports/factory.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, expect, it, beforeEach, afterEach } from 'vitest';
  2 | import { TransportFactory } from '../../transports/factory.js';
  3 | import { StdioTransport } from '../../transports/stdio.js';
  4 | import { HttpTransport } from '../../transports/http.js';
  5 | import type { ITransportConfig, IHttpTransportConfig } from '../../transports/base.js';
  6 | 
// Unit tests for TransportFactory: both the config-driven create() and the
// environment-variable-driven createFromEnvironment() entry points.
describe('TransportFactory', () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    // Save original environment
    originalEnv = { ...process.env };
    // Clear all environment variables related to MCP
    delete process.env.MCP_TRANSPORT_TYPE;
    delete process.env.MCP_HTTP_PORT;
    delete process.env.MCP_HTTP_ALLOWED_HOSTS;
    delete process.env.MCP_HTTP_ALLOWED_ORIGINS;
    delete process.env.MCP_HTTP_SESSION_TIMEOUT;
    delete process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION;
  });

  afterEach(() => {
    // Restore original environment
    process.env = originalEnv;
  });

  // create(): explicit construction from a config object.
  describe('create', () => {
    it('should create a stdio transport', () => {
      const config: ITransportConfig = { type: 'stdio' };
      const transport = TransportFactory.create(config);

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should create an http transport', () => {
      const config: IHttpTransportConfig = {
        type: 'http',
        options: {
          port: 3001,
        },
      };
      const transport = TransportFactory.create(config);

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should create http transport with all options', () => {
      const config: IHttpTransportConfig = {
        type: 'http',
        options: {
          port: 3001,
          sessionTimeout: 1800000,
          enableDnsRebindingProtection: true,
          allowedHosts: ['localhost', '192.168.1.1'],
          allowedOrigins: ['http://localhost:3000', 'https://example.com'],
        },
      };

      const transport = TransportFactory.create(config);
      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should create http transport without options', () => {
      const config: IHttpTransportConfig = {
        type: 'http',
      };
      const transport = TransportFactory.create(config);

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should throw error for unsupported transport type', () => {
      const config = { type: 'unsupported' as any } as ITransportConfig;

      expect(() => TransportFactory.create(config)).toThrow(
        'Unsupported transport type: unsupported'
      );
    });

    it('should throw error for undefined transport type', () => {
      const config = {} as ITransportConfig;

      expect(() => TransportFactory.create(config)).toThrow(
        'Unsupported transport type: undefined'
      );
    });

    it('should throw error for null transport type', () => {
      const config = { type: null } as unknown as ITransportConfig;

      expect(() => TransportFactory.create(config)).toThrow('Unsupported transport type: null');
    });
  });

  // createFromEnvironment(): construction driven entirely by MCP_* env vars.
  describe('createFromEnvironment', () => {
    it('should create stdio transport by default', () => {
      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should create stdio transport when MCP_TRANSPORT_TYPE is stdio', () => {
      process.env.MCP_TRANSPORT_TYPE = 'stdio';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should create stdio transport when MCP_TRANSPORT_TYPE is STDIO (uppercase)', () => {
      process.env.MCP_TRANSPORT_TYPE = 'STDIO';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should create http transport when MCP_TRANSPORT_TYPE is http', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should create http transport when MCP_TRANSPORT_TYPE is HTTP (uppercase)', () => {
      process.env.MCP_TRANSPORT_TYPE = 'HTTP';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should parse MCP_HTTP_PORT environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '8080';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      // The factory creates the transport with the port option
      // We can't directly verify the port since it's internal to HttpTransport
      // But we know it was created with the environment config
    });

    it('should parse MCP_HTTP_SESSION_TIMEOUT environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_SESSION_TIMEOUT = '3600000';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ALLOWED_HOSTS environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_HOSTS = 'localhost,127.0.0.1,192.168.1.1';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ALLOWED_HOSTS with spaces', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_HOSTS = 'localhost, 127.0.0.1 , 192.168.1.1';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ALLOWED_ORIGINS environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_ORIGINS = 'http://localhost:3000,https://example.com';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ALLOWED_ORIGINS with spaces', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_ORIGINS = 'http://localhost:3000 , https://example.com , *';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = 'true';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should not enable DNS rebinding protection for other values', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = 'false';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse all HTTP environment variables together', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '3001';
      process.env.MCP_HTTP_SESSION_TIMEOUT = '1800000';
      process.env.MCP_HTTP_ALLOWED_HOSTS = 'localhost,127.0.0.1';
      process.env.MCP_HTTP_ALLOWED_ORIGINS = 'http://localhost:3000,https://example.com';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = 'true';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should handle mixed case transport type', () => {
      process.env.MCP_TRANSPORT_TYPE = 'HtTp';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should create stdio transport for unknown transport type', () => {
      process.env.MCP_TRANSPORT_TYPE = 'unknown';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should handle empty MCP_TRANSPORT_TYPE', () => {
      process.env.MCP_TRANSPORT_TYPE = '';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should handle whitespace in MCP_TRANSPORT_TYPE', () => {
      process.env.MCP_TRANSPORT_TYPE = '  http  ';

      const transport = TransportFactory.createFromEnvironment();

      // Note: Current implementation doesn't trim whitespace, so this becomes stdio
      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should handle invalid MCP_HTTP_PORT gracefully', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = 'invalid';

      // Should not throw, just create with NaN value
      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle empty allowed hosts list', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_HOSTS = '';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle single allowed host', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_HOSTS = 'localhost';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle port number at boundary values', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';

      // Test port 0
      process.env.MCP_HTTP_PORT = '0';
      let transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);

      // Test port 65535
      process.env.MCP_HTTP_PORT = '65535';
      transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);

      // Test negative port (allowed but may not work)
      process.env.MCP_HTTP_PORT = '-1';
      transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle very long session timeout', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_SESSION_TIMEOUT = '999999999999';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle DNS rebinding protection with uppercase TRUE', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = 'TRUE';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle DNS rebinding protection with value 1', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = '1';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should create different instances on multiple calls', () => {
      const transport1 = TransportFactory.createFromEnvironment();
      const transport2 = TransportFactory.createFromEnvironment();

      expect(transport1).toBeInstanceOf(StdioTransport);
      expect(transport2).toBeInstanceOf(StdioTransport);
      expect(transport1).not.toBe(transport2); // Different instances
    });
  });

  // Cross-checks that both construction paths yield equivalent transports.
  describe('integration', () => {
    it('should create equivalent transports using both methods for stdio', () => {
      const configTransport = TransportFactory.create({ type: 'stdio' });
      const envTransport = TransportFactory.createFromEnvironment();

      expect(configTransport.getName()).toBe(envTransport.getName());
      expect(configTransport).toBeInstanceOf(StdioTransport);
      expect(envTransport).toBeInstanceOf(StdioTransport);
    });

    it('should create equivalent transports using both methods for http', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '3001';

      const configTransport = TransportFactory.create({
        type: 'http',
        options: { port: 3001 },
      });
      const envTransport = TransportFactory.createFromEnvironment();

      expect(configTransport.getName()).toBe(envTransport.getName());
      expect(configTransport).toBeInstanceOf(HttpTransport);
      expect(envTransport).toBeInstanceOf(HttpTransport);
    });

    it('should handle multiple environment variable sets', () => {
      // First configuration
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '3000';
      let transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);

      // Change configuration
      process.env.MCP_TRANSPORT_TYPE = 'stdio';
      delete process.env.MCP_HTTP_PORT;
      transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(StdioTransport);

      // Back to HTTP with different port
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '8080';
      transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);
    });
  });

  // Failure modes: invalid config types and malformed environment values.
  describe('error handling', () => {
    it('should provide clear error message for invalid transport type', () => {
      const config = { type: 'websocket' } as any;

      expect(() => TransportFactory.create(config)).toThrow(
        'Unsupported transport type: websocket'
      );
    });

    it('should handle missing type gracefully', () => {
      const config = { options: { port: 3000 } } as any;

      expect(() => TransportFactory.create(config)).toThrow(
        'Unsupported transport type: undefined'
      );
    });

    it('should not throw when creating HTTP transport with invalid port in environment', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = 'not-a-number';

      // Should create transport despite invalid port
      expect(() => TransportFactory.createFromEnvironment()).not.toThrow();
    });

    it('should not throw when creating HTTP transport with invalid timeout in environment', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_SESSION_TIMEOUT = 'not-a-number';

      // Should create transport despite invalid timeout
      expect(() => TransportFactory.createFromEnvironment()).not.toThrow();
    });
  });
});
433 | 
```

--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------

```yaml
  1 | # =============================================================================
  2 | # WORKFLOW: Multi-Channel Package Publishing
  3 | # PURPOSE: Distribute releases to NPM, GitHub Packages, and Docker Hub
  4 | # TRIGGERS: GitHub release publication or manual dispatch
  5 | # OUTPUTS: Published packages to configured registries
  6 | # =============================================================================
  7 | 
  8 | name: Publish
  9 | 
 10 | on:
 11 |   release:
 12 |     types: [published] # Triggered when a GitHub release is published
 13 |   workflow_dispatch: # Manual trigger for re-publishing or testing
 14 |     inputs:
 15 |       tag:
 16 |         description: 'Release tag to publish (e.g., v1.2.3)'
 17 |         required: true
 18 |         type: string
 19 | 
 20 | # Serialize publish runs per ref so only one publish executes at a time;
 21 | # cancel-in-progress is false so an in-flight publish is never aborted by a newer run
 22 | concurrency:
 23 |   group: ${{ github.workflow }}-${{ github.ref }}
 24 |   cancel-in-progress: false
 25 | 
 26 | # Global environment variables for consistency
 27 | env:
 28 |   PNPM_VERSION: 10.17.0 # Pinned: Must match packageManager in package.json
 29 |   NODE_VERSION: 22 # Pinned: Must match engines.node in package.json
 30 | 
 31 | # SECURITY: Minimal required permissions
 32 | # contents: read - Checkout code at release tag
 33 | # packages: write - Publish to GitHub Packages
 34 | # id-token: write - Generate provenance for npm
 35 | permissions:
 36 |   contents: read
 37 |   packages: write
 38 |   id-token: write
 39 | 
 40 | jobs:
 41 |   # =============================================================================
 42 |   # NPM PUBLISHING
 43 |   # Publishes package to npm registry with provenance
 44 |   # =============================================================================
 45 | 
 46 |   npm:
 47 |     name: Publish to NPM
 48 |     runs-on: ubuntu-latest
 49 |     # Only runs if ENABLE_NPM_RELEASE variable is set to 'true'
 50 |     # Configure in Settings > Secrets and variables > Actions, under the Variables tab
 51 |     if: vars.ENABLE_NPM_RELEASE == 'true'
 52 |     permissions:
 53 |       contents: read
 54 |       id-token: write # Required for npm provenance
 55 |       actions: read # Required to download artifacts
 56 |     steps:
 57 |       - name: Determine version
 58 |         id: version
 59 |         # Extract version from release tag or manual input
 60 |         # Strips 'v' prefix to get semver (v1.2.3 -> 1.2.3)
 61 |         run: |
 62 |           VERSION="${{ github.event_name == 'release' && github.event.release.tag_name || inputs.tag }}"
 63 |           VERSION="${VERSION#v}"  # Remove 'v' prefix
 64 |           echo "version=$VERSION" >> $GITHUB_OUTPUT
 65 |           echo "tag=v$VERSION" >> $GITHUB_OUTPUT
 66 |           echo "📦 Publishing to NPM: $VERSION"
 67 | 
 68 |       - name: Checkout code
 69 |         uses: actions/checkout@v4
 70 |         with:
 71 |           # IMPORTANT: Checkout the exact release tag, not latest main
 72 |           # This ensures we publish exactly what was released
 73 |           ref: ${{ steps.version.outputs.tag }}
 74 | 
 75 |       - name: Setup Node.js
 76 |         # Node.js is required for npm publish command
 77 |         uses: actions/setup-node@v4
 78 |         with:
 79 |           node-version: ${{ env.NODE_VERSION }}
 80 |           # Configure npm registry for authentication
 81 |           registry-url: 'https://registry.npmjs.org'
 82 | 
 83 |       - name: Determine artifact source
 84 |         id: artifact
 85 |         # Use shared script to find the correct NPM package artifact from the release build
 86 |         env:
 87 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 88 |         run: |
 89 |           chmod +x .github/scripts/determine-artifact.sh
 90 |           .github/scripts/determine-artifact.sh \
 91 |             --tag "${{ steps.version.outputs.tag }}" \
 92 |             --repo "${{ github.repository }}" \
 93 |             --version "${{ steps.version.outputs.version }}" \
 94 |             --prefix "npm-package" \
 95 |             --output "$GITHUB_OUTPUT"
 96 | 
 97 |       - name: Download pre-built NPM package
 98 |         id: download
 99 |         # Download the pre-built, pre-scanned NPM package from main workflow
100 |         # This ensures we publish exactly what was tested
101 |         uses: actions/download-artifact@v4
102 |         with:
103 |           name: ${{ steps.artifact.outputs.artifact_name }}
104 |           path: ./npm-artifact
105 |           run-id: ${{ steps.artifact.outputs.run_id }}
106 |           github-token: ${{ secrets.GITHUB_TOKEN }}
107 | 
108 |       - name: Extract pre-built package
109 |         run: |
110 |           # Check if any .tgz files exist
111 |           TARBALL=$(find ./npm-artifact -name "*.tgz" -type f | head -1)
112 |           if [ -z "$TARBALL" ]; then
113 |             echo "❌ No .tgz file found in artifact!"
114 |             echo "Contents of ./npm-artifact:"
115 |             ls -la ./npm-artifact/
116 |             exit 1
117 |           fi
118 | 
119 |           echo "✅ Using pre-built NPM package from main workflow"
120 |           echo "📦 Extracting: $TARBALL"
121 |           tar -xzf "$TARBALL"
122 | 
123 |           # The package extracts to a 'package' directory
124 |           # We need to move its contents to the current directory
125 |           if [ -d package ]; then
126 |             cp -r package/* .
127 |             rm -rf package
128 |           fi
129 | 
130 |           echo "📋 Verified package contents from manifest"
131 |           if [ -f ./npm-artifact/npm-package-manifest.txt ]; then
132 |             echo "Package contains $(wc -l < ./npm-artifact/npm-package-manifest.txt) files"
133 |           fi
134 | 
135 |       - name: Check NPM token
136 |         id: check-npm
137 |         # Gracefully handle missing NPM_TOKEN
138 |         # Allows workflow to succeed even without npm publishing
139 |         run: |
140 |           if [ -n "${{ secrets.NPM_TOKEN }}" ]; then
141 |             echo "has_token=true" >> $GITHUB_OUTPUT
142 |             echo "✅ NPM_TOKEN is configured"
143 |           else
144 |             echo "has_token=false" >> $GITHUB_OUTPUT
145 |             echo "⚠️ NPM_TOKEN is not configured, skipping publish"
146 |             # To fix: Add NPM_TOKEN secret in Settings > Secrets
147 |           fi
148 | 
149 |       - name: Publish to NPM
150 |         if: steps.check-npm.outputs.has_token == 'true'
151 |         run: |
152 |           # Remove private flag and prepare script (which runs husky)
153 |           # The prepare script runs even with --ignore-scripts, so we must remove it
154 |           jq 'del(.private) | del(.scripts.prepare)' package.json > tmp.json && mv tmp.json package.json
155 | 
156 |           # Publish with provenance for supply chain security
157 |           # --provenance creates a signed attestation of the build
158 |           npm publish --provenance --access public
159 |         env:
160 |           # SECURITY: NPM_TOKEN required for authentication
161 |           NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
162 | 
163 |   # =============================================================================
164 |   # GITHUB PACKAGES PUBLISHING
165 |   # Publishes package to GitHub's npm registry
166 |   # =============================================================================
167 | 
168 |   github-packages:
169 |     name: Publish to GitHub Packages
170 |     runs-on: ubuntu-latest
171 |     # Only runs if ENABLE_GITHUB_PACKAGES variable is set
172 |     # Useful for private packages within organization
173 |     if: vars.ENABLE_GITHUB_PACKAGES == 'true'
174 |     permissions:
175 |       contents: read
176 |       packages: write # Required to publish to GitHub Packages
177 |       id-token: write # Required for provenance
178 |       actions: read # Required to download artifacts
179 |     steps:
180 |       - name: Determine version
181 |         id: version
182 |         run: |
183 |           VERSION="${{ github.event_name == 'release' && github.event.release.tag_name || inputs.tag }}"
184 |           VERSION="${VERSION#v}"
185 |           echo "version=$VERSION" >> $GITHUB_OUTPUT
186 |           echo "tag=v$VERSION" >> $GITHUB_OUTPUT
187 |           echo "📦 Publishing to GitHub Packages: $VERSION"
188 | 
189 |       - name: Checkout code
190 |         uses: actions/checkout@v4
191 |         with:
192 |           ref: ${{ steps.version.outputs.tag }}
193 | 
194 |       - name: Setup Node.js
195 |         uses: actions/setup-node@v4
196 |         with:
197 |           node-version: ${{ env.NODE_VERSION }}
198 |           # GitHub Packages npm registry URL
199 |           registry-url: 'https://npm.pkg.github.com'
200 | 
201 |       - name: Determine artifact source
202 |         id: artifact
203 |         # Use shared script to find the correct NPM package artifact (same as npm job)
204 |         env:
205 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
206 |         run: |
207 |           chmod +x .github/scripts/determine-artifact.sh
208 |           .github/scripts/determine-artifact.sh \
209 |             --tag "${{ steps.version.outputs.tag }}" \
210 |             --repo "${{ github.repository }}" \
211 |             --version "${{ steps.version.outputs.version }}" \
212 |             --prefix "npm-package" \
213 |             --output "$GITHUB_OUTPUT"
214 | 
215 |       - name: Download pre-built NPM package
216 |         id: download
217 |         uses: actions/download-artifact@v4
218 |         with:
219 |           name: ${{ steps.artifact.outputs.artifact_name }}
220 |           path: ./npm-artifact
221 |           run-id: ${{ steps.artifact.outputs.run_id }}
222 |           github-token: ${{ secrets.GITHUB_TOKEN }}
223 | 
224 |       - name: Extract pre-built package
225 |         run: |
226 |           # Check if any .tgz files exist
227 |           TARBALL=$(find ./npm-artifact -name "*.tgz" -type f | head -1)
228 |           if [ -z "$TARBALL" ]; then
229 |             echo "❌ No .tgz file found in artifact!"
230 |             echo "Contents of ./npm-artifact:"
231 |             ls -la ./npm-artifact/
232 |             exit 1
233 |           fi
234 | 
235 |           echo "✅ Using pre-built NPM package from main workflow"
236 |           echo "📦 Extracting: $TARBALL"
237 |           tar -xzf "$TARBALL"
238 | 
239 |           # The package extracts to a 'package' directory
240 |           if [ -d package ]; then
241 |             cp -r package/* .
242 |             rm -rf package
243 |           fi
244 | 
245 |           echo "📋 Verified package contents"
246 | 
247 |       - name: Publish to GitHub Packages
248 |         run: |
249 |           # Scope package name to organization and remove private flag and prepare script
250 |           # The prepare script runs even with --ignore-scripts, so we must remove it
251 |           jq '.name = "@${{ github.repository_owner }}/" + .name | del(.private) | del(.scripts.prepare)' package.json > tmp.json && mv tmp.json package.json
252 | 
253 |           npm publish --access public
254 |         env:
255 |           # SECURITY: Uses GITHUB_TOKEN for authentication
256 |           # Automatically available, no configuration needed
257 |           NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
258 | 
259 |   # =============================================================================
260 |   # DOCKER HUB PUBLISHING
261 |   # Copies pre-built multi-platform image from GHCR to Docker Hub
262 |   # =============================================================================
263 | 
264 |   docker:
265 |     name: Publish to Docker Hub
266 |     runs-on: ubuntu-latest
267 |     # Only runs if ENABLE_DOCKER_RELEASE variable is set
268 |     # Requires DOCKERHUB_USERNAME and DOCKERHUB_TOKEN secrets
269 |     if: vars.ENABLE_DOCKER_RELEASE == 'true'
270 |     permissions:
271 |       contents: read
272 |       packages: read # Read from GitHub Container Registry
273 |     steps:
274 |       - name: Determine version
275 |         id: version
276 |         run: |
277 |           VERSION="${{ github.event_name == 'release' && github.event.release.tag_name || inputs.tag }}"
278 |           VERSION="${VERSION#v}"
279 |           echo "version=$VERSION" >> $GITHUB_OUTPUT
280 |           echo "tag=v$VERSION" >> $GITHUB_OUTPUT
281 |           echo "🐳 Publishing Docker image: $VERSION"
282 | 
283 |       - name: Check Docker credentials
284 |         id: check-docker
285 |         # Validate Docker Hub credentials exist
286 |         # Allows workflow to succeed without Docker publishing
287 |         run: |
288 |           if [ -n "${{ secrets.DOCKERHUB_USERNAME }}" ] && [ -n "${{ secrets.DOCKERHUB_TOKEN }}" ]; then
289 |             echo "has_credentials=true" >> $GITHUB_OUTPUT
290 |             echo "✅ Docker Hub credentials are configured"
291 |           else
292 |             echo "has_credentials=false" >> $GITHUB_OUTPUT
293 |             echo "⚠️ Docker Hub credentials are not configured, skipping publish"
294 |             # To fix: Add DOCKERHUB_USERNAME and DOCKERHUB_TOKEN in Settings > Secrets
295 |             exit 0
296 |           fi
297 | 
298 |       - name: Set up Docker Buildx
299 |         # Required for imagetools commands
300 |         if: steps.check-docker.outputs.has_credentials == 'true'
301 |         uses: docker/setup-buildx-action@v3
302 | 
303 |       - name: Login to GitHub Container Registry
304 |         # Login to GHCR to pull the pre-built image
305 |         if: steps.check-docker.outputs.has_credentials == 'true'
306 |         uses: docker/login-action@v3
307 |         with:
308 |           registry: ghcr.io
309 |           username: ${{ github.actor }}
310 |           password: ${{ secrets.GITHUB_TOKEN }}
311 | 
312 |       - name: Login to Docker Hub
313 |         # SECURITY: Authenticate with Docker Hub for pushing
314 |         if: steps.check-docker.outputs.has_credentials == 'true'
315 |         uses: docker/login-action@v3
316 |         with:
317 |           username: ${{ secrets.DOCKERHUB_USERNAME }}
318 |           password: ${{ secrets.DOCKERHUB_TOKEN }}
319 | 
320 |       - name: Copy image from GHCR to Docker Hub
321 |         # Use buildx imagetools to copy multi-platform image between registries
322 |         # This properly handles multi-platform manifest lists
323 |         if: steps.check-docker.outputs.has_credentials == 'true'
324 |         run: |
325 |           SOURCE_IMAGE="ghcr.io/${{ github.repository_owner }}/sonarqube-mcp-server"
326 |           TARGET_REPO="${{ secrets.DOCKERHUB_USERNAME }}/${{ github.event.repository.name }}"
327 |           VERSION="${{ steps.version.outputs.version }}"
328 | 
329 |           echo "📤 Copying multi-platform image from GHCR to Docker Hub..."
330 |           echo "Source: $SOURCE_IMAGE:$VERSION"
331 |           echo "Target: $TARGET_REPO:$VERSION"
332 | 
333 |           # Copy image with version tag
334 |           docker buildx imagetools create \
335 |             --tag $TARGET_REPO:$VERSION \
336 |             $SOURCE_IMAGE:$VERSION
337 | 
338 |           echo "🏷️ Creating additional tags..."
339 |           # Create alias tags for latest, major, and major.minor versions
340 |           MAJOR=$(echo "$VERSION" | cut -d. -f1)
341 |           MINOR=$(echo "$VERSION" | cut -d. -f2)
342 | 
343 |           docker buildx imagetools create --tag $TARGET_REPO:latest $TARGET_REPO:$VERSION
344 |           docker buildx imagetools create --tag $TARGET_REPO:$MAJOR $TARGET_REPO:$VERSION
345 |           docker buildx imagetools create --tag $TARGET_REPO:$MAJOR.$MINOR $TARGET_REPO:$VERSION
346 | 
347 |           echo "✅ Docker image published successfully to Docker Hub"
348 |           echo "📋 Published tags: $VERSION, latest, $MAJOR, $MAJOR.$MINOR"
349 | 
350 |   # =============================================================================
351 |   # NOTIFICATION
352 |   # Send status updates to team communication channels
353 |   # =============================================================================
354 | 
355 |   notify:
356 |     name: Notify
357 |     if: always() # Run even if publishing jobs fail
358 |     needs: [npm, docker, github-packages]
359 |     runs-on: ubuntu-latest
360 |     steps:
361 |       - name: Check Slack webhook
362 |         id: check-slack
363 |         # Gracefully handle missing Slack configuration
364 |         run: |
365 |           if [ -n "${{ secrets.SLACK_WEBHOOK }}" ]; then
366 |             echo "has_webhook=true" >> $GITHUB_OUTPUT
367 |           else
368 |             echo "has_webhook=false" >> $GITHUB_OUTPUT
369 |             # Optional: Configure SLACK_WEBHOOK in Settings > Secrets
370 |           fi
371 | 
372 |       - name: Send Slack notification
373 |         # Send release status to Slack channel
374 |         # Shows success/skip/failure for each distribution channel
375 |         if: steps.check-slack.outputs.has_webhook == 'true'
376 |         uses: slackapi/slack-github-action@v2
377 |         with:
378 |           payload: |
379 |             {
380 |               "text": "🚀 Release ${{ github.event_name == 'release' && github.event.release.tag_name || inputs.tag }}",
381 |               "blocks": [
382 |                 {
383 |                   "type": "section",
384 |                   "fields": [
385 |                     {"type": "mrkdwn", "text": "*Repo:*\n${{ github.repository }}"},
386 |                     {"type": "mrkdwn", "text": "*NPM:*\n${{ needs.npm.result == 'success' && '✅' || needs.npm.result == 'skipped' && '⏭️' || '❌' }}"},
387 |                     {"type": "mrkdwn", "text": "*Docker:*\n${{ needs.docker.result == 'success' && '✅' || needs.docker.result == 'skipped' && '⏭️' || '❌' }}"},
388 |                     {"type": "mrkdwn", "text": "*GitHub:*\n${{ needs.github-packages.result == 'success' && '✅' || needs.github-packages.result == 'skipped' && '⏭️' || '❌' }}"}
389 |                   ]
390 |                 }
391 |               ]
392 |             }
393 |         env:
394 |           # SECURITY: Webhook URL for Slack integration
395 |           SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }}
396 | 
```

--------------------------------------------------------------------------------
/src/__tests__/direct-lambdas.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
  2 | import { z } from 'zod';
  3 | // Save the original environment variables
  4 | const originalEnv = process.env;
  5 | // Mock client responses to avoid network calls
  6 | vi.mock('../sonarqube.js', () => {
  7 |   return {
  8 |     SonarQubeClient: vi.fn().mockImplementation(() => ({
  9 |       listProjects: vi.fn<() => Promise<any>>().mockResolvedValue({
 10 |         projects: [{ key: 'test-project', name: 'Test Project' }],
 11 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 12 |       }),
 13 |       getIssues: vi.fn<() => Promise<any>>().mockResolvedValue({
 14 |         issues: [{ key: 'test-issue', rule: 'test-rule' }],
 15 |         components: [],
 16 |         rules: [],
 17 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 18 |       }),
 19 |       getMetrics: vi.fn<() => Promise<any>>().mockResolvedValue({
 20 |         metrics: [{ key: 'test-metric', name: 'Test Metric' }],
 21 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 22 |       }),
 23 |       getHealth: vi.fn<() => Promise<any>>().mockResolvedValue({ health: 'GREEN', causes: [] }),
 24 |       getStatus: vi
 25 |         .fn<() => Promise<any>>()
 26 |         .mockResolvedValue({ id: 'id', version: '1.0', status: 'UP' }),
 27 |       ping: vi.fn<() => Promise<any>>().mockResolvedValue('pong'),
 28 |       getComponentMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
 29 |         component: { key: 'test-component', measures: [] },
 30 |         metrics: [],
 31 |       }),
 32 |       getComponentsMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
 33 |         components: [{ key: 'test-component', measures: [] }],
 34 |         metrics: [],
 35 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 36 |       }),
 37 |       getMeasuresHistory: vi.fn<() => Promise<any>>().mockResolvedValue({
 38 |         measures: [{ metric: 'coverage', history: [] }],
 39 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 40 |       }),
 41 |     })),
 42 |     setSonarQubeElicitationManager: vi.fn(),
 43 |     createSonarQubeClientFromEnv: vi.fn(() => ({
 44 |       listProjects: vi.fn<() => Promise<any>>().mockResolvedValue({
 45 |         projects: [{ key: 'test-project', name: 'Test Project' }],
 46 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 47 |       }),
 48 |       getIssues: vi.fn<() => Promise<any>>().mockResolvedValue({
 49 |         issues: [{ key: 'test-issue', rule: 'test-rule' }],
 50 |         components: [],
 51 |         rules: [],
 52 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 53 |       }),
 54 |       getMetrics: vi.fn<() => Promise<any>>().mockResolvedValue({
 55 |         metrics: [{ key: 'test-metric', name: 'Test Metric' }],
 56 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 57 |       }),
 58 |       getHealth: vi.fn<() => Promise<any>>().mockResolvedValue({ health: 'GREEN', causes: [] }),
 59 |       getStatus: vi
 60 |         .fn<() => Promise<any>>()
 61 |         .mockResolvedValue({ id: 'id', version: '1.0', status: 'UP' }),
 62 |       ping: vi.fn<() => Promise<any>>().mockResolvedValue('pong'),
 63 |       getComponentMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
 64 |         component: { key: 'test-component', measures: [] },
 65 |         metrics: [],
 66 |       }),
 67 |       getComponentsMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
 68 |         components: [{ key: 'test-component', measures: [] }],
 69 |         metrics: [],
 70 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 71 |       }),
 72 |       getMeasuresHistory: vi.fn<() => Promise<any>>().mockResolvedValue({
 73 |         measures: [{ metric: 'coverage', history: [] }],
 74 |         paging: { pageIndex: 1, pageSize: 10, total: 1 },
 75 |       }),
 76 |     })),
 77 |   };
 78 | });
 79 | describe('Direct Lambda Testing', () => {
 80 |   let index: typeof import('../index.js');
 81 |   beforeEach(async () => {
 82 |     vi.resetModules();
 83 |     process.env = { ...originalEnv };
 84 |     process.env.SONARQUBE_TOKEN = 'test-token';
 85 |     process.env.SONARQUBE_URL = 'http://localhost:9000';
 86 |     // Import the module for each test to ensure it's fresh
 87 |     index = await import('../index.js');
 88 |   });
 89 |   afterEach(() => {
 90 |     process.env = originalEnv;
 91 |     vi.clearAllMocks();
 92 |   });
 93 |   describe('Direct Lambda Function Execution', () => {
 94 |     // Directly extract the lambda functions from mcpServer.tool calls
 95 |     it('should execute metrics lambda function', async () => {
 96 |       // Get the metrics lambda function (simulating how it's registered)
 97 |       const metricsLambda = async (params: Record<string, unknown>) => {
 98 |         const page = index.nullToUndefined(params.page) as number | undefined;
 99 |         const pageSize = index.nullToUndefined(params.page_size) as number | undefined;
100 |         const metricsParams = { page, pageSize };
101 |         const result = await index.handleSonarQubeGetMetrics(metricsParams);
102 |         return {
103 |           content: [
104 |             {
105 |               type: 'text',
106 |               text: JSON.stringify(result, null, 2),
107 |             },
108 |           ],
109 |         };
110 |       };
111 |       // Execute the lambda function
112 |       const result = await metricsLambda({ page: '1', page_size: '10' });
113 |       // Verify the result structure
114 |       expect(result).toBeDefined();
115 |       expect(result.content).toBeDefined();
116 |       expect(result.content[0]?.type).toBe('text');
117 |       expect(result.content[0]?.text).toBeDefined();
118 |       // Parse the content to verify data structure
119 |       const data = JSON.parse(result.content[0]!.text);
120 |       expect(data.content[0]!.type).toBe('text');
121 |     });
122 |     it('should execute issues lambda function', async () => {
123 |       // Simulate the issues lambda function
124 |       const issuesLambda = async (params: Record<string, unknown>) => {
125 |         return index.handleSonarQubeGetIssues(index.mapToSonarQubeParams(params));
126 |       };
127 |       // Execute the lambda function
128 |       const result = await issuesLambda({ project_key: 'test-project', severity: 'MAJOR' });
129 |       // Verify the result structure
130 |       expect(result).toBeDefined();
131 |       expect(result.content).toBeDefined();
132 |       expect(result.content[0]!.type).toBe('text');
133 |     });
134 |     it('should execute measures_component lambda function with string metrics', async () => {
135 |       // Simulate the measures_component lambda function
136 |       const measuresLambda = async (params: Record<string, unknown>) => {
137 |         const componentParams: {
138 |           component: string;
139 |           metricKeys: string[];
140 |           additionalFields?: string[];
141 |           branch?: string;
142 |           pullRequest?: string;
143 |           period?: string;
144 |         } = {
145 |           component: params.component as string,
146 |           metricKeys: Array.isArray(params.metric_keys)
147 |             ? (params.metric_keys as string[])
148 |             : [params.metric_keys as string],
149 |         };
150 |         if (params.additional_fields)
151 |           componentParams.additionalFields = params.additional_fields as string[];
152 |         if (params.branch) componentParams.branch = params.branch as string;
153 |         if (params.pull_request) componentParams.pullRequest = params.pull_request as string;
154 |         if (params.period) componentParams.period = params.period as string;
155 |         return index.handleSonarQubeComponentMeasures(componentParams);
156 |       };
157 |       // Execute the lambda function with string metric
158 |       const result = await measuresLambda({
159 |         component: 'test-component',
160 |         metric_keys: 'coverage',
161 |       });
162 |       // Verify the result structure
163 |       expect(result).toBeDefined();
164 |       expect(result.content).toBeDefined();
165 |       expect(result.content[0]!.type).toBe('text');
166 |     });
167 |     it('should execute measures_component lambda function with array metrics', async () => {
168 |       // Simulate the measures_component lambda function
169 |       const measuresLambda = async (params: Record<string, unknown>) => {
170 |         const componentParams: {
171 |           component: string;
172 |           metricKeys: string[];
173 |           additionalFields?: string[];
174 |           branch?: string;
175 |           pullRequest?: string;
176 |           period?: string;
177 |         } = {
178 |           component: params.component as string,
179 |           metricKeys: Array.isArray(params.metric_keys)
180 |             ? (params.metric_keys as string[])
181 |             : [params.metric_keys as string],
182 |         };
183 |         if (params.additional_fields)
184 |           componentParams.additionalFields = params.additional_fields as string[];
185 |         if (params.branch) componentParams.branch = params.branch as string;
186 |         if (params.pull_request) componentParams.pullRequest = params.pull_request as string;
187 |         if (params.period) componentParams.period = params.period as string;
188 |         return index.handleSonarQubeComponentMeasures(componentParams);
189 |       };
190 |       // Execute the lambda function with array metrics
191 |       const result = await measuresLambda({
192 |         component: 'test-component',
193 |         metric_keys: ['coverage', 'bugs'],
194 |         additional_fields: ['periods'],
195 |         branch: 'main',
196 |         pull_request: 'pr-123',
197 |         period: '1',
198 |       });
199 |       // Verify the result structure
200 |       expect(result).toBeDefined();
201 |       expect(result.content).toBeDefined();
202 |       expect(result.content[0]!.type).toBe('text');
203 |     });
204 |     it('should execute measures_components lambda function', async () => {
205 |       // Simulate the measures_components lambda function
206 |       const componentsLambda = async (params: Record<string, unknown>) => {
207 |         const page = index.nullToUndefined(params.page) as number | undefined;
208 |         const pageSize = index.nullToUndefined(params.page_size) as number | undefined;
209 |         const componentsParams: {
210 |           componentKeys: string[];
211 |           metricKeys: string[];
212 |           additionalFields?: string[];
213 |           branch?: string;
214 |           pullRequest?: string;
215 |           period?: string;
216 |           page: number | undefined;
217 |           pageSize: number | undefined;
218 |         } = {
219 |           componentKeys: Array.isArray(params.component_keys)
220 |             ? (params.component_keys as string[])
221 |             : [params.component_keys as string],
222 |           metricKeys: Array.isArray(params.metric_keys)
223 |             ? (params.metric_keys as string[])
224 |             : [params.metric_keys as string],
225 |           page,
226 |           pageSize,
227 |         };
228 |         if (params.additional_fields)
229 |           componentsParams.additionalFields = params.additional_fields as string[];
230 |         if (params.branch) componentsParams.branch = params.branch as string;
231 |         if (params.pull_request) componentsParams.pullRequest = params.pull_request as string;
232 |         if (params.period) componentsParams.period = params.period as string;
233 |         return index.handleSonarQubeComponentsMeasures(componentsParams);
234 |       };
235 |       // Execute the lambda function
236 |       const result = await componentsLambda({
237 |         component_keys: ['comp1', 'comp2'],
238 |         metric_keys: ['coverage', 'bugs'],
239 |         page: '1',
240 |         page_size: '10',
241 |         additional_fields: ['periods'],
242 |         branch: 'main',
243 |       });
244 |       // Verify the result structure
245 |       expect(result).toBeDefined();
246 |       expect(result.content).toBeDefined();
247 |       expect(result.content[0]!.type).toBe('text');
248 |     });
249 |     it('should execute measures_history lambda function', async () => {
250 |       // Simulate the measures_history lambda function
251 |       const historyLambda = async (params: Record<string, unknown>) => {
252 |         const page = index.nullToUndefined(params.page) as number | undefined;
253 |         const pageSize = index.nullToUndefined(params.page_size) as number | undefined;
254 |         const historyParams: {
255 |           component: string;
256 |           metrics: string[];
257 |           from?: string;
258 |           to?: string;
259 |           branch?: string;
260 |           pullRequest?: string;
261 |           page: number | undefined;
262 |           pageSize: number | undefined;
263 |         } = {
264 |           component: params.component as string,
265 |           metrics: Array.isArray(params.metrics)
266 |             ? (params.metrics as string[])
267 |             : [params.metrics as string],
268 |           page,
269 |           pageSize,
270 |         };
271 |         if (params.from) historyParams.from = params.from as string;
272 |         if (params.to) historyParams.to = params.to as string;
273 |         if (params.branch) historyParams.branch = params.branch as string;
274 |         if (params.pull_request) historyParams.pullRequest = params.pull_request as string;
275 |         return index.handleSonarQubeMeasuresHistory(historyParams);
276 |       };
277 |       // Execute the lambda function
278 |       const result = await historyLambda({
279 |         component: 'test-component',
280 |         metrics: 'coverage',
281 |         from: '2023-01-01',
282 |         to: '2023-12-31',
283 |         branch: 'main',
284 |         page: '1',
285 |         page_size: '10',
286 |       });
287 |       // Verify the result structure
288 |       expect(result).toBeDefined();
289 |       expect(result.content).toBeDefined();
290 |       expect(result.content[0]!.type).toBe('text');
291 |     });
292 |   });
293 |   describe('Schema Transformations', () => {
294 |     it('should test page schema transformations', () => {
295 |       // Create a schema similar to what's in the actual code
296 |       const pageSchema = z
297 |         .string()
298 |         .optional()
299 |         .transform((val: any) => (val ? parseInt(val, 10) || null : null));
300 |       // Test valid numeric strings
301 |       expect(pageSchema.parse('10')).toBe(10);
302 |       expect(pageSchema.parse('100')).toBe(100);
303 |       // Test invalid inputs
304 |       expect(pageSchema.parse('')).toBe(null);
305 |       expect(pageSchema.parse('abc')).toBe(null);
306 |       expect(pageSchema.parse(undefined)).toBe(null);
307 |     });
308 |     it('should test boolean schema transformations', () => {
309 |       // Create a schema similar to what's in the actual code
310 |       const booleanSchema = z
311 |         .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
312 |         .nullable()
313 |         .optional();
314 |       // Test string values
315 |       expect(booleanSchema.parse('true')).toBe(true);
316 |       expect(booleanSchema.parse('false')).toBe(false);
317 |       // Test boolean values
318 |       expect(booleanSchema.parse(true)).toBe(true);
319 |       expect(booleanSchema.parse(false)).toBe(false);
320 |       // Test null/undefined
321 |       expect(booleanSchema.parse(null)).toBe(null);
322 |       expect(booleanSchema.parse(undefined)).toBe(undefined);
323 |     });
324 |     it('should test status schema validations', () => {
325 |       // Create a schema similar to what's in the actual code
326 |       const statusSchema = z
327 |         .array(
328 |           z.enum([
329 |             'OPEN',
330 |             'CONFIRMED',
331 |             'REOPENED',
332 |             'RESOLVED',
333 |             'CLOSED',
334 |             'TO_REVIEW',
335 |             'IN_REVIEW',
336 |             'REVIEWED',
337 |           ])
338 |         )
339 |         .nullable()
340 |         .optional();
341 |       // Test valid values
342 |       expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
343 |       // Test null/undefined
344 |       expect(statusSchema.parse(null)).toBe(null);
345 |       expect(statusSchema.parse(undefined)).toBe(undefined);
346 |       // Test invalid values (should throw)
347 |       expect(() => statusSchema.parse(['INVALID'])).toThrow();
348 |     });
349 |     it('should test resolution schema validations', () => {
350 |       // Create a schema similar to what's in the actual code
351 |       const resolutionSchema = z
352 |         .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
353 |         .nullable()
354 |         .optional();
355 |       // Test valid values
356 |       expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
357 |         'FALSE-POSITIVE',
358 |         'WONTFIX',
359 |       ]);
360 |       // Test null/undefined
361 |       expect(resolutionSchema.parse(null)).toBe(null);
362 |       expect(resolutionSchema.parse(undefined)).toBe(undefined);
363 |       // Test invalid values (should throw)
364 |       expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
365 |     });
366 |     it('should test type schema validations', () => {
367 |       // Create a schema similar to what's in the actual code
368 |       const typeSchema = z
369 |         .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
370 |         .nullable()
371 |         .optional();
372 |       // Test valid values
373 |       expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
374 |       // Test null/undefined
375 |       expect(typeSchema.parse(null)).toBe(null);
376 |       expect(typeSchema.parse(undefined)).toBe(undefined);
377 |       // Test invalid values (should throw)
378 |       expect(() => typeSchema.parse(['INVALID'])).toThrow();
379 |     });
380 |     it('should test severity schema validations', () => {
381 |       // Create a schema similar to what's in the actual code
382 |       const severitySchema = z
383 |         .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
384 |         .nullable()
385 |         .optional();
386 |       // Test valid values
387 |       expect(severitySchema.parse('INFO')).toBe('INFO');
388 |       expect(severitySchema.parse('MINOR')).toBe('MINOR');
389 |       expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
390 |       expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
391 |       expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
392 |       // Test null/undefined
393 |       expect(severitySchema.parse(null)).toBe(null);
394 |       expect(severitySchema.parse(undefined)).toBe(undefined);
395 |       // Test invalid values (should throw)
396 |       expect(() => severitySchema.parse('INVALID')).toThrow();
397 |     });
398 |   });
399 | });
400 | 
```

--------------------------------------------------------------------------------
/src/__tests__/schemas/issues-schema.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { z } from 'zod';
  2 | import {
  3 |   issuesToolSchema,
  4 |   markIssueFalsePositiveToolSchema,
  5 |   markIssueWontFixToolSchema,
  6 |   markIssuesFalsePositiveToolSchema,
  7 |   markIssuesWontFixToolSchema,
  8 |   addCommentToIssueToolSchema,
  9 |   assignIssueToolSchema,
 10 |   confirmIssueToolSchema,
 11 |   unconfirmIssueToolSchema,
 12 |   resolveIssueToolSchema,
 13 |   reopenIssueToolSchema,
 14 | } from '../../schemas/issues.js';
 15 | 
// Validation tests for the issues tool schema: Zod coercions
// (string→number pages, string→boolean flags), nullable arrays,
// and enum validation for severities/statuses/impact fields.
describe('issuesToolSchema', () => {
  it('should validate minimal issues parameters', () => {
    const input = {};
    const result = z.object(issuesToolSchema).parse(input);
    expect(result).toEqual({});
  });

  it('should validate issues with project key', () => {
    const input = {
      project_key: 'my-project',
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.project_key).toBe('my-project');
  });

  it('should validate issues with all filter parameters', () => {
    // Exercises every supported filter at once, including security
    // standards (CWE/OWASP/SANS), clean-code attributes, and paging.
    const input = {
      project_key: 'my-project',
      projects: ['proj1', 'proj2'],
      branch: 'main',
      pull_request: '123',
      issues: ['ISSUE-1', 'ISSUE-2'],
      severities: ['BLOCKER', 'CRITICAL'],
      severity: 'MAJOR',
      statuses: ['OPEN', 'CONFIRMED'],
      issue_statuses: ['OPEN', 'CONFIRMED'],
      resolutions: ['FALSE-POSITIVE', 'WONTFIX'],
      resolved: true,
      rules: ['java:S1234', 'java:S5678'],
      tags: ['security', 'performance'],
      types: ['BUG', 'VULNERABILITY'],
      languages: ['java', 'javascript'],
      component_keys: ['comp1', 'comp2'],
      components: ['comp3', 'comp4'],
      on_component_only: false,
      created_after: '2023-01-01',
      created_before: '2023-12-31',
      created_at: '2023-06-15',
      created_in_last: '7d',
      assigned: true,
      assignees: ['user1', 'user2'],
      author: 'author1',
      authors: ['author1', 'author2'],
      cwe: ['79', '89'],
      owasp_top10: ['a1', 'a3'],
      owasp_top10_v2021: ['a01', 'a03'],
      sans_top25: ['insecure-interaction', 'risky-resource'],
      sonarsource_security: ['sql-injection', 'xss'],
      sonarsource_security_category: ['injection'],
      clean_code_attribute_categories: ['INTENTIONAL', 'RESPONSIBLE'],
      impact_severities: ['HIGH', 'MEDIUM'],
      impact_software_qualities: ['SECURITY', 'RELIABILITY'],
      facets: ['severities', 'types'],
      facet_mode: 'effort',
      additional_fields: ['_all'],
      in_new_code_period: true,
      since_leak_period: false,
      s: 'FILE_LINE',
      asc: false,
      page: '2',
      page_size: '50',
    };

    const result = z.object(issuesToolSchema).parse(input);
    expect(result.project_key).toBe('my-project');
    expect(result.projects).toEqual(['proj1', 'proj2']);
    expect(result.severities).toEqual(['BLOCKER', 'CRITICAL']);
    expect(result.impact_severities).toEqual(['HIGH', 'MEDIUM']);
    expect(result.clean_code_attribute_categories).toEqual(['INTENTIONAL', 'RESPONSIBLE']);
  });

  it('should handle null values for optional arrays', () => {
    // Nullable array fields accept null and pass it through unchanged
    const input = {
      projects: null,
      severities: null,
      tags: null,
      rules: null,
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.projects).toBeNull();
    expect(result.severities).toBeNull();
    expect(result.tags).toBeNull();
    expect(result.rules).toBeNull();
  });

  it('should handle boolean string conversions', () => {
    // String 'true'/'false' inputs are coerced to real booleans
    const input = {
      resolved: 'true',
      assigned: 'false',
      on_component_only: 'true',
      in_new_code_period: 'false',
      since_leak_period: 'true',
      asc: 'false',
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.resolved).toBe(true);
    expect(result.assigned).toBe(false);
    expect(result.on_component_only).toBe(true);
    expect(result.in_new_code_period).toBe(false);
    expect(result.since_leak_period).toBe(true);
    expect(result.asc).toBe(false);
  });

  it('should handle page number string conversions', () => {
    // Numeric-string paging values are coerced to numbers
    const input = {
      page: '3',
      page_size: '25',
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.page).toBe(3);
    expect(result.page_size).toBe(25);
  });

  it('should reject invalid severity values', () => {
    const input = {
      severities: ['INVALID'],
    };
    expect(() => z.object(issuesToolSchema).parse(input)).toThrow();
  });

  it('should reject invalid status values', () => {
    const input = {
      statuses: ['INVALID_STATUS'],
    };
    expect(() => z.object(issuesToolSchema).parse(input)).toThrow();
  });

  it('should reject invalid impact severity values', () => {
    const input = {
      impact_severities: ['VERY_HIGH'],
    };
    expect(() => z.object(issuesToolSchema).parse(input)).toThrow();
  });

  it('should reject invalid clean code categories', () => {
    const input = {
      clean_code_attribute_categories: ['INVALID_CATEGORY'],
    };
    expect(() => z.object(issuesToolSchema).parse(input)).toThrow();
  });

  it('should handle empty arrays', () => {
    // Empty arrays are valid and preserved as-is
    const input = {
      projects: [],
      tags: [],
      rules: [],
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.projects).toEqual([]);
    expect(result.tags).toEqual([]);
    expect(result.rules).toEqual([]);
  });

  it('should handle partial parameters', () => {
    const input = {
      project_key: 'test',
      severities: ['MAJOR'],
      page: '1',
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.project_key).toBe('test');
    expect(result.severities).toEqual(['MAJOR']);
    expect(result.page).toBe(1);
    // Unspecified optional fields stay undefined rather than defaulting
    expect(result.branch).toBeUndefined();
    expect(result.tags).toBeUndefined();
  });
});
183 | 
184 | describe('markIssueFalsePositiveToolSchema', () => {
185 |   it('should validate minimal parameters with issue key', () => {
186 |     const input = {
187 |       issue_key: 'ISSUE-123',
188 |     };
189 |     const result = z.object(markIssueFalsePositiveToolSchema).parse(input);
190 |     expect(result.issue_key).toBe('ISSUE-123');
191 |     expect(result.comment).toBeUndefined();
192 |   });
193 | 
194 |   it('should validate parameters with comment', () => {
195 |     const input = {
196 |       issue_key: 'ISSUE-123',
197 |       comment: 'This is a false positive because...',
198 |     };
199 |     const result = z.object(markIssueFalsePositiveToolSchema).parse(input);
200 |     expect(result.issue_key).toBe('ISSUE-123');
201 |     expect(result.comment).toBe('This is a false positive because...');
202 |   });
203 | 
204 |   it('should reject missing issue key', () => {
205 |     const input = {
206 |       comment: 'Missing issue key',
207 |     };
208 |     expect(() => z.object(markIssueFalsePositiveToolSchema).parse(input)).toThrow();
209 |   });
210 | });
211 | 
212 | describe('markIssueWontFixToolSchema', () => {
213 |   it('should validate minimal parameters with issue key', () => {
214 |     const input = {
215 |       issue_key: 'ISSUE-456',
216 |     };
217 |     const result = z.object(markIssueWontFixToolSchema).parse(input);
218 |     expect(result.issue_key).toBe('ISSUE-456');
219 |     expect(result.comment).toBeUndefined();
220 |   });
221 | 
222 |   it('should validate parameters with comment', () => {
223 |     const input = {
224 |       issue_key: 'ISSUE-456',
225 |       comment: "Won't fix because it's acceptable in this context",
226 |     };
227 |     const result = z.object(markIssueWontFixToolSchema).parse(input);
228 |     expect(result.issue_key).toBe('ISSUE-456');
229 |     expect(result.comment).toBe("Won't fix because it's acceptable in this context");
230 |   });
231 | 
232 |   it('should reject missing issue key', () => {
233 |     const input = {
234 |       comment: 'Missing issue key',
235 |     };
236 |     expect(() => z.object(markIssueWontFixToolSchema).parse(input)).toThrow();
237 |   });
238 | });
239 | 
240 | describe('markIssuesFalsePositiveToolSchema', () => {
241 |   it('should validate minimal parameters with issue keys array', () => {
242 |     const input = {
243 |       issue_keys: ['ISSUE-123', 'ISSUE-124', 'ISSUE-125'],
244 |     };
245 |     const result = z.object(markIssuesFalsePositiveToolSchema).parse(input);
246 |     expect(result.issue_keys).toEqual(['ISSUE-123', 'ISSUE-124', 'ISSUE-125']);
247 |     expect(result.comment).toBeUndefined();
248 |   });
249 | 
250 |   it('should validate parameters with comment', () => {
251 |     const input = {
252 |       issue_keys: ['ISSUE-123', 'ISSUE-124'],
253 |       comment: 'Bulk marking as false positives',
254 |     };
255 |     const result = z.object(markIssuesFalsePositiveToolSchema).parse(input);
256 |     expect(result.issue_keys).toEqual(['ISSUE-123', 'ISSUE-124']);
257 |     expect(result.comment).toBe('Bulk marking as false positives');
258 |   });
259 | 
260 |   it('should validate single issue in array', () => {
261 |     const input = {
262 |       issue_keys: ['ISSUE-123'],
263 |     };
264 |     const result = z.object(markIssuesFalsePositiveToolSchema).parse(input);
265 |     expect(result.issue_keys).toEqual(['ISSUE-123']);
266 |   });
267 | 
268 |   it('should reject empty issue keys array', () => {
269 |     const input = {
270 |       issue_keys: [],
271 |     };
272 |     expect(() => z.object(markIssuesFalsePositiveToolSchema).parse(input)).toThrow();
273 |   });
274 | 
275 |   it('should reject missing issue keys', () => {
276 |     const input = {
277 |       comment: 'Missing issue keys',
278 |     };
279 |     expect(() => z.object(markIssuesFalsePositiveToolSchema).parse(input)).toThrow();
280 |   });
281 | });
282 | 
283 | describe('markIssuesWontFixToolSchema', () => {
284 |   it('should validate minimal parameters with issue keys array', () => {
285 |     const input = {
286 |       issue_keys: ['ISSUE-456', 'ISSUE-457', 'ISSUE-458'],
287 |     };
288 |     const result = z.object(markIssuesWontFixToolSchema).parse(input);
289 |     expect(result.issue_keys).toEqual(['ISSUE-456', 'ISSUE-457', 'ISSUE-458']);
290 |     expect(result.comment).toBeUndefined();
291 |   });
292 | 
293 |   it('should validate parameters with comment', () => {
294 |     const input = {
295 |       issue_keys: ['ISSUE-456', 'ISSUE-457'],
296 |       comment: "Bulk marking as won't fix",
297 |     };
298 |     const result = z.object(markIssuesWontFixToolSchema).parse(input);
299 |     expect(result.issue_keys).toEqual(['ISSUE-456', 'ISSUE-457']);
300 |     expect(result.comment).toBe("Bulk marking as won't fix");
301 |   });
302 | 
303 |   it('should validate single issue in array', () => {
304 |     const input = {
305 |       issue_keys: ['ISSUE-456'],
306 |     };
307 |     const result = z.object(markIssuesWontFixToolSchema).parse(input);
308 |     expect(result.issue_keys).toEqual(['ISSUE-456']);
309 |   });
310 | 
311 |   it('should reject empty issue keys array', () => {
312 |     const input = {
313 |       issue_keys: [],
314 |     };
315 |     expect(() => z.object(markIssuesWontFixToolSchema).parse(input)).toThrow();
316 |   });
317 | 
318 |   it('should reject missing issue keys', () => {
319 |     const input = {
320 |       comment: 'Missing issue keys',
321 |     };
322 |     expect(() => z.object(markIssuesWontFixToolSchema).parse(input)).toThrow();
323 |   });
324 | });
325 | 
326 | describe('addCommentToIssueToolSchema', () => {
327 |   it('should validate parameters with issue key and text', () => {
328 |     const input = {
329 |       issue_key: 'ISSUE-789',
330 |       text: 'This is a comment with **markdown** support',
331 |     };
332 |     const result = z.object(addCommentToIssueToolSchema).parse(input);
333 |     expect(result.issue_key).toBe('ISSUE-789');
334 |     expect(result.text).toBe('This is a comment with **markdown** support');
335 |   });
336 | 
337 |   it('should validate plain text comment', () => {
338 |     const input = {
339 |       issue_key: 'ISSUE-100',
340 |       text: 'Plain text comment without formatting',
341 |     };
342 |     const result = z.object(addCommentToIssueToolSchema).parse(input);
343 |     expect(result.issue_key).toBe('ISSUE-100');
344 |     expect(result.text).toBe('Plain text comment without formatting');
345 |   });
346 | 
347 |   it('should validate multi-line comment', () => {
348 |     const input = {
349 |       issue_key: 'ISSUE-200',
350 |       text: 'Line 1\nLine 2\n\n- Bullet point\n- Another bullet',
351 |     };
352 |     const result = z.object(addCommentToIssueToolSchema).parse(input);
353 |     expect(result.issue_key).toBe('ISSUE-200');
354 |     expect(result.text).toBe('Line 1\nLine 2\n\n- Bullet point\n- Another bullet');
355 |   });
356 | 
357 |   it('should validate markdown with code blocks', () => {
358 |     const input = {
359 |       issue_key: 'ISSUE-300',
360 |       text: 'Here is some code:\n\n```java\npublic void test() {\n  System.out.println("Hello");\n}\n```',
361 |     };
362 |     const result = z.object(addCommentToIssueToolSchema).parse(input);
363 |     expect(result.issue_key).toBe('ISSUE-300');
364 |     expect(result.text).toContain('```java');
365 |   });
366 | 
367 |   it('should reject missing issue key', () => {
368 |     const input = {
369 |       text: 'Comment without issue key',
370 |     };
371 |     expect(() => z.object(addCommentToIssueToolSchema).parse(input)).toThrow();
372 |   });
373 | 
374 |   it('should reject missing text', () => {
375 |     const input = {
376 |       issue_key: 'ISSUE-789',
377 |     };
378 |     expect(() => z.object(addCommentToIssueToolSchema).parse(input)).toThrow();
379 |   });
380 | 
381 |   it('should reject empty text', () => {
382 |     const input = {
383 |       issue_key: 'ISSUE-789',
384 |       text: '',
385 |     };
386 |     expect(() => z.object(addCommentToIssueToolSchema).parse(input)).toThrow();
387 |   });
388 | 
389 |   it('should reject empty issue key', () => {
390 |     const input = {
391 |       issue_key: '',
392 |       text: 'Valid comment',
393 |     };
394 |     expect(() => z.object(addCommentToIssueToolSchema).parse(input)).toThrow();
395 |   });
396 | 
397 |   it('should accept single character text', () => {
398 |     const input = {
399 |       issue_key: 'ISSUE-789',
400 |       text: 'X',
401 |     };
402 |     const result = z.object(addCommentToIssueToolSchema).parse(input);
403 |     expect(result.text).toBe('X');
404 |   });
405 | 
406 |   it('should handle very long comments', () => {
407 |     const longText = 'A'.repeat(10000);
408 |     const input = {
409 |       issue_key: 'ISSUE-789',
410 |       text: longText,
411 |     };
412 |     const result = z.object(addCommentToIssueToolSchema).parse(input);
413 |     expect(result.text).toBe(longText);
414 |   });
415 | 
416 |   it('should handle special characters in comments', () => {
417 |     const input = {
418 |       issue_key: 'ISSUE-789',
419 |       text: 'Special chars: <>&"\'`@#$%^&*()[]{}|\\;:,.?/',
420 |     };
421 |     const result = z.object(addCommentToIssueToolSchema).parse(input);
422 |     expect(result.text).toBe('Special chars: <>&"\'`@#$%^&*()[]{}|\\;:,.?/');
423 |   });
424 | 
425 |   it('should handle Unicode characters', () => {
426 |     const input = {
427 |       issue_key: 'ISSUE-789',
428 |       text: 'Unicode: 😀 你好 مرحبا こんにちは',
429 |     };
430 |     const result = z.object(addCommentToIssueToolSchema).parse(input);
431 |     expect(result.text).toBe('Unicode: 😀 你好 مرحبا こんにちは');
432 |   });
433 | });
434 | 
435 | describe('assignIssueToolSchema', () => {
436 |   it('should validate issue assignment with assignee', () => {
437 |     const input = {
438 |       issueKey: 'ISSUE-123',
439 |       assignee: 'john.doe',
440 |     };
441 |     const result = z.object(assignIssueToolSchema).parse(input);
442 |     expect(result.issueKey).toBe('ISSUE-123');
443 |     expect(result.assignee).toBe('john.doe');
444 |   });
445 | 
446 |   it('should validate issue unassignment without assignee', () => {
447 |     const input = {
448 |       issueKey: 'ISSUE-456',
449 |     };
450 |     const result = z.object(assignIssueToolSchema).parse(input);
451 |     expect(result.issueKey).toBe('ISSUE-456');
452 |     expect(result.assignee).toBeUndefined();
453 |   });
454 | 
455 |   it('should reject empty issue key', () => {
456 |     expect(() =>
457 |       z.object(assignIssueToolSchema).parse({
458 |         issueKey: '',
459 |         assignee: 'john.doe',
460 |       })
461 |     ).toThrow();
462 |   });
463 | 
464 |   it('should reject missing issue key', () => {
465 |     expect(() =>
466 |       z.object(assignIssueToolSchema).parse({
467 |         assignee: 'john.doe',
468 |       })
469 |     ).toThrow();
470 |   });
471 | 
472 |   it('should allow empty string for assignee to unassign', () => {
473 |     const input = {
474 |       issueKey: 'ISSUE-789',
475 |       assignee: '',
476 |     };
477 |     const result = z.object(assignIssueToolSchema).parse(input);
478 |     expect(result.issueKey).toBe('ISSUE-789');
479 |     expect(result.assignee).toBe('');
480 |   });
481 | });
482 | 
483 | describe('confirmIssueToolSchema', () => {
484 |   it('should validate minimal parameters', () => {
485 |     const input = {
486 |       issue_key: 'ISSUE-123',
487 |     };
488 |     const result = z.object(confirmIssueToolSchema).parse(input);
489 |     expect(result.issue_key).toBe('ISSUE-123');
490 |     expect(result.comment).toBeUndefined();
491 |   });
492 | 
493 |   it('should validate parameters with comment', () => {
494 |     const input = {
495 |       issue_key: 'ISSUE-123',
496 |       comment: 'Confirmed after code review',
497 |     };
498 |     const result = z.object(confirmIssueToolSchema).parse(input);
499 |     expect(result.issue_key).toBe('ISSUE-123');
500 |     expect(result.comment).toBe('Confirmed after code review');
501 |   });
502 | 
503 |   it('should reject missing issue key', () => {
504 |     const input = {
505 |       comment: 'Confirmed',
506 |     };
507 |     expect(() => z.object(confirmIssueToolSchema).parse(input)).toThrow();
508 |   });
509 | });
510 | 
511 | describe('unconfirmIssueToolSchema', () => {
512 |   it('should validate minimal parameters', () => {
513 |     const input = {
514 |       issue_key: 'ISSUE-456',
515 |     };
516 |     const result = z.object(unconfirmIssueToolSchema).parse(input);
517 |     expect(result.issue_key).toBe('ISSUE-456');
518 |     expect(result.comment).toBeUndefined();
519 |   });
520 | 
521 |   it('should validate parameters with comment', () => {
522 |     const input = {
523 |       issue_key: 'ISSUE-456',
524 |       comment: 'Needs further investigation',
525 |     };
526 |     const result = z.object(unconfirmIssueToolSchema).parse(input);
527 |     expect(result.issue_key).toBe('ISSUE-456');
528 |     expect(result.comment).toBe('Needs further investigation');
529 |   });
530 | 
531 |   it('should reject missing issue key', () => {
532 |     const input = {
533 |       comment: 'Unconfirmed',
534 |     };
535 |     expect(() => z.object(unconfirmIssueToolSchema).parse(input)).toThrow();
536 |   });
537 | });
538 | 
539 | describe('resolveIssueToolSchema', () => {
540 |   it('should validate minimal parameters', () => {
541 |     const input = {
542 |       issue_key: 'ISSUE-789',
543 |     };
544 |     const result = z.object(resolveIssueToolSchema).parse(input);
545 |     expect(result.issue_key).toBe('ISSUE-789');
546 |     expect(result.comment).toBeUndefined();
547 |   });
548 | 
549 |   it('should validate parameters with comment', () => {
550 |     const input = {
551 |       issue_key: 'ISSUE-789',
552 |       comment: 'Fixed in commit abc123',
553 |     };
554 |     const result = z.object(resolveIssueToolSchema).parse(input);
555 |     expect(result.issue_key).toBe('ISSUE-789');
556 |     expect(result.comment).toBe('Fixed in commit abc123');
557 |   });
558 | 
559 |   it('should reject missing issue key', () => {
560 |     const input = {
561 |       comment: 'Resolved',
562 |     };
563 |     expect(() => z.object(resolveIssueToolSchema).parse(input)).toThrow();
564 |   });
565 | });
566 | 
567 | describe('reopenIssueToolSchema', () => {
568 |   it('should validate minimal parameters', () => {
569 |     const input = {
570 |       issue_key: 'ISSUE-101',
571 |     };
572 |     const result = z.object(reopenIssueToolSchema).parse(input);
573 |     expect(result.issue_key).toBe('ISSUE-101');
574 |     expect(result.comment).toBeUndefined();
575 |   });
576 | 
577 |   it('should validate parameters with comment', () => {
578 |     const input = {
579 |       issue_key: 'ISSUE-101',
580 |       comment: 'Issue still occurs in production',
581 |     };
582 |     const result = z.object(reopenIssueToolSchema).parse(input);
583 |     expect(result.issue_key).toBe('ISSUE-101');
584 |     expect(result.comment).toBe('Issue still occurs in production');
585 |   });
586 | 
587 |   it('should reject missing issue key', () => {
588 |     const input = {
589 |       comment: 'Reopened',
590 |     };
591 |     expect(() => z.object(reopenIssueToolSchema).parse(input)).toThrow();
592 |   });
593 | });
594 | 
```
Page 8 of 11 · First · Prev · Next · Last