This is page 6 of 8. Use http://codebase.md/sapientpants/sonarqube-mcp-server?page={x} to view the full context.

# Directory Structure

```
├── .adr-dir
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── commands
│   │   ├── analyze-and-fix-github-issue.md
│   │   ├── fix-sonarqube-issues.md
│   │   ├── implement-github-issue.md
│   │   ├── release.md
│   │   ├── spec-feature.md
│   │   └── update-dependencies.md
│   ├── hooks
│   │   └── block-git-no-verify.ts
│   └── settings.json
├── .dockerignore
├── .github
│   ├── actionlint.yaml
│   ├── changeset.yml
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   ├── pull_request_template.md
│   ├── scripts
│   │   ├── determine-artifact.sh
│   │   └── version-and-release.js
│   ├── workflows
│   │   ├── codeql.yml
│   │   ├── main.yml
│   │   ├── pr.yml
│   │   ├── publish.yml
│   │   ├── reusable-docker.yml
│   │   ├── reusable-security.yml
│   │   └── reusable-validate.yml
│   └── WORKFLOWS.md
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .markdownlint.yaml
├── .markdownlintignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── .trivyignore
├── .yaml-lint.yml
├── .yamllintignore
├── CHANGELOG.md
├── changes.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── COMPATIBILITY.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── architecture
│   │   └── decisions
│   │       ├── 0001-record-architecture-decisions.md
│   │       ├── 0002-use-node-js-with-typescript.md
│   │       ├── 0003-adopt-model-context-protocol-for-sonarqube-integration.md
│   │       ├── 0004-use-sonarqube-web-api-client-for-all-sonarqube-interactions.md
│   │       ├── 0005-domain-driven-design-of-sonarqube-modules.md
│   │       ├── 0006-expose-sonarqube-features-as-mcp-tools.md
│   │       ├── 0007-support-multiple-authentication-methods-for-sonarqube.md
│   │       ├── 0008-use-environment-variables-for-configuration.md
│   │       ├── 0009-file-based-logging-to-avoid-stdio-conflicts.md
│   │       ├── 0010-use-stdio-transport-for-mcp-communication.md
│   │       ├── 0011-docker-containerization-for-deployment.md
│   │       ├── 0012-add-elicitation-support-for-interactive-user-input.md
│   │       ├── 0014-current-security-model-and-future-oauth2-considerations.md
│   │       ├── 0015-transport-architecture-refactoring.md
│   │       ├── 0016-http-transport-with-oauth-2-0-metadata-endpoints.md
│   │       ├── 0017-comprehensive-audit-logging-system.md
│   │       ├── 0018-add-comprehensive-monitoring-and-observability.md
│   │       ├── 0019-simplify-to-stdio-only-transport-for-mcp-gateway-deployment.md
│   │       ├── 0020-testing-framework-and-strategy-vitest-with-property-based-testing.md
│   │       ├── 0021-code-quality-toolchain-eslint-prettier-strict-typescript.md
│   │       ├── 0022-package-manager-choice-pnpm.md
│   │       ├── 0023-release-management-with-changesets.md
│   │       ├── 0024-ci-cd-platform-github-actions.md
│   │       ├── 0025-container-and-security-scanning-strategy.md
│   │       ├── 0026-circuit-breaker-pattern-with-opossum.md
│   │       ├── 0027-docker-image-publishing-strategy-ghcr-to-docker-hub.md
│   │       └── 0028-session-based-http-transport-with-server-sent-events.md
│   ├── architecture.md
│   ├── security.md
│   └── troubleshooting.md
├── eslint.config.js
├── examples
│   └── http-client.ts
├── jest.config.js
├── LICENSE
├── LICENSES.md
├── osv-scanner.toml
├── package.json
├── pnpm-lock.yaml
├── README.md
├── scripts
│   ├── actionlint.sh
│   ├── ci-local.sh
│   ├── load-test.sh
│   ├── README.md
│   ├── run-all-tests.sh
│   ├── scan-container.sh
│   ├── security-scan.sh
│   ├── setup.sh
│   ├── test-monitoring-integration.sh
│   └── validate-docs.sh
├── SECURITY.md
├── sonar-project.properties
├── src
│   ├── __tests__
│   │   ├── additional-coverage.test.ts
│   │   ├── advanced-index.test.ts
│   │   ├── assign-issue.test.ts
│   │   ├── auth-methods.test.ts
│   │   ├── boolean-string-transform.test.ts
│   │   ├── components.test.ts
│   │   ├── config
│   │   │   └── service-accounts.test.ts
│   │   ├── dependency-injection.test.ts
│   │   ├── direct-handlers.test.ts
│   │   ├── direct-lambdas.test.ts
│   │   ├── direct-schema-validation.test.ts
│   │   ├── domains
│   │   │   ├── components-domain-full.test.ts
│   │   │   ├── components-domain.test.ts
│   │   │   ├── hotspots-domain.test.ts
│   │   │   └── source-code-domain.test.ts
│   │   ├── environment-validation.test.ts
│   │   ├── error-handler.test.ts
│   │   ├── error-handling.test.ts
│   │   ├── errors.test.ts
│   │   ├── function-tests.test.ts
│   │   ├── handlers
│   │   │   ├── components-handler-integration.test.ts
│   │   │   └── projects-authorization.test.ts
│   │   ├── handlers.test.ts
│   │   ├── handlers.test.ts.skip
│   │   ├── index.test.ts
│   │   ├── issue-resolution-elicitation.test.ts
│   │   ├── issue-resolution.test.ts
│   │   ├── issue-transitions.test.ts
│   │   ├── issues-enhanced-search.test.ts
│   │   ├── issues-new-parameters.test.ts
│   │   ├── json-array-transform.test.ts
│   │   ├── lambda-functions.test.ts
│   │   ├── lambda-handlers.test.ts.skip
│   │   ├── logger.test.ts
│   │   ├── mapping-functions.test.ts
│   │   ├── mocked-environment.test.ts
│   │   ├── null-to-undefined.test.ts
│   │   ├── parameter-transformations-advanced.test.ts
│   │   ├── parameter-transformations.test.ts
│   │   ├── protocol-version.test.ts
│   │   ├── pull-request-transform.test.ts
│   │   ├── quality-gates.test.ts
│   │   ├── schema-parameter-transforms.test.ts
│   │   ├── schema-transformation-mocks.test.ts
│   │   ├── schema-transforms.test.ts
│   │   ├── schema-validators.test.ts
│   │   ├── schemas
│   │   │   ├── components-schema.test.ts
│   │   │   ├── hotspots-tools-schema.test.ts
│   │   │   └── issues-schema.test.ts
│   │   ├── sonarqube-elicitation.test.ts
│   │   ├── sonarqube.test.ts
│   │   ├── source-code.test.ts
│   │   ├── standalone-handlers.test.ts
│   │   ├── string-to-number-transform.test.ts
│   │   ├── tool-handler-lambdas.test.ts
│   │   ├── tool-handlers.test.ts
│   │   ├── tool-registration-schema.test.ts
│   │   ├── tool-registration-transforms.test.ts
│   │   ├── transformation-util.test.ts
│   │   ├── transports
│   │   │   ├── base.test.ts
│   │   │   ├── factory.test.ts
│   │   │   ├── http.test.ts
│   │   │   ├── session-manager.test.ts
│   │   │   └── stdio.test.ts
│   │   ├── utils
│   │   │   ├── retry.test.ts
│   │   │   └── transforms.test.ts
│   │   ├── zod-boolean-transform.test.ts
│   │   ├── zod-schema-transforms.test.ts
│   │   └── zod-transforms.test.ts
│   ├── config
│   │   ├── service-accounts.ts
│   │   └── versions.ts
│   ├── domains
│   │   ├── base.ts
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── errors.ts
│   ├── handlers
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── index.ts
│   ├── monitoring
│   │   ├── __tests__
│   │   │   └── circuit-breaker.test.ts
│   │   ├── circuit-breaker.ts
│   │   ├── health.ts
│   │   └── metrics.ts
│   ├── schemas
│   │   ├── common.ts
│   │   ├── components.ts
│   │   ├── hotspots-tools.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── sonarqube.ts
│   ├── transports
│   │   ├── base.ts
│   │   ├── factory.ts
│   │   ├── http.ts
│   │   ├── index.ts
│   │   ├── session-manager.ts
│   │   └── stdio.ts
│   ├── types
│   │   ├── common.ts
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   └── utils
│       ├── __tests__
│       │   ├── elicitation.test.ts
│       │   ├── pattern-matcher.test.ts
│       │   └── structured-response.test.ts
│       ├── client-factory.ts
│       ├── elicitation.ts
│       ├── error-handler.ts
│       ├── logger.ts
│       ├── parameter-mappers.ts
│       ├── pattern-matcher.ts
│       ├── retry.ts
│       ├── structured-response.ts
│       └── transforms.ts
├── test-http-transport.sh
├── tmp
│   └── .gitkeep
├── tsconfig.build.json
├── tsconfig.json
├── vitest.config.d.ts
├── vitest.config.js
├── vitest.config.js.map
└── vitest.config.ts
```

# Files

--------------------------------------------------------------------------------
/src/__tests__/transports/session-manager.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { SessionManager, ISession } from '../../transports/session-manager.js';
import { Server } from '@modelcontextprotocol/sdk/server/index.js';

describe('SessionManager', () => {
  let sessionManager: SessionManager;
  let mockServer: Server;

  beforeEach(() => {
    // Reset timers
    vi.useFakeTimers();

    // Create mock MCP server
    mockServer = {
      connect: vi.fn(),
    } as unknown as Server;

    // Create session manager with short timeouts for testing
    sessionManager = new SessionManager({
      sessionTimeout: 1000, // 1 second for testing
      cleanupInterval: 500, // 500ms for testing
      maxSessions: 3,
    });
  });

  afterEach(() => {
    // Cleanup
    sessionManager.shutdown();
    vi.useRealTimers();
    vi.clearAllMocks();
  });

  describe('createSession', () => {
    it('should create a new session with unique ID', () => {
      const session = sessionManager.createSession(mockServer);

      expect(session).toBeDefined();
      expect(session.id).toBeDefined();
      expect(session.server).toBe(mockServer);
      expect(session.createdAt).toBeInstanceOf(Date);
      expect(session.lastActivityAt).toBeInstanceOf(Date);
    });

    it('should create session with metadata', () => {
      const metadata = { userId: 'test-user', role: 'admin' };
      const session = sessionManager.createSession(mockServer, metadata);

      expect(session.metadata).toEqual(metadata);
    });

    it('should throw error when max sessions limit is reached', () => {
      // Create max sessions
      sessionManager.createSession(mockServer);
      sessionManager.createSession(mockServer);
      sessionManager.createSession(mockServer);

      // Try to create one more
      expect(() => sessionManager.createSession(mockServer)).toThrow(
        'Maximum number of sessions (3) reached. Please try again later.'
      );
    });

    it('should generate unique session IDs', () => {
      const session1 = sessionManager.createSession(mockServer);
      const session2 = sessionManager.createSession(mockServer);

      expect(session1.id).not.toBe(session2.id);
    });
  });

  describe('getSession', () => {
    it('should retrieve an existing session', () => {
      const created = sessionManager.createSession(mockServer);
      const retrieved = sessionManager.getSession(created.id);

      expect(retrieved).toBe(created);
    });

    it('should update last activity timestamp when retrieving session', () => {
      const session = sessionManager.createSession(mockServer);
      const originalActivity = session.lastActivityAt;

      // Advance time
      vi.advanceTimersByTime(100);

      sessionManager.getSession(session.id);

      expect(session.lastActivityAt.getTime()).toBeGreaterThan(originalActivity.getTime());
    });

    it('should return undefined for non-existent session', () => {
      const session = sessionManager.getSession('non-existent-id');

      expect(session).toBeUndefined();
    });
  });

  describe('removeSession', () => {
    it('should remove an existing session', () => {
      const session = sessionManager.createSession(mockServer);

      const removed = sessionManager.removeSession(session.id);
      expect(removed).toBe(true);

      const retrieved = sessionManager.getSession(session.id);
      expect(retrieved).toBeUndefined();
    });

    it('should return false when removing non-existent session', () => {
      const removed = sessionManager.removeSession('non-existent-id');

      expect(removed).toBe(false);
    });

    it('should allow creating new session after removing one at max capacity', () => {
      // Create max sessions
      const session1 = sessionManager.createSession(mockServer);
      sessionManager.createSession(mockServer);
      sessionManager.createSession(mockServer);

      // Remove one session
      sessionManager.removeSession(session1.id);

      // Should be able to create a new one
      expect(() => sessionManager.createSession(mockServer)).not.toThrow();
    });
  });

  describe('hasSession', () => {
    it('should return true for existing valid session', () => {
      const session = sessionManager.createSession(mockServer);

      expect(sessionManager.hasSession(session.id)).toBe(true);
    });

    it('should return false for non-existent session', () => {
      expect(sessionManager.hasSession('non-existent-id')).toBe(false);
    });

    it('should return false and remove expired session', () => {
      const session = sessionManager.createSession(mockServer);

      // Advance time beyond session timeout
      vi.advanceTimersByTime(1001);

      expect(sessionManager.hasSession(session.id)).toBe(false);
      expect(sessionManager.getSession(session.id)).toBeUndefined();
    });

    it('should return true for session that was recently accessed', () => {
      const session = sessionManager.createSession(mockServer);

      // Access session to update activity
      vi.advanceTimersByTime(500);
      sessionManager.getSession(session.id);

      // Advance time but not beyond timeout from last activity
      vi.advanceTimersByTime(700);

      expect(sessionManager.hasSession(session.id)).toBe(true);
    });
  });

  describe('getAllSessions', () => {
    it('should return empty array when no sessions exist', () => {
      const sessions = sessionManager.getAllSessions();

      expect(sessions).toEqual([]);
    });

    it('should return all active sessions', () => {
      const session1 = sessionManager.createSession(mockServer);
      const session2 = sessionManager.createSession(mockServer);

      const sessions = sessionManager.getAllSessions();

      expect(sessions).toHaveLength(2);
      expect(sessions).toContain(session1);
      expect(sessions).toContain(session2);
    });

    it('should not return removed sessions', () => {
      const session1 = sessionManager.createSession(mockServer);
      const session2 = sessionManager.createSession(mockServer);
      sessionManager.removeSession(session1.id);

      const sessions = sessionManager.getAllSessions();

      expect(sessions).toHaveLength(1);
      expect(sessions).toContain(session2);
    });
  });

  describe('getSessionCount', () => {
    it('should return 0 when no sessions exist', () => {
      expect(sessionManager.getSessionCount()).toBe(0);
    });

    it('should return correct count of active sessions', () => {
      sessionManager.createSession(mockServer);
      expect(sessionManager.getSessionCount()).toBe(1);

      sessionManager.createSession(mockServer);
      expect(sessionManager.getSessionCount()).toBe(2);
    });

    it('should update count when sessions are removed', () => {
      const session = sessionManager.createSession(mockServer);
      sessionManager.createSession(mockServer);

      expect(sessionManager.getSessionCount()).toBe(2);

      sessionManager.removeSession(session.id);

      expect(sessionManager.getSessionCount()).toBe(1);
    });
  });

  describe('automatic cleanup', () => {
    it('should automatically clean up expired sessions', () => {
      const session1 = sessionManager.createSession(mockServer);
      const session2 = sessionManager.createSession(mockServer);

      // Make session1 expire but keep session2 active
      vi.advanceTimersByTime(600);
      sessionManager.getSession(session2.id); // Update activity

      // Advance time past timeout for session1
      vi.advanceTimersByTime(500); // Total 1100ms for session1, 500ms for session2

      // hasSession will remove expired sessions when checking
      expect(sessionManager.hasSession(session1.id)).toBe(false);
      expect(sessionManager.hasSession(session2.id)).toBe(true);

      // Now verify they were actually removed/kept
      expect(sessionManager.getSession(session1.id)).toBeUndefined();
      expect(sessionManager.getSession(session2.id)).toBeDefined();
    });

    it('should run cleanup at specified intervals', () => {
      const session1 = sessionManager.createSession(mockServer);
      const session2 = sessionManager.createSession(mockServer);

      // Make both sessions expire
      vi.advanceTimersByTime(1100);

      // Trigger cleanup interval
      vi.advanceTimersByTime(500);

      // Both should be removed
      expect(sessionManager.getSession(session1.id)).toBeUndefined();
      expect(sessionManager.getSession(session2.id)).toBeUndefined();
      expect(sessionManager.getSessionCount()).toBe(0);
    });

    it('should handle multiple cleanup cycles', () => {
      const session1 = sessionManager.createSession(mockServer);

      // First cleanup cycle - session still valid
      vi.advanceTimersByTime(500);
      expect(sessionManager.hasSession(session1.id)).toBe(true);

      // Make session expire
      vi.advanceTimersByTime(600);

      // Second cleanup cycle - should remove expired session
      vi.advanceTimersByTime(500);
      expect(sessionManager.hasSession(session1.id)).toBe(false);
    });
  });

  describe('shutdown', () => {
    it('should clear all sessions on shutdown', () => {
      sessionManager.createSession(mockServer);
      sessionManager.createSession(mockServer);

      expect(sessionManager.getSessionCount()).toBe(2);

      sessionManager.shutdown();

      expect(sessionManager.getSessionCount()).toBe(0);
    });

    it('should stop cleanup timer on shutdown', () => {
      const clearIntervalSpy = vi.spyOn(global, 'clearInterval');

      sessionManager.shutdown();

      expect(clearIntervalSpy).toHaveBeenCalled();
    });

    it('should handle multiple shutdown calls gracefully', () => {
      sessionManager.createSession(mockServer);

      sessionManager.shutdown();
      expect(sessionManager.getSessionCount()).toBe(0);

      // Second shutdown should not throw
      expect(() => sessionManager.shutdown()).not.toThrow();
    });
  });

  describe('getStatistics', () => {
    it('should return statistics for empty session manager', () => {
      const stats = sessionManager.getStatistics();

      expect(stats).toEqual({
        activeSessions: 0,
        maxSessions: 3,
        sessionTimeout: 1000,
      });
    });

    it('should return statistics with active sessions', () => {
      const session1 = sessionManager.createSession(mockServer);

      vi.advanceTimersByTime(100);
      const session2 = sessionManager.createSession(mockServer);

      const stats = sessionManager.getStatistics();

      expect(stats.activeSessions).toBe(2);
      expect(stats.maxSessions).toBe(3);
      expect(stats.sessionTimeout).toBe(1000);
      expect(stats.oldestSession).toEqual(session1.createdAt);
      expect(stats.newestSession).toEqual(session2.createdAt);
    });

    it('should correctly identify oldest and newest sessions', () => {
      const session1 = sessionManager.createSession(mockServer);

      vi.advanceTimersByTime(200);
      sessionManager.createSession(mockServer); // Middle session

      vi.advanceTimersByTime(300);
      const session3 = sessionManager.createSession(mockServer);

      const stats = sessionManager.getStatistics();

      expect(stats.oldestSession).toEqual(session1.createdAt);
      expect(stats.newestSession).toEqual(session3.createdAt);
      expect(stats.oldestSession!.getTime()).toBeLessThan(stats.newestSession!.getTime());
    });

    it('should update statistics when sessions are removed', () => {
      const session1 = sessionManager.createSession(mockServer);

      vi.advanceTimersByTime(100);
      sessionManager.createSession(mockServer);

      let stats = sessionManager.getStatistics();
      expect(stats.activeSessions).toBe(2);

      sessionManager.removeSession(session1.id);

      stats = sessionManager.getStatistics();
      expect(stats.activeSessions).toBe(1);
    });
  });

  describe('edge cases', () => {
    it('should handle session with no metadata', () => {
      const session = sessionManager.createSession(mockServer);

      expect(session.metadata).toBeUndefined();
    });

    it('should handle rapid session creation and removal', () => {
      const sessions: ISession[] = [];

      // Rapidly create and remove sessions
      for (let i = 0; i < 10; i++) {
        const session = sessionManager.createSession(mockServer);
        sessions.push(session);

        if (sessions.length > 2) {
          const removed = sessions.shift()!;
          sessionManager.removeSession(removed.id);
        }
      }

      expect(sessionManager.getSessionCount()).toBeLessThanOrEqual(3);
    });

    it('should handle concurrent access to same session', () => {
      const session = sessionManager.createSession(mockServer);

      // Simulate concurrent access
      const retrieved1 = sessionManager.getSession(session.id);
      const retrieved2 = sessionManager.getSession(session.id);

      expect(retrieved1).toBe(session);
      expect(retrieved2).toBe(session);
    });
  });

  describe('configuration defaults', () => {
    it('should use default configuration when not specified', () => {
      const defaultManager = new SessionManager();

      const stats = defaultManager.getStatistics();
      expect(stats.sessionTimeout).toBe(1800000); // 30 minutes
      expect(stats.maxSessions).toBe(100);

      defaultManager.shutdown();
    });

    it('should allow partial configuration override', () => {
      const customManager = new SessionManager({
        maxSessions: 50,
        // sessionTimeout and cleanupInterval use defaults
      });

      const stats = customManager.getStatistics();
      expect(stats.maxSessions).toBe(50);
      expect(stats.sessionTimeout).toBe(1800000); // default

      customManager.shutdown();
    });
  });
});

```

--------------------------------------------------------------------------------
/docs/architecture/decisions/0025-container-and-security-scanning-strategy.md:
--------------------------------------------------------------------------------

```markdown
# 25. Container and Security Scanning Strategy

Date: 2025-10-11

## Status

Accepted

## Context

The SonarQube MCP Server requires comprehensive security scanning to identify vulnerabilities before they reach production. As a tool that integrates with SonarQube (a security-focused platform), this project must maintain exemplary security practices. The project needs:

- Container vulnerability scanning for Docker images
- Source code static analysis (SAST) for security issues
- Dependency vulnerability scanning for npm packages
- Integration with GitHub Security tab for centralized visibility
- Fail-fast approach to prevent insecure releases
- SARIF format output for GitHub Advanced Security
- Supply chain security attestations (SLSA provenance)
- License compliance checking

Multiple scanning tools exist with different strengths:

- **Trivy**: Fast, comprehensive, supports multiple formats, excellent container scanning
- **Snyk**: Good UI, expensive for private repos, requires account
- **Grype**: Fast but fewer vulnerability sources
- **Clair**: More complex setup, primarily for registries
- **CodeQL**: GitHub's native SAST tool, excellent for code analysis
- **OSV-Scanner**: Google's vulnerability scanner, good for dependencies

## Decision

We will implement a **multi-layered security scanning strategy** using multiple complementary tools:

### 1. Trivy for Container Scanning

**Purpose**: Scan Docker images for OS and application vulnerabilities

**Configuration**:

- Severity threshold: `HIGH,CRITICAL` (blocks release)
- Formats: Table (local), SARIF (CI/CD)
- Scan targets: Built Docker images before publishing
- License scanning: GPL, LGPL, MPL allowed (configured exceptions)

**Integration points**:

- Local development: `pnpm scan:container` script
- CI/CD: Integrated in reusable-docker.yml workflow
- SARIF upload: GitHub Security tab for visibility

**Script**: `scripts/scan-container.sh`

```bash
# Local container scanning with flexible options
./scripts/scan-container.sh --severity HIGH,CRITICAL
```

**Trivy configuration** (`.trivyignore`):

- Minimal ignores (only false positives)
- Each exclusion documented with reason
- Regular review of ignored CVEs

### 2. CodeQL for Static Application Security Testing (SAST)

**Purpose**: Analyze TypeScript/JavaScript source code for security vulnerabilities

**Configuration**:

- Language: JavaScript/TypeScript
- Queries: Default CodeQL security queries
- Schedule: Weekly scans + on every PR
- Auto-fix: Enabled for supported issues

**Detects**:

- SQL injection risks
- Cross-site scripting (XSS)
- Command injection
- Path traversal
- Cryptographic issues
- Insecure deserialization
- Server-side request forgery (SSRF)

**Workflow**: `.github/workflows/codeql.yml`

### 3. OSV-Scanner for Dependency Vulnerabilities

**Purpose**: Scan npm dependencies for known vulnerabilities

**Configuration**:

- Target: `pnpm-lock.yaml`
- Format: SARIF for GitHub integration
- Fail threshold: Any HIGH or CRITICAL vulnerability
- Auto-remediation: Dependabot PRs

**Coverage**:

- NPM packages (production and dev dependencies)
- Transitive dependencies
- OSV (Open Source Vulnerabilities) database
- GitHub Advisory Database
- NVD (National Vulnerability Database)

**Workflow**: Integrated in `.github/workflows/reusable-security.yml`
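
To reproduce the CI scan locally, the same arguments can be passed to the `osv-scanner` CLI directly. A minimal sketch, assuming `osv-scanner` v1 is installed; the flags mirror the `scan-args` used in the workflow shown under CI/CD Integration below:

```bash
# Scan the lockfile locally with the same arguments the CI workflow uses
osv-scanner --lockfile=pnpm-lock.yaml

# Produce the same SARIF report the workflow uploads to GitHub Security
osv-scanner --lockfile=pnpm-lock.yaml --format=sarif --output=osv-results.sarif
```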

### 4. SonarCloud for Code Quality and Security

**Purpose**: Continuous code quality and security analysis

**Configuration**:

- Project key: `sonarqube-mcp-server`
- Quality gate: Must pass before merge
- Coverage requirement: 80% minimum

**Security analysis**:

- Security hotspots identification
- OWASP Top 10 coverage
- CWE/SANS Top 25 detection
- Code smells with security impact

**Integration**: Integrated in `.github/workflows/reusable-validate.yml`
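
For reference, the repository's `sonar-project.properties` wires this configuration up. The following is a hypothetical sketch only; apart from the project key above, the property values (organization slug, source paths, coverage report path) are assumptions, and the actual file may differ:

```
# Hypothetical sketch - the actual sonar-project.properties may differ
sonar.projectKey=sonarqube-mcp-server
sonar.organization=sapientpants   # assumption: org slug matches the GitHub owner
sonar.sources=src
sonar.exclusions=src/__tests__/**
sonar.javascript.lcov.reportPaths=coverage/lcov.info
```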

### 5. NPM Audit for Dependency Vulnerabilities

**Purpose**: Quick vulnerability check for npm dependencies

**Configuration**:

- Audit level: `critical` only (blocks pre-commit)
- Run frequency: Every commit, every CI run
- Automatic fixes: Manual review required

**Command**: `pnpm audit --audit-level critical`
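
The pre-commit gate is enforced via Husky. A minimal sketch of what the `.husky/pre-commit` hook is assumed to run; the actual hook likely performs additional checks such as linting and formatting:

```bash
#!/bin/sh
# .husky/pre-commit (sketch - the real hook likely runs more checks)
pnpm audit --audit-level critical
```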

### Supply Chain Security

**SLSA Provenance Attestations**:

- Generated for all release artifacts
- Includes Docker images, NPM packages, dist files
- Verifiable with the GitHub attestation API (see the verification sketch below)
- Build provenance includes:
  - Build environment details
  - Builder identity
  - Source repository and commit
  - Build steps and inputs
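
Attestations can be checked with recent versions of the GitHub CLI. A sketch of the verification commands; the image tag and artifact file name below are illustrative, not taken from an actual release:

```bash
# Verify provenance of a published image (tag is illustrative)
gh attestation verify oci://ghcr.io/sapientpants/sonarqube-mcp-server:latest \
  --owner sapientpants

# Verify a downloaded release artifact (file name is illustrative)
gh attestation verify ./sonarqube-mcp-server.tgz \
  --repo sapientpants/sonarqube-mcp-server
```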

**SBOM (Software Bill of Materials)**:

- Format: CycloneDX JSON
- Generated with: `@cyclonedx/cdxgen`
- Includes: All dependencies with versions and licenses
- Attached to every GitHub release

**Command**: `pnpm sbom` → generates `sbom.cdx.json`
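
A sketch of what the `pnpm sbom` script is assumed to invoke under the hood; the exact script definition lives in `package.json` and may pass additional flags:

```bash
# Generate a CycloneDX SBOM for the project (assumed invocation)
npx @cyclonedx/cdxgen -o sbom.cdx.json
```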

## Consequences

### Positive

- **Multi-Layered Defense**: Multiple tools catch different vulnerability types
- **Container Security**: Trivy catches OS and application vulnerabilities
- **Source Code Security**: CodeQL detects code-level security issues
- **Dependency Security**: OSV-Scanner and npm audit protect against vulnerable dependencies
- **GitHub Integration**: SARIF uploads centralize findings in Security tab
- **Fail-Fast**: High/critical vulnerabilities block releases
- **Supply Chain Security**: SLSA provenance and SBOM provide transparency
- **License Compliance**: Trivy checks for license violations
- **Local Development**: Developers can run scans locally before commit
- **Comprehensive Coverage**: Covers OS packages, npm dependencies, source code
- **Automated Remediation**: Dependabot creates PRs for fixable vulnerabilities
- **Weekly Scans**: Scheduled CodeQL scans catch new vulnerabilities

### Negative

- **False Positives**: Multiple tools may report false positives requiring triage
- **Scan Time**: Security scans add 2-3 minutes to CI/CD pipeline
- **Maintenance Overhead**: Need to maintain `.trivyignore` and exclusion lists
- **Tool Updates**: Security tools require regular updates to stay current
- **Noise**: Low-severity findings can create noise (mitigated with thresholds)
- **Complex Triage**: Multiple tools require checking multiple interfaces
- **Breaking Changes**: Tool updates may introduce new findings that break builds

### Neutral

- **GitHub Dependency**: Heavily relies on GitHub Security features
- **Learning Curve**: Team needs to understand output from multiple tools
- **Update Frequency**: Vulnerability databases update frequently, findings change
- **Scanner Differences**: Different tools may disagree on severity ratings

## Implementation

### Local Development Setup

**Install Trivy**:

```bash
# macOS
brew install aquasecurity/trivy/trivy

# Linux (Ubuntu/Debian)
wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo apt-key add -
echo "deb https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" | sudo tee -a /etc/apt/sources.list.d/trivy.list
sudo apt-get update && sudo apt-get install trivy

# Docker
alias trivy='docker run --rm -v /var/run/docker.sock:/var/run/docker.sock aquasecurity/trivy:latest'
```

**Run local scans**:

```bash
# Quick scan (HIGH and CRITICAL only)
pnpm scan:container

# Full scan (all severities)
./scripts/scan-container.sh --severity UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL

# Scan and generate SARIF report
pnpm scan:container:sarif

# Scan specific image
./scripts/scan-container.sh --image myimage:tag --skip-build

# Ignore unfixed vulnerabilities
./scripts/scan-container.sh --ignore-unfixed
```

### CI/CD Integration

**Security workflow** (`.github/workflows/reusable-security.yml`):

```yaml
jobs:
  codeql:
    name: CodeQL Analysis
    runs-on: ubuntu-latest
    permissions:
      security-events: write
    steps:
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: javascript-typescript

      - name: Autobuild
        uses: github/codeql-action/autobuild@v3

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          upload: true

  osv-scanner:
    name: OSV Vulnerability Scan
    runs-on: ubuntu-latest
    permissions:
      security-events: write
    steps:
      - name: Run OSV-Scanner
        uses: google/osv-scanner-action@v1
        with:
          scan-args: --lockfile=pnpm-lock.yaml --format=sarif --output=osv-results.sarif

      - name: Upload SARIF
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: osv-results.sarif
```

**Docker workflow** (`.github/workflows/reusable-docker.yml`):

```yaml
- name: Run Trivy vulnerability scanner
  uses: aquasecurity/trivy-action@master
  with:
    image-ref: ${{ env.IMAGE_TAG }}
    format: 'sarif'
    output: 'trivy-results.sarif'
    severity: 'HIGH,CRITICAL'

- name: Upload Trivy results to GitHub Security
  uses: github/codeql-action/upload-sarif@v3
  with:
    sarif_file: 'trivy-results.sarif'
```

### Trivy Configuration

**License exceptions** (`.trivy.yaml` - if created):

```yaml
vulnerability:
  severity:
    - HIGH
    - CRITICAL

license:
  # Allow these open-source licenses
  allowed:
    - MIT
    - Apache-2.0
    - BSD-2-Clause
    - BSD-3-Clause
    - ISC
    - GPL-3.0 # Allowed for this project
    - LGPL-3.0
    - MPL-2.0
```

**Ignore file** (`.trivyignore`):

```
# Format: CVE-ID [exp:YYYY-MM-DD] [# comment]

# Example: False positive in dev dependency
# CVE-2024-12345 exp:2025-12-31 # False positive, only affects test environment

# No exceptions currently - keep this file minimal!
```

### Remediation Workflow

When security vulnerabilities are found:

1. **Triage**:
   - Check severity (HIGH/CRITICAL = immediate fix)
   - Verify it's not a false positive
   - Check if fix is available

2. **Fix**:

   ```bash
   # Update base image
   # In Dockerfile: FROM node:22-alpine -> node:22-alpine@sha256:...

   # Update dependencies
   pnpm update <package>

   # Or update all
   pnpm update --latest
   ```

3. **Verify**:

   ```bash
   # Run local scan
   pnpm scan:container

   # Check if vulnerability is resolved
   trivy image --severity HIGH,CRITICAL myimage:tag
   ```

4. **Document** (if no fix available):
   - Add to `.trivyignore` with expiration date
   - Add comment explaining why it's ignored
   - Create issue to track fix availability

### Monitoring Security Findings

**GitHub Security tab**:

- Navigate to: Repository → Security → Code scanning
- View all findings from CodeQL, OSV-Scanner, Trivy
- Filter by severity, tool, status
- Dismiss false positives with reason

**Check CLI**:

```bash
# View security alerts
gh api /repos/sapientpants/sonarqube-mcp-server/code-scanning/alerts

# View specific alert
gh api /repos/sapientpants/sonarqube-mcp-server/code-scanning/alerts/1
```
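
To triage from the terminal, the alert list can be filtered with `jq`. A sketch assuming the standard code-scanning alert schema (`state`, `rule.security_severity_level`):

```bash
# Count open alerts rated critical (assumes the standard alert schema)
gh api /repos/sapientpants/sonarqube-mcp-server/code-scanning/alerts \
  | jq '[.[] | select(.state == "open" and .rule.security_severity_level == "critical")] | length'
```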

## Examples

### Example 1: Container Scan Output

**No blocking vulnerabilities**:

```
✅ Container security scan passed!
No vulnerabilities found matching severity threshold: HIGH,CRITICAL

Scan results:
- Total vulnerabilities: 5
  - LOW: 3
  - MEDIUM: 2
  - HIGH: 0
  - CRITICAL: 0
```

**Vulnerabilities found**:

```
❌ Container security scan failed!
Vulnerabilities found matching severity threshold: HIGH,CRITICAL

myimage:latest (alpine 3.18.0)
==================================
Total: 2 (HIGH: 1, CRITICAL: 1)

+----------------+------------------+----------+-------------------+
| LIBRARY        | VULNERABILITY ID | SEVERITY | FIXED VERSION     |
+----------------+------------------+----------+-------------------+
| libcrypto3     | CVE-2024-12345   | CRITICAL | 3.0.10-r2         |
| libssl3        | CVE-2024-67890   | HIGH     | 3.0.10-r2         |
+----------------+------------------+----------+-------------------+

Remediation Tips:
1. Update base image to latest version
2. Update dependencies in package.json
3. Check for security advisories
```

### Example 2: CodeQL Findings

GitHub Security tab shows:

```
Code scanning alert: SQL Injection
Severity: High
Tool: CodeQL
Location: src/database/query.ts:45

Description:
This query contains unsanitized user input, which could lead to SQL injection.

Recommendation:
Use parameterized queries or an ORM to prevent SQL injection.
```

### Example 3: SBOM Content

**sbom.cdx.json** (excerpt):

```json
{
  "bomFormat": "CycloneDX",
  "specVersion": "1.4",
  "serialNumber": "urn:uuid:...",
  "version": 1,
  "metadata": {
    "component": {
      "name": "sonarqube-mcp-server",
      "version": "1.10.18",
      "type": "application"
    }
  },
  "components": [
    {
      "name": "@modelcontextprotocol/sdk",
      "version": "1.20.0",
      "purl": "pkg:npm/%40modelcontextprotocol/[email protected]",
      "type": "library",
      "licenses": [{ "license": { "id": "MIT" } }]
    }
  ]
}
```
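
The full file can be inspected locally; for example, a one-liner (assuming `jq` is installed) that lists every component with its version and license:

```bash
# Print "name@version license" for each SBOM component
jq -r '.components[] | "\(.name)@\(.version) \(.licenses[0].license.id // "unknown")"' sbom.cdx.json
```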

## Security Scanning Matrix

| Tool        | Target         | Purpose                    | Frequency      | Fail Threshold |
| ----------- | -------------- | -------------------------- | -------------- | -------------- |
| Trivy       | Docker images  | Container vulnerabilities  | Every build    | HIGH, CRITICAL |
| CodeQL      | Source code    | SAST (code security)       | PR + Weekly    | Any finding    |
| OSV-Scanner | pnpm-lock.yaml | Dependency vulnerabilities | Every PR, push | HIGH, CRITICAL |
| npm audit   | package.json   | Quick dependency check     | Pre-commit, CI | CRITICAL       |
| SonarCloud  | Source code    | Quality + security         | Every PR, push | Quality gate   |

## References

- Trivy Documentation: https://aquasecurity.github.io/trivy/
- CodeQL Documentation: https://codeql.github.com/docs/
- OSV-Scanner: https://google.github.io/osv-scanner/
- SLSA Provenance: https://slsa.dev/
- CycloneDX SBOM: https://cyclonedx.org/
- Container Scan Script: scripts/scan-container.sh
- Security Workflows: .github/workflows/reusable-security.yml
- Trivy Ignore File: .trivyignore
- GitHub Security Tab: https://github.com/sapientpants/sonarqube-mcp-server/security

```

--------------------------------------------------------------------------------
/.github/workflows/reusable-docker.yml:
--------------------------------------------------------------------------------

```yaml
# =============================================================================
# REUSABLE WORKFLOW: Docker Build and Security Scanning
# PURPOSE: Build Docker images and scan for vulnerabilities with Trivy
# USAGE: Called by PR and main workflows for container validation
# OUTPUTS: Security findings uploaded to GitHub Security tab, Docker image artifact
# =============================================================================

name: Reusable Docker

on:
  workflow_call:
    inputs:
      platforms:
        description: 'Docker platforms to build (e.g., linux/amd64,linux/arm64)'
        type: string
        default: 'linux/amd64' # Single platform for PRs, multi for main
      push-image:
        description: 'Whether to push image to registry (always false for this workflow)'
        type: boolean
        default: false
      save-artifact:
        description: 'Whether to save Docker image as artifact for later use'
        type: boolean
        default: false
      artifact-name:
        description: 'Name for the Docker image artifact'
        type: string
        default: 'docker-image'
      version:
        description: 'Version tag for the Docker image'
        type: string
        default: ''
      image-name:
        description: 'Docker image name (without registry)'
        type: string
        default: 'sonarqube-mcp-server'
      tag_sha:
        description: 'SHA of the version tag for consistent naming'
        type: string
        default: ''
      build_artifact:
        description: 'Name of the pre-built TypeScript artifact to use'
        type: string
        default: ''
    outputs:
      image-digest:
        description: 'Docker image digest'
        value: ${{ jobs.docker.outputs.digest }}
      artifact-name:
        description: 'Name of the saved artifact'
        value: ${{ jobs.docker.outputs.artifact-name }}

# SECURITY: Required permissions for Docker operations
# Note: packages: write is only needed if pushing to GitHub Container Registry
# Calling workflows can omit it if not pushing images
permissions:
  contents: read # Read source code
  security-events: write # Upload Trivy scan results
  packages: write # Push Docker images to GitHub Container Registry

jobs:
  docker:
    runs-on: ubuntu-latest
    outputs:
      digest: ${{ steps.build.outputs.digest }}
      artifact-name: ${{ inputs.artifact-name }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download build artifact
        # Download pre-built TypeScript if artifact name provided
        if: inputs.build_artifact != ''
        uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.build_artifact }}

      # =============================================================================
      # DOCKER SETUP
      # Configure build environment for single or multi-platform builds
      # =============================================================================

      - name: Set up QEMU
        # Required for multi-platform builds (arm64)
        if: contains(inputs.platforms, 'arm64')
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        # Advanced Docker builder with cache support
        uses: docker/setup-buildx-action@v3

      - name: Login to GitHub Container Registry
        # Login to GHCR for multi-platform builds that need to be pushed to registry
        # Single-platform builds for PRs don't need registry push
        if: inputs.save-artifact && contains(inputs.platforms, ',')
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # =============================================================================
      # DOCKER BUILD
      # Build image with layer caching for efficiency
      # =============================================================================

      - name: Generate Docker metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          # Use GHCR for multi-platform artifact builds, local name otherwise
          images: |
            ${{ (inputs.save-artifact && contains(inputs.platforms, ',')) && format('ghcr.io/{0}/{1}', github.repository_owner, inputs.image-name) || inputs.image-name }}
          tags: |
            type=raw,value=${{ inputs.version }},enable=${{ inputs.version != '' }}
            type=raw,value=latest,enable=${{ inputs.version != '' }}
            type=ref,event=pr
            type=sha,format=short

      - name: Determine build configuration
        # Set clear variables for build mode to improve readability
        id: build-config
        run: |
          # Determine if we're building for multiple platforms
          IS_MULTI_PLATFORM="false"
          if echo "${{ inputs.platforms }}" | grep -q ','; then
            IS_MULTI_PLATFORM="true"
          fi

          # For multi-platform builds with save-artifact, push to GHCR
          # For single-platform builds or PR builds, load locally or save to tar
          SAVE_ARTIFACT="${{ inputs.save-artifact }}"
          SHOULD_PUSH="false"
          CAN_LOAD="false"
          OUTPUT_TYPE=""

          if [ "$SAVE_ARTIFACT" = "true" ] && [ "$IS_MULTI_PLATFORM" = "true" ]; then
            # Multi-platform artifact build: push to GHCR
            SHOULD_PUSH="true"
            CAN_LOAD="false"
          elif [ "$SAVE_ARTIFACT" != "true" ] && [ "$IS_MULTI_PLATFORM" = "false" ]; then
            # Single-platform PR build: load locally
            CAN_LOAD="true"
          else
            # Single-platform artifact build: save to tar
            CAN_LOAD="false"
            SHA_TO_USE="${{ inputs.tag_sha || github.sha }}"
            OUTPUT_TYPE="type=docker,dest=${{ inputs.artifact-name }}-${SHA_TO_USE}.tar"
          fi

          {
            echo "is_multi_platform=$IS_MULTI_PLATFORM"
            echo "should_push=$SHOULD_PUSH"
            echo "can_load=$CAN_LOAD"
            echo "output_type=$OUTPUT_TYPE"
          } >> $GITHUB_OUTPUT

          echo "📋 Build configuration:"
          echo "  Multi-platform: $IS_MULTI_PLATFORM"
          echo "  Save artifact: $SAVE_ARTIFACT"
          echo "  Should push: $SHOULD_PUSH"
          echo "  Can load: $CAN_LOAD"
          echo "  Output type: $OUTPUT_TYPE"

      - name: Build Docker image
        id: build
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: ${{ inputs.platforms }}
          push: ${{ steps.build-config.outputs.should_push == 'true' }}
          load: ${{ steps.build-config.outputs.can_load == 'true' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha # Use GitHub Actions cache
          cache-to: type=gha,mode=max # Maximum cache retention
          build-args: |
            VERSION=${{ inputs.version || github.sha }}
          outputs: ${{ steps.build-config.outputs.output_type }}

      # =============================================================================
      # SECURITY SCANNING
      # Trivy vulnerability scanning with configurable severity
      # =============================================================================

      - name: Determine Trivy scan configuration
        # Set clear variables for scan inputs to improve readability
        id: scan-config
        run: |
          # Determine scanning mode based on build configuration
          CAN_LOAD="${{ steps.build-config.outputs.can_load }}"
          if [ "$CAN_LOAD" = "true" ]; then
            # For loaded single-platform images, scan by image reference
            FIRST_TAG=$(echo "${{ steps.meta.outputs.tags }}" | head -n1)
            {
              echo "scan_input="
              echo "scan_image_ref=$FIRST_TAG"
            } >> $GITHUB_OUTPUT
            echo "Using image reference for scanning: $FIRST_TAG"
          else
            # For multi-platform or artifact builds, scan the tar file
            SHA_TO_USE="${{ inputs.tag_sha || github.sha }}"
            {
              echo "scan_input=${{ inputs.artifact-name }}-${SHA_TO_USE}.tar"
              echo "scan_image_ref="
            } >> $GITHUB_OUTPUT
            echo "Using tar file for scanning: ${{ inputs.artifact-name }}-${SHA_TO_USE}.tar"
          fi

      - name: Run Trivy vulnerability scanner
        # SECURITY: Scan image for vulnerabilities before any distribution
        # NOTE: Multi-platform OCI exports cannot be scanned from tar files
        # Scans for vulnerabilities, secrets, misconfigurations, and licenses
        # License findings are informational only (see LICENSES.md)
        if: steps.build-config.outputs.can_load == 'true' || !contains(inputs.platforms, ',')
        uses: aquasecurity/trivy-action@master
        with:
          input: ${{ steps.scan-config.outputs.scan_input }}
          image-ref: ${{ steps.scan-config.outputs.scan_image_ref }}
          exit-code: '1'
          format: 'sarif'
          hide-progress: false
          output: 'trivy-results.sarif'
          severity: 'HIGH,CRITICAL'
          scanners: 'vuln,secret,misconfig'
          trivyignores: '.trivyignore'
          version: 'latest'
        env:
          TRIVY_DEBUG: 'true'

      - name: Check Trivy results for vulnerabilities
        # Fail build if non-license security issues are found
        # License findings are informational and don't fail the build
        if: steps.build-config.outputs.can_load == 'true' || !contains(inputs.platforms, ',')
        run: |
          if [ -f trivy-results.sarif ]; then
            # Check for vulnerabilities, secrets, or misconfigurations (not licenses)
            SECURITY_ISSUES=$(jq -r '.runs[0].results[] | select(.ruleId | startswith("CVE-") or startswith("SECRET-") or startswith("CONFIG-")) | .level' trivy-results.sarif 2>/dev/null | wc -l || echo "0")
            if [ "$SECURITY_ISSUES" -gt 0 ]; then
              echo "::error::Found $SECURITY_ISSUES security issue(s) in container image"
              echo "Review the scan results in the Security tab after SARIF upload"
              exit 1
            fi
            echo "No security vulnerabilities found (license findings are informational)"
          fi

      - name: Upload Trivy results to GitHub Security
        # Always upload results, even if scan fails
        # Results viewable at: Security > Code scanning alerts
        if: always() && (steps.build-config.outputs.can_load == 'true' || !contains(inputs.platforms, ','))
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: 'trivy-results.sarif'
          category: 'container-scan-${{ github.event_name }}'

      - name: Upload Trivy SARIF as artifact
        # Upload SARIF file as artifact for debugging and inspection
        if: always() && (steps.build-config.outputs.can_load == 'true' || !contains(inputs.platforms, ','))
        uses: actions/upload-artifact@v4
        with:
          name: trivy-${{ github.sha }}
          path: trivy-results.sarif
          retention-days: 7

      # =============================================================================
      # ARTIFACT STORAGE
      # Save Docker image tar files for single-platform builds
      # Multi-platform builds are pushed to GHCR instead
      # =============================================================================

      - name: Compress Docker image artifact
        # Compress the tar file to reduce storage costs
        # Only for single-platform builds (multi-platform builds pushed to GHCR)
        if: inputs.save-artifact && !contains(inputs.platforms, ',')
        run: |
          SHA_TO_USE="${{ inputs.tag_sha || github.sha }}"
          echo "Compressing Docker image artifact..."
          gzip -9 ${{ inputs.artifact-name }}-${SHA_TO_USE}.tar
          ls -lh ${{ inputs.artifact-name }}-${SHA_TO_USE}.tar.gz

      - name: Upload Docker image artifact
        # Store single-platform image tar for deterministic publishing
        # Multi-platform images are stored in GHCR registry
        if: inputs.save-artifact && !contains(inputs.platforms, ',')
        uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.artifact-name }}-${{ inputs.tag_sha || github.sha }}
          path: ${{ inputs.artifact-name }}-${{ inputs.tag_sha || github.sha }}.tar.gz
          retention-days: 7 # Keep for a week (enough for release cycle)
          compression-level: 0 # Already compressed with gzip

      # =============================================================================
      # SUPPLY CHAIN SECURITY
      # Generate attestations for build provenance (main builds only)
      # =============================================================================

      - name: Generate attestations for GHCR images
        # Creates cryptographic proof of build provenance for multi-platform images
        # Multi-platform images are stored in GHCR registry
        if: inputs.save-artifact && contains(inputs.platforms, ',') && inputs.version != '' && env.ACTIONS_ID_TOKEN_REQUEST_URL != ''
        uses: actions/attest-build-provenance@v2
        with:
          subject-name: ghcr.io/${{ github.repository_owner }}/${{ inputs.image-name }}
          subject-digest: ${{ steps.build.outputs.digest }}
          push-to-registry: true

      - name: Generate attestations for tar artifacts
        # Creates cryptographic proof of build provenance for single-platform tar files
        if: inputs.save-artifact && !contains(inputs.platforms, ',') && inputs.version != '' && env.ACTIONS_ID_TOKEN_REQUEST_URL != ''
        uses: actions/attest-build-provenance@v2
        with:
          subject-path: ${{ inputs.artifact-name }}-${{ inputs.tag_sha || github.sha }}.tar.gz

```

--------------------------------------------------------------------------------
/src/__tests__/errors.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import {
  SonarQubeAPIError,
  SonarQubeErrorType,
  transformError,
  withErrorHandling,
  formatErrorForMCP,
} from '../errors.js';
import {
  SonarQubeError as SonarQubeClientError,
  AuthenticationError,
  AuthorizationError,
  NotFoundError,
  RateLimitError,
  NetworkError,
  ServerError,
  ValidationError,
} from 'sonarqube-web-api-client';

// Mock the logger
vi.mock('../utils/logger.js', () => ({
  createLogger: () => ({
    info: vi.fn(),
    error: vi.fn(),
    debug: vi.fn(),
    warn: vi.fn(),
  }),
}));

describe('Error Handling', () => {
  describe('SonarQubeAPIError', () => {
    it('should create error with all properties', () => {
      const error = new SonarQubeAPIError('Test error', SonarQubeErrorType.AUTHENTICATION_FAILED, {
        operation: 'test-operation',
        statusCode: 401,
        context: { key: 'value' },
        solution: 'Test solution',
      });

      expect(error.message).toBe('Test error');
      expect(error.type).toBe(SonarQubeErrorType.AUTHENTICATION_FAILED);
      expect(error.operation).toBe('test-operation');
      expect(error.statusCode).toBe(401);
      expect(error.context).toEqual({ key: 'value' });
      expect(error.solution).toBe('Test solution');
    });

    it('should format error as string with all details', () => {
      const error = new SonarQubeAPIError('Test error', SonarQubeErrorType.AUTHENTICATION_FAILED, {
        operation: 'test-operation',
        statusCode: 401,
        context: { key: 'value' },
        solution: 'Test solution',
      });

      const result = error.toString();
      expect(result).toContain('Error: Test error');
      expect(result).toContain('Operation: test-operation');
      expect(result).toContain('Status Code: 401');
      expect(result).toContain('Solution: Test solution');
      expect(result).toContain('Context:');
      expect(result).toContain('"key": "value"');
    });

    it('should format error without optional fields', () => {
      const error = new SonarQubeAPIError('Test error', SonarQubeErrorType.UNKNOWN_ERROR);
      const result = error.toString();
      expect(result).toBe('Error: Test error');
    });
  });

  describe('transformError', () => {
    it('should return existing SonarQubeAPIError unchanged', () => {
      const originalError = new SonarQubeAPIError(
        'Original error',
        SonarQubeErrorType.AUTHENTICATION_FAILED
      );
      const result = transformError(originalError, 'test-operation');
      expect(result).toBe(originalError);
    });

    it('should transform AuthenticationError', () => {
      const clientError = new AuthenticationError('Auth failed');
      const result = transformError(clientError, 'test-operation');

      expect(result).toBeInstanceOf(SonarQubeAPIError);
      expect(result.type).toBe(SonarQubeErrorType.AUTHENTICATION_FAILED);
      expect(result.message).toBe('Auth failed');
      expect(result.solution).toContain('check your SONARQUBE_TOKEN');
    });

    it('should transform AuthorizationError', () => {
      const clientError = new AuthorizationError('Access denied');
      const result = transformError(clientError, 'test-operation');

      expect(result.type).toBe(SonarQubeErrorType.AUTHORIZATION_FAILED);
      expect(result.solution).toContain('required permissions');
    });

    it('should transform NotFoundError', () => {
      const clientError = new NotFoundError('Not found');
      const result = transformError(clientError, 'test-operation');

      expect(result.type).toBe(SonarQubeErrorType.RESOURCE_NOT_FOUND);
      expect(result.solution).toContain('Verify the project key');
    });

    it('should transform RateLimitError', () => {
      const clientError = new RateLimitError('Rate limited');
      const result = transformError(clientError, 'test-operation');

      expect(result.type).toBe(SonarQubeErrorType.RATE_LIMITED);
      expect(result.solution).toContain('wait before retrying');
    });

    it('should transform NetworkError', () => {
      const clientError = new NetworkError('Network error');
      const result = transformError(clientError, 'test-operation');

      expect(result.type).toBe(SonarQubeErrorType.NETWORK_ERROR);
      expect(result.solution).toContain('Check your network connection');
    });

    it('should transform ServerError', () => {
      const clientError = new ServerError('Server error', 500);
      const result = transformError(clientError, 'test-operation');

      expect(result.type).toBe(SonarQubeErrorType.SERVER_ERROR);
      expect(result.solution).toContain('server is experiencing issues');
    });

    it('should transform ValidationError', () => {
      const clientError = new ValidationError('Validation error');
      const result = transformError(clientError, 'test-operation');

      expect(result.type).toBe(SonarQubeErrorType.VALIDATION_ERROR);
      expect(result.solution).toContain('check your request parameters');
    });

    it('should transform unknown SonarQubeClientError', () => {
      class UnknownError extends SonarQubeClientError {
        constructor(message: string) {
          super(message, 'UNKNOWN');
        }
      }
      const clientError = new UnknownError('Unknown error');
      const result = transformError(clientError, 'test-operation');

      expect(result.type).toBe(SonarQubeErrorType.UNKNOWN_ERROR);
    });

    it('should transform generic Error', () => {
      const error = new Error('Generic error');
      const result = transformError(error, 'test-operation');

      expect(result.type).toBe(SonarQubeErrorType.UNKNOWN_ERROR);
      expect(result.message).toBe('Generic error');
    });

    it('should transform non-Error values', () => {
      const result = transformError('String error', 'test-operation');
      expect(result.type).toBe(SonarQubeErrorType.UNKNOWN_ERROR);
      expect(result.message).toBe('String error');
    });
  });

  describe('withErrorHandling', () => {
    beforeEach(() => {
      vi.clearAllMocks();
      vi.useFakeTimers();
    });

    afterEach(() => {
      vi.useRealTimers();
    });

    it('should return successful result without retry', async () => {
      const apiCall = vi.fn<() => Promise<string>>().mockResolvedValue('success');
      const result = await withErrorHandling('test-operation', apiCall);

      expect(result).toBe('success');
      expect(apiCall).toHaveBeenCalledTimes(1);
    });

    it('should retry on rate limit error', async () => {
      const apiCall = vi
        .fn<() => Promise<string>>()
        .mockRejectedValueOnce(new RateLimitError('Rate limited'))
        .mockRejectedValueOnce(new RateLimitError('Rate limited'))
        .mockResolvedValue('success');

      const promise = withErrorHandling('test-operation', apiCall);

      // Fast-forward through retry delays
      await vi.advanceTimersByTimeAsync(1000);
      await vi.advanceTimersByTimeAsync(2000);

      const result = await promise;

      expect(result).toBe('success');
      expect(apiCall).toHaveBeenCalledTimes(3);
    });

    it('should retry on network error', async () => {
      const apiCall = vi
        .fn<() => Promise<string>>()
        .mockRejectedValueOnce(new NetworkError('Network error'))
        .mockResolvedValue('success');

      const promise = withErrorHandling('test-operation', apiCall);

      await vi.advanceTimersByTimeAsync(1000);

      const result = await promise;

      expect(result).toBe('success');
      expect(apiCall).toHaveBeenCalledTimes(2);
    });

    it('should retry on server error', async () => {
      const apiCall = vi
        .fn<() => Promise<string>>()
        .mockRejectedValueOnce(new ServerError('Server error', 500))
        .mockResolvedValue('success');

      const promise = withErrorHandling('test-operation', apiCall);

      await vi.advanceTimersByTimeAsync(1000);

      const result = await promise;

      expect(result).toBe('success');
      expect(apiCall).toHaveBeenCalledTimes(2);
    });

    it('should not retry on authentication error', async () => {
      const apiCall = vi
        .fn<() => Promise<string>>()
        .mockRejectedValue(new AuthenticationError('Auth failed'));

      await expect(withErrorHandling('test-operation', apiCall)).rejects.toThrow(SonarQubeAPIError);

      expect(apiCall).toHaveBeenCalledTimes(1);
    });

    it('should respect max retries', async () => {
      const apiCall = vi
        .fn<() => Promise<string>>()
        .mockRejectedValue(new RateLimitError('Rate limited'));

      // Run the test with real timers since fake timers are problematic with async/await
      vi.useRealTimers();

      const promise = withErrorHandling('test-operation', apiCall, {
        maxRetries: 3,
        initialDelay: 1,
        maxDelay: 10,
      });

      await expect(promise).rejects.toThrow(SonarQubeAPIError);
      expect(apiCall).toHaveBeenCalledTimes(4); // Initial + 3 retries

      // Restore fake timers for other tests
      vi.useFakeTimers();
    });

    it('should use exponential backoff', async () => {
      // Track delays used
      const delays: number[] = [];

      vi.useRealTimers();

      // Mock setTimeout to capture delays
      const originalSetTimeout = global.setTimeout;
      global.setTimeout = vi.fn((fn: () => void, delay?: number) => {
        if (delay !== undefined) delays.push(delay);
        return originalSetTimeout(fn, 0); // Execute immediately
      }) as unknown as typeof global.setTimeout;

      const apiCall = vi
        .fn<() => Promise<string>>()
        .mockRejectedValue(new RateLimitError('Rate limited'));

      await expect(
        withErrorHandling('test-operation', apiCall, {
          maxRetries: 3,
          initialDelay: 1000,
          maxDelay: 10000,
        })
      ).rejects.toThrow();

      // Verify exponential backoff pattern
      expect(delays).toEqual([1000, 2000, 4000]);
      expect(apiCall).toHaveBeenCalledTimes(4);

      // Restore
      global.setTimeout = originalSetTimeout;
      vi.useFakeTimers();
    });

    it('should respect max delay', async () => {
      // Track delays used
      const delays: number[] = [];

      vi.useRealTimers();

      // Mock setTimeout to capture delays
      const originalSetTimeout = global.setTimeout;
      global.setTimeout = vi.fn((fn: () => void, delay?: number) => {
        if (delay !== undefined) delays.push(delay);
        return originalSetTimeout(fn, 0); // Execute immediately
      }) as unknown as typeof global.setTimeout;

      const apiCall = vi
        .fn<() => Promise<string>>()
        .mockRejectedValue(new RateLimitError('Rate limited'));

      await expect(
        withErrorHandling('test-operation', apiCall, {
          maxRetries: 3,
          initialDelay: 1000,
          maxDelay: 2000,
        })
      ).rejects.toThrow();

      // Verify delays are capped at maxDelay
      expect(delays).toEqual([1000, 2000, 2000]); // 2nd and 3rd retry capped at 2000ms
      expect(apiCall).toHaveBeenCalledTimes(4);

      // Restore
      global.setTimeout = originalSetTimeout;
      vi.useFakeTimers();
    });

    it('should pass through non-SonarQubeClientError unchanged', async () => {
      const customError = new Error('Custom error');
      const apiCall = vi.fn<() => Promise<string>>().mockRejectedValue(customError);

      await expect(withErrorHandling('test-operation', apiCall)).rejects.toThrow(customError);
      expect(apiCall).toHaveBeenCalledTimes(1);
    });
  });

  describe('formatErrorForMCP', () => {
    it('should format authentication error', () => {
      const error = new SonarQubeAPIError('Auth failed', SonarQubeErrorType.AUTHENTICATION_FAILED);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32001);
      expect(result.message).toContain('Auth failed');
    });

    it('should format authorization error', () => {
      const error = new SonarQubeAPIError('Access denied', SonarQubeErrorType.AUTHORIZATION_FAILED);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32002);
    });

    it('should format resource not found error', () => {
      const error = new SonarQubeAPIError('Not found', SonarQubeErrorType.RESOURCE_NOT_FOUND);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32003);
    });

    it('should format rate limit error', () => {
      const error = new SonarQubeAPIError('Rate limited', SonarQubeErrorType.RATE_LIMITED);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32004);
    });

    it('should format network error', () => {
      const error = new SonarQubeAPIError('Network error', SonarQubeErrorType.NETWORK_ERROR);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32005);
    });

    it('should format configuration error', () => {
      const error = new SonarQubeAPIError('Config error', SonarQubeErrorType.CONFIGURATION_ERROR);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32006);
    });

    it('should format validation error', () => {
      const error = new SonarQubeAPIError('Validation error', SonarQubeErrorType.VALIDATION_ERROR);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32007);
    });

    it('should format server error', () => {
      const error = new SonarQubeAPIError('Server error', SonarQubeErrorType.SERVER_ERROR);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32008);
    });

    it('should format unknown error', () => {
      const error = new SonarQubeAPIError('Unknown error', SonarQubeErrorType.UNKNOWN_ERROR);
      const result = formatErrorForMCP(error);

      expect(result.code).toBe(-32000);
    });

    it('should include full error details in message', () => {
      const error = new SonarQubeAPIError('Test error', SonarQubeErrorType.AUTHENTICATION_FAILED, {
        operation: 'test-op',
        solution: 'Test solution',
      });
      const result = formatErrorForMCP(error);

      expect(result.message).toContain('Test error');
      expect(result.message).toContain('test-op');
      expect(result.message).toContain('Test solution');
    });
  });
});

```

--------------------------------------------------------------------------------
/src/__tests__/issue-resolution-elicitation.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, vi } from 'vitest';
import type { Mocked } from 'vitest';
import {
  handleMarkIssueFalsePositive,
  handleMarkIssueWontFix,
  handleMarkIssuesFalsePositive,
  handleMarkIssuesWontFix,
  setElicitationManager,
} from '../handlers/issues.js';
import { ElicitationManager } from '../utils/elicitation.js';
// Mock environment variables
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'http://localhost:9000';
process.env.SONARQUBE_ORGANIZATION = 'test-org';
describe('Issue Resolution with Elicitation', () => {
  let mockElicitationManager: Mocked<ElicitationManager>;
  let mockClient: any;
  beforeEach(() => {
    vi.clearAllMocks();
    // Create mock elicitation manager
    mockElicitationManager = {
      isEnabled: vi.fn(),
      collectResolutionComment: vi.fn(),
      confirmBulkOperation: vi.fn(),
      setServer: vi.fn(),
      getOptions: vi.fn(),
      updateOptions: vi.fn(),
      collectAuthentication: vi.fn(),
      disambiguateSelection: vi.fn(),
    } as unknown as Mocked<ElicitationManager>;
    // Set the mock manager
    setElicitationManager(mockElicitationManager);
    // Create mock client
    mockClient = {
      markIssueFalsePositive: vi.fn(),
      markIssueWontFix: vi.fn(),
      markIssuesFalsePositive: vi.fn(),
      markIssuesWontFix: vi.fn(),
    };
  });
  describe('handleMarkIssueFalsePositive with elicitation', () => {
    it('should collect comment via elicitation when enabled and no comment provided', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectResolutionComment.mockResolvedValue({
        action: 'accept',
        content: { comment: 'Elicited comment for false positive' },
      });
      const mockResponse = {
        issue: { key: 'ISSUE-123', status: 'RESOLVED', resolution: 'FALSE-POSITIVE' },
        components: [],
        rules: [],
        users: [],
      };
      mockClient.markIssueFalsePositive.mockResolvedValue(mockResponse);
      const result = await handleMarkIssueFalsePositive({ issueKey: 'ISSUE-123' }, mockClient);
      expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
      expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalledWith(
        'ISSUE-123',
        'false positive'
      );
      expect(mockClient.markIssueFalsePositive).toHaveBeenCalledWith({
        issueKey: 'ISSUE-123',
        comment: 'Elicited comment for false positive',
      });
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('Issue ISSUE-123 marked as false positive');
    });
    it('should not collect comment when elicitation is disabled', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(false);
      const mockResponse = {
        issue: { key: 'ISSUE-123', status: 'RESOLVED', resolution: 'FALSE-POSITIVE' },
        components: [],
        rules: [],
        users: [],
      };
      mockClient.markIssueFalsePositive.mockResolvedValue(mockResponse);
      await handleMarkIssueFalsePositive({ issueKey: 'ISSUE-123' }, mockClient);
      expect(mockElicitationManager.collectResolutionComment).not.toHaveBeenCalled();
      expect(mockClient.markIssueFalsePositive).toHaveBeenCalledWith({
        issueKey: 'ISSUE-123',
      });
    });
    it('should not collect comment when comment already provided', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      const mockResponse = {
        issue: { key: 'ISSUE-123', status: 'RESOLVED', resolution: 'FALSE-POSITIVE' },
        components: [],
        rules: [],
        users: [],
      };
      mockClient.markIssueFalsePositive.mockResolvedValue(mockResponse);
      await handleMarkIssueFalsePositive(
        { issueKey: 'ISSUE-123', comment: 'Existing comment' },
        mockClient
      );
      expect(mockElicitationManager.collectResolutionComment).not.toHaveBeenCalled();
      expect(mockClient.markIssueFalsePositive).toHaveBeenCalledWith({
        issueKey: 'ISSUE-123',
        comment: 'Existing comment',
      });
    });
    it('should handle elicitation cancellation', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectResolutionComment.mockResolvedValue({
        action: 'cancel',
      });
      const result = await handleMarkIssueFalsePositive({ issueKey: 'ISSUE-123' }, mockClient);
      expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalled();
      expect(mockClient.markIssueFalsePositive).not.toHaveBeenCalled();
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('Operation cancelled by user');
      expect(responseData.issueKey).toBe('ISSUE-123');
    });
    it('should handle elicitation rejection', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectResolutionComment.mockResolvedValue({
        action: 'reject',
      });
      const result = await handleMarkIssueFalsePositive({ issueKey: 'ISSUE-123' }, mockClient);
      expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalled();
      expect(mockClient.markIssueFalsePositive).not.toHaveBeenCalled();
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('Operation cancelled by user');
    });
  });
  describe('handleMarkIssueWontFix with elicitation', () => {
    it('should collect comment via elicitation when enabled and no comment provided', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectResolutionComment.mockResolvedValue({
        action: 'accept',
        content: { comment: "Elicited comment for won't fix" },
      });
      const mockResponse = {
        issue: { key: 'ISSUE-456', status: 'RESOLVED', resolution: 'WONTFIX' },
        components: [],
        rules: [],
        users: [],
      };
      mockClient.markIssueWontFix.mockResolvedValue(mockResponse);
      const result = await handleMarkIssueWontFix({ issueKey: 'ISSUE-456' }, mockClient);
      expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
      expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalledWith(
        'ISSUE-456',
        "won't fix"
      );
      expect(mockClient.markIssueWontFix).toHaveBeenCalledWith({
        issueKey: 'ISSUE-456',
        comment: "Elicited comment for won't fix",
      });
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe("Issue ISSUE-456 marked as won't fix");
    });
    it('should handle elicitation cancellation', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectResolutionComment.mockResolvedValue({
        action: 'cancel',
      });
      const result = await handleMarkIssueWontFix({ issueKey: 'ISSUE-456' }, mockClient);
      expect(mockElicitationManager.collectResolutionComment).toHaveBeenCalled();
      expect(mockClient.markIssueWontFix).not.toHaveBeenCalled();
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('Operation cancelled by user');
      expect(responseData.issueKey).toBe('ISSUE-456');
    });
  });
  describe('handleMarkIssuesFalsePositive with elicitation', () => {
    it('should request confirmation for bulk operations when enabled', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.confirmBulkOperation.mockResolvedValue({
        action: 'accept',
        content: { confirm: true, comment: 'Bulk operation comment' },
      });
      const mockResponses = [
        { issue: { key: 'ISSUE-123' }, components: [], rules: [], users: [] },
        { issue: { key: 'ISSUE-124' }, components: [], rules: [], users: [] },
      ];
      mockClient.markIssuesFalsePositive.mockResolvedValue(mockResponses);
      const result = await handleMarkIssuesFalsePositive(
        { issueKeys: ['ISSUE-123', 'ISSUE-124'] },
        mockClient
      );
      expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
      expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalledWith(
        'mark as false positive',
        2,
        ['ISSUE-123', 'ISSUE-124']
      );
      expect(mockClient.markIssuesFalsePositive).toHaveBeenCalledWith({
        issueKeys: ['ISSUE-123', 'ISSUE-124'],
        comment: 'Bulk operation comment',
      });
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('2 issues marked as false positive');
    });
    it('should not request confirmation when elicitation is disabled', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(false);
      const mockResponses = [
        { issue: { key: 'ISSUE-123' }, components: [], rules: [], users: [] },
        { issue: { key: 'ISSUE-124' }, components: [], rules: [], users: [] },
      ];
      mockClient.markIssuesFalsePositive.mockResolvedValue(mockResponses);
      await handleMarkIssuesFalsePositive({ issueKeys: ['ISSUE-123', 'ISSUE-124'] }, mockClient);
      expect(mockElicitationManager.confirmBulkOperation).not.toHaveBeenCalled();
      expect(mockClient.markIssuesFalsePositive).toHaveBeenCalledWith({
        issueKeys: ['ISSUE-123', 'ISSUE-124'],
      });
    });
    it('should handle bulk operation rejection', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.confirmBulkOperation.mockResolvedValue({
        action: 'reject',
      });
      const result = await handleMarkIssuesFalsePositive(
        { issueKeys: ['ISSUE-123', 'ISSUE-124'] },
        mockClient
      );
      expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalled();
      expect(mockClient.markIssuesFalsePositive).not.toHaveBeenCalled();
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('Bulk operation cancelled by user');
      expect(responseData.issueCount).toBe(2);
    });
    it('should handle bulk operation cancellation', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.confirmBulkOperation.mockResolvedValue({
        action: 'cancel',
      });
      const result = await handleMarkIssuesFalsePositive(
        { issueKeys: ['ISSUE-123', 'ISSUE-124'] },
        mockClient
      );
      expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalled();
      expect(mockClient.markIssuesFalsePositive).not.toHaveBeenCalled();
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('Bulk operation cancelled by user');
      expect(responseData.issueCount).toBe(2);
    });
    it('should not override existing comment with elicited comment', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.confirmBulkOperation.mockResolvedValue({
        action: 'accept',
        content: { confirm: true, comment: 'Elicited comment' },
      });
      const mockResponses = [{ issue: { key: 'ISSUE-123' }, components: [], rules: [], users: [] }];
      mockClient.markIssuesFalsePositive.mockResolvedValue(mockResponses);
      await handleMarkIssuesFalsePositive(
        { issueKeys: ['ISSUE-123'], comment: 'Existing comment' },
        mockClient
      );
      expect(mockClient.markIssuesFalsePositive).toHaveBeenCalledWith({
        issueKeys: ['ISSUE-123'],
        comment: 'Existing comment', // Should keep existing comment
      });
    });
  });
  describe('handleMarkIssuesWontFix with elicitation', () => {
    it('should request confirmation for bulk operations when enabled', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.confirmBulkOperation.mockResolvedValue({
        action: 'accept',
        content: { confirm: true, comment: "Bulk won't fix comment" },
      });
      const mockResponses = [
        { issue: { key: 'ISSUE-456' }, components: [], rules: [], users: [] },
        { issue: { key: 'ISSUE-457' }, components: [], rules: [], users: [] },
      ];
      mockClient.markIssuesWontFix.mockResolvedValue(mockResponses);
      const result = await handleMarkIssuesWontFix(
        { issueKeys: ['ISSUE-456', 'ISSUE-457'] },
        mockClient
      );
      expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
      expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalledWith(
        "mark as won't fix",
        2,
        ['ISSUE-456', 'ISSUE-457']
      );
      expect(mockClient.markIssuesWontFix).toHaveBeenCalledWith({
        issueKeys: ['ISSUE-456', 'ISSUE-457'],
        comment: "Bulk won't fix comment",
      });
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe("2 issues marked as won't fix");
    });
    it('should handle bulk operation rejection', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.confirmBulkOperation.mockResolvedValue({
        action: 'reject',
      });
      const result = await handleMarkIssuesWontFix(
        { issueKeys: ['ISSUE-456', 'ISSUE-457'] },
        mockClient
      );
      expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalled();
      expect(mockClient.markIssuesWontFix).not.toHaveBeenCalled();
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('Bulk operation cancelled by user');
      expect(responseData.issueCount).toBe(2);
    });
    it('should handle bulk operation cancellation', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.confirmBulkOperation.mockResolvedValue({
        action: 'cancel',
      });
      const result = await handleMarkIssuesWontFix(
        { issueKeys: ['ISSUE-456', 'ISSUE-457'] },
        mockClient
      );
      expect(mockElicitationManager.confirmBulkOperation).toHaveBeenCalled();
      expect(mockClient.markIssuesWontFix).not.toHaveBeenCalled();
      const responseData = JSON.parse(result.content[0]!.text as string);
      expect(responseData.message).toBe('Bulk operation cancelled by user');
      expect(responseData.issueCount).toBe(2);
    });
  });
});

```

--------------------------------------------------------------------------------
/docs/architecture/decisions/0026-circuit-breaker-pattern-with-opossum.md:
--------------------------------------------------------------------------------

```markdown
# 26. Circuit Breaker Pattern with opossum

Date: 2025-10-11

## Status

Accepted

## Context

The SonarQube MCP Server integrates with external services (SonarQube API, SonarCloud API) that may experience temporary outages, slow responses, or intermittent failures. Without protective mechanisms, cascading failures can occur:

- **Cascading Failures**: Failed external calls can cause the entire service to become unresponsive
- **Resource Exhaustion**: Hanging requests consume threads, memory, and connections
- **Slow Response Times**: Timeouts accumulate, degrading user experience
- **No Fault Isolation**: Failures in one service affect the entire application
- **Poor Observability**: Difficult to track and diagnose external service issues

Traditional retry mechanisms alone don't solve these problems:

- Simple retries amplify load on failing services
- Exponential backoff helps but doesn't prevent cascading failures
- No mechanism to "fail fast" when a service is known to be down

The Circuit Breaker pattern addresses these issues by:

1. Monitoring external service health
2. Failing fast when a service is unhealthy
3. Automatically recovering when the service becomes healthy
4. Providing observability into service health
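
For readers new to the pattern, a deliberately minimal sketch shows the mechanics. It trips on a simple failure count (opossum instead uses a percentage threshold over a rolling window), and all names here are illustrative:

```typescript
type State = 'CLOSED' | 'OPEN' | 'HALF_OPEN';

class MiniBreaker<T> {
  private state: State = 'CLOSED';
  private failures = 0;
  private openedAt = 0;

  constructor(
    private readonly fn: () => Promise<T>,
    private readonly failureThreshold = 5,
    private readonly resetTimeoutMs = 30_000
  ) {}

  async fire(): Promise<T> {
    if (this.state === 'OPEN') {
      if (Date.now() - this.openedAt < this.resetTimeoutMs) {
        throw new Error('Breaker is open'); // Fail fast: the service is not called
      }
      this.state = 'HALF_OPEN'; // Reset timeout elapsed: allow a trial request
    }
    try {
      const result = await this.fn();
      this.state = 'CLOSED'; // Success closes the circuit
      this.failures = 0;
      return result;
    } catch (error) {
      this.failures += 1;
      if (this.state === 'HALF_OPEN' || this.failures >= this.failureThreshold) {
        this.state = 'OPEN'; // Trip: subsequent calls fail fast until the reset timeout
        this.openedAt = Date.now();
      }
      throw error;
    }
  }
}
```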

Library options considered:

- **opossum**: Battle-tested Node.js library, rich features, good TypeScript support
- **cockatiel**: Modern, TypeScript-first, but less mature ecosystem
- **brakes**: Simpler but less actively maintained
- **Custom implementation**: Full control but significant development and testing effort

## Decision

We will use **opossum** as the circuit breaker library, wrapped in a factory pattern for consistent configuration and monitoring integration.

### Core Architecture

**CircuitBreakerFactory** (`src/monitoring/circuit-breaker.ts`):

- Singleton pattern for managing circuit breakers
- Consistent configuration across all breakers
- Integrated metrics and logging
- Type-safe wrapper functions

### Configuration

**Default Circuit Breaker Settings**:

```typescript
{
  timeout: 10000,                    // 10 seconds - fail if request exceeds
  errorThresholdPercentage: 50,      // Open circuit if 50% of requests fail
  resetTimeout: 30000,                // 30 seconds - try again after this period
  rollingCountTimeout: 10000,         // 10 second rolling window for stats
  rollingCountBuckets: 10,            // 10 buckets (1 second each)
  volumeThreshold: 5                  // Minimum 5 requests before triggering
}
```
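
A sketch of the factory's core, merging these defaults with per-breaker overrides and caching breakers by name (shown as a free function for brevity; the real implementation is the singleton class in `src/monitoring/circuit-breaker.ts`):

```typescript
import CircuitBreaker from 'opossum';

const DEFAULT_OPTIONS: CircuitBreaker.Options = {
  timeout: 10000,
  errorThresholdPercentage: 50,
  resetTimeout: 30000,
  rollingCountTimeout: 10000,
  rollingCountBuckets: 10,
  volumeThreshold: 5,
};

const breakers = new Map<string, CircuitBreaker>();

export function getBreaker<TArgs extends unknown[], TReturn>(
  name: string,
  fn: (...args: TArgs) => Promise<TReturn>,
  options?: CircuitBreaker.Options
): CircuitBreaker<TArgs, TReturn> {
  const existing = breakers.get(name) as CircuitBreaker<TArgs, TReturn> | undefined;
  if (existing) {
    return existing; // Same name always returns the same breaker (shared state)
  }
  // Per-breaker overrides win over project-wide defaults
  const breaker = new CircuitBreaker(fn, { ...DEFAULT_OPTIONS, ...options, name });
  breakers.set(name, breaker as CircuitBreaker);
  return breaker;
}
```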

**Circuit States**:

1. **CLOSED** (normal): Requests pass through
2. **OPEN** (failing): Requests immediately rejected (fail fast)
3. **HALF_OPEN** (testing): Allow one request to test if service recovered

### Usage Patterns

#### 1. Factory Pattern (Recommended)

```typescript
import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';

const breaker = CircuitBreakerFactory.getBreaker(
  'sonarqube-api',
  async (projectKey: string) => {
    return await sonarqubeClient.getProject(projectKey);
  },
  {
    timeout: 15000, // Custom timeout for this operation
    volumeThreshold: 10,
  }
);

// Use the breaker
const result = await breaker.fire('my-project-key');
```

#### 2. Function Wrapper Pattern

```typescript
import { wrapWithCircuitBreaker } from './monitoring/circuit-breaker.js';

const getProjectWithCircuitBreaker = wrapWithCircuitBreaker(
  'get-project',
  async (projectKey: string) => {
    return await sonarqubeClient.getProject(projectKey);
  }
);

// Use the wrapped function
const result = await getProjectWithCircuitBreaker('my-project-key');
```

#### 3. Decorator Pattern (Method-level)

```typescript
import { withCircuitBreaker } from './monitoring/circuit-breaker.js';

class SonarQubeService {
  @withCircuitBreaker('sonarqube-service', { timeout: 15000 })
  async getProject(projectKey: string) {
    return await this.client.getProject(projectKey);
  }
}
```

### Event-Driven Monitoring

Circuit breakers emit events for observability:

**State Change Events**:

- `open`: Circuit opened due to failure threshold
- `close`: Circuit closed (recovered)
- `halfOpen`: Circuit testing recovery

**Request Events**:

- `success`: Request succeeded
- `failure`: Request failed
- `timeout`: Request timed out
- `reject`: Request rejected (circuit open)

**Metrics Integration**:

```typescript
breaker.on('open', () => {
  updateCircuitBreakerMetrics(name, 'open');
  logger.warn('Circuit breaker opened', { name });
});

breaker.on('failure', (error) => {
  trackCircuitBreakerFailure(name);
  logger.debug('Request failed', { name, error: error.message });
});
```

### Error Filtering

Custom error filtering for selective circuit breaking:

```typescript
const breaker = CircuitBreakerFactory.getBreaker('sonarqube-api', fetchFunction, {
  errorFilter: (error) => {
    // Don't count 404s as failures
    if (error.message.includes('404')) return false;

    // Don't count authentication errors
    if (error.message.includes('401')) return false;

    // Count all other errors
    return true;
  },
});
```

## Consequences

### Positive

- **Cascading Failure Prevention**: Failed services don't bring down the entire application
- **Fail Fast**: Immediate rejection when service is down (no waiting for timeouts)
- **Automatic Recovery**: Circuit automatically tests and recovers when service is healthy
- **Resource Protection**: Prevents resource exhaustion from hanging requests
- **Observability**: Rich metrics and events for monitoring external service health
- **Consistent Configuration**: Factory pattern ensures uniform settings
- **Type Safety**: TypeScript generics provide type-safe circuit breaker calls
- **Flexible Usage**: Multiple patterns (factory, wrapper, decorator) for different use cases
- **Metrics Integration**: Built-in integration with monitoring system
- **Battle-Tested**: opossum is production-proven with years of usage
- **Selective Breaking**: Error filtering allows fine-grained control

### Negative

- **Complexity**: Adds another layer of abstraction and configuration
- **False Positives**: Circuit may open due to temporary network blips
- **Configuration Overhead**: Need to tune parameters for each service
- **Delayed Recovery**: The reset timeout delays recovery even when the service recovers immediately
- **Testing Complexity**: Need to test circuit breaker behavior in unit/integration tests
- **Dependency**: Adds opossum as a runtime dependency
- **State Management**: Circuit breaker state is in-memory (not shared across instances)

### Neutral

- **Performance Overhead**: Minimal overhead for healthy services (< 1ms)
- **Memory Usage**: Small memory footprint for state tracking
- **Learning Curve**: Team needs to understand circuit breaker pattern
- **Error Handling**: Need to handle circuit breaker exceptions separately from service errors

## Implementation

### Installation

```bash
pnpm add opossum
pnpm add -D @types/opossum
```

### Basic Usage Example

```typescript
import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';

// Create a circuit breaker for SonarQube API calls
const breaker = CircuitBreakerFactory.getBreaker(
  'sonarqube-issues-search',
  async (projectKey: string, severity: string) => {
    // This function will be protected by the circuit breaker
    const response = await fetch(
      `${baseUrl}/api/issues/search?projectKeys=${projectKey}&severities=${severity}`,
      { signal: AbortSignal.timeout(10000) }
    );

    if (!response.ok) {
      throw new Error(`SonarQube API returned ${response.status}`);
    }

    return response.json();
  },
  {
    timeout: 15000, // 15 second timeout
    errorThresholdPercentage: 40, // Open at 40% failure rate
    volumeThreshold: 10, // Need 10 requests before circuit can open
  }
);

// Use the circuit breaker
try {
  const issues = await breaker.fire('my-project', 'CRITICAL');
  console.log('Found issues:', issues.total);
} catch (error) {
  // `error` is `unknown` under strict TypeScript, so narrow before reading `.message`
  const message = error instanceof Error ? error.message : String(error);
  if (message.toLowerCase().includes('breaker is open')) {
    // Circuit is open - service is known to be failing
    console.error('SonarQube API is currently unavailable');
  } else {
    // Individual request failed
    console.error('Request failed:', message);
  }
}
```

### Advanced Configuration

```typescript
const breaker = CircuitBreakerFactory.getBreaker('sonarqube-quality-gates', fetchQualityGate, {
  // Timing
  timeout: 20000, // 20 second timeout
  resetTimeout: 60000, // Try recovery after 60 seconds

  // Failure thresholds
  errorThresholdPercentage: 30, // Open at 30% failure rate
  volumeThreshold: 20, // Need 20 requests minimum

  // Rolling window
  rollingCountTimeout: 20000, // 20 second rolling window
  rollingCountBuckets: 20, // 20 buckets (1 second each)

  // Error filtering
  errorFilter: (error: Error) => {
    // Don't count 404 as failures
    return !error.message.includes('404');
  },
});
```

### Monitoring Circuit Breaker Health

```typescript
import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';

// Get statistics for a specific circuit breaker
const stats = CircuitBreakerFactory.getStats('sonarqube-api');

if (stats) {
  console.log({
    successCount: stats.successes,
    failureCount: stats.failures,
    rejectedCount: stats.rejects,
    timeoutCount: stats.timeouts,
    averageResponseTime: stats.latencyMean,
    percentiles: {
      p50: stats.percentiles[50],
      p95: stats.percentiles[95],
      p99: stats.percentiles[99],
    },
  });
}

// Get all circuit breakers
const allBreakers = CircuitBreakerFactory.getAllBreakers();
for (const [name, breaker] of allBreakers) {
  console.log(`${name}: ${breaker.opened ? 'OPEN' : 'CLOSED'}`);
}
```

### Testing with Circuit Breakers

```typescript
import { describe, it, expect, beforeEach } from 'vitest';
import { CircuitBreakerFactory } from './circuit-breaker.js';

describe('Circuit Breaker', () => {
  beforeEach(() => {
    // Reset circuit breakers between tests
    CircuitBreakerFactory.reset();
  });

  it('should open circuit after failure threshold', async () => {
    const failingFunction = async () => {
      throw new Error('Service unavailable');
    };

    const breaker = CircuitBreakerFactory.getBreaker('test-service', failingFunction, {
      errorThresholdPercentage: 50,
      volumeThreshold: 3,
      timeout: 1000,
    });

    // Trigger failures to open circuit
    for (let i = 0; i < 5; i++) {
      try {
        await breaker.fire();
      } catch {
        // Expected to fail
      }
    }

    // Circuit should now be open
    expect(breaker.opened).toBe(true);

    // Next request should be rejected immediately
    await expect(breaker.fire()).rejects.toThrow(/breaker is open/i);
  });
});
```

### Graceful Shutdown

```typescript
import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';

// On application shutdown
process.on('SIGTERM', () => {
  console.log('Shutting down circuit breakers...');
  CircuitBreakerFactory.shutdown();
  console.log('Circuit breakers shut down');
  process.exit(0);
});
```

## State Machine Diagram

```
                     ┌────────────────────────┐
                     │                        │
                     │       CLOSED           │
                     │  (Normal Operation)    │
                     │                        │
                     └───────────┬────────────┘
                                 │
                                 │ Failure threshold
                                 │ exceeded
                                 ↓
                     ┌────────────────────────┐
                     │                        │
                     │        OPEN            │
                     │   (Fail Fast Mode)     │
                     │                        │
                     └───────────┬────────────┘
                                 │
                                 │ Reset timeout
                                 │ elapsed
                                 ↓
                     ┌────────────────────────┐
                     │                        │
                     │      HALF_OPEN         │
                     │   (Testing Recovery)   │
                     │                        │
                     └───┬───────────┬────────┘
                         │           │
                         │ Success   │ Failure
                         ↓           ↓
                       CLOSED       OPEN
```

## Examples

### Example 1: SonarQube API Integration

```typescript
import { wrapWithCircuitBreaker } from './monitoring/circuit-breaker.js';
import { SonarQubeClient } from './sonarqube-client.js';

const client = new SonarQubeClient(config);

// Wrap API calls with circuit breaker
export const searchIssues = wrapWithCircuitBreaker(
  'sonarqube.searchIssues',
  async (params: IssueSearchParams) => {
    return await client.issues.search(params);
  },
  {
    timeout: 15000,
    errorThresholdPercentage: 40,
  }
);

export const getProject = wrapWithCircuitBreaker(
  'sonarqube.getProject',
  async (key: string) => {
    return await client.projects.get(key);
  },
  {
    timeout: 10000,
    errorThresholdPercentage: 50,
  }
);
```

### Example 2: Handling Circuit Breaker States

```typescript
import { CircuitBreakerFactory } from './monitoring/circuit-breaker.js';

async function fetchWithFallback(projectKey: string) {
  const breaker = CircuitBreakerFactory.getBreaker('sonarqube-api', async (key: string) => {
    return await sonarqubeClient.getProject(key);
  });

  try {
    return await breaker.fire(projectKey);
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    if (message.toLowerCase().includes('breaker is open')) {
      // Circuit is open - return cached data or default
      console.warn('Circuit breaker is open, using fallback');
      return getCachedProject(projectKey);
    }

    // Other error - propagate
    throw error;
  }
}
```

### Example 3: Custom Error Handling

```typescript
const breaker = CircuitBreakerFactory.getBreaker('sonarqube-with-filter', fetchData, {
  errorFilter: (error: Error) => {
    // Don't count 404 (not found) as a failure
    if (error.message.includes('404')) {
      return false;
    }

    // Don't count 401/403 (auth errors) as failures
    if (error.message.includes('401') || error.message.includes('403')) {
      return false;
    }

    // Count 500-level errors and timeouts
    return true;
  },
});
```

## References

- opossum Documentation: https://nodeshift.dev/opossum/
- opossum GitHub: https://github.com/nodeshift/opossum
- Circuit Breaker Pattern: https://martinfowler.com/bliki/CircuitBreaker.html
- Implementation: src/monitoring/circuit-breaker.ts
- Tests: src/monitoring/__tests__/circuit-breaker.test.ts
- Related ADR: ADR-0018 (Comprehensive Monitoring and Observability)
- Metrics Integration: src/monitoring/metrics.ts

```

--------------------------------------------------------------------------------
/docs/architecture/decisions/0024-ci-cd-platform-github-actions.md:
--------------------------------------------------------------------------------

```markdown
# 24. CI/CD Platform: GitHub Actions

Date: 2025-10-11

## Status

Accepted

## Context

The SonarQube MCP Server requires a robust CI/CD platform to automate testing, building, security scanning, and releasing. The platform must:

- Provide fast feedback on pull requests (< 5 minutes for validation)
- Run comprehensive quality checks before merging
- Automate semantic versioning and releases
- Support multi-platform Docker image builds
- Publish to multiple registries (NPM, GitHub Packages, Docker Hub, GHCR)
- Integrate with security scanning tools
- Generate supply chain security attestations (SLSA provenance)
- Prevent concurrent releases (avoid race conditions)
- Support reusable workflows to reduce duplication

Platform options considered:

- **GitHub Actions**: Native GitHub integration, generous free tier for open source, mature ecosystem
- **CircleCI**: Good parallelization but costs for private repos, less GitHub integration
- **Travis CI**: Declining support, slower builds
- **Jenkins**: Self-hosted, more complex setup and maintenance
- **GitLab CI**: Requires GitLab hosting, less integration with GitHub ecosystem

## Decision

We will use **GitHub Actions** as the exclusive CI/CD platform for this project.

### Workflow Architecture

The CI/CD pipeline consists of seven workflow files organized into two categories:

#### 1. Primary Workflows (User-Facing)

**`.github/workflows/main.yml`** - Main Branch Release Pipeline

- **Trigger**: Push to `main` branch
- **Purpose**: Automated releases after merge
- **Jobs**:
  1. `validate`: Run all quality checks (reusable workflow)
  2. `security`: Run security scans (reusable workflow)
  3. `build`: Build TypeScript, version bump, create tag
  4. `docker`: Build multi-platform Docker image and push it to GHCR
  5. `npm`: Package NPM tarball with attestations
  6. `create-release`: Create GitHub release with artifacts
- **Concurrency**: `cancel-in-progress: false` (ensures releases complete)
- **Permissions**: Elevated (write to releases, packages, security events)

**`.github/workflows/pr.yml`** - Pull Request Validation

- **Trigger**: Pull request open/sync to any branch
- **Purpose**: Fast feedback on code quality
- **Jobs**:
  1. `validate`: Run all quality checks (reusable workflow)
  2. `security`: Run security scans (reusable workflow)
- **Concurrency**: `cancel-in-progress: true` (cancel outdated runs on new push)
- **Permissions**: Read-only (security)

**`.github/workflows/publish.yml`** - Multi-Registry Publishing

- **Trigger**: GitHub release created
- **Purpose**: Publish artifacts to public registries
- **Jobs**:
  1. `npm`: Publish to NPM with provenance
  2. `github-packages`: Publish to GitHub Packages
  3. `docker`: Copy GHCR image to Docker Hub (multi-platform manifest)
- **Concurrency**: `cancel-in-progress: false` (ensures publish completes)
- **Permissions**: Read-only (uses secrets for publishing)

**`.github/workflows/codeql.yml`** - CodeQL Security Analysis

- **Trigger**: Push to `main`, pull requests, schedule (weekly)
- **Purpose**: SAST (Static Application Security Testing)
- **Language**: JavaScript/TypeScript
- **Permissions**: Security events write

#### 2. Reusable Workflows (Composable Building Blocks)

**`.github/workflows/reusable-validate.yml`** - Quality Validation Suite

- **Inputs**: `pnpm-version` (default: 10.17.0)
- **Secrets**: `SONAR_TOKEN` (for SonarCloud)
- **Jobs**: Runs in parallel:
  1. Dependency audit (critical vulnerabilities only)
  2. Type checking (`pnpm typecheck`)
  3. Linting (`pnpm lint`, workflows, markdown, YAML)
  4. Format checking (`pnpm format`)
  5. Tests with coverage (`pnpm test`, 80% threshold)
  6. SonarCloud analysis
- **Strategy**: Fail-fast disabled (shows all errors)

**`.github/workflows/reusable-security.yml`** - Security Scanning Suite

- **Inputs**: `pnpm-version` (default: 10.17.0)
- **Jobs**: Runs in parallel:
  1. CodeQL analysis (JavaScript/TypeScript SAST)
  2. OSV-Scanner (vulnerability detection)
  3. Build validation (ensures code compiles)
- **Permissions**: Security events write

**`.github/workflows/reusable-docker.yml`** - Multi-Platform Docker Build

- **Inputs**:
  - `platforms`: Target platforms (default: `linux/amd64,linux/arm64`)
  - `save-artifact`: Save image as artifact (true/false)
  - `artifact-name`: Name for saved artifact
  - `image-name`: Docker image name
  - `version`: Image version tag
  - `tag_sha`: Git commit SHA for tagging
  - `build_artifact`: Build artifact name to download
- **Outputs**: Image tags and digests
- **Features**:
  - Uses Docker Buildx for multi-platform builds
  - QEMU for ARM64 emulation
  - Caches layers in GitHub Actions cache
  - Generates SBOM and SLSA attestations
  - Pushes to GHCR with multiple tags

### Key Architectural Patterns

#### 1. Reusable Workflow Pattern

Benefits:

- **DRY Principle**: Define once, use in multiple workflows
- **Consistency**: Same validation in PR and main workflows
- **Maintainability**: Update validation logic in one place
- **Testability**: Can test reusable workflows in isolation

Example usage:

```yaml
jobs:
  validate:
    uses: ./.github/workflows/reusable-validate.yml
    secrets:
      SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
```

#### 2. Parallel Execution Strategy

All quality checks run in parallel to minimize CI time:

- Type checking, linting, testing run concurrently
- Security scans run in parallel with validation
- Total validation time: ~3-4 minutes (vs ~10-15 minutes sequentially)
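
This relies on GitHub Actions semantics: jobs in one workflow run concurrently by default, and only jobs linked via `needs` are serialized. A trimmed illustration (job names hypothetical; checkout and pnpm setup steps elided):

```yaml
jobs:
  typecheck: # no `needs`, so these three jobs start together
    runs-on: ubuntu-latest
    steps:
      - run: pnpm typecheck
  lint:
    runs-on: ubuntu-latest
    steps:
      - run: pnpm lint
  test:
    runs-on: ubuntu-latest
    steps:
      - run: pnpm test
```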

#### 3. Unified Build Artifact

The main workflow builds once and shares artifacts:

```yaml
build:
  outputs:
    artifact-name: dist-${{ github.sha }}
  steps:
    - name: Build TypeScript
      run: pnpm build
    - name: Upload build artifact
      uses: actions/upload-artifact@v4

docker:
  needs: [build]
  steps:
    - name: Download build artifact
      uses: actions/download-artifact@v4
      with:
        name: ${{ needs.build.outputs.artifact-name }}
```

Benefits:

- Consistent artifacts across jobs
- Faster pipeline (build once, use multiple times)
- Reduced risk of build inconsistencies

#### 4. Supply Chain Security

Every release includes:

- **SLSA Build Provenance**: Attestations for all artifacts
- **SBOM**: Software Bill of Materials (CycloneDX format)
- **Signature Verification**: GitHub attestations for provenance

```yaml
- name: Generate attestations
  uses: actions/attest-build-provenance@v2
  with:
    subject-path: |
      dist/**/*.js
      sbom.cdx.json
      *.tgz
```

#### 5. Permission Model

**Principle of Least Privilege**:

- PR workflows: Read-only (security events only)
- Main workflow: Write permissions for releases and packages
- Publish workflow: No write to GitHub, uses external secrets

Example:

```yaml
permissions:
  contents: write # Create releases and tags
  id-token: write # Generate SLSA attestations
  attestations: write # Attach attestations
  security-events: write # Upload security scan results
  actions: read # Access workflow artifacts
  packages: write # Push Docker images to GHCR
```

#### 6. Concurrency Control

**Main workflow** (`cancel-in-progress: false`):

```yaml
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false # Let releases complete
```

**PR workflow** (`cancel-in-progress: true`):

```yaml
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true # Cancel outdated validations
```

This prevents:

- Race conditions during version bumps and releases
- Wasted CI time on outdated PR pushes
- Conflicting Git commits to main branch

#### 7. Workflow Skipping

Version bump commits include `[skip actions]` to prevent recursion:

```bash
git commit -m "chore(release): v$VERSION [skip actions]"
```

This prevents the main workflow from re-running after version commits.

## Consequences

### Positive

- **Native GitHub Integration**: Seamless integration with GitHub features (releases, packages, security)
- **Free for Open Source**: No cost for public repositories
- **Parallel Execution**: 3-4 minute validation vs 10-15 sequential
- **Reusable Workflows**: DRY principle applied to CI/CD
- **Supply Chain Security**: Built-in attestation and SBOM generation
- **Multi-Platform Builds**: Docker Buildx support for ARM64 and AMD64
- **Artifact Sharing**: Build once, use in multiple jobs
- **Concurrency Control**: Prevents race conditions and wasted runs
- **Security Scanning**: Integrated CodeQL, OSV-Scanner, Trivy
- **Rich Ecosystem**: Large marketplace of actions
- **Matrix Builds**: Support for testing multiple versions (if needed)

### Negative

- **Vendor Lock-in**: Heavily tied to GitHub platform
- **Learning Curve**: YAML syntax and workflow composition can be complex
- **Debugging Difficulty**: Cannot run workflows locally (need act or similar)
- **Rate Limits**: API rate limits for artifacts and packages
- **Build Time**: Slower than some alternatives (CircleCI, Buildkite)
- **Secret Management**: Limited secret organization (no folders/namespaces)
- **Workflow File Size**: Large workflows can be hard to navigate
- **Action Versioning**: Need to maintain action versions across workflows

### Neutral

- **YAML Configuration**: Human-readable but verbose
- **Marketplace Quality**: Third-party actions vary in quality and maintenance
- **Caching Strategy**: Need to carefully design cache keys
- **Artifact Retention**: Default 90 days, costs for long-term storage

## Implementation

### Setup Requirements

1. **Repository Secrets** (Settings → Secrets → Actions):
   - `RELEASE_TOKEN`: Personal Access Token with repo write
   - `SONAR_TOKEN`: SonarCloud authentication token
   - `NPM_TOKEN`: NPM registry publish token
   - `DOCKERHUB_USERNAME`: Docker Hub username
   - `DOCKERHUB_TOKEN`: Docker Hub access token

2. **Repository Variables** (Settings → Variables → Actions):
   - `ENABLE_DOCKER_RELEASE`: Set to 'true' to enable Docker releases
   - `ENABLE_NPM_RELEASE`: Set to 'true' to enable NPM releases

3. **Branch Protection Rules** (Settings → Branches → main):
   - Require status checks: `validate`, `security`
   - Require branches to be up to date before merging
   - Require linear history
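
For the secrets and variables in items 1 and 2, the `gh` CLI offers a scriptable alternative to the web UI (`gh secret set` prompts for the value unless one is piped in):

```bash
gh secret set SONAR_TOKEN
gh secret set NPM_TOKEN
gh variable set ENABLE_DOCKER_RELEASE --body "true"
gh variable set ENABLE_NPM_RELEASE --body "true"
```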

### Common Workflow Patterns

**Installing pnpm consistently**:

```yaml
- name: Install pnpm
  uses: pnpm/action-setup@v4
  with:
    version: 10.17.0
    run_install: false
    standalone: true

- name: Setup Node.js
  uses: actions/setup-node@v4
  with:
    node-version: 22
    cache: pnpm

- name: Install dependencies
  run: pnpm install --frozen-lockfile
```

**Running validation checks**:

```yaml
- name: Audit dependencies
  run: pnpm audit --audit-level critical

- name: Type check
  run: pnpm typecheck

- name: Lint
  run: pnpm lint

- name: Test
  run: pnpm test
```

**Conditional job execution**:

```yaml
docker:
  needs: [build]
  if: vars.ENABLE_DOCKER_RELEASE == 'true' && needs.build.outputs.changed == 'true'
```

### Workflow Validation

Lint workflow files locally:

```bash
pnpm lint:workflows  # Uses actionlint
```

### Monitoring and Debugging

**Check workflow status**:

```bash
gh run list --limit 10
gh run view <run-id>
gh run watch <run-id>
```

**View logs**:

```bash
gh run view <run-id> --log
gh run view <run-id> --log-failed  # Only failed steps
```

**Re-run workflow**:

```bash
gh run rerun <run-id>
gh run rerun <run-id> --failed  # Only failed jobs
```

## Examples

### Example 1: Pull Request Flow

Developer opens PR:

```
PR opened → pr.yml triggers
├─ validate job (reusable-validate.yml)
│  ├─ audit (parallel)
│  ├─ typecheck (parallel)
│  ├─ lint (parallel)
│  ├─ format (parallel)
│  ├─ test (parallel)
│  └─ sonarcloud (parallel)
└─ security job (reusable-security.yml)
   ├─ codeql (parallel)
   ├─ osv-scanner (parallel)
   └─ build-check (parallel)

Total time: ~3-4 minutes
Status: ✅ All checks passed
```

### Example 2: Main Branch Release Flow

PR merged to main:

```
Push to main → main.yml triggers
├─ validate job (reusable) → ✅
├─ security job (reusable) → ✅
├─ build job
│  ├─ Version packages (changeset)
│  ├─ Commit version bump [skip actions]
│  ├─ Create tag v1.11.0
│  ├─ Build TypeScript
│  └─ Upload artifact dist-abc1234
├─ docker job (needs: build)
│  ├─ Download artifact dist-abc1234
│  ├─ Build linux/amd64,linux/arm64
│  └─ Push to ghcr.io with attestations
├─ npm job (needs: build)
│  ├─ Download artifact dist-abc1234
│  ├─ Create NPM package tarball
│  └─ Upload artifact with attestations
└─ create-release job (needs: build, docker, npm)
   ├─ Generate SBOM
   ├─ Create tar.gz and zip archives
   ├─ Generate attestations for all artifacts
   └─ Create GitHub release v1.11.0

Release created → publish.yml triggers
├─ npm job → Publish to NPM ✅
├─ github-packages job → Publish to GitHub Packages ✅
└─ docker job → Copy GHCR to Docker Hub ✅

Total time: ~10-12 minutes
Result: Version 1.11.0 published to all registries
```

### Example 3: Security Scanning

Push triggers security scanning:

```
reusable-security.yml
├─ CodeQL
│  ├─ Initialize CodeQL database
│  ├─ Autobuild TypeScript
│  ├─ Analyze for security vulnerabilities
│  └─ Upload results to Security tab
├─ OSV-Scanner
│  ├─ Scan pnpm-lock.yaml for vulnerabilities
│  ├─ Generate SARIF report
│  └─ Upload to Security tab
└─ Build Check
   ├─ Install dependencies
   ├─ Build TypeScript
   └─ Verify no build errors

Results visible in: Security → Code scanning alerts
```

## Workflow Diagram

```
Pull Request         Main Branch                Release Created
     │                   │                            │
     ├─→ pr.yml         ├─→ main.yml                ├─→ publish.yml
     │   │               │   │                        │   │
     │   ├─ validate     │   ├─ validate             │   ├─ npm → NPM
     │   └─ security     │   ├─ security             │   ├─ github-packages
     │                   │   ├─ build                │   └─ docker → Docker Hub
     │                   │   │  ├─ version           │
     │                   │   │  ├─ build             │
     │                   │   │  └─ tag               │
     │                   │   ├─ docker → GHCR        │
     │                   │   ├─ npm                  │
     │                   │   └─ create-release       │
     │                   │       └─ trigger ─────────┘
     │                   │
     ↓                   ↓
Status checks      GitHub Release
on PR             with artifacts
```

## References

- GitHub Actions Documentation: https://docs.github.com/en/actions
- Reusable Workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows
- SLSA Attestations: https://slsa.dev/
- Docker Buildx: https://docs.docker.com/buildx/
- Workflow Files: `.github/workflows/`
- Workflow Linting: `pnpm lint:workflows` (actionlint)
- SonarCloud: https://sonarcloud.io/project/overview?id=sonarqube-mcp-server

```

--------------------------------------------------------------------------------
/src/domains/issues.ts:
--------------------------------------------------------------------------------

```typescript
import type {
  IssuesParams,
  SonarQubeIssuesResult,
  SonarQubeIssue,
  SonarQubeRule,
  SonarQubeIssueComment,
  MarkIssueFalsePositiveParams,
  MarkIssueWontFixParams,
  BulkIssueMarkParams,
  AddCommentToIssueParams,
  AssignIssueParams,
  ConfirmIssueParams,
  UnconfirmIssueParams,
  ResolveIssueParams,
  ReopenIssueParams,
  DoTransitionResponse,
} from '../types/index.js';
import { BaseDomain } from './base.js';

// Type aliases for sonarqube-web-api-client enums (not exported by the library)
type OwaspTop10Category = 'a1' | 'a2' | 'a3' | 'a4' | 'a5' | 'a6' | 'a7' | 'a8' | 'a9' | 'a10';
type OwaspTop10v2021Category = 'a1' | 'a2' | 'a3' | 'a4' | 'a5' | 'a6' | 'a7' | 'a8' | 'a9' | 'a10';
type SansTop25Category = 'insecure-interaction' | 'risky-resource' | 'porous-defenses';
type IssueFacet =
  | 'severities'
  | 'statuses'
  | 'resolutions'
  | 'rules'
  | 'tags'
  | 'types'
  | 'author'
  | 'authors'
  | 'assignees'
  | 'assigned_to_me'
  | 'languages'
  | 'projects'
  | 'directories'
  | 'files'
  | 'cwe'
  | 'createdAt'
  | 'owaspTop10'
  | 'owaspTop10-2021'
  | 'owaspAsvs-4.0'
  | 'owaspMobileTop10-2024'
  | 'pciDss-3.2'
  | 'pciDss-4.0'
  | 'sansTop25'
  | 'sonarsourceSecurity'
  | 'stig-ASD_V5R3'
  | 'casa'
  | 'codeVariants'
  | 'cleanCodeAttributeCategories'
  | 'impactSeverities'
  | 'impactSoftwareQualities'
  | 'issueStatuses'
  | 'prioritizedRule'
  | 'scopes';

/**
 * Domain module for issues-related operations
 */
export class IssuesDomain extends BaseDomain {
  /**
   * Gets issues for a project in SonarQube
   * @param params Parameters including project key, severity, pagination and organization
   * @returns Promise with the list of issues
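   * @example
   * // Hypothetical usage, assuming a constructed IssuesDomain instance:
   * // const result = await issuesDomain.getIssues({ projectKey: 'my-project', pageSize: 50 });
   * // console.log(`Found ${result.paging.total} issues`);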
   */
  async getIssues(params: IssuesParams): Promise<SonarQubeIssuesResult> {
    const { page, pageSize } = params;
    const builder = this.webApiClient.issues.search();

    // Apply all filters using helper methods
    this.applyComponentFilters(builder, params);
    this.applyIssueFilters(builder, params);
    this.applyDateAndAssignmentFilters(builder, params);
    this.applySecurityAndMetadataFilters(builder, params);

    // Add pagination
    if (page !== undefined) {
      builder.page(page);
    }
    if (pageSize !== undefined) {
      builder.pageSize(pageSize);
    }

    const response = await builder.execute();

    // Transform to our interface
    return {
      issues: response.issues as SonarQubeIssue[],
      components: (response.components ?? []).map((comp) => ({
        key: comp.key,
        name: comp.name,
        qualifier: comp.qualifier,
        enabled: comp.enabled,
        longName: comp.longName,
        path: comp.path,
      })),
      rules: (response.rules ?? []) as SonarQubeRule[],
      users: response.users,
      facets: response.facets,
      paging: response.paging ?? { pageIndex: 1, pageSize: 100, total: 0 },
    };
  }

  /**
   * Apply component-related filters to the issues search builder
   * @param builder The search builder
   * @param params The issues parameters
   */
  private applyComponentFilters(
    builder: ReturnType<typeof this.webApiClient.issues.search>,
    params: IssuesParams
  ): void {
    // Component filters
    if (params.projectKey) {
      builder.withProjects([params.projectKey]);
    }
    if (params.projects) {
      builder.withProjects(params.projects);
    }
    if (params.componentKeys) {
      builder.withComponents(params.componentKeys);
    }
    if (params.components) {
      builder.withComponents(params.components);
    }
    if (params.onComponentOnly) {
      builder.onComponentOnly();
    }
    if (params.directories) {
      builder.withDirectories(params.directories);
    }
    if (params.files) {
      builder.withFiles(params.files);
    }
    if (params.scopes) {
      builder.withScopes(params.scopes);
    }

    // Branch and PR
    if (params.branch) {
      builder.onBranch(params.branch);
    }
    if (params.pullRequest) {
      builder.onPullRequest(params.pullRequest);
    }
  }

  /**
   * Apply issue-related filters to the search builder
   * @param builder The search builder
   * @param params The issues parameters
   */
  private applyIssueFilters(
    builder: ReturnType<typeof this.webApiClient.issues.search>,
    params: IssuesParams
  ): void {
    // Issue filters
    if (params.issues) {
      builder.withIssues(params.issues);
    }
    if (params.severities) {
      builder.withSeverities(params.severities);
    }
    if (params.statuses) {
      builder.withStatuses(params.statuses);
    }
    if (params.resolutions) {
      builder.withResolutions(params.resolutions);
    }
    if (params.resolved !== undefined) {
      if (params.resolved) {
        builder.onlyResolved();
      } else {
        builder.onlyUnresolved();
      }
    }
    if (params.types) {
      builder.withTypes(params.types);
    }

    // Clean Code taxonomy
    if (params.cleanCodeAttributeCategories) {
      builder.withCleanCodeAttributeCategories(params.cleanCodeAttributeCategories);
    }
    if (params.impactSeverities) {
      builder.withImpactSeverities(params.impactSeverities);
    }
    if (params.impactSoftwareQualities) {
      builder.withImpactSoftwareQualities(params.impactSoftwareQualities);
    }
    if (params.issueStatuses) {
      builder.withIssueStatuses(params.issueStatuses);
    }

    // Rules and tags
    if (params.rules) {
      builder.withRules(params.rules);
    }
    if (params.tags) {
      builder.withTags(params.tags);
    }
  }

  /**
   * Apply date and assignment filters to the search builder
   * @param builder The search builder
   * @param params The issues parameters
   */
  private applyDateAndAssignmentFilters(
    builder: ReturnType<typeof this.webApiClient.issues.search>,
    params: IssuesParams
  ): void {
    // Date filters
    if (params.createdAfter) {
      builder.createdAfter(params.createdAfter);
    }
    if (params.createdBefore) {
      builder.createdBefore(params.createdBefore);
    }
    if (params.createdAt) {
      builder.createdAt(params.createdAt);
    }
    if (params.createdInLast) {
      builder.createdInLast(params.createdInLast);
    }

    // Assignment
    if (params.assigned !== undefined) {
      if (params.assigned) {
        builder.onlyAssigned();
      } else {
        builder.onlyUnassigned();
      }
    }
    if (params.assignees) {
      builder.assignedToAny(params.assignees);
    }
    if (params.author) {
      builder.byAuthor(params.author);
    }
    if (params.authors) {
      builder.byAuthors(params.authors);
    }
  }

  /**
   * Apply security standards and metadata filters to the search builder
   * @param builder The search builder
   * @param params The issues parameters
   */
  private applySecurityAndMetadataFilters(
    builder: ReturnType<typeof this.webApiClient.issues.search>,
    params: IssuesParams
  ): void {
    // Security standards
    if (params.cwe) {
      builder.withCwe(params.cwe);
    }
    if (params.owaspTop10) {
      builder.withOwaspTop10(params.owaspTop10 as OwaspTop10Category[]);
    }
    if (params.owaspTop10v2021) {
      builder.withOwaspTop10v2021(params.owaspTop10v2021 as OwaspTop10v2021Category[]);
    }
    if (params.sansTop25) {
      // NOTE: withSansTop25 is deprecated since SonarQube 10.0, but kept for backward compatibility
      builder.withSansTop25(params.sansTop25 as SansTop25Category[]);
    }
    if (params.sonarsourceSecurity) {
      builder.withSonarSourceSecurity(params.sonarsourceSecurity);
    }
    if (params.sonarsourceSecurityCategory) {
      builder.withSonarSourceSecurityNew(params.sonarsourceSecurityCategory);
    }

    // Languages
    if (params.languages) {
      builder.withLanguages(params.languages);
    }

    // Facets
    if (params.facets) {
      builder.withFacets(params.facets as IssueFacet[]);
    }
    if (params.facetMode) {
      builder.withFacetMode(params.facetMode);
    }

    // New code
    if (params.sinceLeakPeriod) {
      builder.sinceLeakPeriod();
    }
    if (params.inNewCodePeriod) {
      builder.inNewCodePeriod();
    }

    // Sorting
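    // ('s' and 'asc' mirror the raw SonarQube Web API sort parameters)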
    if (params.s) {
      builder.sortBy(params.s, params.asc);
    }

    // Additional fields
    if (params.additionalFields) {
      builder.withAdditionalFields(params.additionalFields);
    }

    // Deprecated parameters
    // Note: hotspots parameter is deprecated and not supported by the current API
    if (params.severity) {
      builder.withSeverities([params.severity]);
    }
  }

  /**
   * Mark an issue as false positive
   * @param params Parameters including issue key and optional comment
   * @returns Promise with the updated issue and related data
   */
  async markIssueFalsePositive(
    params: MarkIssueFalsePositiveParams
  ): Promise<DoTransitionResponse> {
    const request = {
      issue: params.issueKey,
      transition: 'falsepositive' as const,
    };

    // Add comment if provided (using separate API call if needed)
    if (params.comment) {
      // First add the comment, then perform the transition
      await this.webApiClient.issues.addComment({
        issue: params.issueKey,
        text: params.comment,
      });
    }

    return this.webApiClient.issues.doTransition(request);
  }

  /**
   * Mark an issue as won't fix
   * @param params Parameters including issue key and optional comment
   * @returns Promise with the updated issue and related data
   */
  async markIssueWontFix(params: MarkIssueWontFixParams): Promise<DoTransitionResponse> {
    const request = {
      issue: params.issueKey,
      transition: 'wontfix' as const,
    };

    // Add comment if provided (using separate API call if needed)
    if (params.comment) {
      // First add the comment, then perform the transition
      await this.webApiClient.issues.addComment({
        issue: params.issueKey,
        text: params.comment,
      });
    }

    return this.webApiClient.issues.doTransition(request);
  }

  /**
   * Mark multiple issues as false positive
   * @param params Parameters including issue keys and optional comment
   * @returns Promise with array of updated issues and related data
   */
  async markIssuesFalsePositive(params: BulkIssueMarkParams): Promise<DoTransitionResponse[]> {
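    // Transitions are dispatched in parallel here; contrast with
    // markIssuesWontFix below, which applies them one at a time.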
    return Promise.all(
      params.issueKeys.map((issueKey) => {
        const requestParams: MarkIssueFalsePositiveParams = {
          issueKey,
          ...(params.comment && { comment: params.comment }),
        };
        return this.markIssueFalsePositive(requestParams);
      })
    );
  }

  /**
   * Mark multiple issues as won't fix
   * @param params Parameters including issue keys and optional comment
   * @returns Promise with array of updated issues and related data
   */
  async markIssuesWontFix(params: BulkIssueMarkParams): Promise<DoTransitionResponse[]> {
    const results: DoTransitionResponse[] = [];

    for (const issueKey of params.issueKeys) {
      const requestParams: MarkIssueWontFixParams = {
        issueKey,
        ...(params.comment && { comment: params.comment }),
      };
      const result = await this.markIssueWontFix(requestParams);
      results.push(result);
    }

    return results;
  }

  /**
   * Add a comment to an issue
   * @param params Parameters including issue key and comment text
   * @returns Promise with the created comment details
   */
  async addCommentToIssue(params: AddCommentToIssueParams): Promise<SonarQubeIssueComment> {
    const response = await this.webApiClient.issues.addComment({
      issue: params.issueKey,
      text: params.text,
    });

    // The API returns the full issue with comments, so we need to extract the latest comment
    const issue = response.issue as SonarQubeIssue;
    const comments = issue.comments || [];

    // Sort comments by timestamp to ensure chronological order
    comments.sort((a, b) => new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime());

    // The newly added comment should now be the last one
    const newComment = comments.at(-1);
    if (!newComment) {
      throw new Error('Failed to retrieve the newly added comment');
    }

    return newComment;
  }

  /**
   * Assign an issue to a user
   * @param params Assignment parameters
   * @returns The updated issue details
   */
  async assignIssue(params: AssignIssueParams): Promise<SonarQubeIssue> {
    // Call the assign API
    const assignRequest: {
      issue: string;
      assignee?: string;
    } = {
      issue: params.issueKey,
      ...(params.assignee && { assignee: params.assignee }),
    };
    await this.webApiClient.issues.assign(assignRequest);

    // Fetch and return the updated issue using the same search as getIssues
    const searchBuilder = this.webApiClient.issues.search();
    searchBuilder.withIssues([params.issueKey]);
    searchBuilder.withAdditionalFields(['_all']);

    const response = await searchBuilder.execute();

    if (!response.issues || response.issues.length === 0) {
      throw new Error(`Issue ${params.issueKey} not found after assignment`);
    }

    return response.issues[0] as SonarQubeIssue;
  }

  /**
   * Confirm an issue
   * @param params Parameters including issue key and optional comment
   * @returns Promise with the updated issue and related data
   */
  async confirmIssue(params: ConfirmIssueParams): Promise<DoTransitionResponse> {
    const request = {
      issue: params.issueKey,
      transition: 'confirm' as const,
    };

    if (params.comment) {
      await this.webApiClient.issues.addComment({
        issue: params.issueKey,
        text: params.comment,
      });
    }

    return this.webApiClient.issues.doTransition(request);
  }

  /**
   * Unconfirm an issue
   * @param params Parameters including issue key and optional comment
   * @returns Promise with the updated issue and related data
   */
  async unconfirmIssue(params: UnconfirmIssueParams): Promise<DoTransitionResponse> {
    const request = {
      issue: params.issueKey,
      transition: 'unconfirm' as const,
    };

    if (params.comment) {
      await this.webApiClient.issues.addComment({
        issue: params.issueKey,
        text: params.comment,
      });
    }

    return this.webApiClient.issues.doTransition(request);
  }

  /**
   * Resolve an issue
   * @param params Parameters including issue key and optional comment
   * @returns Promise with the updated issue and related data
   */
  async resolveIssue(params: ResolveIssueParams): Promise<DoTransitionResponse> {
    const request = {
      issue: params.issueKey,
      transition: 'resolve' as const,
    };

    if (params.comment) {
      await this.webApiClient.issues.addComment({
        issue: params.issueKey,
        text: params.comment,
      });
    }

    return this.webApiClient.issues.doTransition(request);
  }

  /**
   * Reopen an issue
   * @param params Parameters including issue key and optional comment
   * @returns Promise with the updated issue and related data
   */
  async reopenIssue(params: ReopenIssueParams): Promise<DoTransitionResponse> {
    const request = {
      issue: params.issueKey,
      transition: 'reopen' as const,
    };

    if (params.comment) {
      await this.webApiClient.issues.addComment({
        issue: params.issueKey,
        text: params.comment,
      });
    }

    return this.webApiClient.issues.doTransition(request);
  }
}

```
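
A minimal usage sketch, not part of the repository: the import path, the way `issuesDomain` gets constructed, and the parameter values are all assumed for illustration. It shows how the builder-based filtering in `getIssues` and the comment-then-transition pattern compose from a caller's perspective:

```typescript
// Hypothetical caller: assumes an already-constructed IssuesDomain instance
// (the repo wires this up elsewhere through its client setup).
import { IssuesDomain } from './domains/issues.js';

declare const issuesDomain: IssuesDomain; // assumed to be provided

async function triageExample(): Promise<void> {
  // Each defined field maps to one builder call inside getIssues:
  // projectKey -> withProjects, severities -> withSeverities,
  // resolved: false -> onlyUnresolved(), pageSize -> pageSize().
  const result = await issuesDomain.getIssues({
    projectKey: 'my-project',
    severities: ['MAJOR'],
    resolved: false,
    pageSize: 50,
  });
  console.log(`${result.paging.total} unresolved MAJOR issues`);

  // Transitions attach the optional comment first, then change the status.
  const first = result.issues[0];
  if (first) {
    await issuesDomain.markIssueFalsePositive({
      issueKey: first.key,
      comment: 'Raised by generated code; not actionable',
    });
  }
}
```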

--------------------------------------------------------------------------------
/src/__tests__/transports/factory.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, expect, it, beforeEach, afterEach } from 'vitest';
import { TransportFactory } from '../../transports/factory.js';
import { StdioTransport } from '../../transports/stdio.js';
import { HttpTransport } from '../../transports/http.js';
import type { ITransportConfig, IHttpTransportConfig } from '../../transports/base.js';

describe('TransportFactory', () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    // Save original environment
    originalEnv = { ...process.env };
    // Clear all environment variables related to MCP
    delete process.env.MCP_TRANSPORT_TYPE;
    delete process.env.MCP_HTTP_PORT;
    delete process.env.MCP_HTTP_ALLOWED_HOSTS;
    delete process.env.MCP_HTTP_ALLOWED_ORIGINS;
    delete process.env.MCP_HTTP_SESSION_TIMEOUT;
    delete process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION;
  });

  afterEach(() => {
    // Restore original environment
    process.env = originalEnv;
  });

  describe('create', () => {
    it('should create a stdio transport', () => {
      const config: ITransportConfig = { type: 'stdio' };
      const transport = TransportFactory.create(config);

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should create an http transport', () => {
      const config: IHttpTransportConfig = {
        type: 'http',
        options: {
          port: 3001,
        },
      };
      const transport = TransportFactory.create(config);

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should create http transport with all options', () => {
      const config: IHttpTransportConfig = {
        type: 'http',
        options: {
          port: 3001,
          sessionTimeout: 1800000,
          enableDnsRebindingProtection: true,
          allowedHosts: ['localhost', '192.168.1.1'],
          allowedOrigins: ['http://localhost:3000', 'https://example.com'],
        },
      };

      const transport = TransportFactory.create(config);
      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should create http transport without options', () => {
      const config: IHttpTransportConfig = {
        type: 'http',
      };
      const transport = TransportFactory.create(config);

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should throw error for unsupported transport type', () => {
      const config = { type: 'unsupported' as any } as ITransportConfig;

      expect(() => TransportFactory.create(config)).toThrow(
        'Unsupported transport type: unsupported'
      );
    });

    it('should throw error for undefined transport type', () => {
      const config = {} as ITransportConfig;

      expect(() => TransportFactory.create(config)).toThrow(
        'Unsupported transport type: undefined'
      );
    });

    it('should throw error for null transport type', () => {
      const config = { type: null } as unknown as ITransportConfig;

      expect(() => TransportFactory.create(config)).toThrow('Unsupported transport type: null');
    });
  });

  describe('createFromEnvironment', () => {
    it('should create stdio transport by default', () => {
      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should create stdio transport when MCP_TRANSPORT_TYPE is stdio', () => {
      process.env.MCP_TRANSPORT_TYPE = 'stdio';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should create stdio transport when MCP_TRANSPORT_TYPE is STDIO (uppercase)', () => {
      process.env.MCP_TRANSPORT_TYPE = 'STDIO';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should create http transport when MCP_TRANSPORT_TYPE is http', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should create http transport when MCP_TRANSPORT_TYPE is HTTP (uppercase)', () => {
      process.env.MCP_TRANSPORT_TYPE = 'HTTP';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should parse MCP_HTTP_PORT environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '8080';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      // The factory creates the transport with the port option. The port is
      // internal to HttpTransport, so we can only assert that a transport was
      // created from the environment config.
    });

    it('should parse MCP_HTTP_SESSION_TIMEOUT environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_SESSION_TIMEOUT = '3600000';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ALLOWED_HOSTS environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_HOSTS = 'localhost,127.0.0.1,192.168.1.1';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ALLOWED_HOSTS with spaces', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_HOSTS = 'localhost, 127.0.0.1 , 192.168.1.1';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ALLOWED_ORIGINS environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_ORIGINS = 'http://localhost:3000,https://example.com';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ALLOWED_ORIGINS with spaces', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_ORIGINS = 'http://localhost:3000 , https://example.com , *';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION environment variable', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = 'true';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should not enable DNS rebinding protection for other values', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = 'false';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should parse all HTTP environment variables together', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '3001';
      process.env.MCP_HTTP_SESSION_TIMEOUT = '1800000';
      process.env.MCP_HTTP_ALLOWED_HOSTS = 'localhost,127.0.0.1';
      process.env.MCP_HTTP_ALLOWED_ORIGINS = 'http://localhost:3000,https://example.com';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = 'true';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should handle mixed case transport type', () => {
      process.env.MCP_TRANSPORT_TYPE = 'HtTp';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
      expect(transport.getName()).toBe('http');
    });

    it('should create stdio transport for unknown transport type', () => {
      process.env.MCP_TRANSPORT_TYPE = 'unknown';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should handle empty MCP_TRANSPORT_TYPE', () => {
      process.env.MCP_TRANSPORT_TYPE = '';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should handle whitespace in MCP_TRANSPORT_TYPE', () => {
      process.env.MCP_TRANSPORT_TYPE = '  http  ';

      const transport = TransportFactory.createFromEnvironment();

      // Note: Current implementation doesn't trim whitespace, so this becomes stdio
      expect(transport).toBeInstanceOf(StdioTransport);
      expect(transport.getName()).toBe('stdio');
    });

    it('should handle invalid MCP_HTTP_PORT gracefully', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = 'invalid';

      // Should not throw, just create with NaN value
      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle empty allowed hosts list', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_HOSTS = '';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle single allowed host', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ALLOWED_HOSTS = 'localhost';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle port number at boundary values', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';

      // Test port 0
      process.env.MCP_HTTP_PORT = '0';
      let transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);

      // Test port 65535
      process.env.MCP_HTTP_PORT = '65535';
      transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);

      // Test negative port (allowed but may not work)
      process.env.MCP_HTTP_PORT = '-1';
      transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle very long session timeout', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_SESSION_TIMEOUT = '999999999999';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle DNS rebinding protection with uppercase TRUE', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = 'TRUE';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should handle DNS rebinding protection with value 1', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION = '1';

      const transport = TransportFactory.createFromEnvironment();

      expect(transport).toBeInstanceOf(HttpTransport);
    });

    it('should create different instances on multiple calls', () => {
      const transport1 = TransportFactory.createFromEnvironment();
      const transport2 = TransportFactory.createFromEnvironment();

      expect(transport1).toBeInstanceOf(StdioTransport);
      expect(transport2).toBeInstanceOf(StdioTransport);
      expect(transport1).not.toBe(transport2); // Different instances
    });
  });

  describe('integration', () => {
    it('should create equivalent transports using both methods for stdio', () => {
      const configTransport = TransportFactory.create({ type: 'stdio' });
      const envTransport = TransportFactory.createFromEnvironment();

      expect(configTransport.getName()).toBe(envTransport.getName());
      expect(configTransport).toBeInstanceOf(StdioTransport);
      expect(envTransport).toBeInstanceOf(StdioTransport);
    });

    it('should create equivalent transports using both methods for http', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '3001';

      const configTransport = TransportFactory.create({
        type: 'http',
        options: { port: 3001 },
      });
      const envTransport = TransportFactory.createFromEnvironment();

      expect(configTransport.getName()).toBe(envTransport.getName());
      expect(configTransport).toBeInstanceOf(HttpTransport);
      expect(envTransport).toBeInstanceOf(HttpTransport);
    });

    it('should handle multiple environment variable sets', () => {
      // First configuration
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '3000';
      let transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);

      // Change configuration
      process.env.MCP_TRANSPORT_TYPE = 'stdio';
      delete process.env.MCP_HTTP_PORT;
      transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(StdioTransport);

      // Back to HTTP with different port
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = '8080';
      transport = TransportFactory.createFromEnvironment();
      expect(transport).toBeInstanceOf(HttpTransport);
    });
  });

  describe('error handling', () => {
    it('should provide clear error message for invalid transport type', () => {
      const config = { type: 'websocket' } as any;

      expect(() => TransportFactory.create(config)).toThrow(
        'Unsupported transport type: websocket'
      );
    });

    it('should handle missing type gracefully', () => {
      const config = { options: { port: 3000 } } as any;

      expect(() => TransportFactory.create(config)).toThrow(
        'Unsupported transport type: undefined'
      );
    });

    it('should not throw when creating HTTP transport with invalid port in environment', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_PORT = 'not-a-number';

      // Should create transport despite invalid port
      expect(() => TransportFactory.createFromEnvironment()).not.toThrow();
    });

    it('should not throw when creating HTTP transport with invalid timeout in environment', () => {
      process.env.MCP_TRANSPORT_TYPE = 'http';
      process.env.MCP_HTTP_SESSION_TIMEOUT = 'not-a-number';

      // Should create transport despite invalid timeout
      expect(() => TransportFactory.createFromEnvironment()).not.toThrow();
    });
  });
});

```
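
The tests above pin down the factory's parsing behavior without exposing its internals. A sketch of logic consistent with those tests follows; the function names and structure here are assumptions, not the shipped implementation:

```typescript
// Sketch only: reconstructs createFromEnvironment's parsing as the tests
// above describe it. Names and details are assumptions.
function resolveTransportType(raw: string | undefined): 'stdio' | 'http' {
  // Case-insensitive match with no trimming: 'HTTP' and 'HtTp' select http,
  // while undefined, '', 'unknown', and '  http  ' all fall back to stdio.
  return raw?.toLowerCase() === 'http' ? 'http' : 'stdio';
}

function parseHttpOptionsFromEnv(env: NodeJS.ProcessEnv) {
  return {
    // parseInt('not-a-number', 10) is NaN; per the tests, invalid values are
    // passed through rather than rejected, so creation never throws.
    port: env.MCP_HTTP_PORT ? parseInt(env.MCP_HTTP_PORT, 10) : undefined,
    sessionTimeout: env.MCP_HTTP_SESSION_TIMEOUT
      ? parseInt(env.MCP_HTTP_SESSION_TIMEOUT, 10)
      : undefined,
    // The tests accept spaced input like 'localhost, 127.0.0.1'; trimming
    // each comma-separated entry is one plausible way to handle that.
    allowedHosts: env.MCP_HTTP_ALLOWED_HOSTS?.split(',').map((h) => h.trim()),
    allowedOrigins: env.MCP_HTTP_ALLOWED_ORIGINS?.split(',').map((o) => o.trim()),
    // A strict === 'true' check is consistent with the test that treats
    // 'false' as disabled; how 'TRUE' and '1' behave is not asserted.
    enableDnsRebindingProtection:
      env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION === 'true',
  };
}
```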

--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------

```yaml
# =============================================================================
# WORKFLOW: Multi-Channel Package Publishing
# PURPOSE: Distribute releases to NPM, GitHub Packages, and Docker Hub
# TRIGGERS: GitHub release publication or manual dispatch
# OUTPUTS: Published packages to configured registries
# =============================================================================

name: Publish

on:
  release:
    types: [published] # Triggered when a GitHub release is published
  workflow_dispatch: # Manual trigger for re-publishing or testing
    inputs:
      tag:
        description: 'Release tag to publish (e.g., v1.2.3)'
        required: true
        type: string

# Allow only one publish workflow per ref at a time.
# cancel-in-progress: false queues new runs instead of cancelling in-flight
# ones, so multiple releases can still proceed one after another.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false

# Global environment variables for consistency
env:
  PNPM_VERSION: 10.17.0 # Pinned: Must match packageManager in package.json
  NODE_VERSION: 22 # Pinned: Must match engines.node in package.json

# SECURITY: Minimal required permissions
# contents: read - Checkout code at release tag
# packages: write - Publish to GitHub Packages
# id-token: write - Generate provenance for npm
permissions:
  contents: read
  packages: write
  id-token: write

jobs:
  # =============================================================================
  # NPM PUBLISHING
  # Publishes package to npm registry with provenance
  # =============================================================================

  npm:
    name: Publish to NPM
    runs-on: ubuntu-latest
    # Only runs if ENABLE_NPM_RELEASE variable is set to 'true'
    # Configure in Settings > Secrets and variables > Variables
    if: vars.ENABLE_NPM_RELEASE == 'true'
    permissions:
      contents: read
      id-token: write # Required for npm provenance
      actions: read # Required to download artifacts
    steps:
      - name: Determine version
        id: version
        # Extract version from release tag or manual input
        # Strips 'v' prefix to get semver (v1.2.3 -> 1.2.3)
        run: |
          VERSION="${{ github.event_name == 'release' && github.event.release.tag_name || inputs.tag }}"
          VERSION="${VERSION#v}"  # Remove 'v' prefix
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "tag=v$VERSION" >> $GITHUB_OUTPUT
          echo "📦 Publishing to NPM: $VERSION"

      - name: Checkout code
        uses: actions/checkout@v4
        with:
          # IMPORTANT: Checkout the exact release tag, not latest main
          # This ensures we publish exactly what was released
          ref: ${{ steps.version.outputs.tag }}

      - name: Setup Node.js
        # Node.js is required for npm publish command
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          # Configure npm registry for authentication
          registry-url: 'https://registry.npmjs.org'

      - name: Determine artifact source
        id: artifact
        # Use shared script to find the correct NPM package artifact from the release build
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          chmod +x .github/scripts/determine-artifact.sh
          .github/scripts/determine-artifact.sh \
            --tag "${{ steps.version.outputs.tag }}" \
            --repo "${{ github.repository }}" \
            --version "${{ steps.version.outputs.version }}" \
            --prefix "npm-package" \
            --output "$GITHUB_OUTPUT"

      - name: Download pre-built NPM package
        id: download
        # Download the pre-built, pre-scanned NPM package from main workflow
        # This ensures we publish exactly what was tested
        uses: actions/download-artifact@v4
        with:
          name: ${{ steps.artifact.outputs.artifact_name }}
          path: ./npm-artifact
          run-id: ${{ steps.artifact.outputs.run_id }}
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract pre-built package
        run: |
          # Check if any .tgz files exist
          TARBALL=$(find ./npm-artifact -name "*.tgz" -type f | head -1)
          if [ -z "$TARBALL" ]; then
            echo "❌ No .tgz file found in artifact!"
            echo "Contents of ./npm-artifact:"
            ls -la ./npm-artifact/
            exit 1
          fi

          echo "✅ Using pre-built NPM package from main workflow"
          echo "📦 Extracting: $TARBALL"
          tar -xzf "$TARBALL"

          # The package extracts to a 'package' directory
          # We need to move its contents to the current directory
          if [ -d package ]; then
            cp -r package/* .
            rm -rf package
          fi

          echo "📋 Verified package contents from manifest"
          if [ -f ./npm-artifact/npm-package-manifest.txt ]; then
            echo "Package contains $(wc -l < ./npm-artifact/npm-package-manifest.txt) files"
          fi

      - name: Check NPM token
        id: check-npm
        # Gracefully handle missing NPM_TOKEN
        # Allows workflow to succeed even without npm publishing
        run: |
          if [ -n "${{ secrets.NPM_TOKEN }}" ]; then
            echo "has_token=true" >> $GITHUB_OUTPUT
            echo "✅ NPM_TOKEN is configured"
          else
            echo "has_token=false" >> $GITHUB_OUTPUT
            echo "⚠️ NPM_TOKEN is not configured, skipping publish"
            # To fix: Add NPM_TOKEN secret in Settings > Secrets
          fi

      - name: Publish to NPM
        if: steps.check-npm.outputs.has_token == 'true'
        run: |
          # Remove private flag and prepare script (which runs husky)
          # The prepare script runs even with --ignore-scripts, so we must remove it
          jq 'del(.private) | del(.scripts.prepare)' package.json > tmp.json && mv tmp.json package.json

          # Publish with provenance for supply chain security
          # --provenance creates a signed attestation of the build
          npm publish --provenance --access public
        env:
          # SECURITY: NPM_TOKEN required for authentication
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

  # =============================================================================
  # GITHUB PACKAGES PUBLISHING
  # Publishes package to GitHub's npm registry
  # =============================================================================

  github-packages:
    name: Publish to GitHub Packages
    runs-on: ubuntu-latest
    # Only runs if ENABLE_GITHUB_PACKAGES variable is set
    # Useful for private packages within an organization
    if: vars.ENABLE_GITHUB_PACKAGES == 'true'
    permissions:
      contents: read
      packages: write # Required to publish to GitHub Packages
      id-token: write # Required for provenance
      actions: read # Required to download artifacts
    steps:
      - name: Determine version
        id: version
        run: |
          VERSION="${{ github.event_name == 'release' && github.event.release.tag_name || inputs.tag }}"
          VERSION="${VERSION#v}"
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "tag=v$VERSION" >> $GITHUB_OUTPUT
          echo "📦 Publishing to GitHub Packages: $VERSION"

      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ steps.version.outputs.tag }}

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          # GitHub Packages npm registry URL
          registry-url: 'https://npm.pkg.github.com'

      - name: Determine artifact source
        id: artifact
        # Use shared script to find the correct NPM package artifact (same as npm job)
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          chmod +x .github/scripts/determine-artifact.sh
          .github/scripts/determine-artifact.sh \
            --tag "${{ steps.version.outputs.tag }}" \
            --repo "${{ github.repository }}" \
            --version "${{ steps.version.outputs.version }}" \
            --prefix "npm-package" \
            --output "$GITHUB_OUTPUT"

      - name: Download pre-built NPM package
        id: download
        uses: actions/download-artifact@v4
        with:
          name: ${{ steps.artifact.outputs.artifact_name }}
          path: ./npm-artifact
          run-id: ${{ steps.artifact.outputs.run_id }}
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract pre-built package
        run: |
          # Check if any .tgz files exist
          TARBALL=$(find ./npm-artifact -name "*.tgz" -type f | head -1)
          if [ -z "$TARBALL" ]; then
            echo "❌ No .tgz file found in artifact!"
            echo "Contents of ./npm-artifact:"
            ls -la ./npm-artifact/
            exit 1
          fi

          echo "✅ Using pre-built NPM package from main workflow"
          echo "📦 Extracting: $TARBALL"
          tar -xzf "$TARBALL"

          # The package extracts to a 'package' directory
          if [ -d package ]; then
            cp -r package/* .
            rm -rf package
          fi

          echo "📋 Verified package contents"

      - name: Publish to GitHub Packages
        run: |
          # Scope package name to organization and remove private flag and prepare script
          # The prepare script runs even with --ignore-scripts, so we must remove it
          jq '.name = "@${{ github.repository_owner }}/" + .name | del(.private) | del(.scripts.prepare)' package.json > tmp.json && mv tmp.json package.json

          npm publish --access public
        env:
          # SECURITY: Uses GITHUB_TOKEN for authentication
          # Automatically available, no configuration needed
          NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  # =============================================================================
  # DOCKER HUB PUBLISHING
  # Copies pre-built multi-platform image from GHCR to Docker Hub
  # =============================================================================

  docker:
    name: Publish to Docker Hub
    runs-on: ubuntu-latest
    # Only runs if ENABLE_DOCKER_RELEASE variable is set
    # Requires DOCKERHUB_USERNAME and DOCKERHUB_TOKEN secrets
    if: vars.ENABLE_DOCKER_RELEASE == 'true'
    permissions:
      contents: read
      packages: read # Read from GitHub Container Registry
    steps:
      - name: Determine version
        id: version
        run: |
          VERSION="${{ github.event_name == 'release' && github.event.release.tag_name || inputs.tag }}"
          VERSION="${VERSION#v}"
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "tag=v$VERSION" >> $GITHUB_OUTPUT
          echo "🐳 Publishing Docker image: $VERSION"

      - name: Check Docker credentials
        id: check-docker
        # Validate Docker Hub credentials exist
        # Allows workflow to succeed without Docker publishing
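        # (The `exit 0` below only ends this step with success; later steps
        # are skipped via their `if: steps.check-docker...` guards.)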
        run: |
          if [ -n "${{ secrets.DOCKERHUB_USERNAME }}" ] && [ -n "${{ secrets.DOCKERHUB_TOKEN }}" ]; then
            echo "has_credentials=true" >> $GITHUB_OUTPUT
            echo "✅ Docker Hub credentials are configured"
          else
            echo "has_credentials=false" >> $GITHUB_OUTPUT
            echo "⚠️ Docker Hub credentials are not configured, skipping publish"
            # To fix: Add DOCKERHUB_USERNAME and DOCKERHUB_TOKEN in Settings > Secrets
            exit 0
          fi

      - name: Set up Docker Buildx
        # Required for imagetools commands
        if: steps.check-docker.outputs.has_credentials == 'true'
        uses: docker/setup-buildx-action@v3

      - name: Login to GitHub Container Registry
        # Login to GHCR to pull the pre-built image
        if: steps.check-docker.outputs.has_credentials == 'true'
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        # SECURITY: Authenticate with Docker Hub for pushing
        if: steps.check-docker.outputs.has_credentials == 'true'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Copy image from GHCR to Docker Hub
        # Use buildx imagetools to copy multi-platform image between registries
        # This properly handles multi-platform manifest lists
        if: steps.check-docker.outputs.has_credentials == 'true'
        run: |
          SOURCE_IMAGE="ghcr.io/${{ github.repository_owner }}/sonarqube-mcp-server"
          TARGET_REPO="${{ secrets.DOCKERHUB_USERNAME }}/${{ github.event.repository.name }}"
          VERSION="${{ steps.version.outputs.version }}"

          echo "📤 Copying multi-platform image from GHCR to Docker Hub..."
          echo "Source: $SOURCE_IMAGE:$VERSION"
          echo "Target: $TARGET_REPO:$VERSION"

          # Copy image with version tag
          docker buildx imagetools create \
            --tag $TARGET_REPO:$VERSION \
            $SOURCE_IMAGE:$VERSION

          echo "🏷️ Creating additional tags..."
          # Create alias tags for latest, major, and major.minor versions
          MAJOR=$(echo "$VERSION" | cut -d. -f1)
          MINOR=$(echo "$VERSION" | cut -d. -f2)

          docker buildx imagetools create --tag $TARGET_REPO:latest $TARGET_REPO:$VERSION
          docker buildx imagetools create --tag $TARGET_REPO:$MAJOR $TARGET_REPO:$VERSION
          docker buildx imagetools create --tag $TARGET_REPO:$MAJOR.$MINOR $TARGET_REPO:$VERSION

          echo "✅ Docker image published successfully to Docker Hub"
          echo "📋 Published tags: $VERSION, latest, $MAJOR, $MAJOR.$MINOR"

  # =============================================================================
  # NOTIFICATION
  # Send status updates to team communication channels
  # =============================================================================

  notify:
    name: Notify
    if: always() # Run even if publishing jobs fail
    needs: [npm, docker, github-packages]
    runs-on: ubuntu-latest
    steps:
      - name: Check Slack webhook
        id: check-slack
        # Gracefully handle missing Slack configuration
        run: |
          if [ -n "${{ secrets.SLACK_WEBHOOK }}" ]; then
            echo "has_webhook=true" >> $GITHUB_OUTPUT
          else
            echo "has_webhook=false" >> $GITHUB_OUTPUT
            # Optional: Configure SLACK_WEBHOOK in Settings > Secrets
          fi

      - name: Send Slack notification
        # Send release status to Slack channel
        # Shows success/skip/failure for each distribution channel
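        # GitHub expressions have no ternary operator; the `cond && a || b`
        # chains below emulate one (safe here because the emoji strings are
        # always truthy).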
        if: steps.check-slack.outputs.has_webhook == 'true'
        uses: slackapi/slack-github-action@v2
        with:
          payload: |
            {
              "text": "🚀 Release ${{ github.event_name == 'release' && github.event.release.tag_name || inputs.tag }}",
              "blocks": [
                {
                  "type": "section",
                  "fields": [
                    {"type": "mrkdwn", "text": "*Repo:*\n${{ github.repository }}"},
                    {"type": "mrkdwn", "text": "*NPM:*\n${{ needs.npm.result == 'success' && '✅' || needs.npm.result == 'skipped' && '⏭️' || '❌' }}"},
                    {"type": "mrkdwn", "text": "*Docker:*\n${{ needs.docker.result == 'success' && '✅' || needs.docker.result == 'skipped' && '⏭️' || '❌' }}"},
                    {"type": "mrkdwn", "text": "*GitHub:*\n${{ needs.github-packages.result == 'success' && '✅' || needs.github-packages.result == 'skipped' && '⏭️' || '❌' }}"}
                  ]
                }
              ]
            }
        env:
          # SECURITY: Webhook URL for Slack integration
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }}

```
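
For reference, a tiny TypeScript rendering (illustration only, not part of the workflow) of the alias-tag derivation the Docker step performs with `${VERSION#v}` and `cut`:

```typescript
// Mirrors the publish step's tag math: strip the 'v' prefix, then derive
// the latest / major / major.minor aliases. Assumes a full x.y.z semver tag.
function dockerTagsForRelease(releaseTag: string): string[] {
  const version = releaseTag.replace(/^v/, ''); // 'v1.2.3' -> '1.2.3'
  const [major, minor] = version.split('.'); // major='1', minor='2'
  return [version, 'latest', `${major}`, `${major}.${minor}`];
}

// dockerTagsForRelease('v1.2.3') -> ['1.2.3', 'latest', '1', '1.2']
```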

--------------------------------------------------------------------------------
/src/__tests__/direct-lambdas.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { z } from 'zod';
// Save the original environment variables
const originalEnv = process.env;
// Mock client responses to avoid network calls
vi.mock('../sonarqube.js', () => {
  return {
    SonarQubeClient: vi.fn().mockImplementation(() => ({
      listProjects: vi.fn<() => Promise<any>>().mockResolvedValue({
        projects: [{ key: 'test-project', name: 'Test Project' }],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
      getIssues: vi.fn<() => Promise<any>>().mockResolvedValue({
        issues: [{ key: 'test-issue', rule: 'test-rule' }],
        components: [],
        rules: [],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
      getMetrics: vi.fn<() => Promise<any>>().mockResolvedValue({
        metrics: [{ key: 'test-metric', name: 'Test Metric' }],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
      getHealth: vi.fn<() => Promise<any>>().mockResolvedValue({ health: 'GREEN', causes: [] }),
      getStatus: vi
        .fn<() => Promise<any>>()
        .mockResolvedValue({ id: 'id', version: '1.0', status: 'UP' }),
      ping: vi.fn<() => Promise<any>>().mockResolvedValue('pong'),
      getComponentMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
        component: { key: 'test-component', measures: [] },
        metrics: [],
      }),
      getComponentsMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
        components: [{ key: 'test-component', measures: [] }],
        metrics: [],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
      getMeasuresHistory: vi.fn<() => Promise<any>>().mockResolvedValue({
        measures: [{ metric: 'coverage', history: [] }],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
    })),
    setSonarQubeElicitationManager: vi.fn(),
    createSonarQubeClientFromEnv: vi.fn(() => ({
      listProjects: vi.fn<() => Promise<any>>().mockResolvedValue({
        projects: [{ key: 'test-project', name: 'Test Project' }],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
      getIssues: vi.fn<() => Promise<any>>().mockResolvedValue({
        issues: [{ key: 'test-issue', rule: 'test-rule' }],
        components: [],
        rules: [],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
      getMetrics: vi.fn<() => Promise<any>>().mockResolvedValue({
        metrics: [{ key: 'test-metric', name: 'Test Metric' }],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
      getHealth: vi.fn<() => Promise<any>>().mockResolvedValue({ health: 'GREEN', causes: [] }),
      getStatus: vi
        .fn<() => Promise<any>>()
        .mockResolvedValue({ id: 'id', version: '1.0', status: 'UP' }),
      ping: vi.fn<() => Promise<any>>().mockResolvedValue('pong'),
      getComponentMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
        component: { key: 'test-component', measures: [] },
        metrics: [],
      }),
      getComponentsMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
        components: [{ key: 'test-component', measures: [] }],
        metrics: [],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
      getMeasuresHistory: vi.fn<() => Promise<any>>().mockResolvedValue({
        measures: [{ metric: 'coverage', history: [] }],
        paging: { pageIndex: 1, pageSize: 10, total: 1 },
      }),
    })),
  };
});
describe('Direct Lambda Testing', () => {
  let index: typeof import('../index.js');
  beforeEach(async () => {
    vi.resetModules();
    process.env = { ...originalEnv };
    process.env.SONARQUBE_TOKEN = 'test-token';
    process.env.SONARQUBE_URL = 'http://localhost:9000';
    // Import the module for each test to ensure it's fresh
    index = await import('../index.js');
  });
  afterEach(() => {
    process.env = originalEnv;
    vi.clearAllMocks();
  });
  describe('Direct Lambda Function Execution', () => {
    // Recreate the lambda functions as they would be registered via mcpServer.tool calls
    it('should execute metrics lambda function', async () => {
      // Get the metrics lambda function (simulating how it's registered)
      const metricsLambda = async (params: Record<string, unknown>) => {
        const page = index.nullToUndefined(params.page) as number | undefined;
        const pageSize = index.nullToUndefined(params.page_size) as number | undefined;
        const metricsParams = { page, pageSize };
        const result = await index.handleSonarQubeGetMetrics(metricsParams);
        return {
          content: [
            {
              type: 'text',
              text: JSON.stringify(result, null, 2),
            },
          ],
        };
      };
      // Execute the lambda function
      const result = await metricsLambda({ page: '1', page_size: '10' });
      // Verify the result structure
      expect(result).toBeDefined();
      expect(result.content).toBeDefined();
      expect(result.content[0]?.type).toBe('text');
      expect(result.content[0]?.text).toBeDefined();
      // Parse the content to verify data structure
      const data = JSON.parse(result.content[0]!.text);
      expect(data.content[0]!.type).toBe('text');
    });
    it('should execute issues lambda function', async () => {
      // Simulate the issues lambda function
      const issuesLambda = async (params: Record<string, unknown>) => {
        return index.handleSonarQubeGetIssues(index.mapToSonarQubeParams(params));
      };
      // Execute the lambda function
      const result = await issuesLambda({ project_key: 'test-project', severity: 'MAJOR' });
      // Verify the result structure
      expect(result).toBeDefined();
      expect(result.content).toBeDefined();
      expect(result.content[0]!.type).toBe('text');
    });
    it('should execute measures_component lambda function with string metrics', async () => {
      // Simulate the measures_component lambda function
      const measuresLambda = async (params: Record<string, unknown>) => {
        const componentParams: {
          component: string;
          metricKeys: string[];
          additionalFields?: string[];
          branch?: string;
          pullRequest?: string;
          period?: string;
        } = {
          component: params.component as string,
          metricKeys: Array.isArray(params.metric_keys)
            ? (params.metric_keys as string[])
            : [params.metric_keys as string],
        };
        if (params.additional_fields)
          componentParams.additionalFields = params.additional_fields as string[];
        if (params.branch) componentParams.branch = params.branch as string;
        if (params.pull_request) componentParams.pullRequest = params.pull_request as string;
        if (params.period) componentParams.period = params.period as string;
        return index.handleSonarQubeComponentMeasures(componentParams);
      };
      // Execute the lambda function with string metric
      const result = await measuresLambda({
        component: 'test-component',
        metric_keys: 'coverage',
      });
      // Verify the result structure
      expect(result).toBeDefined();
      expect(result.content).toBeDefined();
      expect(result.content[0]!.type).toBe('text');
    });
    it('should execute measures_component lambda function with array metrics', async () => {
      // Simulate the measures_component lambda function
      const measuresLambda = async (params: Record<string, unknown>) => {
        const componentParams: {
          component: string;
          metricKeys: string[];
          additionalFields?: string[];
          branch?: string;
          pullRequest?: string;
          period?: string;
        } = {
          component: params.component as string,
          metricKeys: Array.isArray(params.metric_keys)
            ? (params.metric_keys as string[])
            : [params.metric_keys as string],
        };
        if (params.additional_fields)
          componentParams.additionalFields = params.additional_fields as string[];
        if (params.branch) componentParams.branch = params.branch as string;
        if (params.pull_request) componentParams.pullRequest = params.pull_request as string;
        if (params.period) componentParams.period = params.period as string;
        return index.handleSonarQubeComponentMeasures(componentParams);
      };
      // Execute the lambda function with array metrics
      const result = await measuresLambda({
        component: 'test-component',
        metric_keys: ['coverage', 'bugs'],
        additional_fields: ['periods'],
        branch: 'main',
        pull_request: 'pr-123',
        period: '1',
      });
      // Verify the result structure
      expect(result).toBeDefined();
      expect(result.content).toBeDefined();
      expect(result.content[0]!.type).toBe('text');
    });
    it('should execute measures_components lambda function', async () => {
      // Simulate the measures_components lambda function
      const componentsLambda = async (params: Record<string, unknown>) => {
        const page = index.nullToUndefined(params.page) as number | undefined;
        const pageSize = index.nullToUndefined(params.page_size) as number | undefined;
        const componentsParams: {
          componentKeys: string[];
          metricKeys: string[];
          additionalFields?: string[];
          branch?: string;
          pullRequest?: string;
          period?: string;
          page: number | undefined;
          pageSize: number | undefined;
        } = {
          componentKeys: Array.isArray(params.component_keys)
            ? (params.component_keys as string[])
            : [params.component_keys as string],
          metricKeys: Array.isArray(params.metric_keys)
            ? (params.metric_keys as string[])
            : [params.metric_keys as string],
          page,
          pageSize,
        };
        if (params.additional_fields)
          componentsParams.additionalFields = params.additional_fields as string[];
        if (params.branch) componentsParams.branch = params.branch as string;
        if (params.pull_request) componentsParams.pullRequest = params.pull_request as string;
        if (params.period) componentsParams.period = params.period as string;
        return index.handleSonarQubeComponentsMeasures(componentsParams);
      };
      // Execute the lambda function
      const result = await componentsLambda({
        component_keys: ['comp1', 'comp2'],
        metric_keys: ['coverage', 'bugs'],
        page: '1',
        page_size: '10',
        additional_fields: ['periods'],
        branch: 'main',
      });
      // Verify the result structure
      expect(result).toBeDefined();
      expect(result.content).toBeDefined();
      expect(result.content[0]!.type).toBe('text');
    });
    it('should execute measures_history lambda function', async () => {
      // Simulate the measures_history lambda function
      const historyLambda = async (params: Record<string, unknown>) => {
        const page = index.nullToUndefined(params.page) as number | undefined;
        const pageSize = index.nullToUndefined(params.page_size) as number | undefined;
        const historyParams: {
          component: string;
          metrics: string[];
          from?: string;
          to?: string;
          branch?: string;
          pullRequest?: string;
          page: number | undefined;
          pageSize: number | undefined;
        } = {
          component: params.component as string,
          metrics: Array.isArray(params.metrics)
            ? (params.metrics as string[])
            : [params.metrics as string],
          page,
          pageSize,
        };
        if (params.from) historyParams.from = params.from as string;
        if (params.to) historyParams.to = params.to as string;
        if (params.branch) historyParams.branch = params.branch as string;
        if (params.pull_request) historyParams.pullRequest = params.pull_request as string;
        return index.handleSonarQubeMeasuresHistory(historyParams);
      };
      // Execute the lambda function
      const result = await historyLambda({
        component: 'test-component',
        metrics: 'coverage',
        from: '2023-01-01',
        to: '2023-12-31',
        branch: 'main',
        page: '1',
        page_size: '10',
      });
      // Verify the result structure
      expect(result).toBeDefined();
      expect(result.content).toBeDefined();
      expect(result.content[0]!.type).toBe('text');
    });
  });
  describe('Schema Transformations', () => {
    it('should test page schema transformations', () => {
      // Create a schema similar to what's in the actual code
      const pageSchema = z
        .string()
        .optional()
        .transform((val) => (val ? parseInt(val, 10) || null : null));
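      // parseInt('abc', 10) yields NaN, which is falsy, so '|| null' maps it to null;
      // note that '0' would be coerced to null the same way.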
      // Test valid numeric strings
      expect(pageSchema.parse('10')).toBe(10);
      expect(pageSchema.parse('100')).toBe(100);
      // Test invalid inputs
      expect(pageSchema.parse('')).toBe(null);
      expect(pageSchema.parse('abc')).toBe(null);
      expect(pageSchema.parse(undefined)).toBe(null);
    });
    it('should test boolean schema transformations', () => {
      // Create a schema similar to what's in the actual code
      const booleanSchema = z
        .union([z.boolean(), z.string().transform((val) => val === 'true')])
        .nullable()
        .optional();
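      // Booleans pass through the first union branch untouched; any string other
      // than 'true' (not just 'false') transforms to false.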
      // Test string values
      expect(booleanSchema.parse('true')).toBe(true);
      expect(booleanSchema.parse('false')).toBe(false);
      // Test boolean values
      expect(booleanSchema.parse(true)).toBe(true);
      expect(booleanSchema.parse(false)).toBe(false);
      // Test null/undefined
      expect(booleanSchema.parse(null)).toBe(null);
      expect(booleanSchema.parse(undefined)).toBe(undefined);
    });
    it('should test status schema validations', () => {
      // Create a schema similar to what's in the actual code
      const statusSchema = z
        .array(
          z.enum([
            'OPEN',
            'CONFIRMED',
            'REOPENED',
            'RESOLVED',
            'CLOSED',
            'TO_REVIEW',
            'IN_REVIEW',
            'REVIEWED',
          ])
        )
        .nullable()
        .optional();
      // Test valid values
      expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
      // Test null/undefined
      expect(statusSchema.parse(null)).toBe(null);
      expect(statusSchema.parse(undefined)).toBe(undefined);
      // Test invalid values (should throw)
      expect(() => statusSchema.parse(['INVALID'])).toThrow();
    });
    it('should test resolution schema validations', () => {
      // Create a schema similar to what's in the actual code
      const resolutionSchema = z
        .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
        .nullable()
        .optional();
      // Test valid values
      expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
        'FALSE-POSITIVE',
        'WONTFIX',
      ]);
      // Test null/undefined
      expect(resolutionSchema.parse(null)).toBe(null);
      expect(resolutionSchema.parse(undefined)).toBe(undefined);
      // Test invalid values (should throw)
      expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
    });
    it('should test type schema validations', () => {
      // Create a schema similar to what's in the actual code
      const typeSchema = z
        .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
        .nullable()
        .optional();
      // Test valid values
      expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
      // Test null/undefined
      expect(typeSchema.parse(null)).toBe(null);
      expect(typeSchema.parse(undefined)).toBe(undefined);
      // Test invalid values (should throw)
      expect(() => typeSchema.parse(['INVALID'])).toThrow();
    });
    it('should test severity schema validations', () => {
      // Create a schema similar to what's in the actual code
      const severitySchema = z
        .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
        .nullable()
        .optional();
      // Test valid values
      expect(severitySchema.parse('INFO')).toBe('INFO');
      expect(severitySchema.parse('MINOR')).toBe('MINOR');
      expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
      expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
      expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
      // Test null/undefined
      expect(severitySchema.parse(null)).toBe(null);
      expect(severitySchema.parse(undefined)).toBe(undefined);
      // Test invalid values (should throw)
      expect(() => severitySchema.parse('INVALID')).toThrow();
    });
  });
});

```
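
The measures lambdas above funnel pagination values through `index.nullToUndefined` before casting them. The helper itself is not shown on this page; a minimal sketch of its assumed behavior:

```typescript
// Assumed shape of the helper exercised above: collapse null to undefined so
// optional SonarQube API parameters are omitted instead of being sent as null.
export function nullToUndefined<T>(value: T | null | undefined): T | undefined {
  return value ?? undefined;
}
```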

--------------------------------------------------------------------------------
/src/__tests__/schemas/issues-schema.test.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';
import {
  issuesToolSchema,
  markIssueFalsePositiveToolSchema,
  markIssueWontFixToolSchema,
  markIssuesFalsePositiveToolSchema,
  markIssuesWontFixToolSchema,
  addCommentToIssueToolSchema,
  assignIssueToolSchema,
  confirmIssueToolSchema,
  unconfirmIssueToolSchema,
  resolveIssueToolSchema,
  reopenIssueToolSchema,
} from '../../schemas/issues.js';

describe('issuesToolSchema', () => {
  it('should validate minimal issues parameters', () => {
    const input = {};
    const result = z.object(issuesToolSchema).parse(input);
    expect(result).toEqual({});
  });

  it('should validate issues with project key', () => {
    const input = {
      project_key: 'my-project',
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.project_key).toBe('my-project');
  });

  it('should validate issues with all filter parameters', () => {
    const input = {
      project_key: 'my-project',
      projects: ['proj1', 'proj2'],
      branch: 'main',
      pull_request: '123',
      issues: ['ISSUE-1', 'ISSUE-2'],
      severities: ['BLOCKER', 'CRITICAL'],
      severity: 'MAJOR',
      statuses: ['OPEN', 'CONFIRMED'],
      issue_statuses: ['OPEN', 'CONFIRMED'],
      resolutions: ['FALSE-POSITIVE', 'WONTFIX'],
      resolved: true,
      rules: ['java:S1234', 'java:S5678'],
      tags: ['security', 'performance'],
      types: ['BUG', 'VULNERABILITY'],
      languages: ['java', 'javascript'],
      component_keys: ['comp1', 'comp2'],
      components: ['comp3', 'comp4'],
      on_component_only: false,
      created_after: '2023-01-01',
      created_before: '2023-12-31',
      created_at: '2023-06-15',
      created_in_last: '7d',
      assigned: true,
      assignees: ['user1', 'user2'],
      author: 'author1',
      authors: ['author1', 'author2'],
      cwe: ['79', '89'],
      owasp_top10: ['a1', 'a3'],
      owasp_top10_v2021: ['a01', 'a03'],
      sans_top25: ['insecure-interaction', 'risky-resource'],
      sonarsource_security: ['sql-injection', 'xss'],
      sonarsource_security_category: ['injection'],
      clean_code_attribute_categories: ['INTENTIONAL', 'RESPONSIBLE'],
      impact_severities: ['HIGH', 'MEDIUM'],
      impact_software_qualities: ['SECURITY', 'RELIABILITY'],
      facets: ['severities', 'types'],
      facet_mode: 'effort',
      additional_fields: ['_all'],
      in_new_code_period: true,
      since_leak_period: false,
      s: 'FILE_LINE',
      asc: false,
      page: '2',
      page_size: '50',
    };

    const result = z.object(issuesToolSchema).parse(input);
    expect(result.project_key).toBe('my-project');
    expect(result.projects).toEqual(['proj1', 'proj2']);
    expect(result.severities).toEqual(['BLOCKER', 'CRITICAL']);
    expect(result.impact_severities).toEqual(['HIGH', 'MEDIUM']);
    expect(result.clean_code_attribute_categories).toEqual(['INTENTIONAL', 'RESPONSIBLE']);
  });

  it('should handle null values for optional arrays', () => {
    const input = {
      projects: null,
      severities: null,
      tags: null,
      rules: null,
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.projects).toBeNull();
    expect(result.severities).toBeNull();
    expect(result.tags).toBeNull();
    expect(result.rules).toBeNull();
  });

  it('should handle boolean string conversions', () => {
    const input = {
      resolved: 'true',
      assigned: 'false',
      on_component_only: 'true',
      in_new_code_period: 'false',
      since_leak_period: 'true',
      asc: 'false',
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.resolved).toBe(true);
    expect(result.assigned).toBe(false);
    expect(result.on_component_only).toBe(true);
    expect(result.in_new_code_period).toBe(false);
    expect(result.since_leak_period).toBe(true);
    expect(result.asc).toBe(false);
  });

  it('should handle page number string conversions', () => {
    const input = {
      page: '3',
      page_size: '25',
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.page).toBe(3);
    expect(result.page_size).toBe(25);
  });

  it('should reject invalid severity values', () => {
    const input = {
      severities: ['INVALID'],
    };
    expect(() => z.object(issuesToolSchema).parse(input)).toThrow();
  });

  it('should reject invalid status values', () => {
    const input = {
      statuses: ['INVALID_STATUS'],
    };
    expect(() => z.object(issuesToolSchema).parse(input)).toThrow();
  });

  it('should reject invalid impact severity values', () => {
    const input = {
      impact_severities: ['VERY_HIGH'],
    };
    expect(() => z.object(issuesToolSchema).parse(input)).toThrow();
  });

  it('should reject invalid clean code categories', () => {
    const input = {
      clean_code_attribute_categories: ['INVALID_CATEGORY'],
    };
    expect(() => z.object(issuesToolSchema).parse(input)).toThrow();
  });

  it('should handle empty arrays', () => {
    const input = {
      projects: [],
      tags: [],
      rules: [],
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.projects).toEqual([]);
    expect(result.tags).toEqual([]);
    expect(result.rules).toEqual([]);
  });

  it('should handle partial parameters', () => {
    const input = {
      project_key: 'test',
      severities: ['MAJOR'],
      page: '1',
    };
    const result = z.object(issuesToolSchema).parse(input);
    expect(result.project_key).toBe('test');
    expect(result.severities).toEqual(['MAJOR']);
    expect(result.page).toBe(1);
    expect(result.branch).toBeUndefined();
    expect(result.tags).toBeUndefined();
  });
});

describe('markIssueFalsePositiveToolSchema', () => {
  it('should validate minimal parameters with issue key', () => {
    const input = {
      issue_key: 'ISSUE-123',
    };
    const result = z.object(markIssueFalsePositiveToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-123');
    expect(result.comment).toBeUndefined();
  });

  it('should validate parameters with comment', () => {
    const input = {
      issue_key: 'ISSUE-123',
      comment: 'This is a false positive because...',
    };
    const result = z.object(markIssueFalsePositiveToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-123');
    expect(result.comment).toBe('This is a false positive because...');
  });

  it('should reject missing issue key', () => {
    const input = {
      comment: 'Missing issue key',
    };
    expect(() => z.object(markIssueFalsePositiveToolSchema).parse(input)).toThrow();
  });
});

describe('markIssueWontFixToolSchema', () => {
  it('should validate minimal parameters with issue key', () => {
    const input = {
      issue_key: 'ISSUE-456',
    };
    const result = z.object(markIssueWontFixToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-456');
    expect(result.comment).toBeUndefined();
  });

  it('should validate parameters with comment', () => {
    const input = {
      issue_key: 'ISSUE-456',
      comment: "Won't fix because it's acceptable in this context",
    };
    const result = z.object(markIssueWontFixToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-456');
    expect(result.comment).toBe("Won't fix because it's acceptable in this context");
  });

  it('should reject missing issue key', () => {
    const input = {
      comment: 'Missing issue key',
    };
    expect(() => z.object(markIssueWontFixToolSchema).parse(input)).toThrow();
  });
});

describe('markIssuesFalsePositiveToolSchema', () => {
  it('should validate minimal parameters with issue keys array', () => {
    const input = {
      issue_keys: ['ISSUE-123', 'ISSUE-124', 'ISSUE-125'],
    };
    const result = z.object(markIssuesFalsePositiveToolSchema).parse(input);
    expect(result.issue_keys).toEqual(['ISSUE-123', 'ISSUE-124', 'ISSUE-125']);
    expect(result.comment).toBeUndefined();
  });

  it('should validate parameters with comment', () => {
    const input = {
      issue_keys: ['ISSUE-123', 'ISSUE-124'],
      comment: 'Bulk marking as false positives',
    };
    const result = z.object(markIssuesFalsePositiveToolSchema).parse(input);
    expect(result.issue_keys).toEqual(['ISSUE-123', 'ISSUE-124']);
    expect(result.comment).toBe('Bulk marking as false positives');
  });

  it('should validate single issue in array', () => {
    const input = {
      issue_keys: ['ISSUE-123'],
    };
    const result = z.object(markIssuesFalsePositiveToolSchema).parse(input);
    expect(result.issue_keys).toEqual(['ISSUE-123']);
  });

  it('should reject empty issue keys array', () => {
    const input = {
      issue_keys: [],
    };
    expect(() => z.object(markIssuesFalsePositiveToolSchema).parse(input)).toThrow();
  });

  it('should reject missing issue keys', () => {
    const input = {
      comment: 'Missing issue keys',
    };
    expect(() => z.object(markIssuesFalsePositiveToolSchema).parse(input)).toThrow();
  });
});

describe('markIssuesWontFixToolSchema', () => {
  it('should validate minimal parameters with issue keys array', () => {
    const input = {
      issue_keys: ['ISSUE-456', 'ISSUE-457', 'ISSUE-458'],
    };
    const result = z.object(markIssuesWontFixToolSchema).parse(input);
    expect(result.issue_keys).toEqual(['ISSUE-456', 'ISSUE-457', 'ISSUE-458']);
    expect(result.comment).toBeUndefined();
  });

  it('should validate parameters with comment', () => {
    const input = {
      issue_keys: ['ISSUE-456', 'ISSUE-457'],
      comment: "Bulk marking as won't fix",
    };
    const result = z.object(markIssuesWontFixToolSchema).parse(input);
    expect(result.issue_keys).toEqual(['ISSUE-456', 'ISSUE-457']);
    expect(result.comment).toBe("Bulk marking as won't fix");
  });

  it('should validate single issue in array', () => {
    const input = {
      issue_keys: ['ISSUE-456'],
    };
    const result = z.object(markIssuesWontFixToolSchema).parse(input);
    expect(result.issue_keys).toEqual(['ISSUE-456']);
  });

  it('should reject empty issue keys array', () => {
    const input = {
      issue_keys: [],
    };
    expect(() => z.object(markIssuesWontFixToolSchema).parse(input)).toThrow();
  });

  it('should reject missing issue keys', () => {
    const input = {
      comment: 'Missing issue keys',
    };
    expect(() => z.object(markIssuesWontFixToolSchema).parse(input)).toThrow();
  });
});

describe('addCommentToIssueToolSchema', () => {
  it('should validate parameters with issue key and text', () => {
    const input = {
      issue_key: 'ISSUE-789',
      text: 'This is a comment with **markdown** support',
    };
    const result = z.object(addCommentToIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-789');
    expect(result.text).toBe('This is a comment with **markdown** support');
  });

  it('should validate plain text comment', () => {
    const input = {
      issue_key: 'ISSUE-100',
      text: 'Plain text comment without formatting',
    };
    const result = z.object(addCommentToIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-100');
    expect(result.text).toBe('Plain text comment without formatting');
  });

  it('should validate multi-line comment', () => {
    const input = {
      issue_key: 'ISSUE-200',
      text: 'Line 1\nLine 2\n\n- Bullet point\n- Another bullet',
    };
    const result = z.object(addCommentToIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-200');
    expect(result.text).toBe('Line 1\nLine 2\n\n- Bullet point\n- Another bullet');
  });

  it('should validate markdown with code blocks', () => {
    const input = {
      issue_key: 'ISSUE-300',
      text: 'Here is some code:\n\n```java\npublic void test() {\n  System.out.println("Hello");\n}\n```',
    };
    const result = z.object(addCommentToIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-300');
    expect(result.text).toContain('```java');
  });

  it('should reject missing issue key', () => {
    const input = {
      text: 'Comment without issue key',
    };
    expect(() => z.object(addCommentToIssueToolSchema).parse(input)).toThrow();
  });

  it('should reject missing text', () => {
    const input = {
      issue_key: 'ISSUE-789',
    };
    expect(() => z.object(addCommentToIssueToolSchema).parse(input)).toThrow();
  });

  it('should reject empty text', () => {
    const input = {
      issue_key: 'ISSUE-789',
      text: '',
    };
    expect(() => z.object(addCommentToIssueToolSchema).parse(input)).toThrow();
  });

  it('should reject empty issue key', () => {
    const input = {
      issue_key: '',
      text: 'Valid comment',
    };
    expect(() => z.object(addCommentToIssueToolSchema).parse(input)).toThrow();
  });

  it('should accept single character text', () => {
    const input = {
      issue_key: 'ISSUE-789',
      text: 'X',
    };
    const result = z.object(addCommentToIssueToolSchema).parse(input);
    expect(result.text).toBe('X');
  });

  it('should handle very long comments', () => {
    const longText = 'A'.repeat(10000);
    const input = {
      issue_key: 'ISSUE-789',
      text: longText,
    };
    const result = z.object(addCommentToIssueToolSchema).parse(input);
    expect(result.text).toBe(longText);
  });

  it('should handle special characters in comments', () => {
    const input = {
      issue_key: 'ISSUE-789',
      text: 'Special chars: <>&"\'`@#$%^&*()[]{}|\\;:,.?/',
    };
    const result = z.object(addCommentToIssueToolSchema).parse(input);
    expect(result.text).toBe('Special chars: <>&"\'`@#$%^&*()[]{}|\\;:,.?/');
  });

  it('should handle Unicode characters', () => {
    const input = {
      issue_key: 'ISSUE-789',
      text: 'Unicode: 😀 你好 مرحبا こんにちは',
    };
    const result = z.object(addCommentToIssueToolSchema).parse(input);
    expect(result.text).toBe('Unicode: 😀 你好 مرحبا こんにちは');
  });
});

describe('assignIssueToolSchema', () => {
  it('should validate issue assignment with assignee', () => {
    const input = {
      issueKey: 'ISSUE-123',
      assignee: 'john.doe',
    };
    const result = z.object(assignIssueToolSchema).parse(input);
    expect(result.issueKey).toBe('ISSUE-123');
    expect(result.assignee).toBe('john.doe');
  });

  it('should validate issue unassignment without assignee', () => {
    const input = {
      issueKey: 'ISSUE-456',
    };
    const result = z.object(assignIssueToolSchema).parse(input);
    expect(result.issueKey).toBe('ISSUE-456');
    expect(result.assignee).toBeUndefined();
  });

  it('should reject empty issue key', () => {
    expect(() =>
      z.object(assignIssueToolSchema).parse({
        issueKey: '',
        assignee: 'john.doe',
      })
    ).toThrow();
  });

  it('should reject missing issue key', () => {
    expect(() =>
      z.object(assignIssueToolSchema).parse({
        assignee: 'john.doe',
      })
    ).toThrow();
  });

  it('should allow empty string for assignee to unassign', () => {
    const input = {
      issueKey: 'ISSUE-789',
      assignee: '',
    };
    const result = z.object(assignIssueToolSchema).parse(input);
    expect(result.issueKey).toBe('ISSUE-789');
    expect(result.assignee).toBe('');
  });
});

describe('confirmIssueToolSchema', () => {
  it('should validate minimal parameters', () => {
    const input = {
      issue_key: 'ISSUE-123',
    };
    const result = z.object(confirmIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-123');
    expect(result.comment).toBeUndefined();
  });

  it('should validate parameters with comment', () => {
    const input = {
      issue_key: 'ISSUE-123',
      comment: 'Confirmed after code review',
    };
    const result = z.object(confirmIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-123');
    expect(result.comment).toBe('Confirmed after code review');
  });

  it('should reject missing issue key', () => {
    const input = {
      comment: 'Confirmed',
    };
    expect(() => z.object(confirmIssueToolSchema).parse(input)).toThrow();
  });
});

describe('unconfirmIssueToolSchema', () => {
  it('should validate minimal parameters', () => {
    const input = {
      issue_key: 'ISSUE-456',
    };
    const result = z.object(unconfirmIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-456');
    expect(result.comment).toBeUndefined();
  });

  it('should validate parameters with comment', () => {
    const input = {
      issue_key: 'ISSUE-456',
      comment: 'Needs further investigation',
    };
    const result = z.object(unconfirmIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-456');
    expect(result.comment).toBe('Needs further investigation');
  });

  it('should reject missing issue key', () => {
    const input = {
      comment: 'Unconfirmed',
    };
    expect(() => z.object(unconfirmIssueToolSchema).parse(input)).toThrow();
  });
});

describe('resolveIssueToolSchema', () => {
  it('should validate minimal parameters', () => {
    const input = {
      issue_key: 'ISSUE-789',
    };
    const result = z.object(resolveIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-789');
    expect(result.comment).toBeUndefined();
  });

  it('should validate parameters with comment', () => {
    const input = {
      issue_key: 'ISSUE-789',
      comment: 'Fixed in commit abc123',
    };
    const result = z.object(resolveIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-789');
    expect(result.comment).toBe('Fixed in commit abc123');
  });

  it('should reject missing issue key', () => {
    const input = {
      comment: 'Resolved',
    };
    expect(() => z.object(resolveIssueToolSchema).parse(input)).toThrow();
  });
});

describe('reopenIssueToolSchema', () => {
  it('should validate minimal parameters', () => {
    const input = {
      issue_key: 'ISSUE-101',
    };
    const result = z.object(reopenIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-101');
    expect(result.comment).toBeUndefined();
  });

  it('should validate parameters with comment', () => {
    const input = {
      issue_key: 'ISSUE-101',
      comment: 'Issue still occurs in production',
    };
    const result = z.object(reopenIssueToolSchema).parse(input);
    expect(result.issue_key).toBe('ISSUE-101');
    expect(result.comment).toBe('Issue still occurs in production');
  });

  it('should reject missing issue key', () => {
    const input = {
      comment: 'Reopened',
    };
    expect(() => z.object(reopenIssueToolSchema).parse(input)).toThrow();
  });
});

```
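
Every assertion above wraps the exported schema in `z.object(...)`, which implies these schemas are exported as raw Zod shapes rather than `ZodObject` instances. A hedged sketch of why that shape is convenient for MCP tool registration; the server name, tool description, and handler body here are illustrative assumptions, not this repo's actual wiring:

```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { issuesToolSchema } from './schemas/issues.js';

const server = new McpServer({ name: 'sonarqube-mcp-server', version: '0.0.0' });

// The SDK's tool() accepts a raw Zod shape and wraps it in z.object() internally,
// the same wrapping each test above performs by hand.
server.tool('issues', 'Search SonarQube issues', issuesToolSchema, async (params) => {
  // params arrives validated and transformed (e.g. page: '2' -> 2, asc: 'false' -> false).
  return {
    content: [{ type: 'text' as const, text: JSON.stringify(params) }],
  };
});
```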

--------------------------------------------------------------------------------
/src/handlers/issues.ts:
--------------------------------------------------------------------------------

```typescript
import type {
  IssuesParams,
  ISonarQubeClient,
  SonarQubeIssue,
  MarkIssueFalsePositiveParams,
  MarkIssueWontFixParams,
  BulkIssueMarkParams,
  AddCommentToIssueParams,
  AssignIssueParams,
  ConfirmIssueParams,
  UnconfirmIssueParams,
  ResolveIssueParams,
  ReopenIssueParams,
  DoTransitionResponse,
} from '../types/index.js';
import { getDefaultClient } from '../utils/client-factory.js';
import { createLogger } from '../utils/logger.js';
import { ElicitationManager } from '../utils/elicitation.js';
import { createStructuredResponse } from '../utils/structured-response.js';

const logger = createLogger('handlers/issues');

// Elicitation manager instance (will be set by index.ts)
let elicitationManager: ElicitationManager | null = null;

export function setElicitationManager(manager: ElicitationManager): void {
  elicitationManager = manager;
}

// Common types for elicitation responses
export interface ElicitationCancelResponse {
  [key: string]: unknown;
  content: Array<{
    type: 'text';
    text: string;
  }>;
}

/**
 * Handles single issue resolution elicitation
 */
async function handleSingleIssueElicitation<T extends { comment?: string }>(
  params: T & { issueKey: string },
  resolutionType: string
): Promise<
  { params: T; cancelled: false } | { cancelled: true; response: ElicitationCancelResponse }
> {
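  // Pass through unchanged when elicitation is disabled or the caller already supplied a comment.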
  if (!elicitationManager?.isEnabled() || params.comment) {
    return { params, cancelled: false };
  }

  const commentResult = await elicitationManager.collectResolutionComment(
    params.issueKey,
    resolutionType
  );

  if (commentResult.action === 'accept' && commentResult.content) {
    return { params: { ...params, comment: commentResult.content.comment }, cancelled: false };
  }

  if (commentResult.action === 'reject' || commentResult.action === 'cancel') {
    return {
      cancelled: true,
      response: {
        content: [
          {
            type: 'text' as const,
            text: JSON.stringify({
              message: 'Operation cancelled by user',
              issueKey: params.issueKey,
            }),
          },
        ],
      },
    };
  }

  return { params, cancelled: false };
}

/**
 * Handles bulk issue resolution elicitation
 */
async function handleBulkIssueElicitation<T extends { comment?: string; issueKeys: string[] }>(
  params: T,
  operationType: string
): Promise<
  { params: T; cancelled: false } | { cancelled: true; response: ElicitationCancelResponse }
> {
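  // Unlike the single-issue path, a pre-supplied comment does not bypass elicitation:
  // bulk operations always require an explicit confirmation first.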
  if (!elicitationManager?.isEnabled()) {
    return { params, cancelled: false };
  }

  const confirmResult = await elicitationManager.confirmBulkOperation(
    operationType,
    params.issueKeys.length,
    params.issueKeys
  );

  if (confirmResult.action === 'reject' || confirmResult.action === 'cancel') {
    return {
      cancelled: true,
      response: {
        content: [
          {
            type: 'text' as const,
            text: JSON.stringify({
              message: 'Bulk operation cancelled by user',
              issueCount: params.issueKeys.length,
            }),
          },
        ],
      },
    };
  }

  // Add comment from confirmation if provided
  if (confirmResult.action === 'accept' && confirmResult.content?.comment && !params.comment) {
    return {
      params: { ...params, comment: confirmResult.content.comment },
      cancelled: false,
    };
  }

  return { params, cancelled: false };
}

/**
 * Maps bulk operation results to a consistent format
 */
function mapBulkResults(results: DoTransitionResponse[]) {
  return results.map((result) => ({
    issue: result.issue,
    components: result.components,
    rules: result.rules,
    users: result.users,
  }));
}

/**
 * Creates a standard issue operation response
 */
function createIssueOperationResponse(message: string, result: DoTransitionResponse) {
  return createStructuredResponse({
    message,
    issue: result.issue,
    components: result.components,
    rules: result.rules,
    users: result.users,
  });
}

/**
 * Fetches and returns issues from a specified SonarQube project with advanced filtering capabilities
 *
 * This tool supports comprehensive filtering for targeted analysis, dashboards, and audits:
 * - **Component/File Path Filtering**: Use `component_keys` to filter by specific files or directories
 * - **Directory Filtering**: Use `directories` to filter by directory paths (e.g., ['src/main/', 'test/'])
 * - **File Filtering**: Use `files` to filter by specific file paths (e.g., ['UserService.java', 'config.properties'])
 * - **Scope Filtering**: Use `scopes` to filter by issue scope (MAIN for production code, TEST for test code, OVERALL for both)
 * - **Assignee Filtering**: Use `assignees` to filter by assigned users
 * - **Tag Filtering**: Use `tags` to filter by issue tags
 * - **Severity Filtering**: Use `severities` to filter by severity levels (INFO, MINOR, MAJOR, CRITICAL, BLOCKER)
 * - **Status Filtering**: Use `statuses` to filter by issue status (OPEN, CONFIRMED, REOPENED, RESOLVED, CLOSED)
 * - **Date Filtering**: Use `created_after`, `created_before`, `created_in_last` for time-based queries
 * - **Security Standards**: Filter by OWASP, CWE, SANS Top 25, and SonarSource security categories
 * - **Faceted Search**: Use `facets` to get aggregated data for dashboards
 *
 * @param params Parameters for fetching issues with extensive filtering options
 * @param client Optional SonarQube client instance
 * @returns A response containing the list of issues with their details, facets, and pagination info
 * @throws Error if no authentication environment variables are set (SONARQUBE_TOKEN, SONARQUBE_USERNAME/PASSWORD, or SONARQUBE_PASSCODE)
 *
 * @example
 * // Filter by file path and severity
 * await handleSonarQubeGetIssues({
 *   projectKey: 'my-project',
 *   componentKeys: ['src/main/java/com/example/Service.java'],
 *   severities: ['CRITICAL', 'BLOCKER'],
 *   facets: ['severities', 'types', 'authors']
 * });
 *
 * @example
 * // Filter by directory and scope
 * await handleSonarQubeGetIssues({
 *   projectKey: 'my-project',
 *   directories: ['src/main/java/com/example/services/'],
 *   scopes: ['MAIN'],
 *   facets: ['severities', 'rules']
 * });
 *
 * @example
 * // Dashboard query with assignee and tag filters
 * await handleSonarQubeGetIssues({
 *   projectKey: 'my-project',
 *   assignees: ['user@example.com'],
 *   tags: ['security', 'performance'],
 *   statuses: ['OPEN', 'REOPENED'],
 *   facets: ['severities', 'tags', 'assignees']
 * });
 */
export async function handleSonarQubeGetIssues(
  params: IssuesParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug('Handling SonarQube issues request', { projectKey: params.projectKey });

  try {
    const result = await client.getIssues(params);
    logger.info('Successfully retrieved issues', {
      projectKey: params.projectKey,
      count: result.issues.length,
    });

    return createStructuredResponse({
      issues: result.issues.map((issue: SonarQubeIssue) => ({
        key: issue.key,
        rule: issue.rule,
        severity: issue.severity,
        component: issue.component,
        project: issue.project,
        line: issue.line,
        status: issue.status,
        issueStatus: issue.issueStatus,
        message: issue.message,
        messageFormattings: issue.messageFormattings,
        effort: issue.effort,
        debt: issue.debt,
        author: issue.author,
        tags: issue.tags,
        creationDate: issue.creationDate,
        updateDate: issue.updateDate,
        type: issue.type,
        cleanCodeAttribute: issue.cleanCodeAttribute,
        cleanCodeAttributeCategory: issue.cleanCodeAttributeCategory,
        prioritizedRule: issue.prioritizedRule,
        impacts: issue.impacts,
        textRange: issue.textRange,
        comments: issue.comments,
        transitions: issue.transitions,
        actions: issue.actions,
        flows: issue.flows,
        quickFixAvailable: issue.quickFixAvailable,
        ruleDescriptionContextKey: issue.ruleDescriptionContextKey,
        codeVariants: issue.codeVariants,
        hash: issue.hash,
      })),
      components: result.components,
      rules: result.rules,
      users: result.users,
      facets: result.facets,
      paging: result.paging,
    });
  } catch (error) {
    logger.error('Failed to retrieve SonarQube issues', error);
    throw error;
  }
}

/**
 * Mark an issue as false positive
 * @param params Parameters for marking issue as false positive
 * @param client Optional SonarQube client instance
 * @returns A response containing the updated issue details
 */
export async function handleMarkIssueFalsePositive(
  params: MarkIssueFalsePositiveParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug('Handling mark issue false positive request', { issueKey: params.issueKey });

  try {
    // Handle elicitation for resolution comment
    const elicitationResult = await handleSingleIssueElicitation(params, 'false positive');
    if (elicitationResult.cancelled) {
      return elicitationResult.response;
    }
    const finalParams = (
      elicitationResult as { params: MarkIssueFalsePositiveParams; cancelled: false }
    ).params;

    const result = await client.markIssueFalsePositive(finalParams);
    logger.info('Successfully marked issue as false positive', {
      issueKey: finalParams.issueKey,
      comment: finalParams.comment ? 'with comment' : 'without comment',
    });

    return createIssueOperationResponse(
      `Issue ${finalParams.issueKey} marked as false positive`,
      result
    );
  } catch (error) {
    logger.error('Failed to mark issue as false positive', error);
    throw error;
  }
}

/**
 * Mark an issue as won't fix
 * @param params Parameters for marking issue as won't fix
 * @param client Optional SonarQube client instance
 * @returns A response containing the updated issue details
 */
export async function handleMarkIssueWontFix(
  params: MarkIssueWontFixParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug("Handling mark issue won't fix request", { issueKey: params.issueKey });

  try {
    // Handle elicitation for resolution comment
    const elicitationResult = await handleSingleIssueElicitation(params, "won't fix");
    if (elicitationResult.cancelled) {
      return elicitationResult.response;
    }
    const finalParams = (elicitationResult as { params: MarkIssueWontFixParams; cancelled: false })
      .params;

    const result = await client.markIssueWontFix(finalParams);
    logger.info("Successfully marked issue as won't fix", {
      issueKey: finalParams.issueKey,
      comment: finalParams.comment ? 'with comment' : 'without comment',
    });

    return createIssueOperationResponse(
      `Issue ${finalParams.issueKey} marked as won't fix`,
      result
    );
  } catch (error) {
    logger.error("Failed to mark issue as won't fix", error);
    throw error;
  }
}

/**
 * Mark multiple issues as false positive
 * @param params Parameters for marking issues as false positive
 * @param client Optional SonarQube client instance
 * @returns A response containing the updated issues details
 */
export async function handleMarkIssuesFalsePositive(
  params: BulkIssueMarkParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug('Handling mark issues false positive request', {
    issueCount: params.issueKeys.length,
  });

  try {
    // Handle elicitation for bulk operation
    const elicitationResult = await handleBulkIssueElicitation(params, 'mark as false positive');
    if (elicitationResult.cancelled) {
      return elicitationResult.response;
    }
    const finalParams = (elicitationResult as { params: BulkIssueMarkParams; cancelled: false })
      .params;

    const results = await client.markIssuesFalsePositive(finalParams);
    logger.info('Successfully marked issues as false positive', {
      issueCount: finalParams.issueKeys.length,
      comment: finalParams.comment ? 'with comment' : 'without comment',
    });

    return createStructuredResponse({
      message: `${finalParams.issueKeys.length} issues marked as false positive`,
      results: mapBulkResults(results),
    });
  } catch (error) {
    logger.error('Failed to mark issues as false positive', error);
    throw error;
  }
}

/**
 * Mark multiple issues as won't fix
 * @param params Parameters for marking issues as won't fix
 * @param client Optional SonarQube client instance
 * @returns A response containing the updated issues details
 */
export async function handleMarkIssuesWontFix(
  params: BulkIssueMarkParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug("Handling mark issues won't fix request", {
    issueCount: params.issueKeys.length,
  });

  try {
    // Handle elicitation for bulk operation
    const elicitationResult = await handleBulkIssueElicitation(params, "mark as won't fix");
    if (elicitationResult.cancelled) {
      return elicitationResult.response;
    }
    const finalParams = (elicitationResult as { params: BulkIssueMarkParams; cancelled: false })
      .params;

    const results = await client.markIssuesWontFix(finalParams);
    logger.info("Successfully marked issues as won't fix", {
      issueCount: finalParams.issueKeys.length,
      comment: finalParams.comment ? 'with comment' : 'without comment',
    });

    return createStructuredResponse({
      message: `${finalParams.issueKeys.length} issues marked as won't fix`,
      results: mapBulkResults(results),
    });
  } catch (error) {
    logger.error("Failed to mark issues as won't fix", error);
    throw error;
  }
}

/**
 * Add a comment to an issue
 * @param params Parameters for adding a comment to an issue
 * @param client Optional SonarQube client instance
 * @returns A response containing the created comment details
 */
export async function handleAddCommentToIssue(
  params: AddCommentToIssueParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug('Handling add comment to issue request', { issueKey: params.issueKey });

  try {
    const comment = await client.addCommentToIssue(params);
    logger.info('Successfully added comment to issue', {
      issueKey: params.issueKey,
      commentKey: comment.key,
    });

    return createStructuredResponse({
      message: `Comment added to issue ${params.issueKey}`,
      comment: {
        key: comment.key,
        login: comment.login,
        htmlText: comment.htmlText,
        markdown: comment.markdown,
        updatable: comment.updatable,
        createdAt: comment.createdAt,
      },
    });
  } catch (error) {
    logger.error('Failed to add comment to issue', error);
    throw error;
  }
}

/**
 * Handler for assigning an issue
 */
export async function handleAssignIssue(
  params: { issueKey: string; assignee?: string },
  client?: ISonarQubeClient
) {
  const sonarQubeClient = client ?? getDefaultClient();

  logger.debug('Handling assign issue request', {
    issueKey: params.issueKey,
    assignee: params.assignee,
  });

  try {
    // Normalize empty string to undefined for consistent unassignment handling
    const normalizedAssignee = params.assignee === '' ? undefined : params.assignee;

    const assignParams: AssignIssueParams = {
      issueKey: params.issueKey,
    };
    if (normalizedAssignee !== undefined) {
      assignParams.assignee = normalizedAssignee;
    }

    const updatedIssue = await sonarQubeClient.assignIssue(assignParams);

    // Cast to access dynamic fields
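    // (assignee, assigneeName, and resolution are not declared on the base SonarQubeIssue type)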
    const issueWithAssignee = updatedIssue as SonarQubeIssue & {
      assignee?: string | null;
      assigneeName?: string | null;
      resolution?: string | null;
    };

    const assigneeName = issueWithAssignee.assignee ?? 'unassigned';
    const assigneeDisplay = normalizedAssignee
      ? `Assigned to: ${issueWithAssignee.assigneeName ?? normalizedAssignee}`
      : 'Issue unassigned';

    logger.info('Issue assigned successfully', {
      issueKey: params.issueKey,
      assignee: assigneeName,
    });

    return createStructuredResponse({
      message: `${assigneeDisplay} for issue ${params.issueKey}`,
      issue: {
        key: updatedIssue.key,
        component: updatedIssue.component ?? 'N/A',
        message: updatedIssue.message,
        severity: updatedIssue.severity ?? 'UNKNOWN',
        type: updatedIssue.type ?? 'UNKNOWN',
        status: updatedIssue.status,
        resolution: issueWithAssignee.resolution ?? null,
        assignee: issueWithAssignee.assignee,
        assigneeName: issueWithAssignee.assigneeName ?? null,
      },
    });
  } catch (error) {
    logger.error('Failed to assign issue', error);
    throw error;
  }
}

/**
 * Handler for confirming an issue
 */
export async function handleConfirmIssue(
  params: ConfirmIssueParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug('Handling confirm issue request', { issueKey: params.issueKey });

  try {
    const result = await client.confirmIssue(params);
    logger.info('Successfully confirmed issue', {
      issueKey: params.issueKey,
      comment: params.comment ? 'with comment' : 'without comment',
    });

    return createIssueOperationResponse(`Issue ${params.issueKey} confirmed`, result);
  } catch (error) {
    logger.error('Failed to confirm issue', error);
    throw error;
  }
}

/**
 * Handler for unconfirming an issue
 */
export async function handleUnconfirmIssue(
  params: UnconfirmIssueParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug('Handling unconfirm issue request', { issueKey: params.issueKey });

  try {
    const result = await client.unconfirmIssue(params);
    logger.info('Successfully unconfirmed issue', {
      issueKey: params.issueKey,
      comment: params.comment ? 'with comment' : 'without comment',
    });

    return createIssueOperationResponse(`Issue ${params.issueKey} unconfirmed`, result);
  } catch (error) {
    logger.error('Failed to unconfirm issue', error);
    throw error;
  }
}

/**
 * Handler for resolving an issue
 */
export async function handleResolveIssue(
  params: ResolveIssueParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug('Handling resolve issue request', { issueKey: params.issueKey });

  try {
    const result = await client.resolveIssue(params);
    logger.info('Successfully resolved issue', {
      issueKey: params.issueKey,
      comment: params.comment ? 'with comment' : 'without comment',
    });

    return createIssueOperationResponse(`Issue ${params.issueKey} resolved`, result);
  } catch (error) {
    logger.error('Failed to resolve issue', error);
    throw error;
  }
}

/**
 * Handler for reopening an issue
 */
export async function handleReopenIssue(
  params: ReopenIssueParams,
  client: ISonarQubeClient = getDefaultClient()
) {
  logger.debug('Handling reopen issue request', { issueKey: params.issueKey });

  try {
    const result = await client.reopenIssue(params);
    logger.info('Successfully reopened issue', {
      issueKey: params.issueKey,
      comment: params.comment ? 'with comment' : 'without comment',
    });

    return createIssueOperationResponse(`Issue ${params.issueKey} reopened`, result);
  } catch (error) {
    logger.error('Failed to reopen issue', error);
    throw error;
  }
}

```
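
A minimal usage sketch tying this module together. The `ElicitationManager` construction is an assumption (its real options live in `utils/elicitation.ts`), and authentication is expected to come from environment variables as documented above:

```typescript
import { setElicitationManager, handleMarkIssueFalsePositive } from './handlers/issues.js';
import { ElicitationManager } from './utils/elicitation.js';

// Hypothetical setup; index.ts is responsible for the real configuration.
setElicitationManager(new ElicitationManager());

// Assumes SONARQUBE_TOKEN (or username/password, or passcode) is set so that
// getDefaultClient() can authenticate. Supplying a comment up front means the
// single-issue elicitation prompt is skipped entirely.
const response = await handleMarkIssueFalsePositive({
  issueKey: 'ISSUE-123',
  comment: 'Input is validated upstream; the analyzer lacks that context.',
});
console.log(response.content[0]?.text);
```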
Page 6/8