This is page 4 of 8. Use http://codebase.md/sapientpants/sonarqube-mcp-server?page={x} to view the full context.

# Directory Structure

```
├── .adr-dir
├── .changeset
│   ├── config.json
│   └── README.md
├── .claude
│   ├── commands
│   │   ├── analyze-and-fix-github-issue.md
│   │   ├── fix-sonarqube-issues.md
│   │   ├── implement-github-issue.md
│   │   ├── release.md
│   │   ├── spec-feature.md
│   │   └── update-dependencies.md
│   ├── hooks
│   │   └── block-git-no-verify.ts
│   └── settings.json
├── .dockerignore
├── .github
│   ├── actionlint.yaml
│   ├── changeset.yml
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   ├── pull_request_template.md
│   ├── scripts
│   │   ├── determine-artifact.sh
│   │   └── version-and-release.js
│   ├── workflows
│   │   ├── codeql.yml
│   │   ├── main.yml
│   │   ├── pr.yml
│   │   ├── publish.yml
│   │   ├── reusable-docker.yml
│   │   ├── reusable-security.yml
│   │   └── reusable-validate.yml
│   └── WORKFLOWS.md
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .markdownlint.yaml
├── .markdownlintignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── .trivyignore
├── .yaml-lint.yml
├── .yamllintignore
├── CHANGELOG.md
├── changes.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── COMPATIBILITY.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── architecture
│   │   └── decisions
│   │       ├── 0001-record-architecture-decisions.md
│   │       ├── 0002-use-node-js-with-typescript.md
│   │       ├── 0003-adopt-model-context-protocol-for-sonarqube-integration.md
│   │       ├── 0004-use-sonarqube-web-api-client-for-all-sonarqube-interactions.md
│   │       ├── 0005-domain-driven-design-of-sonarqube-modules.md
│   │       ├── 0006-expose-sonarqube-features-as-mcp-tools.md
│   │       ├── 0007-support-multiple-authentication-methods-for-sonarqube.md
│   │       ├── 0008-use-environment-variables-for-configuration.md
│   │       ├── 0009-file-based-logging-to-avoid-stdio-conflicts.md
│   │       ├── 0010-use-stdio-transport-for-mcp-communication.md
│   │       ├── 0011-docker-containerization-for-deployment.md
│   │       ├── 0012-add-elicitation-support-for-interactive-user-input.md
│   │       ├── 0014-current-security-model-and-future-oauth2-considerations.md
│   │       ├── 0015-transport-architecture-refactoring.md
│   │       ├── 0016-http-transport-with-oauth-2-0-metadata-endpoints.md
│   │       ├── 0017-comprehensive-audit-logging-system.md
│   │       ├── 0018-add-comprehensive-monitoring-and-observability.md
│   │       ├── 0019-simplify-to-stdio-only-transport-for-mcp-gateway-deployment.md
│   │       ├── 0020-testing-framework-and-strategy-vitest-with-property-based-testing.md
│   │       ├── 0021-code-quality-toolchain-eslint-prettier-strict-typescript.md
│   │       ├── 0022-package-manager-choice-pnpm.md
│   │       ├── 0023-release-management-with-changesets.md
│   │       ├── 0024-ci-cd-platform-github-actions.md
│   │       ├── 0025-container-and-security-scanning-strategy.md
│   │       ├── 0026-circuit-breaker-pattern-with-opossum.md
│   │       ├── 0027-docker-image-publishing-strategy-ghcr-to-docker-hub.md
│   │       └── 0028-session-based-http-transport-with-server-sent-events.md
│   ├── architecture.md
│   ├── security.md
│   └── troubleshooting.md
├── eslint.config.js
├── examples
│   └── http-client.ts
├── jest.config.js
├── LICENSE
├── LICENSES.md
├── osv-scanner.toml
├── package.json
├── pnpm-lock.yaml
├── README.md
├── scripts
│   ├── actionlint.sh
│   ├── ci-local.sh
│   ├── load-test.sh
│   ├── README.md
│   ├── run-all-tests.sh
│   ├── scan-container.sh
│   ├── security-scan.sh
│   ├── setup.sh
│   ├── test-monitoring-integration.sh
│   └── validate-docs.sh
├── SECURITY.md
├── sonar-project.properties
├── src
│   ├── __tests__
│   │   ├── additional-coverage.test.ts
│   │   ├── advanced-index.test.ts
│   │   ├── assign-issue.test.ts
│   │   ├── auth-methods.test.ts
│   │   ├── boolean-string-transform.test.ts
│   │   ├── components.test.ts
│   │   ├── config
│   │   │   └── service-accounts.test.ts
│   │   ├── dependency-injection.test.ts
│   │   ├── direct-handlers.test.ts
│   │   ├── direct-lambdas.test.ts
│   │   ├── direct-schema-validation.test.ts
│   │   ├── domains
│   │   │   ├── components-domain-full.test.ts
│   │   │   ├── components-domain.test.ts
│   │   │   ├── hotspots-domain.test.ts
│   │   │   └── source-code-domain.test.ts
│   │   ├── environment-validation.test.ts
│   │   ├── error-handler.test.ts
│   │   ├── error-handling.test.ts
│   │   ├── errors.test.ts
│   │   ├── function-tests.test.ts
│   │   ├── handlers
│   │   │   ├── components-handler-integration.test.ts
│   │   │   └── projects-authorization.test.ts
│   │   ├── handlers.test.ts
│   │   ├── handlers.test.ts.skip
│   │   ├── index.test.ts
│   │   ├── issue-resolution-elicitation.test.ts
│   │   ├── issue-resolution.test.ts
│   │   ├── issue-transitions.test.ts
│   │   ├── issues-enhanced-search.test.ts
│   │   ├── issues-new-parameters.test.ts
│   │   ├── json-array-transform.test.ts
│   │   ├── lambda-functions.test.ts
│   │   ├── lambda-handlers.test.ts.skip
│   │   ├── logger.test.ts
│   │   ├── mapping-functions.test.ts
│   │   ├── mocked-environment.test.ts
│   │   ├── null-to-undefined.test.ts
│   │   ├── parameter-transformations-advanced.test.ts
│   │   ├── parameter-transformations.test.ts
│   │   ├── protocol-version.test.ts
│   │   ├── pull-request-transform.test.ts
│   │   ├── quality-gates.test.ts
│   │   ├── schema-parameter-transforms.test.ts
│   │   ├── schema-transformation-mocks.test.ts
│   │   ├── schema-transforms.test.ts
│   │   ├── schema-validators.test.ts
│   │   ├── schemas
│   │   │   ├── components-schema.test.ts
│   │   │   ├── hotspots-tools-schema.test.ts
│   │   │   └── issues-schema.test.ts
│   │   ├── sonarqube-elicitation.test.ts
│   │   ├── sonarqube.test.ts
│   │   ├── source-code.test.ts
│   │   ├── standalone-handlers.test.ts
│   │   ├── string-to-number-transform.test.ts
│   │   ├── tool-handler-lambdas.test.ts
│   │   ├── tool-handlers.test.ts
│   │   ├── tool-registration-schema.test.ts
│   │   ├── tool-registration-transforms.test.ts
│   │   ├── transformation-util.test.ts
│   │   ├── transports
│   │   │   ├── base.test.ts
│   │   │   ├── factory.test.ts
│   │   │   ├── http.test.ts
│   │   │   ├── session-manager.test.ts
│   │   │   └── stdio.test.ts
│   │   ├── utils
│   │   │   ├── retry.test.ts
│   │   │   └── transforms.test.ts
│   │   ├── zod-boolean-transform.test.ts
│   │   ├── zod-schema-transforms.test.ts
│   │   └── zod-transforms.test.ts
│   ├── config
│   │   ├── service-accounts.ts
│   │   └── versions.ts
│   ├── domains
│   │   ├── base.ts
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── errors.ts
│   ├── handlers
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── index.ts
│   ├── monitoring
│   │   ├── __tests__
│   │   │   └── circuit-breaker.test.ts
│   │   ├── circuit-breaker.ts
│   │   ├── health.ts
│   │   └── metrics.ts
│   ├── schemas
│   │   ├── common.ts
│   │   ├── components.ts
│   │   ├── hotspots-tools.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   ├── sonarqube.ts
│   ├── transports
│   │   ├── base.ts
│   │   ├── factory.ts
│   │   ├── http.ts
│   │   ├── index.ts
│   │   ├── session-manager.ts
│   │   └── stdio.ts
│   ├── types
│   │   ├── common.ts
│   │   ├── components.ts
│   │   ├── hotspots.ts
│   │   ├── index.ts
│   │   ├── issues.ts
│   │   ├── measures.ts
│   │   ├── metrics.ts
│   │   ├── projects.ts
│   │   ├── quality-gates.ts
│   │   ├── source-code.ts
│   │   └── system.ts
│   └── utils
│       ├── __tests__
│       │   ├── elicitation.test.ts
│       │   ├── pattern-matcher.test.ts
│       │   └── structured-response.test.ts
│       ├── client-factory.ts
│       ├── elicitation.ts
│       ├── error-handler.ts
│       ├── logger.ts
│       ├── parameter-mappers.ts
│       ├── pattern-matcher.ts
│       ├── retry.ts
│       ├── structured-response.ts
│       └── transforms.ts
├── test-http-transport.sh
├── tmp
│   └── .gitkeep
├── tsconfig.build.json
├── tsconfig.json
├── vitest.config.d.ts
├── vitest.config.js
├── vitest.config.js.map
└── vitest.config.ts
```

# Files

--------------------------------------------------------------------------------
/src/__tests__/handlers.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach, beforeAll, vi } from 'vitest';
// Mock environment variables
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'http://localhost:9000';
process.env.SONARQUBE_ORGANIZATION = 'test-org';

// Mock the sonarqube client
vi.mock('../sonarqube.js', () => ({
  createSonarQubeClientFromEnv: vi.fn(() => ({
    listProjects: vi.fn().mockResolvedValue({
      projects: [{ key: 'test-project', name: 'Test Project' }],
      paging: { pageIndex: 1, pageSize: 10, total: 1 },
    }),
    getIssues: vi.fn().mockResolvedValue({
      issues: [{ key: 'test-issue', rule: 'test-rule', severity: 'MAJOR' }],
      paging: { pageIndex: 1, pageSize: 10, total: 1 },
    }),
    getMetrics: vi.fn().mockResolvedValue({
      metrics: [{ key: 'coverage', name: 'Coverage' }],
      paging: { pageIndex: 1, pageSize: 10, total: 1 },
    }),
    getHealth: vi.fn().mockResolvedValue({ health: 'GREEN', causes: [] }),
    getStatus: vi.fn().mockResolvedValue({ id: 'test-id', version: '10.3.0.82913', status: 'UP' }),
    ping: vi.fn().mockResolvedValue('pong'),
    getComponentMeasures: vi.fn().mockResolvedValue({
      component: { key: 'test-component', measures: [{ metric: 'coverage', value: '85.4' }] },
      metrics: [{ key: 'coverage', name: 'Coverage' }],
    }),
    getComponentsMeasures: vi.fn().mockResolvedValue({
      components: [{ key: 'test-component-1', measures: [{ metric: 'coverage', value: '85.4' }] }],
      metrics: [{ key: 'coverage', name: 'Coverage' }],
      paging: { pageIndex: 1, pageSize: 10, total: 1 },
    }),
    getMeasuresHistory: vi.fn().mockResolvedValue({
      measures: [{ metric: 'coverage', history: [{ date: '2023-01-01', value: '85.4' }] }],
      paging: { pageIndex: 1, pageSize: 10, total: 1 },
    }),
  })),
  setSonarQubeElicitationManager: vi.fn(),
  createSonarQubeClientFromEnvWithElicitation: vi.fn(() =>
    Promise.resolve({
      listProjects: vi.fn(),
      getIssues: vi.fn(),
    })
  ),
}));

// Save environment variables
const originalEnv = process.env;
let handleSonarQubeProjects: any;
let handleSonarQubeGetIssues: any;
let handleSonarQubeGetMetrics: any;
let handleSonarQubeGetHealth: any;
let handleSonarQubeGetStatus: any;
let handleSonarQubePing: any;
let handleSonarQubeComponentMeasures: any;
let handleSonarQubeComponentsMeasures: any;
let handleSonarQubeMeasuresHistory: any;
// No need to mock axios anymore since we're using sonarqube-web-api-client
describe('Handler Functions', () => {
  beforeAll(async () => {
    const module = await import('../index.js');
    handleSonarQubeProjects = module.handleSonarQubeProjects;
    handleSonarQubeGetIssues = module.handleSonarQubeGetIssues;
    handleSonarQubeGetMetrics = module.handleSonarQubeGetMetrics;
    handleSonarQubeGetHealth = module.handleSonarQubeGetHealth;
    handleSonarQubeGetStatus = module.handleSonarQubeGetStatus;
    handleSonarQubePing = module.handleSonarQubePing;
    handleSonarQubeComponentMeasures = module.handleSonarQubeComponentMeasures;
    handleSonarQubeComponentsMeasures = module.handleSonarQubeComponentsMeasures;
    handleSonarQubeMeasuresHistory = module.handleSonarQubeMeasuresHistory;
  });
  beforeEach(() => {
    vi.resetModules();
    process.env = { ...originalEnv };
  });
  afterEach(() => {
    process.env = originalEnv;
    vi.clearAllMocks();
  });
  describe('handleSonarQubeProjects', () => {
    it('should handle projects correctly', async () => {
      const result = await handleSonarQubeProjects({});
      const data = JSON.parse(result.content[0].text);
      expect(data.projects).toBeDefined();
      expect(data.projects).toHaveLength(1);
      expect(data.projects[0].key).toBe('test-project');
      expect(data.paging).toBeDefined();
    });
    it('should handle pagination parameters', async () => {
      const result = await handleSonarQubeProjects({ page: 2, page_size: 10 });
      const data = JSON.parse(result.content[0].text);
      expect(data.projects).toBeDefined();
      expect(data.paging).toBeDefined();
    });
  });
  describe('handleSonarQubeGetIssues', () => {
    it('should handle issues correctly', async () => {
      const result = await handleSonarQubeGetIssues({ projectKey: 'test-project' });
      const data = JSON.parse(result.content[0].text);
      expect(data.issues).toBeDefined();
      expect(data.issues).toHaveLength(1);
      expect(data.issues[0].severity).toBe('MAJOR');
      expect(data.paging).toBeDefined();
    });
  });
  describe('handleSonarQubeGetMetrics', () => {
    it('should handle metrics correctly', async () => {
      const result = await handleSonarQubeGetMetrics({});
      const data = JSON.parse(result.content[0].text);
      expect(data.metrics).toBeDefined();
      expect(data.metrics).toHaveLength(1);
      expect(data.metrics[0].key).toBe('coverage');
      expect(data.paging).toBeDefined();
    });
  });
  describe('System API Handlers', () => {
    it('should handle health correctly', async () => {
      const result = await handleSonarQubeGetHealth();
      const data = JSON.parse(result.content[0].text);
      expect(data.health).toBe('GREEN');
      expect(data.causes).toEqual([]);
    });
    it('should handle status correctly', async () => {
      const result = await handleSonarQubeGetStatus();
      const data = JSON.parse(result.content[0].text);
      expect(data.id).toBe('test-id');
      expect(data.version).toBe('10.3.0.82913');
      expect(data.status).toBe('UP');
    });
    it('should handle ping correctly', async () => {
      const result = await handleSonarQubePing();
      expect(result.content[0].text).toBe('pong');
    });
  });
  describe('Measures API Handlers', () => {
    it('should handle component measures correctly', async () => {
      const result = await handleSonarQubeComponentMeasures({
        component: 'test-component',
        metricKeys: ['coverage'],
      });
      const data = JSON.parse(result.content[0].text);
      expect(data.component).toBeDefined();
      expect(data.component.key).toBe('test-component');
      expect(data.component.measures).toHaveLength(1);
      expect(data.component.measures[0].metric).toBe('coverage');
      expect(data.metrics).toBeDefined();
    });
    it('should handle components measures correctly', async () => {
      const result = await handleSonarQubeComponentsMeasures({
        componentKeys: ['test-component-1'],
        metricKeys: ['coverage'],
      });
      const data = JSON.parse(result.content[0].text);
      expect(data.components).toBeDefined();
      expect(data.components).toHaveLength(1);
      expect(data.components[0].key).toBe('test-component-1');
      expect(data.metrics).toBeDefined();
      expect(data.paging).toBeDefined();
    });
    it('should handle measures history correctly', async () => {
      const result = await handleSonarQubeMeasuresHistory({
        component: 'test-component',
        metrics: ['coverage'],
      });
      const data = JSON.parse(result.content[0].text);
      expect(data.measures).toBeDefined();
      expect(data.measures).toHaveLength(1);
      expect(data.measures[0].metric).toBe('coverage');
      expect(data.measures[0].history).toHaveLength(1);
      expect(data.paging).toBeDefined();
    });
  });
});

```

--------------------------------------------------------------------------------
/docs/architecture/decisions/0012-add-elicitation-support-for-interactive-user-input.md:
--------------------------------------------------------------------------------

```markdown
# 12. Add Elicitation Support for Interactive User Input

Date: 2025-06-19

## Status

Proposed

## Context

The SonarQube MCP server currently operates in a non-interactive mode, requiring all configuration and parameters to be provided upfront through environment variables or tool arguments. This approach has several limitations:

1. **Bulk Operations Risk**: When performing bulk operations (e.g., marking multiple issues as false positive), there's no confirmation step, risking accidental modifications.

2. **Configuration Complexity**: Users must configure authentication before starting the server, with no guidance when configuration is missing or incorrect.

3. **Limited Discoverability**: Users must know exact project keys, component paths, and valid parameter values without assistance.

4. **No Context Collection**: Operations such as marking an issue "false positive" or "won't fix" cannot collect an explanatory comment interactively.

MCP SDK v1.13.0 introduced the elicitation capability, which allows servers to request structured input from users through clients during operation. This feature enables:

- Interactive data collection with JSON schema validation
- Multi-attempt collection with confirmation
- Type-safe input handling
- User-controlled data sharing
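
For orientation, the exchange this capability defines (per the MCP elicitation specification linked in the References) is a single server-to-client request; a minimal sketch of the message shapes, written as TypeScript literals purely for illustration:

```typescript
// Server -> client: ask the user for structured input matching a schema.
const request = {
  method: 'elicitation/create',
  params: {
    message: 'Mark 12 issues as false positive?',
    requestedSchema: {
      type: 'object',
      properties: {
        confirm: { type: 'boolean', description: 'Confirm the operation' },
      },
      required: ['confirm'],
    },
  },
};

// Client -> server: the user accepts (with validated content), declines, or cancels.
type ElicitationResult =
  | { action: 'accept'; content: Record<string, unknown> }
  | { action: 'decline' }
  | { action: 'cancel' };
```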

## Decision

We will add elicitation support to the SonarQube MCP server to enable interactive user input collection in specific scenarios where it provides clear value for safety, usability, or data quality.

## Implementation Plan

### 1. Elicitation Use Cases

#### Critical Safety Confirmations

- **Bulk False Positive**: Confirm before marking >5 issues as false positive
- **Bulk Won't Fix**: Confirm before marking >5 issues as won't fix
- **Bulk Assignment**: Confirm before assigning >10 issues to a user

#### Configuration Assistance

- **Missing Authentication**: Guide users through auth setup when not configured
- **Invalid Credentials**: Help users correct authentication issues
- **Organization Selection**: List available organizations for SonarCloud users

#### Context Collection

- **False Positive Justification**: Collect explanation when marking issues
- **Won't Fix Reasoning**: Document why issues won't be addressed
- **Resolution Comments**: Gather details about how issues were resolved

#### Search Refinement

- **Component Disambiguation**: When multiple components match a query
- **Project Selection**: When multiple projects are available
- **Filter Refinement**: When initial search returns too many results

### 2. Technical Implementation

#### Schema Definitions

```typescript
// Confirmation schema
const confirmationSchema = {
  type: 'object',
  properties: {
    confirm: {
      type: 'boolean',
      description: 'Confirm the operation',
    },
    comment: {
      type: 'string',
      description: 'Optional comment',
      maxLength: 500,
    },
  },
  required: ['confirm'],
};

// Authentication schema
const authSchema = {
  type: 'object',
  properties: {
    method: {
      type: 'string',
      enum: ['token', 'basic', 'passcode'],
      description: 'Authentication method',
    },
    token: {
      type: 'string',
      description: 'SonarQube token (for token auth)',
    },
    username: {
      type: 'string',
      description: 'Username (for basic auth)',
    },
    password: {
      type: 'string',
      description: 'Password (for basic auth)',
    },
    passcode: {
      type: 'string',
      description: 'System passcode',
    },
  },
  dependencies: {
    method: {
      oneOf: [
        {
          properties: { method: { const: 'token' } },
          required: ['token'],
        },
        {
          properties: { method: { const: 'basic' } },
          required: ['username', 'password'],
        },
        {
          properties: { method: { const: 'passcode' } },
          required: ['passcode'],
        },
      ],
    },
  },
};
```
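
For readability, the TypeScript shapes these JSON schemas describe (hand-written equivalents for illustration, not generated from the schemas):

```typescript
// Input collected by confirmationSchema.
interface ConfirmationInput {
  confirm: boolean;
  comment?: string; // up to 500 characters per the schema
}

// Input collected by authSchema: the `dependencies` clause makes each
// method require its matching credential fields.
type AuthInput =
  | { method: 'token'; token: string }
  | { method: 'basic'; username: string; password: string }
  | { method: 'passcode'; passcode: string };
```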

#### Integration Points

1. **Bulk Operations**: Add threshold checks and confirmation elicitation (see the sketch after this list)
2. **Authentication**: Detect missing/invalid auth and offer setup assistance
3. **Tool Enhancement**: Update existing tools to use elicitation when beneficial
4. **Error Recovery**: Use elicitation to help users recover from common errors
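
A minimal sketch of the bulk-operation guard (point 1 above), assuming elicitation is exposed to handlers as an injected callback; `ElicitFn`, `confirmBulkOperation`, and the prompt wording are illustrative, not an existing API:

```typescript
// Hypothetical callback wired up by the server: sends an elicitation request
// and resolves with the user's structured answer, or null when the client
// does not support elicitation or the user declined/cancelled.
type ElicitFn = (
  message: string,
  schema: object
) => Promise<{ confirm: boolean; comment?: string } | null>;

// Ask for confirmation once a bulk operation exceeds the configured threshold.
async function confirmBulkOperation(
  issueKeys: string[],
  threshold: number,
  elicit?: ElicitFn
): Promise<boolean> {
  // Small batches, or no elicitation available: behave exactly as today.
  if (issueKeys.length <= threshold || !elicit) {
    return true;
  }
  const answer = await elicit(`Mark ${issueKeys.length} issues as false positive?`, {
    type: 'object',
    properties: {
      confirm: { type: 'boolean', description: 'Confirm the operation' },
      comment: { type: 'string', description: 'Optional comment', maxLength: 500 },
    },
    required: ['confirm'],
  });
  return answer?.confirm === true;
}
```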

### 3. Configuration Options

Add server options to control elicitation behavior:

```typescript
interface ElicitationOptions {
  enabled: boolean; // Master switch for elicitation
  bulkOperationThreshold: number; // Items before confirmation (default: 5)
  requireComments: boolean; // Require comments for resolutions
  interactiveSearch: boolean; // Enable search refinement
}
```
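
A sketch of resolving these options at startup; `resolveElicitationOptions` is illustrative, the environment switch is the one documented in the next section, and the `false` defaults are assumptions (only the threshold default of 5 is stated above):

```typescript
// Uses the ElicitationOptions interface declared above.
function resolveElicitationOptions(env = process.env): ElicitationOptions {
  return {
    // Opt-in master switch; see "Backward Compatibility" below.
    enabled: env.SONARQUBE_MCP_ELICITATION === 'true',
    bulkOperationThreshold: 5, // documented default
    requireComments: false, // assumption
    interactiveSearch: false, // assumption
  };
}
```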

### 4. Backward Compatibility

- Elicitation will be **opt-in** (disabled by default)
- Environment variable `SONARQUBE_MCP_ELICITATION=true` to enable
- All existing workflows continue to work without elicitation
- Tools detect elicitation availability and adapt behavior

## Consequences

### Positive

1. **Improved Safety**: Prevents accidental bulk modifications
2. **Better UX**: Interactive guidance for complex operations
3. **Higher Data Quality**: Collects context and justifications
4. **Easier Onboarding**: Helps new users configure the server
5. **Reduced Errors**: Validates input before operations
6. **Enhanced Discoverability**: Users learn available options interactively

### Negative

1. **SDK Dependency**: Requires upgrade to MCP SDK v1.13.0+
2. **Increased Complexity**: More code paths to maintain
3. **Workflow Interruption**: May slow down automated workflows
4. **Testing Overhead**: Requires testing both interactive and non-interactive modes
5. **Client Compatibility**: Only works with clients that support elicitation

### Neutral

1. **Optional Feature**: Can be disabled for automation scenarios
2. **Gradual Adoption**: Can be implemented incrementally
3. **Learning Curve**: Users need to understand when elicitation occurs

## Migration Strategy

### Phase 1: Foundation (Week 1)

- Upgrade MCP SDK to v1.13.0+
- Add elicitation configuration system
- Create base elicitation utilities

### Phase 2: Critical Safety (Week 2)

- Implement bulk operation confirmations
- Add tests for confirmation flows
- Document safety features

### Phase 3: Enhanced UX (Weeks 3-4)

- Add authentication setup assistance
- Implement search refinement
- Add context collection for resolutions

### Phase 4: Polish (Week 5)

- Performance optimization
- Extended documentation
- User feedback incorporation

## Alternatives Considered

1. **Status Quo**: Continue with non-interactive operation
   - Pros: Simple, predictable
   - Cons: Risk of accidents, poor discoverability

2. **Custom Prompting**: Use MCP prompts instead of elicitation
   - Pros: Available in current SDK
   - Cons: Less structured, no validation, one-way communication

3. **External Configuration Tool**: Separate CLI for configuration
   - Pros: Separation of concerns
   - Cons: Additional tool to maintain, poor integration

4. **Client-Side Validation**: Rely on clients to validate
   - Pros: No server changes needed
   - Cons: Inconsistent experience, no server control

## References

- [MCP Elicitation Specification](https://modelcontextprotocol.io/specification/2025-06-18/client/elicitation)
- [MCP SDK v1.13.0 Release Notes](https://github.com/modelcontextprotocol/sdk/releases/tag/v1.13.0)
- [SonarQube Web API Documentation](https://docs.sonarqube.org/latest/web-api/)

```

--------------------------------------------------------------------------------
/src/__tests__/schema-transforms.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';
import { nullToUndefined } from '../index.js';
describe('Schema Transformations', () => {
  describe('nullToUndefined function', () => {
    it('should convert null to undefined', () => {
      expect(nullToUndefined(null)).toBeUndefined();
    });
    it('should keep undefined as undefined', () => {
      expect(nullToUndefined(undefined)).toBeUndefined();
    });
    it('should pass through non-null values', () => {
      expect(nullToUndefined('test')).toBe('test');
      expect(nullToUndefined(42)).toBe(42);
      expect(nullToUndefined(true)).toBe(true);
      expect(nullToUndefined(false)).toBe(false);
      expect(nullToUndefined(0)).toBe(0);
      expect(nullToUndefined('')).toBe('');
      const obj = { test: 'value' };
      expect(nullToUndefined(obj)).toBe(obj);
      const arr = [1, 2, 3];
      expect(nullToUndefined(arr)).toBe(arr);
    });
  });
  describe('Common Zod Schemas', () => {
    it('should transform page parameters correctly', () => {
      const pageSchema = z
        .string()
        .optional()
        .transform((val: any) => (val ? parseInt(val, 10) || null : null));
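      // Note: '0' also maps to null here, since parseInt('0', 10) is 0 (falsy).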
      // Valid numbers
      expect(pageSchema.parse('1')).toBe(1);
      expect(pageSchema.parse('10')).toBe(10);
      expect(pageSchema.parse('100')).toBe(100);
      // Invalid or empty values
      expect(pageSchema.parse('abc')).toBe(null);
      expect(pageSchema.parse('')).toBe(null);
      expect(pageSchema.parse(undefined)).toBe(null);
    });
    it('should transform page_size parameters correctly', () => {
      const pageSizeSchema = z
        .string()
        .optional()
        .transform((val: any) => (val ? parseInt(val, 10) || null : null));
      // Valid numbers
      expect(pageSizeSchema.parse('10')).toBe(10);
      expect(pageSizeSchema.parse('50')).toBe(50);
      expect(pageSizeSchema.parse('100')).toBe(100);
      // Invalid or empty values
      expect(pageSizeSchema.parse('abc')).toBe(null);
      expect(pageSizeSchema.parse('')).toBe(null);
      expect(pageSizeSchema.parse(undefined)).toBe(null);
    });
    it('should validate severity values correctly', () => {
      const severitySchema = z
        .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
        .nullable()
        .optional();
      // Valid severities
      expect(severitySchema.parse('INFO')).toBe('INFO');
      expect(severitySchema.parse('MINOR')).toBe('MINOR');
      expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
      expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
      expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
      // Null/undefined
      expect(severitySchema.parse(null)).toBe(null);
      expect(severitySchema.parse(undefined)).toBe(undefined);
      // Invalid
      expect(() => severitySchema.parse('INVALID')).toThrow();
    });
    it('should validate status values correctly', () => {
      const statusSchema = z
        .array(
          z.enum([
            'OPEN',
            'CONFIRMED',
            'REOPENED',
            'RESOLVED',
            'CLOSED',
            'TO_REVIEW',
            'IN_REVIEW',
            'REVIEWED',
          ])
        )
        .nullable()
        .optional();
      // Valid statuses
      expect(statusSchema.parse(['OPEN'])).toEqual(['OPEN']);
      expect(statusSchema.parse(['CONFIRMED', 'REOPENED'])).toEqual(['CONFIRMED', 'REOPENED']);
      expect(statusSchema.parse(['RESOLVED', 'CLOSED'])).toEqual(['RESOLVED', 'CLOSED']);
      expect(statusSchema.parse(['TO_REVIEW', 'IN_REVIEW', 'REVIEWED'])).toEqual([
        'TO_REVIEW',
        'IN_REVIEW',
        'REVIEWED',
      ]);
      // Null/undefined
      expect(statusSchema.parse(null)).toBe(null);
      expect(statusSchema.parse(undefined)).toBe(undefined);
      // Invalid
      expect(() => statusSchema.parse(['INVALID'])).toThrow();
      expect(() => statusSchema.parse(['open'])).toThrow(); // case sensitivity
    });
    it('should validate resolution values correctly', () => {
      const resolutionSchema = z
        .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
        .nullable()
        .optional();
      // Valid resolutions
      expect(resolutionSchema.parse(['FALSE-POSITIVE'])).toEqual(['FALSE-POSITIVE']);
      expect(resolutionSchema.parse(['WONTFIX', 'FIXED'])).toEqual(['WONTFIX', 'FIXED']);
      expect(resolutionSchema.parse(['REMOVED'])).toEqual(['REMOVED']);
      expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED'])).toEqual([
        'FALSE-POSITIVE',
        'WONTFIX',
        'FIXED',
        'REMOVED',
      ]);
      // Null/undefined
      expect(resolutionSchema.parse(null)).toBe(null);
      expect(resolutionSchema.parse(undefined)).toBe(undefined);
      // Invalid
      expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
    });
    it('should validate type values correctly', () => {
      const typeSchema = z
        .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
        .nullable()
        .optional();
      // Valid types
      expect(typeSchema.parse(['CODE_SMELL'])).toEqual(['CODE_SMELL']);
      expect(typeSchema.parse(['BUG', 'VULNERABILITY'])).toEqual(['BUG', 'VULNERABILITY']);
      expect(typeSchema.parse(['SECURITY_HOTSPOT'])).toEqual(['SECURITY_HOTSPOT']);
      expect(typeSchema.parse(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT'])).toEqual([
        'CODE_SMELL',
        'BUG',
        'VULNERABILITY',
        'SECURITY_HOTSPOT',
      ]);
      // Null/undefined
      expect(typeSchema.parse(null)).toBe(null);
      expect(typeSchema.parse(undefined)).toBe(undefined);
      // Invalid
      expect(() => typeSchema.parse(['INVALID'])).toThrow();
    });
    it('should transform boolean values correctly', () => {
      const booleanSchema = z
        .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
        .nullable()
        .optional();
      // String values
      expect(booleanSchema.parse('true')).toBe(true);
      expect(booleanSchema.parse('false')).toBe(false);
      // Boolean values
      expect(booleanSchema.parse(true)).toBe(true);
      expect(booleanSchema.parse(false)).toBe(false);
      // Null/undefined
      expect(booleanSchema.parse(null)).toBe(null);
      expect(booleanSchema.parse(undefined)).toBe(undefined);
    });
    it('should validate string arrays correctly', () => {
      const stringArraySchema = z.array(z.string()).nullable().optional();
      // Valid arrays
      expect(stringArraySchema.parse(['test'])).toEqual(['test']);
      expect(stringArraySchema.parse(['one', 'two', 'three'])).toEqual(['one', 'two', 'three']);
      expect(stringArraySchema.parse([])).toEqual([]);
      // Null/undefined
      expect(stringArraySchema.parse(null)).toBe(null);
      expect(stringArraySchema.parse(undefined)).toBe(undefined);
      // Invalid
      expect(() => stringArraySchema.parse('not-an-array')).toThrow();
      expect(() => stringArraySchema.parse([1, 2, 3])).toThrow();
    });
    it('should validate and transform string or array unions', () => {
      const unionSchema = z.union([z.string(), z.array(z.string())]);
      // Single string
      expect(unionSchema.parse('test')).toBe('test');
      // String array
      expect(unionSchema.parse(['one', 'two'])).toEqual(['one', 'two']);
      // Invalid
      expect(() => unionSchema.parse(123)).toThrow();
      expect(() => unionSchema.parse([1, 2, 3])).toThrow();
    });
  });
});

```

--------------------------------------------------------------------------------
/src/utils/logger.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Simple logging service for the application.
 * This provides a centralized place for all logging functionality.
 *
 * Configuration:
 * - LOG_LEVEL: Sets the minimum log level (DEBUG, INFO, WARN, ERROR). Defaults to DEBUG.
 * - LOG_FILE: Path to the log file. If not set, no logs will be written.
 *
 * Note: Since MCP servers use stdout for protocol communication, logs are written
 * to a file instead of stdout/stderr to avoid interference.
 */

import { writeFileSync, appendFileSync, existsSync, mkdirSync } from 'node:fs';
import { dirname } from 'node:path';

/**
 * Log levels for the application
 * @enum {string}
 */
export enum LogLevel {
  DEBUG = 'DEBUG',
  INFO = 'INFO',
  WARN = 'WARN',
  ERROR = 'ERROR',
}

/**
 * Environment-aware logging configuration
 */
const LOG_LEVELS_PRIORITY: Record<LogLevel, number> = {
  [LogLevel.DEBUG]: 0,
  [LogLevel.INFO]: 1,
  [LogLevel.WARN]: 2,
  [LogLevel.ERROR]: 3,
};

/**
 * Get the log file path from environment
 * @returns {string | null} The log file path or null if not configured
 * @private
 */
function getLogFilePath(): string | null {
  return process.env.LOG_FILE ?? null;
}

let logFileInitialized = false;

/**
 * Initialize the log file if needed by creating the directory and file
 * Only initializes once per process to avoid redundant file operations
 * @private
 * @returns {void}
 */
function initializeLogFile(): void {
  const logFile = getLogFilePath();
  if (logFile && !logFileInitialized) {
    try {
      // Create directory if it doesn't exist
      const dir = dirname(logFile);
      if (!existsSync(dir)) {
        mkdirSync(dir, { recursive: true });
      }
      // Create or truncate the log file
      writeFileSync(logFile, '');
      logFileInitialized = true;
    } catch {
      // Fail silently if we can't create the log file
      logFileInitialized = true; // Don't retry
    }
  }
}

/**
 * Formats non-JSON-serializable values to string
 * @param value The value to format
 * @returns String representation of the value
 */
function formatNonSerializable(value: unknown): string {
  if (value === null) return 'null';
  if (value === undefined) return 'undefined';

  if (typeof value === 'object') {
    const constructorName =
      'constructor' in value && value.constructor?.name ? value.constructor.name : 'Object';
    return `[object ${constructorName}]`;
  }

  return Object.prototype.toString.call(value);
}

/**
 * Formats an error for logging
 * @param error The error to format
 * @returns Formatted error string
 */
function formatError(error: unknown): string {
  if (error === undefined) {
    return '';
  }

  if (error instanceof Error) {
    const stack = error.stack ? `\n${error.stack}` : '';
    return `${error.name}: ${error.message}${stack}`;
  }

  try {
    return JSON.stringify(error, null, 2);
  } catch {
    // Fallback to string representation if JSON.stringify fails
    return formatNonSerializable(error);
  }
}

/**
 * Write a log message to file
 * @param message The formatted log message to write
 * @private
 */
function writeToLogFile(message: string): void {
  const logFile = getLogFilePath();
  if (logFile) {
    try {
      if (!logFileInitialized) {
        initializeLogFile();
      }
      appendFileSync(logFile, `${message}\n`);
    } catch {
      // Fail silently if we can't write to the log file
    }
  }
}

/**
 * Check if a log level should be displayed based on the environment configuration
 * @param level The log level to check
 * @returns {boolean} True if the log level should be displayed
 * @private
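 * @example
 * // With LOG_LEVEL=INFO in the environment:
 * // shouldLog(LogLevel.DEBUG) -> false, shouldLog(LogLevel.WARN) -> true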
 */
function shouldLog(level: LogLevel): boolean {
  const configuredLevel = (process.env.LOG_LEVEL ?? 'DEBUG') as LogLevel;
  return LOG_LEVELS_PRIORITY[level] >= LOG_LEVELS_PRIORITY[configuredLevel];
}

/**
 * Format a log message with timestamp, level, and context information
 * @param level The log level of the message
 * @param message The log message content
 * @param context Optional context identifier
 * @returns {string} Formatted log message
 * @private
 */
function formatLogMessage(level: LogLevel, message: string, context?: string): string {
  const timestamp = new Date().toISOString();
  const contextStr = context ? `[${context}] ` : '';
  return `${timestamp} ${level} ${contextStr}${message}`;
}

/**
 * Logger service for consistent logging throughout the application
 */
export class Logger {
  private readonly context: string | undefined;

  /**
   * Create a new logger instance, optionally with a context
   * @param context Optional context name to identify the log source
   */
  constructor(context?: string) {
    this.context = context;
  }

  /**
   * Log a debug message
   * @param message The message to log
   * @param data Optional data to include in the log
   */
  debug(message: string, data?: unknown): void {
    if (shouldLog(LogLevel.DEBUG) && getLogFilePath()) {
      const formattedMessage = formatLogMessage(LogLevel.DEBUG, message, this.context);
      const fullMessage =
        data === undefined
          ? formattedMessage
          : `${formattedMessage} ${JSON.stringify(data, null, 2)}`;
      writeToLogFile(fullMessage);
    }
  }

  /**
   * Log an info message
   * @param message The message to log
   * @param data Optional data to include in the log
   */
  info(message: string, data?: unknown): void {
    if (shouldLog(LogLevel.INFO) && getLogFilePath()) {
      const formattedMessage = formatLogMessage(LogLevel.INFO, message, this.context);
      const fullMessage =
        data === undefined
          ? formattedMessage
          : `${formattedMessage} ${JSON.stringify(data, null, 2)}`;
      writeToLogFile(fullMessage);
    }
  }

  /**
   * Log a warning message
   * @param message The message to log
   * @param data Optional data to include in the log
   */
  warn(message: string, data?: unknown): void {
    if (shouldLog(LogLevel.WARN) && getLogFilePath()) {
      const formattedMessage = formatLogMessage(LogLevel.WARN, message, this.context);
      const fullMessage =
        data === undefined
          ? formattedMessage
          : `${formattedMessage} ${JSON.stringify(data, null, 2)}`;
      writeToLogFile(fullMessage);
    }
  }

  /**
   * Log an error message with improved error formatting
   * @param message The message to log
   * @param error Optional error to include in the log. The error will be formatted for better readability:
   *        - Error objects will include name, message and stack trace
   *        - Objects will be stringified with proper indentation
   *        - Other values will be converted to strings
   */
  error(message: string, error?: unknown): void {
    if (!shouldLog(LogLevel.ERROR) || !getLogFilePath()) {
      return;
    }

    const formattedMessage = formatLogMessage(LogLevel.ERROR, message, this.context);
    const errorOutput = formatError(error);
    const fullMessage = errorOutput ? `${formattedMessage} ${errorOutput}` : formattedMessage;
    writeToLogFile(fullMessage);
  }
}

/**
 * Default logger instance for the application
 * Pre-configured with the 'SonarQubeMCP' context for quick imports
 * @const {Logger}
 */
export const defaultLogger = new Logger('SonarQubeMCP');

/**
 * Helper function to create a logger with a specific context
 * @param context The context to use for the logger
 * @returns A new logger instance with the specified context
 */
export function createLogger(context: string): Logger {
  return new Logger(context);
}

/**
 * Default export for simpler imports
 */
export default defaultLogger;
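
/**
 * Example usage (illustrative; nothing is written unless LOG_FILE is set):
 *
 *   import { createLogger } from './utils/logger.js';
 *
 *   const log = createLogger('Transport');
 *   log.info('Server started', { transport: 'stdio' });
 *   log.error('Request failed', new Error('timeout'));
 */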

```

--------------------------------------------------------------------------------
/src/__tests__/domains/source-code-domain.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import nock from 'nock';
import { SourceCodeDomain } from '../../domains/source-code.js';
import { IssuesDomain } from '../../domains/issues.js';
import { SonarQubeClient as WebApiClient } from 'sonarqube-web-api-client';

describe('SourceCodeDomain', () => {
  const baseUrl = 'https://sonarqube.example.com';
  const organization = 'test-org';
  let domain: SourceCodeDomain;
  let webApiClient: WebApiClient;
  let issuesDomain: IssuesDomain;

  beforeEach(() => {
    webApiClient = WebApiClient.withToken(baseUrl, 'test-token', { organization });
    issuesDomain = new IssuesDomain(webApiClient, organization);
    domain = new SourceCodeDomain(webApiClient, organization, issuesDomain);
    nock.cleanAll();
  });

  afterEach(() => {
    nock.cleanAll();
  });

  describe('getSourceCode', () => {
    const mockSourceResponse = [
      'public class Example {',
      '    public void method() {',
      '        // TODO: implement',
      '    }',
      '}',
    ].join('\n');

    const mockIssuesResponse = {
      paging: { pageIndex: 1, pageSize: 100, total: 2 },
      issues: [
        {
          key: 'issue1',
          rule: 'squid:S1234',
          component: 'com.example:Example.java',
          project: 'com.example',
          line: 2,
          message: 'Fix this issue',
          severity: 'MAJOR',
          status: 'OPEN',
          type: 'BUG',
          textRange: {
            startLine: 2,
            endLine: 2,
            startOffset: 10,
            endOffset: 20,
          },
          tags: [],
          creationDate: '2023-01-01T00:00:00Z',
          updateDate: '2023-01-01T00:00:00Z',
        },
        {
          key: 'issue2',
          rule: 'squid:S5678',
          component: 'com.example:Example.java',
          project: 'com.example',
          line: 3,
          message: 'Another issue',
          severity: 'MINOR',
          status: 'OPEN',
          type: 'CODE_SMELL',
          tags: [],
          creationDate: '2023-01-01T00:00:00Z',
          updateDate: '2023-01-01T00:00:00Z',
        },
      ],
      components: [],
      rules: [],
    };

    it('should get source code with issues for all lines', async () => {
      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          organization,
        })
        .reply(200, mockSourceResponse);

      nock(baseUrl)
        .get('/api/issues/search')
        .query({
          projects: 'com.example:Example.java',
          onComponentOnly: 'true',
          organization,
          p: '1',
          ps: '100',
        })
        .reply(200, mockIssuesResponse);

      const result = await domain.getSourceCode({
        key: 'com.example:Example.java',
      });

      expect(result.component.key).toBe('com.example:Example.java');
      expect(result.component.name).toBe('com.example:Example.java'); // name is the full key since there's no '/' in the path
      expect(result.component.qualifier).toBe('FIL');
      expect(result.sources).toHaveLength(5);
      expect(result.sources[0]).toEqual({
        line: 1,
        code: 'public class Example {',
        issues: undefined,
      });
      expect(result.sources[1]).toBeDefined();
      expect(result.sources[1]!.issues).toHaveLength(1);
      expect(result.sources[1]!.issues?.[0]!.key).toBe('issue1');
      expect(result.sources[2]).toBeDefined();
      expect(result.sources[2]!.issues).toHaveLength(1);
      expect(result.sources[2]!.issues?.[0]!.key).toBe('issue2');
    });

    it('should get source code with line range and branch', async () => {
      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          branch: 'feature-branch',
          organization,
        })
        .reply(200, mockSourceResponse);

      nock(baseUrl)
        .get('/api/issues/search')
        .query({
          projects: 'com.example:Example.java',
          branch: 'feature-branch',
          onComponentOnly: 'true',
          organization,
        })
        .reply(200, { ...mockIssuesResponse, issues: [] });

      const result = await domain.getSourceCode({
        key: 'com.example:Example.java',
        from: 2,
        to: 4,
        branch: 'feature-branch',
      });

      expect(result.sources).toHaveLength(3);
      expect(result.sources[0]!.line).toBe(2);
      expect(result.sources[2]!.line).toBe(4);
    });

    it('should get source code for pull request', async () => {
      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          pullRequest: '123',
          organization,
        })
        .reply(200, mockSourceResponse);

      nock(baseUrl)
        .get('/api/issues/search')
        .query({
          projects: 'com.example:Example.java',
          pullRequest: '123',
          onComponentOnly: 'true',
          organization,
        })
        .reply(200, { ...mockIssuesResponse, issues: [] });

      const result = await domain.getSourceCode({
        key: 'com.example:Example.java',
        pullRequest: '123',
      });

      expect(result.sources).toHaveLength(5);
    });

    it('should handle source code without issues domain', async () => {
      const domainWithoutIssues = new SourceCodeDomain(webApiClient, organization);

      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          organization,
        })
        .reply(200, mockSourceResponse);

      const result = await domainWithoutIssues.getSourceCode({
        key: 'com.example:Example.java',
      });

      expect(result.sources).toHaveLength(5);
      expect(result.sources[0]!.issues).toBeUndefined();
    });

    it('should handle error when fetching issues', async () => {
      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          organization,
        })
        .reply(200, mockSourceResponse);

      nock(baseUrl)
        .get('/api/issues/search')
        .query({
          projects: 'com.example:Example.java',
          onComponentOnly: 'true',
          organization,
        })
        .reply(500, 'Internal Server Error');

      const result = await domain.getSourceCode({
        key: 'com.example:Example.java',
      });

      // Should still return source code without issues
      expect(result.sources).toHaveLength(5);
      expect(result.sources[0]!.issues).toBeUndefined();
    });
  });

  describe('getScmBlame', () => {
    const mockScmResponse = {
      scm: [
        ['abc123', '[email protected]', '2023-01-01T00:00:00Z'],
        ['def456', '[email protected]', '2023-01-02T00:00:00Z'],
      ],
    };

    it('should get SCM blame information', async () => {
      nock(baseUrl)
        .get('/api/sources/scm')
        .query({
          key: 'com.example:Example.java',
          organization,
        })
        .reply(200, mockScmResponse);

      const result = await domain.getScmBlame({
        key: 'com.example:Example.java',
      });

      expect(result).toEqual(mockScmResponse);
    });

    it('should get SCM blame with line range', async () => {
      nock(baseUrl)
        .get('/api/sources/scm')
        .query({
          key: 'com.example:Example.java',
          from: 1,
          to: 3,
          organization,
        })
        .reply(200, mockScmResponse);

      const result = await domain.getScmBlame({
        key: 'com.example:Example.java',
        from: 1,
        to: 3,
      });

      expect(result).toEqual(mockScmResponse);
    });
  });
});

```

--------------------------------------------------------------------------------
/src/__tests__/tool-registration-transforms.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';
describe('Tool Registration Schema Transforms', () => {
  describe('Pagination parameters', () => {
    it('should transform page string to number or null', () => {
      const pageSchema = z
        .string()
        .optional()
        .transform((val: any) => (val ? parseInt(val, 10) || null : null));
      expect(pageSchema.parse('10')).toBe(10);
      expect(pageSchema.parse('invalid')).toBe(null);
      expect(pageSchema.parse('')).toBe(null);
      expect(pageSchema.parse(undefined)).toBe(null);
    });
  });
  describe('Boolean parameters', () => {
    it('should transform string to boolean', () => {
      const booleanSchema = z
        .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
        .nullable()
        .optional();
      expect(booleanSchema.parse('true')).toBe(true);
      expect(booleanSchema.parse('false')).toBe(false);
      expect(booleanSchema.parse(true)).toBe(true);
      expect(booleanSchema.parse(false)).toBe(false);
      expect(booleanSchema.parse(null)).toBe(null);
      expect(booleanSchema.parse(undefined)).toBe(undefined);
    });
  });
  describe('Array union with string', () => {
    it('should handle both string and array inputs', () => {
      const schema = z.union([z.string(), z.array(z.string())]);
      // Test with string input
      expect(schema.parse('test')).toBe('test');
      // Test with array input
      expect(schema.parse(['test1', 'test2'])).toEqual(['test1', 'test2']);
    });
  });
  describe('Union schemas for tool parameters', () => {
    it('should validate both array and string metrics parameters', () => {
      // Similar to how the metrics_keys parameter is defined
      const metricsSchema = z.union([z.string(), z.array(z.string())]);
      expect(metricsSchema.parse('coverage')).toBe('coverage');
      expect(metricsSchema.parse(['coverage', 'bugs'])).toEqual(['coverage', 'bugs']);
    });
    it('should validate both array and string component keys parameters', () => {
      // Similar to how the component_keys parameter is defined
      const componentKeysSchema = z.union([z.string(), z.array(z.string())]);
      expect(componentKeysSchema.parse('component1')).toBe('component1');
      expect(componentKeysSchema.parse(['component1', 'component2'])).toEqual([
        'component1',
        'component2',
      ]);
    });
  });
  describe('Enumeration schemas', () => {
    it('should validate severity enum value', () => {
      const severitySchema = z
        .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
        .nullable()
        .optional();
      expect(severitySchema.parse('INFO')).toBe('INFO');
      expect(severitySchema.parse('MINOR')).toBe('MINOR');
      expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
      expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
      expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
      expect(severitySchema.parse(null)).toBe(null);
      expect(severitySchema.parse(undefined)).toBe(undefined);
      expect(() => severitySchema.parse('INVALID')).toThrow();
    });
    it('should validate status array enum values', () => {
      const statusSchema = z
        .array(
          z.enum([
            'OPEN',
            'CONFIRMED',
            'REOPENED',
            'RESOLVED',
            'CLOSED',
            'TO_REVIEW',
            'IN_REVIEW',
            'REVIEWED',
          ])
        )
        .nullable()
        .optional();
      expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
      expect(statusSchema.parse(null)).toBe(null);
      expect(statusSchema.parse(undefined)).toBe(undefined);
      expect(() => statusSchema.parse(['INVALID'])).toThrow();
    });
    it('should validate resolution array enum values', () => {
      const resolutionSchema = z
        .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
        .nullable()
        .optional();
      expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
        'FALSE-POSITIVE',
        'WONTFIX',
      ]);
      expect(resolutionSchema.parse(null)).toBe(null);
      expect(resolutionSchema.parse(undefined)).toBe(undefined);
      expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
    });
    it('should validate type array enum values', () => {
      const typeSchema = z
        .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
        .nullable()
        .optional();
      expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
      expect(typeSchema.parse(null)).toBe(null);
      expect(typeSchema.parse(undefined)).toBe(undefined);
      expect(() => typeSchema.parse(['INVALID'])).toThrow();
    });
  });
  describe('Complete registration schema', () => {
    it('should validate and transform a complete issues tool schema', () => {
      // Create schemas similar to what's in the tool registration
      const pageSchema = z
        .string()
        .optional()
        .transform((val: any) => (val ? parseInt(val, 10) || null : null));
      const booleanSchema = z
        .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
        .nullable()
        .optional();
      const severitySchema = z
        .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
        .nullable()
        .optional();
      const statusSchema = z
        .array(
          z.enum([
            'OPEN',
            'CONFIRMED',
            'REOPENED',
            'RESOLVED',
            'CLOSED',
            'TO_REVIEW',
            'IN_REVIEW',
            'REVIEWED',
          ])
        )
        .nullable()
        .optional();
      const resolutionSchema = z
        .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
        .nullable()
        .optional();
      const typeSchema = z
        .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
        .nullable()
        .optional();
      const stringArraySchema = z.array(z.string()).nullable().optional();
      // Create the complete schema
      const schema = z.object({
        project_key: z.string(),
        severity: severitySchema,
        page: pageSchema,
        page_size: pageSchema,
        statuses: statusSchema,
        resolutions: resolutionSchema,
        resolved: booleanSchema,
        types: typeSchema,
        rules: stringArraySchema,
        tags: stringArraySchema,
      });
      // Test with valid data
      const validData = {
        project_key: 'test-project',
        severity: 'MAJOR',
        page: '10',
        page_size: '20',
        statuses: ['OPEN', 'CONFIRMED'],
        resolutions: ['FALSE-POSITIVE', 'WONTFIX'],
        resolved: 'true',
        types: ['CODE_SMELL', 'BUG'],
        rules: ['rule1', 'rule2'],
        tags: ['tag1', 'tag2'],
      };
      const result = schema.parse(validData);
      // Check that transformations worked correctly
      expect(result.project_key).toBe('test-project');
      expect(result.severity).toBe('MAJOR');
      expect(result.page).toBe(10); // Transformed from string to number
      expect(result.page_size).toBe(20); // Transformed from string to number
      expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
      expect(result.resolutions).toEqual(['FALSE-POSITIVE', 'WONTFIX']);
      expect(result.resolved).toBe(true); // Transformed from string to boolean
      expect(result.types).toEqual(['CODE_SMELL', 'BUG']);
      expect(result.rules).toEqual(['rule1', 'rule2']);
      expect(result.tags).toEqual(['tag1', 'tag2']);
    });
  });
});

```

--------------------------------------------------------------------------------
/src/errors.ts:
--------------------------------------------------------------------------------

```typescript
import {
  SonarQubeError as SonarQubeClientError,
  ApiError,
  AuthenticationError,
  AuthorizationError,
  NotFoundError,
  RateLimitError,
  NetworkError,
  ServerError,
  ValidationError,
} from 'sonarqube-web-api-client';
import { createLogger } from './utils/logger.js';

export enum SonarQubeErrorType {
  AUTHENTICATION_FAILED = 'AUTHENTICATION_FAILED',
  AUTHORIZATION_FAILED = 'AUTHORIZATION_FAILED',
  RESOURCE_NOT_FOUND = 'RESOURCE_NOT_FOUND',
  RATE_LIMITED = 'RATE_LIMITED',
  NETWORK_ERROR = 'NETWORK_ERROR',
  CONFIGURATION_ERROR = 'CONFIGURATION_ERROR',
  VALIDATION_ERROR = 'VALIDATION_ERROR',
  SERVER_ERROR = 'SERVER_ERROR',
  UNKNOWN_ERROR = 'UNKNOWN_ERROR',
}

export interface SonarQubeError extends Error {
  type: SonarQubeErrorType;
  operation?: string;
  statusCode?: number;
  context?: Record<string, unknown>;
  solution?: string;
}

export class SonarQubeAPIError extends Error implements SonarQubeError {
  type: SonarQubeErrorType;
  operation?: string;
  statusCode?: number;
  context?: Record<string, unknown>;
  solution?: string;

  constructor(
    message: string,
    type: SonarQubeErrorType,
    options?: {
      operation?: string;
      statusCode?: number;
      context?: Record<string, unknown>;
      solution?: string;
    }
  ) {
    super(message);
    this.name = 'SonarQubeAPIError';
    this.type = type;
    if (options?.operation !== undefined) {
      this.operation = options.operation;
    }
    if (options?.statusCode !== undefined) {
      this.statusCode = options.statusCode;
    }
    if (options?.context !== undefined) {
      this.context = options.context;
    }
    if (options?.solution !== undefined) {
      this.solution = options.solution;
    }
  }

  override toString(): string {
    let result = `Error: ${this.message}`;
    if (this.operation) {
      result += `\nOperation: ${this.operation}`;
    }
    if (this.statusCode) {
      result += `\nStatus Code: ${this.statusCode}`;
    }
    if (this.solution) {
      result += `\nSolution: ${this.solution}`;
    }
    if (this.context && Object.keys(this.context).length > 0) {
      result += `\nContext: ${JSON.stringify(this.context, null, 2)}`;
    }
    return result;
  }
}

function getErrorTypeFromClientError(error: SonarQubeClientError): {
  type: SonarQubeErrorType;
  solution: string | undefined;
} {
  if (error instanceof AuthenticationError) {
    return {
      type: SonarQubeErrorType.AUTHENTICATION_FAILED,
      solution:
        'Please check your SONARQUBE_TOKEN or credentials. Ensure the token is valid and not expired.',
    };
  }
  if (error instanceof AuthorizationError) {
    return {
      type: SonarQubeErrorType.AUTHORIZATION_FAILED,
      solution: 'Ensure your token has the required permissions for this operation.',
    };
  }
  if (error instanceof NotFoundError) {
    return {
      type: SonarQubeErrorType.RESOURCE_NOT_FOUND,
      solution: 'Verify the project key/component exists and you have access to it.',
    };
  }
  if (error instanceof RateLimitError) {
    return {
      type: SonarQubeErrorType.RATE_LIMITED,
      solution: 'Please wait before retrying. Consider implementing request throttling.',
    };
  }
  if (error instanceof NetworkError) {
    return {
      type: SonarQubeErrorType.NETWORK_ERROR,
      solution: 'Check your network connection and verify the SonarQube URL.',
    };
  }
  if (error instanceof ServerError) {
    return {
      type: SonarQubeErrorType.SERVER_ERROR,
      solution:
        'The server is experiencing issues. Please try again later or contact your administrator.',
    };
  }
  if (error instanceof ValidationError) {
    return {
      type: SonarQubeErrorType.VALIDATION_ERROR,
      solution: 'Please check your request parameters and try again.',
    };
  }
  return {
    type: SonarQubeErrorType.UNKNOWN_ERROR,
    solution: undefined,
  };
}

export function transformError(error: unknown, operation: string): SonarQubeAPIError {
  if (error instanceof SonarQubeAPIError) {
    return error;
  }

  if (error instanceof SonarQubeClientError) {
    const { type, solution } = getErrorTypeFromClientError(error);
    const context: Record<string, unknown> = {};

    // Extract status code if available
    let statusCode: number | undefined;
    if (error instanceof ApiError && 'statusCode' in error) {
      statusCode = (error as ApiError & { statusCode?: number }).statusCode;
    }

    const errorOptions: {
      operation?: string;
      statusCode?: number;
      context?: Record<string, unknown>;
      solution?: string;
    } = {
      operation,
      context,
    };
    if (statusCode !== undefined) {
      errorOptions.statusCode = statusCode;
    }
    if (solution !== undefined) {
      errorOptions.solution = solution;
    }
    return new SonarQubeAPIError(error.message, type, errorOptions);
  }

  if (error instanceof Error) {
    return new SonarQubeAPIError(error.message, SonarQubeErrorType.UNKNOWN_ERROR, {
      operation,
    });
  }

  return new SonarQubeAPIError(String(error), SonarQubeErrorType.UNKNOWN_ERROR, {
    operation,
  });
}

interface RetryOptions {
  maxRetries?: number;
  initialDelay?: number;
  maxDelay?: number;
  backoffFactor?: number;
}

const logger = createLogger('errors');

const DEFAULT_RETRY_OPTIONS: Required<RetryOptions> = {
  maxRetries: 3,
  initialDelay: 1000,
  maxDelay: 10000,
  backoffFactor: 2,
};

function shouldRetry(error: unknown): boolean {
  if (!(error instanceof SonarQubeAPIError)) {
    return false;
  }

  // Retry on network errors, rate limiting, and server errors
  return [
    SonarQubeErrorType.NETWORK_ERROR,
    SonarQubeErrorType.RATE_LIMITED,
    SonarQubeErrorType.SERVER_ERROR,
  ].includes(error.type);
}

async function sleep(ms: number): Promise<void> {
  return new Promise((resolve) => setTimeout(resolve, ms));
}

export async function withErrorHandling<T>(
  operation: string,
  apiCall: () => Promise<T>,
  retryOptions?: RetryOptions
): Promise<T> {
  const options = { ...DEFAULT_RETRY_OPTIONS, ...retryOptions };
  let lastError: unknown;
  let delay = options.initialDelay;

  for (let attempt = 0; attempt <= options.maxRetries; attempt++) {
    try {
      return await apiCall();
    } catch (error) {
      // Only transform errors from the SonarQube client
      if (error instanceof SonarQubeClientError) {
        lastError = transformError(error, operation);
      } else {
        // Pass through other errors unchanged (e.g., test mocks)
        lastError = error;
      }

      if (attempt < options.maxRetries && shouldRetry(lastError)) {
        const retryDelay = Math.min(delay, options.maxDelay);
        logger.info(`Retrying ${operation} after ${retryDelay}ms`, {
          attempt: attempt + 1,
          maxRetries: options.maxRetries,
          delay: retryDelay,
        });
        await sleep(retryDelay);
        delay *= options.backoffFactor;
      } else {
        break;
      }
    }
  }

  throw lastError;
}

export function formatErrorForMCP(error: SonarQubeAPIError): { code: number; message: string } {
  const errorMap: Record<SonarQubeErrorType, number> = {
    [SonarQubeErrorType.AUTHENTICATION_FAILED]: -32001,
    [SonarQubeErrorType.AUTHORIZATION_FAILED]: -32002,
    [SonarQubeErrorType.RESOURCE_NOT_FOUND]: -32003,
    [SonarQubeErrorType.RATE_LIMITED]: -32004,
    [SonarQubeErrorType.NETWORK_ERROR]: -32005,
    [SonarQubeErrorType.CONFIGURATION_ERROR]: -32006,
    [SonarQubeErrorType.VALIDATION_ERROR]: -32007,
    [SonarQubeErrorType.SERVER_ERROR]: -32008,
    [SonarQubeErrorType.UNKNOWN_ERROR]: -32000,
  };

  return {
    code: errorMap[error.type] ?? -32000,
    message: error.toString(),
  };
}

```
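
A minimal usage sketch of the helpers above, for orientation: it wraps a hypothetical `ping()` call with the retry/backoff machinery and converts the failure into the MCP error payload. The `PingClient` interface and `pingWithRetries` name are illustrative assumptions, not part of this file.

```typescript
import { withErrorHandling, formatErrorForMCP, SonarQubeAPIError } from './errors.js';

// Hypothetical client shape, for illustration only.
interface PingClient {
  ping: () => Promise<string>;
}

// NETWORK_ERROR, RATE_LIMITED, and SERVER_ERROR are retried with exponential
// backoff; other failures (and non-client errors) surface immediately.
export async function pingWithRetries(client: PingClient): Promise<string> {
  try {
    return await withErrorHandling('system.ping', () => client.ping(), {
      maxRetries: 2,
      initialDelay: 500,
    });
  } catch (error) {
    if (error instanceof SonarQubeAPIError) {
      // Map to the MCP-style { code, message } payload before surfacing.
      const { code, message } = formatErrorForMCP(error);
      throw Object.assign(new Error(message), { code });
    }
    throw error;
  }
}
```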

--------------------------------------------------------------------------------
/src/utils/__tests__/structured-response.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect } from 'vitest';
import {
  createStructuredResponse,
  createTextResponse,
  createErrorResponse,
} from '../structured-response.js';

describe('structured-response', () => {
  describe('createStructuredResponse', () => {
    it('should create response with text and structured content', () => {
      const data = { foo: 'bar', count: 42 };
      const result = createStructuredResponse(data);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: JSON.stringify(data, null, 2),
          },
        ],
        structuredContent: data,
      });
    });

    it('should handle null data', () => {
      const result = createStructuredResponse(null);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: 'null',
          },
        ],
        structuredContent: null,
      });
    });

    it('should handle undefined data', () => {
      const result = createStructuredResponse(undefined);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: undefined, // JSON.stringify(undefined) returns undefined
          },
        ],
        structuredContent: undefined,
      });
    });

    it('should handle array data', () => {
      const data = [1, 2, 3, 'test'];
      const result = createStructuredResponse(data);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: JSON.stringify(data, null, 2),
          },
        ],
        structuredContent: data,
      });
    });

    it('should handle complex nested objects', () => {
      const data = {
        level1: {
          level2: {
            level3: ['a', 'b', 'c'],
            number: 123,
          },
        },
        array: [{ id: 1 }, { id: 2 }],
      };
      const result = createStructuredResponse(data);

      expect(result.content[0]?.text).toBe(JSON.stringify(data, null, 2));
      expect(result.structuredContent).toBe(data);
    });

    it('should throw on circular references (JSON.stringify limitation)', () => {
      const data: Record<string, unknown> = { name: 'test' };
      data.circular = data;

      expect(() => createStructuredResponse(data)).toThrow();
    });

    it('should preserve Date objects in structured content', () => {
      const date = new Date('2023-01-01');
      const data = { created: date };
      const result = createStructuredResponse(data);

      expect(result.structuredContent).toEqual({ created: date });
      expect(result.content[0]?.text).toBe(JSON.stringify(data, null, 2));
    });
  });

  describe('createTextResponse', () => {
    it('should create response with only text content', () => {
      const text = 'Hello, world!';
      const result = createTextResponse(text);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text,
          },
        ],
      });
    });

    it('should handle empty string', () => {
      const result = createTextResponse('');

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: '',
          },
        ],
      });
    });

    it('should handle multiline text', () => {
      const text = 'Line 1\nLine 2\nLine 3';
      const result = createTextResponse(text);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text,
          },
        ],
      });
    });

    it('should handle special characters', () => {
      const text = 'Special chars: < > & " \' \\ \n \t';
      const result = createTextResponse(text);

      expect(result.content[0]?.text).toBe(text);
    });

    it('should not include structuredContent', () => {
      const result = createTextResponse('test');

      expect(result.structuredContent).toBeUndefined();
      expect(result.isError).toBeUndefined();
    });
  });

  describe('createErrorResponse', () => {
    it('should create error response with message only', () => {
      const message = 'Something went wrong';
      const result = createErrorResponse(message);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: message,
          },
        ],
        structuredContent: {
          error: message,
        },
        isError: true,
      });
    });

    it('should create error response with message and details', () => {
      const message = 'Validation failed';
      const details = {
        field: 'email',
        reason: 'invalid format',
      };
      const result = createErrorResponse(message, details);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: message,
          },
        ],
        structuredContent: {
          error: message,
          details,
        },
        isError: true,
      });
    });

    it('should handle null details', () => {
      const message = 'Error occurred';
      const result = createErrorResponse(message, null);

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: message,
          },
        ],
        structuredContent: {
          error: message,
          details: null,
        },
        isError: true,
      });
    });

    it('should handle undefined details explicitly', () => {
      const message = 'Error occurred';
      const result = createErrorResponse(message, undefined);

      expect(result.structuredContent).toEqual({
        error: message,
      });
      expect('details' in result.structuredContent!).toBe(false);
    });

    it('should handle complex error details', () => {
      const message = 'Multiple errors';
      const details = {
        errors: [
          { field: 'name', message: 'required' },
          { field: 'age', message: 'must be positive' },
        ],
        timestamp: new Date(),
        requestId: '123456',
      };
      const result = createErrorResponse(message, details);

      expect(result.structuredContent).toEqual({
        error: message,
        details,
      });
      expect(result.isError).toBe(true);
    });

    it('should handle empty error message', () => {
      const result = createErrorResponse('');

      expect(result).toEqual({
        content: [
          {
            type: 'text',
            text: '',
          },
        ],
        structuredContent: {
          error: '',
        },
        isError: true,
      });
    });

    it('should handle error details with circular references', () => {
      const message = 'Circular error';
      const details: Record<string, unknown> = { type: 'error' };
      details.self = details;

      const result = createErrorResponse(message, details);

      expect(result.structuredContent).toEqual({
        error: message,
        details,
      });
    });
  });

  describe('type safety', () => {
    it('should maintain proper types for content array', () => {
      const result = createStructuredResponse({ test: true });

      // Check that content is an array
      expect(Array.isArray(result.content)).toBe(true);
      expect(result.content).toHaveLength(1);

      // Check that content item has correct type
      expect(result.content[0]?.type).toBe('text');
      expect(typeof result.content[0]?.text).toBe('string');
    });

    it('should cast structuredContent to Record<string, unknown>', () => {
      const data = { num: 123, str: 'test', bool: true };
      const result = createStructuredResponse(data);

      expect(result.structuredContent).toBe(data);
      expect(typeof result.structuredContent).toBe('object');
    });
  });
});

```
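
The module under test is not included on this page; the following is a minimal reconstruction consistent with the assertions above. Treat it as inferred behavior, not the actual `src/utils/structured-response.ts`.

```typescript
// Inferred from the tests above; typing details may differ in the real module.
export interface McpResponse {
  content: Array<{ type: 'text'; text: string }>;
  structuredContent?: unknown;
  isError?: boolean;
}

export function createStructuredResponse(data: unknown): McpResponse {
  return {
    // JSON.stringify throws on circular input and yields undefined for
    // undefined input, matching the expectations in the tests.
    content: [{ type: 'text', text: JSON.stringify(data, null, 2) }],
    structuredContent: data,
  };
}

export function createTextResponse(text: string): McpResponse {
  return { content: [{ type: 'text', text }] };
}

export function createErrorResponse(message: string, details?: unknown): McpResponse {
  return {
    content: [{ type: 'text', text: message }],
    // `details` is kept when null but omitted entirely when undefined.
    structuredContent: { error: message, ...(details !== undefined ? { details } : {}) },
    isError: true,
  };
}
```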

--------------------------------------------------------------------------------
/src/__tests__/mocked-environment.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
// Mock all dependencies
vi.mock('@modelcontextprotocol/sdk/server/mcp.js', () => ({
  McpServer: vi.fn(() => ({
    name: 'sonarqube-mcp-server',
    version: '1.1.0',
    tool: vi.fn(),
    connect: vi.fn<() => Promise<any>>().mockResolvedValue(undefined as never),
    server: { use: vi.fn() },
  })),
}));
vi.mock('@modelcontextprotocol/sdk/server/stdio.js', () => ({
  StdioServerTransport: vi.fn(() => ({
    connect: vi.fn<() => Promise<any>>().mockResolvedValue(undefined as never),
  })),
}));
// Save original environment variables
const originalEnv = process.env;
describe('Mocked Environment Tests', () => {
  beforeEach(() => {
    vi.resetModules();
    process.env = { ...originalEnv };
    process.env.SONARQUBE_TOKEN = 'test-token';
    process.env.SONARQUBE_URL = 'http://localhost:9000';
    process.env.SONARQUBE_ORGANIZATION = 'test-organization';
  });
  afterEach(() => {
    process.env = originalEnv;
    vi.clearAllMocks();
  });
  describe('Server Initialization', () => {
    it('should initialize the MCP server with correct configuration', async () => {
      const { mcpServer } = await import('../index.js');
      expect(mcpServer).toBeDefined();
      expect((mcpServer as any).name).toBe('sonarqube-mcp-server');
      expect((mcpServer as any).version).toBe('1.1.0');
    });
    it('should register tools on the server', async () => {
      const { mcpServer } = await import('../index.js');
      expect((mcpServer as any).tool).toBeDefined();
      expect((mcpServer as any).tool).toHaveBeenCalled();
      // Check number of tool registrations (28 tools total)
      expect((mcpServer as any).tool).toHaveBeenCalledTimes(28);
    });
    it('should not connect to transport in test mode', async () => {
      process.env.NODE_ENV = 'test';
      const { mcpServer } = await import('../index.js');
      expect((mcpServer as any).connect).not.toHaveBeenCalled();
    });
    it('should connect to transport in non-test mode', async () => {
      process.env.NODE_ENV = 'development';
      // Special mock for this specific test that simulates a clean import
      vi.resetModules();
      // Import the module with development environment
      await import('../index.js');
      // Since we're not directly importing mcpServer here, we check the connection indirectly:
      // StdioServerTransport is mocked, so its constructor should have been invoked
      const { StdioServerTransport } = await import('@modelcontextprotocol/sdk/server/stdio.js');
      expect(StdioServerTransport).toHaveBeenCalled();
      // Reset to test mode
      process.env.NODE_ENV = 'test';
    });
  });
  describe('Environment Variables', () => {
    it('should use environment variables to configure SonarQube client', async () => {
      // Set specific test environment variables
      process.env.SONARQUBE_TOKEN = 'specific-test-token';
      process.env.SONARQUBE_URL = 'https://specific-test-url.com';
      process.env.SONARQUBE_ORGANIZATION = 'specific-test-org';

      // Use dynamic import to test environment variable handling
      // Since we've already mocked the module at the top level, we can just verify the behavior
      const { mcpServer } = await import('../index.js');

      // The server should be properly initialized
      expect(mcpServer).toBeDefined();
      expect((mcpServer as any).name).toBe('sonarqube-mcp-server');
    });
  });
  describe('Tool Registration Complete', () => {
    it('should register all expected tools', async () => {
      const { mcpServer } = await import('../index.js');
      // Verify all tools are registered
      const toolNames = (mcpServer as any).tool.mock.calls.map((call: any) => call[0]);
      expect(toolNames).toContain('projects');
      expect(toolNames).toContain('metrics');
      expect(toolNames).toContain('issues');
      expect(toolNames).toContain('system_health');
      expect(toolNames).toContain('system_status');
      expect(toolNames).toContain('system_ping');
      expect(toolNames).toContain('measures_component');
      expect(toolNames).toContain('measures_components');
      expect(toolNames).toContain('measures_history');
    });
    it('should register tools with correct descriptions', async () => {
      const { mcpServer } = await import('../index.js');
      // Map of tool names to their descriptions from the mcpServer.tool mock calls
      const toolDescriptions = new Map(
        (mcpServer as any).tool.mock.calls.map((call: any) => [call[0], call[1]])
      );
      expect(toolDescriptions.get('projects')).toBe(
        'List all SonarQube projects with metadata. Essential for project discovery, inventory management, and accessing project-specific analysis data (requires admin permissions)'
      );
      expect(toolDescriptions.get('metrics')).toBe(
        'Get available metrics from SonarQube. Use this to discover all measurable code quality dimensions (lines of code, complexity, coverage, duplications, etc.) for reports and dashboards'
      );
      expect(toolDescriptions.get('issues')).toBe(
        'Search and filter SonarQube issues by severity, status, assignee, tag, file path, directory, scope, and more. Critical for dashboards, targeted clean-up sprints, security audits, and regression testing. Supports faceted search for aggregations.'
      );
      expect(toolDescriptions.get('system_health')).toBe(
        'Get the health status of the SonarQube instance. Monitor system components, database connectivity, and overall service availability for operational insights'
      );
      expect(toolDescriptions.get('system_status')).toBe(
        'Get the status of the SonarQube instance'
      );
      expect(toolDescriptions.get('system_ping')).toBe(
        'Ping the SonarQube instance to check if it is up'
      );
      expect(toolDescriptions.get('measures_component')).toBe(
        'Get measures for a specific component (project, directory, or file). Essential for tracking code quality metrics, technical debt, and trends over time'
      );
      expect(toolDescriptions.get('measures_components')).toBe(
        'Get measures for multiple components'
      );
      expect(toolDescriptions.get('measures_history')).toBe('Get measures history for a component');
    });
    it('should register tools with valid schemas', async () => {
      const { mcpServer } = await import('../index.js');
      // Extract schemas from the mcpServer.tool mock calls
      const toolSchemas = new Map(
        (mcpServer as any).tool.mock.calls.map((call: any) => [call[0], call[2]])
      );
      // Check if each tool has a schema defined
      for (const [, schema] of toolSchemas.entries()) {
        expect(schema).toBeDefined();
      }
      // Check specific schemas for required tools
      expect(toolSchemas.get('projects')).toHaveProperty('page');
      expect(toolSchemas.get('projects')).toHaveProperty('page_size');
      expect(toolSchemas.get('issues')).toHaveProperty('project_key');
      expect(toolSchemas.get('issues')).toHaveProperty('severity');
      expect(toolSchemas.get('measures_component')).toHaveProperty('component');
      expect(toolSchemas.get('measures_component')).toHaveProperty('metric_keys');
      expect(toolSchemas.get('measures_components')).toHaveProperty('component_keys');
      expect(toolSchemas.get('measures_components')).toHaveProperty('metric_keys');
      expect(toolSchemas.get('measures_history')).toHaveProperty('component');
      expect(toolSchemas.get('measures_history')).toHaveProperty('metrics');
    });
    it('should register tools with valid handlers', async () => {
      const { mcpServer } = await import('../index.js');
      // Extract handlers from the mcpServer.tool mock calls
      const toolHandlers = new Map(
        (mcpServer as any).tool.mock.calls.map((call: any) => [call[0], call[4]])
      );
      // Check if each tool has a handler defined and it's a function
      for (const [, handler] of toolHandlers.entries()) {
        expect(handler).toBeDefined();
        expect(typeof handler).toBe('function');
      }
    });
  });
});

```
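
For readers tracing the mock call indices above (`call[0]` name, `call[1]` description, `call[2]` schema, `call[4]` handler), the registrations appear to use a five-argument form. A sketch of that shape follows, with the fourth argument assumed to be MCP tool annotations (an inference, not confirmed on this page).

```typescript
// Hypothetical registration shape inferred from the mock assertions above.
mcpServer.tool(
  'system_ping', // call[0]: tool name
  'Ping the SonarQube instance to check if it is up', // call[1]: description
  {}, // call[2]: parameter schema (empty object for a no-argument tool)
  { readOnlyHint: true }, // call[3]: assumed tool annotations
  async () => createTextResponse('pong') // call[4]: handler (illustrative body)
);
```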

--------------------------------------------------------------------------------
/docs/architecture/decisions/0022-package-manager-choice-pnpm.md:
--------------------------------------------------------------------------------

```markdown
# 22. Package Manager Choice: pnpm

Date: 2025-10-11

## Status

Accepted

## Context

The choice of package manager significantly impacts development workflow, CI/CD performance, disk usage, and dependency management reliability. The SonarQube MCP Server requires:

- Fast dependency installation for rapid development iteration
- Efficient disk space usage across multiple projects
- Strict dependency resolution to prevent phantom dependencies
- Reliable dependency management in CI/CD pipelines
- Support for standalone distribution (no global installation required)
- Consistent versions across development and CI/CD environments

Traditional npm has several limitations:

- Flat node_modules structure can lead to phantom dependencies (accessing packages not in package.json)
- Slower installation times due to redundant copies of packages
- Higher disk space usage with duplicate packages across projects
- Less strict dependency resolution

Yarn Classic improved some aspects but still uses flat node_modules and has its own consistency issues. Yarn Berry (v2+) introduced significant breaking changes and has lower adoption.

## Decision

We will use **pnpm** (version 10.17.0) as the exclusive package manager for this project.

### Key Features of pnpm

1. **Content-Addressable Storage**
   - All packages stored once in a global store (~/.pnpm-store)
   - node_modules uses hard links to the global store
   - Dramatically reduces disk space usage (up to 50% compared to npm)

2. **Strict Dependency Resolution**
   - Non-flat node_modules structure prevents phantom dependencies
   - Only dependencies declared in package.json are accessible
   - Catches undeclared dependencies early in development

3. **Performance**
   - Up to 2x faster than npm for clean installs
   - Parallel installation of packages
   - Efficient caching and reuse

4. **Standalone Distribution**
   - Can run as standalone binary without global installation
   - Ensures consistent pnpm version across all environments
   - Configured via `packageManager` field in package.json

### Version Consistency Requirement

**CRITICAL**: The pnpm version must be consistent across ALL locations:

1. **package.json**: `"packageManager": "pnpm@10.17.0"`
2. **Dockerfile**: `RUN npm install -g pnpm@10.17.0`
3. **GitHub Actions workflows**: All workflow files using pnpm must specify `version: 10.17.0`
   - `.github/workflows/main.yml`
   - `.github/workflows/pr.yml`
   - `.github/workflows/publish.yml` (PNPM_VERSION environment variable)
   - `.github/workflows/reusable-setup.yml` (default pnpm-version input)
   - `.github/workflows/reusable-security.yml` (default pnpm-version input)
   - `.github/workflows/reusable-validate.yml` (default pnpm-version input)
4. **Documentation**: README.md, CONTRIBUTING.md
5. **Setup scripts**: `scripts/setup.sh`

**Why this matters**: If package.json and GitHub workflows have different pnpm versions, CI/CD fails with:

```
Error: Multiple versions of pnpm specified:
  - version X in the GitHub Action config with the key "version"
  - version pnpm@Y in the package.json with the key "packageManager"
```

### Configuration

**package.json**:

```json
{
  "packageManager": "[email protected]"
}
```

This field:

- Enables Corepack to automatically use the correct pnpm version
- Ensures all developers and CI/CD use the same version
- Prevents version drift and inconsistent behavior

## Consequences

### Positive

- **Disk Space Efficiency**: Saves 50%+ disk space compared to npm through global store
- **Faster Installs**: 2x faster clean installs, even faster with warm cache
- **Phantom Dependency Prevention**: Strict node_modules structure catches undeclared dependencies
- **Better Monorepo Support**: Built-in workspace support (though not needed for this project)
- **Consistent Environments**: `packageManager` field ensures version consistency
- **Standalone Distribution**: No global pnpm installation needed with Corepack
- **Better CI/CD Performance**: Faster installs reduce pipeline execution time
- **Symlink-based Structure**: Easy to understand dependency tree
- **Lock File Determinism**: pnpm-lock.yaml is more deterministic than package-lock.json

### Negative

- **Learning Curve**: Team members familiar with npm/yarn need to learn pnpm commands
- **Ecosystem Compatibility**: Some older tools may not recognize pnpm's node_modules structure
- **Version Management Overhead**: Must update version in multiple locations (mitigated by documentation in CLAUDE.md)
- **IDE Integration**: Some IDEs may not fully support pnpm's symlink structure (rare)
- **Docker Image Size**: Requires installing pnpm in container (minimal overhead)

### Neutral

- **Different Commands**: Some npm/yarn commands have different syntax in pnpm
- **Lock File Format**: pnpm-lock.yaml differs from package-lock.json or yarn.lock
- **Global Store Location**: Requires understanding of ~/.pnpm-store for troubleshooting
- **Corepack Dependency**: Relies on Node.js Corepack (shipped with Node.js 16.9+, still marked experimental)

## Implementation

### Installation

**With Corepack (recommended)**:

```bash
corepack enable
corepack prepare pnpm@10.17.0 --activate
```

**Direct installation**:

```bash
npm install -g pnpm@10.17.0
```

### Common Commands

```bash
# Install dependencies
pnpm install

# Install with frozen lockfile (CI/CD)
pnpm install --frozen-lockfile

# Add a dependency
pnpm add <package>

# Add a dev dependency
pnpm add -D <package>

# Remove a dependency
pnpm remove <package>

# Run a script
pnpm run <script>
pnpm <script>  # Short form

# Update dependencies
pnpm update

# Audit dependencies
pnpm audit
```

### CI/CD Integration

**GitHub Actions**:

```yaml
- name: Install pnpm
  uses: pnpm/action-setup@v4
  with:
    version: 10.17.0
    run_install: false
    standalone: true

- name: Setup Node.js
  uses: actions/setup-node@v4
  with:
    node-version: 22
    cache: pnpm

- name: Install dependencies
  run: pnpm install --frozen-lockfile
```

**Dockerfile**:

```dockerfile
# Install pnpm globally
RUN npm install -g pnpm@10.17.0

# Install dependencies
COPY package.json pnpm-lock.yaml ./
RUN pnpm install --frozen-lockfile --prod
```

### Migration from npm

If migrating from npm:

1. Delete `package-lock.json` and `node_modules`
2. Add `"packageManager": "pnpm@10.17.0"` to package.json
3. Run `pnpm install` to generate `pnpm-lock.yaml`
4. Update all CI/CD workflows to use pnpm
5. Update documentation and developer setup instructions
6. Commit `pnpm-lock.yaml` to version control
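
Note: if you want to carry over npm's resolved versions instead of re-resolving, `pnpm import` can generate `pnpm-lock.yaml` from an existing `package-lock.json`; run it before step 1 deletes the npm lock file.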

## Examples

### Before (with npm)

**Phantom dependency issue**:

```typescript
// lodash not in package.json, but accessible via transitive dependency
import _ from 'lodash'; // Works with npm, but fragile

// If the transitive dependency removes lodash, this breaks
```

**Disk space**:

```bash
project-1/node_modules/lodash  # 1.2 MB
project-2/node_modules/lodash  # 1.2 MB
project-3/node_modules/lodash  # 1.2 MB
Total: 3.6 MB for 3 projects
```

### After (with pnpm)

**Phantom dependency prevention**:

```typescript
// lodash not in package.json
import _ from 'lodash'; // Error: Cannot find module 'lodash'

// Forces explicit declaration in package.json
// Results in more reliable dependency management
```

**Disk space**:

```bash
~/.pnpm-store/lodash@<version>  # 1.2 MB (stored once)
project-1/node_modules/lodash  # symlink (few bytes)
project-2/node_modules/lodash  # symlink (few bytes)
project-3/node_modules/lodash  # symlink (few bytes)
Total: ~1.2 MB for 3 projects (~67% savings)
```

### Performance Comparison

Benchmarks on this project (measured on CI/CD):

| Operation       | npm    | pnpm   | Improvement |
| --------------- | ------ | ------ | ----------- |
| Clean install   | ~45s   | ~22s   | 2.0x faster |
| With warm cache | ~30s   | ~8s    | 3.8x faster |
| Disk space      | 245 MB | 108 MB | 56% smaller |

## References

- pnpm Documentation: https://pnpm.io/
- Motivation for pnpm: https://pnpm.io/motivation
- pnpm CLI Reference: https://pnpm.io/cli/install
- Corepack Documentation: https://nodejs.org/api/corepack.html
- Version Consistency Guidelines: CLAUDE.md "Updating pnpm Version" section
- GitHub Actions Setup: .github/workflows/reusable-setup.yml
- Docker Setup: Dockerfile

```

--------------------------------------------------------------------------------
/src/__tests__/parameter-transformations.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect } from 'vitest';
import { mapToSonarQubeParams, nullToUndefined } from '../index.js';
import { z } from 'zod';
describe('Parameter Transformation Functions', () => {
  describe('nullToUndefined', () => {
    it('should convert null to undefined', () => {
      expect(nullToUndefined(null)).toBeUndefined();
    });
    it('should return the original value for non-null inputs', () => {
      expect(nullToUndefined(0)).toBe(0);
      expect(nullToUndefined('')).toBe('');
      expect(nullToUndefined('test')).toBe('test');
      expect(nullToUndefined(undefined)).toBeUndefined();
      expect(nullToUndefined(123)).toBe(123);
      expect(nullToUndefined(false)).toBe(false);
      expect(nullToUndefined(true)).toBe(true);
      const obj = { test: 'value' };
      const arr = [1, 2, 3];
      expect(nullToUndefined(obj)).toBe(obj);
      expect(nullToUndefined(arr)).toBe(arr);
    });
  });
  describe('mapToSonarQubeParams', () => {
    it('should map MCP tool parameters to SonarQube client parameters', () => {
      const result = mapToSonarQubeParams({
        project_key: 'my-project',
        severity: 'MAJOR',
        page: '10',
        page_size: '25',
        statuses: ['OPEN', 'CONFIRMED'],
        resolutions: ['FALSE-POSITIVE'],
        resolved: 'true',
        types: ['BUG', 'VULNERABILITY'],
        rules: ['rule1', 'rule2'],
        tags: ['tag1', 'tag2'],
        created_after: '2023-01-01',
        created_before: '2023-12-31',
        created_at: '2023-06-15',
        created_in_last: '30d',
        assignees: ['user1', 'user2'],
        authors: ['author1', 'author2'],
        cwe: ['cwe1', 'cwe2'],
        languages: ['java', 'js'],
        owasp_top10: ['a1', 'a2'],
        sans_top25: ['sans1', 'sans2'],
        sonarsource_security: ['ss1', 'ss2'],
        on_component_only: 'true',
        facets: ['facet1', 'facet2'],
        since_leak_period: 'true',
        in_new_code_period: 'true',
      });
      // Check key mappings
      expect(result.projectKey).toBe('my-project');
      expect(result.severity).toBe('MAJOR');
      expect(result.page).toBe('10');
      expect(result.pageSize).toBe('25');
      expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
      expect(result.resolutions).toEqual(['FALSE-POSITIVE']);
      expect(result.resolved).toBe('true');
      expect(result.types).toEqual(['BUG', 'VULNERABILITY']);
      expect(result.rules).toEqual(['rule1', 'rule2']);
      expect(result.tags).toEqual(['tag1', 'tag2']);
      expect(result.createdAfter).toBe('2023-01-01');
      expect(result.createdBefore).toBe('2023-12-31');
      expect(result.createdAt).toBe('2023-06-15');
      expect(result.createdInLast).toBe('30d');
      expect(result.assignees).toEqual(['user1', 'user2']);
      expect(result.authors).toEqual(['author1', 'author2']);
      expect(result.cwe).toEqual(['cwe1', 'cwe2']);
      expect(result.languages).toEqual(['java', 'js']);
      expect(result.owaspTop10).toEqual(['a1', 'a2']);
      expect(result.sansTop25).toEqual(['sans1', 'sans2']);
      expect(result.sonarsourceSecurity).toEqual(['ss1', 'ss2']);
      expect(result.onComponentOnly).toBe('true');
      expect(result.facets).toEqual(['facet1', 'facet2']);
      expect(result.sinceLeakPeriod).toBe('true');
      expect(result.inNewCodePeriod).toBe('true');
    });
    it('should handle null and undefined values correctly', () => {
      const result = mapToSonarQubeParams({
        project_key: 'my-project',
        severity: null,
        statuses: null,
        resolved: null,
      });
      expect(result.projectKey).toBe('my-project');
      expect(result.severity).toBeUndefined();
      expect(result.statuses).toBeUndefined();
      expect(result.resolved).toBeUndefined();
    });
    it('should handle minimal parameters', () => {
      const result = mapToSonarQubeParams({
        project_key: 'my-project',
      });
      expect(result.projectKey).toBe('my-project');
      expect(result.severity).toBeUndefined();
      expect(result.page).toBeUndefined();
      expect(result.pageSize).toBeUndefined();
    });
    it('should handle empty parameters', () => {
      const result = mapToSonarQubeParams({
        project_key: 'my-project',
        statuses: [],
        resolutions: [],
        types: [],
        rules: [],
      });
      expect(result.projectKey).toBe('my-project');
      expect(result.statuses).toEqual([]);
      expect(result.resolutions).toEqual([]);
      expect(result.types).toEqual([]);
      expect(result.rules).toEqual([]);
    });
  });
  describe('Array parameter handling', () => {
    it('should handle array handling for issues parameters', () => {
      // Test with arrays
      const result1 = mapToSonarQubeParams({
        project_key: 'project1',
        statuses: ['OPEN', 'CONFIRMED'],
        types: ['BUG', 'VULNERABILITY'],
      });
      expect(result1.statuses).toEqual(['OPEN', 'CONFIRMED']);
      expect(result1.types).toEqual(['BUG', 'VULNERABILITY']);
      // Test with null
      const result2 = mapToSonarQubeParams({
        project_key: 'project1',
        statuses: null,
        types: null,
      });
      expect(result2.statuses).toBeUndefined();
      expect(result2.types).toBeUndefined();
    });
  });
  describe('Schema Transformations', () => {
    describe('Page Parameter Transformation', () => {
      it('should transform string values to numbers or null', () => {
        const pageSchema = z
          .string()
          .optional()
          .transform((val: any) => (val ? parseInt(val, 10) || null : null));
        // Test valid numeric strings
        expect(pageSchema.parse('1')).toBe(1);
        expect(pageSchema.parse('100')).toBe(100);
        // Test invalid values
        expect(pageSchema.parse('invalid')).toBe(null);
        expect(pageSchema.parse('')).toBe(null);
        expect(pageSchema.parse(undefined)).toBe(null);
      });
    });
    describe('Boolean Parameter Transformation', () => {
      it('should transform string "true"/"false" to boolean values', () => {
        const booleanSchema = z
          .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
          .nullable()
          .optional();
        // String values
        expect(booleanSchema.parse('true')).toBe(true);
        expect(booleanSchema.parse('false')).toBe(false);
        // Boolean values should pass through
        expect(booleanSchema.parse(true)).toBe(true);
        expect(booleanSchema.parse(false)).toBe(false);
        // Null/undefined values
        expect(booleanSchema.parse(null)).toBe(null);
        expect(booleanSchema.parse(undefined)).toBe(undefined);
      });
    });
    describe('Enum Arrays Parameter Transformation', () => {
      it('should validate enum arrays correctly', () => {
        const statusSchema = z
          .array(
            z.enum([
              'OPEN',
              'CONFIRMED',
              'REOPENED',
              'RESOLVED',
              'CLOSED',
              'TO_REVIEW',
              'IN_REVIEW',
              'REVIEWED',
            ])
          )
          .nullable()
          .optional();
        // Valid values
        expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
        // Null/undefined values
        expect(statusSchema.parse(null)).toBe(null);
        expect(statusSchema.parse(undefined)).toBe(undefined);
        // Invalid values should throw
        expect(() => statusSchema.parse(['INVALID'])).toThrow();
      });
      it('should validate resolution enums', () => {
        const resolutionSchema = z
          .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
          .nullable()
          .optional();
        // Valid values
        expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
          'FALSE-POSITIVE',
          'WONTFIX',
        ]);
        // Null/undefined values
        expect(resolutionSchema.parse(null)).toBe(null);
        expect(resolutionSchema.parse(undefined)).toBe(undefined);
        // Invalid values should throw
        expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
      });
      it('should validate issue type enums', () => {
        const typeSchema = z
          .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
          .nullable()
          .optional();
        // Valid values
        expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
        // Null/undefined values
        expect(typeSchema.parse(null)).toBe(null);
        expect(typeSchema.parse(undefined)).toBe(undefined);
        // Invalid values should throw
        expect(() => typeSchema.parse(['INVALID'])).toThrow();
      });
    });
  });
});

```
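
The two functions exercised above are exported from `../index.js`, which is not shown on this page; the following is a minimal sketch consistent with these assertions (inferred, with only a few of the mapped keys spelled out).

```typescript
// Inferred from the tests above; the real mapToSonarQubeParams maps many more keys.
export function nullToUndefined<T>(value: T | null | undefined): T | undefined {
  return value === null ? undefined : value;
}

export function mapToSonarQubeParams(params: Record<string, unknown>) {
  return {
    projectKey: params.project_key,
    severity: nullToUndefined(params.severity),
    page: nullToUndefined(params.page),
    pageSize: nullToUndefined(params.page_size),
    statuses: nullToUndefined(params.statuses),
    resolved: nullToUndefined(params.resolved),
    // ...the remaining snake_case keys map to camelCase the same way
  };
}
```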

--------------------------------------------------------------------------------
/src/__tests__/assign-issue.test.ts:
--------------------------------------------------------------------------------

```typescript
import { vi, describe, it, expect, beforeEach } from 'vitest';
import { IssuesDomain } from '../domains/issues.js';
import { handleAssignIssue } from '../handlers/issues.js';
import type { SonarQubeIssue } from '../types/issues.js';

// Extended issue type for testing with assignee fields
type SonarQubeIssueWithAssignee = SonarQubeIssue & {
  assignee?: string | null;
  assigneeName?: string | null;
  resolution?: string | null;
};

describe('Assign Issue Functionality', () => {
  const organization = 'test-org';

  beforeEach(() => {
    vi.clearAllMocks();
  });

  describe('IssuesDomain.assignIssue', () => {
    it('should assign an issue and return updated details', async () => {
      const issueKey = 'ISSUE-123';
      const assignee = 'jane.doe';

      const mockSearchBuilder = {
        withIssues: vi.fn().mockReturnThis(),
        withAdditionalFields: vi.fn().mockReturnThis(),
        execute: vi.fn<() => Promise<any>>().mockResolvedValue({
          issues: [
            {
              key: issueKey,
              rule: 'test-rule',
              component: 'test-component',
              project: 'test-project',
              message: 'Test issue',
              assignee: assignee,
              assigneeName: 'Jane Doe',
              severity: 'CRITICAL',
              type: 'VULNERABILITY',
              status: 'OPEN',
              tags: [],
              creationDate: '2023-01-01T00:00:00.000Z',
              updateDate: '2023-01-01T00:00:00.000Z',
            } as unknown as SonarQubeIssueWithAssignee,
          ],
          total: 1,
        }),
      };

      const mockWebApiClient = {
        issues: {
          assign: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({}),
          search: vi.fn().mockReturnValue(mockSearchBuilder),
        },
      };

      const issuesDomain = new IssuesDomain(mockWebApiClient as any, organization);
      const result = await issuesDomain.assignIssue({
        issueKey,
        assignee,
      });

      expect(mockWebApiClient.issues.assign).toHaveBeenCalledWith({
        issue: issueKey,
        assignee: assignee,
      });

      expect(mockWebApiClient.issues.search).toHaveBeenCalled();
      expect(mockSearchBuilder.withIssues).toHaveBeenCalledWith([issueKey]);
      expect(mockSearchBuilder.withAdditionalFields).toHaveBeenCalledWith(['_all']);
      expect(mockSearchBuilder.execute).toHaveBeenCalled();

      expect(result.key).toBe(issueKey);
      expect((result as SonarQubeIssueWithAssignee).assignee).toBe(assignee);
    });

    it('should handle unassignment', async () => {
      const issueKey = 'ISSUE-456';

      const mockSearchBuilder = {
        withIssues: vi.fn().mockReturnThis(),
        withAdditionalFields: vi.fn().mockReturnThis(),
        execute: vi.fn<() => Promise<any>>().mockResolvedValue({
          issues: [
            {
              key: issueKey,
              rule: 'test-rule',
              component: 'test-component',
              project: 'test-project',
              message: 'Test issue',
              assignee: null,
              assigneeName: null,
              severity: 'INFO',
              type: 'CODE_SMELL',
              status: 'OPEN',
              tags: [],
              creationDate: '2023-01-01T00:00:00.000Z',
              updateDate: '2023-01-01T00:00:00.000Z',
            } as unknown as SonarQubeIssueWithAssignee,
          ],
          total: 1,
        }),
      };

      const mockWebApiClient = {
        issues: {
          assign: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({}),
          search: vi.fn().mockReturnValue(mockSearchBuilder),
        },
      };

      const issuesDomain = new IssuesDomain(mockWebApiClient as any, organization);
      const result = await issuesDomain.assignIssue({
        issueKey,
      });

      expect(mockWebApiClient.issues.assign).toHaveBeenCalledWith({
        issue: issueKey,
        assignee: undefined,
      });

      expect((result as SonarQubeIssueWithAssignee).assignee).toBeNull();
    });

    it('should throw error if issue not found after assignment', async () => {
      const issueKey = 'ISSUE-999';

      const mockSearchBuilder = {
        withIssues: vi.fn().mockReturnThis(),
        withAdditionalFields: vi.fn().mockReturnThis(),
        execute: vi.fn<() => Promise<any>>().mockResolvedValue({
          issues: [],
          total: 0,
        }),
      };

      const mockWebApiClient = {
        issues: {
          assign: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({}),
          search: vi.fn().mockReturnValue(mockSearchBuilder),
        },
      };

      const issuesDomain = new IssuesDomain(mockWebApiClient as any, organization);

      await expect(
        issuesDomain.assignIssue({
          issueKey,
        })
      ).rejects.toThrow(`Issue ${issueKey} not found after assignment`);
    });
  });

  describe('handleAssignIssue', () => {
    it('should handle issue assignment and return formatted response', async () => {
      const mockClient = {
        assignIssue: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({
          key: 'ISSUE-123',
          rule: 'test-rule',
          component: 'src/main.js',
          project: 'test-project',
          message: 'Test issue message',
          assignee: 'john.doe',
          assigneeName: 'John Doe',
          severity: 'MAJOR',
          type: 'BUG',
          status: 'OPEN',
          resolution: null,
          tags: [],
          creationDate: '2023-01-01T00:00:00.000Z',
          updateDate: '2023-01-01T00:00:00.000Z',
        } as unknown as SonarQubeIssueWithAssignee),
      };

      const result = await handleAssignIssue(
        {
          issueKey: 'ISSUE-123',
          assignee: 'john.doe',
        },

        mockClient as any
      );

      expect(mockClient.assignIssue).toHaveBeenCalledWith({
        issueKey: 'ISSUE-123',
        assignee: 'john.doe',
      });

      expect(result.content).toHaveLength(1);
      expect(result.content[0]?.type).toBe('text');

      const contentText = result.content[0]?.text;
      expect(contentText).toBeDefined();
      const parsedContent = JSON.parse(contentText as string) as {
        message: string;
        issue: {
          key: string;
          assignee: string | null;
          severity: string;
        };
      };
      expect(parsedContent.message).toContain('Assigned to: John Doe');
      expect(parsedContent.issue.key).toBe('ISSUE-123');
      expect(parsedContent.issue.assignee).toBe('john.doe');
      expect(parsedContent.issue.severity).toBe('MAJOR');
    });

    it('should handle issue unassignment', async () => {
      const mockClient = {
        assignIssue: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({
          key: 'ISSUE-456',
          rule: 'test-rule',
          component: 'src/utils.js',
          project: 'test-project',
          message: 'Another test issue',
          assignee: null,
          assigneeName: null,
          severity: 'MINOR',
          type: 'CODE_SMELL',
          status: 'CONFIRMED',
          resolution: null,
          tags: [],
          creationDate: '2023-01-01T00:00:00.000Z',
          updateDate: '2023-01-01T00:00:00.000Z',
        } as unknown as SonarQubeIssueWithAssignee),
      };

      const result = await handleAssignIssue(
        {
          issueKey: 'ISSUE-456',
        },

        mockClient as any
      );

      expect(mockClient.assignIssue).toHaveBeenCalledWith({
        issueKey: 'ISSUE-456',
        assignee: undefined,
      });

      expect(result.content).toHaveLength(1);
      expect(result.content[0]?.type).toBe('text');

      const contentText = result.content[0]?.text;
      expect(contentText).toBeDefined();
      const parsedContent = JSON.parse(contentText as string) as {
        message: string;
        issue: {
          key: string;
          assignee: string | null;
          severity: string;
        };
      };
      expect(parsedContent.message).toContain('Issue unassigned');
      expect(parsedContent.issue.key).toBe('ISSUE-456');
      expect(parsedContent.issue.assignee).toBeNull();
      expect(parsedContent.issue.severity).toBe('MINOR');
    });

    it('should handle errors gracefully', async () => {
      const mockClient = {
        assignIssue: vi
          .fn<(params: any) => Promise<any>>()
          .mockRejectedValue(new Error('API Error')),
      };

      await expect(
        handleAssignIssue(
          {
            issueKey: 'ISSUE-789',
            assignee: 'invalid.user',
          },

          mockClient as any
        )
      ).rejects.toThrow('API Error');

      expect(mockClient.assignIssue).toHaveBeenCalledWith({
        issueKey: 'ISSUE-789',
        assignee: 'invalid.user',
      });
    });
  });
});

```
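
From the mocked interactions above, `IssuesDomain.assignIssue` evidently performs an assign call followed by a re-fetch of the issue. A sketch of that flow as a standalone function (inferred from the tests, not the actual domain code; `webApiClient` stands in for the injected sonarqube-web-api-client instance):

```typescript
// Inferred from the mocked interactions above.
async function assignIssue(webApiClient: any, params: { issueKey: string; assignee?: string }) {
  // 1. Perform the (un)assignment; omitting `assignee` unassigns the issue.
  await webApiClient.issues.assign({ issue: params.issueKey, assignee: params.assignee });

  // 2. Re-fetch the issue so callers receive its updated state.
  const result = await webApiClient.issues
    .search()
    .withIssues([params.issueKey])
    .withAdditionalFields(['_all'])
    .execute();

  const issue = result.issues[0];
  if (!issue) {
    throw new Error(`Issue ${params.issueKey} not found after assignment`);
  }
  return issue;
}
```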

--------------------------------------------------------------------------------
/scripts/load-test.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash
# Load testing script for SonarQube MCP Server auto-scaling validation
# Tests HPA behavior under load using k6 or Apache Bench
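# Usage (all overrides optional):
#   NAMESPACE=my-ns SERVICE_NAME=my-svc DURATION=600 CONCURRENT_USERS=100 RPS=200 ./scripts/load-test.sh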

set -e  # Exit on error

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

echo -e "${GREEN}⚡ SonarQube MCP Server - Load Testing & Auto-scaling Validation${NC}"
echo "================================================================="

# Configuration
NAMESPACE="${NAMESPACE:-sonarqube-mcp}"
SERVICE_NAME="${SERVICE_NAME:-sonarqube-mcp}"
PORT="${PORT:-3000}"
DURATION="${DURATION:-300}"  # 5 minutes default
CONCURRENT_USERS="${CONCURRENT_USERS:-50}"
REQUESTS_PER_SECOND="${RPS:-100}"

# Function to check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Check prerequisites
echo -e "\n${YELLOW}📋 Checking prerequisites...${NC}"

# Check for kubectl
if ! command_exists kubectl; then
    echo -e "${RED}❌ kubectl is not installed. Please install it first.${NC}"
    exit 1
fi

# Check for load testing tools
LOAD_TOOL=""
if command_exists k6; then
    LOAD_TOOL="k6"
    echo -e "✅ k6 is installed"
elif command_exists ab; then
    LOAD_TOOL="ab"
    echo -e "✅ Apache Bench is installed"
else
    echo -e "${RED}❌ No load testing tool found. Please install k6 or Apache Bench.${NC}"
    echo "   Install k6: brew install k6 (macOS) or https://k6.io/docs/getting-started/installation/"
    echo "   Install ab: Usually comes with Apache (httpd-tools package)"
    exit 1
fi

# Function to get current replica count
get_replica_count() {
    kubectl get deployment "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath='{.status.replicas}' 2>/dev/null || echo "0"
}

# Function to get HPA status
get_hpa_status() {
    kubectl get hpa -n "$NAMESPACE" -o wide 2>/dev/null || echo "No HPA found"
}

# Function to monitor resources
monitor_resources() {
    echo -e "\n${BLUE}📊 Monitoring resources during load test...${NC}"
    echo "Time | Replicas | CPU Usage | Memory Usage | Ready Pods"
    echo "--------------------------------------------------------"
    
    while true; do
        timestamp=$(date +"%H:%M:%S")
        replicas=$(get_replica_count)
        
        # Get CPU and memory from HPA
        hpa_info=$(kubectl get hpa "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath='{.status.currentCPUUtilizationPercentage}:{.status.currentReplicas}:{.status.desiredReplicas}' 2>/dev/null || echo "0:0:0")
        cpu_usage=$(echo "$hpa_info" | cut -d: -f1)
        current_replicas=$(echo "$hpa_info" | cut -d: -f2)
        desired_replicas=$(echo "$hpa_info" | cut -d: -f3)
        
        # Get ready pods
        ready_pods=$(kubectl get pods -n "$NAMESPACE" -l "app.kubernetes.io/name=$SERVICE_NAME" -o jsonpath='{.items[?(@.status.conditions[?(@.type=="Ready")].status=="True")].metadata.name}' | wc -w | tr -d ' ')
        
        echo "$timestamp | $current_replicas/$desired_replicas | ${cpu_usage:-N/A}% | N/A | $ready_pods"
        
        sleep 5
    done
}

# Create k6 test script
create_k6_script() {
    cat > /tmp/sonarqube-mcp-load-test.js << 'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';

const errorRate = new Rate('errors');

export const options = {
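  // Note: __ENV values arrive as strings; the stage math below relies on JS
  // numeric coercion (e.g. "300" - 90 === 210) before the 's' unit is appended.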
  stages: [
    { duration: '30s', target: __ENV.CONCURRENT_USERS / 2 }, // Ramp up to half users
    { duration: '30s', target: __ENV.CONCURRENT_USERS },     // Ramp up to full users
    { duration: __ENV.DURATION - 90 + 's', target: __ENV.CONCURRENT_USERS }, // Stay at full load
    { duration: '30s', target: 0 },                          // Ramp down
  ],
  thresholds: {
    http_req_duration: ['p(95)<500'], // 95% of requests should be below 500ms
    errors: ['rate<0.1'],             // Error rate should be below 10%
  },
};

const BASE_URL = `http://${__ENV.SERVICE_URL}`;

export default function () {
  // Test different endpoints
  const endpoints = [
    '/health',
    '/ready',
    '/metrics',
  ];
  
  const endpoint = endpoints[Math.floor(Math.random() * endpoints.length)];
  
  const res = http.get(`${BASE_URL}${endpoint}`);
  
  // Check response
  const success = check(res, {
    'status is 200 or 503': (r) => r.status === 200 || r.status === 503,
    'response time < 500ms': (r) => r.timings.duration < 500,
  });
  
  errorRate.add(!success);
  
  // Random sleep between 0.5 and 2 seconds
  sleep(Math.random() * 1.5 + 0.5);
}
EOF
}

# Function to run k6 load test
run_k6_test() {
    local service_url=$1
    
    echo -e "\n${BLUE}🚀 Running k6 load test...${NC}"
    echo "Target: http://$service_url"
    echo "Duration: $DURATION seconds"
    echo "Concurrent users: $CONCURRENT_USERS"
    
    create_k6_script
    
    k6 run \
        -e SERVICE_URL="$service_url" \
        -e CONCURRENT_USERS="$CONCURRENT_USERS" \
        -e DURATION="$DURATION" \
        /tmp/sonarqube-mcp-load-test.js
}

# Function to run Apache Bench test
run_ab_test() {
    local service_url=$1
    
    echo -e "\n${BLUE}🚀 Running Apache Bench load test...${NC}"
    echo "Target: http://$service_url/health"
    echo "Duration: $DURATION seconds"
    echo "Concurrent users: $CONCURRENT_USERS"
    
    # Calculate total requests
    total_requests=$((REQUESTS_PER_SECOND * DURATION))
    
    ab -n "$total_requests" \
       -c "$CONCURRENT_USERS" \
       -t "$DURATION" \
       -s 30 \
       "http://$service_url/health"
}

# Main execution
echo -e "\n${YELLOW}🔍 Checking deployment status...${NC}"

# Check if deployment exists
if ! kubectl get deployment "$SERVICE_NAME" -n "$NAMESPACE" >/dev/null 2>&1; then
    echo -e "${RED}❌ Deployment $SERVICE_NAME not found in namespace $NAMESPACE${NC}"
    exit 1
fi

# Check if HPA exists
if ! kubectl get hpa "$SERVICE_NAME" -n "$NAMESPACE" >/dev/null 2>&1; then
    echo -e "${RED}❌ HPA not found for $SERVICE_NAME in namespace $NAMESPACE${NC}"
    echo "Load testing without auto-scaling..."
fi

# Get initial state
echo -e "\n${BLUE}📊 Initial state:${NC}"
echo "Deployment: $SERVICE_NAME"
echo "Namespace: $NAMESPACE"
echo "Initial replicas: $(get_replica_count)"
echo -e "\nHPA Status:"
get_hpa_status

# Set up port forwarding
echo -e "\n${YELLOW}🔌 Setting up port forwarding...${NC}"
kubectl port-forward -n "$NAMESPACE" "svc/$SERVICE_NAME" 8080:$PORT > /dev/null 2>&1 &
PF_PID=$!
sleep 3

# Verify service is accessible
if ! curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/health | grep -q "200\|503"; then
    echo -e "${RED}❌ Service is not responding correctly${NC}"
    kill $PF_PID 2>/dev/null
    exit 1
fi

echo -e "${GREEN}✅ Service is accessible${NC}"

# Start resource monitoring in background
monitor_resources &
MONITOR_PID=$!

# Run load test based on available tool
SERVICE_URL="localhost:8080"

echo -e "\n${YELLOW}⚡ Starting load test...${NC}"
START_TIME=$(date +%s)

case "$LOAD_TOOL" in
    k6)
        run_k6_test "$SERVICE_URL"
        ;;
    ab)
        run_ab_test "$SERVICE_URL"
        ;;
esac

END_TIME=$(date +%s)
DURATION_ACTUAL=$((END_TIME - START_TIME))

# Stop monitoring
kill $MONITOR_PID 2>/dev/null

# Wait for scale down
echo -e "\n${YELLOW}⏳ Waiting 60 seconds for scale down...${NC}"
sleep 60

# Get final state
echo -e "\n${BLUE}📊 Final state:${NC}"
echo "Final replicas: $(get_replica_count)"
echo -e "\nHPA Status:"
get_hpa_status

# Show pod events during test
echo -e "\n${BLUE}📝 Pod events during test:${NC}"
kubectl get events -n "$NAMESPACE" --field-selector involvedObject.kind=Pod \
    --sort-by='.lastTimestamp' | grep -E "(Scaled|Started|Killing)" | tail -10

# Analyze HPA metrics
echo -e "\n${BLUE}📈 HPA scaling analysis:${NC}"

# Get HPA events
kubectl describe hpa "$SERVICE_NAME" -n "$NAMESPACE" | grep -A 20 "Events:" || echo "No HPA events found"

# Cleanup
echo -e "\n${YELLOW}🧹 Cleaning up...${NC}"
kill $PF_PID 2>/dev/null
rm -f /tmp/sonarqube-mcp-load-test.js

# Summary
echo -e "\n================================================================="
echo -e "${GREEN}📊 Load Test Summary:${NC}"
echo "Duration: $DURATION_ACTUAL seconds"
echo "Load tool: $LOAD_TOOL"
echo "Concurrent users: $CONCURRENT_USERS"

# Check if scaling occurred (compare against the replica count captured before the test)
FINAL_REPLICAS=$(get_replica_count)

if [ "$FINAL_REPLICAS" -gt "$INITIAL_REPLICAS" ]; then
    echo -e "\n${GREEN}✅ Auto-scaling worked!${NC}"
    echo "Scaled from $INITIAL_REPLICAS to $FINAL_REPLICAS replicas"
else
    echo -e "\n${YELLOW}⚠️  No scaling observed${NC}"
    echo "This could mean:"
    echo "  - Load was not high enough to trigger scaling"
    echo "  - HPA thresholds are too high"
    echo "  - HPA is not configured correctly"
fi

echo -e "\n${YELLOW}💡 Tips:${NC}"
echo "- Increase CONCURRENT_USERS to generate more load"
echo "- Extend DURATION for longer tests"
echo "- Monitor 'kubectl top pods -n $NAMESPACE' during testing"
echo "- Check HPA configuration: kubectl describe hpa -n $NAMESPACE"
```

--------------------------------------------------------------------------------
/src/utils/__tests__/elicitation.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, vi, beforeEach } from 'vitest';
import type { Mocked } from 'vitest';
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { ElicitationManager, createElicitationManager } from '../elicitation.js';

describe('ElicitationManager', () => {
  let manager: ElicitationManager;
  let mockServer: Mocked<Server>;

  beforeEach(() => {
    // Reset environment variables
    delete process.env.SONARQUBE_MCP_ELICITATION;
    delete process.env.SONARQUBE_MCP_BULK_THRESHOLD;
    delete process.env.SONARQUBE_MCP_REQUIRE_COMMENTS;
    delete process.env.SONARQUBE_MCP_INTERACTIVE_SEARCH;

    manager = new ElicitationManager();
    mockServer = {
      elicitInput: vi.fn(),
    } as unknown as Mocked<Server>;
  });

  describe('initialization', () => {
    it('should default to disabled', () => {
      expect(manager.isEnabled()).toBe(false);
    });

    it('should not be enabled without a server', () => {
      manager.updateOptions({ enabled: true });
      expect(manager.isEnabled()).toBe(false);
    });

    it('should be enabled with server and enabled option', () => {
      manager.updateOptions({ enabled: true });
      manager.setServer(mockServer);
      expect(manager.isEnabled()).toBe(true);
    });

    it('should respect environment variables', () => {
      process.env.SONARQUBE_MCP_ELICITATION = 'true';
      process.env.SONARQUBE_MCP_BULK_THRESHOLD = '10';
      process.env.SONARQUBE_MCP_REQUIRE_COMMENTS = 'true';
      process.env.SONARQUBE_MCP_INTERACTIVE_SEARCH = 'true';

      const envManager = createElicitationManager();
      expect(envManager.getOptions()).toEqual({
        enabled: true,
        bulkOperationThreshold: 10,
        requireComments: true,
        interactiveSearch: true,
      });
    });
  });

  describe('confirmBulkOperation', () => {
    beforeEach(() => {
      manager.updateOptions({ enabled: true, bulkOperationThreshold: 5 });
      manager.setServer(mockServer);
    });

    it('should auto-accept when disabled', async () => {
      manager.updateOptions({ enabled: false });
      const result = await manager.confirmBulkOperation('delete', 10);
      expect(result).toEqual({ action: 'accept', content: { confirm: true } });
      expect(mockServer.elicitInput).not.toHaveBeenCalled();
    });

    it('should auto-accept when below threshold', async () => {
      const result = await manager.confirmBulkOperation('delete', 3);
      expect(result).toEqual({ action: 'accept', content: { confirm: true } });
      expect(mockServer.elicitInput).not.toHaveBeenCalled();
    });

    it('should request confirmation when above threshold', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { confirm: true, comment: 'Test comment' },
      });

      const result = await manager.confirmBulkOperation('delete', 10, ['item1', 'item2']);

      expect(mockServer.elicitInput).toHaveBeenCalledWith({
        message: expect.stringContaining('delete 10 items'),
        requestedSchema: expect.objectContaining({
          properties: expect.objectContaining({
            confirm: expect.any(Object),
            comment: expect.any(Object),
          }),
        }),
      });

      expect(result).toEqual({
        action: 'accept',
        content: { confirm: true, comment: 'Test comment' },
      });
    });

    it('should handle user rejection', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { confirm: false },
      });

      const result = await manager.confirmBulkOperation('delete', 10);
      expect(result).toEqual({ action: 'reject' });
    });

    it('should handle cancellation', async () => {
      mockServer.elicitInput.mockResolvedValue({ action: 'cancel' });

      const result = await manager.confirmBulkOperation('delete', 10);
      expect(result).toEqual({ action: 'cancel' });
    });

    it('should handle errors gracefully', async () => {
      mockServer.elicitInput.mockRejectedValue(new Error('Test error'));

      const result = await manager.confirmBulkOperation('delete', 10);
      expect(result).toEqual({ action: 'cancel' });
    });
  });

  describe('collectAuthentication', () => {
    beforeEach(() => {
      manager.updateOptions({ enabled: true });
      manager.setServer(mockServer);
    });

    it('should cancel when disabled', async () => {
      manager.updateOptions({ enabled: false });
      const result = await manager.collectAuthentication();
      expect(result).toEqual({ action: 'cancel' });
    });

    it('should collect token authentication', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { method: 'token', token: 'test-token' },
      });

      const result = await manager.collectAuthentication();

      expect(mockServer.elicitInput).toHaveBeenCalledWith({
        message: expect.stringContaining('authentication is not configured'),
        requestedSchema: expect.any(Object),
      });

      expect(result).toEqual({
        action: 'accept',
        content: { method: 'token', token: 'test-token' },
      });
    });

    it('should validate authentication schema', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { method: 'basic', username: 'user', password: 'pass' },
      });

      const result = await manager.collectAuthentication();
      expect(result.action).toBe('accept');
      expect(result.content).toEqual({
        method: 'basic',
        username: 'user',
        password: 'pass',
      });
    });
  });

  describe('collectResolutionComment', () => {
    beforeEach(() => {
      manager.updateOptions({ enabled: true, requireComments: true });
      manager.setServer(mockServer);
    });

    it('should auto-accept when comments not required', async () => {
      manager.updateOptions({ requireComments: false });
      const result = await manager.collectResolutionComment('ISSUE-123', 'false positive');
      expect(result).toEqual({ action: 'accept', content: { comment: '' } });
      expect(mockServer.elicitInput).not.toHaveBeenCalled();
    });

    it('should request comment when required', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { comment: 'This is a test pattern' },
      });

      const result = await manager.collectResolutionComment('ISSUE-123', 'false positive');

      expect(mockServer.elicitInput).toHaveBeenCalledWith({
        message: expect.stringContaining('ISSUE-123'),
        requestedSchema: expect.objectContaining({
          properties: expect.objectContaining({
            comment: expect.objectContaining({
              minLength: 1,
              maxLength: 500,
              type: 'string',
            }),
          }),
        }),
      });

      expect(result).toEqual({
        action: 'accept',
        content: { comment: 'This is a test pattern' },
      });
    });
  });

  describe('disambiguateSelection', () => {
    beforeEach(() => {
      manager.updateOptions({ enabled: true, interactiveSearch: true });
      manager.setServer(mockServer);
    });

    it('should auto-select single item', async () => {
      const items = [{ name: 'Project A', key: 'proj-a' }];
      const result = await manager.disambiguateSelection(items, 'project');
      expect(result).toEqual({ action: 'accept', content: { selection: 'proj-a' } });
    });

    it('should request selection for multiple items', async () => {
      const items = [
        { name: 'Project A', key: 'proj-a' },
        { name: 'Project B', key: 'proj-b' },
        { name: 'Project C', key: 'proj-c' },
      ];

      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { selection: 'proj-b' },
      });

      const result = await manager.disambiguateSelection(items, 'project');

      expect(mockServer.elicitInput).toHaveBeenCalledWith({
        message: expect.stringContaining('Multiple projects found'),
        requestedSchema: expect.objectContaining({
          properties: expect.objectContaining({
            selection: expect.objectContaining({
              enum: ['proj-a', 'proj-b', 'proj-c'],
            }),
          }),
        }),
      });

      expect(result).toEqual({
        action: 'accept',
        content: { selection: 'proj-b' },
      });
    });

    it('should not request when interactive search disabled', async () => {
      manager.updateOptions({ interactiveSearch: false });

      const items = [
        { name: 'Project A', key: 'proj-a' },
        { name: 'Project B', key: 'proj-b' },
      ];

      const result = await manager.disambiguateSelection(items, 'project');
      expect(result).toEqual({ action: 'accept', content: { selection: 'proj-a' } });
      expect(mockServer.elicitInput).not.toHaveBeenCalled();
    });
  });
});

```

--------------------------------------------------------------------------------
/src/utils/elicitation.ts:
--------------------------------------------------------------------------------

```typescript
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { z } from 'zod';
import { zodToJsonSchema } from 'zod-to-json-schema';
import { createLogger } from './logger.js';

export interface ElicitationOptions {
  enabled: boolean;
  bulkOperationThreshold: number;
  requireComments: boolean;
  interactiveSearch: boolean;
}

export interface ElicitationResult<T = unknown> {
  action: 'accept' | 'reject' | 'cancel' | 'decline';
  content?: T;
}

export const confirmationSchema = z.object({
  confirm: z.boolean().describe('Confirm the operation'),
  comment: z.string().max(500).optional().describe('Optional comment'),
});

export const authSchema = z
  .object({
    method: z.enum(['token', 'basic', 'passcode']).describe('Authentication method'),
    token: z.string().optional().describe('SonarQube token (for token auth)'),
    username: z.string().optional().describe('Username (for basic auth)'),
    password: z.string().optional().describe('Password (for basic auth)'),
    passcode: z.string().optional().describe('System passcode'),
  })
  .refine(
    (data) => {
      if (data.method === 'token' && !data.token) return false;
      if (data.method === 'basic' && (!data.username || !data.password)) return false;
      if (data.method === 'passcode' && !data.passcode) return false;
      return true;
    },
    {
      message: 'Required fields missing for selected authentication method',
    }
  );

export class ElicitationManager {
  private server: Server | null = null;
  private options: ElicitationOptions;
  private readonly logger = createLogger('ElicitationManager');

  constructor(options: Partial<ElicitationOptions> = {}) {
    this.options = {
      enabled: false,
      bulkOperationThreshold: 5,
      requireComments: false,
      interactiveSearch: false,
      ...options,
    };
  }

  setServer(server: Server): void {
    this.server = server;
  }

  isEnabled(): boolean {
    return this.options.enabled && this.server !== null;
  }

  getOptions(): ElicitationOptions {
    return { ...this.options };
  }

  updateOptions(updates: Partial<ElicitationOptions>): void {
    this.options = { ...this.options, ...updates };
  }

  async confirmBulkOperation(
    operation: string,
    itemCount: number,
    items?: string[]
  ): Promise<ElicitationResult<z.infer<typeof confirmationSchema>>> {
    if (!this.isEnabled() || itemCount < this.options.bulkOperationThreshold) {
      return { action: 'accept', content: { confirm: true } };
    }

    if (!this.server) {
      throw new Error('ElicitationManager not initialized with server');
    }

    const itemsPreview = items?.slice(0, 5).join(', ');
    const hasMore = items && items.length > 5;

    try {
      let itemsDisplay = '';
      if (itemsPreview) {
        itemsDisplay = `: ${itemsPreview}`;
        if (hasMore) {
          itemsDisplay += ', ...';
        }
      }

      const result = await this.server.elicitInput({
        message: `You are about to ${operation} ${itemCount} items${itemsDisplay}. This action cannot be undone.`,
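        // Spread the generated JSON schema, then pin type/properties to the flat object shape elicitInput expects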
        requestedSchema: {
          ...zodToJsonSchema(confirmationSchema),
          type: 'object' as const,
          // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-member-access
          properties: (zodToJsonSchema(confirmationSchema) as any).properties ?? {},
        },
      });

      if (result.action === 'accept' && result.content) {
        const parsed = confirmationSchema.parse(result.content);
        if (!parsed.confirm) {
          return { action: 'reject' };
        }
        return { action: 'accept', content: parsed };
      }

      return {
        action: result.action,
        content: result.content as z.infer<typeof confirmationSchema>,
      };
    } catch (error) {
      this.logger.error('Elicitation error:', error);
      return { action: 'cancel' };
    }
  }

  async collectAuthentication(): Promise<ElicitationResult<z.infer<typeof authSchema>>> {
    if (!this.isEnabled()) {
      return { action: 'cancel' };
    }

    if (!this.server) {
      throw new Error('ElicitationManager not initialized with server');
    }

    try {
      const result = await this.server.elicitInput({
        message: `SonarQube authentication is not configured. Please provide authentication details:

Available methods:
1. Token authentication (recommended) - Generate a token in SonarQube under User > My Account > Security
2. Basic authentication - Username and password
3. System passcode - For SonarQube instances with system authentication

Which method would you like to use?`,
        requestedSchema: {
          ...zodToJsonSchema(authSchema),
          type: 'object' as const,
          // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-member-access
          properties: (zodToJsonSchema(authSchema) as any).properties ?? {},
        },
      });

      if (result.action === 'accept' && result.content) {
        const parsed = authSchema.parse(result.content);
        return { action: 'accept', content: parsed };
      }

      return {
        action: result.action,
        content: result.content as z.infer<typeof authSchema>,
      };
    } catch (error) {
      this.logger.error('Elicitation error:', error);
      return { action: 'cancel' };
    }
  }

  async collectResolutionComment(
    issueKey: string,
    resolution: string
  ): Promise<ElicitationResult<{ comment: string }>> {
    if (!this.isEnabled() || !this.options.requireComments) {
      return { action: 'accept', content: { comment: '' } };
    }

    if (!this.server) {
      throw new Error('ElicitationManager not initialized with server');
    }

    const commentSchema = z.object({
      comment: z.string().min(1).max(500).describe(`Explanation for marking as ${resolution}`),
    });

    try {
      const result = await this.server.elicitInput({
        message: `Please provide a comment explaining why issue ${issueKey} is being marked as ${resolution}:`,
        requestedSchema: {
          ...zodToJsonSchema(commentSchema),
          type: 'object' as const,
          // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-member-access
          properties: (zodToJsonSchema(commentSchema) as any).properties ?? {},
        },
      });

      if (result.action === 'accept' && result.content) {
        const parsed = commentSchema.parse(result.content);
        return { action: 'accept', content: { comment: parsed.comment } };
      }

      return { action: result.action };
    } catch (error) {
      this.logger.error('Elicitation error:', error);
      return { action: 'cancel' };
    }
  }

  async disambiguateSelection<T extends { name: string; key: string }>(
    items: T[],
    itemType: string
  ): Promise<ElicitationResult<{ selection: string }>> {
    if (!this.isEnabled() || !this.options.interactiveSearch || items.length <= 1) {
      return {
        action: 'accept',
        content: { selection: items[0]?.key || '' },
      };
    }

    if (!this.server) {
      throw new Error('ElicitationManager not initialized with server');
    }

    const selectionSchema = z.object({
      selection: z
        .enum(items.map((item) => item.key) as [string, ...string[]])
        .describe(`Select a ${itemType}`),
    });

    const itemsList = items
      .slice(0, 10)
      .map((item, i) => `${i + 1}. ${item.name} (${item.key})`)
      .join('\n');

    try {
      const result = await this.server.elicitInput({
        message: `Multiple ${itemType}s found. Please select one:\n\n${itemsList}`,
        requestedSchema: {
          ...zodToJsonSchema(selectionSchema),
          type: 'object' as const,
          // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-member-access
          properties: (zodToJsonSchema(selectionSchema) as any).properties ?? {},
        },
      });

      if (result.action === 'accept' && result.content) {
        const parsed = selectionSchema.parse(result.content);
        return { action: 'accept', content: { selection: parsed.selection } };
      }

      return { action: result.action };
    } catch (error) {
      this.logger.error('Elicitation error:', error);
      return { action: 'cancel' };
    }
  }
}

export const createElicitationManager = (
  options?: Partial<ElicitationOptions>
): ElicitationManager => {
  const envEnabled = process.env.SONARQUBE_MCP_ELICITATION === 'true';
  const envThreshold = process.env.SONARQUBE_MCP_BULK_THRESHOLD
    ? Number.parseInt(process.env.SONARQUBE_MCP_BULK_THRESHOLD, 10)
    : undefined;
  const envRequireComments = process.env.SONARQUBE_MCP_REQUIRE_COMMENTS === 'true';
  const envInteractiveSearch = process.env.SONARQUBE_MCP_INTERACTIVE_SEARCH === 'true';

  const managerOptions: Partial<ElicitationOptions> = {
    enabled: envEnabled,
    requireComments: envRequireComments,
    interactiveSearch: envInteractiveSearch,
    ...options,
  };

  if (envThreshold !== undefined) {
    managerOptions.bulkOperationThreshold = envThreshold;
  }

  return new ElicitationManager(managerOptions);
};

```

--------------------------------------------------------------------------------
/src/__tests__/logger.test.ts:
--------------------------------------------------------------------------------

```typescript
import { Logger, LogLevel, createLogger } from '../utils/logger.js';
import fs from 'fs';
import path from 'path';
import os from 'os';

describe('Logger', () => {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'logger-test-'));
  const logFile = path.join(tempDir, 'test.log');
  const originalEnv = process.env;

  beforeEach(() => {
    process.env = { ...originalEnv };
    // Clean up any existing log file
    if (fs.existsSync(logFile)) {
      fs.unlinkSync(logFile);
    }
  });

  afterEach(() => {
    process.env = originalEnv;
  });

  afterAll(() => {
    // Clean up temp directory
    if (fs.existsSync(tempDir)) {
      fs.rmSync(tempDir, { recursive: true });
    }
  });

  describe('Logger initialization', () => {
    it('should create logger with context', () => {
      const logger = new Logger('TestContext');
      expect(logger).toBeDefined();
    });

    it('should create logger without context', () => {
      const logger = new Logger();
      expect(logger).toBeDefined();
    });

    it('should create logger using createLogger helper', () => {
      const logger = createLogger('TestContext');
      expect(logger).toBeDefined();
    });
  });

  describe('Log file initialization', () => {
    it('should create log file when LOG_FILE is set', () => {
      process.env.LOG_FILE = logFile;
      process.env.LOG_LEVEL = 'DEBUG';

      const logger = new Logger();
      logger.debug('test message');

      expect(fs.existsSync(logFile)).toBe(true);
    });

    it('should handle nested directories', () => {
      // Create a nested directory manually to test path parsing
      const nestedDir = path.join(tempDir, 'nested', 'dir');
      fs.mkdirSync(nestedDir, { recursive: true });

      const nestedLogFile = path.join(nestedDir, 'test.log');
      process.env.LOG_FILE = nestedLogFile;
      process.env.LOG_LEVEL = 'DEBUG';

      const logger = new Logger();
      logger.debug('test message');

      // The logger should work with existing nested directories
      expect(fs.existsSync(nestedLogFile)).toBe(true);

      // Clean up
      if (fs.existsSync(path.join(tempDir, 'nested'))) {
        fs.rmSync(path.join(tempDir, 'nested'), { recursive: true });
      }
    });
  });

  describe('Log level filtering', () => {
    beforeEach(() => {
      process.env.LOG_FILE = logFile;
    });

    it('should log DEBUG messages when LOG_LEVEL is DEBUG', () => {
      process.env.LOG_LEVEL = 'DEBUG';
      const logger = new Logger();

      logger.debug('debug message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('DEBUG');
      expect(content).toContain('debug message');
    });

    it('should not log DEBUG messages when LOG_LEVEL is INFO', () => {
      process.env.LOG_LEVEL = 'INFO';
      const logger = new Logger();

      logger.debug('debug message');

      expect(fs.existsSync(logFile)).toBe(false);
    });

    it('should log INFO messages when LOG_LEVEL is INFO', () => {
      process.env.LOG_LEVEL = 'INFO';
      const logger = new Logger();

      logger.info('info message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('INFO');
      expect(content).toContain('info message');
    });

    it('should not log INFO messages when LOG_LEVEL is WARN', () => {
      process.env.LOG_LEVEL = 'WARN';
      const logger = new Logger();

      logger.info('info message');

      expect(fs.existsSync(logFile)).toBe(false);
    });

    it('should log WARN messages when LOG_LEVEL is WARN', () => {
      process.env.LOG_LEVEL = 'WARN';
      const logger = new Logger();

      logger.warn('warn message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('WARN');
      expect(content).toContain('warn message');
    });

    it('should not log WARN messages when LOG_LEVEL is ERROR', () => {
      process.env.LOG_LEVEL = 'ERROR';
      const logger = new Logger();

      logger.warn('warn message');

      expect(fs.existsSync(logFile)).toBe(false);
    });

    it('should log ERROR messages when LOG_LEVEL is ERROR', () => {
      process.env.LOG_LEVEL = 'ERROR';
      const logger = new Logger();

      logger.error('error message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('ERROR');
      expect(content).toContain('error message');
    });

    it('should default to DEBUG level when LOG_LEVEL is not set', () => {
      delete process.env.LOG_LEVEL;
      const logger = new Logger();

      logger.debug('debug message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('DEBUG');
      expect(content).toContain('debug message');
    });
  });

  describe('Log message formatting', () => {
    beforeEach(() => {
      process.env.LOG_FILE = logFile;
      process.env.LOG_LEVEL = 'DEBUG';
    });

    it('should format log message with timestamp, level, and context', () => {
      const logger = new Logger('TestContext');

      logger.debug('test message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toMatch(
        /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z DEBUG \[TestContext\] test message/
      );
    });

    it('should format log message without context', () => {
      const logger = new Logger();

      logger.info('test message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toMatch(/\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z INFO test message/);
    });

    it('should include data in log message', () => {
      const logger = new Logger();
      const data = { key: 'value', number: 42 };

      logger.debug('test message', data);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('test message');
      expect(content).toContain('"key": "value"');
      expect(content).toContain('"number": 42');
    });

    it('should handle Error objects specially', () => {
      const logger = new Logger();
      const error = new Error('Test error');

      logger.error('An error occurred', error);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('ERROR');
      expect(content).toContain('An error occurred');
      expect(content).toContain('Error: Test error');
    });

    it('should handle errors without stack traces', () => {
      const logger = new Logger();
      const error = new Error('Test error');
      delete error.stack;

      logger.error('An error occurred', error);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('Error: Test error');
    });

    it('should handle non-Error objects in error logging', () => {
      const logger = new Logger();
      const errorData = { code: 'ERR_001', message: 'Something went wrong' };

      logger.error('An error occurred', errorData);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('"code": "ERR_001"');
      expect(content).toContain('"message": "Something went wrong"');
    });

    it('should handle circular references in error data', () => {
      const logger = new Logger();
      const obj: Record<string, unknown> = { a: 1 };
      obj.circular = obj;

      logger.error('An error occurred', obj);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('[object Object]');
    });
  });

  describe('No logging when LOG_FILE not set', () => {
    beforeEach(() => {
      delete process.env.LOG_FILE;
      process.env.LOG_LEVEL = 'DEBUG';
    });

    it('should not create log file when LOG_FILE is not set', () => {
      const logger = new Logger();

      logger.debug('debug message');
      logger.info('info message');
      logger.warn('warn message');
      logger.error('error message');

      expect(fs.existsSync(logFile)).toBe(false);
    });
  });

  describe('Multiple log entries', () => {
    it('should append multiple log entries', () => {
      process.env.LOG_FILE = logFile;
      process.env.LOG_LEVEL = 'DEBUG';

      const logger = new Logger();

      logger.debug('first message');
      logger.info('second message');
      logger.warn('third message');
      logger.error('fourth message');

      const content = fs.readFileSync(logFile, 'utf8');
      const lines = content.trim().split('\n');

      expect(lines).toHaveLength(4);
      expect(lines[0]).toContain('DEBUG');
      expect(lines[0]).toContain('first message');
      expect(lines[1]).toContain('INFO');
      expect(lines[1]).toContain('second message');
      expect(lines[2]).toContain('WARN');
      expect(lines[2]).toContain('third message');
      expect(lines[3]).toContain('ERROR');
      expect(lines[3]).toContain('fourth message');
    });
  });
});

describe('LogLevel enum', () => {
  it('should have correct log levels', () => {
    expect(LogLevel.DEBUG).toBe('DEBUG');
    expect(LogLevel.INFO).toBe('INFO');
    expect(LogLevel.WARN).toBe('WARN');
    expect(LogLevel.ERROR).toBe('ERROR');
  });
});

describe('Default logger export', () => {
  it('should export a default logger with SonarQubeMCP context', async () => {
    // Import the default export
    const module = await import('../utils/logger.js');
    const defaultLogger = module.default;
    expect(defaultLogger).toBeDefined();
    expect(defaultLogger).toBeInstanceOf(Logger);
  });
});

```

--------------------------------------------------------------------------------
/scripts/validate-docs.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash
# Documentation validation script for SonarQube MCP Server
# This script validates all documentation files for broken links and code snippets

set -e  # Exit on error

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

echo -e "${GREEN}📚 SonarQube MCP Server - Documentation Validation${NC}"
echo "==================================================="

# Configuration
DOCS_DIR="docs"
README_FILE="README.md"
TEMP_DIR="/tmp/doc-validation-$$"
ERRORS_FOUND=false

# Create temp directory
mkdir -p "$TEMP_DIR"

# Function to check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Check prerequisites
echo -e "\n${YELLOW}📋 Checking prerequisites...${NC}"
if command_exists node; then
    echo -e "✅ Node.js is installed"
else
    echo -e "${YELLOW}⚠️  Node.js not installed. Some checks will be skipped.${NC}"
fi

# Function to validate internal links
validate_internal_links() {
    local file=$1
    local base_dir=$(dirname "$file")
    
    echo -e "\n${BLUE}🔗 Checking internal links in: $(basename $file)${NC}"
    
    # Extract markdown links [text](url); process substitution keeps the loop in
    # the current shell so ERRORS_FOUND updates persist
    while IFS= read -r link; do
        url=$(echo "$link" | sed -E 's/\[[^\]]+\]\(([^)]+)\)/\1/')
        
        # Skip external links
        if [[ "$url" =~ ^https?:// ]]; then
            continue
        fi
        
        # Skip anchors
        if [[ "$url" =~ ^# ]]; then
            continue
        fi
        
        # Remove anchor from URL if present
        file_path=$(echo "$url" | cut -d'#' -f1)
        
        # Resolve relative path
        if [[ "$file_path" =~ ^/ ]]; then
            # Absolute path from project root
            full_path="${file_path#/}"
        else
            # Relative path
            full_path="$base_dir/$file_path"
        fi
        
        # Normalize path (fall back to the unresolved path if the directory cannot be entered)
        if resolved_dir=$(cd "$(dirname "$full_path")" 2>/dev/null && pwd); then
            full_path="$resolved_dir/$(basename "$full_path")"
        fi
        
        # Check if file exists
        if [ ! -f "$full_path" ]; then
            echo -e "  ${RED}✗ Broken link: $url${NC}"
            echo "    Expected file: $full_path"
            ERRORS_FOUND=true
        else
            echo -e "  ${GREEN}✓ Valid link: $url${NC}"
        fi
    done < <(grep -oE '\[([^\]]+)\]\(([^)]+)\)' "$file")
}

# Function to validate code blocks
validate_code_blocks() {
    local file=$1
    
    echo -e "\n${BLUE}💻 Checking code blocks in: $(basename $file)${NC}"
    
    # Extract code blocks with language; encode newlines as literal "\n" so each
    # block occupies a single line of the temp file for the read loop below
    awk '/^```[a-zA-Z]+/{lang=$1; gsub(/```/, "", lang); code="";
         while ((getline line) > 0 && line !~ /^```/) {code=code line "\\n"}
         print lang"|"code}' "$file" > "$TEMP_DIR/code_blocks.txt"
    
    while IFS='|' read -r lang code; do
        if [ -z "$lang" ]; then
            continue
        fi
        # Decode the "\n" escapes back into real newlines before validating
        code=$(printf '%b' "$code")
        
        case "$lang" in
            bash|sh)
                # Validate bash syntax
                if echo "$code" | bash -n 2>/dev/null; then
                    echo -e "  ${GREEN}✓ Valid bash code block${NC}"
                else
                    echo -e "  ${RED}✗ Invalid bash syntax${NC}"
                    ERRORS_FOUND=true
                fi
                ;;
            yaml|yml)
                # Check YAML syntax if yamllint is available
                if command_exists yamllint; then
                    if echo "$code" | yamllint - >/dev/null 2>&1; then
                        echo -e "  ${GREEN}✓ Valid YAML code block${NC}"
                    else
                        echo -e "  ${YELLOW}⚠️  YAML syntax issues${NC}"
                    fi
                else
                    echo -e "  ${BLUE}ℹ️  Skipping YAML validation (yamllint not installed)${NC}"
                fi
                ;;
            json)
                # Validate JSON syntax if jq is available
                if ! command_exists jq; then
                    echo -e "  ${BLUE}ℹ️  Skipping JSON validation (jq not installed)${NC}"
                elif echo "$code" | jq . >/dev/null 2>&1; then
                    echo -e "  ${GREEN}✓ Valid JSON code block${NC}"
                else
                    echo -e "  ${RED}✗ Invalid JSON syntax${NC}"
                    ERRORS_FOUND=true
                fi
                ;;
            typescript|javascript|ts|js)
                echo -e "  ${BLUE}ℹ️  TypeScript/JavaScript code block found${NC}"
                ;;
            *)
                echo -e "  ${BLUE}ℹ️  $lang code block found${NC}"
                ;;
        esac
    done < "$TEMP_DIR/code_blocks.txt"
}

# Function to check for required sections
check_required_sections() {
    local file=$1
    local required_sections=("## Overview" "## Prerequisites" "## Installation" "## Usage")
    
    echo -e "\n${BLUE}📑 Checking required sections in: $(basename $file)${NC}"
    
    for section in "${required_sections[@]}"; do
        if grep -q "^$section" "$file"; then
            echo -e "  ${GREEN}✓ Found section: $section${NC}"
        else
            echo -e "  ${YELLOW}⚠️  Missing section: $section${NC}"
        fi
    done
}

# Function to validate external links (basic check)
validate_external_links() {
    local file=$1
    
    echo -e "\n${BLUE}🌐 Checking external links in: $(basename $file)${NC}"
    
    # Extract external links; process substitution keeps ERRORS_FOUND updates in this shell
    while read -r url; do
        # Remove trailing punctuation
        url=$(echo "$url" | sed 's/[.,;:]$//')
        
        # Basic URL format check
        if [[ "$url" =~ ^https?://[a-zA-Z0-9.-]+\.[a-zA-Z]{2,} ]]; then
            echo -e "  ${GREEN}✓ Valid URL format: $url${NC}"
        else
            echo -e "  ${RED}✗ Invalid URL format: $url${NC}"
            ERRORS_FOUND=true
        fi
    done < <(grep -oE 'https?://[^ )]+' "$file" | sort -u)
}

# Function to check Mermaid diagrams
check_mermaid_diagrams() {
    local file=$1
    
    if grep -q '```mermaid' "$file"; then
        echo -e "\n${BLUE}📊 Checking Mermaid diagrams in: $(basename $file)${NC}"
        
        # Count opening fences and the closing fences that actually terminate a mermaid block
        open_count=$(grep -c '```mermaid' "$file")
        close_count=$(awk '/^```mermaid/{inblock=1; next} /^```/{if (inblock) {closed++; inblock=0}} END{print closed+0}' "$file")
        
        if [ "$open_count" -eq "$close_count" ]; then
            echo -e "  ${GREEN}✓ Mermaid diagrams properly closed${NC}"
        else
            echo -e "  ${RED}✗ Unclosed Mermaid diagram blocks${NC}"
            ERRORS_FOUND=true
        fi
    fi
}

# Main validation
echo -e "\n${YELLOW}🔍 Starting documentation validation...${NC}"

# Validate all markdown files
all_files=()

# Add README if exists
if [ -f "$README_FILE" ]; then
    all_files+=("$README_FILE")
fi

# Add all docs files
if [ -d "$DOCS_DIR" ]; then
    while IFS= read -r -d '' file; do
        all_files+=("$file")
    done < <(find "$DOCS_DIR" -name "*.md" -print0)
fi

# Process each file
for file in "${all_files[@]}"; do
    echo -e "\n${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
    echo -e "${YELLOW}📄 Validating: $file${NC}"
    echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
    
    validate_internal_links "$file"
    validate_code_blocks "$file"
    validate_external_links "$file"
    check_mermaid_diagrams "$file"
    
    # Check required sections for main docs
    if [[ "$file" =~ (deployment|architecture|api-reference)\.md$ ]]; then
        check_required_sections "$file"
    fi
done

# Additional checks
echo -e "\n${YELLOW}📊 Running additional checks...${NC}"

# Check for orphaned images
if [ -d "$DOCS_DIR/images" ] || [ -d "images" ]; then
    echo -e "\n${BLUE}🖼️  Checking for orphaned images...${NC}"
    
    for img_dir in "$DOCS_DIR/images" "images"; do
        if [ -d "$img_dir" ]; then
            find "$img_dir" -type f \( -name "*.png" -o -name "*.jpg" -o -name "*.svg" \) | while read -r img; do
                img_name=$(basename "$img")
                if ! grep -r "$img_name" "$DOCS_DIR" "$README_FILE" 2>/dev/null | grep -v Binary >/dev/null; then
                    echo -e "  ${YELLOW}⚠️  Potentially orphaned image: $img${NC}"
                fi
            done
        fi
    done
fi

# Check for TODOs in documentation
echo -e "\n${BLUE}📝 Checking for TODOs...${NC}"
todo_count=$(grep "TODO\|FIXME\|XXX" "${all_files[@]}" /dev/null 2>/dev/null | wc -l)
if [ "$todo_count" -gt 0 ]; then
    echo -e "  ${YELLOW}⚠️  Found $todo_count TODO/FIXME/XXX markers${NC}"
    grep -n "TODO\|FIXME\|XXX" "${all_files[@]}" 2>/dev/null | head -5
else
    echo -e "  ${GREEN}✓ No TODOs found${NC}"
fi

# Check documentation structure
echo -e "\n${BLUE}📁 Checking documentation structure...${NC}"
expected_docs=(
    "docs/api-reference.md"
    "docs/architecture.md"
    "docs/deployment.md"
    "docs/security.md"
    "docs/troubleshooting.md"
)

for doc in "${expected_docs[@]}"; do
    if [ -f "$doc" ]; then
        echo -e "  ${GREEN}✓ Found: $doc${NC}"
    else
        echo -e "  ${RED}✗ Missing: $doc${NC}"
        ERRORS_FOUND=true
    fi
done

# Cleanup
rm -rf "$TEMP_DIR"

# Final summary
echo -e "\n==================================================="
if [ "$ERRORS_FOUND" = false ]; then
    echo -e "${GREEN}✅ Documentation validation passed!${NC}"
    echo -e "\n${YELLOW}Documentation is well-structured with:${NC}"
    echo "  - Valid internal links"
    echo "  - Syntactically correct code examples"
    echo "  - Proper formatting"
    echo "  - Complete structure"
else
    echo -e "${RED}❌ Documentation validation found issues${NC}"
    echo -e "${YELLOW}Please fix the errors above${NC}"
    exit 1
fi
```

--------------------------------------------------------------------------------
/src/__tests__/schema-parameter-transforms.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, vi } from 'vitest';
import { z } from 'zod';
import * as indexModule from '../index.js';
import { ISonarQubeClient } from '../sonarqube.js';
// Pull in the nullToUndefined utility that the mock handlers below rely on
const nullToUndefined = indexModule.nullToUndefined;
// Create a mock client
const mockClient: Partial<ISonarQubeClient> = {
  getMetrics: vi.fn<() => Promise<any>>().mockResolvedValue({
    metrics: [{ id: '1', key: 'test', name: 'Test Metric' }],
    paging: { pageIndex: 2, pageSize: 5, total: 10 },
  }),
  getIssues: vi.fn<() => Promise<any>>().mockResolvedValue({
    issues: [{ key: 'issue-1', rule: 'rule-1', severity: 'MAJOR' }],
    paging: { pageIndex: 1, pageSize: 10, total: 1 },
  }),
  getComponentMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
    component: { key: 'comp-1', measures: [{ metric: 'coverage', value: '75.0' }] },
    metrics: [{ key: 'coverage', name: 'Coverage' }],
  }),
  getComponentsMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
    components: [
      { key: 'comp-1', measures: [{ metric: 'coverage', value: '75.0' }] },
      { key: 'comp-2', measures: [{ metric: 'coverage', value: '85.0' }] },
    ],
    metrics: [{ key: 'coverage', name: 'Coverage' }],
    paging: { pageIndex: 1, pageSize: 10, total: 2 },
  }),
  getMeasuresHistory: vi.fn<() => Promise<any>>().mockResolvedValue({
    measures: [
      {
        metric: 'coverage',
        history: [
          { date: '2023-01-01', value: '70.0' },
          { date: '2023-02-01', value: '75.0' },
          { date: '2023-03-01', value: '80.0' },
        ],
      },
    ],
    paging: { pageIndex: 1, pageSize: 10, total: 1 },
  }),
};
// Mock handlers that don't actually call the HTTP methods
const mockMetricsHandler = async (params: { page: number | null; page_size: number | null }) => {
  const mockResult = await (mockClient as ISonarQubeClient).getMetrics({
    page: nullToUndefined(params.page),
    pageSize: nullToUndefined(params.page_size),
  });
  return {
    content: [
      {
        type: 'text' as const,
        text: JSON.stringify(mockResult, null, 2),
      },
    ],
  };
};
const mockIssuesHandler = async (params: Record<string, unknown>) => {
  const mockResult = await (mockClient as ISonarQubeClient).getIssues(params as any);
  return {
    content: [
      {
        type: 'text' as const,
        text: JSON.stringify(mockResult, null, 2),
      },
    ],
  };
};
const mockComponentMeasuresHandler = async (params: Record<string, unknown>) => {
  const mockResult = await (mockClient as ISonarQubeClient).getComponentMeasures(params as any);
  return {
    content: [
      {
        type: 'text' as const,
        text: JSON.stringify(mockResult, null, 2),
      },
    ],
  };
};
const mockComponentsMeasuresHandler = async (params: Record<string, unknown>) => {
  const mockResult = await (mockClient as ISonarQubeClient).getComponentsMeasures(params as any);
  return {
    content: [
      {
        type: 'text' as const,
        text: JSON.stringify(mockResult, null, 2),
      },
    ],
  };
};
const mockMeasuresHistoryHandler = async (params: Record<string, unknown>) => {
  const mockResult = await (mockClient as ISonarQubeClient).getMeasuresHistory(params as any);
  return {
    content: [
      {
        type: 'text' as const,
        text: JSON.stringify(mockResult, null, 2),
      },
    ],
  };
};
// Helper function to test string to number parameter transformations (not used directly)
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function testNumberTransform(transformFn: (val: string | undefined) => number | null | undefined) {
  // Valid number
  expect(transformFn('10')).toBe(10);
  // Empty string should return null
  expect(transformFn('')).toBe(null);
  // Invalid number should return null
  expect(transformFn('abc')).toBe(null);
  // Undefined should return undefined
  expect(transformFn(undefined)).toBe(undefined);
}
describe('Schema Parameter Transformations', () => {
  describe('Number Transformations', () => {
    it('should transform string numbers to integers or null', () => {
      // Create a schema with number transformation
      const schema = z
        .string()
        .optional()
        .transform((val: any) => (val ? parseInt(val, 10) || null : null));
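      // Note: a value parsing to 0 also maps to null here, since 0 is falsy under the || fallback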
      // Test the transformation
      expect(schema.parse('10')).toBe(10);
      expect(schema.parse('')).toBe(null);
      expect(schema.parse('abc')).toBe(null);
      expect(schema.parse(undefined)).toBe(null);
    });
  });
  describe('Boolean Transformations', () => {
    it('should transform string booleans to boolean values', () => {
      // Create a schema with boolean transformation
      const schema = z
        .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
        .nullable()
        .optional();
      // Test the transformation
      expect(schema.parse('true')).toBe(true);
      expect(schema.parse('false')).toBe(false);
      expect(schema.parse(true)).toBe(true);
      expect(schema.parse(false)).toBe(false);
      expect(schema.parse(null)).toBe(null);
      expect(schema.parse(undefined)).toBe(undefined);
    });
  });
  describe('Parameter Transformations for Lambda Functions', () => {
    it('should handle nullToUndefined utility function', () => {
      expect(nullToUndefined(null)).toBeUndefined();
      expect(nullToUndefined(undefined)).toBeUndefined();
      expect(nullToUndefined(0)).toBe(0);
      expect(nullToUndefined('')).toBe('');
      expect(nullToUndefined('test')).toBe('test');
      expect(nullToUndefined(10)).toBe(10);
      expect(nullToUndefined(false)).toBe(false);
      expect(nullToUndefined(true)).toBe(true);
    });
    it('should handle metrics handler with string parameters', async () => {
      const result = await mockMetricsHandler({ page: null, page_size: null });
      // Verify the result structure
      expect(result).toHaveProperty('content');
      expect(result.content[0]).toHaveProperty('type', 'text');
      expect(result.content[0]).toHaveProperty('text');
      // Verify the result content
      const data = JSON.parse(result.content[0]?.text as string);
      expect(data).toHaveProperty('metrics');
      expect(data.metrics?.[0]).toHaveProperty('key', 'test');
    });
    it('should handle issues with complex parameters', async () => {
      const result = await mockIssuesHandler({
        project_key: 'test-project',
        severity: 'MAJOR',
        page: '1',
        page_size: '10',
        statuses: ['OPEN', 'CONFIRMED'],
        resolved: 'true',
        types: ['BUG', 'VULNERABILITY'],
        rules: ['rule1', 'rule2'],
        tags: ['tag1', 'tag2'],
        created_after: '2023-01-01',
        on_component_only: 'true',
        since_leak_period: 'true',
        in_new_code_period: 'true',
      });
      // Verify the result structure
      expect(result).toHaveProperty('content');
      expect(result.content[0]).toHaveProperty('type', 'text');
      expect(result.content[0]).toHaveProperty('text');
      // Verify the result content
      const data = JSON.parse(result.content[0]?.text as string);
      expect(data).toHaveProperty('issues');
      expect(data.issues?.[0]).toHaveProperty('key', 'issue-1');
    });
    it('should handle component measures with parameters', async () => {
      const result = await mockComponentMeasuresHandler({
        component: 'comp-1',
        metric_keys: ['coverage'],
        branch: 'main',
        period: '1',
        additional_fields: ['metrics'],
      });
      // Verify the result structure
      expect(result).toHaveProperty('content');
      expect(result.content[0]).toHaveProperty('type', 'text');
      expect(result.content[0]).toHaveProperty('text');
      // Verify the result content
      const data = JSON.parse(result.content[0]?.text as string);
      expect(data).toHaveProperty('component');
      expect(data.component).toHaveProperty('key', 'comp-1');
    });
    it('should handle components measures with parameters', async () => {
      const result = await mockComponentsMeasuresHandler({
        component_keys: ['comp-1', 'comp-2'],
        metric_keys: ['coverage'],
        branch: 'main',
        page: '1',
        page_size: '10',
        additional_fields: ['metrics'],
      });
      // Verify the result structure
      expect(result).toHaveProperty('content');
      expect(result.content[0]).toHaveProperty('type', 'text');
      expect(result.content[0]).toHaveProperty('text');
      // Verify the result content
      const data = JSON.parse(result.content[0]?.text as string);
      expect(data).toHaveProperty('components');
      expect(data.components).toHaveLength(2);
      expect(data.components?.[0]).toHaveProperty('key', 'comp-1');
    });
    it('should handle measures history with parameters', async () => {
      const result = await mockMeasuresHistoryHandler({
        component: 'comp-1',
        metrics: ['coverage'],
        from: '2023-01-01',
        to: '2023-03-01',
        page: '1',
        page_size: '10',
      });
      // Verify the result structure
      expect(result).toHaveProperty('content');
      expect(result.content[0]).toHaveProperty('type', 'text');
      expect(result.content[0]).toHaveProperty('text');
      // Verify the result content
      const data = JSON.parse(result.content[0]?.text as string);
      expect(data).toHaveProperty('measures');
      expect(data.measures?.[0]).toHaveProperty('metric', 'coverage');
      expect(data.measures?.[0]?.history).toHaveLength(3);
    });
  });
});

```

--------------------------------------------------------------------------------
/docs/architecture/decisions/0023-release-management-with-changesets.md:
--------------------------------------------------------------------------------

```markdown
# 23. Release Management with Changesets

Date: 2025-10-11

## Status

Accepted

## Context

The SonarQube MCP Server requires a systematic approach to version management, changelog generation, and release automation. Manual versioning and changelog maintenance are error-prone and time-consuming. The project needs:

- Automated semantic versioning based on change significance
- Human-readable changelogs generated from commit history
- Integration with GitHub releases for public visibility
- Prevention of releases without documented changes
- Developer-friendly workflow for documenting changes
- Support for multiple contributors documenting changes simultaneously

Traditional approaches have limitations:

- **Manual versioning**: Error-prone, requires manual package.json updates
- **Conventional commits alone**: Doesn't capture change intent or impact
- **Standard-version/semantic-release**: Less flexible, harder to customize changelog format
- **Manual changelogs**: Time-consuming, inconsistent format, often outdated

## Decision

We will use **Changesets** (@changesets/cli) for release management, version control, and changelog generation.

### Core Workflow

1. **Developer creates changeset** when making impactful changes:

   ```bash
   pnpm changeset
   ```

   - Interactive CLI prompts for change type (major/minor/patch)
   - Developer writes human-readable summary
   - Creates markdown file in `.changeset/` directory

2. **CI/CD validates changesets** on pull requests:
   - Custom script (`.github/scripts/version-and-release.js`) checks for changesets
   - Fails if feat/fix commits exist without corresponding changesets
   - Ensures all significant changes are documented

3. **Automated versioning** on main branch:
   - Script determines version bump from accumulated changesets
   - Updates `package.json` version
   - Generates/updates `CHANGELOG.md` with all changeset summaries
   - Commits changes with `[skip actions]` to prevent workflow recursion

4. **Release creation**:
   - GitHub Actions creates Git tag
   - Creates GitHub release with changelog excerpt
   - Publishes to NPM and Docker registries

### Configuration

**.changeset/config.json**:

```json
{
  "$schema": "https://unpkg.com/@changesets/[email protected]/schema.json",
  "changelog": [
    "changelog-github-custom",
    {
      "repo": "sapientpants/sonarqube-mcp-server"
    }
  ],
  "commit": false,
  "access": "public",
  "baseBranch": "main"
}
```

**Key settings**:

- `changelog-github-custom`: Custom GitHub changelog generator
- `commit: false`: CI handles commits (prevents double-commits)
- `access: public`: NPM package is public
- `baseBranch: main`: Releases from main branch only

### Changeset Types

**Major (breaking changes)**:

```bash
pnpm changeset
# Select: major
# Example: "Renamed `health()` to `getHealthV2()` (breaking API change)"
```

**Minor (new features)**:

```bash
pnpm changeset
# Select: minor
# Example: "Added support for multi-platform Docker images"
```

**Patch (bug fixes, docs, chores)**:

```bash
pnpm changeset
# Select: patch
# Example: "Fixed Docker publishing permissions issue"
```

### Validation Logic

The custom validation script (`.github/scripts/version-and-release.js`) enforces:

1. **Commit type analysis**: Checks commit messages for `feat:` or `fix:` prefixes
2. **Changeset requirement**: Fails if feat/fix commits exist without changesets
3. **Version determination**: Aggregates changesets to determine final version bump
4. **Outputs**: Sets GitHub Actions outputs for downstream jobs

**Example validation failure**:

```
Error: Found feat/fix commits without changesets:
  - feat: add new tool for hotspot analysis
  - fix: resolve authentication timeout issue

Please create changesets with: pnpm changeset
```

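A minimal sketch of the check's core idea (hypothetical helper logic; the real implementation lives in `.github/scripts/version-and-release.js`):

```typescript
import { execSync } from 'node:child_process';
import { readdirSync } from 'node:fs';

// Commit subjects since the last release tag (assumes "v*" release tags exist).
const commits = execSync('git log "$(git describe --tags --abbrev=0)"..HEAD --pretty=%s')
  .toString()
  .split('\n')
  .filter(Boolean);

// feat:/fix: commits require at least one pending changeset.
const impactful = commits.filter((s) => /^(feat|fix)(\(.+\))?!?:/.test(s));
const changesets = readdirSync('.changeset').filter((f) => f.endsWith('.md') && f !== 'README.md');

if (impactful.length > 0 && changesets.length === 0) {
  console.error('Found feat/fix commits without changesets:');
  impactful.forEach((s) => console.error(`  - ${s}`));
  process.exit(1);
}
```
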
### CI/CD Integration

**Main workflow (.github/workflows/main.yml)**:

```yaml
- name: Version packages
  id: version
  env:
    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  run: |
    node .github/scripts/version-and-release.js

- name: Commit version changes
  if: steps.version.outputs.changed == 'true'
  run: |
    git config --local user.email "${{ github.actor_id }}+${{ github.actor }}@users.noreply.github.com"
    git config --local user.name "${{ github.actor }}"
    git add package.json CHANGELOG.md .changeset
    git commit -m "chore(release): v${{ steps.version.outputs.version }} [skip actions]"
    git push origin main
```

**PR workflow (.github/workflows/pr.yml)**:

```yaml
- name: Check for changesets
  run: |
    node .github/scripts/version-and-release.js
    # Fails PR if changesets missing for feat/fix commits
```

## Consequences

### Positive

- **Semantic Versioning**: Automatic version bumps based on change impact
- **Human-Readable Changelogs**: Developers write clear summaries rather than relying on parsed commit messages
- **Change Documentation**: Every significant change documented before merge
- **Parallel Development**: Multiple contributors can add changesets simultaneously
- **GitHub Integration**: Custom formatter creates GitHub-friendly changelogs
- **Release Automation**: Complete automation from changeset to GitHub release
- **Flexibility**: Custom validation script enforces project-specific rules
- **Monorepo Ready**: Changesets scales to monorepos (though not needed here)
- **Review-Friendly**: Changesets visible in PR for review
- **Version Planning**: Aggregate view of pending version bump

### Negative

- **Extra Step**: Developers must remember to create changesets
- **Learning Curve**: New contributors need to learn changeset workflow
- **CI Complexity**: Custom validation script adds maintenance burden
- **Commit Noise**: Creates `.changeset/*.md` files in version control
- **Manual Intervention**: Sometimes requires manual conflict resolution in CHANGELOG.md
- **Validation Strictness**: May block non-impactful PRs if validation is too strict

### Neutral

- **Commit Message Format**: Still benefits from conventional commits for context
- **Multiple Files**: Changesets create multiple small markdown files
- **Git History**: Version bump commits separate from feature commits
- **Changeset Cleanup**: `.changeset/*.md` files deleted after version bump

## Implementation

### Developer Workflow

**Adding a changeset**:

```bash
# 1. Make your code changes
git add .

# 2. Create a changeset (before committing)
pnpm changeset

# Interactive prompts:
# - Select change type (major/minor/patch)
# - Write summary: Clear, user-facing description
# - Confirm

# 3. Commit everything together
git commit -m "feat: add new feature

This adds support for...

Closes #123"

# Changeset is now part of the PR
```

**Example changeset file** (`.changeset/cool-feature.md`):

```markdown
---
'sonarqube-mcp-server': minor
---

Added support for security hotspot status updates through new MCP tool
```

### Maintainer Workflow

**Releasing a version**:

1. Merge PRs with changesets to main
2. CI automatically:
   - Aggregates changesets
   - Bumps version in package.json
   - Updates CHANGELOG.md
   - Commits and pushes
   - Creates Git tag
   - Creates GitHub release
   - Publishes to registries

**Manual release** (rare):

```bash
# Generate version bump and changelog
pnpm changeset version

# Review changes
git diff package.json CHANGELOG.md

# Commit
git add -A
git commit -m "chore(release): version packages"

# Create tag
git tag -a "v$(node -p "require('./package.json').version")" -m "Release"

# Push
git push && git push --tags
```

### Handling Empty Changesets

For non-impactful changes (docs, tests, refactoring), create an empty changeset:

```bash
pnpm changeset --empty
```

This satisfies the validation check without bumping the version or adding a changelog entry.
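
The generated file contains only an empty frontmatter block (no package entry, no summary), which `changeset version` then consumes without producing a release:

```markdown
---
---
```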

### Changeset Status

Check pending changesets:

```bash
pnpm changeset status

# Output:
# Changes to be included in next release:
#   minor: Added security hotspot tools
#   patch: Fixed authentication timeout
#   patch: Updated documentation
#
# Suggested version bump: minor (current: 1.10.18, next: 1.11.0)
```

## Examples

### Example 1: Adding a Feature

**Pull request with changeset**:

File: `.changeset/add-quality-gate-tool.md`

```markdown
---
'sonarqube-mcp-server': minor
---

Added new `quality_gate_status` tool to check project quality gate status directly from the MCP server
```

**Generated CHANGELOG.md entry**:

```markdown
## 1.11.0

### Minor Changes

- abc1234: Added new `quality_gate_status` tool to check project quality gate status directly from the MCP server
```

### Example 2: Fixing a Bug

**Pull request with changeset**:

File: `.changeset/fix-docker-permissions.md`

```markdown
---
'sonarqube-mcp-server': patch
---

Fixed Docker publishing workflow failing due to missing `packages:write` permission
```

**Generated CHANGELOG.md entry**:

```markdown
## 1.10.19

### Patch Changes

- def5678: Fixed Docker publishing workflow failing due to missing `packages:write` permission
```

### Example 3: Multiple Changes

**Three PRs merged with changesets**:

1. `.changeset/feat-hotspots.md` (minor)
2. `.changeset/fix-auth.md` (patch)
3. `.changeset/docs-update.md` (patch)

**Resulting version bump**: 1.10.18 → 1.11.0 (minor wins)

**Generated CHANGELOG.md**:

```markdown
## 1.11.0

### Minor Changes

- abc1234: Added security hotspot management tools with status update capabilities

### Patch Changes

- def5678: Fixed authentication timeout in circuit breaker
- ghi9012: Updated API documentation with new examples
```
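
**Aggregation sketch** (not the project's code; the rule is simply that the strongest bump type among pending changesets wins):

```typescript
type Bump = 'patch' | 'minor' | 'major';

const rank: Record<Bump, number> = { patch: 0, minor: 1, major: 2 };

// The strongest bump among pending changesets wins; no changesets, no release.
function aggregateBump(bumps: Bump[]): Bump | null {
  return bumps.length === 0
    ? null
    : bumps.reduce((max, b) => (rank[b] > rank[max] ? b : max));
}

// Example 3 above: one minor and two patch changesets → 'minor' (1.10.18 → 1.11.0)
console.log(aggregateBump(['minor', 'patch', 'patch']));
```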

## References

- Changesets Documentation: https://github.com/changesets/changesets
- Configuration: .changeset/config.json
- Validation Script: .github/scripts/version-and-release.js
- CI/CD Integration: .github/workflows/main.yml
- Changelog Format: changelog-github-custom package
- Package Scripts: package.json (changeset, changeset:status commands)

```

--------------------------------------------------------------------------------
/src/monitoring/__tests__/circuit-breaker.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest';
import { CircuitBreakerFactory } from '../circuit-breaker.js';
import { getMetricsService, cleanupMetricsService } from '../metrics.js';

describe('Circuit Breaker', () => {
  let mockFn: ReturnType<typeof vi.fn<(...args: unknown[]) => Promise<unknown>>>;
  let metricsService: ReturnType<typeof getMetricsService>;

  beforeEach(() => {
    // Reset circuit breaker factory
    CircuitBreakerFactory.reset();
    cleanupMetricsService();
    metricsService = getMetricsService();

    mockFn = vi.fn<(...args: unknown[]) => Promise<unknown>>();
  });

  afterEach(() => {
    CircuitBreakerFactory.reset();
    cleanupMetricsService();
  });

  describe('Basic functionality', () => {
    it('should execute function when circuit is closed', async () => {
      mockFn.mockResolvedValue('success');

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
      const result = await breaker.fire();

      expect(result).toBe('success');
      expect(mockFn).toHaveBeenCalledTimes(1);
    });

    it('should pass arguments to the wrapped function', async () => {
      mockFn.mockImplementation((...args: unknown[]) => Promise.resolve(`${args[0]}-${args[1]}`));

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
      const result = await breaker.fire('test', 123);

      expect(result).toBe('test-123');
      expect(mockFn).toHaveBeenCalledWith('test', 123);
    });

    it('should reuse the same breaker for the same name', () => {
      const breaker1 = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
      const breaker2 = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);

      expect(breaker1).toBe(breaker2);
    });

    it('should create different breakers for different names', () => {
      const breaker1 = CircuitBreakerFactory.getBreaker('breaker-1', mockFn);
      const breaker2 = CircuitBreakerFactory.getBreaker('breaker-2', mockFn);

      expect(breaker1).not.toBe(breaker2);
    });
  });

  describe('Circuit opening behavior', () => {
    it('should open circuit after threshold failures', async () => {
      mockFn.mockRejectedValue(new Error('Service unavailable'));

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
        errorThresholdPercentage: 50,
        resetTimeout: 100,
        volumeThreshold: 2,
      });

      // First two failures should open the circuit
      await expect(breaker.fire()).rejects.toThrow('Service unavailable');
      await expect(breaker.fire()).rejects.toThrow('Service unavailable');

      // Circuit should now be open
      await expect(breaker.fire()).rejects.toThrow('Breaker is open');

      // Function should not be called when circuit is open
      expect(mockFn).toHaveBeenCalledTimes(2);
    });

    it('should not open circuit if failures are below threshold', async () => {
      let callCount = 0;
      mockFn.mockImplementation(() => {
        callCount++;
        if (callCount === 1) {
          return Promise.reject(new Error('Temporary failure'));
        }
        return Promise.resolve('success');
      });

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
        errorThresholdPercentage: 50,
        resetTimeout: 100,
        volumeThreshold: 3,
      });

      // One failure, two successes - should not open
      await expect(breaker.fire()).rejects.toThrow('Temporary failure');
      await expect(breaker.fire()).resolves.toBe('success');
      await expect(breaker.fire()).resolves.toBe('success');

      // Circuit should still be closed
      await expect(breaker.fire()).resolves.toBe('success');
    });
  });

  describe('Circuit recovery behavior', () => {
    it('should move to half-open state after timeout', async () => {
      mockFn.mockRejectedValueOnce(new Error('Failure 1'));
      mockFn.mockRejectedValueOnce(new Error('Failure 2'));
      mockFn.mockResolvedValue('recovered');

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
        errorThresholdPercentage: 50,
        resetTimeout: 50, // 50ms reset timeout
        volumeThreshold: 2,
      });

      // Open the circuit
      await expect(breaker.fire()).rejects.toThrow('Failure 1');
      await expect(breaker.fire()).rejects.toThrow('Failure 2');

      // Circuit is open
      await expect(breaker.fire()).rejects.toThrow('Breaker is open');

      // Wait for reset timeout
      await new Promise((resolve) => setTimeout(resolve, 60));

      // Circuit should be half-open, allowing one request
      await expect(breaker.fire()).resolves.toBe('recovered');

      // Circuit should be closed again
      await expect(breaker.fire()).resolves.toBe('recovered');
    });

    it('should re-open circuit if half-open test fails', async () => {
      mockFn.mockRejectedValue(new Error('Persistent failure'));

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
        errorThresholdPercentage: 50,
        resetTimeout: 50,
        volumeThreshold: 2,
      });

      // Open the circuit
      await expect(breaker.fire()).rejects.toThrow('Persistent failure');
      await expect(breaker.fire()).rejects.toThrow('Persistent failure');

      // Wait for reset timeout
      await new Promise((resolve) => setTimeout(resolve, 60));

      // Half-open test should fail and re-open circuit
      await expect(breaker.fire()).rejects.toThrow('Persistent failure');

      // Circuit should be open again
      await expect(breaker.fire()).rejects.toThrow('Breaker is open');
    });
  });

  describe('Metrics integration', () => {
    it('should track circuit breaker metrics', async () => {
      mockFn.mockResolvedValueOnce('success');
      mockFn.mockRejectedValueOnce(new Error('failure'));
      mockFn.mockResolvedValueOnce('success');

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);

      await breaker.fire();
      await expect(breaker.fire()).rejects.toThrow('failure');
      await breaker.fire();

      const metrics = metricsService.getMetrics();

      // Check for circuit breaker metrics - the breaker tracks failures
      expect(metrics).toContain('mcp_circuit_breaker_failures_total{service="test-breaker"} 1');
    });

    it('should track circuit state changes', async () => {
      mockFn.mockRejectedValue(new Error('Service down'));

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
        errorThresholdPercentage: 50,
        resetTimeout: 50,
        volumeThreshold: 2,
      });

      // Open the circuit
      await expect(breaker.fire()).rejects.toThrow();
      await expect(breaker.fire()).rejects.toThrow();

      // Check metrics for open state
      const metrics = metricsService.getMetrics();
      expect(metrics).toContain('mcp_circuit_breaker_state{service="test-breaker"} 1');
    });
  });

  describe('Custom options', () => {
    it('should respect custom timeout', async () => {
      let timeoutId: NodeJS.Timeout | undefined;
      mockFn.mockImplementation(
        () =>
          new Promise((resolve) => {
            timeoutId = setTimeout(() => resolve('slow'), 200);
          })
      );

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
        timeout: 100, // 100ms timeout
      });

      // Should timeout
      await expect(breaker.fire()).rejects.toThrow('Timed out');

      // Clean up the timeout to prevent open handle
      if (timeoutId) {
        clearTimeout(timeoutId);
      }
    });

    it('should respect custom error filter', async () => {
      // The errorFilter should return true for errors that should be counted
      mockFn.mockRejectedValueOnce(new Error('Network error'));
      mockFn.mockRejectedValueOnce(new Error('Timeout error'));
      mockFn.mockResolvedValue('success');

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
        errorThresholdPercentage: 50,
        volumeThreshold: 2,
        // Only count network errors toward circuit opening
        errorFilter: (err: Error) => err.message.includes('Network'),
      });

      // Network error should count
      await expect(breaker.fire()).rejects.toThrow('Network error');

      // Timeout error should NOT count (filtered out)
      await expect(breaker.fire()).rejects.toThrow('Timeout error');

      // Circuit should still be closed because only 1 error counted
      await expect(breaker.fire()).resolves.toBe('success');
    });
  });

  describe('Error handling', () => {
    it('should handle synchronous errors', async () => {
      mockFn.mockImplementation(() => {
        throw new Error('Sync error');
      });

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);

      await expect(breaker.fire()).rejects.toThrow('Sync error');
    });

    it('should handle different error types', async () => {
      const customError = { code: 'CUSTOM_ERROR', message: 'Custom error' };
      mockFn.mockRejectedValue(customError);

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);

      await expect(breaker.fire()).rejects.toEqual(customError);
    });
  });

  describe('Concurrency', () => {
    it('should handle concurrent requests', async () => {
      let resolveCount = 0;
      mockFn.mockImplementation(async () => {
        await new Promise((resolve) => setTimeout(resolve, 10));
        resolveCount++;
        return `result-${resolveCount}`;
      });

      const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);

      // Fire multiple concurrent requests
      const results = await Promise.all([breaker.fire(), breaker.fire(), breaker.fire()]);

      expect(results).toHaveLength(3);
      expect(mockFn).toHaveBeenCalledTimes(3);
    });
  });
});

```
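
For context, a condensed sketch of the factory these tests exercise. The real implementation lives in `src/monitoring/circuit-breaker.ts` and, per ADR 0026, wraps the `opossum` library; the option names below are opossum's own, while the registry shape is an assumption inferred from the tests:

```typescript
import CircuitBreaker from 'opossum';

type AsyncFn = (...args: unknown[]) => Promise<unknown>;

// Options forwarded straight to opossum; all four are native opossum options.
interface BreakerOptions {
  timeout?: number; // ms before an in-flight call rejects with 'Timed out...'
  errorThresholdPercentage?: number; // failure percentage that opens the circuit
  resetTimeout?: number; // ms spent open before a half-open probe is allowed
  volumeThreshold?: number; // minimum calls before the threshold is applied
}

export class CircuitBreakerFactory {
  private static readonly registry = new Map<string, CircuitBreaker>();

  // Same name returns the same breaker; different names get independent circuits.
  static getBreaker(name: string, fn: AsyncFn, options: BreakerOptions = {}): CircuitBreaker {
    let breaker = this.registry.get(name);
    if (!breaker) {
      breaker = new CircuitBreaker(fn, { name, ...options });
      this.registry.set(name, breaker);
    }
    return breaker;
  }

  // Used by beforeEach/afterEach above to isolate breaker state between tests.
  static reset(): void {
    this.registry.forEach((breaker) => breaker.shutdown());
    this.registry.clear();
  }
}
```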

--------------------------------------------------------------------------------
/src/schemas/issues.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';
import { stringToNumberTransform, parseJsonStringArray } from '../utils/transforms.js';
import {
  severitySchema,
  severitiesSchema,
  statusSchema,
  resolutionSchema,
  typeSchema,
  cleanCodeAttributeCategoriesSchema,
  impactSeveritiesSchema,
  impactSoftwareQualitiesSchema,
  pullRequestNullableSchema,
} from './common.js';

/**
 * Schema for mark issue false positive tool
 */
export const markIssueFalsePositiveToolSchema = {
  issue_key: z.string().describe('The key of the issue to mark as false positive'),
  comment: z
    .string()
    .optional()
    .describe('Optional comment explaining why this is a false positive'),
};

/**
 * Schema for mark issue won't fix tool
 */
export const markIssueWontFixToolSchema = {
  issue_key: z.string().describe("The key of the issue to mark as won't fix"),
  comment: z.string().optional().describe("Optional comment explaining why this won't be fixed"),
};

/**
 * Schema for mark issues false positive (bulk) tool
 */
export const markIssuesFalsePositiveToolSchema = {
  issue_keys: z
    .array(z.string())
    .min(1, 'At least one issue key is required')
    .describe('Array of issue keys to mark as false positive'),
  comment: z
    .string()
    .optional()
    .describe('Optional comment explaining why these are false positives'),
};

/**
 * Schema for mark issues won't fix (bulk) tool
 */
export const markIssuesWontFixToolSchema = {
  issue_keys: z
    .array(z.string())
    .min(1, 'At least one issue key is required')
    .describe("Array of issue keys to mark as won't fix"),
  comment: z.string().optional().describe("Optional comment explaining why these won't be fixed"),
};

/**
 * Schema for add comment to issue tool
 */
export const addCommentToIssueToolSchema = {
  issue_key: z
    .string()
    .min(1, 'Issue key is required')
    .describe('The key of the issue to add a comment to'),
  text: z
    .string()
    .min(1, 'Comment text is required')
    .describe('The comment text to add. Supports markdown formatting for rich text content'),
};

/**
 * Schema for assign issue tool
 */
export const assignIssueToolSchema = {
  issueKey: z.string().min(1, 'Issue key is required').describe('The key of the issue to assign'),
  assignee: z
    .string()
    .optional()
    .describe('The username of the assignee. Leave empty to unassign the issue'),
};

/**
 * Schema for confirm issue tool
 */
export const confirmIssueToolSchema = {
  issue_key: z.string().describe('The key of the issue to confirm'),
  comment: z
    .string()
    .optional()
    .describe('Optional comment explaining why this issue is confirmed'),
};

/**
 * Schema for unconfirm issue tool
 */
export const unconfirmIssueToolSchema = {
  issue_key: z.string().describe('The key of the issue to unconfirm'),
  comment: z
    .string()
    .optional()
    .describe('Optional comment explaining why this issue needs further investigation'),
};

/**
 * Schema for resolve issue tool
 */
export const resolveIssueToolSchema = {
  issue_key: z.string().describe('The key of the issue to resolve'),
  comment: z.string().optional().describe('Optional comment explaining how the issue was resolved'),
};

/**
 * Schema for reopen issue tool
 */
export const reopenIssueToolSchema = {
  issue_key: z.string().describe('The key of the issue to reopen'),
  comment: z
    .string()
    .optional()
    .describe('Optional comment explaining why the issue is being reopened'),
};

/**
 * Schema for issues tool
 */
export const issuesToolSchema = {
  // Component filters (backward compatible)
  project_key: z.string().optional().describe('Single project key for backward compatibility'), // Made optional to support projects array
  projects: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe('Filter by project keys'),
  component_keys: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe(
      'Filter by component keys (file paths, directories, or modules). Use this to filter issues by specific files or folders'
    ),
  components: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe('Alias for component_keys - filter by file paths, directories, or modules'),
  on_component_only: z
    .union([z.boolean(), z.string().transform((val) => val === 'true')])
    .nullable()
    .optional()
    .describe('Return only issues on the specified components, not on their sub-components'),
  directories: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe('Filter by directory paths'),
  files: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe('Filter by specific file paths'),
  scopes: z
    .union([z.array(z.enum(['MAIN', 'TEST', 'OVERALL'])), z.string()])
    .transform((val) => {
      const parsed = parseJsonStringArray(val);
      // Validate that all values are valid scopes
      if (parsed && Array.isArray(parsed)) {
        return parsed.filter((v) => ['MAIN', 'TEST', 'OVERALL'].includes(v));
      }
      return parsed;
    })
    .nullable()
    .optional()
    .describe('Filter by issue scopes (MAIN, TEST, OVERALL)'),

  // Branch and PR support
  branch: z.string().nullable().optional(),
  pull_request: pullRequestNullableSchema,

  // Issue filters
  issues: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(),
  severity: severitySchema, // Deprecated single value
  severities: severitiesSchema, // New array support
  statuses: statusSchema,
  resolutions: resolutionSchema,
  resolved: z
    .union([z.boolean(), z.string().transform((val) => val === 'true')])
    .nullable()
    .optional(),
  types: typeSchema,

  // Clean Code taxonomy (SonarQube 10.x+)
  clean_code_attribute_categories: cleanCodeAttributeCategoriesSchema,
  impact_severities: impactSeveritiesSchema,
  impact_software_qualities: impactSoftwareQualitiesSchema,
  issue_statuses: statusSchema, // New issue status values

  // Rules and tags
  rules: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe('Filter by rule keys'),
  tags: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe(
      'Filter by issue tags. Essential for security audits, regression testing, and categorized analysis'
    ),

  // Date filters
  created_after: z.string().nullable().optional(),
  created_before: z.string().nullable().optional(),
  created_at: z.string().nullable().optional(),
  created_in_last: z.string().nullable().optional(),

  // Assignment
  assigned: z
    .union([z.boolean(), z.string().transform((val) => val === 'true')])
    .nullable()
    .optional()
    .describe('Filter to only assigned (true) or unassigned (false) issues'),
  assignees: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe(
      'Filter by assignee logins. Critical for targeted clean-up sprints and workload analysis'
    ),
  author: z.string().nullable().optional().describe('Filter by single issue author'), // Single author
  authors: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe('Filter by multiple issue authors'), // Multiple authors

  // Security standards
  cwe: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(),
  owasp_top10: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(),
  owasp_top10_v2021: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(), // New 2021 version
  sans_top25: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(),
  sonarsource_security: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(),
  sonarsource_security_category: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(),

  // Languages
  languages: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(),

  // Facets
  facets: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional()
    .describe(
      'Enable faceted search for aggregations. Critical for dashboards. Available facets: severities, statuses, resolutions, rules, tags, types, authors, assignees, languages, etc.'
    ),
  facet_mode: z
    .enum(['effort', 'count'])
    .nullable()
    .optional()
    .describe(
      'Mode for facet computation: count (number of issues) or effort (remediation effort)'
    ),

  // New code
  since_leak_period: z
    .union([z.boolean(), z.string().transform((val) => val === 'true')])
    .nullable()
    .optional(),
  in_new_code_period: z
    .union([z.boolean(), z.string().transform((val) => val === 'true')])
    .nullable()
    .optional(),

  // Sorting
  s: z.string().nullable().optional(), // Sort field
  asc: z
    .union([z.boolean(), z.string().transform((val) => val === 'true')])
    .nullable()
    .optional(), // Sort direction

  // Response optimization
  additional_fields: z
    .union([z.array(z.string()), z.string()])
    .transform(parseJsonStringArray)
    .nullable()
    .optional(),

  // Pagination
  page: z.string().optional().transform(stringToNumberTransform),
  page_size: z.string().optional().transform(stringToNumberTransform),
};

```
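
Note that these exports are raw zod shapes (plain objects of field schemas) rather than `z.object(...)` instances, presumably so they can be handed straight to the MCP SDK's tool registration. For standalone validation a caller can wrap one; the parsed values below are assumptions based on the transform names:

```typescript
import { z } from 'zod';
import { issuesToolSchema } from './issues.js';

// Wrapping the raw shape produces a full schema for standalone validation.
const IssuesInput = z.object(issuesToolSchema);

const parsed = IssuesInput.parse({
  project_key: 'my-project',
  projects: '["backend","frontend"]', // JSON string → string[] via parseJsonStringArray (assumed)
  resolved: 'false', // 'false' → boolean false via the union transform
  page: '2', // string → number via stringToNumberTransform (assumed)
});

console.log(parsed.projects, parsed.resolved, parsed.page);
// Expected (assumed): [ 'backend', 'frontend' ] false 2
```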

--------------------------------------------------------------------------------
/scripts/test-monitoring-integration.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash
# Comprehensive integration test suite for monitoring endpoints
# Tests health checks, metrics, distributed tracing, and circuit breaker functionality

set -e  # Exit on error

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

echo -e "${GREEN}📊 SonarQube MCP Server - Monitoring Integration Tests${NC}"
echo "======================================================"

# Configuration
NAMESPACE="${NAMESPACE:-sonarqube-mcp}"
SERVICE_NAME="${SERVICE_NAME:-sonarqube-mcp}"
PORT="${PORT:-3000}"
BASE_URL="http://localhost:$PORT"

# Test results
TESTS_PASSED=0
TESTS_FAILED=0

# Function to check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Function to run a test
run_test() {
    local test_name=$1
    local test_command=$2

    echo -ne "  $test_name... "

    if eval "$test_command" >/dev/null 2>&1; then
        echo -e "${GREEN}✅ PASSED${NC}"
        TESTS_PASSED=$((TESTS_PASSED + 1))
    else
        echo -e "${RED}❌ FAILED${NC}"
        TESTS_FAILED=$((TESTS_FAILED + 1))
    fi
    # Always return 0: under `set -e`, a non-zero return here (or a
    # post-increment like ((VAR++)) evaluating to 0) would abort the suite.
    return 0
}

# Function to check endpoint response
check_endpoint() {
    local endpoint=$1
    local expected_status=${2:-200}
    local description=$3
    
    response=$(curl -s -o /dev/null -w "%{http_code}" "$BASE_URL$endpoint")
    
    if [ "$response" = "$expected_status" ]; then
        return 0
    elif [ "$expected_status" = "200" ] && [ "$response" = "503" ]; then
        # For health endpoints, 503 might be acceptable in test environment
        return 0
    else
        echo "    Expected: $expected_status, Got: $response"
        return 1
    fi
}

# Function to check JSON response
check_json_response() {
    local endpoint=$1
    local json_path=$2
    local expected_value=$3
    
    actual_value=$(curl -s "$BASE_URL$endpoint" | jq -r "$json_path" 2>/dev/null)
    
    if [ "$actual_value" = "$expected_value" ]; then
        return 0
    else
        echo "    Expected: $expected_value, Got: $actual_value"
        return 1
    fi
}

# Function to check metrics format
check_metrics_format() {
    local metrics=$(curl -s "$BASE_URL/metrics")
    
    # Check for standard Prometheus metrics
    if echo "$metrics" | grep -q "^# HELP" && echo "$metrics" | grep -q "^# TYPE"; then
        return 0
    else
        return 1
    fi
}

# Function to test circuit breaker behavior
test_circuit_breaker() {
    echo -e "\n${BLUE}🔌 Testing Circuit Breaker functionality...${NC}"
    
    # Note: In a real test environment, you would trigger failures to test circuit breaker
    # For now, we just check if circuit breaker metrics are exposed
    
    run_test "Circuit breaker metrics exposed" \
        "curl -s $BASE_URL/metrics | grep -q 'circuit_breaker'"
    
    run_test "Circuit breaker state metric" \
        "curl -s $BASE_URL/metrics | grep -q 'sonarqube_circuit_state'"
}

# Function to test health check details
test_health_checks() {
    echo -e "\n${BLUE}🏥 Testing Health Check endpoints...${NC}"
    
    # Test basic health endpoint
    run_test "Health endpoint accessible" \
        "check_endpoint /health"
    
    # Test ready endpoint
    run_test "Ready endpoint accessible" \
        "check_endpoint /ready"
    
    # Test health endpoint returns JSON
    run_test "Health endpoint returns JSON" \
        "curl -s $BASE_URL/health | jq . >/dev/null"
    
    # Test health check structure
    run_test "Health check has status field" \
        "curl -s $BASE_URL/health | jq -e '.status' >/dev/null"
    
    # Test ready check structure
    run_test "Ready check has appropriate response" \
        "curl -s $BASE_URL/ready | grep -E '(ready|ok|degraded)' >/dev/null"
}

# Function to test metrics endpoint
test_metrics() {
    echo -e "\n${BLUE}📈 Testing Metrics endpoint...${NC}"
    
    # Test metrics endpoint accessibility
    run_test "Metrics endpoint accessible" \
        "check_endpoint /metrics"
    
    # Test Prometheus format
    run_test "Metrics in Prometheus format" \
        "check_metrics_format"
    
    # Test for standard metrics
    run_test "Process metrics present" \
        "curl -s $BASE_URL/metrics | grep -q 'process_cpu_seconds_total'"
    
    run_test "Node.js metrics present" \
        "curl -s $BASE_URL/metrics | grep -q 'nodejs_'"
    
    # Test custom metrics
    run_test "HTTP request duration metric" \
        "curl -s $BASE_URL/metrics | grep -q 'http_request_duration_seconds'"
    
    run_test "HTTP requests total metric" \
        "curl -s $BASE_URL/metrics | grep -q 'http_requests_total'"
    
    # Test memory metrics
    run_test "Memory usage metrics" \
        "curl -s $BASE_URL/metrics | grep -q 'nodejs_external_memory_bytes'"
}

# Function to test OpenTelemetry integration
test_opentelemetry() {
    echo -e "\n${BLUE}🔭 Testing OpenTelemetry integration...${NC}"
    
    # Check for tracing headers support
    trace_id=$(uuidgen | tr '[:upper:]' '[:lower:]' | tr -d '-')
    
    run_test "Service accepts trace headers" \
        "curl -s -H 'traceparent: 00-$trace_id-0000000000000001-01' $BASE_URL/health -o /dev/null"
    
    # Check for tracing metrics
    run_test "Tracing metrics exposed" \
        "curl -s $BASE_URL/metrics | grep -E '(trace|span)' >/dev/null || true"
}

# Function to test monitoring middleware
test_monitoring_middleware() {
    echo -e "\n${BLUE}🛡️ Testing Monitoring Middleware...${NC}"
    
    # Make a few requests to generate metrics
    for i in {1..5}; do
        curl -s "$BASE_URL/health" >/dev/null
        curl -s "$BASE_URL/metrics" >/dev/null
    done
    
    # Check if request counts increased
    run_test "Request counter increments" \
        "curl -s $BASE_URL/metrics | grep 'http_requests_total' | grep -v '^#' | awk '{print \$2}' | awk '{s+=\$1} END {exit !(s>0)}'"
    
    # Test different HTTP methods tracking
    curl -X POST "$BASE_URL/health" >/dev/null 2>&1 || true
    run_test "Different HTTP methods tracked" \
        "curl -s $BASE_URL/metrics | grep 'http_requests_total' | grep -E 'method=\"(GET|POST)\"' >/dev/null"
}

# Function to test error tracking
test_error_tracking() {
    echo -e "\n${BLUE}❌ Testing Error Tracking...${NC}"
    
    # Try to access non-existent endpoint
    curl -s "$BASE_URL/non-existent-endpoint" >/dev/null 2>&1 || true
    
    # Check if 404 errors are tracked
    run_test "404 errors tracked in metrics" \
        "curl -s $BASE_URL/metrics | grep 'http_requests_total' | grep 'status=\"404\"' >/dev/null || true"
}

# Function to test performance metrics
test_performance_metrics() {
    echo -e "\n${BLUE}⚡ Testing Performance Metrics...${NC}"
    
    # Check for histogram metrics
    run_test "Request duration histogram" \
        "curl -s $BASE_URL/metrics | grep 'http_request_duration_seconds_bucket' >/dev/null"
    
    run_test "Request duration quantiles" \
        "curl -s $BASE_URL/metrics | grep -E 'http_request_duration_seconds{.*quantile=' >/dev/null || true"
    
    # Check for memory metrics
    run_test "Heap usage metrics" \
        "curl -s $BASE_URL/metrics | grep 'nodejs_heap_size_total_bytes' >/dev/null"
    
    run_test "GC metrics" \
        "curl -s $BASE_URL/metrics | grep 'nodejs_gc_duration_seconds' >/dev/null"
}

# Function to generate load for metrics
generate_test_load() {
    echo -e "\n${BLUE}🔄 Generating test load...${NC}"
    
    endpoints=("/health" "/ready" "/metrics")
    
    for i in {1..20}; do
        endpoint=${endpoints[$((i % ${#endpoints[@]}))]}
        curl -s "$BASE_URL$endpoint" >/dev/null 2>&1 &
    done
    
    wait
    echo "  Generated 20 requests across endpoints"
}

# Main execution
echo -e "\n${YELLOW}📋 Checking prerequisites...${NC}"

# Check if service is accessible
if ! curl -s -o /dev/null "$BASE_URL/health" 2>/dev/null; then
    echo -e "${RED}❌ Service is not accessible at $BASE_URL${NC}"
    echo "Please ensure the service is running and accessible."
    echo ""
    echo "To run locally:"
    echo "  npm run dev"
    echo ""
    echo "To test in Kubernetes:"
    echo "  kubectl port-forward -n $NAMESPACE svc/$SERVICE_NAME $PORT:$PORT"
    exit 1
fi

echo -e "${GREEN}✅ Service is accessible${NC}"

# Run all test suites
echo -e "\n${YELLOW}🚀 Starting integration tests...${NC}"

test_health_checks
test_metrics
test_circuit_breaker
test_opentelemetry
test_monitoring_middleware
generate_test_load
sleep 2  # Wait for metrics to update
test_error_tracking
test_performance_metrics

# Additional integration tests
echo -e "\n${BLUE}🔗 Testing Integration Scenarios...${NC}"

# Test metric labels
run_test "Metrics have proper labels" \
    "curl -s $BASE_URL/metrics | grep 'http_requests_total' | grep -E 'method=|status=|route=' >/dev/null"

# Test metric naming conventions
run_test "Metrics follow naming conventions" \
    "curl -s $BASE_URL/metrics | grep -v '^#' | grep -E '^[a-z_]+(_[a-z]+)*(_total|_bytes|_seconds|_count)?{' >/dev/null || true"

# Test health check during high load
echo -e "\n${BLUE}🏋️ Testing under load...${NC}"
(
    for i in {1..50}; do
        curl -s "$BASE_URL/health" >/dev/null 2>&1 &
    done
    wait
) &
LOAD_PID=$!

sleep 1
run_test "Health check responsive under load" \
    "curl -s --max-time 2 $BASE_URL/health >/dev/null"

wait $LOAD_PID 2>/dev/null

# Summary
echo -e "\n======================================================"
echo -e "${GREEN}📊 Test Summary${NC}"
echo -e "  Total tests: $((TESTS_PASSED + TESTS_FAILED))"
echo -e "  ${GREEN}Passed: $TESTS_PASSED${NC}"
echo -e "  ${RED}Failed: $TESTS_FAILED${NC}"

if [ $TESTS_FAILED -eq 0 ]; then
    echo -e "\n${GREEN}✅ All monitoring integration tests passed!${NC}"
    echo -e "\nThe monitoring stack is properly integrated with:"
    echo "  - Health and readiness checks"
    echo "  - Prometheus metrics exposition"
    echo "  - Request tracking and performance metrics"
    echo "  - Error tracking"
    echo "  - OpenTelemetry support"
    exit 0
else
    echo -e "\n${RED}❌ Some tests failed. Please review the failures above.${NC}"
    exit 1
fi
```