This is page 2 of 8. Use http://codebase.md/sapientpants/sonarqube-mcp-server?page={x} to view the full context.
# Directory Structure
```
├── .adr-dir
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ ├── analyze-and-fix-github-issue.md
│ │ ├── fix-sonarqube-issues.md
│ │ ├── implement-github-issue.md
│ │ ├── release.md
│ │ ├── spec-feature.md
│ │ └── update-dependencies.md
│ ├── hooks
│ │ └── block-git-no-verify.ts
│ └── settings.json
├── .dockerignore
├── .github
│ ├── actionlint.yaml
│ ├── changeset.yml
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ └── feature_request.md
│ ├── pull_request_template.md
│ ├── scripts
│ │ ├── determine-artifact.sh
│ │ └── version-and-release.js
│ ├── workflows
│ │ ├── codeql.yml
│ │ ├── main.yml
│ │ ├── pr.yml
│ │ ├── publish.yml
│ │ ├── reusable-docker.yml
│ │ ├── reusable-security.yml
│ │ └── reusable-validate.yml
│ └── WORKFLOWS.md
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .markdownlint.yaml
├── .markdownlintignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── .trivyignore
├── .yaml-lint.yml
├── .yamllintignore
├── CHANGELOG.md
├── changes.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── COMPATIBILITY.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── architecture
│ │ └── decisions
│ │ ├── 0001-record-architecture-decisions.md
│ │ ├── 0002-use-node-js-with-typescript.md
│ │ ├── 0003-adopt-model-context-protocol-for-sonarqube-integration.md
│ │ ├── 0004-use-sonarqube-web-api-client-for-all-sonarqube-interactions.md
│ │ ├── 0005-domain-driven-design-of-sonarqube-modules.md
│ │ ├── 0006-expose-sonarqube-features-as-mcp-tools.md
│ │ ├── 0007-support-multiple-authentication-methods-for-sonarqube.md
│ │ ├── 0008-use-environment-variables-for-configuration.md
│ │ ├── 0009-file-based-logging-to-avoid-stdio-conflicts.md
│ │ ├── 0010-use-stdio-transport-for-mcp-communication.md
│ │ ├── 0011-docker-containerization-for-deployment.md
│ │ ├── 0012-add-elicitation-support-for-interactive-user-input.md
│ │ ├── 0014-current-security-model-and-future-oauth2-considerations.md
│ │ ├── 0015-transport-architecture-refactoring.md
│ │ ├── 0016-http-transport-with-oauth-2-0-metadata-endpoints.md
│ │ ├── 0017-comprehensive-audit-logging-system.md
│ │ ├── 0018-add-comprehensive-monitoring-and-observability.md
│ │ ├── 0019-simplify-to-stdio-only-transport-for-mcp-gateway-deployment.md
│ │ ├── 0020-testing-framework-and-strategy-vitest-with-property-based-testing.md
│ │ ├── 0021-code-quality-toolchain-eslint-prettier-strict-typescript.md
│ │ ├── 0022-package-manager-choice-pnpm.md
│ │ ├── 0023-release-management-with-changesets.md
│ │ ├── 0024-ci-cd-platform-github-actions.md
│ │ ├── 0025-container-and-security-scanning-strategy.md
│ │ ├── 0026-circuit-breaker-pattern-with-opossum.md
│ │ ├── 0027-docker-image-publishing-strategy-ghcr-to-docker-hub.md
│ │ └── 0028-session-based-http-transport-with-server-sent-events.md
│ ├── architecture.md
│ ├── security.md
│ └── troubleshooting.md
├── eslint.config.js
├── examples
│ └── http-client.ts
├── jest.config.js
├── LICENSE
├── LICENSES.md
├── osv-scanner.toml
├── package.json
├── pnpm-lock.yaml
├── README.md
├── scripts
│ ├── actionlint.sh
│ ├── ci-local.sh
│ ├── load-test.sh
│ ├── README.md
│ ├── run-all-tests.sh
│ ├── scan-container.sh
│ ├── security-scan.sh
│ ├── setup.sh
│ ├── test-monitoring-integration.sh
│ └── validate-docs.sh
├── SECURITY.md
├── sonar-project.properties
├── src
│ ├── __tests__
│ │ ├── additional-coverage.test.ts
│ │ ├── advanced-index.test.ts
│ │ ├── assign-issue.test.ts
│ │ ├── auth-methods.test.ts
│ │ ├── boolean-string-transform.test.ts
│ │ ├── components.test.ts
│ │ ├── config
│ │ │ └── service-accounts.test.ts
│ │ ├── dependency-injection.test.ts
│ │ ├── direct-handlers.test.ts
│ │ ├── direct-lambdas.test.ts
│ │ ├── direct-schema-validation.test.ts
│ │ ├── domains
│ │ │ ├── components-domain-full.test.ts
│ │ │ ├── components-domain.test.ts
│ │ │ ├── hotspots-domain.test.ts
│ │ │ └── source-code-domain.test.ts
│ │ ├── environment-validation.test.ts
│ │ ├── error-handler.test.ts
│ │ ├── error-handling.test.ts
│ │ ├── errors.test.ts
│ │ ├── function-tests.test.ts
│ │ ├── handlers
│ │ │ ├── components-handler-integration.test.ts
│ │ │ └── projects-authorization.test.ts
│ │ ├── handlers.test.ts
│ │ ├── handlers.test.ts.skip
│ │ ├── index.test.ts
│ │ ├── issue-resolution-elicitation.test.ts
│ │ ├── issue-resolution.test.ts
│ │ ├── issue-transitions.test.ts
│ │ ├── issues-enhanced-search.test.ts
│ │ ├── issues-new-parameters.test.ts
│ │ ├── json-array-transform.test.ts
│ │ ├── lambda-functions.test.ts
│ │ ├── lambda-handlers.test.ts.skip
│ │ ├── logger.test.ts
│ │ ├── mapping-functions.test.ts
│ │ ├── mocked-environment.test.ts
│ │ ├── null-to-undefined.test.ts
│ │ ├── parameter-transformations-advanced.test.ts
│ │ ├── parameter-transformations.test.ts
│ │ ├── protocol-version.test.ts
│ │ ├── pull-request-transform.test.ts
│ │ ├── quality-gates.test.ts
│ │ ├── schema-parameter-transforms.test.ts
│ │ ├── schema-transformation-mocks.test.ts
│ │ ├── schema-transforms.test.ts
│ │ ├── schema-validators.test.ts
│ │ ├── schemas
│ │ │ ├── components-schema.test.ts
│ │ │ ├── hotspots-tools-schema.test.ts
│ │ │ └── issues-schema.test.ts
│ │ ├── sonarqube-elicitation.test.ts
│ │ ├── sonarqube.test.ts
│ │ ├── source-code.test.ts
│ │ ├── standalone-handlers.test.ts
│ │ ├── string-to-number-transform.test.ts
│ │ ├── tool-handler-lambdas.test.ts
│ │ ├── tool-handlers.test.ts
│ │ ├── tool-registration-schema.test.ts
│ │ ├── tool-registration-transforms.test.ts
│ │ ├── transformation-util.test.ts
│ │ ├── transports
│ │ │ ├── base.test.ts
│ │ │ ├── factory.test.ts
│ │ │ ├── http.test.ts
│ │ │ ├── session-manager.test.ts
│ │ │ └── stdio.test.ts
│ │ ├── utils
│ │ │ ├── retry.test.ts
│ │ │ └── transforms.test.ts
│ │ ├── zod-boolean-transform.test.ts
│ │ ├── zod-schema-transforms.test.ts
│ │ └── zod-transforms.test.ts
│ ├── config
│ │ ├── service-accounts.ts
│ │ └── versions.ts
│ ├── domains
│ │ ├── base.ts
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── errors.ts
│ ├── handlers
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── index.ts
│ ├── monitoring
│ │ ├── __tests__
│ │ │ └── circuit-breaker.test.ts
│ │ ├── circuit-breaker.ts
│ │ ├── health.ts
│ │ └── metrics.ts
│ ├── schemas
│ │ ├── common.ts
│ │ ├── components.ts
│ │ ├── hotspots-tools.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── sonarqube.ts
│ ├── transports
│ │ ├── base.ts
│ │ ├── factory.ts
│ │ ├── http.ts
│ │ ├── index.ts
│ │ ├── session-manager.ts
│ │ └── stdio.ts
│ ├── types
│ │ ├── common.ts
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ └── utils
│ ├── __tests__
│ │ ├── elicitation.test.ts
│ │ ├── pattern-matcher.test.ts
│ │ └── structured-response.test.ts
│ ├── client-factory.ts
│ ├── elicitation.ts
│ ├── error-handler.ts
│ ├── logger.ts
│ ├── parameter-mappers.ts
│ ├── pattern-matcher.ts
│ ├── retry.ts
│ ├── structured-response.ts
│ └── transforms.ts
├── test-http-transport.sh
├── tmp
│ └── .gitkeep
├── tsconfig.build.json
├── tsconfig.json
├── vitest.config.d.ts
├── vitest.config.js
├── vitest.config.js.map
└── vitest.config.ts
```
# Files
--------------------------------------------------------------------------------
/src/__tests__/domains/components-domain.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { ComponentsDomain } from '../../domains/components.js';
import { SonarQubeClient as WebApiClient } from 'sonarqube-web-api-client';
import { resetDefaultClient } from '../../utils/client-factory.js';
// Mock environment variables
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'http://localhost:9000';
process.env.SONARQUBE_ORGANIZATION = 'test-org';
describe('ComponentsDomain', () => {
let domain: ComponentsDomain;
const baseUrl = 'http://localhost:9000';
const token = 'test-token';
const organization = 'test-org';
beforeEach(() => {
resetDefaultClient();
const webApiClient = WebApiClient.withToken(baseUrl, token, { organization });
domain = new ComponentsDomain(webApiClient, organization);
});
afterEach(() => {
resetDefaultClient();
});
it('should be instantiated correctly', () => {
expect(domain).toBeDefined();
expect(domain).toBeInstanceOf(ComponentsDomain);
});
it('should have searchComponents method', () => {
expect(domain.searchComponents).toBeDefined();
expect(typeof domain.searchComponents).toBe('function');
});
it('should have getComponentTree method', () => {
expect(domain.getComponentTree).toBeDefined();
expect(typeof domain.getComponentTree).toBe('function');
});
it('should have showComponent method', () => {
expect(domain.showComponent).toBeDefined();
expect(typeof domain.showComponent).toBe('function');
});
describe('searchComponents', () => {
it('should accept search parameters', () => {
// Just verify the method exists and is callable
const method = domain.searchComponents;
expect(method).toBeDefined();
expect(typeof method).toBe('function');
// Don't actually call it as it would make HTTP requests
expect(method.length).toBeLessThanOrEqual(1); // Expects 0 or 1 parameter
});
});
describe('getComponentTree', () => {
it('should accept tree parameters', () => {
// Just verify the method exists and is callable
const method = domain.getComponentTree;
expect(method).toBeDefined();
expect(typeof method).toBe('function');
// Don't actually call it as it would make HTTP requests
expect(method.length).toBeLessThanOrEqual(1); // Expects at most 1 parameter
});
});
describe('showComponent', () => {
it('should accept component key and optional parameters', () => {
// Just verify the method exists and is callable
const method = domain.showComponent;
expect(method).toBeDefined();
expect(typeof method).toBe('function');
// Don't actually call it as it would make HTTP requests
expect(method.length).toBeLessThanOrEqual(3); // Expects up to 3 parameters
});
});
});
```
--------------------------------------------------------------------------------
/src/__tests__/zod-boolean-transform.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';
describe('Zod Boolean Transform Coverage', () => {
// This explicitly tests the transform used in index.ts for boolean parameters
// We're covering lines 705-731 in index.ts
describe('resolved parameter transform', () => {
// Recreate the exact schema used in index.ts
const resolvedSchema = z
.union([z.boolean(), z.string().transform((val: string) => val === 'true')])
.nullable()
.optional();
it('should handle boolean true value', () => {
expect(resolvedSchema.parse(true)).toBe(true);
});
it('should handle boolean false value', () => {
expect(resolvedSchema.parse(false)).toBe(false);
});
it('should transform string "true" to boolean true', () => {
expect(resolvedSchema.parse('true')).toBe(true);
});
it('should transform string "false" to boolean false', () => {
expect(resolvedSchema.parse('false')).toBe(false);
});
it('should pass null and undefined through', () => {
expect(resolvedSchema.parse(null)).toBeNull();
expect(resolvedSchema.parse(undefined)).toBeUndefined();
});
});
describe('on_component_only parameter transform', () => {
// Recreate the exact schema used in index.ts
const onComponentOnlySchema = z
.union([z.boolean(), z.string().transform((val: string) => val === 'true')])
.nullable()
.optional();
it('should transform valid values correctly', () => {
expect(onComponentOnlySchema.parse(true)).toBe(true);
expect(onComponentOnlySchema.parse('true')).toBe(true);
expect(onComponentOnlySchema.parse(false)).toBe(false);
expect(onComponentOnlySchema.parse('false')).toBe(false);
});
});
describe('since_leak_period parameter transform', () => {
// Recreate the exact schema used in index.ts
const sinceLeakPeriodSchema = z
.union([z.boolean(), z.string().transform((val: string) => val === 'true')])
.nullable()
.optional();
it('should transform valid values correctly', () => {
expect(sinceLeakPeriodSchema.parse(true)).toBe(true);
expect(sinceLeakPeriodSchema.parse('true')).toBe(true);
expect(sinceLeakPeriodSchema.parse(false)).toBe(false);
expect(sinceLeakPeriodSchema.parse('false')).toBe(false);
});
});
describe('in_new_code_period parameter transform', () => {
// Recreate the exact schema used in index.ts
const inNewCodePeriodSchema = z
.union([z.boolean(), z.string().transform((val: string) => val === 'true')])
.nullable()
.optional();
it('should transform valid values correctly', () => {
expect(inNewCodePeriodSchema.parse(true)).toBe(true);
expect(inNewCodePeriodSchema.parse('true')).toBe(true);
expect(inNewCodePeriodSchema.parse(false)).toBe(false);
expect(inNewCodePeriodSchema.parse('false')).toBe(false);
});
});
});
```
--------------------------------------------------------------------------------
/.github/workflows/reusable-security.yml:
--------------------------------------------------------------------------------
```yaml
# =============================================================================
# REUSABLE WORKFLOW: Security Scanning Suite
# PURPOSE: Run security scans (audit, OSV) and generate SBOM
# USAGE: Called by PR and main workflows for security validation
# OUTPUTS: Security findings uploaded to GitHub Security tab, SBOM artifact
# NOTE: CodeQL has its own dedicated workflow (codeql.yml) for better integration
# =============================================================================
name: Reusable Security
on:
workflow_call:
inputs:
node-version:
description: 'Node.js version (should match package.json engines.node)'
type: string
default: '22' # UPDATE: When upgrading Node.js
pnpm-version:
description: 'pnpm version (should match package.json packageManager)'
type: string
default: '10.17.0' # UPDATE: When upgrading pnpm
run-osv-scan:
description: 'Run OSV scanner for dependency vulnerabilities'
type: boolean
default: true
# SECURITY: Required permissions for security scanning
permissions:
actions: read # Read workflow metadata
contents: read # Read source code
security-events: write # Upload security findings
# EXAMPLE USAGE:
# jobs:
# security:
# uses: ./.github/workflows/reusable-security.yml
# with:
# run-osv-scan: true
jobs:
audit:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Full history for accurate analysis
# =============================================================================
# ENVIRONMENT SETUP
# Required for SBOM generation and dependency analysis
# =============================================================================
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: ${{ inputs.pnpm-version }}
run_install: false
standalone: true
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ inputs.node-version }}
cache: pnpm # Cache dependencies for speed
- name: Install dependencies
# Dependencies needed for accurate SBOM generation
run: pnpm install --frozen-lockfile
- name: Security audit
# Check for known vulnerabilities in dependencies
# FAILS IF: Critical vulnerabilities found
# To fix: Run 'pnpm update' or add overrides in package.json
run: pnpm audit --audit-level critical
osv-scan:
if: inputs.run-osv-scan
uses: google/osv-scanner-action/.github/workflows/[email protected]
with:
# Scan entire project including all manifests (package.json, pnpm-lock.yaml)
scan-args: |-
./
permissions:
security-events: write # Required to upload findings to Security tab
actions: read
contents: read
```
--------------------------------------------------------------------------------
/COMPATIBILITY.md:
--------------------------------------------------------------------------------
```markdown
# MCP Protocol Compatibility
This document outlines the Model Context Protocol (MCP) version compatibility for the SonarQube MCP Server.
## Protocol Version Support
The SonarQube MCP Server supports the following MCP protocol versions:
| Protocol Version | Status | SDK Version Required |
| ---------------- | ---------------------- | -------------------- |
| 2025-06-18 | ✅ Supported | 1.13.0+ |
| 2025-03-26 | ✅ Supported (Default) | 1.13.0+ |
| 2024-11-05 | ✅ Supported | 1.13.0+ |
| 2024-10-07 | ✅ Supported | 1.13.0+ |
### Version Negotiation
- The server uses `@modelcontextprotocol/sdk` version `1.13.0` or higher
- Protocol version is automatically negotiated during the client-server handshake
- The server will use the highest protocol version supported by both client and server
- If no common version is found, the connection will fail with a version mismatch error
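The snippet below is a minimal sketch for checking which protocol versions the installed SDK advertises; it assumes the `LATEST_PROTOCOL_VERSION` and `SUPPORTED_PROTOCOL_VERSIONS` constants exported by the SDK's `types` module in the 1.x line, so verify them against your installed version.
```typescript
// Sketch: print the protocol versions the installed SDK supports.
// Assumes the 1.x SDK exports these constants from types.js.
import {
  LATEST_PROTOCOL_VERSION,
  SUPPORTED_PROTOCOL_VERSIONS,
} from '@modelcontextprotocol/sdk/types.js';

console.log(`Latest protocol version: ${LATEST_PROTOCOL_VERSION}`);
console.log(`Supported versions: ${SUPPORTED_PROTOCOL_VERSIONS.join(', ')}`);
```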
### Current SDK Version
The project currently uses `@modelcontextprotocol/sdk` version `1.13.0`, which supports all protocol versions listed above.
## Feature Compatibility
### Protocol Version 2025-06-18
- Latest protocol version
- Full support for all server capabilities
- Enhanced error handling
### Protocol Version 2025-03-26
- Default negotiated version for most clients
- Full support for elicitation capabilities (required for our implementation)
- All standard MCP features
### Protocol Versions 2024-11-05 and 2024-10-07
- Basic MCP functionality
- May not support all advanced features
- Provided for backward compatibility
## Client Compatibility
This server is compatible with any MCP client that supports at least one of the protocol versions listed above. Common clients include:
- Claude Desktop App
- Continue.dev
- Other MCP-compliant clients
## SDK Update Process
When updating the MCP SDK:
1. Check the [MCP SDK releases](https://github.com/modelcontextprotocol/sdk/releases) for new versions
2. Review the changelog for breaking changes
3. Update the dependency in `package.json`
4. Run `pnpm install` to update the lock file
5. Test all server capabilities with multiple protocol versions
6. Update this compatibility document if new protocol versions are supported
7. Run the full test suite: `pnpm run precommit`
## Monitoring Protocol Usage
To see which protocol version is in use at runtime, run the server with debug logging enabled:
```bash
DEBUG=* pnpm start
```
The server will log the negotiated protocol version during client connection.
## Deprecated Features
As of MCP protocol version 2025-03-26:
- JSON-RPC batch support has been removed
- Our server uses the SDK's built-in transport layer, which handles this automatically
## References
- [MCP Specification](https://modelcontextprotocol.io)
- [MCP SDK Documentation](https://github.com/modelcontextprotocol/sdk)
- [Protocol Version History](https://modelcontextprotocol.io/docs/changelog)
```
--------------------------------------------------------------------------------
/src/transports/factory.ts:
--------------------------------------------------------------------------------
```typescript
import { ITransport, ITransportConfig, IHttpTransportConfig } from './base.js';
import { StdioTransport } from './stdio.js';
import { HttpTransport } from './http.js';
import { createLogger } from '../utils/logger.js';
const logger = createLogger('transport-factory');
/**
* Factory for creating transport instances.
* This factory creates transport instances for MCP communication based on configuration.
*/
export class TransportFactory {
/**
* Create a transport instance based on the provided configuration.
*
* @param config Transport configuration
* @returns A transport instance
* @throws Error if the transport type is not supported
*/
static create(config: ITransportConfig): ITransport {
logger.debug(`Creating transport of type: ${config.type}`);
switch (config.type) {
case 'stdio':
return new StdioTransport();
case 'http': {
const httpConfig = config as IHttpTransportConfig;
return new HttpTransport(httpConfig.options);
}
default:
throw new Error(`Unsupported transport type: ${config.type as string}`);
}
}
/**
* Create a transport based on environment variables.
* Supports both STDIO and HTTP transports based on MCP_TRANSPORT_TYPE.
*
* @returns A transport instance
*/
static createFromEnvironment(): ITransport {
const transportType = process.env.MCP_TRANSPORT_TYPE?.toLowerCase() || 'stdio';
logger.info(`Creating transport from environment: ${transportType}`);
if (transportType === 'http') {
// Parse HTTP configuration from environment
const options: IHttpTransportConfig['options'] = {};
if (process.env.MCP_HTTP_PORT) {
options.port = Number.parseInt(process.env.MCP_HTTP_PORT, 10);
}
if (process.env.MCP_HTTP_ALLOWED_HOSTS) {
options.allowedHosts = process.env.MCP_HTTP_ALLOWED_HOSTS.split(',').map((h) => h.trim());
}
if (process.env.MCP_HTTP_ALLOWED_ORIGINS) {
options.allowedOrigins = process.env.MCP_HTTP_ALLOWED_ORIGINS.split(',').map((o) =>
o.trim()
);
}
if (process.env.MCP_HTTP_SESSION_TIMEOUT) {
options.sessionTimeout = Number.parseInt(process.env.MCP_HTTP_SESSION_TIMEOUT, 10);
}
if (process.env.MCP_HTTP_ENABLE_DNS_REBINDING_PROTECTION === 'true') {
options.enableDnsRebindingProtection = true;
}
const config: IHttpTransportConfig = {
type: 'http',
options,
};
// Log configuration (without sensitive data)
logger.debug('HTTP transport configuration:', {
port: config.options?.port,
allowedHosts: config.options?.allowedHosts,
allowedOrigins: config.options?.allowedOrigins,
sessionTimeout: config.options?.sessionTimeout,
enableDnsRebindingProtection: config.options?.enableDnsRebindingProtection,
});
return TransportFactory.create(config);
}
// Default to STDIO transport
return new StdioTransport();
}
}
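/**
 * Illustrative usage (not part of the factory itself): the environment
 * variable names below match those parsed in createFromEnvironment; the
 * values are placeholders.
 *
 *   process.env.MCP_TRANSPORT_TYPE = 'http';
 *   process.env.MCP_HTTP_PORT = '3000';
 *   process.env.MCP_HTTP_ALLOWED_ORIGINS = 'https://example.com';
 *   const transport = TransportFactory.createFromEnvironment();
 *   // Returns an HttpTransport configured for port 3000; any other value
 *   // of MCP_TRANSPORT_TYPE (or none) falls back to StdioTransport.
 */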
```
--------------------------------------------------------------------------------
/src/utils/transforms.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Helper function to convert null to undefined
* @param value Any value that might be null
* @returns The original value or undefined if null
*/
export function nullToUndefined<T>(value: T | null | undefined): T | undefined {
return value === null ? undefined : value;
}
/**
* Helper function to transform string to number or null
* @param val String value to transform
* @returns Number or null if conversion fails
*/
export function stringToNumberTransform(val: string | null | undefined): number | null | undefined {
if (val === null || val === undefined) {
return val;
}
const parsed = Number.parseInt(val, 10);
return Number.isNaN(parsed) ? null : parsed;
}
/**
* Ensures a value is an array
* @param value Single value, array, or undefined
* @returns Array containing the value(s), or empty array if undefined
*/
export function ensureArray<T>(value: T | T[] | undefined): T[] {
if (value === undefined) return [];
return Array.isArray(value) ? value : [value];
}
/**
* Ensures a string value is an array of strings
* Handles comma-separated strings for backward compatibility
* @param value Single string, array of strings, or undefined
* @returns Array of strings, or empty array if undefined
*/
export function ensureStringArray(value: string | string[] | undefined): string[] {
if (value === undefined) return [];
if (Array.isArray(value)) return value;
// Check if the string contains commas and split if so
if (value.includes(',')) return value.split(',').map((s) => s.trim());
return [value];
}
/**
* Converts a number or string to a string
* Useful for parameters that can be passed as either type but need to be strings for the API
* @param value Number, string, null, or undefined
* @returns String representation of the value, or the original null/undefined
*/
export function numberOrStringToString(
value: number | string | null | undefined
): string | null | undefined {
if (value === null || value === undefined) {
return value;
}
return String(value);
}
/**
* Parses a JSON string array or returns the array as-is
* Useful for MCP parameters that might be sent as JSON strings
* @param value Array, JSON string array, null, or undefined
* @returns Array of strings, or null/undefined
*/
export function parseJsonStringArray(
value: string[] | string | null | undefined
): string[] | null | undefined {
if (value === null || value === undefined) {
return value;
}
// If it's already an array, return it
if (Array.isArray(value)) {
return value;
}
// If it's a string, try to parse it as JSON
if (typeof value === 'string') {
try {
const parsed: unknown = JSON.parse(value);
if (Array.isArray(parsed)) {
return parsed.map(String);
}
// If parsed but not an array, wrap it
return [String(parsed)];
} catch {
// If not valid JSON, treat as single value array
return [value];
}
}
// Shouldn't reach here, but handle edge cases
return [String(value)];
}
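// Illustrative expected values, derived directly from the functions above:
//   nullToUndefined(null)              -> undefined
//   stringToNumberTransform('42')      -> 42
//   stringToNumberTransform('abc')     -> null
//   ensureArray(7)                     -> [7]
//   ensureStringArray('a, b')          -> ['a', 'b']
//   numberOrStringToString(7)          -> '7'
//   parseJsonStringArray('["x","y"]')  -> ['x', 'y']
//   parseJsonStringArray('plain')      -> ['plain']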
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0014-current-security-model-and-future-oauth2-considerations.md:
--------------------------------------------------------------------------------
```markdown
# 14. Current Security Model and Future OAuth2 Considerations
Date: 2025-06-19
## Status
Accepted
## Context
The SonarQube MCP Server currently uses environment variables for authentication (token, basic auth, or admin passcode), which is appropriate for its design as a single-user local tool. However, the MCP specification positions MCP servers as OAuth 2.1 resource servers, which has implications for future development, especially if HTTP transport is added.
The MCP specification states that:
- MCP servers should act as OAuth 2.1 resource servers
- HTTP-based connections require OAuth token validation
- RFC8707 resource indicators should be used for token scoping
- Multi-client scenarios need proper authorization mechanisms
Our current implementation:
- Uses stdio transport (not HTTP)
- Designed for single-user local usage
- Manages authentication via environment variables
- Directly uses SonarQube's authentication mechanisms
## Decision
We will maintain the current environment variable-based authentication approach for local single-user scenarios while documenting the limitations and future OAuth2 considerations.
Specifically:
1. Continue using environment variables for authentication configuration
2. Document the current security model clearly in README.md
3. Document authentication best practices in SECURITY.md
4. Acknowledge OAuth2 requirements for potential future HTTP transport
5. Design the codebase to allow future OAuth2 implementation without breaking changes
The authentication priority remains:
1. Token authentication (most secure, recommended)
2. Basic authentication (username/password)
3. System passcode (for admin scenarios)
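The sketch below illustrates this priority order as applied to the documented environment variables; the function name and shape are hypothetical, not code from this repository.
```typescript
// Hypothetical illustration of the documented authentication priority.
type AuthConfig =
  | { method: 'token'; token: string }
  | { method: 'basic'; username: string; password: string }
  | { method: 'passcode'; passcode: string };

function resolveAuthFromEnv(env: NodeJS.ProcessEnv): AuthConfig {
  // 1. Token authentication (most secure, recommended)
  if (env.SONARQUBE_TOKEN) {
    return { method: 'token', token: env.SONARQUBE_TOKEN };
  }
  // 2. Basic authentication (username/password)
  if (env.SONARQUBE_USERNAME && env.SONARQUBE_PASSWORD) {
    return {
      method: 'basic',
      username: env.SONARQUBE_USERNAME,
      password: env.SONARQUBE_PASSWORD,
    };
  }
  // 3. System passcode (for admin scenarios)
  if (env.SONARQUBE_PASSCODE) {
    return { method: 'passcode', passcode: env.SONARQUBE_PASSCODE };
  }
  throw new Error('No SonarQube authentication configured');
}
```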
## Consequences
### Positive Consequences
- **Simple Setup**: Users can quickly configure authentication without complex OAuth flows
- **Appropriate Security**: The security model matches the single-user local tool use case
- **Direct Integration**: Leverages SonarQube's existing authentication mechanisms
- **Backward Compatible**: Future OAuth2 support can be added without breaking existing usage
### Negative Consequences
- **Limited to Local Use**: Not suitable for multi-user or hosted scenarios without modifications
- **No Token Validation**: The MCP server trusts the provided credentials without additional validation
- **Future Migration**: Adding OAuth2 support will require significant changes if HTTP transport is implemented
### Security Considerations
- Credentials are stored in the MCP client's configuration file (e.g., Claude Desktop config)
- Users must ensure proper file permissions on configuration files
- Token-based authentication is strongly recommended over passwords
- Tokens should be scoped with minimal required permissions
### Future Work
If HTTP transport is added:
1. Implement OAuth 2.1 resource server capabilities
2. Add token validation middleware
3. Support RFC8707 resource indicators
4. Implement proper multi-client authorization
5. Maintain backward compatibility with environment variable auth for local usage
```
--------------------------------------------------------------------------------
/scripts/setup.sh:
--------------------------------------------------------------------------------
```bash
#!/usr/bin/env bash
set -euo pipefail
# =============================================================================
# Project Setup Script
# Purpose: One-command setup for new developers
# Usage: ./scripts/setup.sh [project-name]
# =============================================================================
PROJECT_NAME="${1:-}"
CURRENT_DIR="$(pwd)"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log() {
echo -e "${GREEN}[SETUP]${NC} $1"
}
warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
error() {
echo -e "${RED}[ERROR]${NC} $1"
exit 1
}
info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
# Check if we're in the right directory
if [[ ! -f "$PROJECT_ROOT/package.json" ]]; then
error "Run this script from the project root directory"
fi
log "Starting project setup..."
# 1. Check prerequisites
log "Checking prerequisites..."
if ! command -v pnpm &> /dev/null; then
if command -v mise &> /dev/null; then
log "Installing pnpm via mise..."
mise install pnpm@10.17.0
else
error "pnpm not found. Please install pnpm 10.17.0 or install mise first."
fi
fi
if ! command -v node &> /dev/null; then
if command -v mise &> /dev/null; then
log "Installing Node.js via mise..."
mise install node@22
else
error "Node.js not found. Please install Node.js 22+ or install mise first."
fi
fi
# 2. Install dependencies
log "Installing dependencies..."
pnpm install
# 3. Rename project if name provided
if [[ -n "$PROJECT_NAME" ]]; then
log "Renaming project to '$PROJECT_NAME'..."
# Update package.json
pnpm pkg set name="$PROJECT_NAME"
# Update README.md title
sed -i.bak "s/# Agentic Node + TypeScript Starter/# $PROJECT_NAME/" README.md && rm README.md.bak
info "Project renamed to '$PROJECT_NAME'. Update other references manually as needed."
fi
# 4. Initialize git hooks
log "Setting up git hooks..."
pnpm prepare
# 5. Run initial verification
log "Running initial verification..."
if pnpm verify; then
log "✅ All checks passed!"
else
warn "Some checks failed. Fix issues before committing."
fi
# 6. Create initial build
log "Creating initial build..."
pnpm build
# 7. Show next steps
echo
log "🎉 Setup complete!"
echo
info "Next steps:"
echo "1. Open the project in your IDE (VS Code recommended)"
echo "2. Review and customize package.json, README.md, and other files"
echo "3. Start developing with: pnpm test:watch"
echo "4. Create your first feature with: pnpm changeset"
echo
info "Available commands:"
echo " pnpm dev # Start development mode"
echo " pnpm test:watch # Run tests in watch mode"
echo " pnpm verify # Run all quality checks"
echo " pnpm changeset # Create a changeset for your changes"
echo
info "Documentation:"
echo " docs/GETTING_STARTED.md - Full setup guide"
echo " docs/PROCESS.md - Development workflow"
echo " CLAUDE.md - AI development tips"
```
--------------------------------------------------------------------------------
/src/__tests__/error-handler.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, vi } from 'vitest';
import { withMCPErrorHandling } from '../utils/error-handler.js';
import { SonarQubeAPIError, SonarQubeErrorType } from '../errors.js';
// Mock the logger
vi.mock('../utils/logger.js', () => ({
createLogger: () => ({
error: vi.fn(),
}),
}));
describe('Error Handler Utilities', () => {
describe('withMCPErrorHandling', () => {
it('should return result on success', async () => {
const fn = vi
.fn<(arg1: string, arg2: string) => Promise<{ content: string }>>()
.mockResolvedValue({ content: 'success' });
const wrapped = withMCPErrorHandling(fn);
const result = await wrapped('arg1', 'arg2');
expect(result).toEqual({ content: 'success' });
expect(fn).toHaveBeenCalledWith('arg1', 'arg2');
});
it('should convert SonarQubeAPIError to MCP format', async () => {
const apiError = new SonarQubeAPIError(
'Test error',
SonarQubeErrorType.AUTHENTICATION_FAILED,
{
operation: 'test-op',
solution: 'Test solution',
}
);
const fn = vi.fn<() => Promise<any>>().mockRejectedValue(apiError);
const wrapped = withMCPErrorHandling(fn);
await expect(wrapped()).rejects.toMatchObject({
code: -32001,
message: expect.stringContaining('Test error'),
});
});
it('should re-throw non-SonarQubeAPIError', async () => {
const error = new Error('Generic error');
const fn = vi.fn<() => Promise<any>>().mockRejectedValue(error);
const wrapped = withMCPErrorHandling(fn);
await expect(wrapped()).rejects.toThrow(error);
});
it('should preserve function signature', async () => {
const fn = vi.fn((a: string, b: number) => Promise.resolve({ result: a + b }));
const wrapped = withMCPErrorHandling(fn);
const result = await wrapped('test', 123);
expect(result).toEqual({ result: 'test123' });
expect(fn).toHaveBeenCalledWith('test', 123);
});
it('should handle all error types correctly', async () => {
const errorTypes = [
{ type: SonarQubeErrorType.AUTHENTICATION_FAILED, code: -32001 },
{ type: SonarQubeErrorType.AUTHORIZATION_FAILED, code: -32002 },
{ type: SonarQubeErrorType.RESOURCE_NOT_FOUND, code: -32003 },
{ type: SonarQubeErrorType.RATE_LIMITED, code: -32004 },
{ type: SonarQubeErrorType.NETWORK_ERROR, code: -32005 },
{ type: SonarQubeErrorType.CONFIGURATION_ERROR, code: -32006 },
{ type: SonarQubeErrorType.VALIDATION_ERROR, code: -32007 },
{ type: SonarQubeErrorType.SERVER_ERROR, code: -32008 },
{ type: SonarQubeErrorType.UNKNOWN_ERROR, code: -32000 },
];
for (const { type, code } of errorTypes) {
const error = new SonarQubeAPIError(`${type} error`, type);
const fn = vi.fn<() => Promise<any>>().mockRejectedValue(error);
const wrapped = withMCPErrorHandling(fn);
await expect(wrapped()).rejects.toMatchObject({
code,
message: expect.stringContaining(`${type} error`),
});
}
});
});
});
```
--------------------------------------------------------------------------------
/scripts/actionlint.sh:
--------------------------------------------------------------------------------
```bash
#!/usr/bin/env bash
# Wrapper script for actionlint - GitHub Actions workflow linter
# Checks if actionlint is installed and provides installation instructions if not
set -uo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Check if actionlint is installed
if ! command -v actionlint &> /dev/null; then
# In CI environments during release, skip actionlint since workflows were already validated in PR
if [ "${CI:-false}" = "true" ] || [ "${GITHUB_ACTIONS:-false}" = "true" ]; then
echo -e "${YELLOW}Skipping actionlint in CI (already validated in PR workflow)${NC}"
exit 0
fi
# In local development, provide installation instructions
echo -e "${RED}Error: actionlint is not installed${NC}"
echo ""
echo -e "${YELLOW}Please install actionlint using one of the following methods:${NC}"
echo ""
echo -e "${BLUE}macOS/Linux (Homebrew):${NC}"
echo " brew install actionlint"
echo ""
echo -e "${BLUE}Go:${NC}"
echo " go install github.com/rhysd/actionlint/cmd/actionlint@latest"
echo ""
echo -e "${BLUE}Download binary:${NC}"
echo " https://github.com/rhysd/actionlint/releases"
echo ""
echo "After installation, run this script again."
exit 1
fi
# Run actionlint with provided arguments or default to checking .github/workflows/
if [ $# -eq 0 ]; then
echo -e "${GREEN}Running actionlint on .github/workflows/...${NC}"
# Run actionlint and capture output
OUTPUT=$(actionlint 2>&1)
EXIT_CODE=$?
if [ -n "$OUTPUT" ]; then
# Always show the output for transparency
echo "$OUTPUT"
# Store grep results in variables to avoid redundant processing
HAS_SHELLCHECK_INFO=0
HAS_SHELLCHECK_ERRORS=0
if echo "$OUTPUT" | grep -q "SC[0-9]*:info:"; then
HAS_SHELLCHECK_INFO=1
fi
if echo "$OUTPUT" | grep -qE "SC[0-9]*:(error|warning):"; then
HAS_SHELLCHECK_ERRORS=1
fi
# Check if output only contains shellcheck info warnings
if [ "$HAS_SHELLCHECK_INFO" -eq 1 ]; then
# Has shellcheck info warnings - check if there are also errors
if [ "$HAS_SHELLCHECK_ERRORS" -eq 1 ]; then
# There are actual errors or warnings beyond info level
echo ""
echo -e "${RED}✗ Workflow validation failed - errors or warnings found${NC}"
exit 1
else
# Only shellcheck info warnings
echo ""
echo -e "${YELLOW}ℹ Info: Minor shellcheck suggestions found (not blocking)${NC}"
echo -e "${GREEN}✓ All workflow files are valid${NC}"
exit 0
fi
else
# Output exists but no shellcheck warnings - must be errors
echo ""
echo -e "${RED}✗ Workflow validation failed${NC}"
exit 1
fi
else
# No output means success
echo -e "${GREEN}✓ All workflow files are valid - no issues found${NC}"
exit 0
fi
else
actionlint "$@"
fi
```
--------------------------------------------------------------------------------
/src/__tests__/schema-validators.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';
describe('Schema Validators and Transformers', () => {
it('should transform page string to number or null', () => {
const pageSchema = z
.string()
.optional()
.transform((val: string | undefined) => (val ? parseInt(val, 10) || null : null));
expect(pageSchema.parse('10')).toBe(10);
expect(pageSchema.parse('invalid')).toBe(null);
expect(pageSchema.parse('')).toBe(null);
expect(pageSchema.parse(undefined)).toBe(null);
});
it('should transform string to boolean', () => {
const booleanSchema = z
.union([z.boolean(), z.string().transform((val: string) => val === 'true')])
.nullable()
.optional();
expect(booleanSchema.parse('true')).toBe(true);
expect(booleanSchema.parse('false')).toBe(false);
expect(booleanSchema.parse(true)).toBe(true);
expect(booleanSchema.parse(false)).toBe(false);
expect(booleanSchema.parse(null)).toBe(null);
expect(booleanSchema.parse(undefined)).toBe(undefined);
});
it('should validate severity enum', () => {
const severitySchema = z
.enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
.nullable()
.optional();
expect(severitySchema.parse('INFO')).toBe('INFO');
expect(severitySchema.parse('MINOR')).toBe('MINOR');
expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
expect(severitySchema.parse(null)).toBe(null);
expect(severitySchema.parse(undefined)).toBe(undefined);
expect(() => severitySchema.parse('INVALID')).toThrow();
});
it('should validate status array enum', () => {
const statusSchema = z
.array(
z.enum([
'OPEN',
'CONFIRMED',
'REOPENED',
'RESOLVED',
'CLOSED',
'TO_REVIEW',
'IN_REVIEW',
'REVIEWED',
])
)
.nullable()
.optional();
expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
expect(statusSchema.parse(null)).toBe(null);
expect(statusSchema.parse(undefined)).toBe(undefined);
expect(() => statusSchema.parse(['INVALID'])).toThrow();
});
it('should validate resolution array enum', () => {
const resolutionSchema = z
.array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
.nullable()
.optional();
expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
'FALSE-POSITIVE',
'WONTFIX',
]);
expect(resolutionSchema.parse(null)).toBe(null);
expect(resolutionSchema.parse(undefined)).toBe(undefined);
expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
});
it('should validate type array enum', () => {
const typeSchema = z
.array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
.nullable()
.optional();
expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
expect(typeSchema.parse(null)).toBe(null);
expect(typeSchema.parse(undefined)).toBe(undefined);
expect(() => typeSchema.parse(['INVALID'])).toThrow();
});
});
```
--------------------------------------------------------------------------------
/src/__tests__/boolean-string-transform.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';
describe('Boolean string transform', () => {
// Test the boolean transform that's used in the tool registrations
const booleanStringTransform = (val: string) => val === 'true';
// Create a schema that matches the one in index.ts
const booleanSchema = z
.union([z.boolean(), z.string().transform(booleanStringTransform)])
.nullable()
.optional();
describe('direct transform function', () => {
it('should transform "true" to true', () => {
expect(booleanStringTransform('true')).toBe(true);
});
it('should transform anything else to false', () => {
expect(booleanStringTransform('false')).toBe(false);
expect(booleanStringTransform('True')).toBe(false);
expect(booleanStringTransform('1')).toBe(false);
expect(booleanStringTransform('')).toBe(false);
});
});
describe('zod schema with boolean transform', () => {
it('should accept and pass through boolean values', () => {
expect(booleanSchema.parse(true)).toBe(true);
expect(booleanSchema.parse(false)).toBe(false);
});
it('should transform string "true" to boolean true', () => {
expect(booleanSchema.parse('true')).toBe(true);
});
it('should transform other string values to boolean false', () => {
expect(booleanSchema.parse('false')).toBe(false);
expect(booleanSchema.parse('1')).toBe(false);
expect(booleanSchema.parse('')).toBe(false);
});
it('should pass through null and undefined', () => {
expect(booleanSchema.parse(null)).toBeNull();
expect(booleanSchema.parse(undefined)).toBeUndefined();
});
});
// Test multiple boolean schema transformations in the same schema
describe('multiple boolean transforms in schema', () => {
// Create a schema with multiple boolean transforms
const complexSchema = z.object({
resolved: z
.union([z.boolean(), z.string().transform(booleanStringTransform)])
.nullable()
.optional(),
on_component_only: z
.union([z.boolean(), z.string().transform(booleanStringTransform)])
.nullable()
.optional(),
since_leak_period: z
.union([z.boolean(), z.string().transform(booleanStringTransform)])
.nullable()
.optional(),
in_new_code_period: z
.union([z.boolean(), z.string().transform(booleanStringTransform)])
.nullable()
.optional(),
});
it('should transform multiple boolean string values', () => {
const result = complexSchema.parse({
resolved: 'true',
on_component_only: 'false',
since_leak_period: true,
in_new_code_period: 'true',
});
expect(result).toEqual({
resolved: true,
on_component_only: false,
since_leak_period: true,
in_new_code_period: true,
});
});
it('should handle mix of boolean, string, null and undefined values', () => {
const result = complexSchema.parse({
resolved: true,
on_component_only: 'true',
since_leak_period: null,
});
expect(result).toEqual({
resolved: true,
on_component_only: true,
since_leak_period: null,
});
});
});
});
```
--------------------------------------------------------------------------------
/.claude/commands/implement-github-issue.md:
--------------------------------------------------------------------------------
```markdown
# Implement GitHub Issue
You are about to implement GitHub issue: $ARGUMENTS
## Implementation Workflow
### 1. Analyze the Issue
```bash
gh issue view $ARGUMENTS
```
- Review the full issue description
- If it contains Gherkin specs, parse acceptance criteria carefully
- Identify non-goals and constraints
- Note any technical requirements
### 2. Research Codebase
- Search for relevant existing code
- Identify files needing modification
- Look for similar patterns to maintain consistency
- Review existing tests for patterns
### 3. Plan Implementation
Create a plan with:
- Core functionality breakdown
- Test strategy (unit + property-based)
- Files to create/modify
- Edge cases and risks
### 4. Create Feature Branch
```bash
# Follow branch naming from CLAUDE.md
git checkout -b <type>/<issue-number>-<description>
# Example: feat/42-user-authentication
```
### 5. Implement Solution
- Follow patterns in CLAUDE.md (validation, testing, imports)
- Write clean, focused functions
- Add TypeScript types and Zod validation
- Document public APIs with JSDoc
### 6. Write Tests
Required test coverage:
- **Unit tests** in `tests/*.spec.ts`
- **Property-based tests** in `tests/*.property.spec.ts` for business logic
- Test both success and failure cases
- Verify edge cases
### 7. Verify Quality
```bash
pnpm verify # Runs all checks
```
### 8. Create Changeset
**Changeset Guidance for Features:**
```bash
# For bug fixes
pnpm changeset
# Select: patch
# Message: "Fix: [brief description of what was fixed]"
# For new features
pnpm changeset
# Select: minor
# Message: "Add [feature name]: [brief description]"
# For breaking changes
pnpm changeset
# Select: major
# Message: "BREAKING: [what changed and migration required]"
# For non-code changes (docs, tests, refactoring)
pnpm changeset --empty
# Message: "Internal: [what was changed]"
```
**Decision Guide:**
- **patch**: Bug fixes, security patches, performance improvements
- **minor**: New features, new APIs, significant enhancements
- **major**: Breaking changes, API removals, incompatible updates
- **--empty**: Documentation, tests, CI/CD, internal refactoring
### 9. Commit Changes
```bash
git add .
git commit -m "<type>: <description>
<body-if-needed>
Closes #<issue-number>"
```
### 10. Create Pull Request
```bash
git push -u origin <branch-name>
gh pr create \
--title "<type>: <description>" \
--body "## Summary
<what-and-why>
## Changes
- <list-changes>
## Testing
- <how-tested>
Closes #<issue-number>" \
--assignee @me
```
### 11. Monitor CI
```bash
gh pr checks --watch
```
### 12. Address Feedback
- Respond to review comments
- Make requested changes
- Re-verify after changes
### 13. Merge PR
```bash
# After approval and passing checks
gh pr merge --squash --delete-branch
```
## Key Points
- **Follow coding standards** in CLAUDE.md
- **Test thoroughly** - Unit + property-based tests required
- **Use changesets** for version management
- **Conventional commits** for clear history
- **Quality first** - All checks must pass
## Success Checklist
Before completing:
- [ ] All acceptance criteria met
- [ ] Tests comprehensive (unit + property)
- [ ] `pnpm verify` passes
- [ ] Documentation updated
- [ ] Changeset created and up to date
- [ ] PR reviewed and approved
See CLAUDE.md for detailed patterns, troubleshooting, and coding standards.
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0006-expose-sonarqube-features-as-mcp-tools.md:
--------------------------------------------------------------------------------
```markdown
# 6. Expose SonarQube Features as MCP Tools
Date: 2025-06-13
## Status
Accepted
## Context
The Model Context Protocol (MCP) defines a standard for exposing capabilities to AI clients through "tools". When integrating SonarQube functionality into an MCP server, we need to decide how to structure and expose the various SonarQube operations (projects, issues, metrics, quality gates, hotspots, etc.) to AI clients.
SonarQube provides a comprehensive REST API with numerous endpoints covering different aspects of code quality management. These operations have varying complexity, parameters, and return types. We need an architecture that:
1. Makes SonarQube functionality easily discoverable by AI clients
2. Provides clear, single-purpose operations
3. Enables scriptable automation of SonarQube tasks
4. Maintains consistency with MCP patterns
5. Allows for future extensibility
## Decision
We will expose each SonarQube operation as a separate MCP tool registered in index.ts. This tool-based architecture means:
1. **One Tool Per Operation**: Each distinct SonarQube capability (e.g., `searchIssues`, `getProjects`, `getMetrics`, `markIssueFalsePositive`) is implemented as its own MCP tool with a dedicated handler.
2. **Tool Registration**: All tools are registered in index.ts using the MCP server's tool registration mechanism, providing metadata about each tool's purpose, parameters, and schema (see the sketch after this list).
3. **Domain Organization**: Tools are organized by domain modules (projects, issues, metrics, quality gates, hotspots, etc.) but exposed individually to the AI client.
4. **Consistent Naming**: Tools follow a consistent naming pattern that reflects their SonarQube domain and operation (e.g., `sonarqube.issues.search`, `sonarqube.projects.list`).
5. **Parameter Validation**: Each tool defines its own parameter schema for validation, ensuring type safety and clear documentation of required/optional parameters.
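As a minimal sketch (assuming the high-level `McpServer` API from `@modelcontextprotocol/sdk`; the tool name, description, and handler body are illustrative, not this server's actual code):
```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { z } from 'zod';

const server = new McpServer({ name: 'sonarqube-mcp-server', version: '0.0.0' });

// One tool per operation, with its own Zod parameter schema.
server.tool(
  'projects',
  'List SonarQube projects with pagination',
  { page: z.string().optional(), page_size: z.string().optional() },
  async ({ page, page_size }) => ({
    content: [{ type: 'text' as const, text: JSON.stringify({ page, page_size }) }],
  })
);
```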
## Consequences
### Positive Consequences
1. **Discoverability**: AI clients can easily discover available SonarQube operations through MCP's tool listing mechanism.
2. **Clear Purpose**: Each tool has a single, well-defined purpose, making it easier for AI clients to understand and use correctly.
3. **Scriptability**: AI clients can compose multiple tool calls to create complex workflows (e.g., search for issues, then bulk mark them as false positives).
4. **Documentation**: Each tool can have its own detailed documentation, examples, and parameter descriptions.
5. **Extensibility**: New SonarQube operations can be added as new tools without modifying existing ones.
6. **Type Safety**: Individual parameter schemas per tool provide better type checking and validation.
7. **Testing**: Each tool can be tested independently with focused test cases.
### Negative Consequences
1. **Tool Proliferation**: Large number of tools may overwhelm AI clients or make tool selection more complex.
2. **Granularity**: Some related operations might benefit from being combined, but the tool-per-operation approach enforces separation.
3. **Registration Overhead**: Each new SonarQube feature requires tool registration boilerplate in index.ts.
4. **Naming Consistency**: Maintaining consistent naming across many tools requires discipline and documentation.
5. **Cross-Tool State**: Operations that might benefit from shared state or context must pass data explicitly between tool calls.
```
--------------------------------------------------------------------------------
/LICENSES.md:
--------------------------------------------------------------------------------
```markdown
# License Information
This document describes the licenses used in this project and its dependencies.
## Project License
This project is licensed under the MIT License. See [LICENSE](./LICENSE) for details.
## Container Image Dependencies
The Docker container is based on Alpine Linux and includes the following system packages with their respective licenses:
### Acceptable GPL/LGPL Licenses
The following packages are part of the Alpine Linux base system and use GPL/LGPL licenses. These are acceptable for containerized applications:
| Package | License | Purpose | Acceptability |
| ------------------------ | ----------------------------------- | ------------------------ | ------------------------------- |
| `alpine-baselayout` | GPL-2.0-only | Base directory structure | ✅ Alpine base - acceptable |
| `alpine-baselayout-data` | GPL-2.0-only | Base data files | ✅ Alpine base - acceptable |
| `apk-tools` | GPL-2.0-only | Package manager | ✅ Alpine base - acceptable |
| `busybox` | GPL-2.0-only | Core utilities | ✅ Alpine base - acceptable |
| `busybox-binsh` | GPL-2.0-only | Shell | ✅ Alpine base - acceptable |
| `libapk2` | GPL-2.0-only | APK library | ✅ Alpine base - acceptable |
| `libgcc` | GPL-2.0-or-later, LGPL-2.1-or-later | GCC runtime | ✅ Runtime library - acceptable |
| `libstdc++` | GPL-2.0-or-later, LGPL-2.1-or-later | C++ standard library | ✅ Runtime library - acceptable |
| `musl-utils` | GPL-2.0-or-later | C library utilities | ✅ Runtime library - acceptable |
### Other Licenses
| Package | License | Purpose |
| ------------------------ | ---------------- | ----------------- |
| `ca-certificates-bundle` | MPL-2.0, MIT | SSL certificates |
| All other packages | MIT, ISC, BSD-\* | Various utilities |
## License Compliance
### GPL/LGPL in Container Images
The use of GPL and LGPL licensed system libraries in container images is standard practice and acceptable because:
1. **Runtime Exception**: These are system libraries provided by Alpine Linux as part of the base operating system
2. **No Distribution of Modified Binaries**: We use these packages as-is from Alpine's official repositories
3. **Container Isolation**: The GPL components are part of the container runtime environment, not distributed as standalone software
4. **Industry Standard**: Major cloud providers and container registries (Docker Hub, GCR, ECR) all use Alpine Linux with these same packages
### Node.js Dependencies
All Node.js packages used in this project are licensed under permissive licenses (MIT, ISC, Apache-2.0). See `package.json` for the complete list.
## Trivy Security Scanning
This project uses Trivy to scan container images for:
- **Vulnerabilities** (CVEs)
- **Secrets** (hardcoded credentials)
- **Misconfigurations** (security best practices)
- **Licenses** (open-source compliance)
The license scanner will report GPL/LGPL packages from Alpine Linux. These findings are documented in this file and are acceptable for the reasons stated above.
## Questions?
If you have questions about licensing, please open an issue or contact the maintainers.
```
--------------------------------------------------------------------------------
/src/domains/base.ts:
--------------------------------------------------------------------------------
```typescript
import { SonarQubeClient as WebApiClient } from 'sonarqube-web-api-client';
import { createLogger } from '../utils/logger.js';
import { trackSonarQubeRequest } from '../monitoring/metrics.js';
import { wrapWithCircuitBreaker } from '../monitoring/circuit-breaker.js';
import { withRetry } from '../utils/retry.js';
/**
* Base class for all domain modules
*/
export abstract class BaseDomain {
protected readonly logger = createLogger(this.constructor.name);
constructor(
protected readonly webApiClient: WebApiClient,
protected readonly organization: string | null
) {}
/**
* Wrap a SonarQube API call with retry, metrics and circuit breaker
*/
protected async tracedApiCall<T>(endpoint: string, operation: () => Promise<T>): Promise<T> {
const startTime = Date.now();
// Wrap operation with retry logic
const retryableOperation = () =>
withRetry(operation, {
maxAttempts: 3,
initialDelay: 1000,
maxDelay: 5000,
shouldRetry: (error: Error) => {
const message = error.message.toLowerCase();
// Retry on network errors and 5xx server errors
return (
message.includes('econnrefused') ||
message.includes('etimedout') ||
message.includes('enotfound') ||
message.includes('econnreset') ||
message.includes('socket hang up') ||
message.includes('502') ||
message.includes('503') ||
message.includes('504') ||
(message.includes('50') && !message.includes('40')) // 5xx errors
);
},
});
// Wrap operation with circuit breaker
const breakerName = `sonarqube.${endpoint.replaceAll('/', '.')}`;
const wrappedOperation = wrapWithCircuitBreaker(breakerName, retryableOperation, {
timeout: 30000, // 30 seconds
errorThresholdPercentage: 50,
resetTimeout: 60000, // 1 minute
volumeThreshold: 5,
errorFilter: (error: Error) => {
// Don't count 4xx errors toward circuit breaker threshold
// (except 429 rate limiting)
const message = error.message.toLowerCase();
if (message.includes('429') || message.includes('rate limit')) {
return true;
}
return !(message.includes('40') && !message.includes('408'));
},
});
try {
const result = await wrappedOperation();
// Track successful request metric
trackSonarQubeRequest(endpoint, true, (Date.now() - startTime) / 1000);
return result;
} catch (error) {
// Track failed request metric
const errorType = this.categorizeError(error);
trackSonarQubeRequest(endpoint, false, (Date.now() - startTime) / 1000, errorType);
throw error;
}
}
/**
* Categorize error for metrics
*/
private categorizeError(error: unknown): string {
if (error instanceof Error) {
if (error.message.includes('timeout')) return 'timeout';
if (error.message.includes('401') || error.message.includes('unauthorized')) return 'auth';
if (error.message.includes('403') || error.message.includes('forbidden')) return 'forbidden';
if (error.message.includes('404') || error.message.includes('not found')) return 'not_found';
if (error.message.includes('429') || error.message.includes('rate limit'))
return 'rate_limit';
if (error.message.includes('500') || error.message.includes('server error'))
return 'server_error';
if (error.message.includes('network')) return 'network';
}
return 'unknown';
}
}
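// Illustrative usage (a sketch, not part of this file): a domain module
// extends BaseDomain and routes every call through tracedApiCall so the
// retry, circuit-breaker and metrics wrappers apply uniformly. The method
// and endpoint names below are hypothetical.
//
// class ProjectsDomain extends BaseDomain {
//   async listProjects() {
//     return this.tracedApiCall('projects/search', () =>
//       this.webApiClient.projects.search().execute()
//     );
//   }
// }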
```
--------------------------------------------------------------------------------
/src/__tests__/environment-validation.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { createDefaultClient } from '../index.js';
// Mock the sonarqube module
vi.mock('../sonarqube.js', () => ({
createSonarQubeClientFromEnv: vi.fn(() => ({
// Mock client implementation
listProjects: vi.fn(),
getIssues: vi.fn(),
})),
setSonarQubeElicitationManager: vi.fn(),
createSonarQubeClientFromEnvWithElicitation: vi.fn(() =>
Promise.resolve({
// Mock client implementation
listProjects: vi.fn(),
getIssues: vi.fn(),
})
),
}));
describe('Environment Validation', () => {
// Save original env vars
const originalEnv = process.env;
beforeEach(() => {
// Clear environment variables
process.env = { ...originalEnv };
delete process.env.SONARQUBE_TOKEN;
delete process.env.SONARQUBE_USERNAME;
delete process.env.SONARQUBE_PASSWORD;
delete process.env.SONARQUBE_PASSCODE;
delete process.env.SONARQUBE_URL;
delete process.env.SONARQUBE_ORGANIZATION;
});
afterEach(() => {
// Restore original env vars
process.env = originalEnv;
});
describe('createDefaultClient', () => {
it('should create client with token authentication', () => {
process.env.SONARQUBE_TOKEN = 'test-token';
const client = createDefaultClient();
expect(client).toBeDefined();
});
it('should create client with basic authentication', () => {
process.env.SONARQUBE_USERNAME = 'test-user';
process.env.SONARQUBE_PASSWORD = 'test-pass';
const client = createDefaultClient();
expect(client).toBeDefined();
});
it('should create client with passcode authentication', () => {
process.env.SONARQUBE_PASSCODE = 'test-passcode';
const client = createDefaultClient();
expect(client).toBeDefined();
});
it('should throw error when no authentication is provided', () => {
expect(() => createDefaultClient()).toThrow('No SonarQube authentication configured');
});
it('should throw error with invalid URL', () => {
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'not-a-valid-url';
expect(() => createDefaultClient()).toThrow('Invalid SONARQUBE_URL');
});
it('should accept valid URL', () => {
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'https://sonarqube.example.com';
const client = createDefaultClient();
expect(client).toBeDefined();
});
it('should accept organization parameter', () => {
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_ORGANIZATION = 'my-org';
const client = createDefaultClient();
expect(client).toBeDefined();
});
it('should prioritize token over other auth methods', () => {
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_USERNAME = 'test-user';
process.env.SONARQUBE_PASSWORD = 'test-pass';
process.env.SONARQUBE_PASSCODE = 'test-passcode';
// Should not throw - uses token auth
const client = createDefaultClient();
expect(client).toBeDefined();
});
it('should create client when only username is provided (legacy token auth)', () => {
process.env.SONARQUBE_USERNAME = 'test-user';
const client = createDefaultClient();
expect(client).toBeDefined();
});
it('should throw error when only password is provided', () => {
process.env.SONARQUBE_PASSWORD = 'test-pass';
expect(() => createDefaultClient()).toThrow('No SonarQube authentication configured');
});
});
});
```
--------------------------------------------------------------------------------
/src/__tests__/schemas/components-schema.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';
import { componentsToolSchema } from '../../schemas/components.js';
describe('componentsToolSchema', () => {
it('should validate minimal parameters', () => {
const input = {};
const result = z.object(componentsToolSchema).parse(input);
expect(result).toEqual({});
});
it('should validate search parameters', () => {
const input = {
query: 'UserService',
qualifiers: ['TRK', 'FIL'],
language: 'java',
};
const result = z.object(componentsToolSchema).parse(input);
expect(result.query).toBe('UserService');
expect(result.qualifiers).toEqual(['TRK', 'FIL']);
expect(result.language).toBe('java');
});
it('should validate tree navigation parameters', () => {
const input = {
component: 'com.example:project',
strategy: 'children',
qualifiers: ['DIR', 'FIL'],
};
const result = z.object(componentsToolSchema).parse(input);
expect(result.component).toBe('com.example:project');
expect(result.strategy).toBe('children');
expect(result.qualifiers).toEqual(['DIR', 'FIL']);
});
it('should validate pagination parameters with transformation', () => {
const input = {
asc: 'true',
ps: '50',
p: '2',
};
const result = z.object(componentsToolSchema).parse(input);
expect(result.asc).toBe(true);
expect(result.ps).toBe(50);
expect(result.p).toBe(2);
});
it('should validate branch and pull request parameters', () => {
const input = {
component: 'com.example:project',
branch: 'feature-branch',
pullRequest: '123',
};
const result = z.object(componentsToolSchema).parse(input);
expect(result.component).toBe('com.example:project');
expect(result.branch).toBe('feature-branch');
expect(result.pullRequest).toBe('123');
});
it('should validate all parameters together', () => {
const input = {
query: 'test',
qualifiers: ['TRK', 'DIR', 'FIL'],
language: 'typescript',
component: 'com.example:project',
strategy: 'all',
asc: 'false',
ps: '100',
p: '1',
branch: 'main',
pullRequest: '456',
};
const result = z.object(componentsToolSchema).parse(input);
expect(result).toMatchObject({
query: 'test',
qualifiers: ['TRK', 'DIR', 'FIL'],
language: 'typescript',
component: 'com.example:project',
strategy: 'all',
asc: false,
ps: 100,
p: 1,
branch: 'main',
pullRequest: '456',
});
});
it('should reject invalid qualifiers', () => {
const input = {
qualifiers: ['INVALID'],
};
expect(() => z.object(componentsToolSchema).parse(input)).toThrow();
});
it('should reject invalid strategy', () => {
const input = {
strategy: 'invalid',
};
expect(() => z.object(componentsToolSchema).parse(input)).toThrow();
});
it('should handle boolean string transformations', () => {
const testCases = [
{ asc: 'true', expected: true },
{ asc: 'false', expected: false },
];
testCases.forEach((testCase) => {
const result = z.object(componentsToolSchema).parse(testCase);
expect(result.asc).toBe(testCase.expected);
});
});
it('should handle number string transformations', () => {
const input = {
ps: '25',
p: '3',
};
const result = z.object(componentsToolSchema).parse(input);
expect(result.ps).toBe(25);
expect(result.p).toBe(3);
});
it('should handle undefined values for pagination', () => {
const input = {
ps: undefined,
p: undefined,
};
const result = z.object(componentsToolSchema).parse(input);
expect(result.ps).toBeUndefined();
expect(result.p).toBeUndefined();
});
});
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0015-transport-architecture-refactoring.md:
--------------------------------------------------------------------------------
```markdown
# 15. Transport Architecture Refactoring
Date: 2025-06-22
## Status
Accepted
## Context
The current SonarQube MCP server implementation is tightly coupled to STDIO transport, making it difficult to add support for alternative transport mechanisms like HTTP or WebSocket. As per ADR-0003, the transport layer should be abstracted to allow future support for different transport types.
### Current Issues:
1. Direct dependency on `StdioServerTransport` in the main entry point
2. No abstraction layer between the MCP server and transport implementation
3. Tight coupling makes testing and extending the transport layer difficult
4. The STDIO transport requires a workaround (adding a dummy `connect` method) for TypeScript compatibility
### Requirements:
1. Support multiple transport mechanisms (STDIO, HTTP, WebSocket)
2. Maintain backward compatibility with existing STDIO transport
3. Allow transport selection via environment variables
4. Enable easy addition of new transport types in the future
5. Follow existing architectural patterns in the codebase
## Decision
We will refactor the transport architecture by introducing:
1. **Transport Interface (`ITransport`)**: A common interface that all transport implementations must follow
2. **Transport Factory (`TransportFactory`)**: A factory pattern for creating transport instances based on configuration
3. **Environment-based Configuration**: Use `MCP_TRANSPORT` environment variable to select transport type
4. **Modular Transport Implementations**: Each transport type in its own module under `src/transports/`
### Architecture Details:
```typescript
// Transport Interface
interface ITransport {
connect(server: Server): Promise<void>;
getName(): string;
}
// Transport Factory
class TransportFactory {
static create(config: ITransportConfig): ITransport;
static createFromEnvironment(): ITransport;
}
```
### Environment Variables:
- `MCP_TRANSPORT`: Transport type selection (stdio|http), defaults to 'stdio'
- `MCP_HTTP_PORT`: HTTP transport port (for future HTTP implementation)
- `MCP_HTTP_HOST`: HTTP transport host (for future HTTP implementation)
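A minimal sketch of how `createFromEnvironment()` might consume these variables (defaulting to stdio; the HTTP error mirrors the implementation notes below):
```typescript
static createFromEnvironment(): ITransport {
  const type = process.env.MCP_TRANSPORT ?? 'stdio';
  if (type === 'stdio') return new StdioTransport();
  if (type === 'http') throw new Error('HTTP transport is not yet implemented');
  throw new Error(`Unsupported MCP_TRANSPORT: ${type}`);
}
```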
## Consequences
### Positive:
1. **Extensibility**: Easy to add new transport types without modifying core logic
2. **Testability**: Transport implementations can be tested in isolation
3. **Backward Compatibility**: STDIO remains the default transport, no breaking changes
4. **Clean Architecture**: Follows SOLID principles with clear separation of concerns
5. **Type Safety**: Proper TypeScript interfaces ensure type safety across transport implementations
6. **Future-Ready**: HTTP transport can be added in a future story without architectural changes
### Negative:
1. **Additional Abstraction**: Adds one more layer of abstraction (minimal overhead)
2. **More Files**: Transport logic is now spread across multiple files (better organization)
### Neutral:
1. **Configuration**: Environment variable approach aligns with existing patterns (ADR-0008)
2. **Testing**: Requires new test files for transport modules (improves coverage)
## Implementation Notes
1. The STDIO transport workaround (adding a dummy `connect` method) is now encapsulated within the `StdioTransport` class (sketched below)
2. HTTP transport throws a "not yet implemented" error, allowing the structure to be in place for future implementation
3. All transport modules follow the project's existing patterns for module organization and exports
4. Tests are implemented using Jest globals pattern consistent with other tests in the project
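As a sketch of note 1 (assuming the MCP SDK's `Server` and `StdioServerTransport` types), the encapsulation might look like:
```typescript
class StdioTransport implements ITransport {
  getName(): string {
    return 'stdio';
  }
  async connect(server: Server): Promise<void> {
    await server.connect(new StdioServerTransport());
  }
}
```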
## Related ADRs
- ADR-0003: Uses MCP SDK to Create Server (mentions future transport abstraction)
- ADR-0008: Uses Environment Variables for Configuration (transport selection pattern)
- ADR-0010: Uses STDIO Server Transport (current transport choice)
```
--------------------------------------------------------------------------------
/src/utils/__tests__/pattern-matcher.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, expect, it } from 'vitest';
import { PatternMatcher } from '../pattern-matcher.js';
describe('PatternMatcher', () => {
describe('constructor', () => {
it('should create a pattern matcher with a simple pattern', () => {
const matcher = new PatternMatcher('test');
expect(matcher.getPattern()).toBe('test');
});
it('should create a pattern matcher with wildcards', () => {
const matcher = new PatternMatcher('*@example.com');
expect(matcher.getPattern()).toBe('*@example.com');
});
});
describe('test', () => {
it('should match exact strings', () => {
const matcher = new PatternMatcher('user@example.com');
expect(matcher.test('user@example.com')).toBe(true);
expect(matcher.test('other@example.com')).toBe(false);
});
it('should match with * wildcard', () => {
const matcher = new PatternMatcher('*@example.com');
expect(matcher.test('user@example.com')).toBe(true);
expect(matcher.test('admin@example.com')).toBe(true);
expect(matcher.test('first.last@example.com')).toBe(true);
expect(matcher.test('user@other.com')).toBe(false);
});
it('should match with ? wildcard', () => {
const matcher = new PatternMatcher('user-?');
expect(matcher.test('user-1')).toBe(true);
expect(matcher.test('user-a')).toBe(true);
expect(matcher.test('user-10')).toBe(false);
expect(matcher.test('user-')).toBe(false);
});
it('should match with multiple wildcards', () => {
const matcher = new PatternMatcher('*@*.example.com');
expect(matcher.test('user@mail.example.com')).toBe(true);
expect(matcher.test('admin@auth.example.com')).toBe(true);
expect(matcher.test('user@example.com')).toBe(false);
});
it('should escape regex special characters', () => {
const matcher = new PatternMatcher('user.name@example.com');
expect(matcher.test('user.name@example.com')).toBe(true);
expect(matcher.test('userXname@example.com')).toBe(false);
});
it('should handle patterns with brackets', () => {
const matcher = new PatternMatcher('[test]@example.com');
expect(matcher.test('[test]@example.com')).toBe(true);
expect(matcher.test('t@example.com')).toBe(false);
});
it('should handle patterns with parentheses', () => {
const matcher = new PatternMatcher('(test)*@example.com');
expect(matcher.test('(test)user@example.com')).toBe(true);
expect(matcher.test('testuser@example.com')).toBe(false);
});
it('should handle URL patterns', () => {
const matcher = new PatternMatcher('https://*.auth.example.com');
expect(matcher.test('https://prod.auth.example.com')).toBe(true);
expect(matcher.test('https://dev.auth.example.com')).toBe(true);
expect(matcher.test('http://prod.auth.example.com')).toBe(false);
});
});
describe('create', () => {
it('should create a pattern matcher successfully', () => {
const matcher = PatternMatcher.create('test@*.com', 'test-context');
expect(matcher).toBeDefined();
expect(matcher?.getPattern()).toBe('test@*.com');
});
it('should handle creation errors gracefully', () => {
// Force an error by creating an invalid regex pattern
const invalidPattern = '['; // This will cause a SyntaxError in RegExp constructor
const matcher = PatternMatcher.create(invalidPattern, 'test-context');
// The glob to regex conversion should not create invalid patterns
// But if it somehow does, the create method should handle it gracefully
// In this case, '[' becomes '\[' which is valid, so matcher is created
expect(matcher).toBeDefined();
expect(matcher?.getPattern()).toBe('[');
});
});
});
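// Note: the expectations above assume a glob-to-regex conversion roughly of
// this shape (a sketch; see ../pattern-matcher.ts for the real logic):
// escape regex metacharacters, map '*' -> '.*' and '?' -> '.', then anchor
// the result with ^...$.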
```
--------------------------------------------------------------------------------
/eslint.config.js:
--------------------------------------------------------------------------------
```javascript
// @ts-check
import tseslint from '@typescript-eslint/eslint-plugin';
import tsParser from '@typescript-eslint/parser';
import eslintConfigPrettier from 'eslint-config-prettier';
import * as jsonc from 'eslint-plugin-jsonc';
import jsoncParser from 'jsonc-eslint-parser';
import { fileURLToPath } from 'node:url';
import { dirname } from 'node:path';
const __dirname = dirname(fileURLToPath(import.meta.url));
/** @type {import('eslint').Linter.FlatConfig[]} */
export default [
// Ignore patterns
{
ignores: ['dist/**', 'coverage/**', 'node_modules/**', 'sbom.cdx.json'],
},
// Base configuration for all JS/TS files
{
files: ['**/*.{ts,tsx,js}'],
languageOptions: {
parser: tsParser,
parserOptions: {
ecmaVersion: 2024,
sourceType: 'module',
},
},
plugins: { '@typescript-eslint': tseslint },
rules: {
...tseslint.configs['recommended'].rules,
'no-console': 'warn',
'no-debugger': 'error',
},
},
// Type-aware rules for TypeScript files only
{
files: ['src/**/*.ts', 'tests/**/*.ts'],
languageOptions: {
parser: tsParser,
parserOptions: {
ecmaVersion: 2024,
sourceType: 'module',
project: true,
tsconfigRootDir: __dirname,
},
},
rules: {
...tseslint.configs['recommended-type-checked'].rules,
},
},
// JSON/JSONC/JSON5 linting configuration
{
files: ['**/*.json', '**/*.json5', '**/*.jsonc'],
languageOptions: {
parser: jsoncParser,
},
plugins: {
jsonc,
},
rules: {
...jsonc.configs['recommended-with-json'].rules,
'jsonc/sort-keys': 'off', // Keep keys in logical order, not alphabetical
'jsonc/indent': ['error', 2], // Enforce 2-space indentation in JSON files
'jsonc/key-spacing': 'error', // Enforce consistent spacing between keys and values
'jsonc/comma-dangle': ['error', 'never'], // No trailing commas in JSON
'jsonc/quotes': ['error', 'double'], // Enforce double quotes in JSON
'jsonc/quote-props': ['error', 'always'], // Always quote property names
'jsonc/no-comments': 'off', // Allow comments in JSONC files
},
},
// Specific rules for package.json
{
files: ['**/package.json'],
rules: {
'jsonc/sort-keys': [
'error',
{
pathPattern: '^$', // Root object
order: [
'name',
'version',
'description',
'keywords',
'author',
'license',
'repository',
'bugs',
'homepage',
'private',
'type',
'main',
'module',
'exports',
'files',
'bin',
'packageManager',
'engines',
'scripts',
'lint-staged',
'dependencies',
'devDependencies',
'peerDependencies',
'optionalDependencies',
],
},
],
},
},
// Specific rules for tsconfig files
{
files: ['**/tsconfig*.json'],
rules: {
'jsonc/no-comments': 'off', // Allow comments in tsconfig files
},
},
// Relaxed rules for test files
{
files: ['**/__tests__/**/*.ts', '**/*.test.ts'],
rules: {
'@typescript-eslint/no-unsafe-assignment': 'off',
'@typescript-eslint/no-unsafe-call': 'off',
'@typescript-eslint/no-unsafe-member-access': 'off',
'@typescript-eslint/no-unsafe-argument': 'off',
'@typescript-eslint/no-unsafe-return': 'off',
'@typescript-eslint/no-explicit-any': 'off',
'@typescript-eslint/unbound-method': 'off',
'@typescript-eslint/restrict-template-expressions': 'off',
},
},
// Keep Prettier last
eslintConfigPrettier,
];
```
--------------------------------------------------------------------------------
/.claude/commands/fix-sonarqube-issues.md:
--------------------------------------------------------------------------------
```markdown
# Fix SonarQube Issues
Analyze and fix all open issues reported by SonarQube for this project.
## Steps
1. **Connect to SonarQube and Check for Issues**
- Use the configured SonarQube MCP server
- Identify the project key (usually matches the repository name)
- Query all open issues for the project
- Filter by status: OPEN, CONFIRMED, REOPENED
- **If no issues found**: Report "✅ No SonarQube issues found!" and exit
2. **Analyze Retrieved Issues**
- Group by severity: BLOCKER, CRITICAL, MAJOR, MINOR, INFO
- Categorize by type:
- Code smells (maintainability issues)
- Bugs (reliability issues)
- Vulnerabilities (security issues)
- Security hotspots (potential security risks)
- Duplications (code duplication)
- Report summary of issues found
3. **Create Feature Branch** (only if issues exist)
```bash
git checkout -b fix/sonarqube-issues
```
4. **Fix Issues by Priority**
- Start with BLOCKER severity
- Then CRITICAL
- Then MAJOR
- Then MINOR
- Finally INFO
5. **For Each Issue**
- Read the affected file
- Understand the issue context
- Apply the recommended fix
- Verify the fix doesn't break existing functionality
6. **Common Issue Types and Fixes**
- **Unused variables/imports**: Remove them
- **Complex functions**: Split into smaller functions
- **Missing error handling**: Add try-catch blocks
- **Type safety issues**: Add proper TypeScript types
- **Security issues**: Sanitize inputs, use secure functions
- **Code duplication**: Extract common code into functions (see the sketch after this list)
- **Cognitive complexity**: Simplify logic, reduce nesting
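A hedged illustration of the duplication fix above (all names are hypothetical):
```typescript
// Before: both handlers validated project_key inline.
// After: the check is extracted into one shared helper.
function requireProjectKey(params: { project_key?: string }): string {
  if (!params.project_key) {
    throw new Error('project_key is required');
  }
  return params.project_key;
}
```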
7. **Validation**
- Run `pnpm verify` to ensure all tests pass
- Run `pnpm lint` to check for linting issues
- Run `pnpm typecheck` to verify TypeScript
8. **Create Changeset**
```bash
pnpm changeset
```
- Describe the fixes made
- Use patch version for bug fixes
- Use minor version for improvements
9. **Commit Changes**
```bash
git add -A
git commit -m "fix: resolve SonarQube issues
- Fix [number] code smells
- Fix [number] bugs
- Fix [number] vulnerabilities
- Improve code maintainability and reliability"
```
10. **Push Branch**
```bash
git push origin fix/sonarqube-issues
```
11. **Create Pull Request**
```bash
gh pr create --title "fix: resolve SonarQube issues" \
--body "## Summary
- Fixed all open SonarQube issues for the project
- Improved code quality, security, and maintainability
## Changes
- ✅ Fixed [X] BLOCKER issues
- ✅ Fixed [X] CRITICAL issues
- ✅ Fixed [X] MAJOR issues
- ✅ Fixed [X] MINOR issues
- ✅ Fixed [X] INFO issues
## Issue Categories
- 🐛 Bugs: [number] fixed
- 🔒 Vulnerabilities: [number] fixed
- 🧹 Code Smells: [number] fixed
- 📋 Duplications: [number] fixed
## Testing
- All tests passing
- Linting checks pass
- TypeScript compilation successful"
```
## Example Usage
```bash
# First, ensure SonarQube MCP server is configured
# Then run this command to fix all issues
# The command will:
# 1. Connect to SonarQube and check for issues
# 2. If no issues: exit early with success message
# 3. If issues exist:
# - Create a new branch
# - Fix them in priority order
# - Create a changeset
# - Commit all changes
# - Push the branch
# - Create a PR with all fixes
```
## Notes
- Some issues may be false positives - mark them as such in SonarQube
- Complex refactoring should be done carefully to avoid breaking changes
- Always run tests after fixing issues
- Consider fixing related issues together for better code organization
- The PR will need review before merging to main
- If no issues are found, no branch or PR will be created
```
--------------------------------------------------------------------------------
/src/__tests__/function-tests.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, vi } from 'vitest';
import { nullToUndefined, mapToSonarQubeParams } from '../index.js';
vi.mock('../sonarqube.js');
describe('Utility Function Tests', () => {
describe('nullToUndefined function', () => {
it('should convert null to undefined but preserve other values', () => {
expect(nullToUndefined(null)).toBeUndefined();
expect(nullToUndefined(undefined)).toBeUndefined();
// Other values should remain the same
expect(nullToUndefined(0)).toBe(0);
expect(nullToUndefined('')).toBe('');
expect(nullToUndefined('test')).toBe('test');
expect(nullToUndefined(123)).toBe(123);
expect(nullToUndefined(false)).toBe(false);
expect(nullToUndefined(true)).toBe(true);
// Objects and arrays should be passed through
const obj = { test: 'value' };
const arr = [1, 2, 3];
expect(nullToUndefined(obj)).toBe(obj);
expect(nullToUndefined(arr)).toBe(arr);
});
});
describe('mapToSonarQubeParams function', () => {
it('should map MCP tool parameters to SonarQube client parameters', () => {
const result = mapToSonarQubeParams({
project_key: 'my-project',
severity: 'MAJOR',
page: '10',
page_size: '25',
statuses: ['OPEN', 'CONFIRMED'],
resolutions: ['FALSE-POSITIVE'],
resolved: 'true',
types: ['BUG', 'VULNERABILITY'],
rules: ['rule1', 'rule2'],
tags: ['tag1', 'tag2'],
created_after: '2023-01-01',
created_before: '2023-12-31',
created_at: '2023-06-15',
created_in_last: '30d',
assignees: ['user1', 'user2'],
authors: ['author1', 'author2'],
cwe: ['cwe1', 'cwe2'],
languages: ['java', 'js'],
owasp_top10: ['a1', 'a2'],
sans_top25: ['sans1', 'sans2'],
sonarsource_security: ['ss1', 'ss2'],
on_component_only: 'true',
facets: ['facet1', 'facet2'],
since_leak_period: 'true',
in_new_code_period: 'true',
});
// Check key mappings
expect(result.projectKey).toBe('my-project');
expect(result.severity).toBe('MAJOR');
expect(result.page).toBe('10');
expect(result.pageSize).toBe('25');
expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
expect(result.resolutions).toEqual(['FALSE-POSITIVE']);
expect(result.resolved).toBe('true');
expect(result.types).toEqual(['BUG', 'VULNERABILITY']);
expect(result.rules).toEqual(['rule1', 'rule2']);
expect(result.tags).toEqual(['tag1', 'tag2']);
expect(result.createdAfter).toBe('2023-01-01');
expect(result.createdBefore).toBe('2023-12-31');
expect(result.createdAt).toBe('2023-06-15');
expect(result.createdInLast).toBe('30d');
expect(result.assignees).toEqual(['user1', 'user2']);
expect(result.authors).toEqual(['author1', 'author2']);
expect(result.cwe).toEqual(['cwe1', 'cwe2']);
expect(result.languages).toEqual(['java', 'js']);
expect(result.owaspTop10).toEqual(['a1', 'a2']);
expect(result.sansTop25).toEqual(['sans1', 'sans2']);
expect(result.sonarsourceSecurity).toEqual(['ss1', 'ss2']);
expect(result.onComponentOnly).toBe('true');
expect(result.facets).toEqual(['facet1', 'facet2']);
expect(result.sinceLeakPeriod).toBe('true');
expect(result.inNewCodePeriod).toBe('true');
});
it('should handle null and undefined values correctly', () => {
const result = mapToSonarQubeParams({
project_key: 'my-project',
severity: null,
statuses: null,
resolved: null,
});
expect(result.projectKey).toBe('my-project');
expect(result.severity).toBeUndefined();
expect(result.statuses).toBeUndefined();
expect(result.resolved).toBeUndefined();
});
});
});
```
--------------------------------------------------------------------------------
/src/utils/client-factory.ts:
--------------------------------------------------------------------------------
```typescript
import { createLogger } from './logger.js';
import type { ISonarQubeClient } from '../types/index.js';
import { createSonarQubeClientFromEnv } from '../sonarqube.js';
import { SonarQubeAPIError, SonarQubeErrorType } from '../errors.js';
const logger = createLogger('client-factory');
/**
* Validates environment variables for SonarQube authentication
* @throws Error if no authentication method is configured or if invalid values are provided
*/
export const validateEnvironmentVariables = () => {
logger.debug('Validating environment variables');
// Check if any authentication method is configured
const hasToken = !!process.env.SONARQUBE_TOKEN;
const hasBasicAuth = !!process.env.SONARQUBE_USERNAME;
const hasPasscode = !!process.env.SONARQUBE_PASSCODE;
if (!hasToken && !hasBasicAuth && !hasPasscode) {
const error = new SonarQubeAPIError(
'No SonarQube authentication configured',
SonarQubeErrorType.CONFIGURATION_ERROR,
{
operation: 'validateEnvironmentVariables',
solution:
'Set one of the following authentication methods:\n' +
'• SONARQUBE_TOKEN for token-based authentication (recommended)\n' +
'• SONARQUBE_USERNAME and SONARQUBE_PASSWORD for basic authentication\n' +
'• SONARQUBE_PASSCODE for system passcode authentication',
context: {
hasToken,
hasBasicAuth,
hasPasscode,
},
}
);
logger.error('Missing authentication environment variables', error);
throw error;
}
// Validate URL if provided
if (process.env.SONARQUBE_URL) {
try {
new URL(process.env.SONARQUBE_URL);
logger.debug('Valid SONARQUBE_URL provided', { url: process.env.SONARQUBE_URL });
} catch {
const error = new SonarQubeAPIError(
`Invalid SONARQUBE_URL: "${process.env.SONARQUBE_URL}"`,
SonarQubeErrorType.CONFIGURATION_ERROR,
{
operation: 'validateEnvironmentVariables',
solution:
'Provide a valid URL including protocol (e.g., https://sonarcloud.io or https://your-sonarqube.com)\n' +
'Note: URL should not have a trailing slash',
context: {
providedUrl: process.env.SONARQUBE_URL,
},
}
);
logger.error('Invalid SONARQUBE_URL', error);
throw error;
}
}
// Validate organization if provided
if (process.env.SONARQUBE_ORGANIZATION && process.env.SONARQUBE_ORGANIZATION.trim() === '') {
const error = new SonarQubeAPIError(
'Empty SONARQUBE_ORGANIZATION',
SonarQubeErrorType.CONFIGURATION_ERROR,
{
operation: 'validateEnvironmentVariables',
solution:
'Provide a valid organization key (e.g., "my-org") or remove the environment variable',
context: {
providedValue: '(empty string)',
},
}
);
logger.error('Empty SONARQUBE_ORGANIZATION', error);
throw error;
}
// Log which authentication method is being used
if (hasToken) {
logger.info('Using token authentication');
} else if (hasBasicAuth) {
logger.info('Using basic authentication');
} else if (hasPasscode) {
logger.info('Using passcode authentication');
}
logger.info('Environment variables validated successfully');
};
// Create the SonarQube client
const createDefaultClient = (): ISonarQubeClient => {
// Validate environment variables
validateEnvironmentVariables();
// Create and return client
return createSonarQubeClientFromEnv();
};
// Default client instance for backward compatibility
// Created lazily to allow environment variable validation at runtime
let defaultClient: ISonarQubeClient | null = null;
export const getDefaultClient = (): ISonarQubeClient => {
defaultClient ??= createDefaultClient();
return defaultClient;
};
// Export for testing purposes
export const resetDefaultClient = () => {
defaultClient = null;
};
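// Illustrative usage (a sketch): the client is created lazily so environment
// validation happens at first use, and tests can reset the singleton.
//   const client = getDefaultClient(); // validates env vars on first call
//   resetDefaultClient();              // e.g. in afterEach()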
```
--------------------------------------------------------------------------------
/src/schemas/common.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
import { numberOrStringToString, parseJsonStringArray } from '../utils/transforms.js';
/**
* Common schemas used across multiple domains
*/
// Severity schemas
export const severitySchema = z
.enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
.nullable()
.optional();
export const severitiesSchema = z
.union([z.array(z.enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])), z.string()])
.transform((val) => {
const parsed = parseJsonStringArray(val);
// Validate that all values are valid severities
if (parsed && Array.isArray(parsed)) {
return parsed.filter((v) => ['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'].includes(v));
}
return parsed;
})
.nullable()
.optional();
// Status schemas
export const statusSchema = z
.union([z.array(z.enum(['OPEN', 'CONFIRMED', 'REOPENED', 'RESOLVED', 'CLOSED'])), z.string()])
.transform((val) => {
const parsed = parseJsonStringArray(val);
// Validate that all values are valid statuses
if (parsed && Array.isArray(parsed)) {
return parsed.filter((v) =>
['OPEN', 'CONFIRMED', 'REOPENED', 'RESOLVED', 'CLOSED'].includes(v)
);
}
return parsed;
})
.nullable()
.optional();
// Resolution schemas
export const resolutionSchema = z
.union([z.array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED'])), z.string()])
.transform((val) => {
const parsed = parseJsonStringArray(val);
// Validate that all values are valid resolutions
if (parsed && Array.isArray(parsed)) {
return parsed.filter((v) => ['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED'].includes(v));
}
return parsed;
})
.nullable()
.optional();
// Type schemas
export const typeSchema = z
.union([z.array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT'])), z.string()])
.transform((val) => {
const parsed = parseJsonStringArray(val);
// Validate that all values are valid types
if (parsed && Array.isArray(parsed)) {
return parsed.filter((v) =>
['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT'].includes(v)
);
}
return parsed;
})
.nullable()
.optional();
// Clean Code taxonomy schemas
export const cleanCodeAttributeCategoriesSchema = z
.union([z.array(z.enum(['ADAPTABLE', 'CONSISTENT', 'INTENTIONAL', 'RESPONSIBLE'])), z.string()])
.transform((val) => {
const parsed = parseJsonStringArray(val);
// Validate that all values are valid categories
if (parsed && Array.isArray(parsed)) {
return parsed.filter((v) =>
['ADAPTABLE', 'CONSISTENT', 'INTENTIONAL', 'RESPONSIBLE'].includes(v)
);
}
return parsed;
})
.nullable()
.optional();
export const impactSeveritiesSchema = z
.union([z.array(z.enum(['HIGH', 'MEDIUM', 'LOW'])), z.string()])
.transform((val) => {
const parsed = parseJsonStringArray(val);
// Validate that all values are valid impact severities
if (parsed && Array.isArray(parsed)) {
return parsed.filter((v) => ['HIGH', 'MEDIUM', 'LOW'].includes(v));
}
return parsed;
})
.nullable()
.optional();
export const impactSoftwareQualitiesSchema = z
.union([z.array(z.enum(['MAINTAINABILITY', 'RELIABILITY', 'SECURITY'])), z.string()])
.transform((val) => {
const parsed = parseJsonStringArray(val);
// Validate that all values are valid software qualities
if (parsed && Array.isArray(parsed)) {
return parsed.filter((v) => ['MAINTAINABILITY', 'RELIABILITY', 'SECURITY'].includes(v));
}
return parsed;
})
.nullable()
.optional();
// Pull request schema - accepts either string or number and converts to string
export const pullRequestSchema = z
.union([z.string(), z.number()])
.optional()
.transform(numberOrStringToString);
// Pull request schema with nullable - for schemas that allow null values
export const pullRequestNullableSchema = z
.union([z.string(), z.number()])
.nullable()
.optional()
.transform(numberOrStringToString);
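// Illustrative behavior (assumed from numberOrStringToString): both schemas
// normalize numeric input to a string, e.g.
//   pullRequestSchema.parse(123)   -> '123'
//   pullRequestSchema.parse('123') -> '123'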
```
--------------------------------------------------------------------------------
/src/monitoring/metrics.ts:
--------------------------------------------------------------------------------
```typescript
import { createLogger } from '../utils/logger.js';
const logger = createLogger('MetricsService');
/**
* Simplified metrics service for stdio-only transport
* Tracks metrics in memory for circuit breaker and logging purposes
*/
export class MetricsService {
private readonly metrics = new Map<string, number>();
constructor() {
logger.info('Metrics service initialized (stdio mode)');
}
/**
* Record a SonarQube API request
*/
recordSonarQubeRequest(endpoint: string, status: 'success' | 'error', duration: number): void {
const key = `sonarqube.${endpoint}.${status}`;
this.metrics.set(key, (this.metrics.get(key) || 0) + 1);
logger.debug(`SonarQube API call to ${endpoint}: ${status} (${duration}s)`);
}
/**
* Record a SonarQube API error
*/
recordSonarQubeError(type: string, endpoint: string): void {
const key = `sonarqube.error.${type}.${endpoint}`;
this.metrics.set(key, (this.metrics.get(key) || 0) + 1);
logger.warn(`SonarQube API error: ${type} on ${endpoint}`);
}
/**
* Update circuit breaker state
*/
updateCircuitBreakerState(service: string, state: 'closed' | 'open' | 'half-open'): void {
let stateValue: number;
if (state === 'open') {
stateValue = 1;
} else if (state === 'half-open') {
stateValue = 0.5;
} else {
stateValue = 0;
}
this.metrics.set(`circuit-breaker.state.${service}`, stateValue);
logger.info(`Circuit breaker ${service}: ${state}`);
}
/**
* Record a circuit breaker failure
*/
recordCircuitBreakerFailure(service: string): void {
const key = `circuit-breaker.failure.${service}`;
this.metrics.set(key, (this.metrics.get(key) || 0) + 1);
logger.warn(`Circuit breaker failure: ${service}`);
}
/**
* Get metrics in Prometheus format (for testing compatibility)
*/
getMetrics(): string {
const lines: string[] = [];
for (const [key, value] of Array.from(this.metrics.entries())) {
// Convert internal format to Prometheus format for tests
if (key.startsWith('circuit-breaker.failure.')) {
const service = key.replace('circuit-breaker.failure.', '');
lines.push(`mcp_circuit_breaker_failures_total{service="${service}"} ${value}`);
} else if (key.startsWith('circuit-breaker.state.')) {
const service = key.replace('circuit-breaker.state.', '');
lines.push(`mcp_circuit_breaker_state{service="${service}"} ${value}`);
} else if (key.includes('sonarqube.')) {
// Convert other metrics as needed
lines.push(`# Internal metric: ${key} = ${value}`);
}
}
return lines.join('\n');
}
/**
* Stop monitoring (cleanup)
*/
stopMonitoring(): void {
// No-op for stdio mode
}
}
// Singleton instance
let metricsService: MetricsService | null = null;
/**
* Get or create the metrics service instance
*/
export function getMetricsService(): MetricsService {
metricsService ??= new MetricsService();
return metricsService;
}
/**
* Cleanup the metrics service (for testing)
*/
export function cleanupMetricsService(): void {
if (metricsService) {
metricsService.stopMonitoring();
metricsService = null;
}
}
/**
* Track a SonarQube API request
*/
export function trackSonarQubeRequest(
endpoint: string,
success: boolean,
duration: number,
errorType?: string
): void {
const metrics = getMetricsService();
metrics.recordSonarQubeRequest(endpoint, success ? 'success' : 'error', duration);
if (!success && errorType) {
metrics.recordSonarQubeError(errorType, endpoint);
}
}
/**
* Update circuit breaker metrics
*/
export function updateCircuitBreakerMetrics(
service: string,
state: 'closed' | 'open' | 'half-open'
): void {
const metrics = getMetricsService();
metrics.updateCircuitBreakerState(service, state);
}
/**
* Track circuit breaker failure
*/
export function trackCircuitBreakerFailure(service: string): void {
const metrics = getMetricsService();
metrics.recordCircuitBreakerFailure(service);
}
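// Illustrative getMetrics() output after a few recorded events (values are
// hypothetical; the format mirrors the conversions above):
//   mcp_circuit_breaker_failures_total{service="sonarqube"} 2
//   mcp_circuit_breaker_state{service="sonarqube"} 1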
```
--------------------------------------------------------------------------------
/.claude/commands/spec-feature.md:
--------------------------------------------------------------------------------
```markdown
# Spec a Feature
You are about to create a feature specification in Gherkin format and turn it into a GitHub issue ready for implementation.
## Process
1. **Gather Requirements**
- Ask the user for the feature name and description if not provided
- Understand the business value and user needs
- Identify scope, non-goals, and risks
2. **Write Gherkin Specification**
Create a comprehensive specification including:
- **Feature** name and description
- **Background** (if needed)
- **Scenarios** using Given/When/Then format
- **Examples** with data tables where appropriate
- **Acceptance Criteria**
- **Non-Goals** (what this feature won't do)
- **Risks & Mitigations**
- **Technical Considerations**
3. **Format as GitHub Issue**
Structure the issue with:
- Clear title: `feat: [Feature Name]`
- Labels: `enhancement`, `needs-implementation`
- Milestone (if applicable)
- Complete Gherkin specification in the body
- Testing requirements
4. **Create the Issue**
Use the `gh` CLI to create the issue:
```bash
gh issue create --title "feat: [Feature Name]" \
--body "[Full specification]" \
--label enhancement \
--label needs-implementation
```
## Template for Issue Body
````markdown
## Feature: [Feature Name]
### Business Value
[Describe the business value and user benefit]
### User Story
As a [type of user]
I want [goal/desire]
So that [benefit/value]
### Gherkin Specification
```gherkin
Feature: [Feature Name]
[Feature description explaining the feature's purpose]
Background:
Given [common preconditions for all scenarios]
Scenario: [Happy path scenario]
Given [initial context]
When [action/event]
Then [expected outcome]
And [additional outcomes]
Scenario: [Edge case or error scenario]
Given [initial context]
When [action/event]
Then [expected outcome]
Scenario Outline: [Parameterized scenario if needed]
Given [context with <parameter>]
When [action with <parameter>]
Then [outcome with <expected>]
Examples:
| parameter | expected |
| value1 | result1 |
| value2 | result2 |
```
### Acceptance Criteria
- [ ] Criterion 1
- [ ] Criterion 2
- [ ] Criterion 3
### Non-Goals
- This feature will NOT [explicitly excluded functionality]
- Out of scope: [related but excluded items]
### Risks & Mitigations
- **Risk**: [Potential risk]
**Mitigation**: [How to address it]
### Technical Considerations
- Architecture impact: [if any]
- Performance considerations: [if any]
- Security considerations: [if any]
- Dependencies: [external dependencies or prerequisites]
### Testing Requirements
- Unit test coverage for all new functions
- Property-based tests for business logic invariants
- Integration tests for external interactions
- Edge cases and error scenarios covered
### Definition of Done
- [ ] All acceptance criteria met
- [ ] All tests passing
- [ ] Documentation updated
- [ ] Code reviewed and approved
- [ ] Changeset added
- [ ] No security vulnerabilities
- [ ] Performance requirements met
````
## Important Notes
1. **Be Specific**: Write clear, unambiguous scenarios
2. **Focus on Behavior**: Describe WHAT, not HOW
3. **Keep it Testable**: Each scenario should be verifiable
4. **Consider Edge Cases**: Include error and boundary scenarios
5. **Make it Implementable**: Provide enough detail for the `implement-github-issue` command
## Example Output
After gathering requirements, create an issue like:
```bash
gh issue create --title "feat: Add user authentication with JWT" \
--body "## Feature: User Authentication with JWT
### Business Value
Enable secure user authentication to protect user data and provide personalized experiences.
### User Story
As a user
I want to securely log in to the application
So that I can access my personal data and features
### Gherkin Specification
[... full specification ...]" \
--label enhancement \
--label needs-implementation
````
The created issue will be ready for implementation using the `/implement-github-issue` command.
```
--------------------------------------------------------------------------------
/src/__tests__/transformation-util.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
describe('Field transformation utilities', () => {
it('should transform array parameters correctly', () => {
// Simulate the transformation logic in the tool registration
function transformToArray(value: unknown): string[] {
return Array.isArray(value) ? value : [value as string];
}
// Test with string input
expect(transformToArray('single')).toEqual(['single']);
// Test with array input
expect(transformToArray(['one', 'two'])).toEqual(['one', 'two']);
// Test with empty array
expect(transformToArray([])).toEqual([]);
});
it('should transform page parameters correctly', () => {
// Simulate the page transform logic
function transformPage(val: string | undefined | null): number | null | undefined {
return val ? parseInt(val, 10) || null : null;
}
// Valid number
expect(transformPage('10')).toBe(10);
// Invalid number
expect(transformPage('not-a-number')).toBe(null);
// Empty string
expect(transformPage('')).toBe(null);
// Undefined or null
expect(transformPage(undefined)).toBe(null);
expect(transformPage(null)).toBe(null);
});
it('should correctly transform page and page_size in tool handlers', () => {
// Simulate the transform in tool handler
function transformPageParams(params: Record<string, unknown>): {
page?: number;
pageSize?: number;
} {
function nullToUndefined<T>(value: T | null | undefined): T | undefined {
return value === null ? undefined : value;
}
const page = nullToUndefined(params.page) as number | undefined;
const pageSize = nullToUndefined(params.page_size) as number | undefined;
return {
...(page !== undefined && { page }),
...(pageSize !== undefined && { pageSize }),
};
}
// Test with numbers
expect(transformPageParams({ page: 5, page_size: 20 })).toEqual({ page: 5, pageSize: 20 });
// Test with strings
expect(transformPageParams({ page: '5', page_size: '20' })).toEqual({
page: '5',
pageSize: '20',
});
// Test with null
expect(transformPageParams({ page: null, page_size: null })).toEqual({
page: undefined,
pageSize: undefined,
});
// Test with mixed
expect(transformPageParams({ page: 5, page_size: null })).toEqual({
page: 5,
pageSize: undefined,
});
// Test with undefined
expect(transformPageParams({ page: undefined, page_size: undefined })).toEqual({
page: undefined,
pageSize: undefined,
});
// Test with empty object
expect(transformPageParams({})).toEqual({ page: undefined, pageSize: undefined });
});
it('should handle component key transformation correctly', () => {
// Simulate the component key transformation in the getComponentsMeasures handler
function transformComponentKeys(componentKeys: string | string[]): string {
return Array.isArray(componentKeys) ? componentKeys.join(',') : componentKeys;
}
// Test with string
expect(transformComponentKeys('single-component')).toBe('single-component');
// Test with array
expect(transformComponentKeys(['component1', 'component2'])).toBe('component1,component2');
// Test with single item array
expect(transformComponentKeys(['component1'])).toBe('component1');
// Test with empty array
expect(transformComponentKeys([])).toBe('');
});
it('should handle metric keys transformation correctly', () => {
// Simulate the metric keys transformation in the getComponentMeasures handler
function transformMetricKeys(metricKeys: string | string[]): string {
return Array.isArray(metricKeys) ? metricKeys.join(',') : metricKeys;
}
// Test with string
expect(transformMetricKeys('single-metric')).toBe('single-metric');
// Test with array
expect(transformMetricKeys(['metric1', 'metric2'])).toBe('metric1,metric2');
// Test with single item array
expect(transformMetricKeys(['metric1'])).toBe('metric1');
// Test with empty array
expect(transformMetricKeys([])).toBe('');
});
});
```
--------------------------------------------------------------------------------
/src/__tests__/protocol-version.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { createLogger } from '../utils/logger.js';
import {
SDK_VERSION,
SUPPORTED_PROTOCOL_VERSIONS,
LATEST_PROTOCOL_VERSION,
DEFAULT_NEGOTIATED_PROTOCOL_VERSION,
VERSION_INFO,
} from '../config/versions.js';
describe('Protocol Version Support', () => {
beforeEach(() => {
vi.clearAllMocks();
});
describe('Server Initialization', () => {
it('should log supported protocol versions on startup', () => {
const logger = createLogger('test');
const infoSpy = vi.spyOn(logger, 'info');
// Simulate server startup logging
logger.info('Starting SonarQube MCP server', {
...VERSION_INFO,
logFile: 'not configured',
logLevel: 'DEBUG',
elicitation: 'disabled',
});
expect(infoSpy).toHaveBeenCalledWith('Starting SonarQube MCP server', {
...VERSION_INFO,
logFile: 'not configured',
logLevel: 'DEBUG',
elicitation: 'disabled',
});
});
it('should log protocol negotiation info on successful connection', () => {
const logger = createLogger('test');
const infoSpy = vi.spyOn(logger, 'info');
// Simulate successful connection logging
logger.info('SonarQube MCP server started successfully', {
mcpProtocolInfo: 'Protocol version will be negotiated with client during initialization',
});
expect(infoSpy).toHaveBeenCalledWith('SonarQube MCP server started successfully', {
mcpProtocolInfo: 'Protocol version will be negotiated with client during initialization',
});
});
});
describe('Protocol Version Constants', () => {
it('should support all documented protocol versions', () => {
// Verify the versions match our documentation
expect(SUPPORTED_PROTOCOL_VERSIONS).toContain(LATEST_PROTOCOL_VERSION);
expect(SUPPORTED_PROTOCOL_VERSIONS).toContain(DEFAULT_NEGOTIATED_PROTOCOL_VERSION);
expect(SUPPORTED_PROTOCOL_VERSIONS.length).toBe(4);
expect(SUPPORTED_PROTOCOL_VERSIONS).toEqual([
'2025-06-18',
'2025-03-26',
'2024-11-05',
'2024-10-07',
]);
});
it('should use semantic versioning for SDK', () => {
const versionParts = SDK_VERSION.split('.');
expect(versionParts).toHaveLength(3);
expect(parseInt(versionParts[0]!, 10)).toBeGreaterThanOrEqual(1);
expect(parseInt(versionParts[1]!, 10)).toBeGreaterThanOrEqual(13);
expect(parseInt(versionParts[2]!, 10)).toBeGreaterThanOrEqual(0);
});
});
describe('Protocol Compatibility', () => {
it('should maintain backward compatibility with older protocol versions', () => {
const oldestSupportedVersion = '2024-10-07';
// Ensure we still support the oldest protocol version
expect(SUPPORTED_PROTOCOL_VERSIONS).toContain(oldestSupportedVersion);
});
it('should document protocol version support in COMPATIBILITY.md', () => {
// This test verifies that we have proper documentation
// The actual file content is maintained separately
const expectedSections = [
'Protocol Version Support',
'Version Negotiation',
'Current SDK Version',
'Feature Compatibility',
'Client Compatibility',
'SDK Update Process',
];
// This is a documentation test - it doesn't execute but serves as a reminder
expectedSections.forEach((section) => {
expect(section).toBeTruthy();
});
});
});
describe('SDK Version Management', () => {
it('should have consistent SDK version references', () => {
// Verify SDK version is correctly set
expect(SDK_VERSION).toBe('1.13.0');
});
it('should follow SDK update process as documented', () => {
// This test serves as a reminder of the update process
const updateSteps = [
'Check SDK release notes',
'Review changelog for breaking changes',
'Update dependency in package.json',
'Run tests',
'Update COMPATIBILITY.md',
'Test with multiple clients',
];
updateSteps.forEach((step) => {
expect(step).toBeTruthy();
});
});
});
});
```
--------------------------------------------------------------------------------
/src/__tests__/handlers/projects-authorization.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, vi, type MockedFunction } from 'vitest';
import { handleSonarQubeProjects } from '../../handlers/projects.js';
import type { ISonarQubeClient } from '../../types/index.js';
describe('Projects Handler Authorization Error', () => {
// Mock client
const mockClient: ISonarQubeClient = {
webApiClient: {} as any,
listProjects: vi.fn() as any,
getIssues: vi.fn() as any,
getMetrics: vi.fn() as any,
getHealth: vi.fn() as any,
getStatus: vi.fn() as any,
ping: vi.fn() as any,
getComponentMeasures: vi.fn() as any,
getComponentsMeasures: vi.fn() as any,
getMeasuresHistory: vi.fn() as any,
listQualityGates: vi.fn() as any,
getQualityGate: vi.fn() as any,
getProjectQualityGateStatus: vi.fn() as any,
getSourceCode: vi.fn() as any,
getScmBlame: vi.fn() as any,
hotspots: vi.fn() as any,
hotspot: vi.fn() as any,
updateHotspotStatus: vi.fn() as any,
markIssueFalsePositive: vi.fn() as any,
markIssueWontFix: vi.fn() as any,
markIssuesFalsePositive: vi.fn() as any,
markIssuesWontFix: vi.fn() as any,
addCommentToIssue: vi.fn() as any,
assignIssue: vi.fn() as any,
confirmIssue: vi.fn() as any,
unconfirmIssue: vi.fn() as any,
resolveIssue: vi.fn() as any,
reopenIssue: vi.fn() as any,
};
it('should provide helpful error message when authorization fails', async () => {
// Mock the listProjects method to throw an authorization error
const authError = new Error('Insufficient privileges');
(mockClient.listProjects as MockedFunction<typeof mockClient.listProjects>).mockRejectedValue(
authError
);
await expect(handleSonarQubeProjects({}, mockClient)).rejects.toThrow(
/Note: The 'projects' tool requires admin permissions/
);
});
it('should provide helpful error message for error containing "403"', async () => {
vi.clearAllMocks();
const authError = new Error('Error 403 Forbidden');
(mockClient.listProjects as MockedFunction<typeof mockClient.listProjects>).mockRejectedValue(
authError
);
await expect(handleSonarQubeProjects({}, mockClient)).rejects.toThrow(
/Note: The 'projects' tool requires admin permissions/
);
});
it('should provide helpful error message for "Insufficient privileges" error', async () => {
vi.clearAllMocks();
const authError = new Error('Insufficient privileges');
(mockClient.listProjects as MockedFunction<typeof mockClient.listProjects>).mockRejectedValue(
authError
);
await expect(handleSonarQubeProjects({}, mockClient)).rejects.toThrow(
/Note: The 'projects' tool requires admin permissions/
);
});
it('should not modify error message for non-authorization errors', async () => {
// Mock a different type of error
const serverError = new Error('Internal server error');
(mockClient.listProjects as MockedFunction<typeof mockClient.listProjects>).mockRejectedValue(
serverError
);
await expect(handleSonarQubeProjects({}, mockClient)).rejects.toThrow('Internal server error');
await expect(handleSonarQubeProjects({}, mockClient)).rejects.not.toThrow(
/Note: The 'projects' tool requires admin permissions/
);
});
it('should handle successful response without error', async () => {
const mockResponse = {
projects: [
{
key: 'test-project',
name: 'Test Project',
qualifier: 'TRK',
visibility: 'public',
lastAnalysisDate: '2023-01-01',
revision: 'abc123',
managed: false,
},
],
paging: {
pageIndex: 1,
pageSize: 10,
total: 1,
},
};
(mockClient.listProjects as MockedFunction<typeof mockClient.listProjects>).mockResolvedValue(
mockResponse
);
const result = await handleSonarQubeProjects({}, mockClient);
const firstContent = result.content[0]!;
if ('text' in firstContent && typeof firstContent.text === 'string') {
const data = JSON.parse(firstContent.text);
expect(data.projects).toHaveLength(1);
expect(data.projects[0].key).toBe('test-project');
} else {
throw new Error('Expected text content');
}
});
});
```
--------------------------------------------------------------------------------
/scripts/ci-local.sh:
--------------------------------------------------------------------------------
```bash
#!/usr/bin/env bash
set -euo pipefail
# =============================================================================
# Local CI Simulation Script
# Purpose: Run the same checks as CI locally before pushing
# Usage: ./scripts/ci-local.sh [--fast] [--no-security]
# =============================================================================
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
FAST_MODE=false
SKIP_SECURITY=false
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log() {
echo -e "${GREEN}[CI-LOCAL]${NC} $1"
}
warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
error() {
echo -e "${RED}[ERROR]${NC} $1"
exit 1
}
info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--fast)
FAST_MODE=true
shift
;;
--no-security)
SKIP_SECURITY=true
shift
;;
--help|-h)
echo "Usage: $0 [--fast] [--no-security]"
echo " --fast Skip slower checks (container scan, full coverage)"
echo " --no-security Skip security scans"
exit 0
;;
*)
error "Unknown option: $1"
;;
esac
done
cd "$PROJECT_ROOT"
log "Starting local CI simulation..."
if [ "$FAST_MODE" = true ]; then
info "Running in fast mode (some checks skipped)"
fi
# 1. Install dependencies (like CI setup)
log "Installing dependencies..."
pnpm install --frozen-lockfile
# 2. Core validation (parallel in real CI)
log "Running core validation checks..."
echo " → Audit check..."
pnpm audit --audit-level critical
echo " → Type checking..."
pnpm typecheck
echo " → Linting..."
pnpm lint
echo " → Format checking..."
pnpm format
echo " → Running tests..."
if [ "$FAST_MODE" = true ]; then
pnpm test
else
pnpm test:coverage
fi
# 3. Build check
log "Checking build..."
pnpm build
# 4. Security scans (if not skipped)
if [ "$SKIP_SECURITY" = false ]; then
log "Running security checks..."
# Check if OSV scanner is available
if command -v osv-scanner &> /dev/null; then
echo " → OSV vulnerability scan..."
if [ -r pnpm-lock.yaml ]; then
osv-scanner --lockfile=pnpm-lock.yaml
else
warn "pnpm-lock.yaml not found or not readable. Skipping OSV scan."
fi
else
warn "OSV scanner not installed. Run: go install github.com/google/osv-scanner/cmd/osv-scanner@latest"
fi
# Container scan (if not in fast mode)
if [ "$FAST_MODE" = false ] && [ -f "./scripts/scan-container.sh" ]; then
if command -v docker &> /dev/null && command -v trivy &> /dev/null; then
echo " → Container security scan..."
./scripts/scan-container.sh
else
warn "Docker or Trivy not available. Skipping container scan."
fi
fi
else
info "Security scans skipped"
fi
# 5. Changeset validation (simulate CI check)
log "Checking changesets..."
if git rev-parse --verify HEAD~1 >/dev/null 2>&1; then
# Check if we have any commits to compare
if git diff --name-only HEAD~1 | grep -E '(src/|tests/)' > /dev/null; then
# We have code changes, check for changesets
if ! pnpm changeset:status > /dev/null 2>&1; then
warn "Code changes detected but no changesets found"
echo " Run 'pnpm changeset' to create one, or 'pnpm changeset --empty' for non-release changes"
else
info "Changesets validated"
fi
else
info "No code changes detected, changeset check skipped"
fi
else
info "No previous commit found, changeset check skipped"
fi
# 6. Final summary
log "✅ Local CI simulation complete!"
echo
info "Summary:"
echo " ✅ Dependencies installed"
echo " ✅ Core validation passed"
echo " ✅ Build successful"
if [ "$SKIP_SECURITY" = false ]; then
echo " ✅ Security checks completed"
fi
echo " ✅ Changesets validated"
echo
info "Your code is ready to push! 🚀"
echo
info "Next steps:"
echo " 1. git add . && git commit -m 'your commit message'"
echo " 2. git push origin your-branch"
echo " 3. Create a pull request"
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0019-simplify-to-stdio-only-transport-for-mcp-gateway-deployment.md:
--------------------------------------------------------------------------------
```markdown
# 19. Simplify to stdio-only transport for MCP gateway deployment
Date: 2025-01-30
Partially Superseded: 2025-10-12 by ADR-0028
## Status
Partially Superseded by ADR-0028
This decision was partially reversed on 2025-10-12. While the removal of OAuth-based HTTP transport and authentication complexity remains valid, the "stdio-only" decision was superseded by ADR-0028, which re-introduced HTTP transport in a simpler, session-based form without OAuth complexity.
**What remains valid from this ADR:**
- Removal of OAuth 2.0 authentication infrastructure (60+ files)
- Removal of service account management and permission filtering
- Delegation of enterprise features to MCP gateways
- Simplified authentication model
**What was superseded:**
- "stdio-only" transport decision (HTTP transport re-added in ADR-0028)
- Removal of all HTTP endpoints (HTTP re-added with session management)
- Removal of SSE (SSE re-added for real-time notifications)
See ADR-0028 for the current HTTP transport implementation (session-based without OAuth).
## Context
The SonarQube MCP Server initially supported only stdio transport (ADR-0010). Later, HTTP transport with OAuth 2.0 support was added (ADR-0016) to enable enterprise features like multi-tenancy, authentication, and audit logging.
However, this added significant complexity:
- 60+ authentication/authorization files
- Complex OAuth token validation
- Service account management
- Permission filtering system
- Audit logging infrastructure
- HTTP server configuration
- External IdP integration
Meanwhile, the MCP ecosystem has evolved with gateway solutions that handle these enterprise concerns:
- Docker MCP Gateway
- IBM Context Forge
- SGNL
- Operant
These gateways provide authentication, multi-tenancy, monitoring, and other enterprise features at the gateway layer, making the HTTP transport implementation redundant.
## Decision
We will simplify the MCP server to support only stdio transport, removing all HTTP, OAuth, SSE, and related enterprise infrastructure. Enterprise features will be handled by MCP gateways.
This involves:
1. Removing HTTP transport and all OAuth/authentication code
2. Removing service account management and permission filtering
3. Removing audit logging (handled by gateways)
4. Removing Kubernetes/Helm/Terraform deployment configs
5. Simplifying configuration to core SonarQube settings
6. Reducing Docker image size and resource requirements
## Consequences
### Positive
- **Reduced Complexity**: ~40% reduction in codebase size
- **Improved Maintainability**: Focus on core SonarQube integration
- **Better Separation of Concerns**: Business logic vs infrastructure
- **Faster Startup**: No HTTP server or auth initialization
- **Smaller Attack Surface**: No network exposure
- **Easier Testing**: No auth/permission mocking needed
- **Gateway Flexibility**: Users can choose their preferred gateway
### Negative
- **Breaking Change**: Users of HTTP transport must migrate
- **Feature Migration**: Enterprise users need to adopt MCP gateways
- **Documentation Updates**: Significant documentation changes required
### Neutral
- **Unix Philosophy**: Aligns with "do one thing well"
- **Ecosystem Evolution**: Follows MCP community direction
- **Gateway Pattern**: Standard in microservices architecture
## Implementation
The simplification will be implemented in phases:
1. **Phase 1**: Remove HTTP/OAuth infrastructure files
2. **Phase 2**: Simplify configuration and environment variables
3. **Phase 3**: Update documentation for stdio-only approach
4. **Phase 4**: Optimize core functionality and startup time
5. **Phase 5**: Optimize Docker image for minimal footprint
6. **Phase 6**: Update tests and validate functionality
## Migration Path
Users currently using HTTP transport should:
1. Deploy an MCP gateway (Docker MCP Gateway, IBM Context Forge, etc.)
2. Configure the stdio server behind the gateway (a launch sketch follows this list)
3. Move authentication/authorization to the gateway layer
4. Leverage gateway features for monitoring and audit
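For step 2, the exact wiring depends on the chosen gateway, but the common pattern is for the gateway to launch the stdio server as a child process and own the MCP session over its stdin/stdout. A minimal sketch of that pattern (the Docker image name and `runServer` helper are illustrative assumptions, not any specific gateway's API):
```typescript
import { spawn } from 'node:child_process';

// Hypothetical gateway-side launcher: the gateway handles authentication,
// multi-tenancy, and auditing, then speaks MCP to the server over stdio.
function runServer() {
  const child = spawn(
    'docker',
    [
      'run', '--rm', '-i',
      '-e', 'SONARQUBE_URL=https://sonarqube.example.com', // illustrative value
      '-e', `SONARQUBE_TOKEN=${process.env.SONARQUBE_TOKEN ?? ''}`,
      'sapientpants/sonarqube-mcp-server', // illustrative image name
    ],
    { stdio: ['pipe', 'pipe', 'inherit'] } // stderr passes through for logs
  );
  return child; // the gateway wires child.stdin/child.stdout to the MCP session
}
```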
## References
- GitHub Issue #243: Simplify to stdio-only transport
- ADR-0010: Use stdio transport for MCP communication
- ADR-0016: HTTP transport with OAuth 2.0 (being reverted)
- **ADR-0028: Session-Based HTTP Transport with SSE (PARTIALLY SUPERSEDES THIS ADR)**
- MCP Specification: Transport layer abstraction
```
--------------------------------------------------------------------------------
/src/domains/measures.ts:
--------------------------------------------------------------------------------
```typescript
import { MeasuresAdditionalField } from 'sonarqube-web-api-client';
import type {
ComponentMeasuresParams,
ComponentsMeasuresParams,
MeasuresHistoryParams,
SonarQubeComponentMeasuresResult,
SonarQubeComponentsMeasuresResult,
SonarQubeMeasuresHistoryResult,
} from '../types/index.js';
import { BaseDomain } from './base.js';
import { ensureStringArray } from '../utils/transforms.js';
/**
* Domain module for measures-related operations
*/
export class MeasuresDomain extends BaseDomain {
/**
* Gets measures for a specific component
* @param params Parameters including component key and metric keys
* @returns Promise with the component measures
*/
async getComponentMeasures(
params: ComponentMeasuresParams
): Promise<SonarQubeComponentMeasuresResult> {
const { component, metricKeys, additionalFields, branch, pullRequest } = params;
const request: {
component: string;
metricKeys: string[];
additionalFields?: MeasuresAdditionalField[];
branch?: string;
pullRequest?: string;
} = {
component,
metricKeys: ensureStringArray(metricKeys),
...(additionalFields && { additionalFields: additionalFields as MeasuresAdditionalField[] }),
...(branch && { branch }),
...(pullRequest && { pullRequest }),
};
const response = await this.webApiClient.measures.component(request);
return response as SonarQubeComponentMeasuresResult;
}
/**
* Gets measures for multiple components
* @param params Parameters including component keys and metric keys
* @returns Promise with the components measures
*/
async getComponentsMeasures(
params: ComponentsMeasuresParams
): Promise<SonarQubeComponentsMeasuresResult> {
// The API only supports querying one component at a time for detailed measures
// We need to make multiple requests and aggregate the results
const componentKeys = ensureStringArray(params.componentKeys);
const metricKeys = ensureStringArray(params.metricKeys);
const results = await Promise.all(
componentKeys.map((componentKey) => {
const requestParams: ComponentMeasuresParams = {
component: componentKey,
metricKeys,
...(params.additionalFields && { additionalFields: params.additionalFields }),
...(params.branch && { branch: params.branch }),
...(params.pullRequest && { pullRequest: params.pullRequest }),
...(params.period && { period: params.period }),
};
return this.getComponentMeasures(requestParams);
})
);
// Aggregate results with pagination
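// All component results were fetched above, so page/pageSize below slice the
// aggregated list locally; they do not reduce the number of upstream requests.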
const allComponents = results.map((result) => result.component);
const page = params.page ?? 1;
const pageSize = params.pageSize ?? 100; // Default to 100 like SonarQube API
// Apply pagination
const startIndex = (page - 1) * pageSize;
const endIndex = startIndex + pageSize;
const paginatedComponents = allComponents.slice(startIndex, endIndex);
const response: SonarQubeComponentsMeasuresResult = {
components: paginatedComponents,
metrics: results[0]?.metrics ?? [],
paging: {
pageIndex: page,
pageSize: pageSize,
total: componentKeys.length,
},
};
// Only add period if it exists
if (results[0]?.period) {
response.period = results[0].period;
}
return response;
}
/**
* Gets measures history for a component
* @param params Parameters including component key and metrics
* @returns Promise with the measures history
*/
async getMeasuresHistory(params: MeasuresHistoryParams): Promise<SonarQubeMeasuresHistoryResult> {
const { component, metrics, from, to, branch, pullRequest, page, pageSize } = params;
const builder = this.webApiClient.measures.searchHistory(component, ensureStringArray(metrics));
if (from) {
builder.from(from);
}
if (to) {
builder.to(to);
}
if (branch) {
builder.withBranch(branch);
}
if (pullRequest) {
builder.withPullRequest(pullRequest);
}
if (page !== undefined) {
builder.page(page);
}
if (pageSize !== undefined) {
builder.pageSize(pageSize);
}
const response = await builder.execute();
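// The history response may omit paging; fall back to defaults so callers
// always receive a paging object.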
return {
...response,
paging: response.paging ?? { pageIndex: 1, pageSize: 100, total: 0 },
};
}
}
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0020-testing-framework-and-strategy-vitest-with-property-based-testing.md:
--------------------------------------------------------------------------------
```markdown
# 20. Testing Framework and Strategy: Vitest with Property-Based Testing
Date: 2025-10-11
## Status
Accepted
## Context
The SonarQube MCP Server requires a comprehensive testing strategy to ensure reliability, maintainability, and code quality. The project needs:
- Fast test execution for rapid feedback during development
- Strong integration with TypeScript and ES modules
- Property-based testing to uncover edge cases
- High code coverage requirements (80% minimum)
- Modern test tooling with good developer experience
- Compatibility with CI/CD automation
Jest, while popular, has challenges with ES modules and TypeScript, requiring additional configuration and workarounds. We needed a testing framework that provides first-class support for modern JavaScript/TypeScript patterns.
## Decision
We will use **Vitest** as our primary testing framework, complemented by **fast-check** for property-based testing.
### Core Testing Stack
1. **Vitest** (v3.2.4+): Test framework and runner
- Native ES modules support
- Built-in TypeScript support
- Compatible with Jest API for easier migration
- Fast execution with smart watch mode
- Integrated coverage reporting with V8
2. **fast-check** (v4.3.0+): Property-based testing library
- Generate comprehensive test cases automatically
- Uncover edge cases that unit tests might miss
- Integrated with Vitest via @fast-check/vitest
3. **Coverage Requirements**:
- Minimum 80% coverage for lines, functions, branches, and statements
- Enforced in CI/CD pipeline
- Configured via vitest.config.ts
### Test Organization
```
src/__tests__/
├── [feature].test.ts # Unit tests
├── domains/ # Domain-specific tests
│ └── [domain].test.ts
├── schemas/ # Schema validation tests
│ └── [schema].test.ts
└── transports/ # Transport layer tests
└── [transport].test.ts
```
### Testing Approach
1. **Unit Tests**: Test individual functions and classes in isolation
2. **Integration Tests**: Test interactions between components
3. **Property-Based Tests**: Use fast-check to test properties that should hold for all inputs
4. **Schema Tests**: Validate Zod schema definitions with comprehensive inputs (see the sketch below)
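For example, a schema test can pair a Zod schema with fast-check generators (a minimal sketch; `pullRequestSchema` is illustrative rather than one of the project's shipped schemas):
```typescript
import { describe, expect } from 'vitest';
import { fc, test } from '@fast-check/vitest';
import { z } from 'zod';

// Illustrative schema: accepts a string or number and normalizes to string.
const pullRequestSchema = z.union([z.string(), z.number()]).transform(String);

describe('pullRequestSchema', () => {
  test.prop([fc.oneof(fc.string(), fc.integer())])('always parses to a string', (input) => {
    expect(typeof pullRequestSchema.parse(input)).toBe('string');
  });
});
```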
## Consequences
### Positive
- **Fast Execution**: Vitest is significantly faster than Jest (2-3x in our benchmarks)
- **Better DX**: Native TypeScript support eliminates configuration complexity
- **ES Modules**: First-class support for modern JavaScript patterns
- **Property-Based Testing**: fast-check uncovers edge cases that traditional tests miss
- **Coverage Enforcement**: Built-in V8 coverage ensures quality standards
- **Familiar API**: Jest-compatible API reduces migration friction
- **Watch Mode**: Intelligent test re-running speeds up development
- **Type Safety**: Strong TypeScript integration catches errors early
### Negative
- **Ecosystem Maturity**: Vitest is newer than Jest, with smaller community
- **Learning Curve**: Property-based testing requires different thinking
- **CI Time**: Property-based tests can be slower (mitigated with test timeouts)
- **Documentation**: Less third-party documentation compared to Jest
### Neutral
- **Migration Path**: Jest-compatible API makes future changes easier
- **Property Test Complexity**: Requires careful generator design
- **Coverage Tools**: V8 coverage differs slightly from Istanbul
## Implementation
### Configuration
```typescript
// vitest.config.ts
export default defineConfig({
test: {
environment: 'node',
globals: true,
coverage: {
provider: 'v8',
reporter: ['text', 'html', 'json-summary', 'lcov'],
thresholds: {
branches: 80,
functions: 80,
lines: 80,
statements: 80,
},
},
},
});
```
### Example Tests
**Unit Test**:
```typescript
describe('nullToUndefined', () => {
it('should convert null to undefined', () => {
expect(nullToUndefined(null)).toBeUndefined();
});
});
```
**Property-Based Test**:
```typescript
import { fc, test } from '@fast-check/vitest';
test.prop([fc.string()])('should handle any string input', (input) => {
const result = processString(input);
expect(typeof result).toBe('string');
});
```
## References
- Vitest Documentation: https://vitest.dev/
- fast-check Documentation: https://fast-check.dev/
- Coverage Configuration: vitest.config.ts
- Test Examples: src/__tests__/
```
--------------------------------------------------------------------------------
/src/domains/hotspots.ts:
--------------------------------------------------------------------------------
```typescript
import type {
HotspotSearchParams,
SonarQubeHotspotSearchResult,
SonarQubeHotspotDetails,
HotspotStatusUpdateParams,
SonarQubeHotspot,
SeverityLevel,
} from '../types/index.js';
import { BaseDomain } from './base.js';
/**
* Domain module for security hotspots operations
*/
export class HotspotsDomain extends BaseDomain {
/**
* Search for security hotspots
* @param params Search parameters
* @returns Promise with the search results
*/
async hotspots(params: HotspotSearchParams): Promise<SonarQubeHotspotSearchResult> {
const builder = this.webApiClient.hotspots.search();
if (params.projectKey) {
builder.projectKey(params.projectKey);
}
// Note: the hotspots search API doesn't support branch/pullRequest filtering directly,
// so those parameters are intentionally not forwarded to the builder here
if (params.status) {
builder.status(params.status);
}
if (params.resolution) {
builder.resolution(params.resolution);
}
if (params.files) {
builder.files(params.files);
}
if (params.assignedToMe !== undefined) {
builder.onlyMine(params.assignedToMe);
}
if (params.sinceLeakPeriod !== undefined) {
builder.sinceLeakPeriod(params.sinceLeakPeriod);
}
if (params.inNewCodePeriod !== undefined) {
// The API may not expose inNewCodePeriod directly; fall back to sinceLeakPeriod
if (params.inNewCodePeriod) {
builder.sinceLeakPeriod(true);
}
}
if (params.page !== undefined) {
builder.page(params.page);
}
if (params.pageSize !== undefined) {
builder.pageSize(params.pageSize);
}
const response = await builder.execute();
return {
hotspots: response.hotspots as SonarQubeHotspot[],
components: response.components?.map((comp) => ({
key: comp.key,
qualifier: comp.qualifier,
name: comp.name,
longName: comp.longName,
path: comp.path,
})),
paging: response.paging ?? { pageIndex: 1, pageSize: 100, total: 0 },
};
}
/**
* Get details for a specific hotspot
* @param hotspotKey The hotspot key
* @returns Promise with the hotspot details
*/
async hotspot(hotspotKey: string): Promise<SonarQubeHotspotDetails> {
const response = await this.webApiClient.hotspots.show({ hotspot: hotspotKey });
// Map the response to our interface
return {
key: response.key,
component: response.component.key,
project: response.project.key,
securityCategory: response.rule.securityCategory,
vulnerabilityProbability: response.rule.vulnerabilityProbability as SeverityLevel,
status: response.status,
...(response.resolution && { resolution: response.resolution }),
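// response.line is optional; default to 0 to satisfy the non-optional interface field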
line: response.line ?? 0,
message: response.message,
...(response.assignee?.login && { assignee: response.assignee.login }),
...(response.author?.login && { author: response.author.login }),
creationDate: response.creationDate,
updateDate: response.updateDate,
rule: {
key: response.rule.key,
name: response.rule.name,
securityCategory: response.rule.securityCategory,
vulnerabilityProbability: response.rule.vulnerabilityProbability as SeverityLevel,
},
changelog: response.changelog?.map((change) => ({
user: change.user?.login,
userName: change.user?.name,
creationDate: change.creationDate,
diffs:
change.diffs?.map((diff) => ({
key: diff.key,
oldValue: diff.oldValue,
newValue: diff.newValue,
})) ?? [],
})),
comment: response.comment,
users: (response as { users?: SonarQubeHotspotDetails['users'] }).users,
};
}
/**
* Update the status of a hotspot
* @param params Update parameters
* @returns Promise that resolves when the update is complete
*/
async updateHotspotStatus(params: HotspotStatusUpdateParams): Promise<void> {
const request: {
hotspot: string;
status: 'TO_REVIEW' | 'REVIEWED';
resolution?: 'FIXED' | 'SAFE';
comment?: string;
} = {
hotspot: params.hotspot,
status: params.status,
};
if (params.resolution !== undefined) {
request.resolution = params.resolution;
}
if (params.comment !== undefined) {
request.comment = params.comment;
}
await this.webApiClient.hotspots.changeStatus(request);
}
}
```
--------------------------------------------------------------------------------
/.github/scripts/version-and-release.js:
--------------------------------------------------------------------------------
```javascript
#!/usr/bin/env node
/**
* =============================================================================
* SCRIPT: Version and Release Manager
* PURPOSE: Validate changesets and manage version bumps for releases
* USAGE: Called by main.yml workflow after successful validation
* OUTPUTS: Sets GitHub Actions outputs for version and changed status
* =============================================================================
*/
import { execSync } from 'child_process';
import fs from 'fs';
// Execute shell command and return trimmed output
const exec = (cmd) => execSync(cmd, { encoding: 'utf-8', stdio: 'pipe' }).trim();
// eslint-disable-next-line no-console
const log = (msg) => console.log(msg);
async function main() {
try {
// =============================================================================
// CHANGESET DETECTION
// Check if changesets exist in .changeset directory
// =============================================================================
// Look for changeset markdown files (excluding README.md)
const hasChangesets =
fs.existsSync('.changeset') &&
fs.readdirSync('.changeset').some((f) => f.endsWith('.md') && f !== 'README.md');
if (!hasChangesets) {
// =============================================================================
// VALIDATE COMMITS MATCH CHANGESETS
// Ensure feat/fix commits have corresponding changesets
// =============================================================================
// Find the last git tag to determine commit range
let lastTag = '';
try {
lastTag = exec('git describe --tags --abbrev=0');
} catch {
// No tags exist yet (first release)
lastTag = '';
}
// Get commits since last tag (or all commits if no tags)
const commitRange = lastTag ? `${lastTag}..HEAD` : 'HEAD';
const commits = exec(`git log ${commitRange} --pretty=format:"%s"`).split('\n');
// Check if any commits require a release (feat, fix, perf, refactor)
const hasReleasableCommits = commits.some((c) =>
/^(feat|fix|perf|refactor)(\(.+\))?:/.test(c)
);
if (!hasReleasableCommits) {
// No commits that need a release
log('⏭️ No releasable commits found, skipping release');
process.exit(0);
}
// VALIDATION ERROR: Found releasable commits without changesets
// This enforces that all features/fixes are documented in changelog
log('❌ Found releasable commits but no changeset');
log('Commits that require a changeset:');
commits
.filter((c) => /^(feat|fix|perf|refactor)(\(.+\))?:/.test(c))
.forEach((c) => log(` - ${c}`));
log('\nPlease add a changeset by running: pnpm changeset');
process.exit(1);
}
// =============================================================================
// VERSION MANAGEMENT
// Apply changesets to bump version and update CHANGELOG.md
// =============================================================================
// Get current version from package.json
const pkg = JSON.parse(fs.readFileSync('package.json', 'utf-8'));
const currentVersion = pkg.version;
log(`Current version: ${currentVersion}`);
// Apply all pending changesets
// This updates package.json version and CHANGELOG.md
exec('pnpm changeset version');
// Check if version actually changed
const updatedPkg = JSON.parse(fs.readFileSync('package.json', 'utf-8'));
const newVersion = updatedPkg.version;
if (currentVersion === newVersion) {
// No version bump needed (e.g., all changesets were --empty)
log('⏭️ No version change');
process.exit(0);
}
log(`📦 Version changed to: ${newVersion}`);
// =============================================================================
// GITHUB ACTIONS OUTPUT
// Set outputs for workflow to use in subsequent steps
// =============================================================================
// Output for GitHub Actions
// These values are used by main.yml to decide whether to create a release
if (process.env.GITHUB_OUTPUT) {
fs.appendFileSync(process.env.GITHUB_OUTPUT, `changed=true\n`);
fs.appendFileSync(process.env.GITHUB_OUTPUT, `version=${newVersion}\n`);
}
} catch (error) {
// Error handling with clear message
// Common errors: permission issues, git conflicts, invalid changesets
// eslint-disable-next-line no-console
console.error('Error:', error.message);
process.exit(1);
}
}
main();
```
--------------------------------------------------------------------------------
/.github/workflows/pr.yml:
--------------------------------------------------------------------------------
```yaml
# =============================================================================
# WORKFLOW: Pull Request Validation
# PURPOSE: Ensure code quality and security before merging to main
# TRIGGERS: Pull requests targeting main branch
# REQUIREMENTS: All checks must pass, changesets required for features/fixes
# =============================================================================
name: PR
on:
pull_request:
branches: [main]
# Allow only one PR workflow per branch
# cancel-in-progress: true cancels old runs when new commits are pushed
# This speeds up feedback by focusing on the latest code
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# Minimal permissions for security
# contents: read - Read code for analysis
# security-events: write - Upload security findings
# actions: read - Access workflow artifacts
# packages: write - Required by reusable-docker.yml (not used in PR builds)
permissions:
contents: read
security-events: write
actions: read
packages: write
jobs:
# =============================================================================
# PARALLEL VALIDATION
# All checks run simultaneously for faster feedback
# =============================================================================
# Core validation: audit, typecheck, lint, format, tests
# upload-coverage: true generates coverage reports for visibility
# FAILS IF: Any check fails or coverage drops below 80%
validate:
uses: ./.github/workflows/reusable-validate.yml
secrets:
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
with:
validate-changesets: true
# Security: Static security analysis for TypeScript/JavaScript
# Scans for: XSS, injection attacks, insecure patterns
# Results appear in Security tab of the PR
security:
uses: ./.github/workflows/reusable-security.yml
# =============================================================================
# DOCKER CONTAINER VALIDATION
# Build and scan Docker image for vulnerabilities
# =============================================================================
# Docker: Build and security scan container image
# Only runs when ENABLE_DOCKER_RELEASE is configured
# Scans for: CVEs, misconfigurations, secrets in image layers
docker:
if: vars.ENABLE_DOCKER_RELEASE == 'true'
uses: ./.github/workflows/reusable-docker.yml
with:
platforms: 'linux/amd64' # Single platform for faster PR validation
save-artifact: false # Don't save artifact for PRs
image-name: 'sonarqube-mcp-server-pr'
# =============================================================================
# FINAL STATUS CHECK
# Single job to verify all parallel checks succeeded
# =============================================================================
# Final status check - ensures all jobs passed
# Required for branch protection rules
pr-status:
needs: [validate, security, docker]
if: always() # Run even if previous jobs failed
runs-on: ubuntu-latest
steps:
- name: Check status
# Aggregates results from all parallel jobs
# This single check can be used as a required status check
# FAILS IF: Any validation job failed
# Common failures:
# - validate: Tests fail, coverage below 80%, lint errors, workflow errors, missing changesets
# - security: Security vulnerabilities, vulnerable dependencies, audit failures
# - docker: Container vulnerabilities or build failures (when enabled)
run: |
# Check Docker job status
# The job can be:
# - success: Job ran and passed
# - failure: Job ran and failed
# - cancelled: Job was cancelled
# - skipped: Job condition was not met (e.g., ENABLE_DOCKER_RELEASE != 'true')
DOCKER_RESULT="${{ needs.docker.result }}"
# Docker is acceptable if it succeeded or was skipped
# It's a failure only if it actually ran and failed/was cancelled
if [ "$DOCKER_RESULT" == "failure" ] || [ "$DOCKER_RESULT" == "cancelled" ]; then
DOCKER_FAILED=true
else
DOCKER_FAILED=false
fi
if [ "${{ needs.validate.result }}" != "success" ] || \
[ "${{ needs.security.result }}" != "success" ] || \
[ "$DOCKER_FAILED" == "true" ]; then
echo "❌ PR validation failed"
# Check individual job results for debugging
echo "Validate: ${{ needs.validate.result }}"
echo "Security: ${{ needs.security.result }}"
echo "Docker: ${{ needs.docker.result }}"
exit 1
fi
echo "✅ All PR checks passed"
```
--------------------------------------------------------------------------------
/src/__tests__/additional-coverage.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import nock from 'nock';
// Mock environment variables
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'http://localhost:9000';
describe('Lambda Handlers Coverage Tests', () => {
beforeEach(() => {
vi.resetModules();
// Setup nock to mock SonarQube API responses
nock('http://localhost:9000')
.persist()
.get('/api/metrics/search')
.query(true)
.reply(200, {
metrics: [
{
key: 'test-metric',
name: 'Test Metric',
description: 'Test metric description',
domain: 'test',
type: 'INT',
},
],
paging: {
pageIndex: 1,
pageSize: 10,
total: 1,
},
});
nock('http://localhost:9000')
.persist()
.get('/api/measures/component')
.query(true)
.reply(200, {
component: {
key: 'test-component',
name: 'Test Component',
qualifier: 'TRK',
measures: [
{
metric: 'coverage',
value: '85.4',
},
],
},
metrics: [
{
key: 'coverage',
name: 'Coverage',
description: 'Test coverage',
domain: 'Coverage',
type: 'PERCENT',
},
],
});
nock('http://localhost:9000')
.persist()
.get('/api/measures/components')
.query(true)
.reply(200, {
components: [
{
key: 'test-component-1',
name: 'Test Component 1',
qualifier: 'TRK',
measures: [
{
metric: 'coverage',
value: '85.4',
},
],
},
],
metrics: [
{
key: 'coverage',
name: 'Coverage',
description: 'Test coverage',
domain: 'Coverage',
type: 'PERCENT',
},
],
paging: {
pageIndex: 1,
pageSize: 100,
total: 1,
},
});
nock('http://localhost:9000')
.persist()
.get('/api/measures/search_history')
.query(true)
.reply(200, {
measures: [
{
metric: 'coverage',
history: [
{
date: '2023-01-01T00:00:00+0000',
value: '80.0',
},
],
},
],
paging: {
pageIndex: 1,
pageSize: 100,
total: 1,
},
});
// Modules are imported inside each test so they initialize against the mocked environment
});
afterEach(() => {
nock.cleanAll();
});
// Import the module directly in each test to ensure it's available
it('should call metricsHandler', async () => {
const module = await import('../index.js');
const result = await module.metricsHandler({ page: 1, page_size: 10 });
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content?.[0]?.text).toBeDefined();
});
it('should call componentMeasuresHandler', async () => {
const module = await import('../index.js');
const result = await module.componentMeasuresHandler({
component: 'test-component',
metric_keys: ['coverage'],
additional_fields: ['periods'],
branch: 'main',
pull_request: 'pr-123',
period: '1',
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content?.[0]?.text).toBeDefined();
});
it('should call componentsMeasuresHandler', async () => {
const module = await import('../index.js');
const result = await module.componentsMeasuresHandler({
component_keys: ['component1', 'component2'],
metric_keys: ['coverage', 'bugs'],
additional_fields: ['metrics'],
branch: 'develop',
pull_request: 'pr-456',
period: '2',
page: '1',
page_size: '20',
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content?.[0]?.text).toBeDefined();
});
it('should call measuresHistoryHandler', async () => {
const module = await import('../index.js');
const result = await module.measuresHistoryHandler({
component: 'test-component',
metrics: ['coverage', 'bugs'],
from: '2023-01-01',
to: '2023-12-31',
branch: 'feature',
pull_request: 'pr-789',
page: '1',
page_size: '30',
});
expect(result).toBeDefined();
expect(result.content).toBeDefined();
expect(result.content?.[0]?.text).toBeDefined();
});
});
```
--------------------------------------------------------------------------------
/src/__tests__/utils/transforms.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, test, expect } from 'vitest';
import {
ensureArray,
ensureStringArray,
nullToUndefined,
stringToNumberTransform,
numberOrStringToString,
} from '../../utils/transforms.js';
describe('transforms', () => {
describe('nullToUndefined', () => {
test('converts null to undefined', () => {
expect(nullToUndefined(null)).toBeUndefined();
});
test('preserves undefined', () => {
expect(nullToUndefined(undefined)).toBeUndefined();
});
test('preserves other values', () => {
expect(nullToUndefined(0)).toBe(0);
expect(nullToUndefined('')).toBe('');
expect(nullToUndefined(false)).toBe(false);
expect(nullToUndefined({ foo: 'bar' })).toEqual({ foo: 'bar' });
});
});
describe('stringToNumberTransform', () => {
test('converts valid string to number', () => {
expect(stringToNumberTransform('123')).toBe(123);
expect(stringToNumberTransform('0')).toBe(0);
expect(stringToNumberTransform('-456')).toBe(-456);
});
test('handles invalid and non-integer strings', () => {
expect(stringToNumberTransform('abc')).toBeNull();
expect(stringToNumberTransform('')).toBeNull();
expect(stringToNumberTransform('12.34')).toBe(12); // parseInt behavior
});
test('preserves null and undefined', () => {
expect(stringToNumberTransform(null)).toBeNull();
expect(stringToNumberTransform(undefined)).toBeUndefined();
});
});
describe('ensureArray', () => {
test('returns empty array for undefined', () => {
expect(ensureArray(undefined)).toEqual([]);
});
test('wraps single value in array', () => {
expect(ensureArray('hello')).toEqual(['hello']);
expect(ensureArray(123)).toEqual([123]);
expect(ensureArray(true)).toEqual([true]);
expect(ensureArray({ key: 'value' })).toEqual([{ key: 'value' }]);
});
test('returns array as-is', () => {
expect(ensureArray(['a', 'b', 'c'])).toEqual(['a', 'b', 'c']);
expect(ensureArray([1, 2, 3])).toEqual([1, 2, 3]);
expect(ensureArray([])).toEqual([]);
});
test('handles mixed type arrays', () => {
const mixed = [1, 'two', { three: 3 }];
expect(ensureArray(mixed)).toEqual(mixed);
});
test('handles null as a value', () => {
expect(ensureArray(null)).toEqual([null]);
});
test('handles zero and empty string', () => {
expect(ensureArray(0)).toEqual([0]);
expect(ensureArray('')).toEqual(['']);
});
});
describe('ensureStringArray', () => {
test('returns empty array for undefined', () => {
expect(ensureStringArray(undefined)).toEqual([]);
});
test('wraps single string in array', () => {
expect(ensureStringArray('hello')).toEqual(['hello']);
expect(ensureStringArray('')).toEqual(['']);
});
test('returns string array as-is', () => {
expect(ensureStringArray(['a', 'b', 'c'])).toEqual(['a', 'b', 'c']);
expect(ensureStringArray([])).toEqual([]);
});
test('preserves array reference', () => {
const arr = ['test'];
expect(ensureStringArray(arr)).toBe(arr);
});
test('splits comma-separated strings', () => {
expect(ensureStringArray('a,b,c')).toEqual(['a', 'b', 'c']);
expect(ensureStringArray('comp1,comp2')).toEqual(['comp1', 'comp2']);
expect(ensureStringArray('single,double,triple')).toEqual(['single', 'double', 'triple']);
});
test('handles strings with no commas', () => {
expect(ensureStringArray('nocommas')).toEqual(['nocommas']);
expect(ensureStringArray('single-value')).toEqual(['single-value']);
});
});
describe('numberOrStringToString', () => {
test('converts number to string', () => {
expect(numberOrStringToString(123)).toBe('123');
expect(numberOrStringToString(0)).toBe('0');
expect(numberOrStringToString(-456)).toBe('-456');
expect(numberOrStringToString(12.34)).toBe('12.34');
});
test('preserves string values', () => {
expect(numberOrStringToString('123')).toBe('123');
expect(numberOrStringToString('abc')).toBe('abc');
expect(numberOrStringToString('')).toBe('');
expect(numberOrStringToString('pr-123')).toBe('pr-123');
});
test('preserves null and undefined', () => {
expect(numberOrStringToString(null)).toBeNull();
expect(numberOrStringToString(undefined)).toBeUndefined();
});
test('handles edge cases', () => {
expect(numberOrStringToString(0)).toBe('0');
expect(numberOrStringToString('')).toBe('');
expect(numberOrStringToString(NaN)).toBe('NaN');
expect(numberOrStringToString(Infinity)).toBe('Infinity');
expect(numberOrStringToString(-Infinity)).toBe('-Infinity');
});
});
});
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0016-http-transport-with-oauth-2-0-metadata-endpoints.md:
--------------------------------------------------------------------------------
```markdown
# 16. HTTP Transport with OAuth 2.0 Metadata Endpoints
Date: 2025-06-22
Superseded: 2025-01-30 by ADR-0019
## Status
Superseded by ADR-0019
This decision was reversed on 2025-01-30. The HTTP transport and OAuth 2.0 implementation were removed in favor of stdio-only transport, with enterprise features delegated to MCP gateways (Docker MCP Gateway, IBM Context Forge, SGNL, Operant, etc.).
**Rationale for reversal:**
- HTTP transport added significant complexity (~60+ authentication/authorization files)
- MCP gateway solutions now provide these enterprise features at the infrastructure layer
- Better separation of concerns: business logic vs infrastructure concerns
- Reduced attack surface and maintenance burden
- Aligns with Unix philosophy of "do one thing well"
See ADR-0019 for the current stdio-only transport approach.
## Context
Following the transport architecture refactoring in ADR-0015, we need to implement HTTP transport to support enterprise deployment scenarios. The HTTP transport must provide authentication discovery mechanisms for MCP clients as outlined in the MCP specification.
### Requirements:
1. Implement HTTP transport as an alternative to STDIO
2. Support OAuth 2.0 metadata discovery endpoints (RFC9728 and RFC8414)
3. Enable enterprise authentication workflows
4. Maintain compatibility with existing transport architecture
5. Prepare for future OAuth 2.0 flow implementation
### Standards Compliance:
- RFC9728: OAuth 2.0 Protected Resource Metadata
- RFC8414: OAuth 2.0 Authorization Server Metadata
- RFC6750: Bearer Token Usage
## Decision
We will implement HTTP transport with OAuth 2.0 metadata endpoints:
1. **HTTP Transport Implementation**: Express-based HTTP server following the ITransport interface
2. **Metadata Endpoints**:
- `/.well-known/oauth-protected-resource` (RFC9728)
- `/.well-known/oauth-authorization-server` (RFC8414, optional)
3. **Authentication Structure**: WWW-Authenticate headers with resource metadata URLs
4. **Configuration**: Environment variable-based configuration consistent with existing patterns
### Architecture Details:
```typescript
// HTTP Transport with OAuth metadata
class HttpTransport implements ITransport {
// Express server with CORS support
// OAuth metadata endpoints
// Bearer token authentication middleware
// MCP HTTP transport integration
}
// Protected Resource Metadata response
{
"resource": "https://mcp.company.com",
"authorization_servers": ["https://auth.company.com"],
"bearer_methods_supported": ["header"],
"resource_signing_alg_values_supported": ["RS256"]
}
```
### Environment Variables:
- `MCP_TRANSPORT=http`: Enable HTTP transport
- `MCP_HTTP_PORT`: HTTP server port
- `MCP_HTTP_HOST`: HTTP server host
- `MCP_HTTP_PUBLIC_URL`: Public URL for metadata endpoints
- `MCP_OAUTH_AUTH_SERVERS`: External authorization server URLs
- `MCP_OAUTH_BUILTIN`: Enable built-in auth server metadata
## Consequences
### Positive:
1. **Enterprise Ready**: Supports enterprise authentication discovery workflows
2. **Standards Compliant**: Follows OAuth 2.0 RFCs for metadata discovery
3. **Extensible**: Structure ready for full OAuth 2.0 flow implementation
4. **Backward Compatible**: STDIO transport remains default
5. **Discovery Mechanism**: Clients can automatically discover authentication requirements
### Negative:
1. **Token Validation Pending**: Actual token validation not yet implemented
2. **Additional Dependencies**: Requires Express and CORS packages
### Neutral:
1. **Incremental Implementation**: Sets foundation for future OAuth stories
2. **Documentation Required**: New transport needs comprehensive documentation
## Implementation Notes
1. HTTP transport integrates with MCP SDK's HTTP server transport
2. Authentication middleware prepared for future token validation
3. Health check endpoint provided for monitoring
4. CORS enabled by default for cross-origin requests
5. All responses follow RFC specifications for JSON structure
## Related ADRs
- ADR-0015: Transport Architecture Refactoring
- ADR-0014: Current Security Model and Future OAuth2 Considerations
- ADR-0008: Use Environment Variables for Configuration
- **ADR-0019: Simplify to stdio-only transport (SUPERSEDES THIS ADR)**
## Historical Note
This ADR documents the HTTP transport implementation that was later removed. The decision to implement HTTP transport with OAuth 2.0 was sound at the time (June 2025), but the rapid evolution of the MCP ecosystem with purpose-built gateway solutions made this approach redundant. The code and infrastructure described in this ADR were removed in January 2025 as part of a significant simplification effort that reduced the codebase by ~40%.
This ADR is retained for historical context and to document the architectural exploration that led to the current stdio-only approach.
```
--------------------------------------------------------------------------------
/src/__tests__/pull-request-transform.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, test, expect } from 'vitest';
import { z } from 'zod';
import { issuesToolSchema } from '../schemas/issues.js';
import {
componentMeasuresToolSchema,
componentsMeasuresToolSchema,
measuresHistoryToolSchema,
} from '../schemas/measures.js';
import { hotspotsToolSchema } from '../schemas/hotspots-tools.js';
import { sourceCodeToolSchema, scmBlameToolSchema } from '../schemas/source-code.js';
import { qualityGateStatusToolSchema } from '../schemas/quality-gates.js';
import { componentsToolSchema } from '../schemas/components.js';
describe('pull_request parameter transform', () => {
describe('issues schema', () => {
test('accepts string pull_request', () => {
const schema = z.object(issuesToolSchema);
const result = schema.parse({
pull_request: '123',
});
expect(result.pull_request).toBe('123');
});
test('accepts number pull_request and converts to string', () => {
const schema = z.object(issuesToolSchema);
const result = schema.parse({
pull_request: 123,
});
expect(result.pull_request).toBe('123');
});
test('preserves null values', () => {
const schema = z.object(issuesToolSchema);
const result = schema.parse({
pull_request: null,
});
expect(result.pull_request).toBeNull();
});
});
describe('measures schemas', () => {
test('componentMeasuresToolSchema accepts number and converts to string', () => {
const schema = z.object(componentMeasuresToolSchema);
const result = schema.parse({
component: 'test',
metric_keys: ['coverage'],
pull_request: 456,
});
expect(result.pull_request).toBe('456');
});
test('componentsMeasuresToolSchema accepts number and converts to string', () => {
const schema = z.object(componentsMeasuresToolSchema);
const result = schema.parse({
component_keys: ['test'],
metric_keys: ['coverage'],
pull_request: 789,
});
expect(result.pull_request).toBe('789');
});
test('measuresHistoryToolSchema accepts number and converts to string', () => {
const schema = z.object(measuresHistoryToolSchema);
const result = schema.parse({
component: 'test',
metrics: ['coverage'],
pull_request: 999,
});
expect(result.pull_request).toBe('999');
});
});
describe('hotspots schema', () => {
test('accepts number pull_request and converts to string', () => {
const schema = z.object(hotspotsToolSchema);
const result = schema.parse({
pull_request: 111,
});
expect(result.pull_request).toBe('111');
});
});
describe('source code schemas', () => {
test('sourceCodeToolSchema accepts number and converts to string', () => {
const schema = z.object(sourceCodeToolSchema);
const result = schema.parse({
key: 'test',
pull_request: 222,
});
expect(result.pull_request).toBe('222');
});
test('scmBlameToolSchema accepts number and converts to string', () => {
const schema = z.object(scmBlameToolSchema);
const result = schema.parse({
key: 'test',
pull_request: 333,
});
expect(result.pull_request).toBe('333');
});
});
describe('quality gates schema', () => {
test('accepts number pull_request and converts to string', () => {
const schema = z.object(qualityGateStatusToolSchema);
const result = schema.parse({
project_key: 'test',
pull_request: 444,
});
expect(result.pull_request).toBe('444');
});
});
describe('components schema', () => {
test('accepts number pullRequest and converts to string', () => {
const schema = z.object(componentsToolSchema);
const result = schema.parse({
pullRequest: 555,
});
expect(result.pullRequest).toBe('555');
});
test('accepts string pullRequest', () => {
const schema = z.object(componentsToolSchema);
const result = schema.parse({
pullRequest: 'pr-666',
});
expect(result.pullRequest).toBe('pr-666');
});
});
describe('edge cases', () => {
test('handles decimal numbers', () => {
const schema = z.object(issuesToolSchema);
const result = schema.parse({
pull_request: 123.456,
});
expect(result.pull_request).toBe('123.456');
});
test('handles negative numbers', () => {
const schema = z.object(issuesToolSchema);
const result = schema.parse({
pull_request: -123,
});
expect(result.pull_request).toBe('-123');
});
test('handles zero', () => {
const schema = z.object(issuesToolSchema);
const result = schema.parse({
pull_request: 0,
});
expect(result.pull_request).toBe('0');
});
});
});
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0017-comprehensive-audit-logging-system.md:
--------------------------------------------------------------------------------
```markdown
# 17. Comprehensive Audit Logging System
Date: 2025-06-25
## Status
Accepted
## Context
The SonarQube MCP Server needs comprehensive audit logging to meet enterprise compliance requirements including SOC 2 Type II, ISO 27001, and GDPR. The system must log all security-relevant events including authentication attempts, tool invocations, permission checks, and configuration changes while protecting sensitive data and ensuring log integrity.
Current logging infrastructure:
- File-based logging system (to avoid STDIO conflicts with MCP protocol)
- Service account auditor for tracking account usage
- Basic error and debug logging
Requirements:
- Structured JSON audit logs with all required fields
- Log all authentication attempts (success and failure)
- Log all tool invocations with parameters
- Log permission checks and denials
- Configurable log retention policies
- Log shipping to SIEM systems
- PII redaction capabilities
- Audit log integrity protection
## Decision
We will implement a comprehensive audit logging system that builds on the existing logging infrastructure with the following components:
### 1. Audit Logger Service
A dedicated service for handling audit events that:
- Extends the existing logger utility
- Provides structured JSON logging format
- Implements PII redaction
- Ensures log integrity with checksums
- Manages log rotation and retention
### 2. Audit Event Schema
Standardized schema for all audit events:
```typescript
interface AuditEvent {
// Core fields
timestamp: string; // ISO 8601 format
eventId: string; // Unique event identifier
eventType: AuditEventType; // Enumerated event types
eventCategory: AuditEventCategory; // auth, access, config, etc.
// Actor information
actor: {
userId: string;
userGroups?: string[];
sessionId?: string;
ipAddress?: string;
userAgent?: string;
};
// Target information
target: {
type: string; // tool, project, issue, etc.
id: string; // Resource identifier
name?: string; // Human-readable name
};
// Action details
action: {
type: string; // read, write, delete, etc.
parameters?: Record<string, unknown>;
result: 'success' | 'failure' | 'partial';
error?: string;
};
// Context
context: {
serviceAccount?: string;
sonarqubeUrl?: string;
environment?: string;
traceId?: string; // For request correlation
};
// Security metadata
security: {
tokenAud?: string;
tokenIss?: string;
tokenJti?: string;
tlsVersion?: string;
permissionChecks?: Array<{
permission: string;
result: boolean;
reason?: string;
}>;
};
// Compliance fields
compliance: {
dataClassification?: string;
piiRedacted?: boolean;
retentionPeriod?: number;
};
// Integrity
checksum?: string; // SHA-256 of event content
}
```
### 3. Event Types
Comprehensive event types covering all security-relevant actions:
- Authentication: login, logout, token validation, MFA events
- Authorization: permission checks, access grants/denials
- Tool invocation: all MCP tool calls with parameters
- Data access: project/issue/component queries
- Configuration: service account changes, permission updates
- System: health checks, errors, maintenance
### 4. PII Redaction
Automatic redaction of sensitive data (a sketch follows the list):
- Email addresses (except domain)
- IP addresses (configurable)
- User names in free-text fields
- Custom patterns via configuration
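As a sketch of the redaction approach (the patterns below are illustrative assumptions, not the shipped rule set):
```typescript
// Minimal PII redaction sketch: mask the local part of emails but keep the
// domain; IP redaction is toggled by configuration.
const EMAIL = /\b[\w.+-]+@([\w-]+\.[\w.-]+)\b/g;
const IPV4 = /\b(?:\d{1,3}\.){3}\d{1,3}\b/g;

export function redactPii(text: string, opts: { redactIps?: boolean } = {}): string {
  let out = text.replace(EMAIL, (_match, domain: string) => `[REDACTED]@${domain}`);
  if (opts.redactIps) {
    out = out.replace(IPV4, '[REDACTED-IP]');
  }
  return out;
}

// redactPii('jane.doe@example.com from 10.0.0.1', { redactIps: true })
//   => '[REDACTED]@example.com from [REDACTED-IP]'
```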
### 5. Log Storage and Rotation
- Separate audit log files from application logs
- Daily rotation with compression
- Configurable retention periods
- Archive to cold storage after retention period
### 6. SIEM Integration
- JSON format compatible with major SIEM systems
- Syslog forwarding support
- Webhook delivery for real-time streaming (sketched below)
- Batch export for bulk ingestion
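A sketch of the webhook path (endpoint handling, retries, and batching are assumptions; real delivery would need backpressure and must stay non-blocking per the implementation notes below):
```typescript
// Minimal webhook shipper sketch: POST one audit event as JSON.
// Delivery failures are swallowed after logging so audit shipping
// never blocks request handling.
export async function shipToSiem(event: object, endpoint: string): Promise<void> {
  try {
    await fetch(endpoint, {
      method: 'POST',
      headers: { 'content-type': 'application/json' },
      body: JSON.stringify(event),
    });
  } catch (error) {
    // In practice this would go through the file-based logger (ADR-0009).
    console.error('audit webhook delivery failed', error);
  }
}
```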
### 7. Integrity Protection
- SHA-256 checksums for each event (sketched after this list)
- Optional HMAC signing with rotating keys
- Tamper detection on log files
- Chain of custody documentation
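A minimal sketch of per-event checksumming with node:crypto (the JSON.stringify canonicalization is a simplifying assumption; reproducible checksums require a stable key ordering):
```typescript
import { createHash } from 'node:crypto';

interface AuditEventLike {
  [field: string]: unknown;
  checksum?: string;
}

// SHA-256 over the event body, excluding any existing checksum field.
export function withChecksum(event: AuditEventLike): AuditEventLike {
  const { checksum: _previous, ...content } = event;
  const checksum = createHash('sha256').update(JSON.stringify(content)).digest('hex');
  return { ...content, checksum };
}
```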
## Consequences
### Positive
- Full compliance with SOC 2, ISO 27001, and GDPR requirements
- Complete audit trail for security investigations
- Ability to detect and investigate security incidents
- Support for compliance reporting and audits
- Integration with enterprise security tools
- Protection of sensitive user data
### Negative
- Increased storage requirements for audit logs
- Performance overhead for logging and checksumming
- Complexity in managing log retention and rotation
- Additional configuration for SIEM integration
- Potential for log files to contain sensitive data if redaction fails
### Implementation Notes
1. Build on existing `logger.ts` utility
2. Extend `service-account-auditor.ts` for broader audit coverage
3. Add audit hooks to permission wrapper and handlers
4. Implement as middleware for HTTP transport
5. Create separate audit log directory structure
6. Add configuration for retention, redaction, and SIEM
7. Ensure all audit logging is async and non-blocking
```
--------------------------------------------------------------------------------
/.claude/commands/update-dependencies.md:
--------------------------------------------------------------------------------
```markdown
# Update Dependencies
You are about to update the project's dependencies. This command walks you through the update while adhering to the established CI/CD workflow and changeset requirements.
## Workflow Steps
### 1. Create a Feature Branch
Create a new branch following the naming convention:
```bash
git checkout -b chore/update-dependencies-<date>
# Example: chore/update-dependencies-2024-01
```
### 2. Update Dependencies
#### For Production Dependencies:
```bash
# Check outdated packages
pnpm outdated
# Update all dependencies to latest
pnpm update --latest
# Or update specific packages
pnpm update <package-name> --latest
```
#### For Dev Dependencies:
```bash
# Update dev dependencies
pnpm update --latest --dev
```
### 3. Install and Lock Dependencies
```bash
# Ensure pnpm-lock.yaml is updated
pnpm install
# Deduplicate dependencies if needed
pnpm dedupe
```
### 4. Test the Updates
Run the full verification suite to ensure compatibility:
```bash
# Run all checks (audit, typecheck, lint, format, test)
pnpm verify
# Run specific checks if needed
pnpm test
pnpm typecheck
pnpm lint
```
### 5. Create a Changeset
**IMPORTANT**: The CI/CD pipeline requires changesets for all changes. Choose the right type based on impact:
#### When to Use Empty Changeset
Use `pnpm changeset --empty` for changes that don't affect users:
- Dev dependency updates only
- CI/CD configuration changes
- Test-only improvements
- Internal tooling updates
- Documentation changes
```bash
# Create an empty changeset (no version bump)
pnpm changeset --empty
# The changeset message should document:
# - Which dependencies were updated
# - Why they were updated
# - Any tooling changes developers should know about
```
#### When to Use Regular Changeset
Use `pnpm changeset` for changes that affect the package/application:
- Production dependency updates
- Security fixes visible to users
- Breaking changes in dependencies
- Bug fixes from dependency updates
- New features from dependency updates
```bash
# Create a proper changeset (will bump version)
pnpm changeset
# Select version bump:
# - patch: security updates, bug fixes, minor dependency updates
# - minor: significant updates with new capabilities
# - major: breaking changes requiring user action
# Examples:
# patch: "Update zod to fix validation edge case"
# minor: "Update Pino with new structured logging features"
# major: "Update to Node 24 (drops Node 22 support)"
```
### 6. Commit the Changes
Follow conventional commit format:
```bash
git add .
# For routine updates
git commit -m "chore: update dependencies
- Updated production dependencies to latest versions
- Updated dev dependencies to latest versions
- No breaking changes identified"
# For updates with notable changes
git commit -m "chore: update dependencies with <notable-package> v<version>
- Updated <package> from v<old> to v<new>
- <List any important changes>
- All tests passing"
```
### 7. Push and Create Pull Request
```bash
# Push the branch
git push -u origin chore/update-dependencies-<date>
# Create PR with detailed description
gh pr create \
--title "chore: update dependencies" \
--body "## Summary
Updates all dependencies to their latest versions.
## Changes
- Production dependencies updated
- Dev dependencies updated
- No breaking changes identified
## Testing
- ✅ All tests passing
- ✅ Type checking successful
- ✅ Linting clean
- ✅ Coverage maintained at 80%+
## Changeset
- [x] Empty changeset added for dev dependency updates" \
--assignee @me
```
### 8. Monitor CI/CD Pipeline
```bash
# Watch the PR checks
gh pr checks --watch
# View detailed CI logs if needed
gh run list
gh run view <run-id>
```
### 9. Merge the Pull Request
Once all checks pass:
```bash
# Squash and merge (maintains clean history)
gh pr merge --squash --delete-branch
# Or merge through GitHub UI with "Squash and merge"
```
## Important Notes
### Changeset Requirements
- **Dev dependencies only**: Use `pnpm changeset --empty` to satisfy CI requirements
- **Production dependencies**: Create a proper changeset with appropriate version bump
- **Mixed updates**: Use proper changeset and document both types
### Common Issues and Solutions
#### CI Fails Due to Missing Changeset
```bash
# Add an empty changeset if you forgot
pnpm changeset --empty
git add .
git commit --amend
git push --force-with-lease
```
#### Breaking Changes in Dependencies
1. Review the changelog of the updated package
2. Update code to accommodate changes
3. Add tests for affected functionality
4. Use minor or major version bump in changeset
#### Audit Vulnerabilities
```bash
# Check for vulnerabilities
pnpm audit
# Fix automatically if possible
pnpm audit --fix
# For critical vulnerabilities that can't be auto-fixed,
# document in PR and consider alternatives
```
### Security Considerations
- Always run `pnpm audit` after updates
- Review security advisories for updated packages
- Be cautious with major version updates
- Consider the security track record of new dependencies
```
--------------------------------------------------------------------------------
/src/__tests__/mapping-functions.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach } from 'vitest';
describe('Mapping Functions', () => {
let mapToSonarQubeParams: any;
beforeEach(async () => {
// Import the function fresh for each test
const module = await import('../index.js');
mapToSonarQubeParams = module.mapToSonarQubeParams;
});
it('should properly map basic required parameters', () => {
const params = mapToSonarQubeParams({ project_key: 'my-project' });
expect(params.projectKey).toBe('my-project');
expect(params.severity).toBeUndefined();
expect(params.statuses).toBeUndefined();
});
it('should map pagination parameters', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
page: 2,
page_size: 20,
});
expect(params.projectKey).toBe('my-project');
expect(params.page).toBe(2);
expect(params.pageSize).toBe(20);
});
it('should map severity parameter', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
severity: 'MAJOR',
});
expect(params.projectKey).toBe('my-project');
expect(params.severity).toBe('MAJOR');
});
it('should map array parameters', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
statuses: ['OPEN', 'CONFIRMED'],
types: ['BUG', 'VULNERABILITY'],
rules: ['rule1', 'rule2'],
tags: ['tag1', 'tag2'],
});
expect(params.projectKey).toBe('my-project');
expect(params.statuses).toEqual(['OPEN', 'CONFIRMED']);
expect(params.types).toEqual(['BUG', 'VULNERABILITY']);
expect(params.rules).toEqual(['rule1', 'rule2']);
expect(params.tags).toEqual(['tag1', 'tag2']);
});
it('should map boolean parameters', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
resolved: true,
on_component_only: false,
since_leak_period: true,
in_new_code_period: false,
});
expect(params.projectKey).toBe('my-project');
expect(params.resolved).toBe(true);
expect(params.onComponentOnly).toBe(false);
expect(params.sinceLeakPeriod).toBe(true);
expect(params.inNewCodePeriod).toBe(false);
});
it('should map date parameters', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
created_after: '2023-01-01',
created_before: '2023-12-31',
created_at: '2023-06-15',
created_in_last: '7d',
});
expect(params.projectKey).toBe('my-project');
expect(params.createdAfter).toBe('2023-01-01');
expect(params.createdBefore).toBe('2023-12-31');
expect(params.createdAt).toBe('2023-06-15');
expect(params.createdInLast).toBe('7d');
});
it('should map assignees and authors', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
assignees: ['user1', 'user2'],
authors: ['author1', 'author2'],
});
expect(params.projectKey).toBe('my-project');
expect(params.assignees).toEqual(['user1', 'user2']);
expect(params.authors).toEqual(['author1', 'author2']);
});
it('should map security-related parameters', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
cwe: ['cwe1', 'cwe2'],
languages: ['java', 'typescript'],
owasp_top10: ['a1', 'a2'],
sans_top25: ['sans1', 'sans2'],
sonarsource_security: ['sec1', 'sec2'],
});
expect(params.projectKey).toBe('my-project');
expect(params.cwe).toEqual(['cwe1', 'cwe2']);
expect(params.languages).toEqual(['java', 'typescript']);
expect(params.owaspTop10).toEqual(['a1', 'a2']);
expect(params.sansTop25).toEqual(['sans1', 'sans2']);
expect(params.sonarsourceSecurity).toEqual(['sec1', 'sec2']);
});
it('should map facets parameter', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
facets: ['facet1', 'facet2'],
});
expect(params.projectKey).toBe('my-project');
expect(params.facets).toEqual(['facet1', 'facet2']);
});
it('should correctly handle null values', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
severity: null,
statuses: null,
rules: null,
});
expect(params.projectKey).toBe('my-project');
expect(params.severity).toBeUndefined();
expect(params.statuses).toBeUndefined();
expect(params.rules).toBeUndefined();
});
it('should handle a mix of parameter types', () => {
const params = mapToSonarQubeParams({
project_key: 'my-project',
severity: 'MAJOR',
page: 2,
statuses: ['OPEN'],
resolved: true,
created_after: '2023-01-01',
assignees: ['user1'],
cwe: ['cwe1'],
facets: ['facet1'],
});
expect(params.projectKey).toBe('my-project');
expect(params.severity).toBe('MAJOR');
expect(params.page).toBe(2);
expect(params.statuses).toEqual(['OPEN']);
expect(params.resolved).toBe(true);
expect(params.createdAfter).toBe('2023-01-01');
expect(params.assignees).toEqual(['user1']);
expect(params.cwe).toEqual(['cwe1']);
expect(params.facets).toEqual(['facet1']);
});
});
```
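These tests pin down the mapping contract: snake_case tool arguments become camelCase client parameters, absent keys stay `undefined`, and `null` is normalized away. A partial sketch consistent with the assertions (the real function lives in `index.ts` and covers more keys):

```typescript
// Sketch only: each snake_case key maps to its camelCase twin,
// and `?? undefined` collapses null to undefined as the tests require.
function mapToSonarQubeParams(p: Record<string, unknown>) {
  return {
    projectKey: p.project_key,
    severity: p.severity ?? undefined,
    page: p.page ?? undefined,
    pageSize: p.page_size ?? undefined,
    statuses: p.statuses ?? undefined,
    resolved: p.resolved ?? undefined,
    onComponentOnly: p.on_component_only ?? undefined,
    createdAfter: p.created_after ?? undefined,
    owaspTop10: p.owasp_top10 ?? undefined,
    // ...the remaining keys follow the same pattern
  };
}
```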
--------------------------------------------------------------------------------
/src/__tests__/schema-transformation-mocks.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
// These tests mock the transformations used in the tool registrations in index.ts
describe('Schema Transformation Mocks', () => {
describe('Page and PageSize Transformations in Tool Registrations', () => {
it('should test page schema transformation - projects tool', () => {
const pageTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageTransform('10')).toBe(10);
expect(pageTransform('invalid')).toBe(null);
expect(pageTransform(undefined)).toBe(null);
expect(pageTransform('')).toBe(null);
});
it('should test page_size schema transformation - projects tool', () => {
const pageSizeTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageSizeTransform('20')).toBe(20);
expect(pageSizeTransform('invalid')).toBe(null);
expect(pageSizeTransform(undefined)).toBe(null);
expect(pageSizeTransform('')).toBe(null);
});
it('should test page schema transformation - metrics tool', () => {
const pageTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageTransform('10')).toBe(10);
expect(pageTransform('invalid')).toBe(null);
expect(pageTransform(undefined)).toBe(null);
expect(pageTransform('')).toBe(null);
});
it('should test page_size schema transformation - metrics tool', () => {
const pageSizeTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageSizeTransform('20')).toBe(20);
expect(pageSizeTransform('invalid')).toBe(null);
expect(pageSizeTransform(undefined)).toBe(null);
expect(pageSizeTransform('')).toBe(null);
});
it('should test page schema transformation - issues tool', () => {
const pageTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageTransform('10')).toBe(10);
expect(pageTransform('invalid')).toBe(null);
expect(pageTransform(undefined)).toBe(null);
expect(pageTransform('')).toBe(null);
});
it('should test page_size schema transformation - issues tool', () => {
const pageSizeTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageSizeTransform('20')).toBe(20);
expect(pageSizeTransform('invalid')).toBe(null);
expect(pageSizeTransform(undefined)).toBe(null);
expect(pageSizeTransform('')).toBe(null);
});
it('should test page schema transformation - measures_components tool', () => {
const pageTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageTransform('10')).toBe(10);
expect(pageTransform('invalid')).toBe(null);
expect(pageTransform(undefined)).toBe(null);
expect(pageTransform('')).toBe(null);
});
it('should test page_size schema transformation - measures_components tool', () => {
const pageSizeTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageSizeTransform('20')).toBe(20);
expect(pageSizeTransform('invalid')).toBe(null);
expect(pageSizeTransform(undefined)).toBe(null);
expect(pageSizeTransform('')).toBe(null);
});
it('should test page schema transformation - measures_history tool', () => {
const pageTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageTransform('10')).toBe(10);
expect(pageTransform('invalid')).toBe(null);
expect(pageTransform(undefined)).toBe(null);
expect(pageTransform('')).toBe(null);
});
it('should test page_size schema transformation - measures_history tool', () => {
const pageSizeTransform = (val: any) => (val ? parseInt(val, 10) || null : null);
expect(pageSizeTransform('20')).toBe(20);
expect(pageSizeTransform('invalid')).toBe(null);
expect(pageSizeTransform(undefined)).toBe(null);
expect(pageSizeTransform('')).toBe(null);
});
});
describe('Boolean Parameter Transformations in Issues Tool Registration', () => {
it('should test resolved parameter transformation', () => {
const boolTransform = (val: any) => val === 'true';
expect(boolTransform('true')).toBe(true);
expect(boolTransform('false')).toBe(false);
expect(boolTransform('something')).toBe(false);
});
it('should test on_component_only parameter transformation', () => {
const boolTransform = (val: any) => val === 'true';
expect(boolTransform('true')).toBe(true);
expect(boolTransform('false')).toBe(false);
expect(boolTransform('something')).toBe(false);
});
it('should test since_leak_period parameter transformation', () => {
const boolTransform = (val: any) => val === 'true';
expect(boolTransform('true')).toBe(true);
expect(boolTransform('false')).toBe(false);
expect(boolTransform('something')).toBe(false);
});
it('should test in_new_code_period parameter transformation', () => {
const boolTransform = (val: any) => val === 'true';
expect(boolTransform('true')).toBe(true);
expect(boolTransform('false')).toBe(false);
expect(boolTransform('something')).toBe(false);
});
});
});
```
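The transforms above are duplicated inline rather than imported, so the tests stay decoupled from `index.ts`. For context, a sketch of how the same coercions might be declared in a schema, assuming Zod (not shown in this excerpt) is the validation library:

```typescript
import { z } from 'zod';

// Sketch: MCP clients send strings; the schema coerces them on parse.
const pageSchema = z
  .string()
  .optional()
  .transform((val) => (val ? parseInt(val, 10) || null : null));

const booleanStringSchema = z
  .string()
  .optional()
  .transform((val) => val === 'true');

pageSchema.parse('10'); // 10
pageSchema.parse('invalid'); // null
booleanStringSchema.parse('true'); // true
```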
--------------------------------------------------------------------------------
/.github/WORKFLOWS.md:
--------------------------------------------------------------------------------
```markdown
# GitHub Actions Workflows Documentation
This document describes the GitHub Actions workflows used in this repository.
## Workflow Overview
```mermaid
graph LR
PR[Pull Request] --> VALIDATE[Validate]
VALIDATE --> TEST[Test & Build]
PUSH[Push to Main] --> MAIN[Main Workflow]
MAIN --> VERSION[Version Packages]
VERSION --> RELEASE[Create Release]
RELEASE --> PUBLISH[Publish Workflow]
PUBLISH --> NPM[NPM Package]
PUBLISH --> DOCKER[Docker Image]
```
## Important Setup Requirements
### Personal Access Token for Release Workflow
The Main workflow creates GitHub releases that should trigger the Publish workflow. However, as a GitHub security measure against recursive workflow runs, events created with the default `GITHUB_TOKEN` (including releases) do not trigger other workflows.
**To enable the Publish workflow to trigger automatically:**
1. Create a Personal Access Token (PAT) with the following permissions:
- `contents:write` - To create releases
- `actions:read` - To trigger workflows
2. Add the PAT as a repository secret named `RELEASE_TOKEN`:
- Go to Settings → Secrets and variables → Actions
- Click "New repository secret"
- Name: `RELEASE_TOKEN`
- Value: Your PAT
3. The Main workflow automatically uses `RELEASE_TOKEN` when it is available and falls back to `GITHUB_TOKEN` otherwise, as in the snippet below.
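A sketch of that fallback as a single expression (illustrative, not copied from `main.yml`):
```yaml
# Prefer the PAT so that the release this job creates can trigger publish.yml
- uses: actions/checkout@v4
  with:
    token: ${{ secrets.RELEASE_TOKEN || secrets.GITHUB_TOKEN }}
```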
## Workflows
### 1. Pull Request Workflow (`pr.yml`)
**Purpose**: Validates and tests all pull requests.
**Triggers**: Pull requests to `main` branch
**Jobs**:
- **validate**: Checks changeset status
- **security-audit**: Runs security audit
- **type-checking**: TypeScript type checking
- **linting**: ESLint checks
- **format-checking**: Prettier formatting checks
- **test**: Runs test suite with coverage
- **codeql**: Security analysis
- **osv-scan**: Vulnerability scanning
### 2. Main Workflow (`main.yml`)
**Purpose**: Handles versioning and release creation when changes are merged to main.
**Triggers**: Push to `main` branch
**Jobs**:
- **build**:
- Checks for changesets (fails if releasable commits lack changesets)
- Versions packages using changesets
- Builds artifacts and generates SBOM
- Creates GitHub release with artifacts
- Generates build attestations
**Important**: Requires `RELEASE_TOKEN` secret to trigger the publish workflow.
### 3. Publish Workflow (`publish.yml`)
**Purpose**: Publishes packages to registries after a release.
**Triggers**:
- Release published event
- Manual workflow dispatch
**Jobs**:
- **npm**: Publishes to NPM (requires `ENABLE_NPM_RELEASE` variable and `NPM_TOKEN` secret)
- **docker**: Builds and publishes Docker images (requires `ENABLE_DOCKER_RELEASE` variable)
## Configuration
### Required Secrets
| Secret | Purpose | Required For |
| -------------------- | --------------------------------------------- | --------------- |
| `RELEASE_TOKEN` | PAT to trigger publish workflow from releases | Auto-publish |
| `NPM_TOKEN` | NPM authentication | NPM publishing |
| `DOCKERHUB_USERNAME` | Docker Hub username | Docker Hub push |
| `DOCKERHUB_TOKEN` | Docker Hub access token | Docker Hub push |
### Repository Variables
| Variable | Purpose | Default |
| ----------------------- | -------------------------- | ------- |
| `ENABLE_NPM_RELEASE` | Enable NPM publishing | `false` |
| `ENABLE_DOCKER_RELEASE` | Enable Docker distribution | `false` |
## Setup Instructions
### 1. Create a Personal Access Token for Releases
Create a fine-grained PAT with:
- **Repository access**: Your repository
- **Permissions**: Contents (Write), Actions (Read)
Add as `RELEASE_TOKEN` secret.
### 2. Configure NPM Publishing
1. Get NPM token from npmjs.com
2. Add as `NPM_TOKEN` secret
3. Set `ENABLE_NPM_RELEASE=true` variable
### 3. Enable Docker Distribution (Optional)
1. Set `ENABLE_DOCKER_RELEASE=true` variable
2. Add Docker Hub credentials if publishing to Docker Hub (these secrets and variables can also be set from the CLI; see the sketch below)
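A sketch of scripting the setup above, assuming the GitHub CLI (`gh`) is installed and authenticated (values are illustrative):
```bash
# Secrets
gh secret set RELEASE_TOKEN --body "$RELEASE_PAT"
gh secret set NPM_TOKEN --body "$NPM_TOKEN"

# Repository variables
gh variable set ENABLE_NPM_RELEASE --body "true"
gh variable set ENABLE_DOCKER_RELEASE --body "true"
```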
## How It Works
1. **Developer creates PR** with changes and changeset
2. **CI validates** the PR (tests pass, changeset present)
3. **PR is merged** to main branch
4. **Main workflow runs**:
- Checks for changesets
- Versions packages
- Builds artifacts
- Creates GitHub release
5. **Publish workflow triggers** (if `RELEASE_TOKEN` configured):
- Publishes to NPM
- Builds Docker images
- Deploys to configured targets
## Troubleshooting
### Publish Workflow Not Triggering
- Verify `RELEASE_TOKEN` secret is configured
- Ensure the PAT has `contents:write` and `actions:read` permissions
- Check that the release was created (not draft)
### NPM Publish Failing
- Verify `NPM_TOKEN` is valid
- Check `ENABLE_NPM_RELEASE` variable is set to `true`
- Ensure the version in `package.json` hasn't already been published
### Missing Changesets
- Main workflow will fail if releasable commits lack changesets
- Add changesets with `pnpm changeset`; pending changesets can be listed as shown below
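A sketch of checking for pending changesets locally (flags per `@changesets/cli`):
```bash
pnpm changeset status --since=main
```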
## Best Practices
1. **Always include changesets** in PRs with changes
2. **Configure `RELEASE_TOKEN`** for automatic publishing
3. **Keep secrets secure** and rotate regularly
4. **Monitor workflow runs** for failures
5. **Use `pnpm verify`** before pushing code
```
--------------------------------------------------------------------------------
/src/__tests__/error-handling.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, beforeAll, afterAll, vi } from 'vitest';
import nock from 'nock';
// Mock environment variables
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'http://localhost:9000';
// Keep a reference to the original environment so each test can restore it
const originalEnv = process.env;
beforeAll(() => {
nock.cleanAll();
});
afterAll(() => {
nock.cleanAll();
});
let nullToUndefined: any;
// No need to mock axios anymore since we're using sonarqube-web-api-client
describe('Error Handling', () => {
beforeAll(async () => {
const module = await import('../index.js');
nullToUndefined = module.nullToUndefined;
});
beforeEach(() => {
vi.resetModules();
process.env = { ...originalEnv };
nock.cleanAll();
});
afterEach(() => {
process.env = originalEnv;
vi.restoreAllMocks();
nock.cleanAll();
});
describe('nullToUndefined function', () => {
it('should handle various input types correctly', () => {
// Test nulls
expect(nullToUndefined(null)).toBeUndefined();
// Test undefined
expect(nullToUndefined(undefined)).toBeUndefined();
// Test various other types
expect(nullToUndefined(0)).toBe(0);
expect(nullToUndefined('')).toBe('');
expect(nullToUndefined('test')).toBe('test');
expect(nullToUndefined(false)).toBe(false);
expect(nullToUndefined(true)).toBe(true);
// Test objects and arrays
const obj = { test: 1 };
const arr = [1, 2, 3];
expect(nullToUndefined(obj)).toBe(obj);
expect(nullToUndefined(arr)).toBe(arr);
});
});
describe('mapToSonarQubeParams', () => {
it('should handle all parameters', async () => {
const module = await import('../index.js');
const mapToSonarQubeParams = module.mapToSonarQubeParams;
const params = mapToSonarQubeParams({
project_key: 'test-project',
severity: 'MAJOR',
page: 1,
page_size: 10,
statuses: ['OPEN', 'CONFIRMED'],
resolutions: ['FALSE-POSITIVE', 'FIXED'],
resolved: true,
types: ['BUG', 'VULNERABILITY'],
rules: ['rule1', 'rule2'],
tags: ['tag1', 'tag2'],
created_after: '2023-01-01',
created_before: '2023-12-31',
created_at: '2023-06-15',
created_in_last: '7d',
assignees: ['user1', 'user2'],
authors: ['author1', 'author2'],
cwe: ['cwe1', 'cwe2'],
languages: ['java', 'typescript'],
owasp_top10: ['a1', 'a2'],
sans_top25: ['sans1', 'sans2'],
sonarsource_security: ['sec1', 'sec2'],
on_component_only: true,
facets: ['facet1', 'facet2'],
since_leak_period: true,
in_new_code_period: true,
});
expect(params.projectKey).toBe('test-project');
expect(params.severity).toBe('MAJOR');
expect(params.page).toBe(1);
expect(params.pageSize).toBe(10);
expect(params.statuses).toEqual(['OPEN', 'CONFIRMED']);
expect(params.resolutions).toEqual(['FALSE-POSITIVE', 'FIXED']);
expect(params.resolved).toBe(true);
expect(params.types).toEqual(['BUG', 'VULNERABILITY']);
expect(params.rules).toEqual(['rule1', 'rule2']);
expect(params.tags).toEqual(['tag1', 'tag2']);
expect(params.createdAfter).toBe('2023-01-01');
expect(params.createdBefore).toBe('2023-12-31');
expect(params.createdAt).toBe('2023-06-15');
expect(params.createdInLast).toBe('7d');
expect(params.assignees).toEqual(['user1', 'user2']);
expect(params.authors).toEqual(['author1', 'author2']);
expect(params.cwe).toEqual(['cwe1', 'cwe2']);
expect(params.languages).toEqual(['java', 'typescript']);
expect(params.owaspTop10).toEqual(['a1', 'a2']);
expect(params.sansTop25).toEqual(['sans1', 'sans2']);
expect(params.sonarsourceSecurity).toEqual(['sec1', 'sec2']);
expect(params.onComponentOnly).toBe(true);
expect(params.facets).toEqual(['facet1', 'facet2']);
expect(params.sinceLeakPeriod).toBe(true);
expect(params.inNewCodePeriod).toBe(true);
});
it('should handle empty parameters', async () => {
const module = await import('../index.js');
const mapToSonarQubeParams = module.mapToSonarQubeParams;
const params = mapToSonarQubeParams({ project_key: 'test-project' });
expect(params.projectKey).toBe('test-project');
expect(params.severity).toBeUndefined();
expect(params.statuses).toBeUndefined();
expect(params.resolutions).toBeUndefined();
expect(params.resolved).toBeUndefined();
expect(params.types).toBeUndefined();
expect(params.rules).toBeUndefined();
});
});
describe('Error handling utility functions', () => {
it('should properly handle null parameters', () => {
expect(nullToUndefined(null)).toBeUndefined();
});
it('should pass through non-null values', () => {
expect(nullToUndefined('value')).toBe('value');
expect(nullToUndefined(123)).toBe(123);
expect(nullToUndefined(true)).toBe(true);
expect(nullToUndefined(false)).toBe(false);
expect(nullToUndefined([])).toEqual([]);
expect(nullToUndefined({})).toEqual({});
});
it('should handle undefined parameters', () => {
expect(nullToUndefined(undefined)).toBeUndefined();
});
});
});
```
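The assertions above fully determine the helper: `null` and `undefined` both map to `undefined`, while every other value, including falsy `0`, `''`, and `false`, passes through by reference. A one-line sketch consistent with that behavior (not necessarily the literal source in `index.ts`):

```typescript
const nullToUndefined = <T>(value: T | null | undefined): T | undefined => value ?? undefined;
```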
--------------------------------------------------------------------------------
/src/monitoring/health.ts:
--------------------------------------------------------------------------------
```typescript
/* istanbul ignore file */
import { createLogger } from '../utils/logger.js';
import { createSonarQubeClient } from '../sonarqube.js';
import { getServiceAccountConfig } from '../config/service-accounts.js';
import { SERVER_VERSION } from '../config/versions.js';
const logger = createLogger('HealthService');
export type HealthStatus = 'healthy' | 'unhealthy' | 'degraded';
export interface DependencyHealth {
name: string;
status: HealthStatus;
message?: string;
latency?: number;
lastCheck?: Date;
}
export interface HealthCheckResult {
status: HealthStatus;
version: string;
uptime: number;
timestamp: Date;
dependencies: Record<string, DependencyHealth>;
features: Record<string, boolean>;
metrics?: {
requests: number;
errors: number;
activeSession: number;
};
}
export class HealthService {
private static instance: HealthService;
private readonly startTime: Date;
private cachedHealth?: {
result: HealthCheckResult;
timestamp: number;
};
private readonly cacheTimeout = 5000; // 5 seconds cache
private constructor() {
this.startTime = new Date();
}
static getInstance(): HealthService {
if (!this.instance) {
this.instance = new HealthService();
}
return this.instance;
}
/**
* Perform comprehensive health check
*/
async checkHealth(): Promise<HealthCheckResult> {
// Check cache
if (this.cachedHealth && Date.now() - this.cachedHealth.timestamp < this.cacheTimeout) {
return this.cachedHealth.result;
}
const dependencies: Record<string, DependencyHealth> = {};
// Check SonarQube connectivity
dependencies.sonarqube = await this.checkSonarQube();
// Determine overall status
const statuses = Object.values(dependencies);
let overallStatus: HealthStatus = 'healthy';
if (statuses.some((d) => d.status === 'unhealthy')) {
overallStatus = 'unhealthy';
} else if (statuses.some((d) => d.status === 'degraded')) {
overallStatus = 'degraded';
}
// Get metrics summary
const metrics = this.getMetricsSummary();
const result: HealthCheckResult = {
status: overallStatus,
version: SERVER_VERSION,
uptime: Date.now() - this.startTime.getTime(),
timestamp: new Date(),
dependencies,
features: {
metrics: true,
},
};
// Only add metrics if they exist
if (metrics !== undefined) {
result.metrics = metrics;
}
// Cache result
this.cachedHealth = {
result,
timestamp: Date.now(),
};
return result;
}
/**
* Check SonarQube connectivity
*/
private async checkSonarQube(): Promise<DependencyHealth> {
const startTime = Date.now();
try {
const config = getServiceAccountConfig('default');
if (!config?.token) {
return {
name: 'SonarQube',
status: 'unhealthy',
message: 'No default service account configured',
lastCheck: new Date(),
};
}
const client = createSonarQubeClient(
config.token,
config.url ?? process.env.SONARQUBE_URL ?? 'https://sonarcloud.io',
config.organization ?? process.env.SONARQUBE_ORGANIZATION
);
// Try to ping SonarQube
await client.ping();
return {
name: 'SonarQube',
status: 'healthy',
latency: Date.now() - startTime,
lastCheck: new Date(),
};
} catch (error) {
logger.error('SonarQube health check failed', error);
return {
name: 'SonarQube',
status: 'unhealthy',
message: error instanceof Error ? error.message : 'Unknown error',
latency: Date.now() - startTime,
lastCheck: new Date(),
};
}
}
/**
* Get metrics summary
*/
private getMetricsSummary(): HealthCheckResult['metrics'] {
// Get current metric values (this is a simplified version)
// In a real implementation, you'd query the actual metric values
// For now, we'll return static values
return {
requests: 0, // Would query mcpRequestsTotal
errors: 0, // Would query sonarqubeErrorsTotal + authFailuresTotal
activeSession: 0, // Would query activeSessions
};
}
/**
* Get readiness status (for Kubernetes)
*/
async checkReadiness(): Promise<{
ready: boolean;
checks: Record<string, { ready: boolean; message?: string }>;
}> {
const checks: Record<string, { ready: boolean; message?: string }> = {};
// Check if server is initialized
checks.server = { ready: true };
// Check SonarQube connectivity (async)
const sonarqubeHealth = await this.checkSonarQube();
const sonarqubeCheck: { ready: boolean; message?: string } = {
ready: sonarqubeHealth.status !== 'unhealthy',
};
if (sonarqubeHealth.message !== undefined) {
sonarqubeCheck.message = sonarqubeHealth.message;
}
checks.sonarqube = sonarqubeCheck;
// Overall readiness
const ready = Object.values(checks).every((check) => check.ready);
return { ready, checks };
}
/**
* Reset the singleton instance (for testing)
*/
static resetInstance(): void {
// Reset the singleton instance for testing purposes
// @ts-expect-error - Intentionally setting to undefined for testing
this.instance = undefined;
}
}
```
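Nothing in this file binds the checks to a transport. A sketch of exposing them as Kubernetes-style probes, assuming an Express HTTP layer (Express and the endpoint paths are illustrative, not part of this module):

```typescript
import express from 'express';
import { HealthService } from './health.js';

const app = express();
const health = HealthService.getInstance();

// Liveness/health: full dependency check, cached for 5 s by the service.
app.get('/health', async (_req, res) => {
  const result = await health.checkHealth();
  res.status(result.status === 'unhealthy' ? 503 : 200).json(result);
});

// Readiness: maps directly onto a Kubernetes readinessProbe.
app.get('/ready', async (_req, res) => {
  const { ready, checks } = await health.checkReadiness();
  res.status(ready ? 200 : 503).json({ ready, checks });
});
```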
--------------------------------------------------------------------------------
/src/monitoring/circuit-breaker.ts:
--------------------------------------------------------------------------------
```typescript
import CircuitBreaker from 'opossum';
import { createLogger } from '../utils/logger.js';
import { updateCircuitBreakerMetrics, trackCircuitBreakerFailure } from './metrics.js';
const logger = createLogger('CircuitBreaker');
export interface CircuitBreakerOptions {
timeout?: number;
errorThresholdPercentage?: number;
resetTimeout?: number;
rollingCountTimeout?: number;
rollingCountBuckets?: number;
name?: string;
volumeThreshold?: number;
errorFilter?: (error: Error) => boolean;
}
/**
* Circuit breaker factory for external service calls
*/
export class CircuitBreakerFactory {
private static readonly breakers = new Map<string, CircuitBreaker>();
/**
* Create or get a circuit breaker for a service
*/
static getBreaker<T extends unknown[], R>(
name: string,
fn: (...args: T) => Promise<R>,
options: CircuitBreakerOptions = {}
): CircuitBreaker<T, R> {
// Check if breaker already exists
const existing = this.breakers.get(name);
if (existing) {
return existing as CircuitBreaker<T, R>;
}
// Create new circuit breaker with defaults
const breakerOptions: CircuitBreaker.Options = {
timeout: options.timeout ?? 10000, // 10 seconds
errorThresholdPercentage: options.errorThresholdPercentage ?? 50,
resetTimeout: options.resetTimeout ?? 30000, // 30 seconds
rollingCountTimeout: options.rollingCountTimeout ?? 10000, // 10 seconds
rollingCountBuckets: options.rollingCountBuckets ?? 10,
name: options.name ?? name,
volumeThreshold: options.volumeThreshold ?? 5,
errorFilter: options.errorFilter,
};
const breaker = new CircuitBreaker(fn, breakerOptions);
// Add event listeners for metrics
this.attachEventListeners(name, breaker);
// Store breaker
this.breakers.set(name, breaker);
logger.info('Circuit breaker created', {
name,
options: breakerOptions,
});
return breaker;
}
/**
* Attach event listeners for metrics and logging
*/
private static attachEventListeners(name: string, breaker: CircuitBreaker): void {
// Circuit opened (failure threshold reached)
breaker.on('open', () => {
logger.warn('Circuit breaker opened', { name });
updateCircuitBreakerMetrics(name, 'open');
});
// Circuit closed (recovered)
breaker.on('close', () => {
logger.info('Circuit breaker closed', { name });
updateCircuitBreakerMetrics(name, 'closed');
});
// Circuit half-open (testing if service recovered)
breaker.on('halfOpen', () => {
logger.info('Circuit breaker half-open', { name });
updateCircuitBreakerMetrics(name, 'half-open');
});
// Request rejected due to open circuit
breaker.on('reject', () => {
trackCircuitBreakerFailure(name);
logger.debug('Request rejected by circuit breaker', { name });
});
// Request failed
breaker.on('failure', (error: Error) => {
trackCircuitBreakerFailure(name);
logger.debug('Request failed in circuit breaker', {
name,
error: error.message,
});
});
// Request succeeded
breaker.on('success', (result: unknown) => {
logger.debug('Request succeeded in circuit breaker', {
name,
hasResult: !!result,
});
});
// Timeout
breaker.on('timeout', () => {
trackCircuitBreakerFailure(name);
logger.warn('Request timed out in circuit breaker', { name });
});
}
/**
* Get circuit breaker statistics
*/
static getStats(name: string): CircuitBreaker.Stats | undefined {
const breaker = this.breakers.get(name);
return breaker?.stats;
}
/**
* Get all circuit breakers
*/
static getAllBreakers(): Map<string, CircuitBreaker> {
return new Map(this.breakers);
}
/**
* Shutdown all circuit breakers
*/
static shutdown(): void {
logger.info('Shutting down all circuit breakers', {
count: this.breakers.size,
});
for (const [name, breaker] of Array.from(this.breakers.entries())) {
breaker.shutdown();
logger.debug('Circuit breaker shut down', { name });
}
this.breakers.clear();
}
/**
* Reset all circuit breakers (for testing)
*/
static reset(): void {
this.breakers.clear();
}
}
/**
* Decorator to apply circuit breaker to a method
*/
export function withCircuitBreaker(name: string, options?: CircuitBreakerOptions): MethodDecorator {
return function (target: unknown, propertyKey: string | symbol, descriptor: PropertyDescriptor) {
const originalMethod = descriptor.value as (...args: unknown[]) => Promise<unknown>;
descriptor.value = async function (...args: unknown[]) {
const breaker = CircuitBreakerFactory.getBreaker(
`${name}.${String(propertyKey)}`,
originalMethod.bind(this) as (...args: unknown[]) => Promise<unknown>,
options
);
return breaker.fire(...args);
};
return descriptor;
};
}
/**
* Wrap a function with a circuit breaker
*/
export function wrapWithCircuitBreaker<T extends unknown[], R>(
name: string,
fn: (...args: T) => Promise<R>,
options?: CircuitBreakerOptions
): (...args: T) => Promise<R> {
const breaker = CircuitBreakerFactory.getBreaker(name, fn, options);
return async (...args: T) => {
return breaker.fire(...args);
};
}
```
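A usage sketch for the wrapper above (`searchIssues` and the breaker options are illustrative; any `(...args) => Promise<R>` works):

```typescript
import { wrapWithCircuitBreaker } from './circuit-breaker.js';

// Hypothetical upstream call guarded by the breaker.
async function searchIssues(projectKey: string): Promise<unknown> {
  const res = await fetch(
    `https://sonarcloud.io/api/issues/search?componentKeys=${projectKey}`
  );
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  return res.json();
}

// The same name always returns the same breaker, so its state is shared
// across every call site that uses it.
const guardedSearch = wrapWithCircuitBreaker('sonarqube.searchIssues', searchIssues, {
  timeout: 5_000, // fail fast instead of the 10 s default
  volumeThreshold: 10, // require 10 calls in the rolling window before opening
});

// While the circuit is open, this rejects immediately without a network call.
const issues = await guardedSearch('my-project');
```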