This is page 3 of 8. Use http://codebase.md/sapientpants/sonarqube-mcp-server?page={x} to view the full context.
# Directory Structure
```
├── .adr-dir
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ ├── analyze-and-fix-github-issue.md
│ │ ├── fix-sonarqube-issues.md
│ │ ├── implement-github-issue.md
│ │ ├── release.md
│ │ ├── spec-feature.md
│ │ └── update-dependencies.md
│ ├── hooks
│ │ └── block-git-no-verify.ts
│ └── settings.json
├── .dockerignore
├── .github
│ ├── actionlint.yaml
│ ├── changeset.yml
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ └── feature_request.md
│ ├── pull_request_template.md
│ ├── scripts
│ │ ├── determine-artifact.sh
│ │ └── version-and-release.js
│ ├── workflows
│ │ ├── codeql.yml
│ │ ├── main.yml
│ │ ├── pr.yml
│ │ ├── publish.yml
│ │ ├── reusable-docker.yml
│ │ ├── reusable-security.yml
│ │ └── reusable-validate.yml
│ └── WORKFLOWS.md
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .markdownlint.yaml
├── .markdownlintignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── .trivyignore
├── .yaml-lint.yml
├── .yamllintignore
├── CHANGELOG.md
├── changes.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── COMPATIBILITY.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── architecture
│ │ └── decisions
│ │ ├── 0001-record-architecture-decisions.md
│ │ ├── 0002-use-node-js-with-typescript.md
│ │ ├── 0003-adopt-model-context-protocol-for-sonarqube-integration.md
│ │ ├── 0004-use-sonarqube-web-api-client-for-all-sonarqube-interactions.md
│ │ ├── 0005-domain-driven-design-of-sonarqube-modules.md
│ │ ├── 0006-expose-sonarqube-features-as-mcp-tools.md
│ │ ├── 0007-support-multiple-authentication-methods-for-sonarqube.md
│ │ ├── 0008-use-environment-variables-for-configuration.md
│ │ ├── 0009-file-based-logging-to-avoid-stdio-conflicts.md
│ │ ├── 0010-use-stdio-transport-for-mcp-communication.md
│ │ ├── 0011-docker-containerization-for-deployment.md
│ │ ├── 0012-add-elicitation-support-for-interactive-user-input.md
│ │ ├── 0014-current-security-model-and-future-oauth2-considerations.md
│ │ ├── 0015-transport-architecture-refactoring.md
│ │ ├── 0016-http-transport-with-oauth-2-0-metadata-endpoints.md
│ │ ├── 0017-comprehensive-audit-logging-system.md
│ │ ├── 0018-add-comprehensive-monitoring-and-observability.md
│ │ ├── 0019-simplify-to-stdio-only-transport-for-mcp-gateway-deployment.md
│ │ ├── 0020-testing-framework-and-strategy-vitest-with-property-based-testing.md
│ │ ├── 0021-code-quality-toolchain-eslint-prettier-strict-typescript.md
│ │ ├── 0022-package-manager-choice-pnpm.md
│ │ ├── 0023-release-management-with-changesets.md
│ │ ├── 0024-ci-cd-platform-github-actions.md
│ │ ├── 0025-container-and-security-scanning-strategy.md
│ │ ├── 0026-circuit-breaker-pattern-with-opossum.md
│ │ ├── 0027-docker-image-publishing-strategy-ghcr-to-docker-hub.md
│ │ └── 0028-session-based-http-transport-with-server-sent-events.md
│ ├── architecture.md
│ ├── security.md
│ └── troubleshooting.md
├── eslint.config.js
├── examples
│ └── http-client.ts
├── jest.config.js
├── LICENSE
├── LICENSES.md
├── osv-scanner.toml
├── package.json
├── pnpm-lock.yaml
├── README.md
├── scripts
│ ├── actionlint.sh
│ ├── ci-local.sh
│ ├── load-test.sh
│ ├── README.md
│ ├── run-all-tests.sh
│ ├── scan-container.sh
│ ├── security-scan.sh
│ ├── setup.sh
│ ├── test-monitoring-integration.sh
│ └── validate-docs.sh
├── SECURITY.md
├── sonar-project.properties
├── src
│ ├── __tests__
│ │ ├── additional-coverage.test.ts
│ │ ├── advanced-index.test.ts
│ │ ├── assign-issue.test.ts
│ │ ├── auth-methods.test.ts
│ │ ├── boolean-string-transform.test.ts
│ │ ├── components.test.ts
│ │ ├── config
│ │ │ └── service-accounts.test.ts
│ │ ├── dependency-injection.test.ts
│ │ ├── direct-handlers.test.ts
│ │ ├── direct-lambdas.test.ts
│ │ ├── direct-schema-validation.test.ts
│ │ ├── domains
│ │ │ ├── components-domain-full.test.ts
│ │ │ ├── components-domain.test.ts
│ │ │ ├── hotspots-domain.test.ts
│ │ │ └── source-code-domain.test.ts
│ │ ├── environment-validation.test.ts
│ │ ├── error-handler.test.ts
│ │ ├── error-handling.test.ts
│ │ ├── errors.test.ts
│ │ ├── function-tests.test.ts
│ │ ├── handlers
│ │ │ ├── components-handler-integration.test.ts
│ │ │ └── projects-authorization.test.ts
│ │ ├── handlers.test.ts
│ │ ├── handlers.test.ts.skip
│ │ ├── index.test.ts
│ │ ├── issue-resolution-elicitation.test.ts
│ │ ├── issue-resolution.test.ts
│ │ ├── issue-transitions.test.ts
│ │ ├── issues-enhanced-search.test.ts
│ │ ├── issues-new-parameters.test.ts
│ │ ├── json-array-transform.test.ts
│ │ ├── lambda-functions.test.ts
│ │ ├── lambda-handlers.test.ts.skip
│ │ ├── logger.test.ts
│ │ ├── mapping-functions.test.ts
│ │ ├── mocked-environment.test.ts
│ │ ├── null-to-undefined.test.ts
│ │ ├── parameter-transformations-advanced.test.ts
│ │ ├── parameter-transformations.test.ts
│ │ ├── protocol-version.test.ts
│ │ ├── pull-request-transform.test.ts
│ │ ├── quality-gates.test.ts
│ │ ├── schema-parameter-transforms.test.ts
│ │ ├── schema-transformation-mocks.test.ts
│ │ ├── schema-transforms.test.ts
│ │ ├── schema-validators.test.ts
│ │ ├── schemas
│ │ │ ├── components-schema.test.ts
│ │ │ ├── hotspots-tools-schema.test.ts
│ │ │ └── issues-schema.test.ts
│ │ ├── sonarqube-elicitation.test.ts
│ │ ├── sonarqube.test.ts
│ │ ├── source-code.test.ts
│ │ ├── standalone-handlers.test.ts
│ │ ├── string-to-number-transform.test.ts
│ │ ├── tool-handler-lambdas.test.ts
│ │ ├── tool-handlers.test.ts
│ │ ├── tool-registration-schema.test.ts
│ │ ├── tool-registration-transforms.test.ts
│ │ ├── transformation-util.test.ts
│ │ ├── transports
│ │ │ ├── base.test.ts
│ │ │ ├── factory.test.ts
│ │ │ ├── http.test.ts
│ │ │ ├── session-manager.test.ts
│ │ │ └── stdio.test.ts
│ │ ├── utils
│ │ │ ├── retry.test.ts
│ │ │ └── transforms.test.ts
│ │ ├── zod-boolean-transform.test.ts
│ │ ├── zod-schema-transforms.test.ts
│ │ └── zod-transforms.test.ts
│ ├── config
│ │ ├── service-accounts.ts
│ │ └── versions.ts
│ ├── domains
│ │ ├── base.ts
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── errors.ts
│ ├── handlers
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── index.ts
│ ├── monitoring
│ │ ├── __tests__
│ │ │ └── circuit-breaker.test.ts
│ │ ├── circuit-breaker.ts
│ │ ├── health.ts
│ │ └── metrics.ts
│ ├── schemas
│ │ ├── common.ts
│ │ ├── components.ts
│ │ ├── hotspots-tools.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── sonarqube.ts
│ ├── transports
│ │ ├── base.ts
│ │ ├── factory.ts
│ │ ├── http.ts
│ │ ├── index.ts
│ │ ├── session-manager.ts
│ │ └── stdio.ts
│ ├── types
│ │ ├── common.ts
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ └── utils
│ ├── __tests__
│ │ ├── elicitation.test.ts
│ │ ├── pattern-matcher.test.ts
│ │ └── structured-response.test.ts
│ ├── client-factory.ts
│ ├── elicitation.ts
│ ├── error-handler.ts
│ ├── logger.ts
│ ├── parameter-mappers.ts
│ ├── pattern-matcher.ts
│ ├── retry.ts
│ ├── structured-response.ts
│ └── transforms.ts
├── test-http-transport.sh
├── tmp
│ └── .gitkeep
├── tsconfig.build.json
├── tsconfig.json
├── vitest.config.d.ts
├── vitest.config.js
├── vitest.config.js.map
└── vitest.config.ts
```
# Files
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
{
"name": "sonarqube-mcp-server",
"version": "1.10.21",
"description": "Model Context Protocol server for SonarQube",
"keywords": [
"sonarqube",
"mcp",
"model-context-protocol"
],
"author": "Marc Tremblay <[email protected]>",
"license": "MIT",
"repository": {
"type": "git",
"url": "git+https://github.com/sapientpants/sonarqube-mcp-server.git"
},
"bugs": {
"url": "https://github.com/sapientpants/sonarqube-mcp-server/issues"
},
"homepage": "https://github.com/sapientpants/sonarqube-mcp-server",
"type": "module",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"exports": {
".": {
"import": "./dist/index.js",
"types": "./dist/index.d.ts"
}
},
"bin": {
"sonarqube-mcp-server": "./dist/index.js"
},
"packageManager": "[email protected]",
"scripts": {
"dev": "tsc --watch --preserveWatchOutput",
"start": "node dist/index.js",
"build": "tsc -p tsconfig.build.json",
"build:watch": "tsc -p tsconfig.build.json --watch",
"typecheck": "tsc -p tsconfig.json --noEmit",
"typecheck:watch": "tsc -p tsconfig.json --noEmit --watch",
"lint": "eslint .",
"lint:fix": "eslint . --fix",
"lint:workflows": "./scripts/actionlint.sh",
"lint:markdown": "markdownlint-cli2 \"**/*.md\" \"#node_modules\"",
"lint:markdown:fix": "markdownlint-cli2 --fix \"**/*.md\" \"#node_modules\"",
"lint:yaml": "yamllint '**/*.{yml,yaml}' --ignore='node_modules/**' --ignore='.github/workflows/**' --ignore='pnpm-lock.yaml'",
"format": "prettier --check .",
"format:fix": "prettier --write .",
"test": "vitest run --reporter=verbose",
"test:watch": "vitest",
"test:ui": "vitest --ui",
"test:coverage": "vitest run --reporter=verbose --coverage",
"test:coverage:watch": "vitest --coverage",
"coverage:report": "vitest run --coverage --reporter=verbose",
"coverage:open": "open coverage/index.html",
"clean": "rimraf dist coverage",
"reset": "pnpm clean && pnpm install",
"quick-check": "pnpm typecheck && pnpm lint && pnpm test",
"precommit": "pnpm audit --audit-level critical && pnpm typecheck && pnpm lint && pnpm lint:workflows && pnpm lint:markdown && pnpm lint:yaml && pnpm format && pnpm test",
"verify": "pnpm precommit",
"setup": "./scripts/setup.sh",
"lint-staged": "lint-staged",
"sbom": "pnpm dlx @cyclonedx/cdxgen -o sbom.cdx.json",
"scan:container": "./scripts/scan-container.sh",
"scan:container:sarif": "./scripts/scan-container.sh --format sarif --output container-scan.sarif",
"prepare": "husky",
"release": "changeset version && pnpm build",
"release:publish": "pnpm build && changeset publish",
"release:tag": "git add -A && git commit -m \"chore(release): version packages\" && git tag -a v$(node -p \"require('./package.json').version\") -m \"Release\"",
"changeset": "changeset",
"changeset:status": "changeset status --since=main",
"ci:local": "./scripts/ci-local.sh",
"ci:local:fast": "./scripts/ci-local.sh --fast",
"doctor": "node -e \"console.log('Node:', process.version); console.log('npm scripts:', Object.keys(require('./package.json').scripts).length, 'available'); console.log('Run: pnpm run help')\"",
"help": "node -e \"const s=require('./package.json').scripts; console.log('Available commands:'); Object.keys(s).sort().forEach(k => console.log(' pnpm run ' + k.padEnd(20) + ' # ' + s[k].split(' ')[0]))\""
},
"lint-staged": {
"*.{ts,tsx,js,json,md,yml,yaml}": [
"prettier --write"
],
"*.{ts,tsx,js,json,jsonc,json5}": [
"eslint --fix"
],
"*.md": [
"markdownlint-cli2 --fix"
],
"*.{yml,yaml}": [
"yamllint"
]
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.20.1",
"cors": "2.8.5",
"express": "5.1.0",
"lodash": "^4.17.21",
"opossum": "^9.0.0",
"pino": "10.1.0",
"pino-roll": "4.0.0",
"pino-syslog": "3.2.0",
"sonarqube-web-api-client": "1.0.1",
"uuid": "13.0.0",
"zod": "^3.25.76",
"zod-to-json-schema": "^3.24.6"
},
"devDependencies": {
"@changesets/cli": "^2.29.7",
"@commitlint/cli": "20.1.0",
"@commitlint/config-conventional": "20.0.0",
"@cyclonedx/cdxgen": "11.10.0",
"@eslint/js": "^9.38.0",
"@fast-check/vitest": "^0.2.2",
"@jest/globals": "^30.2.0",
"@types/cors": "2.8.19",
"@types/express": "5.0.3",
"@types/jest": "^30.0.0",
"@types/lodash": "^4.17.20",
"@types/node": "^24.8.1",
"@types/opossum": "^8.1.9",
"@typescript-eslint/eslint-plugin": "^8.46.1",
"@typescript-eslint/parser": "^8.46.1",
"@vitest/coverage-v8": "^3.2.4",
"changelog-github-custom": "1.2.7",
"eslint": "^9.38.0",
"eslint-config-prettier": "^10.1.8",
"eslint-plugin-jsonc": "2.21.0",
"eslint-plugin-prettier": "^5.5.4",
"fast-check": "^4.3.0",
"husky": "^9.1.7",
"jest": "^30.2.0",
"jsonc-eslint-parser": "2.4.1",
"lint-staged": "^16.2.4",
"markdownlint-cli2": "0.18.1",
"nock": "^14.0.10",
"pino-pretty": "13.1.2",
"prettier": "^3.6.2",
"rimraf": "^6.0.1",
"supertest": "^7.1.4",
"ts-jest": "^29.4.5",
"ts-node": "^10.9.2",
"ts-node-dev": "^2.0.0",
"typescript": "^5.9.3",
"vite": "7.1.11",
"vitest": "^3.2.4",
"yaml-lint": "1.7.0"
},
"pnpm": {
"overrides": {
"@eslint/plugin-kit": ">=0.3.3"
}
}
}
```
--------------------------------------------------------------------------------
/src/__tests__/schemas/hotspots-tools-schema.test.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
import {
hotspotsToolSchema,
hotspotToolSchema,
updateHotspotStatusToolSchema,
} from '../../schemas/hotspots-tools.js';
describe('hotspotsToolSchema', () => {
  // Build the zod object once; every test parses through the same schema.
  const schema = z.object(hotspotsToolSchema);

  it('should validate minimal hotspots search parameters', () => {
    // All fields are optional, so an empty object is a valid search.
    expect(schema.parse({})).toEqual({});
  });

  it('should validate hotspots search with all parameters', () => {
    const parsed = schema.parse({
      project_key: 'my-project',
      branch: 'main',
      pull_request: 'PR-123',
      status: 'TO_REVIEW',
      resolution: 'FIXED',
      files: ['file1.java', 'file2.java'],
      assigned_to_me: true,
      since_leak_period: false,
      in_new_code_period: true,
      page: '2',
      page_size: '50',
    });
    expect(parsed.project_key).toBe('my-project');
    expect(parsed.status).toBe('TO_REVIEW');
    expect(parsed.resolution).toBe('FIXED');
    expect(parsed.files).toEqual(['file1.java', 'file2.java']);
    expect(parsed.assigned_to_me).toBe(true);
    // String page numbers are coerced to numbers by the schema.
    expect(parsed.page).toBe(2);
  });

  it('should handle boolean string conversions', () => {
    // 'true'/'false' strings are transformed into real booleans.
    const parsed = schema.parse({
      assigned_to_me: 'true',
      since_leak_period: 'false',
      in_new_code_period: 'true',
    });
    expect(parsed.assigned_to_me).toBe(true);
    expect(parsed.since_leak_period).toBe(false);
    expect(parsed.in_new_code_period).toBe(true);
  });

  it('should handle page number string conversions', () => {
    const parsed = schema.parse({ page: '3', page_size: '25' });
    expect(parsed.page).toBe(3);
    expect(parsed.page_size).toBe(25);
  });

  it('should handle null values', () => {
    // Explicit nulls are accepted and preserved for every optional field.
    const parsed = schema.parse({
      branch: null,
      pull_request: null,
      status: null,
      resolution: null,
      files: null,
      assigned_to_me: null,
      since_leak_period: null,
      in_new_code_period: null,
    });
    expect(parsed.branch).toBeNull();
    expect(parsed.pull_request).toBeNull();
    expect(parsed.status).toBeNull();
    expect(parsed.resolution).toBeNull();
    expect(parsed.files).toBeNull();
    expect(parsed.assigned_to_me).toBeNull();
  });

  it('should reject invalid status values', () => {
    expect(() => schema.parse({ status: 'INVALID_STATUS' })).toThrow();
  });

  it('should reject invalid resolution values', () => {
    expect(() => schema.parse({ resolution: 'INVALID_RESOLUTION' })).toThrow();
  });
});
describe('hotspotToolSchema', () => {
  // Single shared schema instance for both cases below.
  const schema = z.object(hotspotToolSchema);

  it('should validate hotspot key parameter', () => {
    const parsed = schema.parse({ hotspot_key: 'AYg1234567890' });
    expect(parsed.hotspot_key).toBe('AYg1234567890');
  });

  it('should require hotspot_key', () => {
    // hotspot_key is mandatory, so an empty object must fail validation.
    expect(() => schema.parse({})).toThrow();
  });
});
describe('updateHotspotStatusToolSchema', () => {
  // Shared schema; hotspot_key and status are required, the rest optional.
  const schema = z.object(updateHotspotStatusToolSchema);

  it('should validate minimal update parameters', () => {
    const parsed = schema.parse({
      hotspot_key: 'AYg1234567890',
      status: 'REVIEWED',
    });
    expect(parsed.hotspot_key).toBe('AYg1234567890');
    expect(parsed.status).toBe('REVIEWED');
    // Omitted optional fields stay undefined.
    expect(parsed.resolution).toBeUndefined();
    expect(parsed.comment).toBeUndefined();
  });

  it('should validate update with all parameters', () => {
    const parsed = schema.parse({
      hotspot_key: 'AYg1234567890',
      status: 'REVIEWED',
      resolution: 'SAFE',
      comment: 'This is safe after review',
    });
    expect(parsed.hotspot_key).toBe('AYg1234567890');
    expect(parsed.status).toBe('REVIEWED');
    expect(parsed.resolution).toBe('SAFE');
    expect(parsed.comment).toBe('This is safe after review');
  });

  it('should handle null values for optional parameters', () => {
    // Explicit nulls for the optional fields are accepted and preserved.
    const parsed = schema.parse({
      hotspot_key: 'AYg1234567890',
      status: 'TO_REVIEW',
      resolution: null,
      comment: null,
    });
    expect(parsed.hotspot_key).toBe('AYg1234567890');
    expect(parsed.status).toBe('TO_REVIEW');
    expect(parsed.resolution).toBeNull();
    expect(parsed.comment).toBeNull();
  });

  it('should require hotspot_key and status', () => {
    // Missing either required field must make parsing throw.
    expect(() => schema.parse({ status: 'REVIEWED' })).toThrow();
    expect(() => schema.parse({ hotspot_key: 'AYg1234567890' })).toThrow();
  });

  it('should reject invalid status values', () => {
    expect(() =>
      schema.parse({ hotspot_key: 'AYg1234567890', status: 'INVALID_STATUS' })
    ).toThrow();
  });

  it('should reject invalid resolution values', () => {
    expect(() =>
      schema.parse({
        hotspot_key: 'AYg1234567890',
        status: 'REVIEWED',
        resolution: 'INVALID_RESOLUTION',
      })
    ).toThrow();
  });
});
```
--------------------------------------------------------------------------------
/src/__tests__/advanced-index.test.ts:
--------------------------------------------------------------------------------
```typescript
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi } from 'vitest';
import nock from 'nock';
import { z } from 'zod';
// Provide SonarQube connection settings before ../index.js is imported so the
// module picks them up during its initialization.
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'http://localhost:9000';
process.env.SONARQUBE_ORGANIZATION = 'test-org';

// Save environment variables.
// NOTE(review): this captures a reference to the already-mutated process.env,
// so the SONARQUBE_* test values above are part of the "original" snapshot —
// the Environment Handling test below depends on that.
const originalEnv = process.env;

beforeAll(() => {
  nock.cleanAll();
  // Common HTTP mock shared by all tests: any query against the project
  // search endpoint answers with a single public project.
  nock('http://localhost:9000')
    .persist()
    .get('/api/projects/search')
    .query(true)
    .reply(200, {
      components: [
        {
          key: 'test-project',
          name: 'Test Project',
          qualifier: 'TRK',
          visibility: 'public',
        },
      ],
      paging: {
        pageIndex: 1,
        pageSize: 10,
        total: 1,
      },
    });
});

afterAll(() => {
  nock.cleanAll();
});

// Populated from the dynamically imported module in the suite's beforeAll.
// Typed explicitly (instead of `any`) so misuse inside the tests is caught
// by the compiler.
let nullToUndefined: (value: unknown) => unknown;
let mapToSonarQubeParams: (params: Record<string, unknown>) => Record<string, unknown>;

// No need to mock axios anymore since we're using sonarqube-web-api-client
// Exercises helpers exported from ../index.js (nullToUndefined,
// mapToSonarQubeParams) plus standalone copies of the zod transforms that
// mirror the ones used at tool-registration time.
describe('Advanced MCP Server Tests', () => {
  beforeAll(async () => {
    // Import functions we need to test. Dynamic import so the SONARQUBE_*
    // env vars set at the top of this file are in place when the module loads.
    const module = await import('../index.js');
    nullToUndefined = module.nullToUndefined;
    mapToSonarQubeParams = module.mapToSonarQubeParams;
  });
  beforeEach(() => {
    vi.resetModules();
    // Give each test its own mutable copy of the environment.
    // NOTE(review): originalEnv was captured after the SONARQUBE_* vars were
    // set, so those test values are included in every copy.
    process.env = { ...originalEnv };
  });
  afterEach(() => {
    // Restore the shared env object and drop any leftover mocks.
    process.env = originalEnv;
    vi.clearAllMocks();
    nock.cleanAll();
  });
  describe('Schema Transformation Tests', () => {
    it('should transform page parameters correctly', () => {
      // Create a schema that matches the one in the tool registration:
      // numeric strings become numbers, anything unparseable/empty -> null.
      const pageSchema = z
        .string()
        .optional()
        .transform((val: any) => (val ? parseInt(val, 10) || null : null));
      // Test valid inputs
      expect(pageSchema.parse('10')).toBe(10);
      expect(pageSchema.parse('100')).toBe(100);
      // Test invalid or empty inputs
      expect(pageSchema.parse('')).toBe(null);
      expect(pageSchema.parse('abc')).toBe(null);
      expect(pageSchema.parse(undefined)).toBe(null);
    });
    it('should transform boolean parameters correctly', () => {
      // Accepts real booleans or 'true'/'false' strings; null and undefined
      // pass through unchanged.
      const booleanSchema = z
        .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
        .nullable()
        .optional();
      // Test string values
      expect(booleanSchema.parse('true')).toBe(true);
      expect(booleanSchema.parse('false')).toBe(false);
      // Test boolean values
      expect(booleanSchema.parse(true)).toBe(true);
      expect(booleanSchema.parse(false)).toBe(false);
      // Test null/undefined values
      expect(booleanSchema.parse(null)).toBe(null);
      expect(booleanSchema.parse(undefined)).toBe(undefined);
    });
  });
  describe('nullToUndefined Tests', () => {
    it('should convert null to undefined', () => {
      expect(nullToUndefined(null)).toBeUndefined();
    });
    it('should pass through other values', () => {
      // Falsy non-null values (false, 0-like, empty containers) must NOT be
      // converted — only null becomes undefined.
      expect(nullToUndefined(123)).toBe(123);
      expect(nullToUndefined('string')).toBe('string');
      expect(nullToUndefined(false)).toBe(false);
      expect(nullToUndefined({})).toEqual({});
      expect(nullToUndefined([])).toEqual([]);
      expect(nullToUndefined(undefined)).toBeUndefined();
    });
  });
  describe('mapToSonarQubeParams Tests', () => {
    it('should map MCP parameters to SonarQube parameters', () => {
      // snake_case MCP keys are renamed to the client's camelCase keys.
      const mcpParams = {
        project_key: 'test-project',
        severity: 'MAJOR',
        page: 1,
        page_size: 10,
      };
      const sonarQubeParams = mapToSonarQubeParams(mcpParams);
      expect(sonarQubeParams.projectKey).toBe('test-project');
      expect(sonarQubeParams.severity).toBe('MAJOR');
      expect(sonarQubeParams.page).toBe(1);
      expect(sonarQubeParams.pageSize).toBe(10);
    });
    it('should handle empty optional parameters', () => {
      const mcpParams = {
        project_key: 'test-project',
      };
      const sonarQubeParams = mapToSonarQubeParams(mcpParams);
      expect(sonarQubeParams.projectKey).toBe('test-project');
      // Keys that were not supplied come back undefined, not null.
      expect(sonarQubeParams.severity).toBeUndefined();
      expect(sonarQubeParams.page).toBeUndefined();
      expect(sonarQubeParams.pageSize).toBeUndefined();
    });
    it('should handle array parameters', () => {
      const mcpParams = {
        project_key: 'test-project',
        statuses: ['OPEN', 'CONFIRMED'],
        types: ['BUG', 'VULNERABILITY'],
      };
      const sonarQubeParams = mapToSonarQubeParams(mcpParams);
      expect(sonarQubeParams.projectKey).toBe('test-project');
      // Array values are passed through untouched.
      expect(sonarQubeParams.statuses).toEqual(['OPEN', 'CONFIRMED']);
      expect(sonarQubeParams.types).toEqual(['BUG', 'VULNERABILITY']);
    });
    it('should handle boolean parameters', () => {
      const mcpParams = {
        project_key: 'test-project',
        resolved: true,
        on_component_only: false,
      };
      const sonarQubeParams = mapToSonarQubeParams(mcpParams);
      expect(sonarQubeParams.projectKey).toBe('test-project');
      // false must be preserved, not dropped as falsy.
      expect(sonarQubeParams.resolved).toBe(true);
      expect(sonarQubeParams.onComponentOnly).toBe(false);
    });
  });
  describe('Environment Handling', () => {
    it('should correctly retrieve environment variables', () => {
      // These were set before originalEnv was captured, so the per-test copy
      // created in beforeEach still contains them.
      expect(process.env.SONARQUBE_TOKEN).toBe('test-token');
      expect(process.env.SONARQUBE_URL).toBe('http://localhost:9000');
      expect(process.env.SONARQUBE_ORGANIZATION).toBe('test-org');
    });
  });
});
```
--------------------------------------------------------------------------------
/.github/workflows/reusable-validate.yml:
--------------------------------------------------------------------------------
```yaml
# =============================================================================
# REUSABLE WORKFLOW: Code Validation Suite
# PURPOSE: Run all quality checks (audit, typecheck, lint, format, test)
# USAGE: Called by PR and main workflows for consistent validation
# OUTPUTS: Coverage reports when requested
# =============================================================================
name: Reusable Validate
on:
workflow_call:
inputs:
node-version:
description: 'Node.js version (should match package.json engines.node)'
type: string
default: '22' # UPDATE: When upgrading Node.js
pnpm-version:
description: 'pnpm version (should match package.json packageManager)'
type: string
default: '10.17.0' # UPDATE: When upgrading pnpm
validate-changesets:
description: 'validate that a changeset exists on the branch'
type: boolean
default: false
secrets:
SONAR_TOKEN:
description: 'SonarCloud authentication token'
required: true
# EXAMPLE USAGE:
# jobs:
#   validate:
#     uses: ./.github/workflows/reusable-validate.yml
#     with:
#       validate-changesets: true # For PRs, require a changeset on the branch
#     secrets:
#       SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Full history for accurate analysis
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: ${{ inputs.pnpm-version }}
run_install: false
standalone: true
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ inputs.node-version }}
cache: pnpm # Cache dependencies for speed
- name: Install dependencies
# Ensures exact versions from lock file
# FAILS IF: Lock file out of sync with package.json
run: pnpm install --frozen-lockfile
- name: Tests with coverage
# Run test suite with coverage
# Coverage enforces 80% minimum threshold for all metrics
# FAILS IF: Tests fail or coverage below 80% (when coverage enabled)
# To debug: Check test output and coverage/index.html
run: pnpm test:coverage
- name: SonarQube Scan
uses: SonarSource/sonarqube-scan-action@v6
env:
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
- name: Upload coverage
# Make coverage reports available for review
# Download from Actions tab to view detailed HTML report
uses: actions/upload-artifact@v4
with:
name: coverage-${{ github.sha }}
path: coverage/
retention-days: 7 # Keep for a week for PR reviews
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Full history for accurate analysis
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: ${{ inputs.pnpm-version }}
run_install: false
standalone: true
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ inputs.node-version }}
cache: pnpm # Cache dependencies for speed
- name: Install dependencies
# Ensures exact versions from lock file
# FAILS IF: Lock file out of sync with package.json
run: pnpm install --frozen-lockfile
# =============================================================================
# VALIDATION CHECKS
# All checks run in sequence to provide clear failure messages
# =============================================================================
- name: Type checking
# Validate TypeScript types without emitting files
# FAILS IF: Type errors in any .ts file
# To debug: Run 'pnpm typecheck' locally for detailed errors
run: pnpm typecheck
- name: Linting
# Run ESLint with type-aware rules
# FAILS IF: Linting errors (not warnings)
# To fix: Run 'pnpm lint:fix' for auto-fixable issues
run: pnpm lint
- name: Format checking
# Verify code follows Prettier formatting
# FAILS IF: Any file not formatted
# To fix: Run 'pnpm format:fix' to auto-format
run: pnpm format
- name: Install actionlint
# Install actionlint for workflow validation
# Uses the official installer script from rhysd/actionlint
run: |
echo "Installing actionlint..."
bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)
echo "${PWD}" >> $GITHUB_PATH
- name: Workflow linting
# Validate GitHub Actions workflow files
# FAILS IF: Workflow syntax errors or issues found
# To debug: Run 'pnpm lint:workflows' locally
run: pnpm lint:workflows
- name: Fetch main branch for changesets
if: inputs.validate-changesets
# Need main branch to compare changesets
run: git fetch origin main:main
- name: Changeset status
if: inputs.validate-changesets
# Validates that changesets exist for features/fixes
# FAILS IF: feat/fix commits exist without changesets
# To fix: Run 'pnpm changeset' and commit the generated file
# For non-code changes: Run 'pnpm changeset --empty'
run: pnpm changeset:status
```
--------------------------------------------------------------------------------
/src/utils/parameter-mappers.ts:
--------------------------------------------------------------------------------
```typescript
import type { IssuesParams } from '../types/index.js';
import { nullToUndefined } from './transforms.js';
/**
* Maps MCP tool parameters to SonarQube client parameters
* @param params Parameters from the MCP tool
* @returns Parameters for the SonarQube client
*/
/**
 * Maps MCP tool parameters (snake_case, possibly null) to SonarQube client
 * parameters (camelCase, undefined for "not set").
 *
 * Nulls from the MCP layer are normalized to undefined and then dropped, so
 * the returned object only carries keys the caller actually provided, apart
 * from the always-present page/pageSize slots.
 *
 * @param params Parameters from the MCP tool
 * @returns Parameters for the SonarQube client
 */
export function mapToSonarQubeParams(params: Record<string, unknown>): IssuesParams {
  const mapped: IssuesParams = {
    page: undefined,
    pageSize: undefined,
  };

  // Normalizes null -> undefined and assigns only when a value is present.
  const assign = <K extends keyof IssuesParams>(key: K, raw: unknown): void => {
    const value = nullToUndefined(raw) as IssuesParams[K] | undefined;
    if (value !== undefined) {
      mapped[key] = value;
    }
  };

  // Component filters (support both single project_key and multiple projects)
  assign('projectKey', params.project_key);
  assign('projects', params.projects);
  assign('componentKeys', params.component_keys);
  assign('components', params.components);
  assign('onComponentOnly', params.on_component_only);
  assign('directories', params.directories);
  assign('files', params.files);
  assign('scopes', params.scopes);

  // Branch and PR support
  assign('branch', params.branch);
  assign('pullRequest', params.pull_request);

  // Issue filters
  assign('issues', params.issues);
  assign('severity', params.severity); // Deprecated upstream; kept for compatibility
  assign('severities', params.severities);
  assign('statuses', params.statuses);
  assign('resolutions', params.resolutions);
  assign('resolved', params.resolved);
  assign('types', params.types);

  // Clean Code taxonomy
  assign('cleanCodeAttributeCategories', params.clean_code_attribute_categories);
  assign('impactSeverities', params.impact_severities);
  assign('impactSoftwareQualities', params.impact_software_qualities);
  assign('issueStatuses', params.issue_statuses);

  // Rules and tags
  assign('rules', params.rules);
  assign('tags', params.tags);

  // Date filters
  assign('createdAfter', params.created_after);
  assign('createdBefore', params.created_before);
  assign('createdAt', params.created_at);
  assign('createdInLast', params.created_in_last);

  // Assignment
  assign('assigned', params.assigned);
  assign('assignees', params.assignees);
  assign('author', params.author);
  assign('authors', params.authors);

  // Security standards
  assign('cwe', params.cwe);
  assign('owaspTop10', params.owasp_top10);
  assign('owaspTop10v2021', params.owasp_top10_v2021);
  assign('sansTop25', params.sans_top25);
  assign('sonarsourceSecurity', params.sonarsource_security);
  assign('sonarsourceSecurityCategory', params.sonarsource_security_category);

  // Languages
  assign('languages', params.languages);

  // Facets
  assign('facets', params.facets);
  assign('facetMode', params.facet_mode);

  // New code period
  assign('sinceLeakPeriod', params.since_leak_period);
  assign('inNewCodePeriod', params.in_new_code_period);

  // Sorting
  assign('s', params.s);
  assign('asc', params.asc);

  // Response optimization
  assign('additionalFields', params.additional_fields);

  // Pagination
  assign('page', params.page);
  assign('pageSize', params.page_size);

  return mapped;
}
```
--------------------------------------------------------------------------------
/examples/http-client.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Example HTTP client for the SonarQube MCP Server with HTTP transport.
* This demonstrates how to interact with the MCP server over HTTP.
*
* To run this example:
* 1. Start the server with HTTP transport:
* MCP_TRANSPORT_TYPE=http MCP_HTTP_PORT=3000 pnpm start
* 2. Run this client:
* npx tsx examples/http-client.ts
*/
/** Request envelope POSTed to the server's /mcp endpoint. */
interface McpHttpRequest {
  // Session id obtained from POST /session; optional at the type level,
  // but McpHttpClient.call() always supplies it.
  sessionId?: string;
  // MCP method name, e.g. 'tools/list' or 'tools/execute'.
  method: string;
  params?: unknown;
}
/** Response envelope returned by the /mcp endpoint. */
interface McpHttpResponse {
  sessionId?: string;
  // Result of a successful call; McpHttpClient.call() returns this field.
  result?: unknown;
  // JSON-RPC style error object; checked first by call(), which throws when present.
  error?: {
    code: number;
    message: string;
    data?: unknown;
  };
}
/**
 * Minimal HTTP client for the MCP server's HTTP transport.
 *
 * Lifecycle: connect() creates a session, call() invokes MCP methods within
 * that session, disconnect() tears the session down. health() performs a
 * sessionless status check.
 */
class McpHttpClient {
  /** Session identifier returned by the server; undefined until connect(). */
  private sessionId?: string;
  private readonly baseUrl: string;
  constructor(baseUrl = 'http://localhost:3000') {
    this.baseUrl = baseUrl;
  }
  /**
   * Check server health.
   * @returns the parsed JSON health payload
   * @throws Error when the endpoint responds with a non-2xx status
   */
  async health(): Promise<unknown> {
    const response = await fetch(`${this.baseUrl}/health`);
    if (!response.ok) {
      throw new Error(`Health check failed: ${response.statusText}`);
    }
    return response.json();
  }
  /**
   * Initialize a new session and remember its id for subsequent calls.
   * @throws Error when the server refuses the session or omits the id
   */
  async connect(): Promise<void> {
    const response = await fetch(`${this.baseUrl}/session`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
    });
    if (!response.ok) {
      throw new Error(`Failed to create session: ${response.statusText}`);
    }
    // Validate the payload instead of trusting it blindly — a missing id
    // would otherwise surface later as a confusing 'Not connected' error.
    const data = (await response.json()) as { sessionId?: string };
    if (!data.sessionId) {
      throw new Error('Server did not return a session id');
    }
    this.sessionId = data.sessionId;
  }
  /**
   * Call an MCP method within the current session.
   * @param method MCP method name (e.g. 'tools/list')
   * @param params optional method parameters
   * @returns the `result` field of the MCP response
   * @throws Error when not connected, on HTTP failure, or on an MCP-level error
   */
  async call(method: string, params?: unknown): Promise<unknown> {
    if (!this.sessionId) {
      throw new Error('Not connected. Call connect() first.');
    }
    const request: McpHttpRequest = {
      sessionId: this.sessionId,
      method,
      params,
    };
    const response = await fetch(`${this.baseUrl}/mcp`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(request),
    });
    if (!response.ok) {
      throw new Error(`MCP call failed: ${response.statusText}`);
    }
    const data = (await response.json()) as McpHttpResponse;
    if (data.error) {
      throw new Error(`MCP error: ${data.error.message}`);
    }
    return data.result;
  }
  /**
   * Connect to server-sent events for notifications.
   *
   * @param onMessage invoked with each parsed event payload
   * @param onError optionally invoked on stream or parse errors. Throwing
   *   inside an EventSource handler never propagates to the caller (it
   *   becomes an unhandled exception), so errors are delivered via this
   *   callback instead; without it they are ignored.
   */
  connectToEvents(
    onMessage: (data: unknown) => void,
    onError?: (error: unknown) => void
  ): EventSource {
    if (!this.sessionId) {
      throw new Error('Not connected. Call connect() first.');
    }
    // Note: EventSource is not available in Node.js by default
    // You'd need to use a library like 'eventsource' for Node.js
    if (typeof EventSource === 'undefined') {
      throw new Error('EventSource not available. Install "eventsource" package for Node.js.');
    }
    const eventSource = new EventSource(`${this.baseUrl}/events/${this.sessionId}`);
    eventSource.onmessage = (event) => {
      // Guard JSON.parse: a malformed payload is reported through onError
      // rather than crashing the process with an unhandled exception.
      try {
        onMessage(JSON.parse(event.data));
      } catch (parseError) {
        onError?.(parseError);
      }
    };
    eventSource.onerror = (error) => {
      onError?.(error);
    };
    return eventSource;
  }
  /**
   * Disconnect and clean up the session. Failures are intentionally ignored:
   * the session id is cleared regardless so the client can reconnect.
   */
  async disconnect(): Promise<void> {
    if (!this.sessionId) {
      return;
    }
    try {
      await fetch(`${this.baseUrl}/session/${this.sessionId}`, {
        method: 'DELETE',
      });
    } catch {
      // Ignore errors during cleanup
    } finally {
      this.sessionId = undefined;
    }
  }
}
// Example usage
/**
 * Demonstrates a full client lifecycle: health check, session creation,
 * a couple of MCP calls, a short event-stream subscription, and cleanup.
 */
async function main() {
  /* eslint-disable no-console */
  const mcp = new McpHttpClient();
  try {
    // Check server health
    console.log('Checking server health...');
    const healthInfo = await mcp.health();
    console.log('Server health:', healthInfo);
    // Connect to the server
    console.log('\nConnecting to server...');
    await mcp.connect();
    // Example MCP calls (these would need to be implemented in the server)
    console.log('\nMaking example MCP calls...');
    // List available tools
    const toolList = await mcp.call('tools/list');
    console.log('Available tools:', toolList);
    // Call a specific tool (example with SonarQube projects)
    const projectPage = await mcp.call('tools/execute', {
      name: 'projects',
      params: {
        page: 1,
        page_size: 10,
      },
    });
    console.log('Projects:', projectPage);
    // Connect to events for real-time notifications
    console.log('\nConnecting to server events...');
    const stream = mcp.connectToEvents((payload) => {
      console.log('Event received:', payload);
    });
    // Keep the connection open for a bit to receive events
    if (stream) {
      await new Promise((resolve) => {
        setTimeout(resolve, 5000);
      });
      stream.close();
    }
  } catch (error) {
    console.error('Error:', error);
  } finally {
    // Always disconnect when done
    console.log('\nDisconnecting...');
    await mcp.disconnect();
  }
  /* eslint-enable no-console */
}
// Run the example if this file is executed directly
// Note: For ES modules, use import.meta.url comparison
// For CommonJS compatibility, we check if this is the main module
if (typeof require !== 'undefined' && require.main === module) {
  // CommonJS entry-point detection: require.main is the module Node started with.
  // eslint-disable-next-line no-console
  main().catch(console.error);
} else if (typeof import.meta !== 'undefined' && import.meta.url === `file://${process.argv[1]}`) {
  // ES module execution detection
  // NOTE(review): a plain string comparison misses percent-encoded or Windows
  // paths; url.pathToFileURL(process.argv[1]).href would be more robust — TODO confirm.
  // eslint-disable-next-line no-console
  main().catch(console.error);
}
export { McpHttpClient };
```
--------------------------------------------------------------------------------
/src/domains/source-code.ts:
--------------------------------------------------------------------------------
```typescript
import type {
SourceCodeParams,
ScmBlameParams,
SonarQubeSourceResult,
SonarQubeScmBlameResult,
SonarQubeSourceLine,
IssuesParams,
} from '../types/index.js';
import type { SonarQubeClient as WebApiClient } from 'sonarqube-web-api-client';
import type { IssuesDomain } from './issues.js';
import { BaseDomain } from './base.js';
/**
 * Domain module for source code operations: raw source retrieval (optionally
 * annotated with issues) and SCM blame information.
 */
export class SourceCodeDomain extends BaseDomain {
  constructor(
    webApiClient: WebApiClient,
    organization: string | null,
    // Optional: when present, getSourceCode() annotates lines with issues.
    private readonly issuesDomain?: IssuesDomain
  ) {
    super(webApiClient, organization);
  }
  /**
   * Gets source code for a file
   * @param params Parameters including file key and line range
   * @returns Promise with the source code, with per-line issue annotations
   *   when an issues domain is available (annotation failures fall back to
   *   un-annotated source)
   */
  async getSourceCode(params: SourceCodeParams): Promise<SonarQubeSourceResult> {
    const { key, from, to, branch, pullRequest } = params;
    // Get raw source code
    const response = await this.webApiClient.sources.raw({
      key,
      // Spread-with-&& drops falsy values so the request omits absent filters.
      ...(branch && { branch }),
      ...(pullRequest && { pullRequest }),
    });
    // Transform the response to match our interface
    // The raw method returns a string with lines separated by newlines
    const lines = response.split('\n');
    let sourcesArray = lines.map((line, index) => ({
      line: index + 1, // source lines are 1-based
      code: line,
    }));
    // Apply line range filtering if specified
    if (from !== undefined || to !== undefined) {
      const startLine = from ?? 1;
      const endLine = to ?? sourcesArray.length;
      sourcesArray = sourcesArray.filter(
        (source) => source.line >= startLine && source.line <= endLine
      );
    }
    const sources = {
      sources: sourcesArray,
      component: {
        key,
        qualifier: 'FIL', // Default for files
        name: key.split('/').pop() ?? key, // last path segment as display name
        longName: key,
      },
    };
    // Get issues for this component to annotate the source
    if (key && this.issuesDomain) {
      try {
        const issuesParams: IssuesParams = {
          // NOTE(review): the file key is passed as a *project* filter here;
          // if IssuesParams.projects expects project keys, this may match
          // nothing for file-level keys — confirm against the issues API.
          projects: [key],
          onComponentOnly: true,
          page: 1,
          // Only the first 100 issues are fetched (no pagination loop), so
          // files with more issues are only partially annotated.
          pageSize: 100,
        };
        if (params.branch) {
          issuesParams.branch = params.branch;
        }
        if (params.pullRequest) {
          issuesParams.pullRequest = params.pullRequest;
        }
        const issues = await this.issuesDomain.getIssues(issuesParams);
        // Map issues to source lines
        const sourceLines: SonarQubeSourceLine[] = sources.sources.map((line) => {
          const lineIssues = issues.issues.filter((issue) => issue.line === line.line);
          return {
            line: line.line,
            code: line.code,
            // The raw endpoint provides no SCM/coverage metadata, so these
            // fields are explicitly undefined.
            scmAuthor: undefined,
            scmDate: undefined,
            scmRevision: undefined,
            duplicated: undefined,
            isNew: undefined,
            lineHits: undefined,
            conditions: undefined,
            coveredConditions: undefined,
            highlightedText: undefined,
            issues: lineIssues.length > 0 ? lineIssues : undefined,
          };
        });
        return {
          component: {
            key: sources.component.key,
            path: undefined,
            qualifier: sources.component.qualifier,
            name: sources.component.name,
            longName: sources.component.longName,
            language: undefined,
          },
          sources: sourceLines,
        };
      } catch (error) {
        // Log the error for debugging but continue with source code without annotations
        this.logger.error('Failed to retrieve issues for source code annotation', error);
        // Return source code without issue annotations
        return this.mapSourceToResult(sources);
      }
    }
    return this.mapSourceToResult(sources);
  }
  // Converts the intermediate source structure into the public result type,
  // copying any optional metadata through and leaving issues unset.
  private mapSourceToResult(sources: {
    sources: Array<{
      line: number;
      code: string;
      scmAuthor?: string;
      scmDate?: string;
      scmRevision?: string;
      duplicated?: boolean;
      isNew?: boolean;
      lineHits?: number;
      conditions?: number;
      coveredConditions?: number;
      highlightedText?: string;
    }>;
    component: {
      key: string;
      path?: string;
      qualifier: string;
      name: string;
      longName?: string;
      language?: string;
    };
  }): SonarQubeSourceResult {
    const mappedSources: SonarQubeSourceLine[] = sources.sources.map((line) => ({
      line: line.line,
      code: line.code,
      scmAuthor: line.scmAuthor,
      scmDate: line.scmDate,
      scmRevision: line.scmRevision,
      duplicated: line.duplicated,
      isNew: line.isNew,
      lineHits: line.lineHits,
      conditions: line.conditions,
      coveredConditions: line.coveredConditions,
      highlightedText: line.highlightedText,
      issues: undefined, // no annotation on this path
    }));
    return {
      component: {
        key: sources.component.key,
        path: sources.component.path,
        qualifier: sources.component.qualifier,
        name: sources.component.name,
        longName: sources.component.longName,
        language: sources.component.language,
      },
      sources: mappedSources,
    };
  }
  /**
   * Gets SCM blame information for a file
   * @param params Parameters including file key and line range
   * @returns Promise with the SCM blame information
   */
  async getScmBlame(params: ScmBlameParams): Promise<SonarQubeScmBlameResult> {
    const { key, from, to } = params;
    const response = await this.webApiClient.sources.scm({
      key,
      // Truthiness check drops 0, but line numbers start at 1, so that is safe.
      ...(from && { from }),
      ...(to && { to }),
    });
    // NOTE(review): double cast bridges the client's response type to our own
    // interface; assumes the shapes are structurally compatible — confirm
    // against the sonarqube-web-api-client typings.
    return response as unknown as SonarQubeScmBlameResult;
  }
}
```
--------------------------------------------------------------------------------
/src/handlers/components.ts:
--------------------------------------------------------------------------------
```typescript
import type {
ComponentsParams,
ISonarQubeClient,
ComponentsTreeParams,
ComponentsSearchParams,
} from '../types/index.js';
import type { ComponentQualifier } from '../types/components.js';
import { getDefaultClient } from '../utils/client-factory.js';
import { nullToUndefined } from '../utils/transforms.js';
import { createLogger } from '../utils/logger.js';
import { withErrorHandling } from '../errors.js';
import { withMCPErrorHandling } from '../utils/error-handler.js';
import { ComponentsDomain } from '../domains/components.js';
import { createStructuredResponse } from '../utils/structured-response.js';
const logger = createLogger('handlers/components');
/**
* Build tree parameters from component params
*/
/**
 * Build tree navigation parameters from component params.
 *
 * Each nullable input is normalized exactly once via nullToUndefined and only
 * copied into the result when actually provided (null and undefined are both
 * treated as "not set").
 *
 * @param params Raw component request parameters
 * @returns Parameters for the component tree endpoint
 */
function buildTreeParams(params: ComponentsParams): ComponentsTreeParams {
  const treeParams: ComponentsTreeParams = {
    // An absent component falls back to the empty string, matching the
    // tree endpoint's required-field shape.
    component: params.component ?? '',
  };
  const strategy = nullToUndefined(params.strategy) as 'all' | 'children' | 'leaves' | undefined;
  if (strategy !== undefined) {
    treeParams.strategy = strategy;
  }
  if (params.qualifiers !== undefined) {
    treeParams.qualifiers = params.qualifiers;
  }
  const asc = nullToUndefined(params.asc) as boolean | undefined;
  if (asc !== undefined) {
    treeParams.asc = asc;
  }
  const page = nullToUndefined(params.p) as number | undefined;
  if (page !== undefined) {
    treeParams.page = page;
  }
  const pageSize = nullToUndefined(params.ps) as number | undefined;
  if (pageSize !== undefined) {
    treeParams.pageSize = pageSize;
  }
  const branch = nullToUndefined(params.branch) as string | undefined;
  if (branch !== undefined) {
    treeParams.branch = branch;
  }
  const pullRequest = nullToUndefined(params.pullRequest) as string | undefined;
  if (pullRequest !== undefined) {
    treeParams.pullRequest = pullRequest;
  }
  return treeParams;
}
/**
* Build search parameters from component params
*/
/**
 * Build search parameters from component params.
 *
 * Each nullable input is normalized exactly once via nullToUndefined and only
 * copied into the result when actually provided.
 *
 * @param params Raw component request parameters
 * @returns Parameters for the component search endpoint
 */
function buildSearchParams(params: ComponentsParams): ComponentsSearchParams {
  const searchParams: ComponentsSearchParams = {};
  const query = nullToUndefined(params.query) as string | undefined;
  if (query !== undefined) {
    searchParams.query = query;
  }
  if (params.qualifiers !== undefined) {
    searchParams.qualifiers = params.qualifiers;
  }
  const language = nullToUndefined(params.language) as string | undefined;
  if (language !== undefined) {
    searchParams.language = language;
  }
  const page = nullToUndefined(params.p) as number | undefined;
  if (page !== undefined) {
    searchParams.page = page;
  }
  const pageSize = nullToUndefined(params.ps) as number | undefined;
  if (pageSize !== undefined) {
    searchParams.pageSize = pageSize;
  }
  return searchParams;
}
/**
* Build default project listing parameters
*/
/**
 * Build default project-listing parameters (qualifier TRK = projects).
 *
 * Pagination inputs are normalized exactly once via nullToUndefined and only
 * copied into the result when actually provided.
 *
 * @param params Raw component request parameters
 * @returns Search parameters that list all projects
 */
function buildDefaultParams(params: ComponentsParams): ComponentsSearchParams {
  const searchParams: ComponentsSearchParams = {
    qualifiers: ['TRK'] as ComponentQualifier[],
  };
  const page = nullToUndefined(params.p) as number | undefined;
  if (page !== undefined) {
    searchParams.page = page;
  }
  const pageSize = nullToUndefined(params.ps) as number | undefined;
  if (pageSize !== undefined) {
    searchParams.pageSize = pageSize;
  }
  return searchParams;
}
/**
 * Handles component search and tree navigation operations.
 *
 * Dispatches to one of four operations based on which parameters are present,
 * in strict priority order: show (key) > tree (component) > search
 * (query/qualifiers) > default project listing.
 *
 * @param params Parameters for component operations
 * @param client Optional SonarQube client instance
 * @returns A response containing the list of components
 */
export const handleSonarQubeComponents = withMCPErrorHandling(
  async (params: ComponentsParams, client: ISonarQubeClient = getDefaultClient()) => {
    logger.debug('Handling SonarQube components request', params);
    // Determine which operation to perform based on parameters
    const isShowOperation = params.key !== undefined && params.key !== null;
    const isTreeOperation =
      !isShowOperation && params.component !== undefined && params.component !== null;
    const isSearchOperation =
      !isShowOperation && !isTreeOperation && (params.query || params.qualifiers);
    const webApiClient = client.webApiClient;
    // Get organization from client if it has one, otherwise null
    // NOTE(review): the cast assumes concrete clients may expose an optional
    // 'organization' property not declared on ISonarQubeClient — confirm.
    const organization =
      (client as ISonarQubeClient & { organization?: string | null }).organization ?? null;
    const domain = new ComponentsDomain(webApiClient, organization);
    let result;
    if (isShowOperation) {
      // Show component details
      // Non-null assertion is safe: isShowOperation guarantees key is set.
      result = await withErrorHandling('Show component', () =>
        domain.showComponent(
          params.key!,
          nullToUndefined(params.branch),
          nullToUndefined(params.pullRequest)
        )
      );
      logger.info('Successfully retrieved component details', {
        key: params.key,
      });
    } else if (isTreeOperation) {
      // Component tree navigation
      const treeParams = buildTreeParams(params);
      result = await withErrorHandling('Get component tree', () =>
        domain.getComponentTree(treeParams)
      );
      logger.info('Successfully retrieved component tree', {
        component: params.component,
        count: result.components.length,
      });
    } else if (isSearchOperation) {
      // Component search
      const searchParams = buildSearchParams(params);
      result = await withErrorHandling('Search components', () =>
        domain.searchComponents(searchParams)
      );
      logger.info('Successfully searched components', {
        query: params.query,
        count: result.components.length,
      });
    } else {
      // Default to listing all projects
      const searchParams = buildDefaultParams(params);
      result = await withErrorHandling('List all projects', () =>
        domain.searchComponents(searchParams)
      );
      logger.info('Successfully listed all projects', {
        count: result.components.length,
      });
    }
    return createStructuredResponse(result);
  }
);
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0011-docker-containerization-for-deployment.md:
--------------------------------------------------------------------------------
```markdown
# 11. Docker containerization for deployment
Date: 2025-06-13
Updated: 2025-10-11 (Added multi-platform, GHCR, security scanning)
## Status
Accepted
## Context
The SonarQube MCP server needs to be easily deployable across different environments with consistent behavior. Users require a simple deployment method that:
- Eliminates dependency management issues (Node.js version, npm packages)
- Ensures consistent runtime environments across different systems
- Simplifies the deployment process for non-technical users
- Supports various MCP transport mechanisms (stdio, SSE)
- Enables easy updates and version management
- Works on multiple CPU architectures (Intel/AMD x64, Apple Silicon ARM64)
- Provides security guarantees through vulnerability scanning
## Decision
We will provide Docker containerization as the recommended deployment option for the SonarQube MCP server. This includes:
- Maintaining a Dockerfile in the repository that packages the server with all dependencies
- Publishing Docker images to both GitHub Container Registry (GHCR) and Docker Hub
- Documenting Docker usage in the README as a primary deployment method
- Supporting stdio transport within the containerized environment (SSE removed as of ADR-0019)
### Modern Implementation (as of 2025-10-11)
The Docker implementation has evolved significantly beyond the initial decision:
#### Multi-Platform Support
- **Platforms**: Builds for both `linux/amd64` and `linux/arm64`
- **Tooling**: Uses Docker Buildx with QEMU emulation for ARM64 builds
- **Distribution**: Multi-platform manifest lists allow Docker to automatically select the correct architecture
- **References**: See ADR-0027 for publishing strategy details
#### Two-Registry Strategy
1. **GitHub Container Registry (GHCR)** - Primary build target:
- Built and pushed in main workflow
- Security scanned before any public distribution
- Used as intermediate registry for Docker Hub
- Registry: `ghcr.io/sapientpants/sonarqube-mcp-server`
2. **Docker Hub** - Public distribution:
- Images copied from GHCR in publish workflow
- Better discoverability for end users
- Registry: `sapientpants/sonarqube-mcp-server`
- **References**: See ADR-0027 (Docker Image Publishing Strategy)
#### Security Scanning
- **Tool**: Trivy vulnerability scanner
- **Severity**: Blocks on HIGH and CRITICAL vulnerabilities
- **Integration**: Automatic in CI/CD pipeline before publishing
- **SARIF Upload**: Results uploaded to GitHub Security tab
- **References**: See ADR-0025 (Container and Security Scanning Strategy)
#### Supply Chain Security
- **SLSA Provenance**: Build attestations attached to all images
- **SBOM**: Software Bill of Materials (CycloneDX format) generated and attached
- **Verification**: Users can verify image provenance using GitHub attestation API
#### Base Image and Configuration
The Dockerfile:
- Uses `node:22-alpine` as base image for minimal size
- Installs a pinned version of pnpm for dependency management
- Multi-stage build for optimal layer caching
- Non-root user (`node`) for security
- Proper signal handling for graceful shutdown
- Health check endpoint for container orchestration
#### Tagging Strategy
Each release creates four tags:
- Full semantic version: `1.10.18`
- Major.minor version: `1.10`
- Major version: `1`
- Latest: `latest`
## Consequences
### Positive
- **Simplified deployment**: Users can run the server with a single `docker run` command
- **Dependency isolation**: All Node.js and npm dependencies are packaged within the container
- **Version consistency**: Specific server versions can be deployed using Docker tags
- **Cross-platform compatibility**: Works identically on Linux, macOS, and Windows with Docker
- **Easy updates**: Users can update by pulling new image versions
- **Multi-Architecture**: Native support for both Intel/AMD and ARM64 (Apple Silicon, AWS Graviton)
- **Security First**: Images are scanned for vulnerabilities before distribution
- **Supply Chain Security**: SLSA provenance and SBOM provide transparency
- **Dual Registry**: GHCR for GitHub users, Docker Hub for broader community
- **Efficient Publishing**: Build once, copy to Docker Hub (no rebuild required)
### Negative
- **Additional maintenance**: Requires maintaining Dockerfile and multi-registry releases
- **Image size**: Docker images are larger than source distributions (includes Node.js runtime)
- Current size: ~150MB compressed (Alpine-based)
- **Docker requirement**: Users must have Docker installed and running
- **Resource overhead**: Containers have slight performance overhead compared to native execution
- **Multi-Platform Build Time**: ARM64 emulation adds 2-3 minutes to build time
- **Registry Costs**: Potential GHCR storage costs (minimal for public repos)
- **Two Registry Sync**: Need to maintain consistency between GHCR and Docker Hub
### Neutral
- Docker deployment is the recommended approach but not mandatory - users can still install from source
- Docker image tags align with npm package versions for consistency
- Multi-platform manifest lists handled transparently by Docker (users don't need to specify platform)
- GHCR acts as intermediate storage (invisible to most users)
## References
- Dockerfile: `Dockerfile` (in repository root)
- ADR-0019: Simplify to stdio-only transport (removed SSE support)
- ADR-0024: CI/CD Platform - GitHub Actions (workflow automation)
- ADR-0025: Container and Security Scanning Strategy (Trivy, SLSA, SBOM)
- ADR-0027: Docker Image Publishing Strategy - GHCR to Docker Hub (two-registry approach)
- GitHub Container Registry: https://ghcr.io/sapientpants/sonarqube-mcp-server
- Docker Hub: https://hub.docker.com/r/sapientpants/sonarqube-mcp-server
```
--------------------------------------------------------------------------------
/src/__tests__/auth-methods.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import {
createSonarQubeClient,
createSonarQubeClientWithBasicAuth,
createSonarQubeClientWithPasscode,
createSonarQubeClientFromEnv,
SonarQubeClient,
} from '../sonarqube.js';
// These tests verify that each client factory constructs successfully; the
// clients are never exercised against a live server here.
describe('Authentication Methods', () => {
  // Save original env vars
  const originalEnv = process.env;
  beforeEach(() => {
    // Clear environment variables
    // Replace process.env with a shallow copy so the deletes below never
    // mutate the real environment captured in originalEnv.
    process.env = { ...originalEnv };
    delete process.env.SONARQUBE_TOKEN;
    delete process.env.SONARQUBE_USERNAME;
    delete process.env.SONARQUBE_PASSWORD;
    delete process.env.SONARQUBE_PASSCODE;
    delete process.env.SONARQUBE_URL;
    delete process.env.SONARQUBE_ORGANIZATION;
  });
  afterEach(() => {
    // Restore original env vars
    process.env = originalEnv;
  });
  describe('createSonarQubeClient', () => {
    it('should create a client with token authentication', () => {
      const client = createSonarQubeClient('test-token', 'https://sonarqube.example.com', 'org1');
      expect(client).toBeDefined();
      expect(client).toBeInstanceOf(SonarQubeClient);
    });
    it('should use default URL when not provided', () => {
      const client = createSonarQubeClient('test-token');
      expect(client).toBeDefined();
    });
  });
  describe('createSonarQubeClientWithBasicAuth', () => {
    it('should create a client with basic authentication', () => {
      const client = createSonarQubeClientWithBasicAuth(
        'username',
        'password',
        'https://sonarqube.example.com',
        'org1'
      );
      expect(client).toBeDefined();
      expect(client).toBeInstanceOf(SonarQubeClient);
    });
    it('should use default URL when not provided', () => {
      const client = createSonarQubeClientWithBasicAuth('username', 'password');
      expect(client).toBeDefined();
    });
  });
  describe('createSonarQubeClientWithPasscode', () => {
    it('should create a client with passcode authentication', () => {
      const client = createSonarQubeClientWithPasscode(
        'test-passcode',
        'https://sonarqube.example.com',
        'org1'
      );
      expect(client).toBeDefined();
      expect(client).toBeInstanceOf(SonarQubeClient);
    });
    it('should use default URL when not provided', () => {
      const client = createSonarQubeClientWithPasscode('test-passcode');
      expect(client).toBeDefined();
    });
  });
  // Environment-driven factory: auth method is chosen from SONARQUBE_* vars.
  describe('createSonarQubeClientFromEnv', () => {
    it('should create a client with token from environment', () => {
      process.env.SONARQUBE_TOKEN = 'env-token';
      process.env.SONARQUBE_URL = 'https://sonarqube.example.com';
      process.env.SONARQUBE_ORGANIZATION = 'org1';
      const client = createSonarQubeClientFromEnv();
      expect(client).toBeDefined();
      expect(client).toBeInstanceOf(SonarQubeClient);
    });
    it('should create a client with basic auth from environment', () => {
      process.env.SONARQUBE_USERNAME = 'env-user';
      process.env.SONARQUBE_PASSWORD = 'env-pass';
      process.env.SONARQUBE_URL = 'https://sonarqube.example.com';
      const client = createSonarQubeClientFromEnv();
      expect(client).toBeDefined();
      expect(client).toBeInstanceOf(SonarQubeClient);
    });
    it('should create a client with passcode from environment', () => {
      process.env.SONARQUBE_PASSCODE = 'env-passcode';
      process.env.SONARQUBE_URL = 'https://sonarqube.example.com';
      const client = createSonarQubeClientFromEnv();
      expect(client).toBeDefined();
      expect(client).toBeInstanceOf(SonarQubeClient);
    });
    it('should prioritize token auth when multiple methods are available', () => {
      process.env.SONARQUBE_TOKEN = 'env-token';
      process.env.SONARQUBE_USERNAME = 'env-user';
      process.env.SONARQUBE_PASSWORD = 'env-pass';
      process.env.SONARQUBE_PASSCODE = 'env-passcode';
      // Should not throw and use token auth
      // NOTE(review): only construction is asserted; the chosen auth method
      // itself is not observable from here.
      const client = createSonarQubeClientFromEnv();
      expect(client).toBeDefined();
    });
    it('should use default URL when not provided', () => {
      process.env.SONARQUBE_TOKEN = 'env-token';
      const client = createSonarQubeClientFromEnv();
      expect(client).toBeDefined();
    });
    it('should throw error when no authentication is configured', () => {
      expect(() => createSonarQubeClientFromEnv()).toThrow(
        'No SonarQube authentication configured'
      );
    });
    it('should create client with basic auth when only username is provided (legacy token auth)', () => {
      process.env.SONARQUBE_USERNAME = 'env-user';
      const client = createSonarQubeClientFromEnv();
      expect(client).toBeDefined();
    });
    it('should throw error when only password is provided', () => {
      process.env.SONARQUBE_PASSWORD = 'env-pass';
      expect(() => createSonarQubeClientFromEnv()).toThrow(
        'No SonarQube authentication configured'
      );
    });
  });
  describe('SonarQubeClient static factory methods', () => {
    it('should create client with withBasicAuth', () => {
      const client = SonarQubeClient.withBasicAuth(
        'username',
        'password',
        'https://sonarqube.example.com',
        'org1'
      );
      expect(client).toBeDefined();
      expect(client).toBeInstanceOf(SonarQubeClient);
    });
    it('should create client with withPasscode', () => {
      const client = SonarQubeClient.withPasscode(
        'passcode',
        'https://sonarqube.example.com',
        'org1'
      );
      expect(client).toBeDefined();
      expect(client).toBeInstanceOf(SonarQubeClient);
    });
    it('should use default URL in static methods', () => {
      const basicClient = SonarQubeClient.withBasicAuth('username', 'password');
      expect(basicClient).toBeDefined();
      const passcodeClient = SonarQubeClient.withPasscode('passcode');
      expect(passcodeClient).toBeDefined();
    });
  });
});
```
--------------------------------------------------------------------------------
/src/__tests__/parameter-transformations-advanced.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';
describe('Parameter Transformations', () => {
describe('Page and PageSize Transformations', () => {
it('should transform valid and invalid page values', () => {
// Create schema that matches what's used in index.ts for page transformation
const pageSchema = z
.string()
.optional()
.transform((val: any) => (val ? parseInt(val, 10) || null : null));
// Test with valid number strings
expect(pageSchema.parse('10')).toBe(10);
expect(pageSchema.parse('20')).toBe(20);
// In the actual implementation, '0' returns 0 or null depending on the parseInt result
// Our implementation here returns null for '0' since parseInt('0', 10) is 0, which is falsy
expect(pageSchema.parse('0')).toBe(null);
// Test with invalid number strings - should return null
expect(pageSchema.parse('invalid')).toBe(null);
expect(pageSchema.parse('abc123')).toBe(null);
expect(pageSchema.parse('true')).toBe(null);
// Test with empty/undefined values - should return null
expect(pageSchema.parse(undefined)).toBe(null);
expect(pageSchema.parse('')).toBe(null);
});
});
describe('Boolean Transformations', () => {
it('should transform boolean string values', () => {
// Create schema that matches what's used in index.ts for boolean transformation
const booleanSchema = z
.union([z.boolean(), z.string().transform((val: any) => val === 'true')])
.nullable()
.optional();
// Test with string values
expect(booleanSchema.parse('true')).toBe(true);
expect(booleanSchema.parse('false')).toBe(false);
expect(booleanSchema.parse('anything-else')).toBe(false);
// Test with actual boolean values
expect(booleanSchema.parse(true)).toBe(true);
expect(booleanSchema.parse(false)).toBe(false);
// Test with null/undefined values
expect(booleanSchema.parse(null)).toBe(null);
expect(booleanSchema.parse(undefined)).toBe(undefined);
});
});
describe('Status Schema', () => {
it('should validate correct status values', () => {
// Create schema that matches what's used in index.ts for status validation
const statusSchema = z
.array(
z.enum([
'OPEN',
'CONFIRMED',
'REOPENED',
'RESOLVED',
'CLOSED',
'TO_REVIEW',
'IN_REVIEW',
'REVIEWED',
])
)
.nullable()
.optional();
// Test with valid status arrays
expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
expect(statusSchema.parse(['RESOLVED', 'CLOSED'])).toEqual(['RESOLVED', 'CLOSED']);
expect(statusSchema.parse(['TO_REVIEW', 'IN_REVIEW', 'REVIEWED'])).toEqual([
'TO_REVIEW',
'IN_REVIEW',
'REVIEWED',
]);
// Test with null/undefined values
expect(statusSchema.parse(null)).toBe(null);
expect(statusSchema.parse(undefined)).toBe(undefined);
// Should throw on invalid values
expect(() => statusSchema.parse(['INVALID'])).toThrow();
expect(() => statusSchema.parse(['open'])).toThrow(); // case sensitive
});
});
describe('Resolution Schema', () => {
  it('should validate correct resolution values', () => {
    // Same shape as the resolution validation schema defined in index.ts
    const resolutions = ['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED'] as const;
    const schema = z.array(z.enum(resolutions)).nullable().optional();
    // Valid resolution arrays round-trip unchanged
    expect(schema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual(['FALSE-POSITIVE', 'WONTFIX']);
    expect(schema.parse(['FIXED', 'REMOVED'])).toEqual(['FIXED', 'REMOVED']);
    // Nullable/optional wrappers pass null and undefined through untouched
    expect(schema.parse(null)).toBe(null);
    expect(schema.parse(undefined)).toBe(undefined);
    // Unknown values are rejected
    expect(() => schema.parse(['INVALID'])).toThrow();
  });
});
describe('Type Schema', () => {
  it('should validate correct type values', () => {
    // Same shape as the issue-type validation schema defined in index.ts
    const issueTypes = ['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT'] as const;
    const schema = z.array(z.enum(issueTypes)).nullable().optional();
    // Valid type arrays round-trip unchanged
    expect(schema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
    expect(schema.parse(['VULNERABILITY', 'SECURITY_HOTSPOT'])).toEqual([
      'VULNERABILITY',
      'SECURITY_HOTSPOT',
    ]);
    // Nullable/optional wrappers pass null and undefined through untouched
    expect(schema.parse(null)).toBe(null);
    expect(schema.parse(undefined)).toBe(undefined);
    // Unknown values are rejected
    expect(() => schema.parse(['INVALID'])).toThrow();
  });
});
describe('Severity Schema', () => {
  it('should validate correct severity values', () => {
    // Same shape as the severity validation schema defined in index.ts
    const severities = ['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'] as const;
    const schema = z.enum(severities).nullable().optional();
    // Every known severity parses to itself
    for (const severity of severities) {
      expect(schema.parse(severity)).toBe(severity);
    }
    // Nullable/optional wrappers pass null and undefined through untouched
    expect(schema.parse(null)).toBe(null);
    expect(schema.parse(undefined)).toBe(undefined);
    // Unknown values and wrong casing are rejected (enum is case sensitive)
    expect(() => schema.parse('INVALID')).toThrow();
    expect(() => schema.parse('minor')).toThrow();
  });
});
});
```
--------------------------------------------------------------------------------
/src/domains/components.ts:
--------------------------------------------------------------------------------
```typescript
import type {
ComponentsResult,
ComponentsTreeResult,
ComponentShowResult,
SonarQubeComponent,
} from '../types/index.js';
import type { ComponentQualifier } from '../types/components.js';
import { BaseDomain } from './base.js';
/** Parameters accepted by {@link ComponentsDomain.searchComponents}. */
type ComponentsSearchParams = {
  /** Free-text search query. */
  query?: string;
  /** Restrict results to these component qualifiers (e.g. project, file). */
  qualifiers?: ComponentQualifier[];
  /** Restrict results to a single language key. */
  language?: string;
  /** 1-based page index. */
  page?: number;
  /** Items per page; values above 500 are clamped by the domain. */
  pageSize?: number;
};
/** Parameters accepted by {@link ComponentsDomain.getComponentTree}. */
type ComponentsTreeParams = {
  /** Key of the base component whose tree is navigated. */
  component: string;
  /** Traversal strategy; defaults to 'children' in the domain. */
  strategy?: 'all' | 'children' | 'leaves';
  qualifiers?: ComponentQualifier[];
  /** Sort field; defaults to 'name' in the domain. */
  sort?: 'name' | 'path' | 'qualifier';
  /** Sort direction — NOTE: currently not forwarded (tree builder lacks support). */
  asc?: boolean;
  page?: number;
  pageSize?: number;
  /** Optional branch name to scope the tree. */
  branch?: string;
  /** Optional pull request ID to scope the tree. */
  pullRequest?: string;
};
/**
* Domain module for component-related operations
*/
/**
 * Domain module for component-related operations
 */
export class ComponentsDomain extends BaseDomain {
  /** Maximum page size accepted by the SonarQube components API. */
  private static readonly MAX_PAGE_SIZE = 500;

  /**
   * Search for components across projects
   * @param params Search parameters
   * @returns Promise with the list of components
   */
  async searchComponents(params: ComponentsSearchParams = {}): Promise<ComponentsResult> {
    const { query, qualifiers, language, page, pageSize } = params;
    this.logger.debug('Searching components', params);
    try {
      const builder = this.webApiClient.components.search();
      if (query !== undefined) {
        builder.query(query);
      }
      if (qualifiers !== undefined && qualifiers.length > 0) {
        builder.qualifiers(qualifiers as Parameters<typeof builder.qualifiers>[0]);
      }
      if (language !== undefined) {
        builder.languages([language]);
      }
      if (page !== undefined) {
        builder.page(page);
      }
      if (pageSize !== undefined) {
        builder.pageSize(ComponentsDomain.clampPageSize(pageSize));
      }
      const response = await builder.execute();
      this.logger.debug('Components retrieved successfully', {
        count: response.components.length,
      });
      return {
        components: response.components.map((comp) => this.transformComponent(comp)),
        paging: this.normalizePaging(response.paging, response.components.length),
      };
    } catch (error) {
      this.logger.error('Failed to search components', error);
      throw error;
    }
  }

  /**
   * Navigate component tree hierarchy
   * @param params Tree navigation parameters
   * @returns Promise with the component tree
   */
  async getComponentTree(params: ComponentsTreeParams): Promise<ComponentsTreeResult> {
    const {
      component,
      strategy = 'children',
      qualifiers,
      sort = 'name',
      page,
      pageSize,
      branch,
      pullRequest,
    } = params;
    this.logger.debug('Getting component tree', params);
    try {
      const builder = this.webApiClient.components.tree().component(component);
      // Apply tree-specific methods based on strategy ('all' needs no call)
      if (strategy === 'children') {
        builder.childrenOnly();
      } else if (strategy === 'leaves') {
        builder.leavesOnly();
      }
      if (qualifiers !== undefined && qualifiers.length > 0) {
        builder.qualifiers(qualifiers as Parameters<typeof builder.qualifiers>[0]);
      }
      // Apply sorting
      if (sort === 'name') {
        builder.sortByName();
      } else if (sort === 'path') {
        builder.sortByPath();
      } else if (sort === 'qualifier') {
        builder.sortByQualifier();
      }
      // asc parameter is not directly supported in tree builder
      if (page !== undefined) {
        builder.page(page);
      }
      if (pageSize !== undefined) {
        builder.pageSize(ComponentsDomain.clampPageSize(pageSize));
      }
      if (branch !== undefined) {
        builder.branch(branch);
      }
      if (pullRequest !== undefined) {
        builder.pullRequest(pullRequest);
      }
      const response = await builder.execute();
      this.logger.debug('Component tree retrieved successfully', {
        count: response.components.length,
      });
      return {
        components: response.components.map((comp) => this.transformComponent(comp)),
        baseComponent: response.baseComponent
          ? this.transformComponent(response.baseComponent)
          : undefined,
        paging: this.normalizePaging(response.paging, response.components.length),
      };
    } catch (error) {
      this.logger.error('Failed to get component tree', error);
      throw error;
    }
  }

  /**
   * Get detailed information about a specific component
   * @param key Component key
   * @param branch Optional branch name (not currently supported by API)
   * @param pullRequest Optional pull request ID (not currently supported by API)
   * @returns Promise with component details
   */
  async showComponent(
    key: string,
    branch?: string,
    pullRequest?: string
  ): Promise<ComponentShowResult> {
    this.logger.debug('Showing component', { key, branch, pullRequest });
    try {
      // The API client's show method expects a key parameter
      // Note: branch and pullRequest are not currently supported by the API client
      const response = await this.webApiClient.components.show(key);
      this.logger.debug('Component details retrieved successfully', { key });
      return {
        component: this.transformComponent(response.component),
        ancestors: response.ancestors?.map((comp) => this.transformComponent(comp)) ?? [],
      };
    } catch (error) {
      this.logger.error('Failed to show component', error);
      throw error;
    }
  }

  /**
   * Clamp a requested page size to the API maximum.
   */
  private static clampPageSize(pageSize: number): number {
    return Math.min(pageSize, ComponentsDomain.MAX_PAGE_SIZE);
  }

  /**
   * Fall back to a synthetic paging object when the API response omits one.
   * Uses ?? so only a missing paging object (not another falsy value) triggers the default.
   */
  private normalizePaging(
    paging: { pageIndex: number; pageSize: number; total: number } | undefined,
    total: number
  ): { pageIndex: number; pageSize: number; total: number } {
    return paging ?? { pageIndex: 1, pageSize: 100, total };
  }

  /**
   * Transform API response component to our domain model
   */
  private transformComponent(component: {
    key: string;
    name: string;
    qualifier: string;
    path?: string;
    longName?: string;
    enabled?: boolean;
  }): SonarQubeComponent {
    return {
      key: component.key,
      name: component.name,
      qualifier: component.qualifier,
      path: component.path,
      longName: component.longName,
      enabled: component.enabled,
    };
  }
}
```
--------------------------------------------------------------------------------
/src/types/index.ts:
--------------------------------------------------------------------------------
```typescript
// Import types for interface definitions
import type { PaginationParams } from './common.js';
import type { SonarQubeClient as WebApiClient } from 'sonarqube-web-api-client';
import type { SonarQubeProjectsResult } from './projects.js';
import type {
IssuesParams,
SonarQubeIssuesResult,
SonarQubeIssue,
MarkIssueFalsePositiveParams,
MarkIssueWontFixParams,
BulkIssueMarkParams,
AddCommentToIssueParams,
AssignIssueParams,
ConfirmIssueParams,
UnconfirmIssueParams,
ResolveIssueParams,
ReopenIssueParams,
SonarQubeIssueComment,
DoTransitionResponse,
} from './issues.js';
import type { SonarQubeMetricsResult } from './metrics.js';
import type {
ComponentMeasuresParams,
ComponentsMeasuresParams,
MeasuresHistoryParams,
SonarQubeComponentMeasuresResult,
SonarQubeComponentsMeasuresResult,
SonarQubeMeasuresHistoryResult,
} from './measures.js';
import type { SonarQubeHealthStatus, SonarQubeSystemStatus } from './system.js';
import type {
SonarQubeQualityGate,
SonarQubeQualityGatesResult,
SonarQubeQualityGateStatus,
ProjectQualityGateParams,
} from './quality-gates.js';
import type {
SourceCodeParams,
ScmBlameParams,
SonarQubeSourceResult,
SonarQubeScmBlameResult,
} from './source-code.js';
import type {
HotspotSearchParams,
SonarQubeHotspotSearchResult,
SonarQubeHotspotDetails,
HotspotStatusUpdateParams,
} from './hotspots.js';
// Components types are imported and re-exported below
// Re-export all types for backward compatibility and ease of use
// Common types
export type { PaginationParams, SeverityLevel } from './common.js';
// Project types
export type { SonarQubeProject, SonarQubeProjectsResult } from './projects.js';
// Issue types
export type {
SonarQubeIssue,
SonarQubeIssueComment,
SonarQubeIssueFlow,
SonarQubeIssueImpact,
SonarQubeIssueLocation,
SonarQubeMessageFormatting,
SonarQubeTextRange,
SonarQubeComponent,
SonarQubeRule,
SonarQubeUser,
SonarQubeFacet,
SonarQubeFacetValue,
SonarQubeIssuesResult,
IssuesParams,
MarkIssueFalsePositiveParams,
MarkIssueWontFixParams,
BulkIssueMarkParams,
AddCommentToIssueParams,
AssignIssueParams,
ConfirmIssueParams,
UnconfirmIssueParams,
ResolveIssueParams,
ReopenIssueParams,
DoTransitionRequest,
DoTransitionResponse,
} from './issues.js';
// Metric types
export type { SonarQubeMetric, SonarQubeMetricsResult } from './metrics.js';
// Measure types
export type {
ComponentMeasuresParams,
ComponentsMeasuresParams,
MeasuresHistoryParams,
SonarQubeMeasure,
SonarQubeMeasureComponent,
SonarQubeComponentMeasuresResult,
SonarQubeComponentsMeasuresResult,
SonarQubeMeasuresHistoryResult,
} from './measures.js';
// System types
export type { SonarQubeHealthStatus, SonarQubeSystemStatus } from './system.js';
// Quality gate types
export type {
SonarQubeQualityGateCondition,
SonarQubeQualityGate,
SonarQubeQualityGatesResult,
SonarQubeQualityGateStatus,
ProjectQualityGateParams,
} from './quality-gates.js';
// Source code types
export type {
SourceCodeParams,
ScmBlameParams,
SonarQubeLineIssue,
SonarQubeScmAuthor,
SonarQubeSourceLine,
SonarQubeSourceResult,
SonarQubeScmBlameResult,
} from './source-code.js';
// Hotspot types
export type {
HotspotSearchParams,
SonarQubeHotspot,
SonarQubeHotspotSearchResult,
SonarQubeHotspotDetails,
HotspotStatusUpdateParams,
} from './hotspots.js';
// Component types
export type {
ComponentQualifier,
ComponentsResult,
ComponentsTreeResult,
ComponentShowResult,
ComponentsParams,
ComponentsSearchParams,
ComponentsTreeParams,
ComponentShowParams,
} from './components.js';
// Client interface
/**
 * Contract implemented by the SonarQube client facade.
 * Groups every operation the MCP server performs against the SonarQube API.
 */
export interface ISonarQubeClient {
  // Expose webApiClient for testing purposes
  readonly webApiClient: WebApiClient;
  // Projects, issues and metrics
  listProjects(params?: PaginationParams): Promise<SonarQubeProjectsResult>;
  getIssues(params: IssuesParams): Promise<SonarQubeIssuesResult>;
  getMetrics(params?: PaginationParams): Promise<SonarQubeMetricsResult>;
  // System monitoring
  getHealth(): Promise<SonarQubeHealthStatus>;
  getStatus(): Promise<SonarQubeSystemStatus>;
  ping(): Promise<string>;
  // Measures API methods
  getComponentMeasures(params: ComponentMeasuresParams): Promise<SonarQubeComponentMeasuresResult>;
  getComponentsMeasures(
    params: ComponentsMeasuresParams
  ): Promise<SonarQubeComponentsMeasuresResult>;
  getMeasuresHistory(params: MeasuresHistoryParams): Promise<SonarQubeMeasuresHistoryResult>;
  // Quality Gates API methods
  listQualityGates(): Promise<SonarQubeQualityGatesResult>;
  getQualityGate(id: string): Promise<SonarQubeQualityGate>;
  getProjectQualityGateStatus(
    params: ProjectQualityGateParams
  ): Promise<SonarQubeQualityGateStatus>;
  // Source Code API methods
  getSourceCode(params: SourceCodeParams): Promise<SonarQubeSourceResult>;
  getScmBlame(params: ScmBlameParams): Promise<SonarQubeScmBlameResult>;
  // Security Hotspots API methods
  hotspots(params: HotspotSearchParams): Promise<SonarQubeHotspotSearchResult>;
  hotspot(hotspotKey: string): Promise<SonarQubeHotspotDetails>;
  updateHotspotStatus(params: HotspotStatusUpdateParams): Promise<void>;
  // Issue resolution methods (single and bulk variants)
  markIssueFalsePositive(params: MarkIssueFalsePositiveParams): Promise<DoTransitionResponse>;
  markIssueWontFix(params: MarkIssueWontFixParams): Promise<DoTransitionResponse>;
  markIssuesFalsePositive(params: BulkIssueMarkParams): Promise<DoTransitionResponse[]>;
  markIssuesWontFix(params: BulkIssueMarkParams): Promise<DoTransitionResponse[]>;
  // Issue comment methods
  addCommentToIssue(params: AddCommentToIssueParams): Promise<SonarQubeIssueComment>;
  // Issue assignment methods
  assignIssue(params: AssignIssueParams): Promise<SonarQubeIssue>;
  // Issue transition methods
  confirmIssue(params: ConfirmIssueParams): Promise<DoTransitionResponse>;
  unconfirmIssue(params: UnconfirmIssueParams): Promise<DoTransitionResponse>;
  resolveIssue(params: ResolveIssueParams): Promise<DoTransitionResponse>;
  reopenIssue(params: ReopenIssueParams): Promise<DoTransitionResponse>;
}
```
--------------------------------------------------------------------------------
/scripts/scan-container.sh:
--------------------------------------------------------------------------------
```bash
#!/usr/bin/env bash
# Container Security Scanning Script
# This script runs the same container security scan locally that runs in CI
# Usage: ./scripts/scan-container.sh [options]
# Fail fast: -e exit on error, -u error on unset vars, pipefail propagates
# failures through pipes.
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values (overridden by command-line flags below)
IMAGE_NAME=""
DOCKERFILE="./Dockerfile"
SEVERITY="HIGH,CRITICAL"
FORMAT="table"
OUTPUT_FILE=""
IGNORE_UNFIXED=false
SKIP_BUILD=false
VERBOSE=false
# Help function
# Print usage information to stdout.
# The heredoc body is emitted verbatim, so do not re-indent or reflow it.
show_help() {
cat << EOF
Container Security Scanning Script
USAGE:
$0 [OPTIONS]
OPTIONS:
-h, --help Show this help message
-i, --image NAME Image name to scan (default: builds from Dockerfile)
-f, --file PATH Path to Dockerfile (default: ./Dockerfile)
-s, --severity LEVEL Comma-separated severity levels (default: HIGH,CRITICAL)
Options: UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL
-o, --output FILE Output file for results
--format FORMAT Output format: table,json,sarif (default: table)
--ignore-unfixed Ignore unpatched/unfixed vulnerabilities
--skip-build Skip building the image (requires --image)
-v, --verbose Enable verbose output
EXAMPLES:
# Scan the default Dockerfile
$0
# Scan with all severity levels
$0 --severity UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL
# Scan an existing image
$0 --image myapp:latest --skip-build
# Generate SARIF report for GitHub
$0 --format sarif --output scan-results.sarif
# Scan and ignore unfixed vulnerabilities
$0 --ignore-unfixed
EOF
}
# Parse command line arguments
# Options that take a value consume two positional args (shift 2);
# boolean flags consume one (shift).
while [[ $# -gt 0 ]]; do
case $1 in
-h|--help)
show_help
exit 0
;;
-i|--image)
IMAGE_NAME="$2"
shift 2
;;
-f|--file)
DOCKERFILE="$2"
shift 2
;;
-s|--severity)
SEVERITY="$2"
shift 2
;;
-o|--output)
OUTPUT_FILE="$2"
shift 2
;;
--format)
FORMAT="$2"
shift 2
;;
--ignore-unfixed)
IGNORE_UNFIXED=true
shift
;;
--skip-build)
SKIP_BUILD=true
shift
;;
-v|--verbose)
VERBOSE=true
shift
;;
*)
# Any unrecognized flag aborts with usage info
echo -e "${RED}Unknown option: $1${NC}"
show_help
exit 1
;;
esac
done
# Check for required tools
# Verify that docker and trivy are on PATH.
# Exits 1 with per-tool installation hints when anything is missing.
check_requirements() {
  local missing_tools=()
  if ! command -v docker >/dev/null 2>&1; then
    missing_tools+=("docker")
  fi
  if ! command -v trivy >/dev/null 2>&1; then
    missing_tools+=("trivy")
  fi
  if [ ${#missing_tools[@]} -gt 0 ]; then
    echo -e "${RED}Error: Required tools are not installed:${NC}"
    for tool in "${missing_tools[@]}"; do
      echo -e " - $tool"
    done
    echo ""
    echo -e "${YELLOW}Installation instructions:${NC}"
    echo ""
    # Use [*] (single word joined by IFS) for the substring regex match;
    # the original [@] form is the SC2199 pitfall (unquoted multi-word expansion
    # inside [[ =~ ]]).
    if [[ " ${missing_tools[*]} " =~ " docker " ]]; then
      echo "Docker:"
      echo " - macOS/Windows: Download Docker Desktop from https://docker.com"
      echo " - Linux: Follow instructions at https://docs.docker.com/engine/install/"
      echo ""
    fi
    if [[ " ${missing_tools[*]} " =~ " trivy " ]]; then
      echo "Trivy:"
      echo " - macOS: brew install aquasecurity/trivy/trivy"
      echo " - Linux: See https://aquasecurity.github.io/trivy/latest/getting-started/installation/"
      echo " - Docker: docker run --rm aquasecurity/trivy:latest"
      echo ""
    fi
    exit 1
  fi
}
# Check requirements before proceeding
check_requirements
# Build Docker image if needed
if [[ "$SKIP_BUILD" == "false" ]]; then
if [[ -z "$IMAGE_NAME" ]]; then
IMAGE_NAME="security-scan:$(git rev-parse --short HEAD 2>/dev/null || echo 'latest')"
fi
echo -e "${BLUE}Building Docker image: $IMAGE_NAME${NC}"
if [[ "$VERBOSE" == "true" ]]; then
docker build -f "$DOCKERFILE" -t "$IMAGE_NAME" .
else
docker build -f "$DOCKERFILE" -t "$IMAGE_NAME" . > /dev/null 2>&1
fi
if [[ $? -ne 0 ]]; then
echo -e "${RED}Failed to build Docker image${NC}"
exit 1
fi
echo -e "${GREEN}Successfully built: $IMAGE_NAME${NC}"
else
if [[ -z "$IMAGE_NAME" ]]; then
echo -e "${RED}--image is required when using --skip-build${NC}"
exit 1
fi
fi
# Prepare Trivy command as an argv array. This avoids the original
# string + unquoted 'eval' pattern, which broke on output paths containing
# spaces and allowed shell injection through argument values.
TRIVY_ARGS=(trivy image)
if [[ "$IGNORE_UNFIXED" == "true" ]]; then
  TRIVY_ARGS+=(--ignore-unfixed)
fi
TRIVY_ARGS+=(--severity "$SEVERITY")
TRIVY_ARGS+=(--format "$FORMAT")
if [[ -n "$OUTPUT_FILE" ]]; then
  TRIVY_ARGS+=(--output "$OUTPUT_FILE")
fi
if [[ "$VERBOSE" == "true" ]]; then
  TRIVY_ARGS+=(--debug)
fi
# Add the image name last
TRIVY_ARGS+=("$IMAGE_NAME")
# Run the scan
echo -e "${BLUE}Running container security scan...${NC}"
echo -e "${BLUE}Command: ${TRIVY_ARGS[*]}${NC}"
# Execute scan and capture its exit code without tripping 'set -e'
SCAN_RESULT=0
"${TRIVY_ARGS[@]}" || SCAN_RESULT=$?
# Process results
if [[ $SCAN_RESULT -eq 0 ]]; then
  echo -e "${GREEN}✅ Container security scan passed!${NC}"
  echo -e "${GREEN}No vulnerabilities found matching severity threshold: $SEVERITY${NC}"
else
  echo -e "${RED}❌ Container security scan failed!${NC}"
  echo -e "${RED}Vulnerabilities found matching severity threshold: $SEVERITY${NC}"
  # Provide remediation tips
  echo -e "\n${YELLOW}Remediation Tips:${NC}"
  echo -e "1. Update base image to latest version"
  echo -e "2. Update dependencies in package.json"
  echo -e "3. Check for security advisories for your dependencies"
  echo -e "4. Consider using --ignore-unfixed flag for unpatched vulnerabilities"
  echo -e "5. Review detailed results above or in output file"
fi
# If output file was created, notify user
if [[ -n "$OUTPUT_FILE" ]] && [[ -f "$OUTPUT_FILE" ]]; then
  echo -e "\n${BLUE}Scan results saved to: $OUTPUT_FILE${NC}"
fi
# Exit with Trivy's status so CI can gate on scan failures
exit $SCAN_RESULT
```
--------------------------------------------------------------------------------
/src/__tests__/sonarqube-elicitation.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { Mocked } from 'vitest';
import {
createSonarQubeClientFromEnvWithElicitation,
setSonarQubeElicitationManager,
} from '../sonarqube.js';
import { ElicitationManager } from '../utils/elicitation.js';
// Tests for createSonarQubeClientFromEnvWithElicitation: when no auth env vars
// are set and elicitation is enabled, credentials are collected interactively
// and written back into process.env before the client is created.
describe('SonarQube Client with Elicitation', () => {
  const originalEnv = process.env;
  let mockElicitationManager: Mocked<ElicitationManager>;
  beforeEach(() => {
    vi.resetModules();
    // Copy env so each test can mutate it without leaking into others
    process.env = { ...originalEnv };
    // Clear any existing auth
    delete process.env.SONARQUBE_TOKEN;
    delete process.env.SONARQUBE_USERNAME;
    delete process.env.SONARQUBE_PASSWORD;
    delete process.env.SONARQUBE_PASSCODE;
    // Create mock elicitation manager (cast: only the methods used here are mocked)
    mockElicitationManager = {
      isEnabled: vi.fn(),
      collectAuthentication: vi.fn(),
      setServer: vi.fn(),
      getOptions: vi.fn(),
      updateOptions: vi.fn(),
      confirmBulkOperation: vi.fn(),
      collectResolutionComment: vi.fn(),
      disambiguateSelection: vi.fn(),
    } as unknown as Mocked<ElicitationManager>;
    // Set the mock manager
    setSonarQubeElicitationManager(mockElicitationManager);
  });
  afterEach(() => {
    // Restore the real environment
    process.env = originalEnv;
  });
  describe('createSonarQubeClientFromEnvWithElicitation', () => {
    it('should create client when environment is already configured', async () => {
      process.env.SONARQUBE_TOKEN = 'test-token';
      process.env.SONARQUBE_URL = 'https://test.sonarqube.com';
      const client = await createSonarQubeClientFromEnvWithElicitation();
      expect(client).toBeDefined();
      // Elicitation must be skipped entirely when auth already exists
      expect(mockElicitationManager.isEnabled).not.toHaveBeenCalled();
      expect(mockElicitationManager.collectAuthentication).not.toHaveBeenCalled();
    });
    it('should collect token authentication when no auth configured and elicitation enabled', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectAuthentication.mockResolvedValue({
        action: 'accept',
        content: {
          method: 'token',
          token: 'elicited-token',
        },
      });
      const client = await createSonarQubeClientFromEnvWithElicitation();
      expect(client).toBeDefined();
      expect(mockElicitationManager.isEnabled).toHaveBeenCalled();
      expect(mockElicitationManager.collectAuthentication).toHaveBeenCalled();
      // Elicited token is persisted into the environment
      expect(process.env.SONARQUBE_TOKEN).toBe('elicited-token');
    });
    it('should collect basic authentication when elicitation provides it', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectAuthentication.mockResolvedValue({
        action: 'accept',
        content: {
          method: 'basic',
          username: 'test-user',
          password: 'test-pass',
        },
      });
      const client = await createSonarQubeClientFromEnvWithElicitation();
      expect(client).toBeDefined();
      expect(process.env.SONARQUBE_USERNAME).toBe('test-user');
      expect(process.env.SONARQUBE_PASSWORD).toBe('test-pass');
    });
    it('should collect passcode authentication when elicitation provides it', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectAuthentication.mockResolvedValue({
        action: 'accept',
        content: {
          method: 'passcode',
          passcode: 'test-passcode',
        },
      });
      const client = await createSonarQubeClientFromEnvWithElicitation();
      expect(client).toBeDefined();
      expect(process.env.SONARQUBE_PASSCODE).toBe('test-passcode');
    });
    it('should throw error when elicitation is cancelled', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectAuthentication.mockResolvedValue({
        action: 'cancel',
      });
      await expect(createSonarQubeClientFromEnvWithElicitation()).rejects.toThrow(
        'No SonarQube authentication configured'
      );
    });
    it('should throw error when elicitation is rejected', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectAuthentication.mockResolvedValue({
        action: 'reject',
      });
      await expect(createSonarQubeClientFromEnvWithElicitation()).rejects.toThrow(
        'No SonarQube authentication configured'
      );
    });
    it('should throw error when elicitation is disabled and no auth configured', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(false);
      await expect(createSonarQubeClientFromEnvWithElicitation()).rejects.toThrow(
        'No SonarQube authentication configured'
      );
      expect(mockElicitationManager.collectAuthentication).not.toHaveBeenCalled();
    });
    // The next three cases cover 'accept' responses whose content is
    // incomplete for the chosen method — each must fail like missing auth.
    it('should handle missing token in elicitation response', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectAuthentication.mockResolvedValue({
        action: 'accept',
        content: {
          method: 'token',
          // token is missing
        },
      });
      await expect(createSonarQubeClientFromEnvWithElicitation()).rejects.toThrow(
        'No SonarQube authentication configured'
      );
    });
    it('should handle missing credentials in basic auth elicitation', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectAuthentication.mockResolvedValue({
        action: 'accept',
        content: {
          method: 'basic',
          username: 'test-user',
          // password is missing
        },
      });
      await expect(createSonarQubeClientFromEnvWithElicitation()).rejects.toThrow(
        'No SonarQube authentication configured'
      );
    });
    it('should handle missing passcode in elicitation response', async () => {
      mockElicitationManager.isEnabled.mockReturnValue(true);
      mockElicitationManager.collectAuthentication.mockResolvedValue({
        action: 'accept',
        content: {
          method: 'passcode',
          // passcode is missing
        },
      });
      await expect(createSonarQubeClientFromEnvWithElicitation()).rejects.toThrow(
        'No SonarQube authentication configured'
      );
    });
  });
});
```
--------------------------------------------------------------------------------
/src/types/issues.ts:
--------------------------------------------------------------------------------
```typescript
import type { PaginationParams, SeverityLevel } from './common.js';
export type { DoTransitionRequest, DoTransitionResponse } from 'sonarqube-web-api-client';
/**
 * Interface for SonarQube issue impact
 */
export interface SonarQubeIssueImpact {
  softwareQuality: string;
  severity: string;
}
/**
 * Interface for text range in SonarQube
 */
export interface SonarQubeTextRange {
  startLine: number;
  endLine: number;
  startOffset: number;
  endOffset: number;
}
/**
 * Interface for message formatting in SonarQube
 */
export interface SonarQubeMessageFormatting {
  start: number;
  end: number;
  type: string;
}
/**
 * Interface for issue location in SonarQube
 */
export interface SonarQubeIssueLocation {
  textRange: SonarQubeTextRange;
  msg: string;
  msgFormattings?: SonarQubeMessageFormatting[];
}
/**
 * Interface for issue flow in SonarQube
 */
export interface SonarQubeIssueFlow {
  locations: SonarQubeIssueLocation[];
}
/**
 * Interface for issue comment in SonarQube
 */
export interface SonarQubeIssueComment {
  key: string;
  login: string;
  htmlText: string;
  markdown: string;
  updatable: boolean;
  createdAt: string;
}
/**
 * Interface for SonarQube issue
 */
export interface SonarQubeIssue {
  key: string;
  rule: string;
  component: string;
  project: string;
  line?: number;
  hash?: string;
  textRange?: SonarQubeTextRange;
  message: string;
  messageFormattings?: SonarQubeMessageFormatting[];
  status: string;
  issueStatus?: string;
  effort?: string;
  debt?: string;
  author?: string;
  assignee?: string;
  severity?: string;
  tags: string[];
  creationDate: string;
  updateDate: string;
  type?: string;
  cleanCodeAttribute?: string;
  cleanCodeAttributeCategory?: string;
  prioritizedRule?: boolean;
  impacts?: SonarQubeIssueImpact[];
  comments?: SonarQubeIssueComment[];
  transitions?: string[];
  actions?: string[];
  flows?: SonarQubeIssueFlow[];
  quickFixAvailable?: boolean;
  ruleDescriptionContextKey?: string;
  codeVariants?: string[];
}
/**
 * Interface for SonarQube component
 *
 * Note: enabled/longName/path are declared as `T | undefined` (required keys)
 * rather than optional, so mapping code must always populate them.
 */
export interface SonarQubeComponent {
  key: string;
  enabled: boolean | undefined;
  qualifier: string;
  name: string;
  longName: string | undefined;
  path: string | undefined;
}
/**
 * Interface for SonarQube rule
 */
export interface SonarQubeRule {
  key: string;
  name: string;
  status: string;
  lang: string;
  langName: string;
}
/**
 * Interface for SonarQube user
 */
export interface SonarQubeUser {
  login: string;
  name: string;
  active: boolean;
  avatar?: string;
}
/**
 * Interface for SonarQube facet value
 */
export interface SonarQubeFacetValue {
  val: string;
  count: number;
}
/**
 * Interface for SonarQube facet
 */
export interface SonarQubeFacet {
  property: string;
  values: SonarQubeFacetValue[];
}
/**
 * Interface for SonarQube issues result
 *
 * users/facets are `T | undefined` required keys: they are present only when
 * the corresponding data was requested, but must always be assigned.
 */
export interface SonarQubeIssuesResult {
  issues: SonarQubeIssue[];
  components: SonarQubeComponent[];
  rules: SonarQubeRule[];
  users: SonarQubeUser[] | undefined;
  facets: SonarQubeFacet[] | undefined;
  paging: {
    pageIndex: number;
    pageSize: number;
    total: number;
  };
}
/**
 * Interface for get issues parameters
 */
export interface IssuesParams extends PaginationParams {
  // Component filters
  projectKey?: string;
  componentKeys?: string[];
  components?: string[];
  projects?: string[];
  onComponentOnly?: boolean;
  directories?: string[];
  files?: string[];
  scopes?: ('MAIN' | 'TEST' | 'OVERALL')[];
  // Branch and PR
  branch?: string;
  pullRequest?: string;
  // Issue filters
  issues?: string[];
  severities?: ('INFO' | 'MINOR' | 'MAJOR' | 'CRITICAL' | 'BLOCKER')[];
  statuses?: ('OPEN' | 'CONFIRMED' | 'REOPENED' | 'RESOLVED' | 'CLOSED')[];
  resolutions?: ('FALSE-POSITIVE' | 'WONTFIX' | 'FIXED' | 'REMOVED')[];
  resolved?: boolean;
  types?: ('CODE_SMELL' | 'BUG' | 'VULNERABILITY' | 'SECURITY_HOTSPOT')[];
  // Clean Code taxonomy
  cleanCodeAttributeCategories?: ('ADAPTABLE' | 'CONSISTENT' | 'INTENTIONAL' | 'RESPONSIBLE')[];
  impactSeverities?: SeverityLevel[];
  impactSoftwareQualities?: ('MAINTAINABILITY' | 'RELIABILITY' | 'SECURITY')[];
  issueStatuses?: ('OPEN' | 'CONFIRMED' | 'RESOLVED' | 'REOPENED' | 'CLOSED')[];
  // Rules and tags
  rules?: string[];
  tags?: string[];
  // Date filters
  createdAfter?: string;
  createdBefore?: string;
  createdAt?: string;
  createdInLast?: string;
  // Assignment
  assigned?: boolean;
  assignees?: string[];
  author?: string;
  authors?: string[];
  // Security standards
  cwe?: string[];
  owaspTop10?: string[];
  owaspTop10v2021?: string[];
  sansTop25?: string[];
  sonarsourceSecurity?: string[];
  sonarsourceSecurityCategory?: string[];
  // Languages
  languages?: string[];
  // Facets
  facets?: string[];
  facetMode?: 'effort' | 'count';
  // New code
  sinceLeakPeriod?: boolean;
  inNewCodePeriod?: boolean;
  // Sorting
  s?: string;
  asc?: boolean;
  // Additional fields
  additionalFields?: string[];
  // Deprecated (kept for backward compatibility with older callers)
  hotspots?: boolean;
  severity?: 'INFO' | 'MINOR' | 'MAJOR' | 'CRITICAL' | 'BLOCKER';
}
/**
 * Parameters for marking an issue as false positive
 */
export interface MarkIssueFalsePositiveParams {
  issueKey: string;
  comment?: string;
}
/**
 * Parameters for marking an issue as won't fix
 */
export interface MarkIssueWontFixParams {
  issueKey: string;
  comment?: string;
}
/**
 * Parameters for bulk issue operations
 */
export interface BulkIssueMarkParams {
  issueKeys: string[];
  comment?: string;
}
/**
 * Parameters for adding a comment to an issue
 */
export interface AddCommentToIssueParams {
  issueKey: string;
  text: string;
}
/**
 * Parameters for assigning an issue
 */
export interface AssignIssueParams {
  issueKey: string;
  // Omit assignee to unassign the issue
  assignee?: string;
}
/**
 * Parameters for confirming an issue
 */
export interface ConfirmIssueParams {
  issueKey: string;
  comment?: string;
}
/**
 * Parameters for unconfirming an issue
 */
export interface UnconfirmIssueParams {
  issueKey: string;
  comment?: string;
}
/**
 * Parameters for resolving an issue
 */
export interface ResolveIssueParams {
  issueKey: string;
  comment?: string;
}
/**
 * Parameters for reopening an issue
 */
export interface ReopenIssueParams {
  issueKey: string;
  comment?: string;
}
// Transition types are re-exported at the top of the file
```
--------------------------------------------------------------------------------
/docs/architecture.md:
--------------------------------------------------------------------------------
```markdown
# Architecture Documentation
## Overview
The SonarQube MCP Server is designed as a Model Context Protocol (MCP) server that bridges SonarQube's powerful code quality analysis capabilities with AI assistants. This document provides a comprehensive overview of the system architecture, design decisions, and implementation details.
## Architecture Diagram
```mermaid
graph TB
subgraph "MCP Client"
Claude[Claude Desktop/AI Assistant]
end
subgraph "MCP Server"
Transport[STDIO Transport]
Handlers[Tool Handlers]
Domains[Domain Services]
Client[SonarQube Client]
Circuit[Circuit Breaker]
end
subgraph "External Services"
SQ[SonarQube API]
end
Claude <--> Transport
Transport --> Handlers
Handlers --> Domains
Domains --> Client
Client --> Circuit
Circuit --> SQ
style Transport fill:#f9f,stroke:#333,stroke-width:2px
style Domains fill:#bfb,stroke:#333,stroke-width:2px
style Circuit fill:#fbb,stroke:#333,stroke-width:2px
```
## Core Components
### 1. Transport Layer
The server uses **STDIO Transport** exclusively, providing:
- Simple, reliable communication via standard input/output
- No network configuration required
- Perfect for local usage and MCP gateway deployments
- Minimal resource overhead
### 2. Tool Handlers
MCP tools are the primary interface for AI assistants to interact with SonarQube:
- Each tool maps to specific SonarQube API endpoints
- Input validation and parameter transformation
- Error handling with user-friendly messages
- Response formatting for AI consumption
### 3. Domain Services
Following Domain-Driven Design (DDD), functionality is organized into cohesive domains:
- **Projects Domain**: Project management and navigation
- **Issues Domain**: Code issues, bugs, and vulnerabilities
- **Metrics Domain**: Available metrics and their definitions
- **Measures Domain**: Metric values and history
- **Quality Gates Domain**: Quality gate definitions and status
- **Hotspots Domain**: Security hotspots management
- **Source Code Domain**: Source viewing with SCM blame info
- **System Domain**: Health and status monitoring
- **Components Domain**: File and directory navigation
### 4. SonarQube Client
The client layer handles all communication with SonarQube:
- **Authentication**: Supports token, basic auth, and system passcode
- **Error Handling**: Comprehensive error messages with solutions
- **Circuit Breaker**: Prevents cascading failures
- **Response Caching**: Reduces API calls for repeated requests
### 5. Circuit Breaker Pattern
Protects against SonarQube API failures:
```typescript
interface CircuitBreakerConfig {
timeout: 30000; // 30 second timeout
errorThreshold: 0.5; // 50% error rate triggers open
volumeThreshold: 5; // Minimum 5 requests
resetTimeout: 60000; // 60 seconds before retry
}
```
## Data Flow
1. **Request Flow**:
```
AI Assistant → STDIO → Tool Handler → Domain Service → SonarQube Client → Circuit Breaker → SonarQube API
```
2. **Response Flow**:
```
SonarQube API → Circuit Breaker → SonarQube Client → Domain Service → Tool Handler → STDIO → AI Assistant
```
## Authentication
The server supports multiple authentication methods for SonarQube:
1. **Token Authentication** (Recommended)
- Bearer tokens for SonarQube 10.0+
- Token as username for older versions
2. **Basic Authentication**
- Username/password combination
- Suitable for self-hosted instances
3. **System Passcode**
- For automated deployment scenarios
## Error Handling
Multi-level error handling ensures reliability:
1. **Transport Level**: Connection and protocol errors
2. **Tool Level**: Parameter validation and tool-specific errors
3. **Domain Level**: Business logic validation
4. **Client Level**: API communication errors
5. **Circuit Breaker**: Failure prevention and recovery
## Logging
File-based logging to avoid stdio conflicts:
```typescript
interface LogConfig {
file?: string; // Log file path
level: 'DEBUG' | 'INFO' | 'WARN' | 'ERROR';
format: 'json' | 'text';
}
```
## Performance Optimizations
1. **Minimal Dependencies**: Reduced package size
2. **Lazy Loading**: Components loaded on demand
3. **Response Caching**: Reduces API calls
4. **Circuit Breaker**: Prevents unnecessary failed requests
5. **Efficient Data Structures**: Optimized for common operations
## Deployment Architecture
### Local Deployment
```
┌─────────────────┐ ┌──────────────┐ ┌─────────────┐
│ Claude Desktop │────▶│ MCP Server │────▶│ SonarQube │
│ (MCP Client) │◀────│ (stdio) │◀────│ API │
└─────────────────┘ └──────────────┘ └─────────────┘
```
### Gateway Deployment
```
┌─────────────────┐ ┌──────────────┐ ┌──────────────┐ ┌─────────────┐
│ AI Client │────▶│ MCP Gateway │────▶│ MCP Server │────▶│ SonarQube │
│ │◀────│ │◀────│ (stdio) │◀────│ API │
└─────────────────┘ └──────────────┘ └──────────────┘ └─────────────┘
│
├── Authentication
├── Multi-tenancy
├── Load Balancing
└── Monitoring
```
## Design Principles
1. **Simplicity**: stdio-only transport reduces complexity
2. **Reliability**: Circuit breakers and comprehensive error handling
3. **Performance**: Minimal resource usage and efficient operations
4. **Flexibility**: Works with various MCP gateways
5. **Maintainability**: Clean domain separation and clear interfaces
## Future Considerations
While the current stdio-only design is optimal for most use cases, the architecture allows for:
1. **Gateway Extensions**: Enhanced features via MCP gateways
2. **Performance Improvements**: Further optimizations as needed
3. **Additional Tools**: New SonarQube features as they become available
4. **Enhanced Caching**: Smarter caching strategies
## Architecture Decision Records
All significant architectural decisions are documented in ADRs located in `/docs/architecture/decisions/`. Key decisions include:
- ADR-0003: Adopt Model Context Protocol
- ADR-0004: Use SonarQube Web API Client
- ADR-0005: Domain-Driven Design approach
- ADR-0010: Use stdio transport for MCP communication
- ADR-0011: Docker containerization for deployment
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0021-code-quality-toolchain-eslint-prettier-strict-typescript.md:
--------------------------------------------------------------------------------
```markdown
# 21. Code Quality Toolchain ESLint Prettier Strict TypeScript
Date: 2025-10-11
## Status
Accepted
## Context
The SonarQube MCP Server requires high code quality to ensure maintainability, reliability, and consistency across contributions. A comprehensive code quality toolchain is needed to:
- Enforce consistent code style across the codebase
- Catch potential bugs and code smells early
- Leverage TypeScript's type system to maximum effect
- Automate quality checks in development and CI/CD
- Reduce manual code review burden
- Prevent low-quality code from reaching production
Manual code review alone is insufficient for maintaining quality standards, especially as the codebase grows and more contributors join.
## Decision
We will implement a comprehensive code quality toolchain consisting of:
### 1. TypeScript with Strict Configuration
**Configuration** (tsconfig.json):
```json
{
"compilerOptions": {
"strict": true,
"noUncheckedIndexedAccess": true,
"exactOptionalPropertyTypes": true,
"noImplicitOverride": true,
"forceConsistentCasingInFileNames": true,
"noFallthroughCasesInSwitch": true
}
}
```
**Key Features**:
- `strict`: Enables all strict type checking options
- `noUncheckedIndexedAccess`: Requires checking for undefined when accessing arrays/objects by index
- `exactOptionalPropertyTypes`: Distinguishes between `undefined` and missing properties
- `noImplicitOverride`: Requires explicit `override` keyword for inherited methods
- `noFallthroughCasesInSwitch`: Prevents unintentional switch fallthrough
### 2. ESLint for Code Linting
**Configuration** (eslint.config.js with flat config):
- @typescript-eslint/eslint-plugin for TypeScript-specific rules
- eslint-config-prettier to disable conflicting rules
- eslint-plugin-prettier to run Prettier as an ESLint rule
- eslint-plugin-jsonc for JSON/JSONC linting
- Custom rules enforcing project conventions
**Key Rules**:
- Maximum cognitive complexity: 15
- Prefer `const` over `let`
- Prefer template literals over string concatenation
- Require explicit return types on exported functions
- No unused variables or imports
- Consistent naming conventions
### 3. Prettier for Code Formatting
**Configuration** (.prettierrc):
```json
{
"semi": true,
"trailingComma": "es5",
"singleQuote": true,
"printWidth": 100,
"tabWidth": 2
}
```
**Features**:
- Consistent code formatting across all files
- Formats TypeScript, JavaScript, JSON, Markdown, YAML
- Integrated with ESLint to avoid conflicts
- Automatic formatting on save (via editor integration)
### 4. Pre-commit Hooks with Husky
**Configuration** (.husky/pre-commit):
```bash
#!/usr/bin/env sh
pnpm precommit
```
**Pre-commit checks**:
```bash
pnpm precommit:
- pnpm audit --audit-level critical
- pnpm typecheck
- pnpm lint
- pnpm lint:workflows (actionlint)
- pnpm lint:markdown
- pnpm lint:yaml
- pnpm format
- pnpm test
```
### 5. Lint-staged for Performance
Only lints files that are staged for commit:
```json
{
"*.{ts,tsx,js,json,md,yml,yaml}": ["prettier --write"],
"*.{ts,tsx,js,json,jsonc,json5}": ["eslint --fix"],
"*.md": ["markdownlint-cli2 --fix"],
"*.{yml,yaml}": ["yamllint"]
}
```
## Consequences
### Positive
- **Consistent Code Style**: Prettier ensures uniform formatting across the codebase
- **Early Bug Detection**: TypeScript strict mode and ESLint catch errors before runtime
- **Type Safety**: Strict TypeScript configuration prevents common type-related bugs
- **Automated Quality**: Pre-commit hooks prevent low-quality code from being committed
- **Reduced Review Time**: Automated checks handle style and common issues
- **Confidence in Refactoring**: Strong typing enables safe refactoring
- **Better IDE Support**: Strict typing provides better autocomplete and error detection
- **Onboarding**: New contributors immediately follow project standards
### Negative
- **Initial Setup Complexity**: Multiple tools require configuration
- **Slower Commits**: Pre-commit hooks add time to the commit process (~30 seconds)
- **Learning Curve**: Strict TypeScript can be challenging for beginners
- **False Positives**: Occasionally requires `eslint-disable` comments
- **Configuration Maintenance**: Tools require updates and rule adjustments
### Neutral
- **Opinionated Decisions**: Some style choices may not match personal preferences
- **Tool Ecosystem**: Requires keeping multiple tools in sync
- **CI/CD Integration**: Same checks run in CI, adding pipeline time
## Implementation
### Installation
All tools are configured as dev dependencies:
```json
{
"devDependencies": {
"@typescript-eslint/eslint-plugin": "^8.46.0",
"@typescript-eslint/parser": "^8.46.0",
"eslint": "^9.37.0",
"eslint-config-prettier": "^10.1.8",
"eslint-plugin-prettier": "^5.5.4",
"prettier": "^3.6.2",
"husky": "^9.1.7",
"lint-staged": "^16.2.4",
"markdownlint-cli2": "0.18.1",
"yaml-lint": "1.7.0"
}
}
```
### Usage
**Development workflow**:
```bash
# Run all quality checks
pnpm precommit
# Individual checks
pnpm typecheck # Type checking
pnpm lint # ESLint
pnpm lint:fix # Auto-fix ESLint issues
pnpm format # Check Prettier formatting
pnpm format:fix # Auto-format with Prettier
```
**CI/CD Integration**:
Quality checks run in parallel in the CI pipeline (see ADR-0024).
### Enforcement
1. **Pre-commit hooks**: Block commits with quality issues
2. **CI/CD pipeline**: Fail builds with quality issues
3. **Required status checks**: PR cannot merge without passing checks
4. **SonarCloud**: Additional continuous code quality analysis
## Examples
### Before (without tooling):
```typescript
function foo(x) {
if (x) {
if (x.bar) {
if (x.bar.baz) {
return x.bar.baz.qux;
}
}
}
return null;
}
```
### After (with tooling):
```typescript
interface Input {
  bar?: { baz?: { qux?: string } };
}

function foo(x: Input | undefined): string | undefined {
  return x?.bar?.baz?.qux;
}
```
## References
- TypeScript Strict Mode: https://www.typescriptlang.org/tsconfig#strict
- ESLint Configuration: eslint.config.js
- Prettier Configuration: .prettierrc
- Pre-commit Hooks: .husky/pre-commit
- SonarCloud Analysis: https://sonarcloud.io/project/overview?id=sonarqube-mcp-server
```
--------------------------------------------------------------------------------
/src/__tests__/tool-registration-schema.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';

// These schemas mirror the inline schemas used in the MCP tool registrations.
// They are re-declared here so their transformation and validation behavior
// can be exercised without importing the server entry point (which has
// side effects on load).
describe('Tool Registration Schemas', () => {
  describe('Page Transformations', () => {
    // Test page and page_size transformations
    it('should transform string to number in page parameters', () => {
      // Define a schema similar to what's used in the MCP tool registrations.
      // Note: `parseInt(val, 10) || null` maps unparseable strings to null;
      // a literal '0' would also become null, matching the registration schema.
      const pageSchema = z
        .string()
        .optional()
        .transform((val: string | undefined) => (val ? parseInt(val, 10) || null : null));
      // Valid number
      expect(pageSchema.parse('10')).toBe(10);
      // Invalid number should return null
      expect(pageSchema.parse('not-a-number')).toBe(null);
      // Empty string should return null
      expect(pageSchema.parse('')).toBe(null);
      // Undefined should return null
      expect(pageSchema.parse(undefined)).toBe(null);
    });
  });

  describe('Boolean Transformations', () => {
    // Test boolean transformations
    it('should transform string to boolean in boolean parameters', () => {
      // Define a schema similar to what's used in the MCP tool registrations.
      // Only the exact string 'true' maps to boolean true; any other string is false.
      const booleanSchema = z
        .union([z.boolean(), z.string().transform((val: string) => val === 'true')])
        .nullable()
        .optional();
      // String 'true' should become boolean true
      expect(booleanSchema.parse('true')).toBe(true);
      // String 'false' should become boolean false
      expect(booleanSchema.parse('false')).toBe(false);
      // Boolean true should remain true
      expect(booleanSchema.parse(true)).toBe(true);
      // Boolean false should remain false
      expect(booleanSchema.parse(false)).toBe(false);
      // Null should remain null
      expect(booleanSchema.parse(null)).toBe(null);
      // Undefined should remain undefined
      expect(booleanSchema.parse(undefined)).toBe(undefined);
    });
  });

  describe('Enumeration Validations', () => {
    // Test severity enum validations
    it('should validate severity enum values', () => {
      // Define a schema similar to what's used in the MCP tool registrations
      const severitySchema = z
        .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
        .nullable()
        .optional();
      // Valid values should pass through
      expect(severitySchema.parse('INFO')).toBe('INFO');
      expect(severitySchema.parse('MINOR')).toBe('MINOR');
      expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
      expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
      expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
      // Null should remain null
      expect(severitySchema.parse(null)).toBe(null);
      // Undefined should remain undefined
      expect(severitySchema.parse(undefined)).toBe(undefined);
      // Invalid values should throw
      expect(() => severitySchema.parse('INVALID')).toThrow();
    });

    // Test status enum array validations
    it('should validate status enum arrays', () => {
      // Define a schema similar to what's used in the MCP tool registrations
      const statusSchema = z
        .array(
          z.enum([
            'OPEN',
            'CONFIRMED',
            'REOPENED',
            'RESOLVED',
            'CLOSED',
            'TO_REVIEW',
            'IN_REVIEW',
            'REVIEWED',
          ])
        )
        .nullable()
        .optional();
      // Valid array should pass through
      expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
      // Null should remain null
      expect(statusSchema.parse(null)).toBe(null);
      // Undefined should remain undefined
      expect(statusSchema.parse(undefined)).toBe(undefined);
      // Invalid values should throw
      expect(() => statusSchema.parse(['INVALID'])).toThrow();
    });

    // Test complete projects tool schema
    it('should correctly parse and transform projects tool parameters', () => {
      // Define a schema similar to the projects tool schema
      const projectsSchema = z.object({
        page: z
          .string()
          .optional()
          .transform((val: string | undefined) => (val ? parseInt(val, 10) || null : null)),
        page_size: z
          .string()
          .optional()
          .transform((val: string | undefined) => (val ? parseInt(val, 10) || null : null)),
      });
      // Test with valid parameters
      const result = projectsSchema.parse({
        page: '2',
        page_size: '20',
      });
      expect(result.page).toBe(2);
      expect(result.page_size).toBe(20);
    });

    // Test complete issues tool schema
    it('should correctly parse and transform issues tool parameters', () => {
      // Define schemas similar to the issues tool schema
      const severitySchema = z
        .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
        .nullable()
        .optional();
      const pageSchema = z
        .string()
        .optional()
        .transform((val: string | undefined) => (val ? parseInt(val, 10) || null : null));
      const booleanSchema = z
        .union([z.boolean(), z.string().transform((val: string) => val === 'true')])
        .nullable()
        .optional();
      const stringArraySchema = z.array(z.string()).nullable().optional();
      // Create the schema
      const issuesSchema = z.object({
        project_key: z.string(),
        severity: severitySchema,
        page: pageSchema,
        page_size: pageSchema,
        resolved: booleanSchema,
        rules: stringArraySchema,
      });
      // Test with valid parameters
      const result = issuesSchema.parse({
        project_key: 'my-project',
        severity: 'MAJOR',
        page: '5',
        page_size: '25',
        resolved: 'true',
        rules: ['rule1', 'rule2'],
      });
      expect(result.project_key).toBe('my-project');
      expect(result.severity).toBe('MAJOR');
      expect(result.page).toBe(5);
      expect(result.page_size).toBe(25);
      expect(result.resolved).toBe(true);
      expect(result.rules).toEqual(['rule1', 'rule2']);
    });

    // Test union schema for component_keys and metric_keys
    it('should handle union schema for string or array inputs', () => {
      // Define a schema similar to the component_keys and metric_keys parameters
      const unionSchema = z.union([z.string(), z.array(z.string())]);
      // Test with string
      expect(unionSchema.parse('single-value')).toBe('single-value');
      // Test with array
      expect(unionSchema.parse(['value1', 'value2'])).toEqual(['value1', 'value2']);
    });
  });
});
```
--------------------------------------------------------------------------------
/src/__tests__/tool-handler-lambdas.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';

// Since we can't easily mock the MCP server after it's already been created in
// index.js, we directly test the transformation functions used in the schemas.
describe('Tool Schema Transformations', () => {
  describe('Page Parameter Transformation', () => {
    it('should properly transform page string values to numbers or null', () => {
      // Create a transform function matching what's in the code.
      // `parseInt(val, 10) || null` converts unparseable strings to null.
      const transformPageValue = (val: string | null | undefined): number | null => {
        return val ? parseInt(val, 10) || null : null;
      };
      // Test valid numeric strings
      expect(transformPageValue('1')).toBe(1);
      expect(transformPageValue('100')).toBe(100);
      // Test strings that parseInt can't fully convert
      expect(transformPageValue('123abc')).toBe(123); // Still parses first part
      expect(transformPageValue('abc123')).toBe(null); // Can't parse, returns null
      // Test non-numeric strings
      expect(transformPageValue('invalid')).toBe(null);
      expect(transformPageValue('null')).toBe(null);
      expect(transformPageValue('undefined')).toBe(null);
      // Test edge cases
      expect(transformPageValue('')).toBe(null);
      expect(transformPageValue(null)).toBe(null);
      expect(transformPageValue(undefined)).toBe(null);
    });
  });

  describe('Boolean Parameter Transformation', () => {
    it('should properly transform string values to booleans', () => {
      // Create a schema similar to what's in the code.
      // Only the exact string 'true' maps to boolean true (case sensitive).
      const booleanTransform = z
        .union([z.boolean(), z.string().transform((val: string) => val === 'true')])
        .nullable()
        .optional();
      // Test string values
      expect(booleanTransform.parse('true')).toBe(true);
      expect(booleanTransform.parse('false')).toBe(false);
      expect(booleanTransform.parse('True')).toBe(false); // Case sensitive
      expect(booleanTransform.parse('1')).toBe(false);
      expect(booleanTransform.parse('0')).toBe(false);
      expect(booleanTransform.parse('yes')).toBe(false);
      expect(booleanTransform.parse('no')).toBe(false);
      // Test boolean values (pass through)
      expect(booleanTransform.parse(true)).toBe(true);
      expect(booleanTransform.parse(false)).toBe(false);
      // Test null/undefined handling
      expect(booleanTransform.parse(null)).toBe(null);
      expect(booleanTransform.parse(undefined)).toBe(undefined);
    });
  });

  describe('String Array Parameter Validation', () => {
    it('should properly validate string arrays', () => {
      // Create a schema similar to what's in the code
      const stringArraySchema = z.array(z.string()).nullable().optional();
      // Test valid arrays
      expect(stringArraySchema.parse(['test1', 'test2'])).toEqual(['test1', 'test2']);
      expect(stringArraySchema.parse([''])).toEqual(['']);
      expect(stringArraySchema.parse([])).toEqual([]);
      // Test null/undefined handling
      expect(stringArraySchema.parse(null)).toBe(null);
      expect(stringArraySchema.parse(undefined)).toBe(undefined);
    });
  });

  describe('Enum Parameter Validation', () => {
    it('should properly validate status enum values', () => {
      // Create a schema similar to what's in the code
      const statusSchema = z
        .array(
          z.enum([
            'OPEN',
            'CONFIRMED',
            'REOPENED',
            'RESOLVED',
            'CLOSED',
            'TO_REVIEW',
            'IN_REVIEW',
            'REVIEWED',
          ])
        )
        .nullable()
        .optional();
      // Test valid status arrays
      expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
      expect(statusSchema.parse(['RESOLVED'])).toEqual(['RESOLVED']);
      // Test null/undefined handling
      expect(statusSchema.parse(null)).toBe(null);
      expect(statusSchema.parse(undefined)).toBe(undefined);
      // Test invalid values
      expect(() => statusSchema.parse(['INVALID'])).toThrow();
      expect(() => statusSchema.parse(['open'])).toThrow(); // Case sensitive
    });

    it('should properly validate resolution enum values', () => {
      // Create a schema similar to what's in the code
      const resolutionSchema = z
        .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
        .nullable()
        .optional();
      // Test valid resolution arrays
      expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
        'FALSE-POSITIVE',
        'WONTFIX',
      ]);
      expect(resolutionSchema.parse(['FIXED', 'REMOVED'])).toEqual(['FIXED', 'REMOVED']);
      // Test null/undefined handling
      expect(resolutionSchema.parse(null)).toBe(null);
      expect(resolutionSchema.parse(undefined)).toBe(undefined);
      // Test invalid values
      expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
    });

    it('should properly validate type enum values', () => {
      // Create a schema similar to what's in the code
      const typeSchema = z
        .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
        .nullable()
        .optional();
      // Test valid type arrays
      expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
      expect(typeSchema.parse(['VULNERABILITY', 'SECURITY_HOTSPOT'])).toEqual([
        'VULNERABILITY',
        'SECURITY_HOTSPOT',
      ]);
      // Test null/undefined handling
      expect(typeSchema.parse(null)).toBe(null);
      expect(typeSchema.parse(undefined)).toBe(undefined);
      // Test invalid values
      expect(() => typeSchema.parse(['INVALID'])).toThrow();
    });

    it('should properly validate severity enum value', () => {
      // Create a schema similar to what's in the code
      const severitySchema = z
        .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
        .nullable()
        .optional();
      // Test valid severities
      expect(severitySchema.parse('INFO')).toBe('INFO');
      expect(severitySchema.parse('MINOR')).toBe('MINOR');
      expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
      expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
      expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
      // Test null/undefined handling
      expect(severitySchema.parse(null)).toBe(null);
      expect(severitySchema.parse(undefined)).toBe(undefined);
      // Test invalid values
      expect(() => severitySchema.parse('INVALID')).toThrow();
      expect(() => severitySchema.parse('info')).toThrow(); // Case sensitive
    });
  });
});
```
--------------------------------------------------------------------------------
/src/__tests__/domains/hotspots-domain.test.ts:
--------------------------------------------------------------------------------
```typescript
import nock from 'nock';
import { HotspotsDomain } from '../../domains/hotspots.js';
import { SonarQubeClient as WebApiClient } from 'sonarqube-web-api-client';

// Unit tests for HotspotsDomain. HTTP traffic is intercepted with nock, so no
// real SonarQube instance is needed. Each interceptor asserts the exact URL,
// query string, and (for POSTs) request body the domain is expected to send.
describe('HotspotsDomain', () => {
  const baseUrl = 'https://sonarqube.example.com';
  const organization = 'test-org';
  let domain: HotspotsDomain;
  let webApiClient: WebApiClient;

  beforeEach(() => {
    // Fresh client/domain per test; clear interceptors so tests stay isolated.
    webApiClient = WebApiClient.withToken(baseUrl, 'test-token', { organization });
    domain = new HotspotsDomain(webApiClient, organization);
    nock.cleanAll();
  });

  afterEach(() => {
    nock.cleanAll();
  });

  describe('hotspots', () => {
    it('should search hotspots with all parameters', async () => {
      const mockResponse = {
        paging: { pageIndex: 1, pageSize: 100, total: 1 },
        hotspots: [
          {
            key: 'hotspot1',
            component: 'com.example:file.java',
            project: 'com.example',
            securityCategory: 'sql-injection',
            vulnerabilityProbability: 'HIGH',
            status: 'TO_REVIEW',
            line: 42,
            message: 'SQL injection vulnerability',
            author: 'user1',
            creationDate: '2023-01-01T00:00:00Z',
            updateDate: '2023-01-02T00:00:00Z',
          },
        ],
        components: [
          {
            key: 'com.example:file.java',
            qualifier: 'FIL',
            name: 'file.java',
            longName: 'src/main/java/com/example/file.java',
            path: 'src/main/java/com/example/file.java',
          },
        ],
      };
      // NOTE(review): the call below also passes branch, pullRequest,
      // assignedToMe, and inNewCodePeriod, but the expected query string only
      // contains onlyMine/sinceLeakPeriod/p/ps etc. — presumably the domain
      // maps assignedToMe -> onlyMine and drops the rest; confirm against the
      // domain implementation if this interceptor ever stops matching.
      nock(baseUrl)
        .get('/api/hotspots/search')
        .query({
          projectKey: 'test-project',
          status: 'TO_REVIEW',
          resolution: 'FIXED',
          files: 'file1.java,file2.java',
          onlyMine: 'true',
          sinceLeakPeriod: 'true',
          p: 2,
          ps: 50,
          organization,
        })
        .reply(200, mockResponse);
      const result = await domain.hotspots({
        projectKey: 'test-project',
        branch: 'main',
        pullRequest: '123',
        status: 'TO_REVIEW',
        resolution: 'FIXED',
        files: ['file1.java', 'file2.java'],
        assignedToMe: true,
        sinceLeakPeriod: true,
        inNewCodePeriod: true,
        page: 2,
        pageSize: 50,
      });
      // The domain returns a trimmed object; verify it forwards the API fields.
      expect(result).toEqual({
        hotspots: mockResponse.hotspots,
        components: mockResponse.components,
        paging: mockResponse.paging,
      });
    });

    it('should search hotspots with minimal parameters', async () => {
      // Response without a `components` field: the domain should surface
      // `components: undefined` rather than throwing.
      const mockResponse = {
        paging: { pageIndex: 1, pageSize: 100, total: 0 },
        hotspots: [],
      };
      nock(baseUrl).get('/api/hotspots/search').query({ organization }).reply(200, mockResponse);
      const result = await domain.hotspots({ page: undefined, pageSize: undefined });
      expect(result).toEqual({
        hotspots: [],
        components: undefined,
        paging: mockResponse.paging,
      });
    });

    it('should handle hotspots without optional parameters', async () => {
      const mockResponse = {
        paging: { pageIndex: 1, pageSize: 100, total: 1 },
        hotspots: [
          {
            key: 'hotspot1',
            component: 'com.example:file.java',
            project: 'com.example',
            securityCategory: 'sql-injection',
            vulnerabilityProbability: 'HIGH',
            status: 'TO_REVIEW',
            line: 42,
            message: 'SQL injection vulnerability',
          },
        ],
      };
      // Only projectKey is passed, so only projectKey + organization should
      // appear in the outgoing query string.
      nock(baseUrl)
        .get('/api/hotspots/search')
        .query({
          projectKey: 'test-project',
          organization,
        })
        .reply(200, mockResponse);
      const result = await domain.hotspots({
        projectKey: 'test-project',
        page: undefined,
        pageSize: undefined,
      });
      expect(result.hotspots).toHaveLength(1);
      expect(result.hotspots[0]!.key).toBe('hotspot1');
    });
  });

  describe('hotspot', () => {
    it('should get hotspot details', async () => {
      // The raw API nests component/project/author as objects; the domain is
      // expected to flatten them to their key/login strings (asserted below).
      const mockApiResponse = {
        key: 'hotspot1',
        component: {
          key: 'com.example:file.java',
          qualifier: 'FIL',
          name: 'file.java',
        },
        project: {
          key: 'com.example',
          name: 'Example Project',
        },
        rule: {
          key: 'squid:S2077',
          name: 'SQL queries should not be vulnerable to injection attacks',
          securityCategory: 'sql-injection',
          vulnerabilityProbability: 'HIGH',
        },
        status: 'TO_REVIEW',
        line: 42,
        message: 'SQL injection vulnerability',
        author: {
          login: 'user1',
          name: 'User One',
        },
        creationDate: '2023-01-01T00:00:00Z',
        updateDate: '2023-01-02T00:00:00Z',
        changelog: [],
        comment: [],
      };
      nock(baseUrl)
        .get('/api/hotspots/show')
        .query({
          hotspot: 'hotspot1',
          organization,
        })
        .reply(200, mockApiResponse);
      const result = await domain.hotspot('hotspot1');
      // Flattened fields
      expect(result.key).toBe('hotspot1');
      expect(result.component).toBe('com.example:file.java');
      expect(result.project).toBe('com.example');
      expect(result.securityCategory).toBe('sql-injection');
      expect(result.vulnerabilityProbability).toBe('HIGH');
      expect(result.status).toBe('TO_REVIEW');
      expect(result.author).toBe('user1');
      // The rule object is passed through unchanged.
      expect(result.rule).toEqual(mockApiResponse.rule);
    });
  });

  describe('updateHotspotStatus', () => {
    it('should update hotspot status with all parameters', async () => {
      // POST body must contain exactly these fields; 204 = no response body.
      const scope = nock(baseUrl)
        .post('/api/hotspots/change_status', {
          hotspot: 'hotspot1',
          status: 'REVIEWED',
          resolution: 'SAFE',
          comment: 'This is safe',
        })
        .query({ organization })
        .reply(204);
      await domain.updateHotspotStatus({
        hotspot: 'hotspot1',
        status: 'REVIEWED',
        resolution: 'SAFE',
        comment: 'This is safe',
      });
      // isDone() confirms the interceptor was actually consumed.
      expect(scope.isDone()).toBe(true);
    });

    it('should update hotspot status without optional parameters', async () => {
      // Omitted resolution/comment must not appear in the POST body.
      const scope = nock(baseUrl)
        .post('/api/hotspots/change_status', {
          hotspot: 'hotspot1',
          status: 'TO_REVIEW',
        })
        .query({ organization })
        .reply(204);
      await domain.updateHotspotStatus({
        hotspot: 'hotspot1',
        status: 'TO_REVIEW',
      });
      expect(scope.isDone()).toBe(true);
    });
  });
});
```
--------------------------------------------------------------------------------
/src/__tests__/quality-gates.test.ts:
--------------------------------------------------------------------------------
```typescript
import nock from 'nock';
import { createSonarQubeClient, SonarQubeClient, ProjectQualityGateParams } from '../sonarqube.js';
import {
  handleSonarQubeListQualityGates,
  handleSonarQubeGetQualityGate,
  handleSonarQubeQualityGateStatus,
} from '../index.js';

// Tests for the quality-gates client methods and their MCP tool handlers.
// HTTP traffic is intercepted with nock; handlers are expected to wrap the
// raw API response as a single JSON text content item.
describe('SonarQube Quality Gates API', () => {
  const baseUrl = 'https://sonarcloud.io';
  const token = 'fake-token';
  let client: SonarQubeClient;

  beforeEach(() => {
    client = createSonarQubeClient(token, baseUrl) as SonarQubeClient;
    // Block any request that is not explicitly mocked.
    nock.disableNetConnect();
  });

  afterEach(() => {
    nock.cleanAll();
    nock.enableNetConnect();
  });

  describe('listQualityGates', () => {
    it('should return a list of quality gates', async () => {
      const mockResponse = {
        qualitygates: [
          {
            id: '1',
            name: 'Sonar way',
            isDefault: true,
            isBuiltIn: true,
          },
          {
            id: '2',
            name: 'Custom Quality Gate',
            isDefault: false,
            isBuiltIn: false,
          },
        ],
        default: '1',
        actions: {
          create: true,
        },
      };
      // `.query(() => true)` accepts any query string (e.g. an organization
      // parameter the client may append).
      nock(baseUrl)
        .get('/api/qualitygates/list')
        .query(() => true)
        .reply(200, mockResponse);
      const result = await client.listQualityGates();
      expect(result).toEqual(mockResponse);
    });

    it('handler should return quality gates in the expected format', async () => {
      const mockResponse = {
        qualitygates: [
          {
            id: '1',
            name: 'Sonar way',
            isDefault: true,
            isBuiltIn: true,
          },
        ],
        default: '1',
      };
      nock(baseUrl)
        .get('/api/qualitygates/list')
        .query(() => true)
        .reply(200, mockResponse);
      const response = await handleSonarQubeListQualityGates(client);
      // Handler contract: one text content item whose text is the JSON payload.
      expect(response).toHaveProperty('content');
      expect(response.content).toHaveLength(1);
      expect(response.content[0]?.type).toBe('text');
      const parsedContent = JSON.parse(response.content[0]?.text as string);
      expect(parsedContent).toEqual(mockResponse);
    });
  });

  describe('getQualityGate', () => {
    it('should return quality gate details including conditions', async () => {
      const gateId = '1';
      const mockResponse = {
        id: '1',
        name: 'Sonar way',
        isDefault: true,
        isBuiltIn: true,
        conditions: [
          {
            id: '3',
            metric: 'new_coverage',
            op: 'LT',
            error: '80',
          },
          {
            id: '4',
            metric: 'new_bugs',
            op: 'GT',
            error: '0',
          },
        ],
      };
      nock(baseUrl).get('/api/qualitygates/show').query({ id: gateId }).reply(200, mockResponse);
      const result = await client.getQualityGate(gateId);
      expect(result).toEqual(mockResponse);
    });

    it('handler should return quality gate details in the expected format', async () => {
      const gateId = '1';
      const mockResponse = {
        id: '1',
        name: 'Sonar way',
        conditions: [
          {
            id: '3',
            metric: 'new_coverage',
            op: 'LT',
            error: '80',
          },
        ],
      };
      nock(baseUrl).get('/api/qualitygates/show').query({ id: gateId }).reply(200, mockResponse);
      const response = await handleSonarQubeGetQualityGate({ id: gateId }, client);
      expect(response).toHaveProperty('content');
      expect(response.content).toHaveLength(1);
      expect(response.content[0]?.type).toBe('text');
      const parsedContent = JSON.parse(response.content[0]?.text as string);
      expect(parsedContent).toEqual(mockResponse);
    });
  });

  describe('getProjectQualityGateStatus', () => {
    it('should return the quality gate status for a project', async () => {
      const params: ProjectQualityGateParams = {
        projectKey: 'my-project',
      };
      const mockResponse = {
        projectStatus: {
          status: 'OK',
          conditions: [
            {
              status: 'OK',
              metricKey: 'new_reliability_rating',
              comparator: 'GT',
              errorThreshold: '1',
              actualValue: '1',
            },
            {
              status: 'ERROR',
              metricKey: 'new_security_rating',
              comparator: 'GT',
              errorThreshold: '1',
              actualValue: '2',
            },
          ],
          periods: [
            {
              index: 1,
              mode: 'previous_version',
              date: '2020-01-01T00:00:00+0000',
            },
          ],
          ignoredConditions: false,
        },
      };
      nock(baseUrl)
        .get('/api/qualitygates/project_status')
        .query({ projectKey: params.projectKey })
        .reply(200, mockResponse);
      const result = await client.getProjectQualityGateStatus(params);
      expect(result).toEqual(mockResponse);
    });

    it('should include branch parameter if provided', async () => {
      const params: ProjectQualityGateParams = {
        projectKey: 'my-project',
        branch: 'feature/branch',
      };
      const mockResponse = {
        projectStatus: {
          status: 'OK',
          conditions: [],
          ignoredConditions: false,
        },
      };
      // Exact query match: branch must be forwarded alongside projectKey.
      const scope = nock(baseUrl)
        .get('/api/qualitygates/project_status')
        .query({ projectKey: params.projectKey, branch: params.branch })
        .reply(200, mockResponse);
      const result = await client.getProjectQualityGateStatus(params);
      expect(result).toEqual(mockResponse);
      // isDone() confirms the interceptor (and thus the branch param) was used.
      expect(scope.isDone()).toBe(true);
    });

    it('handler should return project quality gate status in the expected format', async () => {
      const params: ProjectQualityGateParams = {
        projectKey: 'my-project',
      };
      const mockResponse = {
        projectStatus: {
          status: 'OK',
          conditions: [],
          ignoredConditions: false,
        },
      };
      nock(baseUrl)
        .get('/api/qualitygates/project_status')
        .query({ projectKey: params.projectKey })
        .reply(200, mockResponse);
      const response = await handleSonarQubeQualityGateStatus(params, client);
      expect(response).toHaveProperty('content');
      expect(response.content).toHaveLength(1);
      expect(response.content[0]?.type).toBe('text');
      const parsedContent = JSON.parse(response.content[0]?.text as string);
      expect(parsedContent).toEqual(mockResponse);
    });
  });
});
```
--------------------------------------------------------------------------------
/docs/troubleshooting.md:
--------------------------------------------------------------------------------
```markdown
# Troubleshooting Guide
## Overview
This guide helps diagnose and resolve common issues with the SonarQube MCP Server. It covers authentication problems, connection issues, performance concerns, and provides debugging techniques.
## Quick Diagnostics
### Enable Debug Logging
```bash
# Set environment variables
export LOG_LEVEL=DEBUG
export LOG_FILE=/tmp/sonarqube-mcp-debug.log
# Or in your configuration
{
"env": {
"LOG_LEVEL": "DEBUG",
"LOG_FILE": "/tmp/sonarqube-mcp-debug.log"
}
}
```
### Check Server Logs
```bash
# View recent logs
tail -f /tmp/sonarqube-mcp-debug.log
# Search for errors
grep ERROR /tmp/sonarqube-mcp-debug.log
# Check authentication issues
grep "auth" /tmp/sonarqube-mcp-debug.log
```
## Common Issues
### 1. Authentication Failures
#### "Authentication failed"
**Causes & Solutions:**
1. **Invalid Token**
- Verify token in SonarQube: **My Account** → **Security**
- Generate a new token if expired
- Ensure token has proper permissions
2. **Wrong Authentication Method**
```json
// Token authentication (recommended)
{
"env": {
"SONARQUBE_TOKEN": "squ_xxxxxxxx"
}
}
// Basic authentication
{
"env": {
"SONARQUBE_USERNAME": "user",
"SONARQUBE_PASSWORD": "pass"
}
}
```
3. **SonarQube Version Compatibility**
- SonarQube 10.0+: Uses Bearer token authentication
- SonarQube < 10.0: Token used as username in Basic auth
- Server automatically handles this
#### "No SonarQube authentication configured"
**Solution:** Set one of these authentication methods:
- `SONARQUBE_TOKEN` (recommended)
- `SONARQUBE_USERNAME` and `SONARQUBE_PASSWORD`
- `SONARQUBE_PASSCODE`
### 2. Connection Issues
#### "Connection refused" or "ECONNREFUSED"
**Diagnostics:**
```bash
# Test SonarQube connectivity
curl -I https://your-sonarqube.com/api/system/status
# Check with authentication
curl -H "Authorization: Bearer YOUR_TOKEN" \
https://your-sonarqube.com/api/system/status
```
**Solutions:**
1. Verify `SONARQUBE_URL` is correct
2. Check network connectivity
3. Verify firewall rules
4. For self-hosted: ensure SonarQube is running
#### "SSL/TLS Certificate Error"
**For self-signed certificates:**
```bash
# Option 1: Add CA certificate
export NODE_EXTRA_CA_CERTS=/path/to/ca-cert.pem
# Option 2: DEVELOPMENT ONLY - disable verification
export NODE_TLS_REJECT_UNAUTHORIZED=0
```
### 3. SonarCloud Issues
#### "Organization required"
**Solution:** Add organization for SonarCloud:
```json
{
"env": {
"SONARQUBE_URL": "https://sonarcloud.io",
"SONARQUBE_TOKEN": "your-token",
"SONARQUBE_ORGANIZATION": "your-org-key"
}
}
```
### 4. Permission Issues
#### "Access denied" or "Insufficient permissions"
**Solutions:**
1. Check token permissions in SonarQube
2. Ensure user has "Browse" permission on projects
3. For admin tools: verify admin permissions
4. Create project-specific tokens if needed
### 5. Performance Issues
#### Slow Response Times
**Diagnostics:**
```bash
# Check circuit breaker status in logs
grep "circuit breaker" /tmp/sonarqube-mcp-debug.log
# Monitor API response times
grep "API call took" /tmp/sonarqube-mcp-debug.log
```
**Solutions:**
1. Check SonarQube server performance
2. Review network latency
3. Circuit breaker may be activated - check logs
4. Consider reducing request complexity
### 6. Docker Issues
#### Container Exits Immediately
**Diagnostics:**
```bash
# Check container logs
docker logs sonarqube-mcp
# Run interactively for debugging
docker run -it --rm \
-e SONARQUBE_URL=https://sonarqube.com \
-e SONARQUBE_TOKEN=token \
-e LOG_LEVEL=DEBUG \
sapientpants/sonarqube-mcp-server:latest
```
#### "No such file or directory"
**Solution:** Ensure volume mounts exist:
```bash
# Create log directory
mkdir -p ./logs
# Run with proper volume (docker run bind mounts require an absolute host path)
docker run -v "$(pwd)/logs:/logs" ...
```
### 7. Claude Desktop Issues
#### Server Not Appearing in Claude
**Solutions:**
1. Restart Claude Desktop after config changes
2. Check configuration syntax:
```json
{
"mcpServers": {
"sonarqube": {
"command": "npx",
"args": ["-y", "sonarqube-mcp-server@latest"],
"env": {
"SONARQUBE_URL": "https://sonarqube.com",
"SONARQUBE_TOKEN": "your-token"
}
}
}
}
```
#### "Command not found"
**Solutions:**
1. Ensure Node.js is installed
2. Use Docker instead of npx
3. Check PATH environment variable
## Debugging Techniques
### 1. Verbose Logging
```json
{
"env": {
"LOG_LEVEL": "DEBUG",
"LOG_FILE": "/tmp/sonarqube-mcp.log"
}
}
```
### 2. Test Specific Tools
In Claude Desktop:
```
Use the system_ping tool to check SonarQube connectivity
```
### 3. Isolate Issues
Test components individually:
```bash
# Test SonarQube API directly
curl -H "Authorization: Bearer $SONARQUBE_TOKEN" \
$SONARQUBE_URL/api/system/status
# Test MCP server standalone
echo '{"jsonrpc":"2.0","method":"tools/list","id":1}' | \
npx sonarqube-mcp-server
```
### 4. Environment Verification
```bash
# Check all environment variables
env | grep SONARQUBE
# Verify no conflicting variables
env | grep -E "(PROXY|SSL|TLS|NODE)"
```
## Error Messages Reference
| Error | Cause | Solution |
| ----------------------- | ------------------------- | -------------------------- |
| "Authentication failed" | Invalid credentials | Check token/password |
| "Resource not found" | Invalid project/component | Verify resource exists |
| "Network error" | Connection issue | Check URL and network |
| "Rate limit exceeded" | Too many requests | Wait and retry |
| "Circuit breaker open" | Multiple failures | Check SonarQube health |
| "Invalid URL" | Malformed URL | Remove trailing slash |
| "Organization required" | SonarCloud without org | Add SONARQUBE_ORGANIZATION |
## Getting Help
### Collect Debug Information
When reporting issues, include:
1. Server version
2. Error messages from logs
3. Environment configuration (without secrets)
4. Steps to reproduce
### Support Channels
- GitHub Issues: https://github.com/sapientpants/sonarqube-mcp-server/issues
- Documentation: https://github.com/sapientpants/sonarqube-mcp-server/docs
### Debug Checklist
- [ ] Enable debug logging
- [ ] Check authentication configuration
- [ ] Verify network connectivity
- [ ] Test SonarQube API directly
- [ ] Review recent changes
- [ ] Check for version compatibility
- [ ] Isolate the failing component
```
--------------------------------------------------------------------------------
/src/__tests__/zod-transforms.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { z } from 'zod';
// Unit tests for the Zod transformations used by the MCP tool schemas.
// Each test re-declares the schema inline (mirroring the production
// definitions) and exercises valid values, null, and undefined.
describe('Zod Schema Transformations', () => {
  describe('Page and PageSize Transformations', () => {
    it('should transform page parameter from string to number', () => {
      // Zod infers the transform input as `string | undefined`; no explicit
      // `any` is needed. Non-numeric strings map to null via NaN || null.
      const pageSchema = z
        .string()
        .optional()
        .transform((val) => (val ? parseInt(val, 10) || null : null));
      expect(pageSchema.parse('10')).toBe(10);
      expect(pageSchema.parse('invalid')).toBe(null);
      expect(pageSchema.parse(undefined)).toBe(null);
      expect(pageSchema.parse('')).toBe(null);
    });
    it('should transform page_size parameter from string to number', () => {
      const pageSizeSchema = z
        .string()
        .optional()
        .transform((val) => (val ? parseInt(val, 10) || null : null));
      expect(pageSizeSchema.parse('20')).toBe(20);
      expect(pageSizeSchema.parse('invalid')).toBe(null);
      expect(pageSizeSchema.parse(undefined)).toBe(null);
      expect(pageSizeSchema.parse('')).toBe(null);
    });
  });
  describe('Boolean Parameter Transformations', () => {
    // These union schemas accept native booleans or the strings 'true'/'false';
    // any string other than 'true' transforms to false.
    it('should transform resolved parameter from string to boolean', () => {
      const resolvedSchema = z
        .union([z.boolean(), z.string().transform((val) => val === 'true')])
        .nullable()
        .optional();
      expect(resolvedSchema.parse('true')).toBe(true);
      expect(resolvedSchema.parse('false')).toBe(false);
      expect(resolvedSchema.parse(true)).toBe(true);
      expect(resolvedSchema.parse(false)).toBe(false);
      expect(resolvedSchema.parse(null)).toBe(null);
      expect(resolvedSchema.parse(undefined)).toBe(undefined);
    });
    it('should transform on_component_only parameter from string to boolean', () => {
      const onComponentOnlySchema = z
        .union([z.boolean(), z.string().transform((val) => val === 'true')])
        .nullable()
        .optional();
      expect(onComponentOnlySchema.parse('true')).toBe(true);
      expect(onComponentOnlySchema.parse('false')).toBe(false);
      expect(onComponentOnlySchema.parse(true)).toBe(true);
      expect(onComponentOnlySchema.parse(false)).toBe(false);
      expect(onComponentOnlySchema.parse(null)).toBe(null);
      expect(onComponentOnlySchema.parse(undefined)).toBe(undefined);
    });
    it('should transform since_leak_period parameter from string to boolean', () => {
      const sinceLeakPeriodSchema = z
        .union([z.boolean(), z.string().transform((val) => val === 'true')])
        .nullable()
        .optional();
      expect(sinceLeakPeriodSchema.parse('true')).toBe(true);
      expect(sinceLeakPeriodSchema.parse('false')).toBe(false);
      expect(sinceLeakPeriodSchema.parse(true)).toBe(true);
      expect(sinceLeakPeriodSchema.parse(false)).toBe(false);
      expect(sinceLeakPeriodSchema.parse(null)).toBe(null);
      expect(sinceLeakPeriodSchema.parse(undefined)).toBe(undefined);
    });
    it('should transform in_new_code_period parameter from string to boolean', () => {
      const inNewCodePeriodSchema = z
        .union([z.boolean(), z.string().transform((val) => val === 'true')])
        .nullable()
        .optional();
      expect(inNewCodePeriodSchema.parse('true')).toBe(true);
      expect(inNewCodePeriodSchema.parse('false')).toBe(false);
      expect(inNewCodePeriodSchema.parse(true)).toBe(true);
      expect(inNewCodePeriodSchema.parse(false)).toBe(false);
      expect(inNewCodePeriodSchema.parse(null)).toBe(null);
      expect(inNewCodePeriodSchema.parse(undefined)).toBe(undefined);
    });
  });
  describe('Enum Parameter Transformations', () => {
    it('should validate severity enum values', () => {
      const severitySchema = z
        .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
        .nullable()
        .optional();
      expect(severitySchema.parse('INFO')).toBe('INFO');
      expect(severitySchema.parse('MINOR')).toBe('MINOR');
      expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
      expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
      expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
      expect(severitySchema.parse(null)).toBe(null);
      expect(severitySchema.parse(undefined)).toBe(undefined);
      expect(() => severitySchema.parse('INVALID')).toThrow();
    });
    it('should validate statuses enum values', () => {
      const statusSchema = z
        .array(
          z.enum([
            'OPEN',
            'CONFIRMED',
            'REOPENED',
            'RESOLVED',
            'CLOSED',
            'TO_REVIEW',
            'IN_REVIEW',
            'REVIEWED',
          ])
        )
        .nullable()
        .optional();
      expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
      expect(statusSchema.parse(['REOPENED', 'RESOLVED'])).toEqual(['REOPENED', 'RESOLVED']);
      expect(statusSchema.parse(null)).toBe(null);
      expect(statusSchema.parse(undefined)).toBe(undefined);
      expect(() => statusSchema.parse(['INVALID'])).toThrow();
    });
    it('should validate resolutions enum values', () => {
      const resolutionSchema = z
        .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
        .nullable()
        .optional();
      expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
        'FALSE-POSITIVE',
        'WONTFIX',
      ]);
      expect(resolutionSchema.parse(['FIXED', 'REMOVED'])).toEqual(['FIXED', 'REMOVED']);
      expect(resolutionSchema.parse(null)).toBe(null);
      expect(resolutionSchema.parse(undefined)).toBe(undefined);
      expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
    });
    it('should validate types enum values', () => {
      const typeSchema = z
        .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
        .nullable()
        .optional();
      expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
      expect(typeSchema.parse(['VULNERABILITY', 'SECURITY_HOTSPOT'])).toEqual([
        'VULNERABILITY',
        'SECURITY_HOTSPOT',
      ]);
      expect(typeSchema.parse(null)).toBe(null);
      expect(typeSchema.parse(undefined)).toBe(undefined);
      expect(() => typeSchema.parse(['INVALID'])).toThrow();
    });
  });
  describe('Array Parameter Transformations', () => {
    it('should validate array of strings', () => {
      const stringArraySchema = z.array(z.string()).nullable().optional();
      expect(stringArraySchema.parse(['a', 'b', 'c'])).toEqual(['a', 'b', 'c']);
      expect(stringArraySchema.parse([])).toEqual([]);
      expect(stringArraySchema.parse(null)).toBe(null);
      expect(stringArraySchema.parse(undefined)).toBe(undefined);
    });
  });
});
```
--------------------------------------------------------------------------------
/src/transports/session-manager.ts:
--------------------------------------------------------------------------------
```typescript
import { v4 as uuidv4 } from 'uuid';
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { createLogger } from '../utils/logger.js';
const logger = createLogger('session-manager');
/**
 * Represents a single HTTP session with its own MCP server instance.
 */
export interface ISession {
/** Unique session identifier (a UUID v4 string). */
id: string;
/** The MCP server instance dedicated to this session. */
server: Server;
/** Timestamp of session creation. */
createdAt: Date;
/** Timestamp of the most recent access; drives timeout-based cleanup. */
lastActivityAt: Date;
/** Optional caller-supplied metadata associated with the session. */
metadata?: Record<string, unknown>;
}
/**
 * Configuration for the session manager.
 * All fields are optional; defaults are applied by the SessionManager constructor.
 */
export interface ISessionManagerConfig {
/**
 * Session timeout in milliseconds. A session idle longer than this
 * is considered expired and will be removed.
 * Default: 30 minutes (1800000 ms)
 */
sessionTimeout?: number;
/**
 * Interval for cleaning up inactive sessions in milliseconds.
 * Default: 5 minutes (300000 ms)
 */
cleanupInterval?: number;
/**
 * Maximum number of concurrent sessions; createSession throws beyond this.
 * Default: 100
 */
maxSessions?: number;
}
/**
 * Manages HTTP transport sessions with lifecycle management.
 *
 * Sessions are tracked in an in-memory map keyed by UUID. A periodic timer
 * evicts sessions that have been idle longer than the configured timeout.
 */
export class SessionManager {
  private readonly sessions: Map<string, ISession> = new Map();
  private cleanupTimer?: NodeJS.Timeout;
  private readonly config: Required<ISessionManagerConfig>;

  constructor(config: ISessionManagerConfig = {}) {
    // Fill in defaults: 30 min timeout, 5 min cleanup interval, 100 sessions.
    const { sessionTimeout = 1800000, cleanupInterval = 300000, maxSessions = 100 } = config;
    this.config = { sessionTimeout, cleanupInterval, maxSessions };
    this.startCleanupTimer();
  }

  /**
   * Create a new session with a unique ID.
   *
   * @param server The MCP server instance for this session
   * @param metadata Optional metadata to associate with the session
   * @returns The created session
   * @throws Error if maximum sessions limit is reached
   */
  createSession(server: Server, metadata?: Record<string, unknown>): ISession {
    if (this.sessions.size >= this.config.maxSessions) {
      throw new Error(
        `Maximum number of sessions (${this.config.maxSessions}) reached. Please try again later.`
      );
    }
    const id = uuidv4();
    // Conditional spread keeps `metadata` absent (not `undefined`) when it was
    // not supplied, which matters under exactOptionalPropertyTypes.
    const session: ISession = {
      id,
      server,
      createdAt: new Date(),
      lastActivityAt: new Date(),
      ...(metadata ? { metadata } : {}),
    };
    this.sessions.set(id, session);
    logger.info(`Session created: ${id}`);
    logger.debug(`Active sessions: ${this.sessions.size}`);
    return session;
  }

  /**
   * Get a session by its ID.
   *
   * @param sessionId The session ID
   * @returns The session if found, undefined otherwise
   */
  getSession(sessionId: string): ISession | undefined {
    const found = this.sessions.get(sessionId);
    if (!found) {
      return undefined;
    }
    // Touch the session so timeout-based cleanup treats it as active.
    found.lastActivityAt = new Date();
    return found;
  }

  /**
   * Remove a session by its ID.
   *
   * @param sessionId The session ID
   * @returns True if the session was removed, false if not found
   */
  removeSession(sessionId: string): boolean {
    if (!this.sessions.delete(sessionId)) {
      return false;
    }
    logger.info(`Session removed: ${sessionId}`);
    logger.debug(`Active sessions: ${this.sessions.size}`);
    return true;
  }

  /**
   * Check if a session exists and is still valid.
   * An expired session is removed as a side effect of this check.
   *
   * @param sessionId The session ID
   * @returns True if the session exists and is valid, false otherwise
   */
  hasSession(sessionId: string): boolean {
    const session = this.sessions.get(sessionId);
    if (!session) {
      return false;
    }
    if (this.isExpired(session, Date.now())) {
      this.removeSession(sessionId);
      return false;
    }
    return true;
  }

  /** True when the session's idle time exceeds the configured timeout. */
  private isExpired(session: ISession, nowMs: number): boolean {
    return nowMs - session.lastActivityAt.getTime() > this.config.sessionTimeout;
  }

  /**
   * Get all active sessions.
   *
   * @returns Array of active sessions
   */
  getAllSessions(): ISession[] {
    return [...this.sessions.values()];
  }

  /**
   * Get the number of active sessions.
   *
   * @returns Number of active sessions
   */
  getSessionCount(): number {
    return this.sessions.size;
  }

  /**
   * Clean up inactive sessions based on timeout.
   */
  private cleanupInactiveSessions(): void {
    const nowMs = Date.now();
    // Collect IDs first so we don't mutate the map while iterating it.
    const expiredIds = [...this.sessions.entries()]
      .filter(([, session]) => this.isExpired(session, nowMs))
      .map(([id]) => id);
    if (expiredIds.length === 0) {
      return;
    }
    logger.info(`Cleaning up ${expiredIds.length} inactive sessions`);
    for (const id of expiredIds) {
      this.removeSession(id);
    }
  }

  /**
   * Start the cleanup timer for inactive sessions.
   */
  private startCleanupTimer(): void {
    const timer = setInterval(() => {
      this.cleanupInactiveSessions();
    }, this.config.cleanupInterval);
    // unref (when available) so the interval never keeps the process alive.
    timer.unref?.();
    this.cleanupTimer = timer;
  }

  /**
   * Stop the cleanup timer and clear all sessions.
   * Should be called when shutting down the HTTP transport.
   */
  shutdown(): void {
    logger.info('Shutting down session manager');
    if (this.cleanupTimer) {
      clearInterval(this.cleanupTimer);
      // `delete` (rather than assigning undefined) is required for
      // exactOptionalPropertyTypes compatibility.
      delete this.cleanupTimer;
    }
    const clearedCount = this.sessions.size;
    this.sessions.clear();
    logger.info(`Cleared ${clearedCount} active sessions`);
  }

  /**
   * Get session statistics for monitoring.
   *
   * @returns Session statistics
   */
  getStatistics(): {
    activeSessions: number;
    maxSessions: number;
    sessionTimeout: number;
    oldestSession?: Date;
    newestSession?: Date;
  } {
    const sessions = this.getAllSessions();
    const stats: {
      activeSessions: number;
      maxSessions: number;
      sessionTimeout: number;
      oldestSession?: Date;
      newestSession?: Date;
    } = {
      activeSessions: sessions.length,
      maxSessions: this.config.maxSessions,
      sessionTimeout: this.config.sessionTimeout,
    };
    if (sessions.length > 0) {
      const byCreation = [...sessions].sort(
        (a, b) => a.createdAt.getTime() - b.createdAt.getTime()
      );
      const first = byCreation[0];
      const last = byCreation.at(-1);
      if (first) {
        stats.oldestSession = first.createdAt;
      }
      if (last) {
        stats.newestSession = last.createdAt;
      }
    }
    return stats;
  }
}
```
--------------------------------------------------------------------------------
/.github/scripts/determine-artifact.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# =============================================================================
# SCRIPT: Determine Build Artifact from GitHub Releases
# PURPOSE: Find and validate the correct artifact from a GitHub release
# USAGE: ./determine-artifact.sh --tag <tag> --repo <repo> --version <version> --prefix <prefix> --output <output_file>
# NOTE: Requires GITHUB_TOKEN in the environment, plus curl and jq.
# NOTE: Uses GNU `head -n -1` to split body/status; assumed to run on GNU
#       coreutils (GitHub-hosted Linux runners) — not portable to BSD/macOS.
# =============================================================================
set -euo pipefail
# Default values
TAG_NAME=""
REPO=""
VERSION=""
PREFIX=""
OUTPUT_FILE=""
# Parse command line arguments
while [[ $# -gt 0 ]]; do
  case $1 in
    --tag)
      TAG_NAME="$2"
      shift 2
      ;;
    --repo)
      REPO="$2"
      shift 2
      ;;
    --version)
      VERSION="$2"
      shift 2
      ;;
    --prefix)
      PREFIX="$2"
      shift 2
      ;;
    --output)
      OUTPUT_FILE="$2"
      shift 2
      ;;
    *)
      echo "Unknown option: $1"
      exit 1
      ;;
  esac
done
# Validate required parameters
if [ -z "$TAG_NAME" ] || [ -z "$REPO" ] || [ -z "$VERSION" ] || [ -z "$PREFIX" ] || [ -z "$OUTPUT_FILE" ]; then
  echo "❌ Missing required parameters"
  echo "Usage: $0 --tag <tag> --repo <repo> --version <version> --prefix <prefix> --output <output_file>"
  exit 1
fi
echo "🔍 Determining artifact source for $PREFIX-$VERSION from release $TAG_NAME"
# Fetch tag information from GitHub API
# (URL arguments are quoted throughout to avoid word splitting/globbing — SC2086)
TAG_API_URL="https://api.github.com/repos/$REPO/git/refs/tags/$TAG_NAME"
TAG_RESPONSE=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" -w "\n%{http_code}" "$TAG_API_URL")
TAG_BODY=$(echo "$TAG_RESPONSE" | head -n -1)
TAG_STATUS=$(echo "$TAG_RESPONSE" | tail -n 1)
if [ "$TAG_STATUS" != "200" ]; then
  echo "❌ GitHub API request failed for $TAG_API_URL with status $TAG_STATUS"
  echo "Response: $TAG_BODY"
  exit 1
fi
# Extract the object SHA and type
TAG_OBJECT_SHA=$(echo "$TAG_BODY" | jq -r '.object.sha')
TAG_OBJECT_TYPE=$(echo "$TAG_BODY" | jq -r '.object.type')
echo "📌 Tag $TAG_NAME points to $TAG_OBJECT_TYPE: $TAG_OBJECT_SHA"
# Determine the commit SHA based on tag type
if [ "$TAG_OBJECT_TYPE" = "tag" ]; then
  # Annotated tag - fetch the tag object to get the commit SHA
  TAG_OBJECT_URL="https://api.github.com/repos/$REPO/git/tags/$TAG_OBJECT_SHA"
  TAG_OBJECT_RESPONSE=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" -w "\n%{http_code}" "$TAG_OBJECT_URL")
  TAG_OBJECT_BODY=$(echo "$TAG_OBJECT_RESPONSE" | head -n -1)
  TAG_OBJECT_STATUS=$(echo "$TAG_OBJECT_RESPONSE" | tail -n 1)
  if [ "$TAG_OBJECT_STATUS" != "200" ]; then
    echo "❌ Failed to fetch annotated tag object with status $TAG_OBJECT_STATUS"
    echo "Response: $TAG_OBJECT_BODY"
    exit 1
  fi
  COMMIT_SHA=$(echo "$TAG_OBJECT_BODY" | jq -r '.object.sha')
  echo "📌 Annotated tag references commit: $COMMIT_SHA"
elif [ "$TAG_OBJECT_TYPE" = "commit" ]; then
  # Lightweight tag - directly references a commit
  COMMIT_SHA=$TAG_OBJECT_SHA
  echo "📌 Lightweight tag directly references commit: $COMMIT_SHA"
else
  echo "❌ Unexpected tag object type: $TAG_OBJECT_TYPE"
  exit 1
fi
# The tag points to the version commit, but artifacts were built with the previous commit
# Get the parent commit (the one that triggered the build)
echo "🔍 Getting parent commit of $COMMIT_SHA"
PARENT_COMMIT_URL="https://api.github.com/repos/$REPO/commits/$COMMIT_SHA"
PARENT_RESPONSE=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" -w "\n%{http_code}" "$PARENT_COMMIT_URL")
PARENT_BODY=$(echo "$PARENT_RESPONSE" | head -n -1)
PARENT_STATUS=$(echo "$PARENT_RESPONSE" | tail -n 1)
if [ "$PARENT_STATUS" != "200" ]; then
  echo "❌ Failed to fetch commit information with status $PARENT_STATUS"
  echo "Response: $PARENT_BODY"
  exit 1
fi
# Get the parent SHA (the commit that triggered the build)
PARENT_SHA=$(echo "$PARENT_BODY" | jq -r '.parents[0].sha')
echo "📌 Parent commit (build trigger): $PARENT_SHA"
# Find the workflow run that created the release artifacts
# Retry with exponential backoff to handle race conditions
RUNS_API_URL="https://api.github.com/repos/$REPO/actions/runs?head_sha=$PARENT_SHA&status=success&event=push"
echo "🔍 Searching for successful workflow runs for parent commit $PARENT_SHA"
MAX_RETRIES=5
RETRY_COUNT=0
MAIN_RUN=""
while [ $RETRY_COUNT -lt $MAX_RETRIES ] && [ -z "$MAIN_RUN" ]; do
  if [ $RETRY_COUNT -gt 0 ]; then
    WAIT_TIME=$((5 * RETRY_COUNT))
    echo "⏳ Waiting ${WAIT_TIME}s before retry $RETRY_COUNT/$MAX_RETRIES..."
    sleep "$WAIT_TIME"
  fi
  RUNS_RESPONSE=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" -w "\n%{http_code}" "$RUNS_API_URL")
  RUNS_BODY=$(echo "$RUNS_RESPONSE" | head -n -1)
  RUNS_STATUS=$(echo "$RUNS_RESPONSE" | tail -n 1)
  if [ "$RUNS_STATUS" != "200" ]; then
    echo "❌ Failed to fetch workflow runs with status $RUNS_STATUS"
    echo "Response: $RUNS_BODY"
    exit 1
  fi
  # Find the Main workflow run. Take the first match only — multiple matching
  # runs would otherwise yield a multi-line value and a broken RUN_ID below.
  MAIN_RUN=$(echo "$RUNS_BODY" | jq -c '[.workflow_runs[] | select(.name == "Main") | {id: .id, created_at: .created_at}][0] // empty')
  if [ -z "$MAIN_RUN" ]; then
    RETRY_COUNT=$((RETRY_COUNT + 1))
    if [ $RETRY_COUNT -lt $MAX_RETRIES ]; then
      echo "⚠️ Main workflow not found yet (attempt $RETRY_COUNT/$MAX_RETRIES)"
    fi
  fi
done
if [ -z "$MAIN_RUN" ]; then
  echo "❌ No successful Main workflow run found for parent commit $PARENT_SHA after $MAX_RETRIES attempts"
  echo "Available runs:"
  echo "$RUNS_BODY" | jq -r '.workflow_runs[] | "\(.name): \(.id) (\(.status))"'
  exit 1
fi
RUN_ID=$(echo "$MAIN_RUN" | jq -r '.id')
echo "✅ Found Main workflow run: $RUN_ID"
# Get artifacts from the workflow run
ARTIFACTS_API_URL="https://api.github.com/repos/$REPO/actions/runs/$RUN_ID/artifacts"
echo "🔍 Fetching artifacts from run $RUN_ID"
ARTIFACTS_RESPONSE=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" -w "\n%{http_code}" "$ARTIFACTS_API_URL")
ARTIFACTS_BODY=$(echo "$ARTIFACTS_RESPONSE" | head -n -1)
ARTIFACTS_STATUS=$(echo "$ARTIFACTS_RESPONSE" | tail -n 1)
if [ "$ARTIFACTS_STATUS" != "200" ]; then
  echo "❌ Failed to fetch artifacts with status $ARTIFACTS_STATUS"
  echo "Response: $ARTIFACTS_BODY"
  exit 1
fi
# Find the artifact with the specified prefix (using full parent SHA to match artifact naming)
ARTIFACT_NAME="$PREFIX-$VERSION-${PARENT_SHA}"
ARTIFACT=$(echo "$ARTIFACTS_BODY" | jq -r --arg name "$ARTIFACT_NAME" '.artifacts[] | select(.name == $name)')
if [ -z "$ARTIFACT" ]; then
  echo "❌ Artifact $ARTIFACT_NAME not found in workflow run $RUN_ID"
  echo "Available artifacts:"
  echo "$ARTIFACTS_BODY" | jq -r '.artifacts[].name'
  exit 1
fi
ARTIFACT_ID=$(echo "$ARTIFACT" | jq -r '.id')
ARTIFACT_SIZE=$(echo "$ARTIFACT" | jq -r '.size_in_bytes')
echo "✅ Found artifact: $ARTIFACT_NAME (ID: $ARTIFACT_ID, Size: $ARTIFACT_SIZE bytes)"
# Output the results for GitHub Actions
{
  echo "artifact_name=$ARTIFACT_NAME"
  echo "artifact_id=$ARTIFACT_ID"
  echo "run_id=$RUN_ID"
  echo "commit_sha=$PARENT_SHA" # Use parent SHA since that's what built the artifacts
} >> "$OUTPUT_FILE"
echo "✅ Artifact information written to $OUTPUT_FILE"
```
--------------------------------------------------------------------------------
/src/__tests__/config/service-accounts.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, expect, it, beforeEach, afterEach } from 'vitest';
import { getServiceAccountConfig } from '../../config/service-accounts.js';
// Tests for getServiceAccountConfig, which reads service-account credentials
// from environment variables: the default account uses SONARQUBE_TOKEN/URL/
// ORGANIZATION, while numbered accounts use SONARQUBE_SA<n>_* variants.
describe('Service Accounts Configuration', () => {
let originalEnv: NodeJS.ProcessEnv;
beforeEach(() => {
// Save original environment
originalEnv = { ...process.env };
// Clear relevant environment variables
delete process.env.SONARQUBE_TOKEN;
delete process.env.SONARQUBE_URL;
delete process.env.SONARQUBE_ORGANIZATION;
// Clear numbered service accounts
for (let i = 1; i <= 10; i++) {
delete process.env[`SONARQUBE_SA${i}_TOKEN`];
delete process.env[`SONARQUBE_SA${i}_URL`];
delete process.env[`SONARQUBE_SA${i}_ORGANIZATION`];
}
});
afterEach(() => {
// Restore original environment
process.env = originalEnv;
});
describe('getServiceAccountConfig', () => {
describe('default account', () => {
it('should return null when no token is set', () => {
const config = getServiceAccountConfig('default');
expect(config).toBeNull();
});
it('should return basic config with token only', () => {
process.env.SONARQUBE_TOKEN = 'test-token';
const config = getServiceAccountConfig('default');
expect(config).toEqual({
id: 'default',
token: 'test-token',
url: undefined,
organization: undefined,
});
});
it('should return full config with all environment variables', () => {
process.env.SONARQUBE_TOKEN = 'test-token';
process.env.SONARQUBE_URL = 'https://sonarqube.example.com';
process.env.SONARQUBE_ORGANIZATION = 'my-org';
const config = getServiceAccountConfig('default');
expect(config).toEqual({
id: 'default',
token: 'test-token',
url: 'https://sonarqube.example.com',
organization: 'my-org',
});
});
it('should handle empty token environment variable', () => {
// An empty-string token is treated the same as no token at all.
process.env.SONARQUBE_TOKEN = '';
const config = getServiceAccountConfig('default');
expect(config).toBeNull();
});
});
describe('numbered service accounts', () => {
it('should return null when no token is set for SA1', () => {
const config = getServiceAccountConfig('SA1');
expect(config).toBeNull();
});
it('should return basic config for SA1 with token only', () => {
process.env.SONARQUBE_SA1_TOKEN = 'sa1-token';
const config = getServiceAccountConfig('SA1');
expect(config).toEqual({
id: 'SA1',
token: 'sa1-token',
url: undefined,
organization: undefined,
});
});
it('should return full config for SA5 with all environment variables', () => {
process.env.SONARQUBE_SA5_TOKEN = 'sa5-token';
process.env.SONARQUBE_SA5_URL = 'https://sonarqube5.example.com';
process.env.SONARQUBE_SA5_ORGANIZATION = 'sa5-org';
const config = getServiceAccountConfig('SA5');
expect(config).toEqual({
id: 'SA5',
token: 'sa5-token',
url: 'https://sonarqube5.example.com',
organization: 'sa5-org',
});
});
it('should handle SA10 (double digit)', () => {
process.env.SONARQUBE_SA10_TOKEN = 'sa10-token';
process.env.SONARQUBE_SA10_URL = 'https://sonarqube10.example.com';
const config = getServiceAccountConfig('SA10');
expect(config).toEqual({
id: 'SA10',
token: 'sa10-token',
url: 'https://sonarqube10.example.com',
organization: undefined,
});
});
it('should return null for SA account with empty token', () => {
// A URL alone is not sufficient; a non-empty token is required.
process.env.SONARQUBE_SA3_TOKEN = '';
process.env.SONARQUBE_SA3_URL = 'https://sonarqube3.example.com';
const config = getServiceAccountConfig('SA3');
expect(config).toBeNull();
});
it('should handle multiple service accounts independently', () => {
process.env.SONARQUBE_SA1_TOKEN = 'sa1-token';
process.env.SONARQUBE_SA2_TOKEN = 'sa2-token';
process.env.SONARQUBE_SA2_URL = 'https://sa2.example.com';
const config1 = getServiceAccountConfig('SA1');
const config2 = getServiceAccountConfig('SA2');
expect(config1).toEqual({
id: 'SA1',
token: 'sa1-token',
url: undefined,
organization: undefined,
});
expect(config2).toEqual({
id: 'SA2',
token: 'sa2-token',
url: 'https://sa2.example.com',
organization: undefined,
});
});
});
describe('invalid account IDs', () => {
// Account IDs must be exactly 'default' or 'SA<n>' with n >= 1, no leading
// zeros, and uppercase 'SA' — everything else resolves to null.
it('should return null for unknown account ID', () => {
const config = getServiceAccountConfig('unknown');
expect(config).toBeNull();
});
it('should return null for empty account ID', () => {
const config = getServiceAccountConfig('');
expect(config).toBeNull();
});
it('should return null for SA with invalid number format', () => {
const config = getServiceAccountConfig('SA');
expect(config).toBeNull();
});
it('should return null for SA with non-numeric suffix', () => {
const config = getServiceAccountConfig('SAx');
expect(config).toBeNull();
});
it('should return null for SA with zero', () => {
const config = getServiceAccountConfig('SA0');
expect(config).toBeNull();
});
it('should return null for SA with leading zeros', () => {
const config = getServiceAccountConfig('SA01');
expect(config).toBeNull();
});
it('should return null for lowercase sa', () => {
process.env.SONARQUBE_SA1_TOKEN = 'test-token';
const config = getServiceAccountConfig('sa1');
expect(config).toBeNull();
});
it('should return null for mixed case', () => {
process.env.SONARQUBE_SA1_TOKEN = 'test-token';
const config = getServiceAccountConfig('Sa1');
expect(config).toBeNull();
});
});
describe('edge cases', () => {
it('should handle null account ID', () => {
const config = getServiceAccountConfig(null as any);
expect(config).toBeNull();
});
it('should handle undefined account ID', () => {
const config = getServiceAccountConfig(undefined as any);
expect(config).toBeNull();
});
it('should not interfere between default and numbered accounts', () => {
process.env.SONARQUBE_TOKEN = 'default-token';
process.env.SONARQUBE_SA1_TOKEN = 'sa1-token';
const defaultConfig = getServiceAccountConfig('default');
const sa1Config = getServiceAccountConfig('SA1');
expect(defaultConfig?.token).toBe('default-token');
expect(sa1Config?.token).toBe('sa1-token');
});
});
});
});
```
--------------------------------------------------------------------------------
/scripts/run-all-tests.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Master test runner for SonarQube MCP Server
# Runs all validation and test scripts in the correct order
set -e # Exit on error
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
MAGENTA='\033[0;35m'
NC='\033[0m' # No Color
echo -e "${MAGENTA}🚀 SonarQube MCP Server - Comprehensive Test Suite${NC}"
echo "==================================================="
echo "This script runs all tests for the deployment artifacts"
echo ""
# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SKIP_CLEANUP=false
TESTS_TO_RUN="all"
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--skip-cleanup)
SKIP_CLEANUP=true
shift
;;
--only)
TESTS_TO_RUN="$2"
shift 2
;;
--help)
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Options:"
echo " --skip-cleanup Don't clean up test resources after completion"
echo " --only <test> Run only specific test suite:"
echo " docs, security, monitoring, load"
echo " --help Show this help message"
exit 0
;;
*)
echo "Unknown option: $1"
echo "Run '$0 --help' for usage information"
exit 1
;;
esac
done
# Track test results
declare -A TEST_RESULTS
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Function to run a test suite
run_test_suite() {
local suite_name=$1
local script_path=$2
local description=$3
((TOTAL_TESTS++))
echo -e "\n${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${BLUE}🧪 Test Suite: $suite_name${NC}"
echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo "Description: $description"
echo "Script: $script_path"
echo ""
if [ ! -f "$script_path" ]; then
echo -e "${RED}❌ Test script not found!${NC}"
TEST_RESULTS[$suite_name]="NOT_FOUND"
((FAILED_TESTS++))
return 1
fi
if [ ! -x "$script_path" ]; then
echo -e "${YELLOW}⚠️ Making script executable...${NC}"
chmod +x "$script_path"
fi
# Run the test
if "$script_path"; then
echo -e "\n${GREEN}✅ $suite_name tests PASSED${NC}"
TEST_RESULTS[$suite_name]="PASSED"
((PASSED_TESTS++))
return 0
else
echo -e "\n${RED}❌ $suite_name tests FAILED${NC}"
TEST_RESULTS[$suite_name]="FAILED"
((FAILED_TESTS++))
return 1
fi
}
# Function to check if we should run a test
should_run_test() {
local test_name=$1
if [ "$TESTS_TO_RUN" = "all" ]; then
return 0
elif [ "$TESTS_TO_RUN" = "$test_name" ]; then
return 0
else
return 1
fi
}
# Change to project root
cd "$PROJECT_ROOT"
echo -e "${YELLOW}📋 Pre-flight checks...${NC}"
# Check if required directories exist
if [ ! -d "docs" ]; then
echo -e "${YELLOW}⚠️ Documentation directory not found${NC}"
fi
# Start test execution
echo -e "\n${GREEN}🚀 Starting test execution...${NC}"
START_TIME=$(date +%s)
# 1. Documentation Validation
if should_run_test "docs"; then
run_test_suite "Documentation" \
"$SCRIPT_DIR/validate-docs.sh" \
"Validates all documentation for broken links and code examples" || true
fi
# 4. Security Scanning
if should_run_test "security"; then
run_test_suite "Security" \
"$SCRIPT_DIR/security-scan.sh" \
"Scans for security vulnerabilities and misconfigurations" || true
fi
# 5. Monitoring Integration Tests (requires running service)
if should_run_test "monitoring"; then
echo -e "\n${YELLOW}📊 Monitoring tests require a running service${NC}"
echo "Checking if service is available locally..."
if curl -s -o /dev/null http://localhost:3000/health 2>/dev/null; then
run_test_suite "Monitoring Integration" \
"$SCRIPT_DIR/test-monitoring-integration.sh" \
"Tests monitoring endpoints and metrics collection" || true
else
echo -e "${YELLOW}⚠️ Service not running locally, skipping monitoring tests${NC}"
echo " To run: npm run dev"
fi
fi
# 7. Load Testing (optional - requires deployed service)
if should_run_test "load"; then
echo -e "\n${YELLOW}⚡ Load tests require a deployed service with HPA${NC}"
echo -n "Do you want to run load tests? (y/N): "
read -r response
if [[ "$response" =~ ^[Yy]$ ]]; then
echo -n "Enter namespace (default: sonarqube-mcp): "
read -r namespace
export NAMESPACE="${namespace:-sonarqube-mcp}"
run_test_suite "Load Testing" \
"$SCRIPT_DIR/load-test.sh" \
"Tests auto-scaling behavior under load" || true
else
echo "Skipping load tests"
fi
fi
# Calculate execution time
END_TIME=$(date +%s)
DURATION=$((END_TIME - START_TIME))
# Generate summary report
echo -e "\n${MAGENTA}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "${MAGENTA}📊 Test Execution Summary${NC}"
echo -e "${MAGENTA}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
echo -e "\nTest Results:"
for test_name in "${!TEST_RESULTS[@]}"; do
result="${TEST_RESULTS[$test_name]}"
case "$result" in
PASSED)
echo -e " ${GREEN}✅ $test_name${NC}"
;;
FAILED)
echo -e " ${RED}❌ $test_name${NC}"
;;
NOT_FOUND)
echo -e " ${YELLOW}⚠️ $test_name (script not found)${NC}"
;;
esac
done
echo -e "\nStatistics:"
echo " Total test suites: $TOTAL_TESTS"
echo -e " ${GREEN}Passed: $PASSED_TESTS${NC}"
echo -e " ${RED}Failed: $FAILED_TESTS${NC}"
echo " Execution time: ${DURATION}s"
# Provide recommendations
if [ $FAILED_TESTS -gt 0 ]; then
echo -e "\n${YELLOW}📋 Recommendations:${NC}"
if [[ "${TEST_RESULTS[Documentation]}" == "FAILED" ]]; then
echo " - Fix documentation issues (broken links, invalid code examples)"
fi
if [[ "${TEST_RESULTS[Security]}" == "FAILED" ]]; then
echo " - Address security vulnerabilities and misconfigurations"
fi
echo -e "\n${RED}⚠️ Please fix the failing tests before deployment${NC}"
exit 1
else
echo -e "\n${GREEN}🎉 All tests passed successfully!${NC}"
echo -e "\nThe deployment artifacts are ready for:"
echo " - Production deployment"
echo " - CI/CD pipeline integration"
echo " - Security review"
echo " - Performance testing"
echo -e "\n${YELLOW}Next steps:${NC}"
echo " 1. Review the changes with: git diff"
echo " 2. Commit the test scripts"
echo " 3. Run tests in CI/CD pipeline"
echo " 4. Deploy to staging environment"
fi
# Cleanup reminder
if [ "$SKIP_CLEANUP" = false ]; then
echo -e "\n${YELLOW}💡 Tip:${NC} Use --skip-cleanup to preserve test resources for debugging"
fi
exit 0
```
--------------------------------------------------------------------------------
/changes.md:
--------------------------------------------------------------------------------
```markdown
# Changes for Issue #183: Documentation & Deployment Artifacts
## Overview
This branch implements comprehensive documentation and deployment artifacts for the SonarQube MCP Server, addressing all requirements from issue #183. The changes transform the project into an enterprise-ready solution with production-grade deployment options, extensive documentation, and cloud-native support.
## Documentation Changes
### 1. Architecture Documentation (`docs/architecture.md`)
- **System Architecture**: Complete overview with Mermaid diagrams showing component relationships
- **Core Components**: Detailed explanation of Transport Layer, Authentication, Domain Services, Tool Handlers, and Monitoring
- **Data Flow**: Sequence diagrams illustrating request processing pipeline
- **Security Architecture**: Defense-in-depth approach with multiple security layers
- **Technology Stack**: Comprehensive list of technologies and their purposes
- **Architecture Decision Records**: Links to all relevant ADRs
### 2. Enterprise Deployment Guide (`docs/deployment.md`)
- **Docker Deployment**: Production configurations with docker-compose
- **Kubernetes Deployment**: Full manifest set with best practices
- **Helm Chart Usage**: Comprehensive values.yaml with all options
- **Cloud-Specific Guides**: AWS EKS, Azure AKS, and Google GKE configurations
- **Monitoring Setup**: Prometheus and Grafana integration
- **Backup & Recovery**: Audit log backup strategies
- **Performance Tuning**: Node.js and connection pool optimization
### 3. Security Configuration Guide (`docs/security.md`)
- **Authentication Methods**: Token, Basic, and Passcode authentication for SonarQube
- **Service Account Management**: Multi-tenant configuration with health monitoring
- **Permission System**: Fine-grained access control with regex project filtering
- **Data Protection**: PII redaction, encryption at rest, and response filtering
- **Compliance**: SOC 2, GDPR, and ISO 27001 control implementations
- **Incident Response**: Security monitoring and response procedures
- **Security Checklist**: 15-point verification list
### 4. Identity Provider Integration Guide (`docs/idp-integration.md`)
- **Azure AD Integration**: Step-by-step setup with app registration
- **Okta Integration**: Complete configuration with authorization server
- **Auth0 Integration**: Application setup and rule configuration
- **Keycloak Integration**: Realm and client configuration
- **Group Mapping**: Provider-specific claim transformations
- **Multi-tenant Support**: Handling multiple Azure AD tenants
- **Troubleshooting**: Common issues and debugging steps
### 5. Troubleshooting Guide (`docs/troubleshooting.md`)
- **Common Issues**: 10+ scenarios with detailed solutions
- **Diagnostic Tools**: Health checks, debug logging, and metrics
- **Error Reference**: Comprehensive error codes and meanings
- **Performance Issues**: Memory, CPU, and network troubleshooting
- **Support Resources**: Links and contact information
### 6. Performance Tuning Guide (`docs/performance.md`)
- **Resource Optimization**: CPU, memory, and connection settings
- **Caching Strategies**: Token, permission, and JWKS caching
- **Scaling Guidelines**: Horizontal and vertical scaling approaches
- **Monitoring Metrics**: Key performance indicators
- **Benchmarking**: Load testing recommendations
## Infrastructure Changes
### 1. Enhanced Dockerfile
```dockerfile
# Added health check support
RUN apk add --no-cache curl
# Security improvements
RUN addgroup -g 1001 nodejs && \
adduser -S -u 1001 -G nodejs nodejs
# Health check configuration
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD curl -f http://localhost:3000/health || exit 1
```
### 2. Kubernetes Manifests (`k8s/`)
- **base/deployment.yaml**: Production-ready with 3 replicas, resource limits, security context
- **base/service.yaml**: ClusterIP service for internal access
- **base/ingress.yaml**: NGINX ingress with TLS and annotations
- **base/configmap.yaml**: Non-sensitive configuration
- **base/secret.yaml**: Template for sensitive data
- **base/hpa.yaml**: Auto-scaling based on CPU/memory
- **base/pdb.yaml**: Pod disruption budget for high availability
- **base/networkpolicy.yaml**: Network segmentation
- **overlays/**: Environment-specific configurations (dev, staging, production)
- **base/kustomization.yaml**: Kustomize configuration
### 3. Helm Chart (`helm/sonarqube-mcp/`)
- **Chart.yaml**: Chart metadata with version 0.1.0
- **values.yaml**: Comprehensive configuration options:
- Image configuration
- Service types and ports
- Ingress with TLS
- Resource requests/limits
- Auto-scaling settings
- Persistence options
- Security contexts
- Monitoring integration
- **templates/**: All Kubernetes resources as templates
- **templates/NOTES.txt**: Post-install instructions
### 4. Terraform Modules (`terraform/`)
- **aws/main.tf**: AWS-specific resources
- **aws/variables.tf**: Input variables for customization
- **aws/outputs.tf**: Exported values
- **aws/iam.tf**: IAM roles and policies for IRSA
- **aws/cloudwatch.tf**: CloudWatch log group and metrics
- **modules/base/**: Reusable base configuration
## Project Updates
### 1. README.md Enhancement
- Added comprehensive documentation section with links to all guides
- Updated version references to v1.9.0
- Maintained existing content while adding documentation references
### 2. .gitignore Updates
- Added Terraform state files and directories
- Added Helm package artifacts
- Added Kubernetes generated files
- Added various backup and temporary files
## Key Features Implemented
### 1. Production-Ready Docker
- Multi-stage builds for smaller images
- Non-root user execution
- Health check endpoints
- Security hardening
### 2. Enterprise Kubernetes Deployment
- High availability with 3+ replicas
- Auto-scaling based on metrics
- Pod disruption budgets
- Network policies for security
- RBAC for service accounts
### 3. Flexible Helm Chart
- Configurable for any environment
- Built-in security defaults
- Monitoring integration
- Persistence support
### 4. Cloud-Native Terraform
- AWS-focused with plans for Azure/GCP
- IAM integration
- CloudWatch monitoring
- Infrastructure as Code
### 5. Comprehensive Security
- Multiple authentication methods
- Fine-grained authorization
- Audit logging
- Compliance support
- Incident response procedures
## Testing & Validation
- All YAML files are syntactically valid
- Kubernetes manifests follow best practices
- Helm chart can be rendered without errors
- Documentation is comprehensive and accurate
- All acceptance criteria from issue #183 are met
## Migration Guide
For existing users:
1. Review new security configuration options
2. Update deployment method if desired
3. Configure monitoring and observability
4. Implement recommended security practices
5. Set up backup procedures
## Next Steps
1. Deploy to staging environment for validation
2. Gather feedback from operations team
3. Create automated deployment pipelines
4. Develop additional cloud provider modules
5. Create video tutorials for complex setups
```
--------------------------------------------------------------------------------
/src/__tests__/json-array-transform.test.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
import { parseJsonStringArray } from '../utils/transforms.js';
import { issuesToolSchema } from '../schemas/issues.js';
import {
resolutionSchema,
typeSchema,
cleanCodeAttributeCategoriesSchema,
impactSeveritiesSchema,
impactSoftwareQualitiesSchema,
} from '../schemas/common.js';
describe('JSON Array Transform', () => {
describe('parseJsonStringArray', () => {
test('handles arrays correctly', () => {
const result = parseJsonStringArray(['item1', 'item2']);
expect(result).toEqual(['item1', 'item2']);
});
test('parses JSON string arrays', () => {
const result = parseJsonStringArray('["item1", "item2"]');
expect(result).toEqual(['item1', 'item2']);
});
test('handles single string as array', () => {
const result = parseJsonStringArray('single-item');
expect(result).toEqual(['single-item']);
});
test('handles null values', () => {
const result = parseJsonStringArray(null);
expect(result).toBeNull();
});
test('handles undefined values', () => {
const result = parseJsonStringArray(undefined);
expect(result).toBeUndefined();
});
test('handles invalid JSON as single item array', () => {
const result = parseJsonStringArray('{invalid json');
expect(result).toEqual(['{invalid json']);
});
});
describe('issues schema with JSON arrays', () => {
const schema = z.object(issuesToolSchema);
test('accepts facets as array', () => {
const result = schema.parse({
facets: ['severities', 'statuses', 'types', 'rules', 'files'],
});
expect(result.facets).toEqual(['severities', 'statuses', 'types', 'rules', 'files']);
});
test('accepts facets as JSON string', () => {
const result = schema.parse({
facets: '["severities", "statuses", "types", "rules", "files"]',
});
expect(result.facets).toEqual(['severities', 'statuses', 'types', 'rules', 'files']);
});
test('accepts multiple array fields as JSON strings', () => {
const result = schema.parse({
projects: '["project1", "project2"]',
facets: '["severities", "statuses"]',
tags: '["security", "performance"]',
assignees: '["user1", "user2"]',
rules: '["rule1", "rule2"]',
severities: '["CRITICAL", "MAJOR"]',
statuses: '["OPEN", "CONFIRMED"]',
});
expect(result.projects).toEqual(['project1', 'project2']);
expect(result.facets).toEqual(['severities', 'statuses']);
expect(result.tags).toEqual(['security', 'performance']);
expect(result.assignees).toEqual(['user1', 'user2']);
expect(result.rules).toEqual(['rule1', 'rule2']);
expect(result.severities).toEqual(['CRITICAL', 'MAJOR']);
expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
});
test('filters invalid enum values', () => {
const result = schema.parse({
severities: '["CRITICAL", "INVALID", "MAJOR"]',
statuses: '["OPEN", "INVALID_STATUS", "CONFIRMED"]',
scopes: '["MAIN", "INVALID_SCOPE", "TEST"]',
});
expect(result.severities).toEqual(['CRITICAL', 'MAJOR']);
expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
expect(result.scopes).toEqual(['MAIN', 'TEST']);
});
test('handles complex MCP client scenario', () => {
// This simulates what an MCP client might send
const input = {
project_key: 'sonarqube-mcp-server',
page_size: '50',
facets: '["severities", "statuses", "types", "rules", "files"]',
};
const result = schema.parse(input);
expect(result.project_key).toBe('sonarqube-mcp-server');
expect(result.page_size).toBe(50); // Transformed by stringToNumberTransform
expect(result.facets).toEqual(['severities', 'statuses', 'types', 'rules', 'files']);
});
test('handles resolutions with JSON strings and filters invalid values', () => {
const result = schema.parse({
resolutions: '["FALSE-POSITIVE", "INVALID", "WONTFIX"]',
});
expect(result.resolutions).toEqual(['FALSE-POSITIVE', 'WONTFIX']);
});
test('handles types with JSON strings and filters invalid values', () => {
const result = schema.parse({
types: '["BUG", "INVALID_TYPE", "VULNERABILITY"]',
});
expect(result.types).toEqual(['BUG', 'VULNERABILITY']);
});
test('handles clean code attributes with JSON strings', () => {
const result = schema.parse({
clean_code_attribute_categories: '["ADAPTABLE", "INVALID", "CONSISTENT"]',
});
expect(result.clean_code_attribute_categories).toEqual(['ADAPTABLE', 'CONSISTENT']);
});
test('handles impact severities with JSON strings', () => {
const result = schema.parse({
impact_severities: '["HIGH", "INVALID", "LOW"]',
});
expect(result.impact_severities).toEqual(['HIGH', 'LOW']);
});
test('handles impact software qualities with JSON strings', () => {
const result = schema.parse({
impact_software_qualities: '["MAINTAINABILITY", "INVALID", "SECURITY"]',
});
expect(result.impact_software_qualities).toEqual(['MAINTAINABILITY', 'SECURITY']);
});
});
describe('common schemas with JSON arrays', () => {
test('resolutionSchema accepts JSON strings and filters invalid values', () => {
const schema = z.object({ resolution: resolutionSchema });
const result = schema.parse({
resolution: '["FALSE-POSITIVE", "INVALID_RESOLUTION", "FIXED"]',
});
expect(result.resolution).toEqual(['FALSE-POSITIVE', 'FIXED']);
});
test('resolutionSchema accepts arrays directly', () => {
const schema = z.object({ resolution: resolutionSchema });
const result = schema.parse({
resolution: ['WONTFIX', 'REMOVED'],
});
expect(result.resolution).toEqual(['WONTFIX', 'REMOVED']);
});
test('typeSchema accepts JSON strings and filters invalid values', () => {
const schema = z.object({ type: typeSchema });
const result = schema.parse({
type: '["CODE_SMELL", "INVALID_TYPE", "BUG"]',
});
expect(result.type).toEqual(['CODE_SMELL', 'BUG']);
});
test('cleanCodeAttributeCategoriesSchema accepts JSON strings', () => {
const schema = z.object({ categories: cleanCodeAttributeCategoriesSchema });
const result = schema.parse({
categories: '["INTENTIONAL", "INVALID_CATEGORY", "RESPONSIBLE"]',
});
expect(result.categories).toEqual(['INTENTIONAL', 'RESPONSIBLE']);
});
test('impactSeveritiesSchema accepts JSON strings', () => {
const schema = z.object({ severities: impactSeveritiesSchema });
const result = schema.parse({
severities: '["HIGH", "INVALID_SEVERITY", "MEDIUM"]',
});
expect(result.severities).toEqual(['HIGH', 'MEDIUM']);
});
test('impactSoftwareQualitiesSchema accepts JSON strings', () => {
const schema = z.object({ qualities: impactSoftwareQualitiesSchema });
const result = schema.parse({
qualities: '["RELIABILITY", "INVALID_QUALITY", "SECURITY"]',
});
expect(result.qualities).toEqual(['RELIABILITY', 'SECURITY']);
});
});
});
```