This is page 5 of 11. Use http://codebase.md/sapientpants/sonarqube-mcp-server?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .adr-dir
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ ├── analyze-and-fix-github-issue.md
│ │ ├── fix-sonarqube-issues.md
│ │ ├── implement-github-issue.md
│ │ ├── release.md
│ │ ├── spec-feature.md
│ │ └── update-dependencies.md
│ ├── hooks
│ │ └── block-git-no-verify.ts
│ └── settings.json
├── .dockerignore
├── .github
│ ├── actionlint.yaml
│ ├── changeset.yml
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ └── feature_request.md
│ ├── pull_request_template.md
│ ├── scripts
│ │ ├── determine-artifact.sh
│ │ └── version-and-release.js
│ ├── workflows
│ │ ├── codeql.yml
│ │ ├── main.yml
│ │ ├── pr.yml
│ │ ├── publish.yml
│ │ ├── reusable-docker.yml
│ │ ├── reusable-security.yml
│ │ └── reusable-validate.yml
│ └── WORKFLOWS.md
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .markdownlint.yaml
├── .markdownlintignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── .trivyignore
├── .yaml-lint.yml
├── .yamllintignore
├── CHANGELOG.md
├── changes.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── COMPATIBILITY.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── architecture
│ │ └── decisions
│ │ ├── 0001-record-architecture-decisions.md
│ │ ├── 0002-use-node-js-with-typescript.md
│ │ ├── 0003-adopt-model-context-protocol-for-sonarqube-integration.md
│ │ ├── 0004-use-sonarqube-web-api-client-for-all-sonarqube-interactions.md
│ │ ├── 0005-domain-driven-design-of-sonarqube-modules.md
│ │ ├── 0006-expose-sonarqube-features-as-mcp-tools.md
│ │ ├── 0007-support-multiple-authentication-methods-for-sonarqube.md
│ │ ├── 0008-use-environment-variables-for-configuration.md
│ │ ├── 0009-file-based-logging-to-avoid-stdio-conflicts.md
│ │ ├── 0010-use-stdio-transport-for-mcp-communication.md
│ │ ├── 0011-docker-containerization-for-deployment.md
│ │ ├── 0012-add-elicitation-support-for-interactive-user-input.md
│ │ ├── 0014-current-security-model-and-future-oauth2-considerations.md
│ │ ├── 0015-transport-architecture-refactoring.md
│ │ ├── 0016-http-transport-with-oauth-2-0-metadata-endpoints.md
│ │ ├── 0017-comprehensive-audit-logging-system.md
│ │ ├── 0018-add-comprehensive-monitoring-and-observability.md
│ │ ├── 0019-simplify-to-stdio-only-transport-for-mcp-gateway-deployment.md
│ │ ├── 0020-testing-framework-and-strategy-vitest-with-property-based-testing.md
│ │ ├── 0021-code-quality-toolchain-eslint-prettier-strict-typescript.md
│ │ ├── 0022-package-manager-choice-pnpm.md
│ │ ├── 0023-release-management-with-changesets.md
│ │ ├── 0024-ci-cd-platform-github-actions.md
│ │ ├── 0025-container-and-security-scanning-strategy.md
│ │ ├── 0026-circuit-breaker-pattern-with-opossum.md
│ │ ├── 0027-docker-image-publishing-strategy-ghcr-to-docker-hub.md
│ │ └── 0028-session-based-http-transport-with-server-sent-events.md
│ ├── architecture.md
│ ├── security.md
│ └── troubleshooting.md
├── eslint.config.js
├── examples
│ └── http-client.ts
├── jest.config.js
├── LICENSE
├── LICENSES.md
├── osv-scanner.toml
├── package.json
├── pnpm-lock.yaml
├── README.md
├── scripts
│ ├── actionlint.sh
│ ├── ci-local.sh
│ ├── load-test.sh
│ ├── README.md
│ ├── run-all-tests.sh
│ ├── scan-container.sh
│ ├── security-scan.sh
│ ├── setup.sh
│ ├── test-monitoring-integration.sh
│ └── validate-docs.sh
├── SECURITY.md
├── sonar-project.properties
├── src
│ ├── __tests__
│ │ ├── additional-coverage.test.ts
│ │ ├── advanced-index.test.ts
│ │ ├── assign-issue.test.ts
│ │ ├── auth-methods.test.ts
│ │ ├── boolean-string-transform.test.ts
│ │ ├── components.test.ts
│ │ ├── config
│ │ │ └── service-accounts.test.ts
│ │ ├── dependency-injection.test.ts
│ │ ├── direct-handlers.test.ts
│ │ ├── direct-lambdas.test.ts
│ │ ├── direct-schema-validation.test.ts
│ │ ├── domains
│ │ │ ├── components-domain-full.test.ts
│ │ │ ├── components-domain.test.ts
│ │ │ ├── hotspots-domain.test.ts
│ │ │ └── source-code-domain.test.ts
│ │ ├── environment-validation.test.ts
│ │ ├── error-handler.test.ts
│ │ ├── error-handling.test.ts
│ │ ├── errors.test.ts
│ │ ├── function-tests.test.ts
│ │ ├── handlers
│ │ │ ├── components-handler-integration.test.ts
│ │ │ └── projects-authorization.test.ts
│ │ ├── handlers.test.ts
│ │ ├── handlers.test.ts.skip
│ │ ├── index.test.ts
│ │ ├── issue-resolution-elicitation.test.ts
│ │ ├── issue-resolution.test.ts
│ │ ├── issue-transitions.test.ts
│ │ ├── issues-enhanced-search.test.ts
│ │ ├── issues-new-parameters.test.ts
│ │ ├── json-array-transform.test.ts
│ │ ├── lambda-functions.test.ts
│ │ ├── lambda-handlers.test.ts.skip
│ │ ├── logger.test.ts
│ │ ├── mapping-functions.test.ts
│ │ ├── mocked-environment.test.ts
│ │ ├── null-to-undefined.test.ts
│ │ ├── parameter-transformations-advanced.test.ts
│ │ ├── parameter-transformations.test.ts
│ │ ├── protocol-version.test.ts
│ │ ├── pull-request-transform.test.ts
│ │ ├── quality-gates.test.ts
│ │ ├── schema-parameter-transforms.test.ts
│ │ ├── schema-transformation-mocks.test.ts
│ │ ├── schema-transforms.test.ts
│ │ ├── schema-validators.test.ts
│ │ ├── schemas
│ │ │ ├── components-schema.test.ts
│ │ │ ├── hotspots-tools-schema.test.ts
│ │ │ └── issues-schema.test.ts
│ │ ├── sonarqube-elicitation.test.ts
│ │ ├── sonarqube.test.ts
│ │ ├── source-code.test.ts
│ │ ├── standalone-handlers.test.ts
│ │ ├── string-to-number-transform.test.ts
│ │ ├── tool-handler-lambdas.test.ts
│ │ ├── tool-handlers.test.ts
│ │ ├── tool-registration-schema.test.ts
│ │ ├── tool-registration-transforms.test.ts
│ │ ├── transformation-util.test.ts
│ │ ├── transports
│ │ │ ├── base.test.ts
│ │ │ ├── factory.test.ts
│ │ │ ├── http.test.ts
│ │ │ ├── session-manager.test.ts
│ │ │ └── stdio.test.ts
│ │ ├── utils
│ │ │ ├── retry.test.ts
│ │ │ └── transforms.test.ts
│ │ ├── zod-boolean-transform.test.ts
│ │ ├── zod-schema-transforms.test.ts
│ │ └── zod-transforms.test.ts
│ ├── config
│ │ ├── service-accounts.ts
│ │ └── versions.ts
│ ├── domains
│ │ ├── base.ts
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── errors.ts
│ ├── handlers
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── index.ts
│ ├── monitoring
│ │ ├── __tests__
│ │ │ └── circuit-breaker.test.ts
│ │ ├── circuit-breaker.ts
│ │ ├── health.ts
│ │ └── metrics.ts
│ ├── schemas
│ │ ├── common.ts
│ │ ├── components.ts
│ │ ├── hotspots-tools.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── sonarqube.ts
│ ├── transports
│ │ ├── base.ts
│ │ ├── factory.ts
│ │ ├── http.ts
│ │ ├── index.ts
│ │ ├── session-manager.ts
│ │ └── stdio.ts
│ ├── types
│ │ ├── common.ts
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ └── utils
│ ├── __tests__
│ │ ├── elicitation.test.ts
│ │ ├── pattern-matcher.test.ts
│ │ └── structured-response.test.ts
│ ├── client-factory.ts
│ ├── elicitation.ts
│ ├── error-handler.ts
│ ├── logger.ts
│ ├── parameter-mappers.ts
│ ├── pattern-matcher.ts
│ ├── retry.ts
│ ├── structured-response.ts
│ └── transforms.ts
├── test-http-transport.sh
├── tmp
│ └── .gitkeep
├── tsconfig.build.json
├── tsconfig.json
├── vitest.config.d.ts
├── vitest.config.js
├── vitest.config.js.map
└── vitest.config.ts
```
# Files
--------------------------------------------------------------------------------
/changes.md:
--------------------------------------------------------------------------------
```markdown
1 | # Changes for Issue #183: Documentation & Deployment Artifacts
2 |
3 | ## Overview
4 |
5 | This branch implements comprehensive documentation and deployment artifacts for the SonarQube MCP Server, addressing all requirements from issue #183. The changes transform the project into an enterprise-ready solution with production-grade deployment options, extensive documentation, and cloud-native support.
6 |
7 | ## Documentation Changes
8 |
9 | ### 1. Architecture Documentation (`docs/architecture.md`)
10 |
11 | - **System Architecture**: Complete overview with Mermaid diagrams showing component relationships
12 | - **Core Components**: Detailed explanation of Transport Layer, Authentication, Domain Services, Tool Handlers, and Monitoring
13 | - **Data Flow**: Sequence diagrams illustrating request processing pipeline
14 | - **Security Architecture**: Defense-in-depth approach with multiple security layers
15 | - **Technology Stack**: Comprehensive list of technologies and their purposes
16 | - **Architecture Decision Records**: Links to all relevant ADRs
17 |
18 | ### 2. Enterprise Deployment Guide (`docs/deployment.md`)
19 |
20 | - **Docker Deployment**: Production configurations with docker-compose
21 | - **Kubernetes Deployment**: Full manifest set with best practices
22 | - **Helm Chart Usage**: Comprehensive values.yaml with all options
23 | - **Cloud-Specific Guides**: AWS EKS, Azure AKS, and Google GKE configurations
24 | - **Monitoring Setup**: Prometheus and Grafana integration
25 | - **Backup & Recovery**: Audit log backup strategies
26 | - **Performance Tuning**: Node.js and connection pool optimization
27 |
28 | ### 3. Security Configuration Guide (`docs/security.md`)
29 |
30 | - **Authentication Methods**: Token, Basic, and Passcode authentication for SonarQube
31 | - **Service Account Management**: Multi-tenant configuration with health monitoring
32 | - **Permission System**: Fine-grained access control with regex project filtering
33 | - **Data Protection**: PII redaction, encryption at rest, and response filtering
34 | - **Compliance**: SOC 2, GDPR, and ISO 27001 control implementations
35 | - **Incident Response**: Security monitoring and response procedures
36 | - **Security Checklist**: 15-point verification list
37 |
38 | ### 4. Identity Provider Integration Guide (`docs/idp-integration.md`)
39 |
40 | - **Azure AD Integration**: Step-by-step setup with app registration
41 | - **Okta Integration**: Complete configuration with authorization server
42 | - **Auth0 Integration**: Application setup and rule configuration
43 | - **Keycloak Integration**: Realm and client configuration
44 | - **Group Mapping**: Provider-specific claim transformations
45 | - **Multi-tenant Support**: Handling multiple Azure AD tenants
46 | - **Troubleshooting**: Common issues and debugging steps
47 |
48 | ### 5. Troubleshooting Guide (`docs/troubleshooting.md`)
49 |
50 | - **Common Issues**: 10+ scenarios with detailed solutions
51 | - **Diagnostic Tools**: Health checks, debug logging, and metrics
52 | - **Error Reference**: Comprehensive error codes and meanings
53 | - **Performance Issues**: Memory, CPU, and network troubleshooting
54 | - **Support Resources**: Links and contact information
55 |
56 | ### 6. Performance Tuning Guide (`docs/performance.md`)
57 |
58 | - **Resource Optimization**: CPU, memory, and connection settings
59 | - **Caching Strategies**: Token, permission, and JWKS caching
60 | - **Scaling Guidelines**: Horizontal and vertical scaling approaches
61 | - **Monitoring Metrics**: Key performance indicators
62 | - **Benchmarking**: Load testing recommendations
63 |
64 | ## Infrastructure Changes
65 |
66 | ### 1. Enhanced Dockerfile
67 |
68 | ```dockerfile
69 | # Added health check support
70 | RUN apk add --no-cache curl
71 |
72 | # Security improvements
73 | RUN addgroup -g 1001 nodejs && \
74 | adduser -S -u 1001 -G nodejs nodejs
75 |
76 | # Health check configuration
77 | HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
78 | CMD curl -f http://localhost:3000/health || exit 1
79 | ```
80 |
81 | ### 2. Kubernetes Manifests (`k8s/`)
82 |
83 | - **base/deployment.yaml**: Production-ready with 3 replicas, resource limits, security context
84 | - **base/service.yaml**: ClusterIP service for internal access
85 | - **base/ingress.yaml**: NGINX ingress with TLS and annotations
86 | - **base/configmap.yaml**: Non-sensitive configuration
87 | - **base/secret.yaml**: Template for sensitive data
88 | - **base/hpa.yaml**: Auto-scaling based on CPU/memory
89 | - **base/pdb.yaml**: Pod disruption budget for high availability
90 | - **base/networkpolicy.yaml**: Network segmentation
91 | - **overlays/**: Environment-specific configurations (dev, staging, production)
92 | - **base/kustomization.yaml**: Kustomize configuration
93 |
94 | ### 3. Helm Chart (`helm/sonarqube-mcp/`)
95 |
96 | - **Chart.yaml**: Chart metadata with version 0.1.0
97 | - **values.yaml**: Comprehensive configuration options:
98 | - Image configuration
99 | - Service types and ports
100 | - Ingress with TLS
101 | - Resource requests/limits
102 | - Auto-scaling settings
103 | - Persistence options
104 | - Security contexts
105 | - Monitoring integration
106 | - **templates/**: All Kubernetes resources as templates
107 | - **templates/NOTES.txt**: Post-install instructions
108 |
109 | ### 4. Terraform Modules (`terraform/`)
110 |
111 | - **aws/main.tf**: AWS-specific resources
112 | - **aws/variables.tf**: Input variables for customization
113 | - **aws/outputs.tf**: Exported values
114 | - **aws/iam.tf**: IAM roles and policies for IRSA
115 | - **aws/cloudwatch.tf**: CloudWatch log group and metrics
116 | - **modules/base/**: Reusable base configuration
117 |
118 | ## Project Updates
119 |
120 | ### 1. README.md Enhancement
121 |
122 | - Added comprehensive documentation section with links to all guides
123 | - Updated version references to v1.9.0
124 | - Maintained existing content while adding documentation references
125 |
126 | ### 2. .gitignore Updates
127 |
128 | - Added Terraform state files and directories
129 | - Added Helm package artifacts
130 | - Added Kubernetes generated files
131 | - Added various backup and temporary files
132 |
133 | ## Key Features Implemented
134 |
135 | ### 1. Production-Ready Docker
136 |
137 | - Multi-stage builds for smaller images
138 | - Non-root user execution
139 | - Health check endpoints
140 | - Security hardening
141 |
142 | ### 2. Enterprise Kubernetes Deployment
143 |
144 | - High availability with 3+ replicas
145 | - Auto-scaling based on metrics
146 | - Pod disruption budgets
147 | - Network policies for security
148 | - RBAC for service accounts
149 |
150 | ### 3. Flexible Helm Chart
151 |
152 | - Configurable for any environment
153 | - Built-in security defaults
154 | - Monitoring integration
155 | - Persistence support
156 |
157 | ### 4. Cloud-Native Terraform
158 |
159 | - AWS-focused with plans for Azure/GCP
160 | - IAM integration
161 | - CloudWatch monitoring
162 | - Infrastructure as Code
163 |
164 | ### 5. Comprehensive Security
165 |
166 | - Multiple authentication methods
167 | - Fine-grained authorization
168 | - Audit logging
169 | - Compliance support
170 | - Incident response procedures
171 |
172 | ## Testing & Validation
173 |
174 | - All YAML files are syntactically valid
175 | - Kubernetes manifests follow best practices
176 | - Helm chart can be rendered without errors
177 | - Documentation is comprehensive and accurate
178 | - All acceptance criteria from issue #183 are met
179 |
180 | ## Migration Guide
181 |
182 | For existing users:
183 |
184 | 1. Review new security configuration options
185 | 2. Update deployment method if desired
186 | 3. Configure monitoring and observability
187 | 4. Implement recommended security practices
188 | 5. Set up backup procedures
189 |
190 | ## Next Steps
191 |
192 | 1. Deploy to staging environment for validation
193 | 2. Gather feedback from operations team
194 | 3. Create automated deployment pipelines
195 | 4. Develop additional cloud provider modules
196 | 5. Create video tutorials for complex setups
197 |
```
--------------------------------------------------------------------------------
/src/__tests__/json-array-transform.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod';
2 | import { parseJsonStringArray } from '../utils/transforms.js';
3 | import { issuesToolSchema } from '../schemas/issues.js';
4 | import {
5 | resolutionSchema,
6 | typeSchema,
7 | cleanCodeAttributeCategoriesSchema,
8 | impactSeveritiesSchema,
9 | impactSoftwareQualitiesSchema,
10 | } from '../schemas/common.js';
11 |
12 | describe('JSON Array Transform', () => {
13 | describe('parseJsonStringArray', () => {
14 | test('handles arrays correctly', () => {
15 | const result = parseJsonStringArray(['item1', 'item2']);
16 | expect(result).toEqual(['item1', 'item2']);
17 | });
18 |
19 | test('parses JSON string arrays', () => {
20 | const result = parseJsonStringArray('["item1", "item2"]');
21 | expect(result).toEqual(['item1', 'item2']);
22 | });
23 |
24 | test('handles single string as array', () => {
25 | const result = parseJsonStringArray('single-item');
26 | expect(result).toEqual(['single-item']);
27 | });
28 |
29 | test('handles null values', () => {
30 | const result = parseJsonStringArray(null);
31 | expect(result).toBeNull();
32 | });
33 |
34 | test('handles undefined values', () => {
35 | const result = parseJsonStringArray(undefined);
36 | expect(result).toBeUndefined();
37 | });
38 |
39 | test('handles invalid JSON as single item array', () => {
40 | const result = parseJsonStringArray('{invalid json');
41 | expect(result).toEqual(['{invalid json']);
42 | });
43 | });
44 |
45 | describe('issues schema with JSON arrays', () => {
46 | const schema = z.object(issuesToolSchema);
47 |
48 | test('accepts facets as array', () => {
49 | const result = schema.parse({
50 | facets: ['severities', 'statuses', 'types', 'rules', 'files'],
51 | });
52 | expect(result.facets).toEqual(['severities', 'statuses', 'types', 'rules', 'files']);
53 | });
54 |
55 | test('accepts facets as JSON string', () => {
56 | const result = schema.parse({
57 | facets: '["severities", "statuses", "types", "rules", "files"]',
58 | });
59 | expect(result.facets).toEqual(['severities', 'statuses', 'types', 'rules', 'files']);
60 | });
61 |
62 | test('accepts multiple array fields as JSON strings', () => {
63 | const result = schema.parse({
64 | projects: '["project1", "project2"]',
65 | facets: '["severities", "statuses"]',
66 | tags: '["security", "performance"]',
67 | assignees: '["user1", "user2"]',
68 | rules: '["rule1", "rule2"]',
69 | severities: '["CRITICAL", "MAJOR"]',
70 | statuses: '["OPEN", "CONFIRMED"]',
71 | });
72 |
73 | expect(result.projects).toEqual(['project1', 'project2']);
74 | expect(result.facets).toEqual(['severities', 'statuses']);
75 | expect(result.tags).toEqual(['security', 'performance']);
76 | expect(result.assignees).toEqual(['user1', 'user2']);
77 | expect(result.rules).toEqual(['rule1', 'rule2']);
78 | expect(result.severities).toEqual(['CRITICAL', 'MAJOR']);
79 | expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
80 | });
81 |
82 | test('filters invalid enum values', () => {
83 | const result = schema.parse({
84 | severities: '["CRITICAL", "INVALID", "MAJOR"]',
85 | statuses: '["OPEN", "INVALID_STATUS", "CONFIRMED"]',
86 | scopes: '["MAIN", "INVALID_SCOPE", "TEST"]',
87 | });
88 |
89 | expect(result.severities).toEqual(['CRITICAL', 'MAJOR']);
90 | expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
91 | expect(result.scopes).toEqual(['MAIN', 'TEST']);
92 | });
93 |
94 | test('handles complex MCP client scenario', () => {
95 | // This simulates what an MCP client might send
96 | const input = {
97 | project_key: 'sonarqube-mcp-server',
98 | page_size: '50',
99 | facets: '["severities", "statuses", "types", "rules", "files"]',
100 | };
101 |
102 | const result = schema.parse(input);
103 |
104 | expect(result.project_key).toBe('sonarqube-mcp-server');
105 | expect(result.page_size).toBe(50); // Transformed by stringToNumberTransform
106 | expect(result.facets).toEqual(['severities', 'statuses', 'types', 'rules', 'files']);
107 | });
108 |
109 | test('handles resolutions with JSON strings and filters invalid values', () => {
110 | const result = schema.parse({
111 | resolutions: '["FALSE-POSITIVE", "INVALID", "WONTFIX"]',
112 | });
113 |
114 | expect(result.resolutions).toEqual(['FALSE-POSITIVE', 'WONTFIX']);
115 | });
116 |
117 | test('handles types with JSON strings and filters invalid values', () => {
118 | const result = schema.parse({
119 | types: '["BUG", "INVALID_TYPE", "VULNERABILITY"]',
120 | });
121 |
122 | expect(result.types).toEqual(['BUG', 'VULNERABILITY']);
123 | });
124 |
125 | test('handles clean code attributes with JSON strings', () => {
126 | const result = schema.parse({
127 | clean_code_attribute_categories: '["ADAPTABLE", "INVALID", "CONSISTENT"]',
128 | });
129 |
130 | expect(result.clean_code_attribute_categories).toEqual(['ADAPTABLE', 'CONSISTENT']);
131 | });
132 |
133 | test('handles impact severities with JSON strings', () => {
134 | const result = schema.parse({
135 | impact_severities: '["HIGH", "INVALID", "LOW"]',
136 | });
137 |
138 | expect(result.impact_severities).toEqual(['HIGH', 'LOW']);
139 | });
140 |
141 | test('handles impact software qualities with JSON strings', () => {
142 | const result = schema.parse({
143 | impact_software_qualities: '["MAINTAINABILITY", "INVALID", "SECURITY"]',
144 | });
145 |
146 | expect(result.impact_software_qualities).toEqual(['MAINTAINABILITY', 'SECURITY']);
147 | });
148 | });
149 |
150 | describe('common schemas with JSON arrays', () => {
151 | test('resolutionSchema accepts JSON strings and filters invalid values', () => {
152 | const schema = z.object({ resolution: resolutionSchema });
153 |
154 | const result = schema.parse({
155 | resolution: '["FALSE-POSITIVE", "INVALID_RESOLUTION", "FIXED"]',
156 | });
157 |
158 | expect(result.resolution).toEqual(['FALSE-POSITIVE', 'FIXED']);
159 | });
160 |
161 | test('resolutionSchema accepts arrays directly', () => {
162 | const schema = z.object({ resolution: resolutionSchema });
163 |
164 | const result = schema.parse({
165 | resolution: ['WONTFIX', 'REMOVED'],
166 | });
167 |
168 | expect(result.resolution).toEqual(['WONTFIX', 'REMOVED']);
169 | });
170 |
171 | test('typeSchema accepts JSON strings and filters invalid values', () => {
172 | const schema = z.object({ type: typeSchema });
173 |
174 | const result = schema.parse({
175 | type: '["CODE_SMELL", "INVALID_TYPE", "BUG"]',
176 | });
177 |
178 | expect(result.type).toEqual(['CODE_SMELL', 'BUG']);
179 | });
180 |
181 | test('cleanCodeAttributeCategoriesSchema accepts JSON strings', () => {
182 | const schema = z.object({ categories: cleanCodeAttributeCategoriesSchema });
183 |
184 | const result = schema.parse({
185 | categories: '["INTENTIONAL", "INVALID_CATEGORY", "RESPONSIBLE"]',
186 | });
187 |
188 | expect(result.categories).toEqual(['INTENTIONAL', 'RESPONSIBLE']);
189 | });
190 |
191 | test('impactSeveritiesSchema accepts JSON strings', () => {
192 | const schema = z.object({ severities: impactSeveritiesSchema });
193 |
194 | const result = schema.parse({
195 | severities: '["HIGH", "INVALID_SEVERITY", "MEDIUM"]',
196 | });
197 |
198 | expect(result.severities).toEqual(['HIGH', 'MEDIUM']);
199 | });
200 |
201 | test('impactSoftwareQualitiesSchema accepts JSON strings', () => {
202 | const schema = z.object({ qualities: impactSoftwareQualitiesSchema });
203 |
204 | const result = schema.parse({
205 | qualities: '["RELIABILITY", "INVALID_QUALITY", "SECURITY"]',
206 | });
207 |
208 | expect(result.qualities).toEqual(['RELIABILITY', 'SECURITY']);
209 | });
210 | });
211 | });
212 |
```
--------------------------------------------------------------------------------
/src/__tests__/handlers.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeEach, afterEach, beforeAll, vi } from 'vitest';
2 | // Mock environment variables
3 | process.env.SONARQUBE_TOKEN = 'test-token';
4 | process.env.SONARQUBE_URL = 'http://localhost:9000';
5 | process.env.SONARQUBE_ORGANIZATION = 'test-org';
6 |
7 | // Mock the sonarqube client
8 | vi.mock('../sonarqube.js', () => ({
9 | createSonarQubeClientFromEnv: vi.fn(() => ({
10 | listProjects: vi.fn().mockResolvedValue({
11 | projects: [{ key: 'test-project', name: 'Test Project' }],
12 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
13 | }),
14 | getIssues: vi.fn().mockResolvedValue({
15 | issues: [{ key: 'test-issue', rule: 'test-rule', severity: 'MAJOR' }],
16 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
17 | }),
18 | getMetrics: vi.fn().mockResolvedValue({
19 | metrics: [{ key: 'coverage', name: 'Coverage' }],
20 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
21 | }),
22 | getHealth: vi.fn().mockResolvedValue({ health: 'GREEN', causes: [] }),
23 | getStatus: vi.fn().mockResolvedValue({ id: 'test-id', version: '10.3.0.82913', status: 'UP' }),
24 | ping: vi.fn().mockResolvedValue('pong'),
25 | getComponentMeasures: vi.fn().mockResolvedValue({
26 | component: { key: 'test-component', measures: [{ metric: 'coverage', value: '85.4' }] },
27 | metrics: [{ key: 'coverage', name: 'Coverage' }],
28 | }),
29 | getComponentsMeasures: vi.fn().mockResolvedValue({
30 | components: [{ key: 'test-component-1', measures: [{ metric: 'coverage', value: '85.4' }] }],
31 | metrics: [{ key: 'coverage', name: 'Coverage' }],
32 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
33 | }),
34 | getMeasuresHistory: vi.fn().mockResolvedValue({
35 | measures: [{ metric: 'coverage', history: [{ date: '2023-01-01', value: '85.4' }] }],
36 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
37 | }),
38 | })),
39 | setSonarQubeElicitationManager: vi.fn(),
40 | createSonarQubeClientFromEnvWithElicitation: vi.fn(() =>
41 | Promise.resolve({
42 | listProjects: vi.fn(),
43 | getIssues: vi.fn(),
44 | })
45 | ),
46 | }));
47 |
48 | // Save environment variables
49 | const originalEnv = process.env;
50 | let handleSonarQubeProjects: any;
51 | let handleSonarQubeGetIssues: any;
52 | let handleSonarQubeGetMetrics: any;
53 | let handleSonarQubeGetHealth: any;
54 | let handleSonarQubeGetStatus: any;
55 | let handleSonarQubePing: any;
56 | let handleSonarQubeComponentMeasures: any;
57 | let handleSonarQubeComponentsMeasures: any;
58 | let handleSonarQubeMeasuresHistory: any;
59 | // No need to mock axios anymore since we're using sonarqube-web-api-client
60 | describe('Handler Functions', () => {
61 | beforeAll(async () => {
62 | const module = await import('../index.js');
63 | handleSonarQubeProjects = module.handleSonarQubeProjects;
64 | handleSonarQubeGetIssues = module.handleSonarQubeGetIssues;
65 | handleSonarQubeGetMetrics = module.handleSonarQubeGetMetrics;
66 | handleSonarQubeGetHealth = module.handleSonarQubeGetHealth;
67 | handleSonarQubeGetStatus = module.handleSonarQubeGetStatus;
68 | handleSonarQubePing = module.handleSonarQubePing;
69 | handleSonarQubeComponentMeasures = module.handleSonarQubeComponentMeasures;
70 | handleSonarQubeComponentsMeasures = module.handleSonarQubeComponentsMeasures;
71 | handleSonarQubeMeasuresHistory = module.handleSonarQubeMeasuresHistory;
72 | });
73 | beforeEach(() => {
74 | vi.resetModules();
75 | process.env = { ...originalEnv };
76 | });
77 | afterEach(() => {
78 | process.env = originalEnv;
79 | vi.clearAllMocks();
80 | });
81 | describe('handleSonarQubeProjects', () => {
82 | it('should handle projects correctly', async () => {
83 | const result = await handleSonarQubeProjects({});
84 | const data = JSON.parse(result.content[0].text);
85 | expect(data.projects).toBeDefined();
86 | expect(data.projects).toHaveLength(1);
87 | expect(data.projects[0].key).toBe('test-project');
88 | expect(data.paging).toBeDefined();
89 | });
90 | it('should handle pagination parameters', async () => {
91 | const result = await handleSonarQubeProjects({ page: 2, page_size: 10 });
92 | const data = JSON.parse(result.content[0].text);
93 | expect(data.projects).toBeDefined();
94 | expect(data.paging).toBeDefined();
95 | });
96 | });
97 | describe('handleSonarQubeGetIssues', () => {
98 | it('should handle issues correctly', async () => {
99 | const result = await handleSonarQubeGetIssues({ projectKey: 'test-project' });
100 | const data = JSON.parse(result.content[0].text);
101 | expect(data.issues).toBeDefined();
102 | expect(data.issues).toHaveLength(1);
103 | expect(data.issues[0].severity).toBe('MAJOR');
104 | expect(data.paging).toBeDefined();
105 | });
106 | });
107 | describe('handleSonarQubeGetMetrics', () => {
108 | it('should handle metrics correctly', async () => {
109 | const result = await handleSonarQubeGetMetrics({});
110 | const data = JSON.parse(result.content[0].text);
111 | expect(data.metrics).toBeDefined();
112 | expect(data.metrics).toHaveLength(1);
113 | expect(data.metrics[0].key).toBe('coverage');
114 | expect(data.paging).toBeDefined();
115 | });
116 | });
117 | describe('System API Handlers', () => {
118 | it('should handle health correctly', async () => {
119 | const result = await handleSonarQubeGetHealth();
120 | const data = JSON.parse(result.content[0].text);
121 | expect(data.health).toBe('GREEN');
122 | expect(data.causes).toEqual([]);
123 | });
124 | it('should handle status correctly', async () => {
125 | const result = await handleSonarQubeGetStatus();
126 | const data = JSON.parse(result.content[0].text);
127 | expect(data.id).toBe('test-id');
128 | expect(data.version).toBe('10.3.0.82913');
129 | expect(data.status).toBe('UP');
130 | });
131 | it('should handle ping correctly', async () => {
132 | const result = await handleSonarQubePing();
133 | expect(result.content[0].text).toBe('pong');
134 | });
135 | });
136 | describe('Measures API Handlers', () => {
137 | it('should handle component measures correctly', async () => {
138 | const result = await handleSonarQubeComponentMeasures({
139 | component: 'test-component',
140 | metricKeys: ['coverage'],
141 | });
142 | const data = JSON.parse(result.content[0].text);
143 | expect(data.component).toBeDefined();
144 | expect(data.component.key).toBe('test-component');
145 | expect(data.component.measures).toHaveLength(1);
146 | expect(data.component.measures[0].metric).toBe('coverage');
147 | expect(data.metrics).toBeDefined();
148 | });
149 | it('should handle components measures correctly', async () => {
150 | const result = await handleSonarQubeComponentsMeasures({
151 | componentKeys: ['test-component-1'],
152 | metricKeys: ['coverage'],
153 | });
154 | const data = JSON.parse(result.content[0].text);
155 | expect(data.components).toBeDefined();
156 | expect(data.components).toHaveLength(1);
157 | expect(data.components[0].key).toBe('test-component-1');
158 | expect(data.metrics).toBeDefined();
159 | expect(data.paging).toBeDefined();
160 | });
161 | it('should handle measures history correctly', async () => {
162 | const result = await handleSonarQubeMeasuresHistory({
163 | component: 'test-component',
164 | metrics: ['coverage'],
165 | });
166 | const data = JSON.parse(result.content[0].text);
167 | expect(data.measures).toBeDefined();
168 | expect(data.measures).toHaveLength(1);
169 | expect(data.measures[0].metric).toBe('coverage');
170 | expect(data.measures[0].history).toHaveLength(1);
171 | expect(data.paging).toBeDefined();
172 | });
173 | });
174 | });
175 |
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0012-add-elicitation-support-for-interactive-user-input.md:
--------------------------------------------------------------------------------
```markdown
1 | # 12. Add Elicitation Support for Interactive User Input
2 |
3 | Date: 2025-06-19
4 |
5 | ## Status
6 |
7 | Proposed
8 |
9 | ## Context
10 |
11 | The SonarQube MCP server currently operates in a non-interactive mode, requiring all configuration and parameters to be provided upfront through environment variables or tool arguments. This approach has several limitations:
12 |
13 | 1. **Bulk Operations Risk**: When performing bulk operations (e.g., marking multiple issues as false positive), there's no confirmation step, risking accidental modifications.
14 |
15 | 2. **Configuration Complexity**: Users must configure authentication before starting the server, with no guidance when configuration is missing or incorrect.
16 |
17 | 3. **Limited Discoverability**: Users must know exact project keys, component paths, and valid parameter values without assistance.
18 |
19 | 4. **No Context Collection**: Operations like marking issues as false positive or won't fix lack the ability to collect explanatory comments interactively.
20 |
21 | MCP SDK v1.13.0 introduced the elicitation capability, which allows servers to request structured input from users through clients during operation. This feature enables:
22 |
23 | - Interactive data collection with JSON schema validation
24 | - Multi-attempt collection with confirmation
25 | - Type-safe input handling
26 | - User-controlled data sharing
27 |
28 | ## Decision
29 |
30 | We will add elicitation support to the SonarQube MCP server to enable interactive user input collection in specific scenarios where it provides clear value for safety, usability, or data quality.
31 |
32 | ## Implementation Plan
33 |
34 | ### 1. Elicitation Use Cases
35 |
36 | #### Critical Safety Confirmations
37 |
38 | - **Bulk False Positive**: Confirm before marking >5 issues as false positive
39 | - **Bulk Won't Fix**: Confirm before marking >5 issues as won't fix
40 | - **Bulk Assignment**: Confirm before assigning >10 issues to a user
41 |
42 | #### Configuration Assistance
43 |
44 | - **Missing Authentication**: Guide users through auth setup when not configured
45 | - **Invalid Credentials**: Help users correct authentication issues
46 | - **Organization Selection**: List available organizations for SonarCloud users
47 |
48 | #### Context Collection
49 |
50 | - **False Positive Justification**: Collect explanation when marking issues
51 | - **Won't Fix Reasoning**: Document why issues won't be addressed
52 | - **Resolution Comments**: Gather details about how issues were resolved
53 |
54 | #### Search Refinement
55 |
56 | - **Component Disambiguation**: When multiple components match a query
57 | - **Project Selection**: When multiple projects are available
58 | - **Filter Refinement**: When initial search returns too many results
59 |
60 | ### 2. Technical Implementation
61 |
62 | #### Schema Definitions
63 |
64 | ```typescript
65 | // Confirmation schema
66 | const confirmationSchema = {
67 | type: 'object',
68 | properties: {
69 | confirm: {
70 | type: 'boolean',
71 | description: 'Confirm the operation',
72 | },
73 | comment: {
74 | type: 'string',
75 | description: 'Optional comment',
76 | maxLength: 500,
77 | },
78 | },
79 | required: ['confirm'],
80 | };
81 |
82 | // Authentication schema
83 | const authSchema = {
84 | type: 'object',
85 | properties: {
86 | method: {
87 | type: 'string',
88 | enum: ['token', 'basic', 'passcode'],
89 | description: 'Authentication method',
90 | },
91 | token: {
92 | type: 'string',
93 | description: 'SonarQube token (for token auth)',
94 | },
95 | username: {
96 | type: 'string',
97 | description: 'Username (for basic auth)',
98 | },
99 | password: {
100 | type: 'string',
101 | description: 'Password (for basic auth)',
102 | },
103 | passcode: {
104 | type: 'string',
105 | description: 'System passcode',
106 | },
107 | },
108 | dependencies: {
109 | method: {
110 | oneOf: [
111 | {
112 | properties: { method: { const: 'token' } },
113 | required: ['token'],
114 | },
115 | {
116 | properties: { method: { const: 'basic' } },
117 | required: ['username', 'password'],
118 | },
119 | {
120 | properties: { method: { const: 'passcode' } },
121 | required: ['passcode'],
122 | },
123 | ],
124 | },
125 | },
126 | };
127 | ```
128 |
129 | #### Integration Points
130 |
131 | 1. **Bulk Operations**: Add threshold checks and confirmation elicitation
132 | 2. **Authentication**: Detect missing/invalid auth and offer setup assistance
133 | 3. **Tool Enhancement**: Update existing tools to use elicitation when beneficial
134 | 4. **Error Recovery**: Use elicitation to help users recover from common errors
135 |
136 | ### 3. Configuration Options
137 |
138 | Add server options to control elicitation behavior:
139 |
140 | ```typescript
141 | interface ElicitationOptions {
142 | enabled: boolean; // Master switch for elicitation
143 | bulkOperationThreshold: number; // Items before confirmation (default: 5)
144 | requireComments: boolean; // Require comments for resolutions
145 | interactiveSearch: boolean; // Enable search refinement
146 | }
147 | ```
148 |
149 | ### 4. Backward Compatibility
150 |
151 | - Elicitation will be **disabled by default**; users must explicitly opt in
152 | - Environment variable `SONARQUBE_MCP_ELICITATION=true` to enable
153 | - All existing workflows continue to work without elicitation
154 | - Tools detect elicitation availability and adapt behavior
155 |
156 | ## Consequences
157 |
158 | ### Positive
159 |
160 | 1. **Improved Safety**: Prevents accidental bulk modifications
161 | 2. **Better UX**: Interactive guidance for complex operations
162 | 3. **Higher Data Quality**: Collects context and justifications
163 | 4. **Easier Onboarding**: Helps new users configure the server
164 | 5. **Reduced Errors**: Validates input before operations
165 | 6. **Enhanced Discoverability**: Users learn available options interactively
166 |
167 | ### Negative
168 |
169 | 1. **SDK Dependency**: Requires upgrade to MCP SDK v1.13.0+
170 | 2. **Increased Complexity**: More code paths to maintain
171 | 3. **Workflow Interruption**: May slow down automated workflows
172 | 4. **Testing Overhead**: Requires testing both interactive and non-interactive modes
173 | 5. **Client Compatibility**: Only works with clients that support elicitation
174 |
175 | ### Neutral
176 |
177 | 1. **Optional Feature**: Can be disabled for automation scenarios
178 | 2. **Gradual Adoption**: Can be implemented incrementally
179 | 3. **Learning Curve**: Users need to understand when elicitation occurs
180 |
181 | ## Migration Strategy
182 |
183 | ### Phase 1: Foundation (Week 1)
184 |
185 | - Upgrade MCP SDK to v1.13.0+
186 | - Add elicitation configuration system
187 | - Create base elicitation utilities
188 |
189 | ### Phase 2: Critical Safety (Week 2)
190 |
191 | - Implement bulk operation confirmations
192 | - Add tests for confirmation flows
193 | - Document safety features
194 |
195 | ### Phase 3: Enhanced UX (Week 3-4)
196 |
197 | - Add authentication setup assistance
198 | - Implement search refinement
199 | - Add context collection for resolutions
200 |
201 | ### Phase 4: Polish (Week 5)
202 |
203 | - Performance optimization
204 | - Extended documentation
205 | - User feedback incorporation
206 |
207 | ## Alternatives Considered
208 |
209 | 1. **Status Quo**: Continue with non-interactive operation
210 | - Pros: Simple, predictable
211 | - Cons: Risk of accidents, poor discoverability
212 |
213 | 2. **Custom Prompting**: Use MCP prompts instead of elicitation
214 | - Pros: Available in current SDK
215 | - Cons: Less structured, no validation, one-way communication
216 |
217 | 3. **External Configuration Tool**: Separate CLI for configuration
218 | - Pros: Separation of concerns
219 | - Cons: Additional tool to maintain, poor integration
220 |
221 | 4. **Client-Side Validation**: Rely on clients to validate
222 | - Pros: No server changes needed
223 | - Cons: Inconsistent experience, no server control
224 |
225 | ## References
226 |
227 | - [MCP Elicitation Specification](https://modelcontextprotocol.io/specification/2025-06-18/client/elicitation)
228 | - [MCP SDK v1.13.0 Release Notes](https://github.com/modelcontextprotocol/sdk/releases/tag/v1.13.0)
229 | - [SonarQube Web API Documentation](https://docs.sonarqube.org/latest/web-api/)
230 |
```
--------------------------------------------------------------------------------
/src/__tests__/schema-transforms.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest';
2 | import { z } from 'zod';
3 | import { nullToUndefined } from '../index.js';
4 | describe('Schema Transformations', () => {
5 | describe('nullToUndefined function', () => {
6 | it('should convert null to undefined', () => {
7 | expect(nullToUndefined(null)).toBeUndefined();
8 | });
9 | it('should keep undefined as undefined', () => {
10 | expect(nullToUndefined(undefined)).toBeUndefined();
11 | });
12 | it('should pass through non-null values', () => {
13 | expect(nullToUndefined('test')).toBe('test');
14 | expect(nullToUndefined(42)).toBe(42);
15 | expect(nullToUndefined(true)).toBe(true);
16 | expect(nullToUndefined(false)).toBe(false);
17 | expect(nullToUndefined(0)).toBe(0);
18 | expect(nullToUndefined('')).toBe('');
19 | const obj = { test: 'value' };
20 | expect(nullToUndefined(obj)).toBe(obj);
21 | const arr = [1, 2, 3];
22 | expect(nullToUndefined(arr)).toBe(arr);
23 | });
24 | });
25 | describe('Common Zod Schemas', () => {
26 | it('should transform page parameters correctly', () => {
27 | const pageSchema = z
28 | .string()
29 | .optional()
30 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
31 | // Valid numbers
32 | expect(pageSchema.parse('1')).toBe(1);
33 | expect(pageSchema.parse('10')).toBe(10);
34 | expect(pageSchema.parse('100')).toBe(100);
35 | // Invalid or empty values
36 | expect(pageSchema.parse('abc')).toBe(null);
37 | expect(pageSchema.parse('')).toBe(null);
38 | expect(pageSchema.parse(undefined)).toBe(null);
39 | });
40 | it('should transform page_size parameters correctly', () => {
41 | const pageSizeSchema = z
42 | .string()
43 | .optional()
44 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
45 | // Valid numbers
46 | expect(pageSizeSchema.parse('10')).toBe(10);
47 | expect(pageSizeSchema.parse('50')).toBe(50);
48 | expect(pageSizeSchema.parse('100')).toBe(100);
49 | // Invalid or empty values
50 | expect(pageSizeSchema.parse('abc')).toBe(null);
51 | expect(pageSizeSchema.parse('')).toBe(null);
52 | expect(pageSizeSchema.parse(undefined)).toBe(null);
53 | });
54 | it('should validate severity values correctly', () => {
55 | const severitySchema = z
56 | .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
57 | .nullable()
58 | .optional();
59 | // Valid severities
60 | expect(severitySchema.parse('INFO')).toBe('INFO');
61 | expect(severitySchema.parse('MINOR')).toBe('MINOR');
62 | expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
63 | expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
64 | expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
65 | // Null/undefined
66 | expect(severitySchema.parse(null)).toBe(null);
67 | expect(severitySchema.parse(undefined)).toBe(undefined);
68 | // Invalid
69 | expect(() => severitySchema.parse('INVALID')).toThrow();
70 | });
71 | it('should validate status values correctly', () => {
72 | const statusSchema = z
73 | .array(
74 | z.enum([
75 | 'OPEN',
76 | 'CONFIRMED',
77 | 'REOPENED',
78 | 'RESOLVED',
79 | 'CLOSED',
80 | 'TO_REVIEW',
81 | 'IN_REVIEW',
82 | 'REVIEWED',
83 | ])
84 | )
85 | .nullable()
86 | .optional();
87 | // Valid statuses
88 | expect(statusSchema.parse(['OPEN'])).toEqual(['OPEN']);
89 | expect(statusSchema.parse(['CONFIRMED', 'REOPENED'])).toEqual(['CONFIRMED', 'REOPENED']);
90 | expect(statusSchema.parse(['RESOLVED', 'CLOSED'])).toEqual(['RESOLVED', 'CLOSED']);
91 | expect(statusSchema.parse(['TO_REVIEW', 'IN_REVIEW', 'REVIEWED'])).toEqual([
92 | 'TO_REVIEW',
93 | 'IN_REVIEW',
94 | 'REVIEWED',
95 | ]);
96 | // Null/undefined
97 | expect(statusSchema.parse(null)).toBe(null);
98 | expect(statusSchema.parse(undefined)).toBe(undefined);
99 | // Invalid
100 | expect(() => statusSchema.parse(['INVALID'])).toThrow();
101 | expect(() => statusSchema.parse(['open'])).toThrow(); // case sensitivity
102 | });
103 | it('should validate resolution values correctly', () => {
104 | const resolutionSchema = z
105 | .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
106 | .nullable()
107 | .optional();
108 | // Valid resolutions
109 | expect(resolutionSchema.parse(['FALSE-POSITIVE'])).toEqual(['FALSE-POSITIVE']);
110 | expect(resolutionSchema.parse(['WONTFIX', 'FIXED'])).toEqual(['WONTFIX', 'FIXED']);
111 | expect(resolutionSchema.parse(['REMOVED'])).toEqual(['REMOVED']);
112 | expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED'])).toEqual([
113 | 'FALSE-POSITIVE',
114 | 'WONTFIX',
115 | 'FIXED',
116 | 'REMOVED',
117 | ]);
118 | // Null/undefined
119 | expect(resolutionSchema.parse(null)).toBe(null);
120 | expect(resolutionSchema.parse(undefined)).toBe(undefined);
121 | // Invalid
122 | expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
123 | });
124 | it('should validate type values correctly', () => {
125 | const typeSchema = z
126 | .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
127 | .nullable()
128 | .optional();
129 | // Valid types
130 | expect(typeSchema.parse(['CODE_SMELL'])).toEqual(['CODE_SMELL']);
131 | expect(typeSchema.parse(['BUG', 'VULNERABILITY'])).toEqual(['BUG', 'VULNERABILITY']);
132 | expect(typeSchema.parse(['SECURITY_HOTSPOT'])).toEqual(['SECURITY_HOTSPOT']);
133 | expect(typeSchema.parse(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT'])).toEqual([
134 | 'CODE_SMELL',
135 | 'BUG',
136 | 'VULNERABILITY',
137 | 'SECURITY_HOTSPOT',
138 | ]);
139 | // Null/undefined
140 | expect(typeSchema.parse(null)).toBe(null);
141 | expect(typeSchema.parse(undefined)).toBe(undefined);
142 | // Invalid
143 | expect(() => typeSchema.parse(['INVALID'])).toThrow();
144 | });
145 | it('should transform boolean values correctly', () => {
146 | const booleanSchema = z
147 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
148 | .nullable()
149 | .optional();
150 | // String values
151 | expect(booleanSchema.parse('true')).toBe(true);
152 | expect(booleanSchema.parse('false')).toBe(false);
153 | // Boolean values
154 | expect(booleanSchema.parse(true)).toBe(true);
155 | expect(booleanSchema.parse(false)).toBe(false);
156 | // Null/undefined
157 | expect(booleanSchema.parse(null)).toBe(null);
158 | expect(booleanSchema.parse(undefined)).toBe(undefined);
159 | });
160 | it('should validate string arrays correctly', () => {
161 | const stringArraySchema = z.array(z.string()).nullable().optional();
162 | // Valid arrays
163 | expect(stringArraySchema.parse(['test'])).toEqual(['test']);
164 | expect(stringArraySchema.parse(['one', 'two', 'three'])).toEqual(['one', 'two', 'three']);
165 | expect(stringArraySchema.parse([])).toEqual([]);
166 | // Null/undefined
167 | expect(stringArraySchema.parse(null)).toBe(null);
168 | expect(stringArraySchema.parse(undefined)).toBe(undefined);
169 | // Invalid
170 | expect(() => stringArraySchema.parse('not-an-array')).toThrow();
171 | expect(() => stringArraySchema.parse([1, 2, 3])).toThrow();
172 | });
173 | it('should validate and transform string or array unions', () => {
174 | const unionSchema = z.union([z.string(), z.array(z.string())]);
175 | // Single string
176 | expect(unionSchema.parse('test')).toBe('test');
177 | // String array
178 | expect(unionSchema.parse(['one', 'two'])).toEqual(['one', 'two']);
179 | // Invalid
180 | expect(() => unionSchema.parse(123)).toThrow();
181 | expect(() => unionSchema.parse([1, 2, 3])).toThrow();
182 | });
183 | });
184 | });
185 |
```
--------------------------------------------------------------------------------
/src/utils/logger.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * @fileoverview Simple logging service for the application.
3 | * This provides a centralized place for all logging functionality.
4 | *
5 | * Configuration:
6 | * - LOG_LEVEL: Sets the minimum log level (DEBUG, INFO, WARN, ERROR). Defaults to DEBUG.
7 | * - LOG_FILE: Path to the log file. If not set, no logs will be written.
8 | *
9 | * Note: Since MCP servers use stdout for protocol communication, logs are written
10 | * to a file instead of stdout/stderr to avoid interference.
11 | */
12 |
13 | import { writeFileSync, appendFileSync, existsSync, mkdirSync } from 'node:fs';
14 | import { dirname } from 'node:path';
15 |
/**
 * Log levels for the application
 * @enum {string}
 */
export enum LogLevel {
  DEBUG = 'DEBUG',
  INFO = 'INFO',
  WARN = 'WARN',
  ERROR = 'ERROR',
}

/**
 * Environment-aware logging configuration
 *
 * Numeric rank per level, consumed by shouldLog() with a >= comparison:
 * higher rank = more severe, and DEBUG (rank 0) lets everything through.
 */
const LOG_LEVELS_PRIORITY: Record<LogLevel, number> = {
  [LogLevel.DEBUG]: 0,
  [LogLevel.INFO]: 1,
  [LogLevel.WARN]: 2,
  [LogLevel.ERROR]: 3,
};
36 |
37 | /**
38 | * Get the log file path from environment
39 | * @returns {string | null} The log file path or null if not configured
40 | * @private
41 | */
42 | function getLogFilePath(): string | null {
43 | return process.env.LOG_FILE ?? null;
44 | }
45 |
// Module-level guard: the create/truncate work below runs at most once per
// process. It is set to true even on failure so a bad path is never retried.
let logFileInitialized = false;

/**
 * Initialize the log file if needed by creating the directory and file
 * Only initializes once per process to avoid redundant file operations
 * @private
 * @returns {void}
 */
function initializeLogFile(): void {
  const logFile = getLogFilePath();
  if (logFile && !logFileInitialized) {
    try {
      // Create directory if it doesn't exist
      const dir = dirname(logFile);
      if (!existsSync(dir)) {
        mkdirSync(dir, { recursive: true });
      }
      // Create or truncate the log file — each process start wipes the previous log
      writeFileSync(logFile, '');
      logFileInitialized = true;
    } catch {
      // Fail silently if we can't create the log file
      logFileInitialized = true; // Don't retry
    }
  }
}
72 |
73 | /**
74 | * Formats non-JSON-serializable values to string
75 | * @param value The value to format
76 | * @returns String representation of the value
77 | */
78 | function formatNonSerializable(value: unknown): string {
79 | if (value === null) return 'null';
80 | if (value === undefined) return 'undefined';
81 |
82 | if (typeof value === 'object') {
83 | const constructorName =
84 | 'constructor' in value && value.constructor?.name ? value.constructor.name : 'Object';
85 | return `[object ${constructorName}]`;
86 | }
87 |
88 | return Object.prototype.toString.call(value);
89 | }
90 |
91 | /**
92 | * Formats an error for logging
93 | * @param error The error to format
94 | * @returns Formatted error string
95 | */
96 | function formatError(error: unknown): string {
97 | if (error === undefined) {
98 | return '';
99 | }
100 |
101 | if (error instanceof Error) {
102 | const stack = error.stack ? `\n${error.stack}` : '';
103 | return `${error.name}: ${error.message}${stack}`;
104 | }
105 |
106 | try {
107 | return JSON.stringify(error, null, 2);
108 | } catch {
109 | // Fallback to string representation if JSON.stringify fails
110 | return formatNonSerializable(error);
111 | }
112 | }
113 |
/**
 * Write a log message to file
 *
 * Lazily initializes the log file on first write, then appends synchronously
 * (appendFileSync), so messages land in call order. Failures are silently
 * ignored, matching the init path.
 * @param message The formatted log message to write
 * @private
 */
function writeToLogFile(message: string): void {
  const logFile = getLogFilePath();
  if (logFile) {
    try {
      if (!logFileInitialized) {
        initializeLogFile();
      }
      appendFileSync(logFile, `${message}\n`);
    } catch {
      // Fail silently if we can't write to the log file
    }
  }
}
132 |
133 | /**
134 | * Check if a log level should be displayed based on the environment configuration
135 | * @param level The log level to check
136 | * @returns {boolean} True if the log level should be displayed
137 | * @private
138 | */
139 | function shouldLog(level: LogLevel): boolean {
140 | const configuredLevel = (process.env.LOG_LEVEL ?? 'DEBUG') as LogLevel;
141 | return LOG_LEVELS_PRIORITY[level] >= LOG_LEVELS_PRIORITY[configuredLevel];
142 | }
143 |
144 | /**
145 | * Format a log message with timestamp, level, and context information
146 | * @param level The log level of the message
147 | * @param message The log message content
148 | * @param context Optional context identifier
149 | * @returns {string} Formatted log message
150 | * @private
151 | */
152 | function formatLogMessage(level: LogLevel, message: string, context?: string): string {
153 | const timestamp = new Date().toISOString();
154 | const contextStr = context ? `[${context}] ` : '';
155 | return `${timestamp} ${level} ${contextStr}${message}`;
156 | }
157 |
158 | /**
159 | * Logger service for consistent logging throughout the application
160 | */
161 | export class Logger {
162 | private readonly context: string | undefined;
163 |
164 | /**
165 | * Create a new logger instance, optionally with a context
166 | * @param context Optional context name to identify the log source
167 | */
168 | constructor(context?: string) {
169 | this.context = context;
170 | }
171 |
172 | /**
173 | * Log a debug message
174 | * @param message The message to log
175 | * @param data Optional data to include in the log
176 | */
177 | debug(message: string, data?: unknown): void {
178 | if (shouldLog(LogLevel.DEBUG) && getLogFilePath()) {
179 | const formattedMessage = formatLogMessage(LogLevel.DEBUG, message, this.context);
180 | const fullMessage =
181 | data === undefined
182 | ? formattedMessage
183 | : `${formattedMessage} ${JSON.stringify(data, null, 2)}`;
184 | writeToLogFile(fullMessage);
185 | }
186 | }
187 |
188 | /**
189 | * Log an info message
190 | * @param message The message to log
191 | * @param data Optional data to include in the log
192 | */
193 | info(message: string, data?: unknown): void {
194 | if (shouldLog(LogLevel.INFO) && getLogFilePath()) {
195 | const formattedMessage = formatLogMessage(LogLevel.INFO, message, this.context);
196 | const fullMessage =
197 | data === undefined
198 | ? formattedMessage
199 | : `${formattedMessage} ${JSON.stringify(data, null, 2)}`;
200 | writeToLogFile(fullMessage);
201 | }
202 | }
203 |
204 | /**
205 | * Log a warning message
206 | * @param message The message to log
207 | * @param data Optional data to include in the log
208 | */
209 | warn(message: string, data?: unknown): void {
210 | if (shouldLog(LogLevel.WARN) && getLogFilePath()) {
211 | const formattedMessage = formatLogMessage(LogLevel.WARN, message, this.context);
212 | const fullMessage =
213 | data === undefined
214 | ? formattedMessage
215 | : `${formattedMessage} ${JSON.stringify(data, null, 2)}`;
216 | writeToLogFile(fullMessage);
217 | }
218 | }
219 |
220 | /**
221 | * Log an error message with improved error formatting
222 | * @param message The message to log
223 | * @param error Optional error to include in the log. The error will be formatted for better readability:
224 | * - Error objects will include name, message and stack trace
225 | * - Objects will be stringified with proper indentation
226 | * - Other values will be converted to strings
227 | */
228 | error(message: string, error?: unknown): void {
229 | if (!shouldLog(LogLevel.ERROR) || !getLogFilePath()) {
230 | return;
231 | }
232 |
233 | const formattedMessage = formatLogMessage(LogLevel.ERROR, message, this.context);
234 | const errorOutput = formatError(error);
235 | const fullMessage = errorOutput ? `${formattedMessage} ${errorOutput}` : formattedMessage;
236 | writeToLogFile(fullMessage);
237 | }
238 | }
239 |
/**
 * Default logger instance for the application
 * Pre-configured with the 'SonarQubeMCP' context for quick imports
 * @const {Logger}
 */
export const defaultLogger = new Logger('SonarQubeMCP');

/**
 * Helper function to create a logger with a specific context
 * Logger instances only store their context string, so creating one per module is cheap.
 * @param context The context to use for the logger
 * @returns A new logger instance with the specified context
 */
export function createLogger(context: string): Logger {
  return new Logger(context);
}

/**
 * Default export for simpler imports
 */
export default defaultLogger;
```
--------------------------------------------------------------------------------
/src/__tests__/domains/source-code-domain.test.ts:
--------------------------------------------------------------------------------
```typescript
import nock from 'nock';
import { SourceCodeDomain } from '../../domains/source-code.js';
import { IssuesDomain } from '../../domains/issues.js';
import { SonarQubeClient as WebApiClient } from 'sonarqube-web-api-client';

// HTTP-level tests for SourceCodeDomain: nock intercepts the SonarQube REST
// endpoints (/api/sources/raw, /api/sources/scm, /api/issues/search) so the
// domain is exercised against canned responses without a live server.
describe('SourceCodeDomain', () => {
  const baseUrl = 'https://sonarqube.example.com';
  const organization = 'test-org';
  let domain: SourceCodeDomain;
  let webApiClient: WebApiClient;
  let issuesDomain: IssuesDomain;

  beforeEach(() => {
    webApiClient = WebApiClient.withToken(baseUrl, 'test-token', { organization });
    issuesDomain = new IssuesDomain(webApiClient, organization);
    domain = new SourceCodeDomain(webApiClient, organization, issuesDomain);
    nock.cleanAll(); // drop any interceptors leaked by a previous test
  });

  afterEach(() => {
    nock.cleanAll();
  });

  describe('getSourceCode', () => {
    // /api/sources/raw returns the file as plain text, one source line per line.
    const mockSourceResponse = [
      'public class Example {',
      ' public void method() {',
      ' // TODO: implement',
      ' }',
      '}',
    ].join('\n');

    const mockIssuesResponse = {
      paging: { pageIndex: 1, pageSize: 100, total: 2 },
      issues: [
        {
          key: 'issue1',
          rule: 'squid:S1234',
          component: 'com.example:Example.java',
          project: 'com.example',
          line: 2,
          message: 'Fix this issue',
          severity: 'MAJOR',
          status: 'OPEN',
          type: 'BUG',
          textRange: {
            startLine: 2,
            endLine: 2,
            startOffset: 10,
            endOffset: 20,
          },
          tags: [],
          creationDate: '2023-01-01T00:00:00Z',
          updateDate: '2023-01-01T00:00:00Z',
        },
        {
          key: 'issue2',
          rule: 'squid:S5678',
          component: 'com.example:Example.java',
          project: 'com.example',
          line: 3,
          message: 'Another issue',
          severity: 'MINOR',
          status: 'OPEN',
          type: 'CODE_SMELL',
          tags: [],
          creationDate: '2023-01-01T00:00:00Z',
          updateDate: '2023-01-01T00:00:00Z',
        },
      ],
      components: [],
      rules: [],
    };

    it('should get source code with issues for all lines', async () => {
      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          organization,
        })
        .reply(200, mockSourceResponse);

      nock(baseUrl)
        .get('/api/issues/search')
        .query({
          projects: 'com.example:Example.java',
          onComponentOnly: 'true',
          organization,
          p: '1',
          ps: '100',
        })
        .reply(200, mockIssuesResponse);

      const result = await domain.getSourceCode({
        key: 'com.example:Example.java',
      });

      expect(result.component.key).toBe('com.example:Example.java');
      expect(result.component.name).toBe('com.example:Example.java'); // name is the full key since there's no '/' in the path
      expect(result.component.qualifier).toBe('FIL');
      expect(result.sources).toHaveLength(5);
      // Issues are attached per line via their `line` field; lines without a
      // matching issue carry issues: undefined.
      expect(result.sources[0]).toEqual({
        line: 1,
        code: 'public class Example {',
        issues: undefined,
      });
      expect(result.sources[1]).toBeDefined();
      expect(result.sources[1]!.issues).toHaveLength(1);
      expect(result.sources[1]!.issues?.[0]!.key).toBe('issue1');
      expect(result.sources[2]).toBeDefined();
      expect(result.sources[2]!.issues).toHaveLength(1);
      expect(result.sources[2]!.issues?.[0]!.key).toBe('issue2');
    });

    it('should get source code with line range and branch', async () => {
      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          branch: 'feature-branch',
          organization,
        })
        .reply(200, mockSourceResponse);

      nock(baseUrl)
        .get('/api/issues/search')
        .query({
          projects: 'com.example:Example.java',
          branch: 'feature-branch',
          onComponentOnly: 'true',
          organization,
        })
        .reply(200, { ...mockIssuesResponse, issues: [] });

      // from/to trim the returned slice while keeping the original line numbers.
      const result = await domain.getSourceCode({
        key: 'com.example:Example.java',
        from: 2,
        to: 4,
        branch: 'feature-branch',
      });

      expect(result.sources).toHaveLength(3);
      expect(result.sources[0]!.line).toBe(2);
      expect(result.sources[2]!.line).toBe(4);
    });

    it('should get source code for pull request', async () => {
      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          pullRequest: '123',
          organization,
        })
        .reply(200, mockSourceResponse);

      nock(baseUrl)
        .get('/api/issues/search')
        .query({
          projects: 'com.example:Example.java',
          pullRequest: '123',
          onComponentOnly: 'true',
          organization,
        })
        .reply(200, { ...mockIssuesResponse, issues: [] });

      const result = await domain.getSourceCode({
        key: 'com.example:Example.java',
        pullRequest: '123',
      });

      expect(result.sources).toHaveLength(5);
    });

    it('should handle source code without issues domain', async () => {
      // Third constructor argument (issuesDomain) omitted: no issues lookup occurs.
      const domainWithoutIssues = new SourceCodeDomain(webApiClient, organization);

      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          organization,
        })
        .reply(200, mockSourceResponse);

      const result = await domainWithoutIssues.getSourceCode({
        key: 'com.example:Example.java',
      });

      expect(result.sources).toHaveLength(5);
      expect(result.sources[0]!.issues).toBeUndefined();
    });

    it('should handle error when fetching issues', async () => {
      nock(baseUrl)
        .get('/api/sources/raw')
        .query({
          key: 'com.example:Example.java',
          organization,
        })
        .reply(200, mockSourceResponse);

      nock(baseUrl)
        .get('/api/issues/search')
        .query({
          projects: 'com.example:Example.java',
          onComponentOnly: 'true',
          organization,
        })
        .reply(500, 'Internal Server Error');

      const result = await domain.getSourceCode({
        key: 'com.example:Example.java',
      });

      // Should still return source code without issues
      expect(result.sources).toHaveLength(5);
      expect(result.sources[0]!.issues).toBeUndefined();
    });
  });

  describe('getScmBlame', () => {
    // Each scm entry is [revision, author, datetime] for the corresponding line.
    const mockScmResponse = {
      scm: [
        ['abc123', '[email protected]', '2023-01-01T00:00:00Z'],
        ['def456', '[email protected]', '2023-01-02T00:00:00Z'],
      ],
    };

    it('should get SCM blame information', async () => {
      nock(baseUrl)
        .get('/api/sources/scm')
        .query({
          key: 'com.example:Example.java',
          organization,
        })
        .reply(200, mockScmResponse);

      const result = await domain.getScmBlame({
        key: 'com.example:Example.java',
      });

      expect(result).toEqual(mockScmResponse);
    });

    it('should get SCM blame with line range', async () => {
      nock(baseUrl)
        .get('/api/sources/scm')
        .query({
          key: 'com.example:Example.java',
          from: 1,
          to: 3,
          organization,
        })
        .reply(200, mockScmResponse);

      const result = await domain.getScmBlame({
        key: 'com.example:Example.java',
        from: 1,
        to: 3,
      });

      expect(result).toEqual(mockScmResponse);
    });
  });
});
268 |
```
--------------------------------------------------------------------------------
/src/__tests__/tool-registration-transforms.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest';
2 | import { z } from 'zod';
3 | describe('Tool Registration Schema Transforms', () => {
4 | describe('Pagination parameters', () => {
5 | it('should transform page string to number or null', () => {
6 | const pageSchema = z
7 | .string()
8 | .optional()
9 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
10 | expect(pageSchema.parse('10')).toBe(10);
11 | expect(pageSchema.parse('invalid')).toBe(null);
12 | expect(pageSchema.parse('')).toBe(null);
13 | expect(pageSchema.parse(undefined)).toBe(null);
14 | });
15 | });
16 | describe('Boolean parameters', () => {
17 | it('should transform string to boolean', () => {
18 | const booleanSchema = z
19 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
20 | .nullable()
21 | .optional();
22 | expect(booleanSchema.parse('true')).toBe(true);
23 | expect(booleanSchema.parse('false')).toBe(false);
24 | expect(booleanSchema.parse(true)).toBe(true);
25 | expect(booleanSchema.parse(false)).toBe(false);
26 | expect(booleanSchema.parse(null)).toBe(null);
27 | expect(booleanSchema.parse(undefined)).toBe(undefined);
28 | });
29 | });
30 | describe('Array union with string', () => {
31 | it('should handle both string and array inputs', () => {
32 | const schema = z.union([z.string(), z.array(z.string())]);
33 | // Test with string input
34 | expect(schema.parse('test')).toBe('test');
35 | // Test with array input
36 | expect(schema.parse(['test1', 'test2'])).toEqual(['test1', 'test2']);
37 | });
38 | });
39 | describe('Union schemas for tool parameters', () => {
40 | it('should validate both array and string metrics parameters', () => {
41 | // Similar to how the metrics_keys parameter is defined
42 | const metricsSchema = z.union([z.string(), z.array(z.string())]);
43 | expect(metricsSchema.parse('coverage')).toBe('coverage');
44 | expect(metricsSchema.parse(['coverage', 'bugs'])).toEqual(['coverage', 'bugs']);
45 | });
46 | it('should validate both array and string component keys parameters', () => {
47 | // Similar to how the component_keys parameter is defined
48 | const componentKeysSchema = z.union([z.string(), z.array(z.string())]);
49 | expect(componentKeysSchema.parse('component1')).toBe('component1');
50 | expect(componentKeysSchema.parse(['component1', 'component2'])).toEqual([
51 | 'component1',
52 | 'component2',
53 | ]);
54 | });
55 | });
56 | describe('Enumeration schemas', () => {
57 | it('should validate severity enum value', () => {
58 | const severitySchema = z
59 | .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
60 | .nullable()
61 | .optional();
62 | expect(severitySchema.parse('INFO')).toBe('INFO');
63 | expect(severitySchema.parse('MINOR')).toBe('MINOR');
64 | expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
65 | expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
66 | expect(severitySchema.parse('BLOCKER')).toBe('BLOCKER');
67 | expect(severitySchema.parse(null)).toBe(null);
68 | expect(severitySchema.parse(undefined)).toBe(undefined);
69 | expect(() => severitySchema.parse('INVALID')).toThrow();
70 | });
71 | it('should validate status array enum values', () => {
72 | const statusSchema = z
73 | .array(
74 | z.enum([
75 | 'OPEN',
76 | 'CONFIRMED',
77 | 'REOPENED',
78 | 'RESOLVED',
79 | 'CLOSED',
80 | 'TO_REVIEW',
81 | 'IN_REVIEW',
82 | 'REVIEWED',
83 | ])
84 | )
85 | .nullable()
86 | .optional();
87 | expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
88 | expect(statusSchema.parse(null)).toBe(null);
89 | expect(statusSchema.parse(undefined)).toBe(undefined);
90 | expect(() => statusSchema.parse(['INVALID'])).toThrow();
91 | });
92 | it('should validate resolution array enum values', () => {
93 | const resolutionSchema = z
94 | .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
95 | .nullable()
96 | .optional();
97 | expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
98 | 'FALSE-POSITIVE',
99 | 'WONTFIX',
100 | ]);
101 | expect(resolutionSchema.parse(null)).toBe(null);
102 | expect(resolutionSchema.parse(undefined)).toBe(undefined);
103 | expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
104 | });
105 | it('should validate type array enum values', () => {
106 | const typeSchema = z
107 | .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
108 | .nullable()
109 | .optional();
110 | expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
111 | expect(typeSchema.parse(null)).toBe(null);
112 | expect(typeSchema.parse(undefined)).toBe(undefined);
113 | expect(() => typeSchema.parse(['INVALID'])).toThrow();
114 | });
115 | });
116 | describe('Complete registration schema', () => {
117 | it('should validate and transform a complete issues tool schema', () => {
118 | // Create schemas similar to what's in the tool registration
119 | const pageSchema = z
120 | .string()
121 | .optional()
122 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
123 | const booleanSchema = z
124 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
125 | .nullable()
126 | .optional();
127 | const severitySchema = z
128 | .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
129 | .nullable()
130 | .optional();
131 | const statusSchema = z
132 | .array(
133 | z.enum([
134 | 'OPEN',
135 | 'CONFIRMED',
136 | 'REOPENED',
137 | 'RESOLVED',
138 | 'CLOSED',
139 | 'TO_REVIEW',
140 | 'IN_REVIEW',
141 | 'REVIEWED',
142 | ])
143 | )
144 | .nullable()
145 | .optional();
146 | const resolutionSchema = z
147 | .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
148 | .nullable()
149 | .optional();
150 | const typeSchema = z
151 | .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
152 | .nullable()
153 | .optional();
154 | const stringArraySchema = z.array(z.string()).nullable().optional();
155 | // Create the complete schema
156 | const schema = z.object({
157 | project_key: z.string(),
158 | severity: severitySchema,
159 | page: pageSchema,
160 | page_size: pageSchema,
161 | statuses: statusSchema,
162 | resolutions: resolutionSchema,
163 | resolved: booleanSchema,
164 | types: typeSchema,
165 | rules: stringArraySchema,
166 | tags: stringArraySchema,
167 | });
168 | // Test with valid data
169 | const validData = {
170 | project_key: 'test-project',
171 | severity: 'MAJOR',
172 | page: '10',
173 | page_size: '20',
174 | statuses: ['OPEN', 'CONFIRMED'],
175 | resolutions: ['FALSE-POSITIVE', 'WONTFIX'],
176 | resolved: 'true',
177 | types: ['CODE_SMELL', 'BUG'],
178 | rules: ['rule1', 'rule2'],
179 | tags: ['tag1', 'tag2'],
180 | };
181 | const result = schema.parse(validData);
182 | // Check that transformations worked correctly
183 | expect(result.project_key).toBe('test-project');
184 | expect(result.severity).toBe('MAJOR');
185 | expect(result.page).toBe(10); // Transformed from string to number
186 | expect(result.page_size).toBe(20); // Transformed from string to number
187 | expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
188 | expect(result.resolutions).toEqual(['FALSE-POSITIVE', 'WONTFIX']);
189 | expect(result.resolved).toBe(true); // Transformed from string to boolean
190 | expect(result.types).toEqual(['CODE_SMELL', 'BUG']);
191 | expect(result.rules).toEqual(['rule1', 'rule2']);
192 | expect(result.tags).toEqual(['tag1', 'tag2']);
193 | });
194 | });
195 | });
196 |
```
--------------------------------------------------------------------------------
/src/errors.ts:
--------------------------------------------------------------------------------
```typescript
1 | import {
2 | SonarQubeError as SonarQubeClientError,
3 | ApiError,
4 | AuthenticationError,
5 | AuthorizationError,
6 | NotFoundError,
7 | RateLimitError,
8 | NetworkError,
9 | ServerError,
10 | ValidationError,
11 | } from 'sonarqube-web-api-client';
12 | import { createLogger } from './utils/logger.js';
13 |
/**
 * Categories of errors surfaced by the SonarQube MCP server.
 * Each value maps 1:1 to an MCP error code in formatErrorForMCP.
 */
export enum SonarQubeErrorType {
  AUTHENTICATION_FAILED = 'AUTHENTICATION_FAILED',
  AUTHORIZATION_FAILED = 'AUTHORIZATION_FAILED',
  RESOURCE_NOT_FOUND = 'RESOURCE_NOT_FOUND',
  RATE_LIMITED = 'RATE_LIMITED',
  NETWORK_ERROR = 'NETWORK_ERROR',
  CONFIGURATION_ERROR = 'CONFIGURATION_ERROR',
  VALIDATION_ERROR = 'VALIDATION_ERROR',
  SERVER_ERROR = 'SERVER_ERROR',
  UNKNOWN_ERROR = 'UNKNOWN_ERROR',
}
25 |
/**
 * Shape of errors produced by this module: a standard Error extended
 * with SonarQube-specific diagnostic fields.
 */
export interface SonarQubeError extends Error {
  /** Classified category of the failure. */
  type: SonarQubeErrorType;
  /** Name of the API operation that failed, when known. */
  operation?: string;
  /** HTTP status code from the SonarQube response, when available. */
  statusCode?: number;
  /** Extra key/value details attached for debugging. */
  context?: Record<string, unknown>;
  /** Human-readable suggestion for resolving the error. */
  solution?: string;
}
33 |
34 | export class SonarQubeAPIError extends Error implements SonarQubeError {
35 | type: SonarQubeErrorType;
36 | operation?: string;
37 | statusCode?: number;
38 | context?: Record<string, unknown>;
39 | solution?: string;
40 |
41 | constructor(
42 | message: string,
43 | type: SonarQubeErrorType,
44 | options?: {
45 | operation?: string;
46 | statusCode?: number;
47 | context?: Record<string, unknown>;
48 | solution?: string;
49 | }
50 | ) {
51 | super(message);
52 | this.name = 'SonarQubeAPIError';
53 | this.type = type;
54 | if (options?.operation !== undefined) {
55 | this.operation = options.operation;
56 | }
57 | if (options?.statusCode !== undefined) {
58 | this.statusCode = options.statusCode;
59 | }
60 | if (options?.context !== undefined) {
61 | this.context = options.context;
62 | }
63 | if (options?.solution !== undefined) {
64 | this.solution = options.solution;
65 | }
66 | }
67 |
68 | override toString(): string {
69 | let result = `Error: ${this.message}`;
70 | if (this.operation) {
71 | result += `\nOperation: ${this.operation}`;
72 | }
73 | if (this.statusCode) {
74 | result += `\nStatus Code: ${this.statusCode}`;
75 | }
76 | if (this.solution) {
77 | result += `\nSolution: ${this.solution}`;
78 | }
79 | if (this.context && Object.keys(this.context).length > 0) {
80 | result += `\nContext: ${JSON.stringify(this.context, null, 2)}`;
81 | }
82 | return result;
83 | }
84 | }
85 |
86 | function getErrorTypeFromClientError(error: SonarQubeClientError): {
87 | type: SonarQubeErrorType;
88 | solution: string | undefined;
89 | } {
90 | if (error instanceof AuthenticationError) {
91 | return {
92 | type: SonarQubeErrorType.AUTHENTICATION_FAILED,
93 | solution:
94 | 'Please check your SONARQUBE_TOKEN or credentials. Ensure the token is valid and not expired.',
95 | };
96 | }
97 | if (error instanceof AuthorizationError) {
98 | return {
99 | type: SonarQubeErrorType.AUTHORIZATION_FAILED,
100 | solution: 'Ensure your token has the required permissions for this operation.',
101 | };
102 | }
103 | if (error instanceof NotFoundError) {
104 | return {
105 | type: SonarQubeErrorType.RESOURCE_NOT_FOUND,
106 | solution: 'Verify the project key/component exists and you have access to it.',
107 | };
108 | }
109 | if (error instanceof RateLimitError) {
110 | return {
111 | type: SonarQubeErrorType.RATE_LIMITED,
112 | solution: 'Please wait before retrying. Consider implementing request throttling.',
113 | };
114 | }
115 | if (error instanceof NetworkError) {
116 | return {
117 | type: SonarQubeErrorType.NETWORK_ERROR,
118 | solution: 'Check your network connection and verify the SonarQube URL.',
119 | };
120 | }
121 | if (error instanceof ServerError) {
122 | return {
123 | type: SonarQubeErrorType.SERVER_ERROR,
124 | solution:
125 | 'The server is experiencing issues. Please try again later or contact your administrator.',
126 | };
127 | }
128 | if (error instanceof ValidationError) {
129 | return {
130 | type: SonarQubeErrorType.VALIDATION_ERROR,
131 | solution: 'Please check your request parameters and try again.',
132 | };
133 | }
134 | return {
135 | type: SonarQubeErrorType.UNKNOWN_ERROR,
136 | solution: undefined,
137 | };
138 | }
139 |
140 | export function transformError(error: unknown, operation: string): SonarQubeAPIError {
141 | if (error instanceof SonarQubeAPIError) {
142 | return error;
143 | }
144 |
145 | if (error instanceof SonarQubeClientError) {
146 | const { type, solution } = getErrorTypeFromClientError(error);
147 | const context: Record<string, unknown> = {};
148 |
149 | // Extract status code if available
150 | let statusCode: number | undefined;
151 | if (error instanceof ApiError && 'statusCode' in error) {
152 | statusCode = (error as ApiError & { statusCode?: number }).statusCode;
153 | }
154 |
155 | const errorOptions: {
156 | operation?: string;
157 | statusCode?: number;
158 | context?: Record<string, unknown>;
159 | solution?: string;
160 | } = {
161 | operation,
162 | context,
163 | };
164 | if (statusCode !== undefined) {
165 | errorOptions.statusCode = statusCode;
166 | }
167 | if (solution !== undefined) {
168 | errorOptions.solution = solution;
169 | }
170 | return new SonarQubeAPIError(error.message, type, errorOptions);
171 | }
172 |
173 | if (error instanceof Error) {
174 | return new SonarQubeAPIError(error.message, SonarQubeErrorType.UNKNOWN_ERROR, {
175 | operation,
176 | });
177 | }
178 |
179 | return new SonarQubeAPIError(String(error), SonarQubeErrorType.UNKNOWN_ERROR, {
180 | operation,
181 | });
182 | }
183 |
/** Tuning knobs for withErrorHandling's retry loop. */
interface RetryOptions {
  /** Maximum number of retries after the initial attempt. */
  maxRetries?: number;
  /** Delay in milliseconds before the first retry. */
  initialDelay?: number;
  /** Upper bound in milliseconds for any single retry delay. */
  maxDelay?: number;
  /** Multiplier applied to the delay after each retry. */
  backoffFactor?: number;
}
190 |
// Module-scoped logger used for retry/backoff diagnostics below.
const logger = createLogger('errors');

// Defaults: up to 3 retries, starting at 1s and doubling each time,
// with any single delay capped at 10s.
const DEFAULT_RETRY_OPTIONS: Required<RetryOptions> = {
  maxRetries: 3,
  initialDelay: 1000,
  maxDelay: 10000,
  backoffFactor: 2,
};
199 |
200 | function shouldRetry(error: unknown): boolean {
201 | if (!(error instanceof SonarQubeAPIError)) {
202 | return false;
203 | }
204 |
205 | // Retry on network errors, rate limiting, and server errors
206 | return [
207 | SonarQubeErrorType.NETWORK_ERROR,
208 | SonarQubeErrorType.RATE_LIMITED,
209 | SonarQubeErrorType.SERVER_ERROR,
210 | ].includes(error.type);
211 | }
212 |
213 | async function sleep(ms: number): Promise<void> {
214 | return new Promise((resolve) => setTimeout(resolve, ms));
215 | }
216 |
/**
 * Runs an API call with automatic retry and exponential backoff.
 *
 * Client-library errors are transformed via transformError; retries happen
 * only for transient categories (see shouldRetry) up to options.maxRetries,
 * with the delay multiplied by backoffFactor after each retry and capped at
 * maxDelay. The last captured error is rethrown when retries are exhausted
 * or the error is not retryable.
 *
 * @param operation - Name of the operation, used in logs and error context.
 * @param apiCall - The async call to execute (and possibly re-execute).
 * @param retryOptions - Optional overrides for DEFAULT_RETRY_OPTIONS.
 * @returns The resolved value of apiCall.
 * @throws The transformed (or original non-client) error from the last attempt.
 */
export async function withErrorHandling<T>(
  operation: string,
  apiCall: () => Promise<T>,
  retryOptions?: RetryOptions
): Promise<T> {
  const options = { ...DEFAULT_RETRY_OPTIONS, ...retryOptions };
  let lastError: unknown;
  let delay = options.initialDelay;

  // attempt 0 is the initial call; attempts 1..maxRetries are retries.
  for (let attempt = 0; attempt <= options.maxRetries; attempt++) {
    try {
      return await apiCall();
    } catch (error) {
      // Only transform errors from the SonarQube client
      if (error instanceof SonarQubeClientError) {
        lastError = transformError(error, operation);
      } else {
        // Pass through other errors unchanged (e.g., test mocks)
        lastError = error;
      }

      if (attempt < options.maxRetries && shouldRetry(lastError)) {
        // Cap this attempt's delay, sleep, then grow the base delay.
        const retryDelay = Math.min(delay, options.maxDelay);
        logger.info(`Retrying ${operation} after ${retryDelay}ms`, {
          attempt: attempt + 1,
          maxRetries: options.maxRetries,
          delay: retryDelay,
        });
        await sleep(retryDelay);
        delay *= options.backoffFactor;
      } else {
        break;
      }
    }
  }

  throw lastError;
}
255 |
256 | export function formatErrorForMCP(error: SonarQubeAPIError): { code: number; message: string } {
257 | const errorMap: Record<SonarQubeErrorType, number> = {
258 | [SonarQubeErrorType.AUTHENTICATION_FAILED]: -32001,
259 | [SonarQubeErrorType.AUTHORIZATION_FAILED]: -32002,
260 | [SonarQubeErrorType.RESOURCE_NOT_FOUND]: -32003,
261 | [SonarQubeErrorType.RATE_LIMITED]: -32004,
262 | [SonarQubeErrorType.NETWORK_ERROR]: -32005,
263 | [SonarQubeErrorType.CONFIGURATION_ERROR]: -32006,
264 | [SonarQubeErrorType.VALIDATION_ERROR]: -32007,
265 | [SonarQubeErrorType.SERVER_ERROR]: -32008,
266 | [SonarQubeErrorType.UNKNOWN_ERROR]: -32000,
267 | };
268 |
269 | return {
270 | code: errorMap[error.type] ?? -32000,
271 | message: error.toString(),
272 | };
273 | }
274 |
```
--------------------------------------------------------------------------------
/src/utils/__tests__/structured-response.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest';
2 | import {
3 | createStructuredResponse,
4 | createTextResponse,
5 | createErrorResponse,
6 | } from '../structured-response.js';
7 |
8 | describe('structured-response', () => {
9 | describe('createStructuredResponse', () => {
10 | it('should create response with text and structured content', () => {
11 | const data = { foo: 'bar', count: 42 };
12 | const result = createStructuredResponse(data);
13 |
14 | expect(result).toEqual({
15 | content: [
16 | {
17 | type: 'text',
18 | text: JSON.stringify(data, null, 2),
19 | },
20 | ],
21 | structuredContent: data,
22 | });
23 | });
24 |
25 | it('should handle null data', () => {
26 | const result = createStructuredResponse(null);
27 |
28 | expect(result).toEqual({
29 | content: [
30 | {
31 | type: 'text',
32 | text: 'null',
33 | },
34 | ],
35 | structuredContent: null,
36 | });
37 | });
38 |
39 | it('should handle undefined data', () => {
40 | const result = createStructuredResponse(undefined);
41 |
42 | expect(result).toEqual({
43 | content: [
44 | {
45 | type: 'text',
46 | text: undefined, // JSON.stringify(undefined) returns undefined
47 | },
48 | ],
49 | structuredContent: undefined,
50 | });
51 | });
52 |
53 | it('should handle array data', () => {
54 | const data = [1, 2, 3, 'test'];
55 | const result = createStructuredResponse(data);
56 |
57 | expect(result).toEqual({
58 | content: [
59 | {
60 | type: 'text',
61 | text: JSON.stringify(data, null, 2),
62 | },
63 | ],
64 | structuredContent: data,
65 | });
66 | });
67 |
68 | it('should handle complex nested objects', () => {
69 | const data = {
70 | level1: {
71 | level2: {
72 | level3: ['a', 'b', 'c'],
73 | number: 123,
74 | },
75 | },
76 | array: [{ id: 1 }, { id: 2 }],
77 | };
78 | const result = createStructuredResponse(data);
79 |
80 | expect(result.content[0]?.text).toBe(JSON.stringify(data, null, 2));
81 | expect(result.structuredContent).toBe(data);
82 | });
83 |
84 | it('should handle circular references gracefully', () => {
85 | const data: Record<string, unknown> = { name: 'test' };
86 | data.circular = data;
87 |
88 | expect(() => createStructuredResponse(data)).toThrow();
89 | });
90 |
91 | it('should preserve Date objects in structured content', () => {
92 | const date = new Date('2023-01-01');
93 | const data = { created: date };
94 | const result = createStructuredResponse(data);
95 |
96 | expect(result.structuredContent).toEqual({ created: date });
97 | expect(result.content[0]?.text).toBe(JSON.stringify(data, null, 2));
98 | });
99 | });
100 |
101 | describe('createTextResponse', () => {
102 | it('should create response with only text content', () => {
103 | const text = 'Hello, world!';
104 | const result = createTextResponse(text);
105 |
106 | expect(result).toEqual({
107 | content: [
108 | {
109 | type: 'text',
110 | text,
111 | },
112 | ],
113 | });
114 | });
115 |
116 | it('should handle empty string', () => {
117 | const result = createTextResponse('');
118 |
119 | expect(result).toEqual({
120 | content: [
121 | {
122 | type: 'text',
123 | text: '',
124 | },
125 | ],
126 | });
127 | });
128 |
129 | it('should handle multiline text', () => {
130 | const text = 'Line 1\nLine 2\nLine 3';
131 | const result = createTextResponse(text);
132 |
133 | expect(result).toEqual({
134 | content: [
135 | {
136 | type: 'text',
137 | text,
138 | },
139 | ],
140 | });
141 | });
142 |
143 | it('should handle special characters', () => {
144 | const text = 'Special chars: < > & " \' \\ \n \t';
145 | const result = createTextResponse(text);
146 |
147 | expect(result.content[0]?.text).toBe(text);
148 | });
149 |
150 | it('should not include structuredContent', () => {
151 | const result = createTextResponse('test');
152 |
153 | expect(result.structuredContent).toBeUndefined();
154 | expect(result.isError).toBeUndefined();
155 | });
156 | });
157 |
158 | describe('createErrorResponse', () => {
159 | it('should create error response with message only', () => {
160 | const message = 'Something went wrong';
161 | const result = createErrorResponse(message);
162 |
163 | expect(result).toEqual({
164 | content: [
165 | {
166 | type: 'text',
167 | text: message,
168 | },
169 | ],
170 | structuredContent: {
171 | error: message,
172 | },
173 | isError: true,
174 | });
175 | });
176 |
177 | it('should create error response with message and details', () => {
178 | const message = 'Validation failed';
179 | const details = {
180 | field: 'email',
181 | reason: 'invalid format',
182 | };
183 | const result = createErrorResponse(message, details);
184 |
185 | expect(result).toEqual({
186 | content: [
187 | {
188 | type: 'text',
189 | text: message,
190 | },
191 | ],
192 | structuredContent: {
193 | error: message,
194 | details,
195 | },
196 | isError: true,
197 | });
198 | });
199 |
200 | it('should handle null details', () => {
201 | const message = 'Error occurred';
202 | const result = createErrorResponse(message, null);
203 |
204 | expect(result).toEqual({
205 | content: [
206 | {
207 | type: 'text',
208 | text: message,
209 | },
210 | ],
211 | structuredContent: {
212 | error: message,
213 | details: null,
214 | },
215 | isError: true,
216 | });
217 | });
218 |
219 | it('should handle undefined details explicitly', () => {
220 | const message = 'Error occurred';
221 | const result = createErrorResponse(message, undefined);
222 |
223 | expect(result.structuredContent).toEqual({
224 | error: message,
225 | });
226 | expect('details' in result.structuredContent!).toBe(false);
227 | });
228 |
229 | it('should handle complex error details', () => {
230 | const message = 'Multiple errors';
231 | const details = {
232 | errors: [
233 | { field: 'name', message: 'required' },
234 | { field: 'age', message: 'must be positive' },
235 | ],
236 | timestamp: new Date(),
237 | requestId: '123456',
238 | };
239 | const result = createErrorResponse(message, details);
240 |
241 | expect(result.structuredContent).toEqual({
242 | error: message,
243 | details,
244 | });
245 | expect(result.isError).toBe(true);
246 | });
247 |
248 | it('should handle empty error message', () => {
249 | const result = createErrorResponse('');
250 |
251 | expect(result).toEqual({
252 | content: [
253 | {
254 | type: 'text',
255 | text: '',
256 | },
257 | ],
258 | structuredContent: {
259 | error: '',
260 | },
261 | isError: true,
262 | });
263 | });
264 |
265 | it('should handle error details with circular references', () => {
266 | const message = 'Circular error';
267 | const details: Record<string, unknown> = { type: 'error' };
268 | details.self = details;
269 |
270 | const result = createErrorResponse(message, details);
271 |
272 | expect(result.structuredContent).toEqual({
273 | error: message,
274 | details,
275 | });
276 | });
277 | });
278 |
279 | describe('type safety', () => {
280 | it('should maintain proper types for content array', () => {
281 | const result = createStructuredResponse({ test: true });
282 |
283 | // Check that content is an array
284 | expect(Array.isArray(result.content)).toBe(true);
285 | expect(result.content).toHaveLength(1);
286 |
287 | // Check that content item has correct type
288 | expect(result.content[0]?.type).toBe('text');
289 | expect(typeof result.content[0]?.text).toBe('string');
290 | });
291 |
292 | it('should cast structuredContent to Record<string, unknown>', () => {
293 | const data = { num: 123, str: 'test', bool: true };
294 | const result = createStructuredResponse(data);
295 |
296 | expect(result.structuredContent).toBe(data);
297 | expect(typeof result.structuredContent).toBe('object');
298 | });
299 | });
300 | });
301 |
```
--------------------------------------------------------------------------------
/src/__tests__/mocked-environment.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
2 | // Mock all dependencies
3 | vi.mock('@modelcontextprotocol/sdk/server/mcp.js', () => ({
4 | McpServer: vi.fn(() => ({
5 | name: 'sonarqube-mcp-server',
6 | version: '1.1.0',
7 | tool: vi.fn(),
8 | connect: vi.fn<() => Promise<any>>().mockResolvedValue(undefined as never),
9 | server: { use: vi.fn() },
10 | })),
11 | }));
12 | vi.mock('@modelcontextprotocol/sdk/server/stdio.js', () => ({
13 | StdioServerTransport: vi.fn(() => ({
14 | connect: vi.fn<() => Promise<any>>().mockResolvedValue(undefined as never),
15 | })),
16 | }));
17 | // Save original environment variables
18 | const originalEnv = process.env;
19 | describe('Mocked Environment Tests', () => {
20 | beforeEach(() => {
21 | vi.resetModules();
22 | process.env = { ...originalEnv };
23 | process.env.SONARQUBE_TOKEN = 'test-token';
24 | process.env.SONARQUBE_URL = 'http://localhost:9000';
25 | process.env.SONARQUBE_ORGANIZATION = 'test-organization';
26 | });
27 | afterEach(() => {
28 | process.env = originalEnv;
29 | vi.clearAllMocks();
30 | });
31 | describe('Server Initialization', () => {
32 | it('should initialize the MCP server with correct configuration', async () => {
33 | const { mcpServer } = await import('../index.js');
34 | expect(mcpServer).toBeDefined();
35 | expect((mcpServer as any).name).toBe('sonarqube-mcp-server');
36 | expect((mcpServer as any).version).toBe('1.1.0');
37 | });
38 | it('should register tools on the server', async () => {
39 | const { mcpServer } = await import('../index.js');
40 | expect((mcpServer as any).tool).toBeDefined();
41 | expect((mcpServer as any).tool).toHaveBeenCalled();
42 | // Check number of tool registrations (28 tools total)
43 | expect((mcpServer as any).tool).toHaveBeenCalledTimes(28);
44 | });
45 | it('should not connect to transport in test mode', async () => {
46 | process.env.NODE_ENV = 'test';
47 | const { mcpServer } = await import('../index.js');
48 | expect((mcpServer as any).connect).not.toHaveBeenCalled();
49 | });
50 | it('should connect to transport in non-test mode', async () => {
51 | process.env.NODE_ENV = 'development';
52 | // Special mock for this specific test that simulates a clean import
53 | vi.resetModules();
54 | // Import the module with development environment
55 | await import('../index.js');
56 | // Since we're not directly importing mcpServer here, we check connection indirectly
57 | // We've mocked the StdioServerTransport so its connect method should have been called
58 | const { StdioServerTransport } = await import('@modelcontextprotocol/sdk/server/stdio.js');
59 | expect(StdioServerTransport).toHaveBeenCalled();
60 | // Reset to test mode
61 | process.env.NODE_ENV = 'test';
62 | });
63 | });
64 | describe('Environment Variables', () => {
65 | it('should use environment variables to configure SonarQube client', async () => {
66 | // Set specific test environment variables
67 | process.env.SONARQUBE_TOKEN = 'specific-test-token';
68 | process.env.SONARQUBE_URL = 'https://specific-test-url.com';
69 | process.env.SONARQUBE_ORGANIZATION = 'specific-test-org';
70 |
71 | // Use dynamic import to test environment variable handling
72 | // Since we've already mocked the module at the top level, we can just verify the behavior
73 | const { mcpServer } = await import('../index.js');
74 |
75 | // The server should be properly initialized
76 | expect(mcpServer).toBeDefined();
77 | expect((mcpServer as any).name).toBe('sonarqube-mcp-server');
78 | });
79 | });
80 | describe('Tool Registration Complete', () => {
81 | it('should register all expected tools', async () => {
82 | const { mcpServer } = await import('../index.js');
83 | // Verify all tools are registered
84 | const toolNames = (mcpServer as any).tool.mock.calls.map((call: any) => call[0]);
85 | expect(toolNames).toContain('projects');
86 | expect(toolNames).toContain('metrics');
87 | expect(toolNames).toContain('issues');
88 | expect(toolNames).toContain('system_health');
89 | expect(toolNames).toContain('system_status');
90 | expect(toolNames).toContain('system_ping');
91 | expect(toolNames).toContain('measures_component');
92 | expect(toolNames).toContain('measures_components');
93 | expect(toolNames).toContain('measures_history');
94 | });
95 | it('should register tools with correct descriptions', async () => {
96 | const { mcpServer } = await import('../index.js');
97 | // Map of tool names to their descriptions from the mcpServer.tool mock calls
98 | const toolDescriptions = new Map(
99 | (mcpServer as any).tool.mock.calls.map((call: any) => [call[0], call[1]])
100 | );
101 | expect(toolDescriptions.get('projects')).toBe(
102 | 'List all SonarQube projects with metadata. Essential for project discovery, inventory management, and accessing project-specific analysis data (requires admin permissions)'
103 | );
104 | expect(toolDescriptions.get('metrics')).toBe(
105 | 'Get available metrics from SonarQube. Use this to discover all measurable code quality dimensions (lines of code, complexity, coverage, duplications, etc.) for reports and dashboards'
106 | );
107 | expect(toolDescriptions.get('issues')).toBe(
108 | 'Search and filter SonarQube issues by severity, status, assignee, tag, file path, directory, scope, and more. Critical for dashboards, targeted clean-up sprints, security audits, and regression testing. Supports faceted search for aggregations.'
109 | );
110 | expect(toolDescriptions.get('system_health')).toBe(
111 | 'Get the health status of the SonarQube instance. Monitor system components, database connectivity, and overall service availability for operational insights'
112 | );
113 | expect(toolDescriptions.get('system_status')).toBe(
114 | 'Get the status of the SonarQube instance'
115 | );
116 | expect(toolDescriptions.get('system_ping')).toBe(
117 | 'Ping the SonarQube instance to check if it is up'
118 | );
119 | expect(toolDescriptions.get('measures_component')).toBe(
120 | 'Get measures for a specific component (project, directory, or file). Essential for tracking code quality metrics, technical debt, and trends over time'
121 | );
122 | expect(toolDescriptions.get('measures_components')).toBe(
123 | 'Get measures for multiple components'
124 | );
125 | expect(toolDescriptions.get('measures_history')).toBe('Get measures history for a component');
126 | });
127 | it('should register tools with valid schemas', async () => {
128 | const { mcpServer } = await import('../index.js');
129 | // Extract schemas from the mcpServer.tool mock calls
130 | const toolSchemas = new Map(
131 | (mcpServer as any).tool.mock.calls.map((call: any) => [call[0], call[2]])
132 | );
133 | // Check if each tool has a schema defined
134 | for (const [, schema] of toolSchemas.entries()) {
135 | expect(schema).toBeDefined();
136 | }
137 | // Check specific schemas for required tools
138 | expect(toolSchemas.get('projects')).toHaveProperty('page');
139 | expect(toolSchemas.get('projects')).toHaveProperty('page_size');
140 | expect(toolSchemas.get('issues')).toHaveProperty('project_key');
141 | expect(toolSchemas.get('issues')).toHaveProperty('severity');
142 | expect(toolSchemas.get('measures_component')).toHaveProperty('component');
143 | expect(toolSchemas.get('measures_component')).toHaveProperty('metric_keys');
144 | expect(toolSchemas.get('measures_components')).toHaveProperty('component_keys');
145 | expect(toolSchemas.get('measures_components')).toHaveProperty('metric_keys');
146 | expect(toolSchemas.get('measures_history')).toHaveProperty('component');
147 | expect(toolSchemas.get('measures_history')).toHaveProperty('metrics');
148 | });
149 | it('should register tools with valid handlers', async () => {
150 | const { mcpServer } = await import('../index.js');
151 | // Extract handlers from the mcpServer.tool mock calls
152 | const toolHandlers = new Map(
153 | (mcpServer as any).tool.mock.calls.map((call: any) => [call[0], call[4]])
154 | );
155 | // Check if each tool has a handler defined and it's a function
156 | for (const [, handler] of toolHandlers.entries()) {
157 | expect(handler).toBeDefined();
158 | expect(typeof handler).toBe('function');
159 | }
160 | });
161 | });
162 | });
163 |
```
--------------------------------------------------------------------------------
/docs/architecture/decisions/0022-package-manager-choice-pnpm.md:
--------------------------------------------------------------------------------
```markdown
1 | # 22. Package Manager Choice pnpm
2 |
3 | Date: 2025-10-11
4 |
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
11 | The choice of package manager significantly impacts development workflow, CI/CD performance, disk usage, and dependency management reliability. The SonarQube MCP Server requires:
12 |
13 | - Fast dependency installation for rapid development iteration
14 | - Efficient disk space usage across multiple projects
15 | - Strict dependency resolution to prevent phantom dependencies
16 | - Reliable dependency management in CI/CD pipelines
17 | - Support for standalone distribution (no global installation required)
18 | - Consistent versions across development and CI/CD environments
19 |
20 | Traditional npm has several limitations:
21 |
22 | - Flat node_modules structure can lead to phantom dependencies (accessing packages not in package.json)
23 | - Slower installation times due to redundant copies of packages
24 | - Higher disk space usage with duplicate packages across projects
25 | - Less strict dependency resolution
26 |
27 | Yarn Classic improved some aspects but still uses flat node_modules and has its own consistency issues. Yarn Berry (v2+) introduced significant breaking changes and has lower adoption.
28 |
29 | ## Decision
30 |
31 | We will use **pnpm** (version 10.17.0) as the exclusive package manager for this project.
32 |
33 | ### Key Features of pnpm
34 |
35 | 1. **Content-Addressable Storage**
36 | - All packages stored once in a global store (~/.pnpm-store)
37 | - node_modules uses hard links to the global store
38 | - Dramatically reduces disk space usage (up to 50% compared to npm)
39 |
40 | 2. **Strict Dependency Resolution**
41 | - Non-flat node_modules structure prevents phantom dependencies
42 | - Only dependencies declared in package.json are accessible
43 | - Catches undeclared dependencies early in development
44 |
45 | 3. **Performance**
46 | - Up to 2x faster than npm for clean installs
47 | - Parallel installation of packages
48 | - Efficient caching and reuse
49 |
50 | 4. **Standalone Distribution**
51 | - Can run as standalone binary without global installation
52 | - Ensures consistent pnpm version across all environments
53 | - Configured via `packageManager` field in package.json
54 |
55 | ### Version Consistency Requirement
56 |
57 | **CRITICAL**: The pnpm version must be consistent across ALL locations:
58 |
59 | 1. **package.json**: `"packageManager": "pnpm@10.17.0"`
60 | 2. **Dockerfile**: `RUN npm install -g pnpm@10.17.0`
61 | 3. **GitHub Actions workflows**: All workflow files using pnpm must specify `version: 10.17.0`
62 | - `.github/workflows/main.yml`
63 | - `.github/workflows/pr.yml`
64 | - `.github/workflows/publish.yml` (PNPM_VERSION environment variable)
65 | - `.github/workflows/reusable-setup.yml` (default pnpm-version input)
66 | - `.github/workflows/reusable-security.yml` (default pnpm-version input)
67 | - `.github/workflows/reusable-validate.yml` (default pnpm-version input)
68 | 4. **Documentation**: README.md, CONTRIBUTING.md
69 | 5. **Setup scripts**: `scripts/setup.sh`
70 |
71 | **Why this matters**: If package.json and GitHub workflows have different pnpm versions, CI/CD fails with:
72 |
73 | ```
74 | Error: Multiple versions of pnpm specified:
75 | - version X in the GitHub Action config with the key "version"
76 | - version pnpm@Y in the package.json with the key "packageManager"
77 | ```
78 |
79 | ### Configuration
80 |
81 | **package.json**:
82 |
83 | ```json
84 | {
85 | "packageManager": "pnpm@10.17.0"
86 | }
87 | ```
88 |
89 | This field:
90 |
91 | - Enables Corepack to automatically use the correct pnpm version
92 | - Ensures all developers and CI/CD use the same version
93 | - Prevents version drift and inconsistent behavior
94 |
95 | ## Consequences
96 |
97 | ### Positive
98 |
99 | - **Disk Space Efficiency**: Saves 50%+ disk space compared to npm through global store
100 | - **Faster Installs**: 2x faster clean installs, even faster with warm cache
101 | - **Phantom Dependency Prevention**: Strict node_modules structure catches undeclared dependencies
102 | - **Better Monorepo Support**: Built-in workspace support (though not needed for this project)
103 | - **Consistent Environments**: `packageManager` field ensures version consistency
104 | - **Standalone Distribution**: No global pnpm installation needed with Corepack
105 | - **Better CI/CD Performance**: Faster installs reduce pipeline execution time
106 | - **Symlink-based Structure**: Easy to understand dependency tree
107 | - **Lock File Determinism**: pnpm-lock.yaml is more deterministic than package-lock.json
108 |
109 | ### Negative
110 |
111 | - **Learning Curve**: Team members familiar with npm/yarn need to learn pnpm commands
112 | - **Ecosystem Compatibility**: Some older tools may not recognize pnpm's node_modules structure
113 | - **Version Management Overhead**: Must update version in multiple locations (mitigated by documentation in CLAUDE.md)
114 | - **IDE Integration**: Some IDEs may not fully support pnpm's symlink structure (rare)
115 | - **Docker Image Size**: Requires installing pnpm in container (minimal overhead)
116 |
117 | ### Neutral
118 |
119 | - **Different Commands**: Some npm/yarn commands have different syntax in pnpm
120 | - **Lock File Format**: pnpm-lock.yaml differs from package-lock.json or yarn.lock
121 | - **Global Store Location**: Requires understanding of ~/.pnpm-store for troubleshooting
122 | - **Corepack Dependency**: Relies on Node.js Corepack (available since Node.js 16.9, still classified as experimental)
123 |
124 | ## Implementation
125 |
126 | ### Installation
127 |
128 | **With Corepack (recommended)**:
129 |
130 | ```bash
131 | corepack enable
132 | corepack prepare pnpm@10.17.0 --activate
133 | ```
134 |
135 | **Direct installation**:
136 |
137 | ```bash
138 | npm install -g pnpm@10.17.0
139 | ```
140 |
141 | ### Common Commands
142 |
143 | ```bash
144 | # Install dependencies
145 | pnpm install
146 |
147 | # Install with frozen lockfile (CI/CD)
148 | pnpm install --frozen-lockfile
149 |
150 | # Add a dependency
151 | pnpm add <package>
152 |
153 | # Add a dev dependency
154 | pnpm add -D <package>
155 |
156 | # Remove a dependency
157 | pnpm remove <package>
158 |
159 | # Run a script
160 | pnpm run <script>
161 | pnpm <script> # Short form
162 |
163 | # Update dependencies
164 | pnpm update
165 |
166 | # Audit dependencies
167 | pnpm audit
168 | ```
169 |
170 | ### CI/CD Integration
171 |
172 | **GitHub Actions**:
173 |
174 | ```yaml
175 | - name: Install pnpm
176 | uses: pnpm/action-setup@v4
177 | with:
178 | version: 10.17.0
179 | run_install: false
180 | standalone: true
181 |
182 | - name: Setup Node.js
183 | uses: actions/setup-node@v4
184 | with:
185 | node-version: 22
186 | cache: pnpm
187 |
188 | - name: Install dependencies
189 | run: pnpm install --frozen-lockfile
190 | ```
191 |
192 | **Dockerfile**:
193 |
194 | ```dockerfile
195 | # Install pnpm globally
196 | RUN npm install -g pnpm@10.17.0
197 |
198 | # Install dependencies
199 | COPY package.json pnpm-lock.yaml ./
200 | RUN pnpm install --frozen-lockfile --prod
201 | ```
202 |
203 | ### Migration from npm
204 |
205 | If migrating from npm:
206 |
207 | 1. Delete `package-lock.json` and `node_modules`
208 | 2. Add `"packageManager": "pnpm@10.17.0"` to package.json
209 | 3. Run `pnpm install` to generate `pnpm-lock.yaml`
210 | 4. Update all CI/CD workflows to use pnpm
211 | 5. Update documentation and developer setup instructions
212 | 6. Commit `pnpm-lock.yaml` to version control
213 |
214 | ## Examples
215 |
216 | ### Before (with npm)
217 |
218 | **Phantom dependency issue**:
219 |
220 | ```typescript
221 | // lodash not in package.json, but accessible via transitive dependency
222 | import _ from 'lodash'; // Works with npm, but fragile
223 |
224 | // If the transitive dependency removes lodash, this breaks
225 | ```
226 |
227 | **Disk space**:
228 |
229 | ```bash
230 | project-1/node_modules/lodash # 1.2 MB
231 | project-2/node_modules/lodash # 1.2 MB
232 | project-3/node_modules/lodash # 1.2 MB
233 | Total: 3.6 MB for 3 projects
234 | ```
235 |
236 | ### After (with pnpm)
237 |
238 | **Phantom dependency prevention**:
239 |
240 | ```typescript
241 | // lodash not in package.json
242 | import _ from 'lodash'; // Error: Cannot find module 'lodash'
243 |
244 | // Forces explicit declaration in package.json
245 | // Results in more reliable dependency management
246 | ```
247 |
248 | **Disk space**:
249 |
250 | ```bash
251 | ~/.pnpm-store/lodash@<version> # 1.2 MB (stored once)
252 | project-1/node_modules/lodash # symlink (few bytes)
253 | project-2/node_modules/lodash # symlink (few bytes)
254 | project-3/node_modules/lodash # symlink (few bytes)
255 | Total: ~1.2 MB for 3 projects (≈67% savings)
256 | ```
257 |
258 | ### Performance Comparison
259 |
260 | Benchmarks on this project (measured on CI/CD):
261 |
262 | | Operation | npm | pnpm | Improvement |
263 | | --------------- | ------ | ------ | ----------- |
264 | | Clean install | ~45s | ~22s | 2.0x faster |
265 | | With warm cache | ~30s | ~8s | 3.8x faster |
266 | | Disk space | 245 MB | 108 MB | 56% smaller |
267 |
268 | ## References
269 |
270 | - pnpm Documentation: https://pnpm.io/
271 | - Motivation for pnpm: https://pnpm.io/motivation
272 | - pnpm CLI Reference: https://pnpm.io/cli/install
273 | - Corepack Documentation: https://nodejs.org/api/corepack.html
274 | - Version Consistency Guidelines: CLAUDE.md "Updating pnpm Version" section
275 | - GitHub Actions Setup: .github/workflows/reusable-setup.yml
276 | - Docker Setup: Dockerfile
277 |
```
--------------------------------------------------------------------------------
/src/__tests__/parameter-transformations.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest';
2 | import { mapToSonarQubeParams, nullToUndefined } from '../index.js';
3 | import { z } from 'zod';
4 | describe('Parameter Transformation Functions', () => {
5 | describe('nullToUndefined', () => {
6 | it('should convert null to undefined', () => {
7 | expect(nullToUndefined(null)).toBeUndefined();
8 | });
9 | it('should return the original value for non-null inputs', () => {
10 | expect(nullToUndefined(0)).toBe(0);
11 | expect(nullToUndefined('')).toBe('');
12 | expect(nullToUndefined('test')).toBe('test');
13 | expect(nullToUndefined(undefined)).toBeUndefined();
14 | expect(nullToUndefined(123)).toBe(123);
15 | expect(nullToUndefined(false)).toBe(false);
16 | expect(nullToUndefined(true)).toBe(true);
17 | const obj = { test: 'value' };
18 | const arr = [1, 2, 3];
19 | expect(nullToUndefined(obj)).toBe(obj);
20 | expect(nullToUndefined(arr)).toBe(arr);
21 | });
22 | });
23 | describe('mapToSonarQubeParams', () => {
24 | it('should map MCP tool parameters to SonarQube client parameters', () => {
25 | const result = mapToSonarQubeParams({
26 | project_key: 'my-project',
27 | severity: 'MAJOR',
28 | page: '10',
29 | page_size: '25',
30 | statuses: ['OPEN', 'CONFIRMED'],
31 | resolutions: ['FALSE-POSITIVE'],
32 | resolved: 'true',
33 | types: ['BUG', 'VULNERABILITY'],
34 | rules: ['rule1', 'rule2'],
35 | tags: ['tag1', 'tag2'],
36 | created_after: '2023-01-01',
37 | created_before: '2023-12-31',
38 | created_at: '2023-06-15',
39 | created_in_last: '30d',
40 | assignees: ['user1', 'user2'],
41 | authors: ['author1', 'author2'],
42 | cwe: ['cwe1', 'cwe2'],
43 | languages: ['java', 'js'],
44 | owasp_top10: ['a1', 'a2'],
45 | sans_top25: ['sans1', 'sans2'],
46 | sonarsource_security: ['ss1', 'ss2'],
47 | on_component_only: 'true',
48 | facets: ['facet1', 'facet2'],
49 | since_leak_period: 'true',
50 | in_new_code_period: 'true',
51 | });
52 | // Check key mappings
53 | expect(result.projectKey).toBe('my-project');
54 | expect(result.severity).toBe('MAJOR');
55 | expect(result.page).toBe('10');
56 | expect(result.pageSize).toBe('25');
57 | expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
58 | expect(result.resolutions).toEqual(['FALSE-POSITIVE']);
59 | expect(result.resolved).toBe('true');
60 | expect(result.types).toEqual(['BUG', 'VULNERABILITY']);
61 | expect(result.rules).toEqual(['rule1', 'rule2']);
62 | expect(result.tags).toEqual(['tag1', 'tag2']);
63 | expect(result.createdAfter).toBe('2023-01-01');
64 | expect(result.createdBefore).toBe('2023-12-31');
65 | expect(result.createdAt).toBe('2023-06-15');
66 | expect(result.createdInLast).toBe('30d');
67 | expect(result.assignees).toEqual(['user1', 'user2']);
68 | expect(result.authors).toEqual(['author1', 'author2']);
69 | expect(result.cwe).toEqual(['cwe1', 'cwe2']);
70 | expect(result.languages).toEqual(['java', 'js']);
71 | expect(result.owaspTop10).toEqual(['a1', 'a2']);
72 | expect(result.sansTop25).toEqual(['sans1', 'sans2']);
73 | expect(result.sonarsourceSecurity).toEqual(['ss1', 'ss2']);
74 | expect(result.onComponentOnly).toBe('true');
75 | expect(result.facets).toEqual(['facet1', 'facet2']);
76 | expect(result.sinceLeakPeriod).toBe('true');
77 | expect(result.inNewCodePeriod).toBe('true');
78 | });
79 | it('should handle null and undefined values correctly', () => {
80 | const result = mapToSonarQubeParams({
81 | project_key: 'my-project',
82 | severity: null,
83 | statuses: null,
84 | resolved: null,
85 | });
86 | expect(result.projectKey).toBe('my-project');
87 | expect(result.severity).toBeUndefined();
88 | expect(result.statuses).toBeUndefined();
89 | expect(result.resolved).toBeUndefined();
90 | });
91 | it('should handle minimal parameters', () => {
92 | const result = mapToSonarQubeParams({
93 | project_key: 'my-project',
94 | });
95 | expect(result.projectKey).toBe('my-project');
96 | expect(result.severity).toBeUndefined();
97 | expect(result.page).toBeUndefined();
98 | expect(result.pageSize).toBeUndefined();
99 | });
100 | it('should handle empty parameters', () => {
101 | const result = mapToSonarQubeParams({
102 | project_key: 'my-project',
103 | statuses: [],
104 | resolutions: [],
105 | types: [],
106 | rules: [],
107 | });
108 | expect(result.projectKey).toBe('my-project');
109 | expect(result.statuses).toEqual([]);
110 | expect(result.resolutions).toEqual([]);
111 | expect(result.types).toEqual([]);
112 | expect(result.rules).toEqual([]);
113 | });
114 | });
115 | describe('Array parameter handling', () => {
116 | it('should handle array handling for issues parameters', () => {
117 | // Test with arrays
118 | const result1 = mapToSonarQubeParams({
119 | project_key: 'project1',
120 | statuses: ['OPEN', 'CONFIRMED'],
121 | types: ['BUG', 'VULNERABILITY'],
122 | });
123 | expect(result1.statuses).toEqual(['OPEN', 'CONFIRMED']);
124 | expect(result1.types).toEqual(['BUG', 'VULNERABILITY']);
125 | // Test with null
126 | const result2 = mapToSonarQubeParams({
127 | project_key: 'project1',
128 | statuses: null,
129 | types: null,
130 | });
131 | expect(result2.statuses).toBeUndefined();
132 | expect(result2.types).toBeUndefined();
133 | });
134 | });
135 | describe('Schema Transformations', () => {
136 | describe('Page Parameter Transformation', () => {
137 | it('should transform string values to numbers or null', () => {
138 | const pageSchema = z
139 | .string()
140 | .optional()
141 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
142 | // Test valid numeric strings
143 | expect(pageSchema.parse('1')).toBe(1);
144 | expect(pageSchema.parse('100')).toBe(100);
145 | // Test invalid values
146 | expect(pageSchema.parse('invalid')).toBe(null);
147 | expect(pageSchema.parse('')).toBe(null);
148 | expect(pageSchema.parse(undefined)).toBe(null);
149 | });
150 | });
151 | describe('Boolean Parameter Transformation', () => {
152 | it('should transform string "true"/"false" to boolean values', () => {
153 | const booleanSchema = z
154 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
155 | .nullable()
156 | .optional();
157 | // String values
158 | expect(booleanSchema.parse('true')).toBe(true);
159 | expect(booleanSchema.parse('false')).toBe(false);
160 | // Boolean values should pass through
161 | expect(booleanSchema.parse(true)).toBe(true);
162 | expect(booleanSchema.parse(false)).toBe(false);
163 | // Null/undefined values
164 | expect(booleanSchema.parse(null)).toBe(null);
165 | expect(booleanSchema.parse(undefined)).toBe(undefined);
166 | });
167 | });
168 | describe('Enum Arrays Parameter Transformation', () => {
169 | it('should validate enum arrays correctly', () => {
170 | const statusSchema = z
171 | .array(
172 | z.enum([
173 | 'OPEN',
174 | 'CONFIRMED',
175 | 'REOPENED',
176 | 'RESOLVED',
177 | 'CLOSED',
178 | 'TO_REVIEW',
179 | 'IN_REVIEW',
180 | 'REVIEWED',
181 | ])
182 | )
183 | .nullable()
184 | .optional();
185 | // Valid values
186 | expect(statusSchema.parse(['OPEN', 'CONFIRMED'])).toEqual(['OPEN', 'CONFIRMED']);
187 | // Null/undefined values
188 | expect(statusSchema.parse(null)).toBe(null);
189 | expect(statusSchema.parse(undefined)).toBe(undefined);
190 | // Invalid values should throw
191 | expect(() => statusSchema.parse(['INVALID'])).toThrow();
192 | });
193 | it('should validate resolution enums', () => {
194 | const resolutionSchema = z
195 | .array(z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']))
196 | .nullable()
197 | .optional();
198 | // Valid values
199 | expect(resolutionSchema.parse(['FALSE-POSITIVE', 'WONTFIX'])).toEqual([
200 | 'FALSE-POSITIVE',
201 | 'WONTFIX',
202 | ]);
203 | // Null/undefined values
204 | expect(resolutionSchema.parse(null)).toBe(null);
205 | expect(resolutionSchema.parse(undefined)).toBe(undefined);
206 | // Invalid values should throw
207 | expect(() => resolutionSchema.parse(['INVALID'])).toThrow();
208 | });
209 | it('should validate issue type enums', () => {
210 | const typeSchema = z
211 | .array(z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']))
212 | .nullable()
213 | .optional();
214 | // Valid values
215 | expect(typeSchema.parse(['CODE_SMELL', 'BUG'])).toEqual(['CODE_SMELL', 'BUG']);
216 | // Null/undefined values
217 | expect(typeSchema.parse(null)).toBe(null);
218 | expect(typeSchema.parse(undefined)).toBe(undefined);
219 | // Invalid values should throw
220 | expect(() => typeSchema.parse(['INVALID'])).toThrow();
221 | });
222 | });
223 | });
224 | });
225 |
```
--------------------------------------------------------------------------------
/src/__tests__/assign-issue.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { vi, describe, it, expect, beforeEach } from 'vitest';
2 | import { IssuesDomain } from '../domains/issues.js';
3 | import { handleAssignIssue } from '../handlers/issues.js';
4 | import type { SonarQubeIssue } from '../types/issues.js';
5 |
6 | // Extended issue type for testing with assignee fields
7 | type SonarQubeIssueWithAssignee = SonarQubeIssue & {
8 | assignee?: string | null;
9 | assigneeName?: string | null;
10 | resolution?: string | null;
11 | };
12 |
13 | describe('Assign Issue Functionality', () => {
14 | const organization = 'test-org';
15 |
16 | beforeEach(() => {
17 | vi.clearAllMocks();
18 | });
19 |
20 | describe('IssuesDomain.assignIssue', () => {
21 | it('should assign an issue and return updated details', async () => {
22 | const issueKey = 'ISSUE-123';
23 | const assignee = 'jane.doe';
24 |
25 | const mockSearchBuilder = {
26 | withIssues: vi.fn().mockReturnThis(),
27 | withAdditionalFields: vi.fn().mockReturnThis(),
28 | execute: vi.fn<() => Promise<any>>().mockResolvedValue({
29 | issues: [
30 | {
31 | key: issueKey,
32 | rule: 'test-rule',
33 | component: 'test-component',
34 | project: 'test-project',
35 | message: 'Test issue',
36 | assignee: assignee,
37 | assigneeName: 'Jane Doe',
38 | severity: 'CRITICAL',
39 | type: 'VULNERABILITY',
40 | status: 'OPEN',
41 | tags: [],
42 | creationDate: '2023-01-01T00:00:00.000Z',
43 | updateDate: '2023-01-01T00:00:00.000Z',
44 | } as unknown as SonarQubeIssueWithAssignee,
45 | ],
46 | total: 1,
47 | }),
48 | };
49 |
50 | const mockWebApiClient = {
51 | issues: {
52 | assign: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({}),
53 | search: vi.fn().mockReturnValue(mockSearchBuilder),
54 | },
55 | };
56 |
57 | const issuesDomain = new IssuesDomain(mockWebApiClient as any, organization);
58 | const result = await issuesDomain.assignIssue({
59 | issueKey,
60 | assignee,
61 | });
62 |
63 | expect(mockWebApiClient.issues.assign).toHaveBeenCalledWith({
64 | issue: issueKey,
65 | assignee: assignee,
66 | });
67 |
68 | expect(mockWebApiClient.issues.search).toHaveBeenCalled();
69 | expect(mockSearchBuilder.withIssues).toHaveBeenCalledWith([issueKey]);
70 | expect(mockSearchBuilder.withAdditionalFields).toHaveBeenCalledWith(['_all']);
71 | expect(mockSearchBuilder.execute).toHaveBeenCalled();
72 |
73 | expect(result.key).toBe(issueKey);
74 | expect((result as SonarQubeIssueWithAssignee).assignee).toBe(assignee);
75 | });
76 |
77 | it('should handle unassignment', async () => {
78 | const issueKey = 'ISSUE-456';
79 |
80 | const mockSearchBuilder = {
81 | withIssues: vi.fn().mockReturnThis(),
82 | withAdditionalFields: vi.fn().mockReturnThis(),
83 | execute: vi.fn<() => Promise<any>>().mockResolvedValue({
84 | issues: [
85 | {
86 | key: issueKey,
87 | rule: 'test-rule',
88 | component: 'test-component',
89 | project: 'test-project',
90 | message: 'Test issue',
91 | assignee: null,
92 | assigneeName: null,
93 | severity: 'INFO',
94 | type: 'CODE_SMELL',
95 | status: 'OPEN',
96 | tags: [],
97 | creationDate: '2023-01-01T00:00:00.000Z',
98 | updateDate: '2023-01-01T00:00:00.000Z',
99 | } as unknown as SonarQubeIssueWithAssignee,
100 | ],
101 | total: 1,
102 | }),
103 | };
104 |
105 | const mockWebApiClient = {
106 | issues: {
107 | assign: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({}),
108 | search: vi.fn().mockReturnValue(mockSearchBuilder),
109 | },
110 | };
111 |
112 | const issuesDomain = new IssuesDomain(mockWebApiClient as any, organization);
113 | const result = await issuesDomain.assignIssue({
114 | issueKey,
115 | });
116 |
117 | expect(mockWebApiClient.issues.assign).toHaveBeenCalledWith({
118 | issue: issueKey,
119 | assignee: undefined,
120 | });
121 |
122 | expect((result as SonarQubeIssueWithAssignee).assignee).toBeNull();
123 | });
124 |
125 | it('should throw error if issue not found after assignment', async () => {
126 | const issueKey = 'ISSUE-999';
127 |
128 | const mockSearchBuilder = {
129 | withIssues: vi.fn().mockReturnThis(),
130 | withAdditionalFields: vi.fn().mockReturnThis(),
131 | execute: vi.fn<() => Promise<any>>().mockResolvedValue({
132 | issues: [],
133 | total: 0,
134 | }),
135 | };
136 |
137 | const mockWebApiClient = {
138 | issues: {
139 | assign: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({}),
140 | search: vi.fn().mockReturnValue(mockSearchBuilder),
141 | },
142 | };
143 |
144 | const issuesDomain = new IssuesDomain(mockWebApiClient as any, organization);
145 |
146 | await expect(
147 | issuesDomain.assignIssue({
148 | issueKey,
149 | })
150 | ).rejects.toThrow(`Issue ${issueKey} not found after assignment`);
151 | });
152 | });
153 |
154 | describe('handleAssignIssue', () => {
155 | it('should handle issue assignment and return formatted response', async () => {
156 | const mockClient = {
157 | assignIssue: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({
158 | key: 'ISSUE-123',
159 | rule: 'test-rule',
160 | component: 'src/main.js',
161 | project: 'test-project',
162 | message: 'Test issue message',
163 | assignee: 'john.doe',
164 | assigneeName: 'John Doe',
165 | severity: 'MAJOR',
166 | type: 'BUG',
167 | status: 'OPEN',
168 | resolution: null,
169 | tags: [],
170 | creationDate: '2023-01-01T00:00:00.000Z',
171 | updateDate: '2023-01-01T00:00:00.000Z',
172 | } as unknown as SonarQubeIssueWithAssignee),
173 | };
174 |
175 | const result = await handleAssignIssue(
176 | {
177 | issueKey: 'ISSUE-123',
178 | assignee: 'john.doe',
179 | },
180 |
181 | mockClient as any
182 | );
183 |
184 | expect(mockClient.assignIssue).toHaveBeenCalledWith({
185 | issueKey: 'ISSUE-123',
186 | assignee: 'john.doe',
187 | });
188 |
189 | expect(result.content).toHaveLength(1);
190 | expect(result.content[0]?.type).toBe('text');
191 |
192 | const contentText = result.content[0]?.text;
193 | expect(contentText).toBeDefined();
194 | const parsedContent = JSON.parse(contentText as string) as {
195 | message: string;
196 | issue: {
197 | key: string;
198 | assignee: string | null;
199 | severity: string;
200 | };
201 | };
202 | expect(parsedContent.message).toContain('Assigned to: John Doe');
203 | expect(parsedContent.issue.key).toBe('ISSUE-123');
204 | expect(parsedContent.issue.assignee).toBe('john.doe');
205 | expect(parsedContent.issue.severity).toBe('MAJOR');
206 | });
207 |
208 | it('should handle issue unassignment', async () => {
209 | const mockClient = {
210 | assignIssue: vi.fn<(params: any) => Promise<any>>().mockResolvedValue({
211 | key: 'ISSUE-456',
212 | rule: 'test-rule',
213 | component: 'src/utils.js',
214 | project: 'test-project',
215 | message: 'Another test issue',
216 | assignee: null,
217 | assigneeName: null,
218 | severity: 'MINOR',
219 | type: 'CODE_SMELL',
220 | status: 'CONFIRMED',
221 | resolution: null,
222 | tags: [],
223 | creationDate: '2023-01-01T00:00:00.000Z',
224 | updateDate: '2023-01-01T00:00:00.000Z',
225 | } as unknown as SonarQubeIssueWithAssignee),
226 | };
227 |
228 | const result = await handleAssignIssue(
229 | {
230 | issueKey: 'ISSUE-456',
231 | },
232 |
233 | mockClient as any
234 | );
235 |
236 | expect(mockClient.assignIssue).toHaveBeenCalledWith({
237 | issueKey: 'ISSUE-456',
238 | assignee: undefined,
239 | });
240 |
241 | expect(result.content).toHaveLength(1);
242 | expect(result.content[0]?.type).toBe('text');
243 |
244 | const contentText = result.content[0]?.text;
245 | expect(contentText).toBeDefined();
246 | const parsedContent = JSON.parse(contentText as string) as {
247 | message: string;
248 | issue: {
249 | key: string;
250 | assignee: string | null;
251 | severity: string;
252 | };
253 | };
254 | expect(parsedContent.message).toContain('Issue unassigned');
255 | expect(parsedContent.issue.key).toBe('ISSUE-456');
256 | expect(parsedContent.issue.assignee).toBeNull();
257 | expect(parsedContent.issue.severity).toBe('MINOR');
258 | });
259 |
260 | it('should handle errors gracefully', async () => {
261 | const mockClient = {
262 | assignIssue: vi
263 | .fn<(params: any) => Promise<any>>()
264 | .mockRejectedValue(new Error('API Error')),
265 | };
266 |
267 | await expect(
268 | handleAssignIssue(
269 | {
270 | issueKey: 'ISSUE-789',
271 | assignee: 'invalid.user',
272 | },
273 |
274 | mockClient as any
275 | )
276 | ).rejects.toThrow('API Error');
277 |
278 | expect(mockClient.assignIssue).toHaveBeenCalledWith({
279 | issueKey: 'ISSUE-789',
280 | assignee: 'invalid.user',
281 | });
282 | });
283 | });
284 | });
285 |
```
--------------------------------------------------------------------------------
/scripts/load-test.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 | # Load testing script for SonarQube MCP Server auto-scaling validation
3 | # Tests HPA behavior under load using k6 or Apache Bench
4 |
5 | set -e # Exit on error
6 |
7 | # Colors for output
8 | RED='\033[0;31m'
9 | GREEN='\033[0;32m'
10 | YELLOW='\033[1;33m'
11 | BLUE='\033[0;34m'
12 | NC='\033[0m' # No Color
13 |
14 | echo -e "${GREEN}⚡ SonarQube MCP Server - Load Testing & Auto-scaling Validation${NC}"
15 | echo "================================================================="
16 |
17 | # Configuration
18 | NAMESPACE="${NAMESPACE:-sonarqube-mcp}"
19 | SERVICE_NAME="${SERVICE_NAME:-sonarqube-mcp}"
20 | PORT="${PORT:-3000}"
21 | DURATION="${DURATION:-300}" # 5 minutes default
22 | CONCURRENT_USERS="${CONCURRENT_USERS:-50}"
23 | REQUESTS_PER_SECOND="${RPS:-100}"
24 |
25 | # Function to check if a command exists
26 | command_exists() {
27 | command -v "$1" >/dev/null 2>&1
28 | }
29 |
30 | # Check prerequisites
31 | echo -e "\n${YELLOW}📋 Checking prerequisites...${NC}"
32 |
33 | # Check for kubectl
34 | if ! command_exists kubectl; then
35 | echo -e "${RED}❌ kubectl is not installed. Please install it first.${NC}"
36 | exit 1
37 | fi
38 |
39 | # Check for load testing tools
40 | LOAD_TOOL=""
41 | if command_exists k6; then
42 | LOAD_TOOL="k6"
43 | echo -e "✅ k6 is installed"
44 | elif command_exists ab; then
45 | LOAD_TOOL="ab"
46 | echo -e "✅ Apache Bench is installed"
47 | else
48 | echo -e "${RED}❌ No load testing tool found. Please install k6 or Apache Bench.${NC}"
49 | echo " Install k6: brew install k6 (macOS) or https://k6.io/docs/getting-started/installation/"
50 | echo " Install ab: Usually comes with Apache (httpd-tools package)"
51 | exit 1
52 | fi
53 |
54 | # Function to get current replica count
55 | get_replica_count() {
56 | kubectl get deployment "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath='{.status.replicas}' 2>/dev/null || echo "0"
57 | }
58 |
59 | # Function to get HPA status
60 | get_hpa_status() {
61 | kubectl get hpa -n "$NAMESPACE" -o wide 2>/dev/null || echo "No HPA found"
62 | }
63 |
64 | # Function to monitor resources
65 | monitor_resources() {
66 | echo -e "\n${BLUE}📊 Monitoring resources during load test...${NC}"
67 | echo "Time | Replicas | CPU Usage | Memory Usage | Ready Pods"
68 | echo "--------------------------------------------------------"
69 |
70 | while true; do
71 | timestamp=$(date +"%H:%M:%S")
72 | replicas=$(get_replica_count)
73 |
74 | # Get CPU and memory from HPA
75 | hpa_info=$(kubectl get hpa "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath='{.status.currentCPUUtilizationPercentage}:{.status.currentReplicas}:{.status.desiredReplicas}' 2>/dev/null || echo "0:0:0")
76 | cpu_usage=$(echo "$hpa_info" | cut -d: -f1)
77 | current_replicas=$(echo "$hpa_info" | cut -d: -f2)
78 | desired_replicas=$(echo "$hpa_info" | cut -d: -f3)
79 |
80 | # Get ready pods
81 | ready_pods=$(kubectl get pods -n "$NAMESPACE" -l "app.kubernetes.io/name=$SERVICE_NAME" -o jsonpath='{.items[?(@.status.conditions[?(@.type=="Ready")].status=="True")].metadata.name}' | wc -w | tr -d ' ')
82 |
83 | echo "$timestamp | $current_replicas/$desired_replicas | ${cpu_usage:-N/A}% | N/A | $ready_pods"
84 |
85 | sleep 5
86 | done
87 | }
88 |
# Create k6 test script
# Writes the k6 scenario to /tmp. The heredoc delimiter is quoted ('EOF') so
# the JavaScript is emitted verbatim; all configuration is read from __ENV at
# k6 runtime rather than being expanded by the shell.
create_k6_script() {
  cat > /tmp/sonarqube-mcp-load-test.js << 'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';

const errorRate = new Rate('errors');

// __ENV values are strings; convert explicitly instead of relying on
// implicit arithmetic coercion.
const users = Number(__ENV.CONCURRENT_USERS);
const totalDuration = Number(__ENV.DURATION);

// The ramp stages take 90s in total. Guard against DURATION <= 90, which
// previously produced a zero/negative sustain duration that k6 rejects.
const sustainSeconds = Math.max(totalDuration - 90, 10);

export const options = {
  stages: [
    { duration: '30s', target: Math.ceil(users / 2) }, // Ramp up to half users (integer VU target)
    { duration: '30s', target: users }, // Ramp up to full users
    { duration: `${sustainSeconds}s`, target: users }, // Stay at full load
    { duration: '30s', target: 0 }, // Ramp down
  ],
  thresholds: {
    http_req_duration: ['p(95)<500'], // 95% of requests should be below 500ms
    errors: ['rate<0.1'], // Error rate should be below 10%
  },
};

const BASE_URL = `http://${__ENV.SERVICE_URL}`;

export default function () {
  // Exercise the lightweight operational endpoints at random.
  const endpoints = [
    '/health',
    '/ready',
    '/metrics',
  ];

  const endpoint = endpoints[Math.floor(Math.random() * endpoints.length)];

  const res = http.get(`${BASE_URL}${endpoint}`);

  // 503 is accepted as well as 200 — the script treats "degraded but
  // responding" as success (matches the health check in the main section).
  const success = check(res, {
    'status is 200 or 503': (r) => r.status === 200 || r.status === 503,
    'response time < 500ms': (r) => r.timings.duration < 500,
  });

  errorRate.add(!success);

  // Random sleep between 0.5 and 2 seconds to approximate human pacing
  sleep(Math.random() * 1.5 + 0.5);
}
EOF
}
138 |
# Function to run k6 load test
# Generates the k6 script and runs it against the forwarded service URL.
# Configuration is handed to k6 via -e flags so the script reads it from __ENV.
run_k6_test() {
  local service_url=$1

  echo -e "\n${BLUE}🚀 Running k6 load test...${NC}"
  echo "Target: http://$service_url"
  echo "Duration: $DURATION seconds"
  echo "Concurrent users: $CONCURRENT_USERS"

  create_k6_script

  k6 run \
    -e SERVICE_URL="$service_url" \
    -e CONCURRENT_USERS="$CONCURRENT_USERS" \
    -e DURATION="$DURATION" \
    /tmp/sonarqube-mcp-load-test.js
}
156 |
# Function to run Apache Bench test
# Hits only /health. With both -n and -t supplied, ab stops at whichever
# limit (total requests or elapsed seconds) is reached first; -s 30 caps the
# per-request socket timeout.
run_ab_test() {
  local service_url=$1

  echo -e "\n${BLUE}🚀 Running Apache Bench load test...${NC}"
  echo "Target: http://$service_url/health"
  echo "Duration: $DURATION seconds"
  echo "Concurrent users: $CONCURRENT_USERS"

  # Calculate total requests
  # NOTE(review): assumes REQUESTS_PER_SECOND is defined in the script's
  # configuration section above this chunk — confirm it has a default.
  total_requests=$((REQUESTS_PER_SECOND * DURATION))

  ab -n "$total_requests" \
    -c "$CONCURRENT_USERS" \
    -t "$DURATION" \
    -s 30 \
    "http://$service_url/health"
}
175 |
# Main execution
echo -e "\n${YELLOW}🔍 Checking deployment status...${NC}"

# Check if deployment exists; nothing to test without it
if ! kubectl get deployment "$SERVICE_NAME" -n "$NAMESPACE" >/dev/null 2>&1; then
  echo -e "${RED}❌ Deployment $SERVICE_NAME not found in namespace $NAMESPACE${NC}"
  exit 1
fi

# Check if HPA exists (non-fatal: the load test still runs without scaling)
if ! kubectl get hpa "$SERVICE_NAME" -n "$NAMESPACE" >/dev/null 2>&1; then
  echo -e "${RED}❌ HPA not found for $SERVICE_NAME in namespace $NAMESPACE${NC}"
  echo "Load testing without auto-scaling..."
fi

# Get initial state. Capture the replica count NOW so the end-of-test
# comparison uses the real pre-test baseline (previously the summary
# hard-coded INITIAL_REPLICAS=1, giving wrong results for deployments
# already running more than one replica).
INITIAL_REPLICAS=$(get_replica_count)
echo -e "\n${BLUE}📊 Initial state:${NC}"
echo "Deployment: $SERVICE_NAME"
echo "Namespace: $NAMESPACE"
echo "Initial replicas: $INITIAL_REPLICAS"
echo -e "\nHPA Status:"
get_hpa_status

# Set up port forwarding so the load generator can reach the service locally
echo -e "\n${YELLOW}🔌 Setting up port forwarding...${NC}"
kubectl port-forward -n "$NAMESPACE" "svc/$SERVICE_NAME" 8080:$PORT > /dev/null 2>&1 &
PF_PID=$!
sleep 3

# Verify service is accessible (503 accepted: service may be degraded but up)
if ! curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/health | grep -q "200\|503"; then
  echo -e "${RED}❌ Service is not responding correctly${NC}"
  kill $PF_PID 2>/dev/null
  exit 1
fi

echo -e "${GREEN}✅ Service is accessible${NC}"

# Start resource monitoring in background (killed after the test)
monitor_resources &
MONITOR_PID=$!

# Run load test based on available tool
SERVICE_URL="localhost:8080"

echo -e "\n${YELLOW}⚡ Starting load test...${NC}"
START_TIME=$(date +%s)

case "$LOAD_TOOL" in
  k6)
    run_k6_test "$SERVICE_URL"
    ;;
  ab)
    run_ab_test "$SERVICE_URL"
    ;;
esac

END_TIME=$(date +%s)
DURATION_ACTUAL=$((END_TIME - START_TIME))

# Stop monitoring
kill $MONITOR_PID 2>/dev/null

# Wait for scale down (HPA downscale stabilization takes time)
echo -e "\n${YELLOW}⏳ Waiting 60 seconds for scale down...${NC}"
sleep 60

# Get final state
echo -e "\n${BLUE}📊 Final state:${NC}"
echo "Final replicas: $(get_replica_count)"
echo -e "\nHPA Status:"
get_hpa_status

# Show pod events during test
echo -e "\n${BLUE}📝 Pod events during test:${NC}"
kubectl get events -n "$NAMESPACE" --field-selector involvedObject.kind=Pod \
  --sort-by='.lastTimestamp' | grep -E "(Scaled|Started|Killing)" | tail -10

# Analyze HPA metrics
echo -e "\n${BLUE}📈 HPA scaling analysis:${NC}"

# Get HPA events
kubectl describe hpa "$SERVICE_NAME" -n "$NAMESPACE" | grep -A 20 "Events:" || echo "No HPA events found"

# Cleanup
echo -e "\n${YELLOW}🧹 Cleaning up...${NC}"
kill $PF_PID 2>/dev/null
rm -f /tmp/sonarqube-mcp-load-test.js

# Summary
echo -e "\n================================================================="
echo -e "${GREEN}📊 Load Test Summary:${NC}"
echo "Duration: $DURATION_ACTUAL seconds"
echo "Load tool: $LOAD_TOOL"
echo "Concurrent users: $CONCURRENT_USERS"

# Check if scaling occurred, comparing against the baseline captured above
FINAL_REPLICAS=$(get_replica_count)

if [ "$FINAL_REPLICAS" -gt "$INITIAL_REPLICAS" ]; then
  echo -e "\n${GREEN}✅ Auto-scaling worked!${NC}"
  echo "Scaled from $INITIAL_REPLICAS to $FINAL_REPLICAS replicas"
else
  echo -e "\n${YELLOW}⚠️ No scaling observed${NC}"
  echo "This could mean:"
  echo " - Load was not high enough to trigger scaling"
  echo " - HPA thresholds are too high"
  echo " - HPA is not configured correctly"
fi

echo -e "\n${YELLOW}💡 Tips:${NC}"
echo "- Increase CONCURRENT_USERS to generate more load"
echo "- Extend DURATION for longer tests"
echo "- Monitor 'kubectl top pods -n $NAMESPACE' during testing"
echo "- Check HPA configuration: kubectl describe hpa -n $NAMESPACE"
```
--------------------------------------------------------------------------------
/src/utils/__tests__/elicitation.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, vi, beforeEach } from 'vitest';
2 | import type { Mocked } from 'vitest';
3 | import { Server } from '@modelcontextprotocol/sdk/server/index.js';
4 | import { ElicitationManager, createElicitationManager } from '../elicitation.js';
5 |
// Unit tests for ElicitationManager. The MCP Server is replaced by a mock
// exposing only `elicitInput`, which is the sole method the manager calls.
describe('ElicitationManager', () => {
  let manager: ElicitationManager;
  let mockServer: Mocked<Server>;

  beforeEach(() => {
    // Reset environment variables
    // NOTE(review): original values are not saved/restored here; assumes the
    // test runner environment does not set these variables.
    delete process.env.SONARQUBE_MCP_ELICITATION;
    delete process.env.SONARQUBE_MCP_BULK_THRESHOLD;
    delete process.env.SONARQUBE_MCP_REQUIRE_COMMENTS;
    delete process.env.SONARQUBE_MCP_INTERACTIVE_SEARCH;

    manager = new ElicitationManager();
    mockServer = {
      elicitInput: vi.fn(),
    } as unknown as Mocked<Server>;
  });

  describe('initialization', () => {
    it('should default to disabled', () => {
      expect(manager.isEnabled()).toBe(false);
    });

    // Being enabled requires BOTH the option and an attached server.
    it('should not be enabled without a server', () => {
      manager.updateOptions({ enabled: true });
      expect(manager.isEnabled()).toBe(false);
    });

    it('should be enabled with server and enabled option', () => {
      manager.updateOptions({ enabled: true });
      manager.setServer(mockServer);
      expect(manager.isEnabled()).toBe(true);
    });

    it('should respect environment variables', () => {
      process.env.SONARQUBE_MCP_ELICITATION = 'true';
      process.env.SONARQUBE_MCP_BULK_THRESHOLD = '10';
      process.env.SONARQUBE_MCP_REQUIRE_COMMENTS = 'true';
      process.env.SONARQUBE_MCP_INTERACTIVE_SEARCH = 'true';

      const envManager = createElicitationManager();
      expect(envManager.getOptions()).toEqual({
        enabled: true,
        bulkOperationThreshold: 10,
        requireComments: true,
        interactiveSearch: true,
      });
    });
  });

  describe('confirmBulkOperation', () => {
    beforeEach(() => {
      manager.updateOptions({ enabled: true, bulkOperationThreshold: 5 });
      manager.setServer(mockServer);
    });

    it('should auto-accept when disabled', async () => {
      manager.updateOptions({ enabled: false });
      const result = await manager.confirmBulkOperation('delete', 10);
      expect(result).toEqual({ action: 'accept', content: { confirm: true } });
      expect(mockServer.elicitInput).not.toHaveBeenCalled();
    });

    it('should auto-accept when below threshold', async () => {
      const result = await manager.confirmBulkOperation('delete', 3);
      expect(result).toEqual({ action: 'accept', content: { confirm: true } });
      expect(mockServer.elicitInput).not.toHaveBeenCalled();
    });

    it('should request confirmation when above threshold', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { confirm: true, comment: 'Test comment' },
      });

      const result = await manager.confirmBulkOperation('delete', 10, ['item1', 'item2']);

      expect(mockServer.elicitInput).toHaveBeenCalledWith({
        message: expect.stringContaining('delete 10 items'),
        requestedSchema: expect.objectContaining({
          properties: expect.objectContaining({
            confirm: expect.any(Object),
            comment: expect.any(Object),
          }),
        }),
      });

      expect(result).toEqual({
        action: 'accept',
        content: { confirm: true, comment: 'Test comment' },
      });
    });

    // `confirm: false` from the user maps to a 'reject' result.
    it('should handle user rejection', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { confirm: false },
      });

      const result = await manager.confirmBulkOperation('delete', 10);
      expect(result).toEqual({ action: 'reject' });
    });

    it('should handle cancellation', async () => {
      mockServer.elicitInput.mockResolvedValue({ action: 'cancel' });

      const result = await manager.confirmBulkOperation('delete', 10);
      expect(result).toEqual({ action: 'cancel' });
    });

    // Transport failures must degrade to a cancel, never throw to the caller.
    it('should handle errors gracefully', async () => {
      mockServer.elicitInput.mockRejectedValue(new Error('Test error'));

      const result = await manager.confirmBulkOperation('delete', 10);
      expect(result).toEqual({ action: 'cancel' });
    });
  });

  describe('collectAuthentication', () => {
    beforeEach(() => {
      manager.updateOptions({ enabled: true });
      manager.setServer(mockServer);
    });

    // Auth collection has no safe default, so disabled => cancel (not accept).
    it('should cancel when disabled', async () => {
      manager.updateOptions({ enabled: false });
      const result = await manager.collectAuthentication();
      expect(result).toEqual({ action: 'cancel' });
    });

    it('should collect token authentication', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { method: 'token', token: 'test-token' },
      });

      const result = await manager.collectAuthentication();

      expect(mockServer.elicitInput).toHaveBeenCalledWith({
        message: expect.stringContaining('authentication is not configured'),
        requestedSchema: expect.any(Object),
      });

      expect(result).toEqual({
        action: 'accept',
        content: { method: 'token', token: 'test-token' },
      });
    });

    it('should validate authentication schema', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { method: 'basic', username: 'user', password: 'pass' },
      });

      const result = await manager.collectAuthentication();
      expect(result.action).toBe('accept');
      expect(result.content).toEqual({
        method: 'basic',
        username: 'user',
        password: 'pass',
      });
    });
  });

  describe('collectResolutionComment', () => {
    beforeEach(() => {
      manager.updateOptions({ enabled: true, requireComments: true });
      manager.setServer(mockServer);
    });

    it('should auto-accept when comments not required', async () => {
      manager.updateOptions({ requireComments: false });
      const result = await manager.collectResolutionComment('ISSUE-123', 'false positive');
      expect(result).toEqual({ action: 'accept', content: { comment: '' } });
      expect(mockServer.elicitInput).not.toHaveBeenCalled();
    });

    it('should request comment when required', async () => {
      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { comment: 'This is a test pattern' },
      });

      const result = await manager.collectResolutionComment('ISSUE-123', 'false positive');

      // The JSON schema sent to the client must carry the zod length limits.
      expect(mockServer.elicitInput).toHaveBeenCalledWith({
        message: expect.stringContaining('ISSUE-123'),
        requestedSchema: expect.objectContaining({
          properties: expect.objectContaining({
            comment: expect.objectContaining({
              minLength: 1,
              maxLength: 500,
              type: 'string',
            }),
          }),
        }),
      });

      expect(result).toEqual({
        action: 'accept',
        content: { comment: 'This is a test pattern' },
      });
    });
  });

  describe('disambiguateSelection', () => {
    beforeEach(() => {
      manager.updateOptions({ enabled: true, interactiveSearch: true });
      manager.setServer(mockServer);
    });

    // A single candidate is returned without prompting.
    it('should auto-select single item', async () => {
      const items = [{ name: 'Project A', key: 'proj-a' }];
      const result = await manager.disambiguateSelection(items, 'project');
      expect(result).toEqual({ action: 'accept', content: { selection: 'proj-a' } });
    });

    it('should request selection for multiple items', async () => {
      const items = [
        { name: 'Project A', key: 'proj-a' },
        { name: 'Project B', key: 'proj-b' },
        { name: 'Project C', key: 'proj-c' },
      ];

      mockServer.elicitInput.mockResolvedValue({
        action: 'accept',
        content: { selection: 'proj-b' },
      });

      const result = await manager.disambiguateSelection(items, 'project');

      expect(mockServer.elicitInput).toHaveBeenCalledWith({
        message: expect.stringContaining('Multiple projects found'),
        requestedSchema: expect.objectContaining({
          properties: expect.objectContaining({
            selection: expect.objectContaining({
              enum: ['proj-a', 'proj-b', 'proj-c'],
            }),
          }),
        }),
      });

      expect(result).toEqual({
        action: 'accept',
        content: { selection: 'proj-b' },
      });
    });

    // With interactive search off, the first item wins silently.
    it('should not request when interactive search disabled', async () => {
      manager.updateOptions({ interactiveSearch: false });

      const items = [
        { name: 'Project A', key: 'proj-a' },
        { name: 'Project B', key: 'proj-b' },
      ];

      const result = await manager.disambiguateSelection(items, 'project');
      expect(result).toEqual({ action: 'accept', content: { selection: 'proj-a' } });
      expect(mockServer.elicitInput).not.toHaveBeenCalled();
    });
  });
});
268 |
```
--------------------------------------------------------------------------------
/src/utils/elicitation.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Server } from '@modelcontextprotocol/sdk/server/index.js';
2 | import { z } from 'zod';
3 | import { zodToJsonSchema } from 'zod-to-json-schema';
4 | import { createLogger } from './logger.js';
5 |
/**
 * Feature switches for interactive elicitation prompts.
 * All default to off (see ElicitationManager's constructor defaults).
 */
export interface ElicitationOptions {
  // Master switch; prompting additionally requires an attached server
  // (see ElicitationManager.isEnabled()).
  enabled: boolean;
  // Bulk operations with itemCount >= this value trigger a confirmation prompt.
  bulkOperationThreshold: number;
  // When true, resolving an issue requires an explanatory comment.
  requireComments: boolean;
  // When true, ambiguous search results trigger a selection prompt.
  interactiveSearch: boolean;
}

/**
 * Outcome of one elicitation round-trip. 'accept' carries parsed content;
 * 'reject' means the user explicitly said no; 'cancel' covers dismissal and
 * transport errors; 'decline' mirrors the MCP protocol action.
 */
export interface ElicitationResult<T = unknown> {
  action: 'accept' | 'reject' | 'cancel' | 'decline';
  content?: T;
}

// Schema for yes/no confirmation prompts (bulk operations).
export const confirmationSchema = z.object({
  confirm: z.boolean().describe('Confirm the operation'),
  comment: z.string().max(500).optional().describe('Optional comment'),
});

// Schema for collecting SonarQube credentials. The refinement enforces the
// per-method required fields (token / username+password / passcode).
export const authSchema = z
  .object({
    method: z.enum(['token', 'basic', 'passcode']).describe('Authentication method'),
    token: z.string().optional().describe('SonarQube token (for token auth)'),
    username: z.string().optional().describe('Username (for basic auth)'),
    password: z.string().optional().describe('Password (for basic auth)'),
    passcode: z.string().optional().describe('System passcode'),
  })
  .refine(
    (data) => {
      if (data.method === 'token' && !data.token) return false;
      if (data.method === 'basic' && (!data.username || !data.password)) return false;
      if (data.method === 'passcode' && !data.passcode) return false;
      return true;
    },
    {
      message: 'Required fields missing for selected authentication method',
    }
  );
42 |
43 | export class ElicitationManager {
44 | private server: Server | null = null;
45 | private options: ElicitationOptions;
46 | private readonly logger = createLogger('ElicitationManager');
47 |
48 | constructor(options: Partial<ElicitationOptions> = {}) {
49 | this.options = {
50 | enabled: false,
51 | bulkOperationThreshold: 5,
52 | requireComments: false,
53 | interactiveSearch: false,
54 | ...options,
55 | };
56 | }
57 |
58 | setServer(server: Server): void {
59 | this.server = server;
60 | }
61 |
62 | isEnabled(): boolean {
63 | return this.options.enabled && this.server !== null;
64 | }
65 |
66 | getOptions(): ElicitationOptions {
67 | return { ...this.options };
68 | }
69 |
70 | updateOptions(updates: Partial<ElicitationOptions>): void {
71 | this.options = { ...this.options, ...updates };
72 | }
73 |
74 | async confirmBulkOperation(
75 | operation: string,
76 | itemCount: number,
77 | items?: string[]
78 | ): Promise<ElicitationResult<z.infer<typeof confirmationSchema>>> {
79 | if (!this.isEnabled() || itemCount < this.options.bulkOperationThreshold) {
80 | return { action: 'accept', content: { confirm: true } };
81 | }
82 |
83 | if (!this.server) {
84 | throw new Error('ElicitationManager not initialized with server');
85 | }
86 |
87 | const itemsPreview = items?.slice(0, 5).join(', ');
88 | const hasMore = items && items.length > 5;
89 |
90 | try {
91 | let itemsDisplay = '';
92 | if (itemsPreview) {
93 | itemsDisplay = `: ${itemsPreview}`;
94 | if (hasMore) {
95 | itemsDisplay += ', ...';
96 | }
97 | }
98 |
99 | const result = await this.server.elicitInput({
100 | message: `You are about to ${operation} ${itemCount} items${itemsDisplay}. This action cannot be undone.`,
101 | requestedSchema: {
102 | ...zodToJsonSchema(confirmationSchema),
103 | type: 'object' as const,
104 | // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-member-access
105 | properties: (zodToJsonSchema(confirmationSchema) as any).properties ?? {},
106 | },
107 | });
108 |
109 | if (result.action === 'accept' && result.content) {
110 | const parsed = confirmationSchema.parse(result.content);
111 | if (!parsed.confirm) {
112 | return { action: 'reject' };
113 | }
114 | return { action: 'accept', content: parsed };
115 | }
116 |
117 | return {
118 | action: result.action,
119 | content: result.content as z.infer<typeof confirmationSchema>,
120 | };
121 | } catch (error) {
122 | this.logger.error('Elicitation error:', error);
123 | return { action: 'cancel' };
124 | }
125 | }
126 |
127 | async collectAuthentication(): Promise<ElicitationResult<z.infer<typeof authSchema>>> {
128 | if (!this.isEnabled()) {
129 | return { action: 'cancel' };
130 | }
131 |
132 | if (!this.server) {
133 | throw new Error('ElicitationManager not initialized with server');
134 | }
135 |
136 | try {
137 | const result = await this.server.elicitInput({
138 | message: `SonarQube authentication is not configured. Please provide authentication details:
139 |
140 | Available methods:
141 | 1. Token authentication (recommended) - Generate a token in SonarQube under User > My Account > Security
142 | 2. Basic authentication - Username and password
143 | 3. System passcode - For SonarQube instances with system authentication
144 |
145 | Which method would you like to use?`,
146 | requestedSchema: {
147 | ...zodToJsonSchema(authSchema),
148 | type: 'object' as const,
149 | // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-member-access
150 | properties: (zodToJsonSchema(authSchema) as any).properties ?? {},
151 | },
152 | });
153 |
154 | if (result.action === 'accept' && result.content) {
155 | const parsed = authSchema.parse(result.content);
156 | return { action: 'accept', content: parsed };
157 | }
158 |
159 | return {
160 | action: result.action,
161 | content: result.content as z.infer<typeof authSchema>,
162 | };
163 | } catch (error) {
164 | this.logger.error('Elicitation error:', error);
165 | return { action: 'cancel' };
166 | }
167 | }
168 |
169 | async collectResolutionComment(
170 | issueKey: string,
171 | resolution: string
172 | ): Promise<ElicitationResult<{ comment: string }>> {
173 | if (!this.isEnabled() || !this.options.requireComments) {
174 | return { action: 'accept', content: { comment: '' } };
175 | }
176 |
177 | if (!this.server) {
178 | throw new Error('ElicitationManager not initialized with server');
179 | }
180 |
181 | const commentSchema = z.object({
182 | comment: z.string().min(1).max(500).describe(`Explanation for marking as ${resolution}`),
183 | });
184 |
185 | try {
186 | const result = await this.server.elicitInput({
187 | message: `Please provide a comment explaining why issue ${issueKey} is being marked as ${resolution}:`,
188 | requestedSchema: {
189 | ...zodToJsonSchema(commentSchema),
190 | type: 'object' as const,
191 | // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-member-access
192 | properties: (zodToJsonSchema(commentSchema) as any).properties ?? {},
193 | },
194 | });
195 |
196 | if (result.action === 'accept' && result.content) {
197 | const parsed = commentSchema.parse(result.content);
198 | return { action: 'accept', content: { comment: parsed.comment } };
199 | }
200 |
201 | return { action: result.action };
202 | } catch (error) {
203 | this.logger.error('Elicitation error:', error);
204 | return { action: 'cancel' };
205 | }
206 | }
207 |
208 | async disambiguateSelection<T extends { name: string; key: string }>(
209 | items: T[],
210 | itemType: string
211 | ): Promise<ElicitationResult<{ selection: string }>> {
212 | if (!this.isEnabled() || !this.options.interactiveSearch || items.length <= 1) {
213 | return {
214 | action: 'accept',
215 | content: { selection: items[0]?.key || '' },
216 | };
217 | }
218 |
219 | if (!this.server) {
220 | throw new Error('ElicitationManager not initialized with server');
221 | }
222 |
223 | const selectionSchema = z.object({
224 | selection: z
225 | .enum(items.map((item) => item.key) as [string, ...string[]])
226 | .describe(`Select a ${itemType}`),
227 | });
228 |
229 | const itemsList = items
230 | .slice(0, 10)
231 | .map((item, i) => `${i + 1}. ${item.name} (${item.key})`)
232 | .join('\n');
233 |
234 | try {
235 | const result = await this.server.elicitInput({
236 | message: `Multiple ${itemType}s found. Please select one:\n\n${itemsList}`,
237 | requestedSchema: {
238 | ...zodToJsonSchema(selectionSchema),
239 | type: 'object' as const,
240 | // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-member-access
241 | properties: (zodToJsonSchema(selectionSchema) as any).properties ?? {},
242 | },
243 | });
244 |
245 | if (result.action === 'accept' && result.content) {
246 | const parsed = selectionSchema.parse(result.content);
247 | return { action: 'accept', content: { selection: parsed.selection } };
248 | }
249 |
250 | return { action: result.action };
251 | } catch (error) {
252 | this.logger.error('Elicitation error:', error);
253 | return { action: 'cancel' };
254 | }
255 | }
256 | }
257 |
258 | export const createElicitationManager = (
259 | options?: Partial<ElicitationOptions>
260 | ): ElicitationManager => {
261 | const envEnabled = process.env.SONARQUBE_MCP_ELICITATION === 'true';
262 | const envThreshold = process.env.SONARQUBE_MCP_BULK_THRESHOLD
263 | ? Number.parseInt(process.env.SONARQUBE_MCP_BULK_THRESHOLD, 10)
264 | : undefined;
265 | const envRequireComments = process.env.SONARQUBE_MCP_REQUIRE_COMMENTS === 'true';
266 | const envInteractiveSearch = process.env.SONARQUBE_MCP_INTERACTIVE_SEARCH === 'true';
267 |
268 | const managerOptions: Partial<ElicitationOptions> = {
269 | enabled: envEnabled,
270 | requireComments: envRequireComments,
271 | interactiveSearch: envInteractiveSearch,
272 | ...options,
273 | };
274 |
275 | if (envThreshold !== undefined) {
276 | managerOptions.bulkOperationThreshold = envThreshold;
277 | }
278 |
279 | return new ElicitationManager(managerOptions);
280 | };
281 |
```
--------------------------------------------------------------------------------
/src/__tests__/logger.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Logger, LogLevel, createLogger } from '../utils/logger.js';
2 | import fs from 'fs';
3 | import path from 'path';
4 | import os from 'os';
5 |
6 | describe('Logger', () => {
7 | const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'logger-test-'));
8 | const logFile = path.join(tempDir, 'test.log');
9 | const originalEnv = process.env;
10 |
11 | beforeEach(() => {
12 | process.env = { ...originalEnv };
13 | // Clean up any existing log file
14 | if (fs.existsSync(logFile)) {
15 | fs.unlinkSync(logFile);
16 | }
17 | });
18 |
19 | afterEach(() => {
20 | process.env = originalEnv;
21 | });
22 |
23 | afterAll(() => {
24 | // Clean up temp directory
25 | if (fs.existsSync(tempDir)) {
26 | fs.rmSync(tempDir, { recursive: true });
27 | }
28 | });
29 |
  // Construction must never throw, with or without a context label.
  describe('Logger initialization', () => {
    it('should create logger with context', () => {
      const logger = new Logger('TestContext');
      expect(logger).toBeDefined();
    });

    it('should create logger without context', () => {
      const logger = new Logger();
      expect(logger).toBeDefined();
    });

    it('should create logger using createLogger helper', () => {
      const logger = createLogger('TestContext');
      expect(logger).toBeDefined();
    });
  });
46 |
  describe('Log file initialization', () => {
    // The log file should only appear once a message is actually written.
    it('should create log file when LOG_FILE is set', () => {
      process.env.LOG_FILE = logFile;
      process.env.LOG_LEVEL = 'DEBUG';

      const logger = new Logger();
      logger.debug('test message');

      expect(fs.existsSync(logFile)).toBe(true);
    });

    it('should handle nested directories', () => {
      // Create a nested directory manually to test path parsing
      const nestedDir = path.join(tempDir, 'nested', 'dir');
      fs.mkdirSync(nestedDir, { recursive: true });

      const nestedLogFile = path.join(nestedDir, 'test.log');
      process.env.LOG_FILE = nestedLogFile;
      process.env.LOG_LEVEL = 'DEBUG';

      const logger = new Logger();
      logger.debug('test message');

      // The logger should work with existing nested directories
      expect(fs.existsSync(nestedLogFile)).toBe(true);

      // Clean up
      if (fs.existsSync(path.join(tempDir, 'nested'))) {
        fs.rmSync(path.join(tempDir, 'nested'), { recursive: true });
      }
    });
  });
79 |
  // Each level admits messages at or above its own severity. Filtered
  // messages must not even create the file (asserted via existsSync=false,
  // which relies on beforeEach deleting any leftover file).
  describe('Log level filtering', () => {
    beforeEach(() => {
      process.env.LOG_FILE = logFile;
    });

    it('should log DEBUG messages when LOG_LEVEL is DEBUG', () => {
      process.env.LOG_LEVEL = 'DEBUG';
      const logger = new Logger();

      logger.debug('debug message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('DEBUG');
      expect(content).toContain('debug message');
    });

    it('should not log DEBUG messages when LOG_LEVEL is INFO', () => {
      process.env.LOG_LEVEL = 'INFO';
      const logger = new Logger();

      logger.debug('debug message');

      expect(fs.existsSync(logFile)).toBe(false);
    });

    it('should log INFO messages when LOG_LEVEL is INFO', () => {
      process.env.LOG_LEVEL = 'INFO';
      const logger = new Logger();

      logger.info('info message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('INFO');
      expect(content).toContain('info message');
    });

    it('should not log INFO messages when LOG_LEVEL is WARN', () => {
      process.env.LOG_LEVEL = 'WARN';
      const logger = new Logger();

      logger.info('info message');

      expect(fs.existsSync(logFile)).toBe(false);
    });

    it('should log WARN messages when LOG_LEVEL is WARN', () => {
      process.env.LOG_LEVEL = 'WARN';
      const logger = new Logger();

      logger.warn('warn message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('WARN');
      expect(content).toContain('warn message');
    });

    it('should not log WARN messages when LOG_LEVEL is ERROR', () => {
      process.env.LOG_LEVEL = 'ERROR';
      const logger = new Logger();

      logger.warn('warn message');

      expect(fs.existsSync(logFile)).toBe(false);
    });

    it('should log ERROR messages when LOG_LEVEL is ERROR', () => {
      process.env.LOG_LEVEL = 'ERROR';
      const logger = new Logger();

      logger.error('error message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('ERROR');
      expect(content).toContain('error message');
    });

    // Absent LOG_LEVEL means the most verbose level (DEBUG).
    it('should default to DEBUG level when LOG_LEVEL is not set', () => {
      delete process.env.LOG_LEVEL;
      const logger = new Logger();

      logger.debug('debug message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('DEBUG');
      expect(content).toContain('debug message');
    });
  });
167 |
  describe('Log message formatting', () => {
    beforeEach(() => {
      process.env.LOG_FILE = logFile;
      process.env.LOG_LEVEL = 'DEBUG';
    });

    it('should format log message with timestamp, level, and context', () => {
      const logger = new Logger('TestContext');

      logger.debug('test message');

      // Expected layout: ISO-8601 timestamp, level, [context], message.
      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toMatch(
        /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z DEBUG \[TestContext\] test message/
      );
    });

    it('should format log message without context', () => {
      const logger = new Logger();

      logger.info('test message');

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toMatch(/\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z INFO test message/);
    });

    // Structured data is appended as pretty-printed JSON (note the
    // '"key": "value"' spacing asserted below).
    it('should include data in log message', () => {
      const logger = new Logger();
      const data = { key: 'value', number: 42 };

      logger.debug('test message', data);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('test message');
      expect(content).toContain('"key": "value"');
      expect(content).toContain('"number": 42');
    });

    it('should handle Error objects specially', () => {
      const logger = new Logger();
      const error = new Error('Test error');

      logger.error('An error occurred', error);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('ERROR');
      expect(content).toContain('An error occurred');
      expect(content).toContain('Error: Test error');
    });

    // An Error with no .stack must still render its name and message.
    it('should handle errors without stack traces', () => {
      const logger = new Logger();
      const error = new Error('Test error');
      delete error.stack;

      logger.error('An error occurred', error);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('Error: Test error');
    });

    it('should handle non-Error objects in error logging', () => {
      const logger = new Logger();
      const errorData = { code: 'ERR_001', message: 'Something went wrong' };

      logger.error('An error occurred', errorData);

      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('"code": "ERR_001"');
      expect(content).toContain('"message": "Something went wrong"');
    });

    it('should handle circular references in error data', () => {
      const logger = new Logger();
      const obj: Record<string, unknown> = { a: 1 };
      obj.circular = obj;

      logger.error('An error occurred', obj);

      // JSON.stringify throws on cycles; the logger must not crash and is
      // expected to fall back to String(obj), hence '[object Object]'.
      const content = fs.readFileSync(logFile, 'utf8');
      expect(content).toContain('[object Object]');
    });
  });
251 |
252 | describe('No logging when LOG_FILE not set', () => {
253 | beforeEach(() => {
254 | delete process.env.LOG_FILE;
255 | process.env.LOG_LEVEL = 'DEBUG';
256 | });
257 |
258 | it('should not create log file when LOG_FILE is not set', () => {
259 | const logger = new Logger();
260 |
261 | logger.debug('debug message');
262 | logger.info('info message');
263 | logger.warn('warn message');
264 | logger.error('error message');
265 |
266 | expect(fs.existsSync(logFile)).toBe(false);
267 | });
268 | });
269 |
270 | describe('Multiple log entries', () => {
271 | it('should append multiple log entries', () => {
272 | process.env.LOG_FILE = logFile;
273 | process.env.LOG_LEVEL = 'DEBUG';
274 |
275 | const logger = new Logger();
276 |
277 | logger.debug('first message');
278 | logger.info('second message');
279 | logger.warn('third message');
280 | logger.error('fourth message');
281 |
282 | const content = fs.readFileSync(logFile, 'utf8');
283 | const lines = content.trim().split('\n');
284 |
285 | expect(lines).toHaveLength(4);
286 | expect(lines[0]).toContain('DEBUG');
287 | expect(lines[0]).toContain('first message');
288 | expect(lines[1]).toContain('INFO');
289 | expect(lines[1]).toContain('second message');
290 | expect(lines[2]).toContain('WARN');
291 | expect(lines[2]).toContain('third message');
292 | expect(lines[3]).toContain('ERROR');
293 | expect(lines[3]).toContain('fourth message');
294 | });
295 | });
296 | });
297 |
298 | describe('LogLevel enum', () => {
299 | it('should have correct log levels', () => {
300 | expect(LogLevel.DEBUG).toBe('DEBUG');
301 | expect(LogLevel.INFO).toBe('INFO');
302 | expect(LogLevel.WARN).toBe('WARN');
303 | expect(LogLevel.ERROR).toBe('ERROR');
304 | });
305 | });
306 |
307 | describe('Default logger export', () => {
308 | it('should export a default logger with SonarQubeMCP context', async () => {
309 | // Import the default export
310 | const module = await import('../utils/logger.js');
311 | const defaultLogger = module.default;
312 | expect(defaultLogger).toBeDefined();
313 | expect(defaultLogger).toBeInstanceOf(Logger);
314 | });
315 | });
316 |
```