This is page 6 of 11. Use http://codebase.md/sapientpants/sonarqube-mcp-server?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .adr-dir
├── .changeset
│ ├── config.json
│ └── README.md
├── .claude
│ ├── commands
│ │ ├── analyze-and-fix-github-issue.md
│ │ ├── fix-sonarqube-issues.md
│ │ ├── implement-github-issue.md
│ │ ├── release.md
│ │ ├── spec-feature.md
│ │ └── update-dependencies.md
│ ├── hooks
│ │ └── block-git-no-verify.ts
│ └── settings.json
├── .dockerignore
├── .github
│ ├── actionlint.yaml
│ ├── changeset.yml
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ └── feature_request.md
│ ├── pull_request_template.md
│ ├── scripts
│ │ ├── determine-artifact.sh
│ │ └── version-and-release.js
│ ├── workflows
│ │ ├── codeql.yml
│ │ ├── main.yml
│ │ ├── pr.yml
│ │ ├── publish.yml
│ │ ├── reusable-docker.yml
│ │ ├── reusable-security.yml
│ │ └── reusable-validate.yml
│ └── WORKFLOWS.md
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .markdownlint.yaml
├── .markdownlintignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── .trivyignore
├── .yaml-lint.yml
├── .yamllintignore
├── CHANGELOG.md
├── changes.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── COMPATIBILITY.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── architecture
│ │ └── decisions
│ │ ├── 0001-record-architecture-decisions.md
│ │ ├── 0002-use-node-js-with-typescript.md
│ │ ├── 0003-adopt-model-context-protocol-for-sonarqube-integration.md
│ │ ├── 0004-use-sonarqube-web-api-client-for-all-sonarqube-interactions.md
│ │ ├── 0005-domain-driven-design-of-sonarqube-modules.md
│ │ ├── 0006-expose-sonarqube-features-as-mcp-tools.md
│ │ ├── 0007-support-multiple-authentication-methods-for-sonarqube.md
│ │ ├── 0008-use-environment-variables-for-configuration.md
│ │ ├── 0009-file-based-logging-to-avoid-stdio-conflicts.md
│ │ ├── 0010-use-stdio-transport-for-mcp-communication.md
│ │ ├── 0011-docker-containerization-for-deployment.md
│ │ ├── 0012-add-elicitation-support-for-interactive-user-input.md
│ │ ├── 0014-current-security-model-and-future-oauth2-considerations.md
│ │ ├── 0015-transport-architecture-refactoring.md
│ │ ├── 0016-http-transport-with-oauth-2-0-metadata-endpoints.md
│ │ ├── 0017-comprehensive-audit-logging-system.md
│ │ ├── 0018-add-comprehensive-monitoring-and-observability.md
│ │ ├── 0019-simplify-to-stdio-only-transport-for-mcp-gateway-deployment.md
│ │ ├── 0020-testing-framework-and-strategy-vitest-with-property-based-testing.md
│ │ ├── 0021-code-quality-toolchain-eslint-prettier-strict-typescript.md
│ │ ├── 0022-package-manager-choice-pnpm.md
│ │ ├── 0023-release-management-with-changesets.md
│ │ ├── 0024-ci-cd-platform-github-actions.md
│ │ ├── 0025-container-and-security-scanning-strategy.md
│ │ ├── 0026-circuit-breaker-pattern-with-opossum.md
│ │ ├── 0027-docker-image-publishing-strategy-ghcr-to-docker-hub.md
│ │ └── 0028-session-based-http-transport-with-server-sent-events.md
│ ├── architecture.md
│ ├── security.md
│ └── troubleshooting.md
├── eslint.config.js
├── examples
│ └── http-client.ts
├── jest.config.js
├── LICENSE
├── LICENSES.md
├── osv-scanner.toml
├── package.json
├── pnpm-lock.yaml
├── README.md
├── scripts
│ ├── actionlint.sh
│ ├── ci-local.sh
│ ├── load-test.sh
│ ├── README.md
│ ├── run-all-tests.sh
│ ├── scan-container.sh
│ ├── security-scan.sh
│ ├── setup.sh
│ ├── test-monitoring-integration.sh
│ └── validate-docs.sh
├── SECURITY.md
├── sonar-project.properties
├── src
│ ├── __tests__
│ │ ├── additional-coverage.test.ts
│ │ ├── advanced-index.test.ts
│ │ ├── assign-issue.test.ts
│ │ ├── auth-methods.test.ts
│ │ ├── boolean-string-transform.test.ts
│ │ ├── components.test.ts
│ │ ├── config
│ │ │ └── service-accounts.test.ts
│ │ ├── dependency-injection.test.ts
│ │ ├── direct-handlers.test.ts
│ │ ├── direct-lambdas.test.ts
│ │ ├── direct-schema-validation.test.ts
│ │ ├── domains
│ │ │ ├── components-domain-full.test.ts
│ │ │ ├── components-domain.test.ts
│ │ │ ├── hotspots-domain.test.ts
│ │ │ └── source-code-domain.test.ts
│ │ ├── environment-validation.test.ts
│ │ ├── error-handler.test.ts
│ │ ├── error-handling.test.ts
│ │ ├── errors.test.ts
│ │ ├── function-tests.test.ts
│ │ ├── handlers
│ │ │ ├── components-handler-integration.test.ts
│ │ │ └── projects-authorization.test.ts
│ │ ├── handlers.test.ts
│ │ ├── handlers.test.ts.skip
│ │ ├── index.test.ts
│ │ ├── issue-resolution-elicitation.test.ts
│ │ ├── issue-resolution.test.ts
│ │ ├── issue-transitions.test.ts
│ │ ├── issues-enhanced-search.test.ts
│ │ ├── issues-new-parameters.test.ts
│ │ ├── json-array-transform.test.ts
│ │ ├── lambda-functions.test.ts
│ │ ├── lambda-handlers.test.ts.skip
│ │ ├── logger.test.ts
│ │ ├── mapping-functions.test.ts
│ │ ├── mocked-environment.test.ts
│ │ ├── null-to-undefined.test.ts
│ │ ├── parameter-transformations-advanced.test.ts
│ │ ├── parameter-transformations.test.ts
│ │ ├── protocol-version.test.ts
│ │ ├── pull-request-transform.test.ts
│ │ ├── quality-gates.test.ts
│ │ ├── schema-parameter-transforms.test.ts
│ │ ├── schema-transformation-mocks.test.ts
│ │ ├── schema-transforms.test.ts
│ │ ├── schema-validators.test.ts
│ │ ├── schemas
│ │ │ ├── components-schema.test.ts
│ │ │ ├── hotspots-tools-schema.test.ts
│ │ │ └── issues-schema.test.ts
│ │ ├── sonarqube-elicitation.test.ts
│ │ ├── sonarqube.test.ts
│ │ ├── source-code.test.ts
│ │ ├── standalone-handlers.test.ts
│ │ ├── string-to-number-transform.test.ts
│ │ ├── tool-handler-lambdas.test.ts
│ │ ├── tool-handlers.test.ts
│ │ ├── tool-registration-schema.test.ts
│ │ ├── tool-registration-transforms.test.ts
│ │ ├── transformation-util.test.ts
│ │ ├── transports
│ │ │ ├── base.test.ts
│ │ │ ├── factory.test.ts
│ │ │ ├── http.test.ts
│ │ │ ├── session-manager.test.ts
│ │ │ └── stdio.test.ts
│ │ ├── utils
│ │ │ ├── retry.test.ts
│ │ │ └── transforms.test.ts
│ │ ├── zod-boolean-transform.test.ts
│ │ ├── zod-schema-transforms.test.ts
│ │ └── zod-transforms.test.ts
│ ├── config
│ │ ├── service-accounts.ts
│ │ └── versions.ts
│ ├── domains
│ │ ├── base.ts
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── errors.ts
│ ├── handlers
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── index.ts
│ ├── monitoring
│ │ ├── __tests__
│ │ │ └── circuit-breaker.test.ts
│ │ ├── circuit-breaker.ts
│ │ ├── health.ts
│ │ └── metrics.ts
│ ├── schemas
│ │ ├── common.ts
│ │ ├── components.ts
│ │ ├── hotspots-tools.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ ├── sonarqube.ts
│ ├── transports
│ │ ├── base.ts
│ │ ├── factory.ts
│ │ ├── http.ts
│ │ ├── index.ts
│ │ ├── session-manager.ts
│ │ └── stdio.ts
│ ├── types
│ │ ├── common.ts
│ │ ├── components.ts
│ │ ├── hotspots.ts
│ │ ├── index.ts
│ │ ├── issues.ts
│ │ ├── measures.ts
│ │ ├── metrics.ts
│ │ ├── projects.ts
│ │ ├── quality-gates.ts
│ │ ├── source-code.ts
│ │ └── system.ts
│ └── utils
│ ├── __tests__
│ │ ├── elicitation.test.ts
│ │ ├── pattern-matcher.test.ts
│ │ └── structured-response.test.ts
│ ├── client-factory.ts
│ ├── elicitation.ts
│ ├── error-handler.ts
│ ├── logger.ts
│ ├── parameter-mappers.ts
│ ├── pattern-matcher.ts
│ ├── retry.ts
│ ├── structured-response.ts
│ └── transforms.ts
├── test-http-transport.sh
├── tmp
│ └── .gitkeep
├── tsconfig.build.json
├── tsconfig.json
├── vitest.config.d.ts
├── vitest.config.js
├── vitest.config.js.map
└── vitest.config.ts
```
# Files
--------------------------------------------------------------------------------
/scripts/validate-docs.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 | # Documentation validation script for SonarQube MCP Server
3 | # This script validates all documentation files for broken links and code snippets
4 |
5 | set -e # Exit on error
6 |
7 | # Colors for output
8 | RED='\033[0;31m'
9 | GREEN='\033[0;32m'
10 | YELLOW='\033[1;33m'
11 | BLUE='\033[0;34m'
12 | NC='\033[0m' # No Color
13 |
14 | echo -e "${GREEN}📚 SonarQube MCP Server - Documentation Validation${NC}"
15 | echo "==================================================="
16 |
17 | # Configuration
18 | DOCS_DIR="docs"
19 | README_FILE="README.md"
20 | TEMP_DIR="/tmp/doc-validation-$$"
21 | ERRORS_FOUND=false
22 |
23 | # Create temp directory
24 | mkdir -p "$TEMP_DIR"
25 |
26 | # Function to check if a command exists
27 | command_exists() {
28 | command -v "$1" >/dev/null 2>&1
29 | }
30 |
31 | # Check prerequisites
32 | echo -e "\n${YELLOW}📋 Checking prerequisites...${NC}"
33 | if command_exists node; then
34 | echo -e "✅ Node.js is installed"
35 | else
36 | echo -e "${YELLOW}⚠️ Node.js not installed. Some checks will be skipped.${NC}"
37 | fi
38 |
39 | # Function to validate internal links
40 | validate_internal_links() {
41 | local file=$1
42 | local base_dir=$(dirname "$file")
43 |
44 | echo -e "\n${BLUE}🔗 Checking internal links in: $(basename $file)${NC}"
45 |
46 | # Extract markdown links [text](url)
47 | while IFS= read -r link; do
48 | url=$(echo "$link" | sed -E 's/\[[^\]]+\]\(([^)]+)\)/\1/')
49 |
50 | # Skip external links
51 | if [[ "$url" =~ ^https?:// ]]; then
52 | continue
53 | fi
54 |
55 | # Skip anchors
56 | if [[ "$url" =~ ^# ]]; then
57 | continue
58 | fi
59 |
60 | # Remove anchor from URL if present
61 | file_path=$(echo "$url" | cut -d'#' -f1)
62 |
63 | # Resolve relative path
64 | if [[ "$file_path" =~ ^/ ]]; then
65 | # Absolute path from project root
66 | full_path="${file_path#/}"
67 | else
68 | # Relative path
69 | full_path="$base_dir/$file_path"
70 | fi
71 |
72 | # Normalize path
73 | full_path=$(cd "$(dirname "$full_path")" 2>/dev/null && echo "$(pwd)/$(basename "$full_path")" || echo "$full_path")
74 |
75 | # Check if file exists
76 | if [ ! -f "$full_path" ]; then
77 | echo -e " ${RED}✗ Broken link: $url${NC}"
78 | echo " Expected file: $full_path"
79 | ERRORS_FOUND=true
80 | else
81 | echo -e " ${GREEN}✓ Valid link: $url${NC}"
82 | fi
83 | done < <(grep -oE '\[([^\]]+)\]\(([^)]+)\)' "$file") # process substitution so ERRORS_FOUND set in the loop persists
84 | }
85 |
86 | # Function to validate code blocks
87 | validate_code_blocks() {
88 | local file=$1
89 |
90 | echo -e "\n${BLUE}💻 Checking code blocks in: $(basename $file)${NC}"
91 |
92 | # Extract code blocks with language
93 | awk '/^```[a-zA-Z]+/{lang=$1; gsub(/```/, "", lang); getline; code="";
94 | while ($0 !~ /^```/) {code=code"\n"$0; getline}
95 | print lang"|"code}' "$file" > "$TEMP_DIR/code_blocks.txt"
96 |
97 | while IFS='|' read -r lang code; do
98 | if [ -z "$lang" ]; then
99 | continue
100 | fi
101 |
102 | case "$lang" in
103 | bash|sh)
104 | # Validate bash syntax
105 | if echo "$code" | bash -n 2>/dev/null; then
106 | echo -e " ${GREEN}✓ Valid bash code block${NC}"
107 | else
108 | echo -e " ${RED}✗ Invalid bash syntax${NC}"
109 | ERRORS_FOUND=true
110 | fi
111 | ;;
112 | yaml|yml)
113 | # Check YAML syntax if yamllint is available
114 | if command_exists yamllint; then
115 | if echo "$code" | yamllint - >/dev/null 2>&1; then
116 | echo -e " ${GREEN}✓ Valid YAML code block${NC}"
117 | else
118 | echo -e " ${YELLOW}⚠️ YAML syntax issues${NC}"
119 | fi
120 | else
121 | echo -e " ${BLUE}ℹ️ Skipping YAML validation (yamllint not installed)${NC}"
122 | fi
123 | ;;
124 | json)
125 | # Validate JSON syntax
126 | if echo "$code" | jq . >/dev/null 2>&1; then
127 | echo -e " ${GREEN}✓ Valid JSON code block${NC}"
128 | else
129 | echo -e " ${RED}✗ Invalid JSON syntax${NC}"
130 | ERRORS_FOUND=true
131 | fi
132 | ;;
133 | typescript|javascript|ts|js)
134 | echo -e " ${BLUE}ℹ️ TypeScript/JavaScript code block found${NC}"
135 | ;;
136 | *)
137 | echo -e " ${BLUE}ℹ️ $lang code block found${NC}"
138 | ;;
139 | esac
140 | done < "$TEMP_DIR/code_blocks.txt"
141 | }
142 |
143 | # Function to check for required sections
144 | check_required_sections() {
145 | local file=$1
146 | local required_sections=("## Overview" "## Prerequisites" "## Installation" "## Usage")
147 |
148 | echo -e "\n${BLUE}📑 Checking required sections in: $(basename $file)${NC}"
149 |
150 | for section in "${required_sections[@]}"; do
151 | if grep -q "^$section" "$file"; then
152 | echo -e " ${GREEN}✓ Found section: $section${NC}"
153 | else
154 | echo -e " ${YELLOW}⚠️ Missing section: $section${NC}"
155 | fi
156 | done
157 | }
158 |
159 | # Function to validate external links (basic check)
160 | validate_external_links() {
161 | local file=$1
162 |
163 | echo -e "\n${BLUE}🌐 Checking external links in: $(basename $file)${NC}"
164 |
165 | # Extract external links
166 | while read -r url; do
167 | # Remove trailing punctuation
168 | url=$(echo "$url" | sed 's/[.,;:]$//')
169 |
170 | # Basic URL format check
171 | if [[ "$url" =~ ^https?://[a-zA-Z0-9.-]+\.[a-zA-Z]{2,} ]]; then
172 | echo -e " ${GREEN}✓ Valid URL format: $url${NC}"
173 | else
174 | echo -e " ${RED}✗ Invalid URL format: $url${NC}"
175 | ERRORS_FOUND=true
176 | fi
177 | done < <(grep -oE 'https?://[^ )]+' "$file" | sort -u) # avoid subshell so ERRORS_FOUND persists
178 | }
179 |
180 | # Function to check Mermaid diagrams
181 | check_mermaid_diagrams() {
182 | local file=$1
183 |
184 | if grep -q '```mermaid' "$file"; then
185 | echo -e "\n${BLUE}📊 Checking Mermaid diagrams in: $(basename $file)${NC}"
186 |
187 | # Count opening and closing tags
188 | open_count=$(grep -c '```mermaid' "$file")
189 | close_count=$(awk '/```mermaid/{in_block=1; next} /```/{if (in_block) {in_block=0; closed++}} END{print closed+0}' "$file")
190 |
191 | if [ "$open_count" -eq "$close_count" ]; then
192 | echo -e " ${GREEN}✓ Mermaid diagrams properly closed${NC}"
193 | else
194 | echo -e " ${RED}✗ Unclosed Mermaid diagram blocks${NC}"
195 | ERRORS_FOUND=true
196 | fi
197 | fi
198 | }
199 |
200 | # Main validation
201 | echo -e "\n${YELLOW}🔍 Starting documentation validation...${NC}"
202 |
203 | # Validate all markdown files
204 | all_files=()
205 |
206 | # Add README if exists
207 | if [ -f "$README_FILE" ]; then
208 | all_files+=("$README_FILE")
209 | fi
210 |
211 | # Add all docs files
212 | if [ -d "$DOCS_DIR" ]; then
213 | while IFS= read -r -d '' file; do
214 | all_files+=("$file")
215 | done < <(find "$DOCS_DIR" -name "*.md" -print0)
216 | fi
217 |
218 | # Process each file
219 | for file in "${all_files[@]}"; do
220 | echo -e "\n${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
221 | echo -e "${YELLOW}📄 Validating: $file${NC}"
222 | echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"
223 |
224 | validate_internal_links "$file"
225 | validate_code_blocks "$file"
226 | validate_external_links "$file"
227 | check_mermaid_diagrams "$file"
228 |
229 | # Check required sections for main docs
230 | if [[ "$file" =~ (deployment|architecture|api-reference)\.md$ ]]; then
231 | check_required_sections "$file"
232 | fi
233 | done
234 |
235 | # Additional checks
236 | echo -e "\n${YELLOW}📊 Running additional checks...${NC}"
237 |
238 | # Check for orphaned images
239 | if [ -d "$DOCS_DIR/images" ] || [ -d "images" ]; then
240 | echo -e "\n${BLUE}🖼️ Checking for orphaned images...${NC}"
241 |
242 | for img_dir in "$DOCS_DIR/images" "images"; do
243 | if [ -d "$img_dir" ]; then
244 | find "$img_dir" -type f \( -name "*.png" -o -name "*.jpg" -o -name "*.svg" \) | while read -r img; do
245 | img_name=$(basename "$img")
246 | if ! grep -r "$img_name" "$DOCS_DIR" "$README_FILE" 2>/dev/null | grep -v Binary >/dev/null; then
247 | echo -e " ${YELLOW}⚠️ Potentially orphaned image: $img${NC}"
248 | fi
249 | done
250 | fi
251 | done
252 | fi
253 |
254 | # Check for TODOs in documentation
255 | echo -e "\n${BLUE}📝 Checking for TODOs...${NC}"
256 | todo_count=$(grep -r "TODO\|FIXME\|XXX" "${all_files[@]}" 2>/dev/null | wc -l || echo 0)
257 | if [ "$todo_count" -gt 0 ]; then
258 | echo -e " ${YELLOW}⚠️ Found $todo_count TODO/FIXME/XXX markers${NC}"
259 | grep -n "TODO\|FIXME\|XXX" "${all_files[@]}" 2>/dev/null | head -5
260 | else
261 | echo -e " ${GREEN}✓ No TODOs found${NC}"
262 | fi
263 |
264 | # Check documentation structure
265 | echo -e "\n${BLUE}📁 Checking documentation structure...${NC}"
266 | expected_docs=(
267 | "docs/api-reference.md"
268 | "docs/architecture.md"
269 | "docs/deployment.md"
270 | "docs/security.md"
271 | "docs/troubleshooting.md"
272 | )
273 |
274 | for doc in "${expected_docs[@]}"; do
275 | if [ -f "$doc" ]; then
276 | echo -e " ${GREEN}✓ Found: $doc${NC}"
277 | else
278 | echo -e " ${RED}✗ Missing: $doc${NC}"
279 | ERRORS_FOUND=true
280 | fi
281 | done
282 |
283 | # Cleanup
284 | rm -rf "$TEMP_DIR"
285 |
286 | # Final summary
287 | echo -e "\n==================================================="
288 | if [ "$ERRORS_FOUND" = false ]; then
289 | echo -e "${GREEN}✅ Documentation validation passed!${NC}"
290 | echo -e "\n${YELLOW}Documentation is well-structured with:${NC}"
291 | echo " - Valid internal links"
292 | echo " - Syntactically correct code examples"
293 | echo " - Proper formatting"
294 | echo " - Complete structure"
295 | else
296 | echo -e "${RED}❌ Documentation validation found issues${NC}"
297 | echo -e "${YELLOW}Please fix the errors above${NC}"
298 | exit 1
299 | fi
```
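The trickiest part of the script is the link-extraction pipeline in `validate_internal_links()`. Purely for illustration, here is a rough TypeScript equivalent of that logic; it is not part of the repository, and the function name and regex are simply mirrored from the bash above.

```typescript
// Illustrative TypeScript equivalent of validate_internal_links() above.
// Not part of the repository; shown only to clarify what the grep/sed pipeline does.
import { readFileSync, existsSync } from 'node:fs';
import { dirname, resolve } from 'node:path';

function findBrokenLinks(file: string): string[] {
  const text = readFileSync(file, 'utf8');
  const broken: string[] = [];
  // Same pattern as the script: [text](url)
  for (const match of text.matchAll(/\[[^\]]+\]\(([^)]+)\)/g)) {
    const url = match[1] ?? '';
    // Skip external links and pure anchors, as the script does
    if (/^https?:\/\//.test(url) || url.startsWith('#')) continue;
    const target = url.split('#')[0] ?? ''; // drop any #anchor suffix
    const full = target.startsWith('/') ? target.slice(1) : resolve(dirname(file), target);
    if (!existsSync(full)) broken.push(url);
  }
  return broken;
}

// Example: findBrokenLinks('README.md') lists relative links whose targets are missing.
```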
--------------------------------------------------------------------------------
/src/__tests__/schema-parameter-transforms.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, vi } from 'vitest';
2 | import { z } from 'zod';
3 | import * as indexModule from '../index.js';
4 | import { ISonarQubeClient } from '../sonarqube.js';
5 | // Create a custom mock implementation of the handlers
6 | const nullToUndefined = indexModule.nullToUndefined;
7 | // Create a mock client
8 | const mockClient: Partial<ISonarQubeClient> = {
9 | getMetrics: vi.fn<() => Promise<any>>().mockResolvedValue({
10 | metrics: [{ id: '1', key: 'test', name: 'Test Metric' }],
11 | paging: { pageIndex: 2, pageSize: 5, total: 10 },
12 | }),
13 | getIssues: vi.fn<() => Promise<any>>().mockResolvedValue({
14 | issues: [{ key: 'issue-1', rule: 'rule-1', severity: 'MAJOR' }],
15 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
16 | }),
17 | getComponentMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
18 | component: { key: 'comp-1', measures: [{ metric: 'coverage', value: '75.0' }] },
19 | metrics: [{ key: 'coverage', name: 'Coverage' }],
20 | }),
21 | getComponentsMeasures: vi.fn<() => Promise<any>>().mockResolvedValue({
22 | components: [
23 | { key: 'comp-1', measures: [{ metric: 'coverage', value: '75.0' }] },
24 | { key: 'comp-2', measures: [{ metric: 'coverage', value: '85.0' }] },
25 | ],
26 | metrics: [{ key: 'coverage', name: 'Coverage' }],
27 | paging: { pageIndex: 1, pageSize: 10, total: 2 },
28 | }),
29 | getMeasuresHistory: vi.fn<() => Promise<any>>().mockResolvedValue({
30 | measures: [
31 | {
32 | metric: 'coverage',
33 | history: [
34 | { date: '2023-01-01', value: '70.0' },
35 | { date: '2023-02-01', value: '75.0' },
36 | { date: '2023-03-01', value: '80.0' },
37 | ],
38 | },
39 | ],
40 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
41 | }),
42 | };
43 | // Mock handlers that don't actually call the HTTP methods
44 | const mockMetricsHandler = async (params: { page: number | null; page_size: number | null }) => {
45 | const mockResult = await (mockClient as ISonarQubeClient).getMetrics({
46 | page: nullToUndefined(params.page),
47 | pageSize: nullToUndefined(params.page_size),
48 | });
49 | return {
50 | content: [
51 | {
52 | type: 'text' as const,
53 | text: JSON.stringify(mockResult, null, 2),
54 | },
55 | ],
56 | };
57 | };
58 | const mockIssuesHandler = async (params: Record<string, unknown>) => {
59 | const mockResult = await (mockClient as ISonarQubeClient).getIssues(params as any);
60 | return {
61 | content: [
62 | {
63 | type: 'text' as const,
64 | text: JSON.stringify(mockResult, null, 2),
65 | },
66 | ],
67 | };
68 | };
69 | const mockComponentMeasuresHandler = async (params: Record<string, unknown>) => {
70 | const mockResult = await (mockClient as ISonarQubeClient).getComponentMeasures(params as any);
71 | return {
72 | content: [
73 | {
74 | type: 'text' as const,
75 | text: JSON.stringify(mockResult, null, 2),
76 | },
77 | ],
78 | };
79 | };
80 | const mockComponentsMeasuresHandler = async (params: Record<string, unknown>) => {
81 | const mockResult = await (mockClient as ISonarQubeClient).getComponentsMeasures(params as any);
82 | return {
83 | content: [
84 | {
85 | type: 'text' as const,
86 | text: JSON.stringify(mockResult, null, 2),
87 | },
88 | ],
89 | };
90 | };
91 | const mockMeasuresHistoryHandler = async (params: Record<string, unknown>) => {
92 | const mockResult = await (mockClient as ISonarQubeClient).getMeasuresHistory(params as any);
93 | return {
94 | content: [
95 | {
96 | type: 'text' as const,
97 | text: JSON.stringify(mockResult, null, 2),
98 | },
99 | ],
100 | };
101 | };
102 | // Helper function to test string to number parameter transformations (not used directly)
103 | // eslint-disable-next-line @typescript-eslint/no-unused-vars
104 | function testNumberTransform(transformFn: (val: string | undefined) => number | null | undefined) {
105 | // Valid number
106 | expect(transformFn('10')).toBe(10);
107 | // Empty string should return null
108 | expect(transformFn('')).toBe(null);
109 | // Invalid number should return null
110 | expect(transformFn('abc')).toBe(null);
111 | // Undefined should return undefined
112 | expect(transformFn(undefined)).toBe(undefined);
113 | }
114 | describe('Schema Parameter Transformations', () => {
115 | describe('Number Transformations', () => {
116 | it('should transform string numbers to integers or null', () => {
117 | // Create a schema with number transformation
118 | const schema = z
119 | .string()
120 | .optional()
121 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
122 | // Test the transformation
123 | expect(schema.parse('10')).toBe(10);
124 | expect(schema.parse('')).toBe(null);
125 | expect(schema.parse('abc')).toBe(null);
126 | expect(schema.parse(undefined)).toBe(null);
127 | });
128 | });
129 | describe('Boolean Transformations', () => {
130 | it('should transform string booleans to boolean values', () => {
131 | // Create a schema with boolean transformation
132 | const schema = z
133 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
134 | .nullable()
135 | .optional();
136 | // Test the transformation
137 | expect(schema.parse('true')).toBe(true);
138 | expect(schema.parse('false')).toBe(false);
139 | expect(schema.parse(true)).toBe(true);
140 | expect(schema.parse(false)).toBe(false);
141 | expect(schema.parse(null)).toBe(null);
142 | expect(schema.parse(undefined)).toBe(undefined);
143 | });
144 | });
145 | describe('Parameter Transformations for Lambda Functions', () => {
146 | it('should handle nullToUndefined utility function', () => {
147 | expect(nullToUndefined(null)).toBeUndefined();
148 | expect(nullToUndefined(undefined)).toBeUndefined();
149 | expect(nullToUndefined(0)).toBe(0);
150 | expect(nullToUndefined('')).toBe('');
151 | expect(nullToUndefined('test')).toBe('test');
152 | expect(nullToUndefined(10)).toBe(10);
153 | expect(nullToUndefined(false)).toBe(false);
154 | expect(nullToUndefined(true)).toBe(true);
155 | });
156 | it('should handle metrics handler with string parameters', async () => {
157 | const result = await mockMetricsHandler({ page: null, page_size: null });
158 | // Verify the result structure
159 | expect(result).toHaveProperty('content');
160 | expect(result.content[0]).toHaveProperty('type', 'text');
161 | expect(result.content[0]).toHaveProperty('text');
162 | // Verify the result content
163 | const data = JSON.parse(result.content[0]?.text as string);
164 | expect(data).toHaveProperty('metrics');
165 | expect(data.metrics?.[0]).toHaveProperty('key', 'test');
166 | });
167 | it('should handle issues with complex parameters', async () => {
168 | const result = await mockIssuesHandler({
169 | project_key: 'test-project',
170 | severity: 'MAJOR',
171 | page: '1',
172 | page_size: '10',
173 | statuses: ['OPEN', 'CONFIRMED'],
174 | resolved: 'true',
175 | types: ['BUG', 'VULNERABILITY'],
176 | rules: ['rule1', 'rule2'],
177 | tags: ['tag1', 'tag2'],
178 | created_after: '2023-01-01',
179 | on_component_only: 'true',
180 | since_leak_period: 'true',
181 | in_new_code_period: 'true',
182 | });
183 | // Verify the result structure
184 | expect(result).toHaveProperty('content');
185 | expect(result.content[0]).toHaveProperty('type', 'text');
186 | expect(result.content[0]).toHaveProperty('text');
187 | // Verify the result content
188 | const data = JSON.parse(result.content[0]?.text as string);
189 | expect(data).toHaveProperty('issues');
190 | expect(data.issues?.[0]).toHaveProperty('key', 'issue-1');
191 | });
192 | it('should handle component measures with parameters', async () => {
193 | const result = await mockComponentMeasuresHandler({
194 | component: 'comp-1',
195 | metric_keys: ['coverage'],
196 | branch: 'main',
197 | period: '1',
198 | additional_fields: ['metrics'],
199 | });
200 | // Verify the result structure
201 | expect(result).toHaveProperty('content');
202 | expect(result.content[0]).toHaveProperty('type', 'text');
203 | expect(result.content[0]).toHaveProperty('text');
204 | // Verify the result content
205 | const data = JSON.parse(result.content[0]?.text as string);
206 | expect(data).toHaveProperty('component');
207 | expect(data.component).toHaveProperty('key', 'comp-1');
208 | });
209 | it('should handle components measures with parameters', async () => {
210 | const result = await mockComponentsMeasuresHandler({
211 | component_keys: ['comp-1', 'comp-2'],
212 | metric_keys: ['coverage'],
213 | branch: 'main',
214 | page: '1',
215 | page_size: '10',
216 | additional_fields: ['metrics'],
217 | });
218 | // Verify the result structure
219 | expect(result).toHaveProperty('content');
220 | expect(result.content[0]).toHaveProperty('type', 'text');
221 | expect(result.content[0]).toHaveProperty('text');
222 | // Verify the result content
223 | const data = JSON.parse(result.content[0]?.text as string);
224 | expect(data).toHaveProperty('components');
225 | expect(data.components).toHaveLength(2);
226 | expect(data.components?.[0]).toHaveProperty('key', 'comp-1');
227 | });
228 | it('should handle measures history with parameters', async () => {
229 | const result = await mockMeasuresHistoryHandler({
230 | component: 'comp-1',
231 | metrics: ['coverage'],
232 | from: '2023-01-01',
233 | to: '2023-03-01',
234 | page: '1',
235 | page_size: '10',
236 | });
237 | // Verify the result structure
238 | expect(result).toHaveProperty('content');
239 | expect(result.content[0]).toHaveProperty('type', 'text');
240 | expect(result.content[0]).toHaveProperty('text');
241 | // Verify the result content
242 | const data = JSON.parse(result.content[0]?.text as string);
243 | expect(data).toHaveProperty('measures');
244 | expect(data.measures?.[0]).toHaveProperty('metric', 'coverage');
245 | expect(data.measures?.[0]?.history).toHaveLength(3);
246 | });
247 | });
248 | });
249 |
```
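These tests exercise transform helpers that live in `src/utils/transforms.ts` and `src/index.ts`, neither of which appears on this page. Below is a minimal sketch of what such helpers could look like, inferred only from the assertions above; the bodies are assumptions, not the repository code.

```typescript
// Sketch only: assumed shape of the helpers referenced by the schemas and tests
// on this page (the real implementations live in src/utils/transforms.ts / src/index.ts).
import { z } from 'zod';

export function nullToUndefined<T>(value: T | null | undefined): T | undefined {
  return value ?? undefined;
}

// '10' -> 10, '' or 'abc' -> null, undefined -> undefined (mirrors testNumberTransform above)
export const stringToNumberTransform = (
  val: string | null | undefined
): number | null | undefined => {
  if (val === undefined || val === null) return undefined;
  const parsed = Number.parseInt(val, 10);
  return Number.isNaN(parsed) ? null : parsed;
};

// Accepts either a real array or a JSON-encoded string such as '["a","b"]'
export const parseJsonStringArray = (
  val: string[] | string | null | undefined
): string[] | null | undefined => {
  if (val === null || val === undefined || Array.isArray(val)) return val;
  try {
    const parsed: unknown = JSON.parse(val);
    return Array.isArray(parsed) ? (parsed as string[]) : [val];
  } catch {
    return [val];
  }
};

// Example: a page parameter that tolerates the string input MCP clients send
export const pageSchema = z.string().optional().transform(stringToNumberTransform);
```

The union-plus-transform pattern in `src/schemas/issues.ts` later on this page builds on the same idea: accept what the client sends, normalize it once, and let Zod enforce the result.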
--------------------------------------------------------------------------------
/docs/architecture/decisions/0023-release-management-with-changesets.md:
--------------------------------------------------------------------------------
```markdown
1 | # 23. Release Management with Changesets
2 |
3 | Date: 2025-10-11
4 |
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
11 | The SonarQube MCP Server requires a systematic approach to version management, changelog generation, and release automation. Manual versioning and changelog maintenance are error-prone and time-consuming. The project needs:
12 |
13 | - Automated semantic versioning based on change significance
14 | - Human-readable changelogs generated from commit history
15 | - Integration with GitHub releases for public visibility
16 | - Prevention of releases without documented changes
17 | - Developer-friendly workflow for documenting changes
18 | - Support for multiple contributors documenting changes simultaneously
19 |
20 | Traditional approaches have limitations:
21 |
22 | - **Manual versioning**: Error-prone, requires manual package.json updates
23 | - **Conventional commits alone**: Doesn't capture change intent or impact
24 | - **Standard-version/semantic-release**: Less flexible, harder to customize changelog format
25 | - **Manual changelogs**: Time-consuming, inconsistent format, often outdated
26 |
27 | ## Decision
28 |
29 | We will use **Changesets** (@changesets/cli) for release management, version control, and changelog generation.
30 |
31 | ### Core Workflow
32 |
33 | 1. **Developer creates changeset** when making impactful changes:
34 |
35 | ```bash
36 | pnpm changeset
37 | ```
38 |
39 | - Interactive CLI prompts for change type (major/minor/patch)
40 | - Developer writes human-readable summary
41 | - Creates markdown file in `.changeset/` directory
42 |
43 | 2. **CI/CD validates changesets** on pull requests:
44 | - Custom script (`.github/scripts/version-and-release.js`) checks for changesets
45 | - Fails if feat/fix commits exist without corresponding changesets
46 | - Ensures all significant changes are documented
47 |
48 | 3. **Automated versioning** on main branch:
49 | - Script determines version bump from accumulated changesets
50 | - Updates `package.json` version
51 | - Generates/updates `CHANGELOG.md` with all changeset summaries
52 | - Commits changes with `[skip actions]` to prevent workflow recursion
53 |
54 | 4. **Release creation**:
55 | - GitHub Actions creates Git tag
56 | - Creates GitHub release with changelog excerpt
57 | - Publishes to NPM and Docker registries
58 |
59 | ### Configuration
60 |
61 | **.changeset/config.json**:
62 |
63 | ```json
64 | {
65 | "$schema": "https://unpkg.com/@changesets/[email protected]/schema.json",
66 | "changelog": [
67 | "changelog-github-custom",
68 | {
69 | "repo": "sapientpants/sonarqube-mcp-server"
70 | }
71 | ],
72 | "commit": false,
73 | "access": "public",
74 | "baseBranch": "main"
75 | }
76 | ```
77 |
78 | **Key settings**:
79 |
80 | - `changelog-github-custom`: Custom GitHub changelog generator
81 | - `commit: false`: CI handles commits (prevents double-commits)
82 | - `access: public`: NPM package is public
83 | - `baseBranch: main`: Releases from main branch only
84 |
85 | ### Changeset Types
86 |
87 | **Major (breaking changes)**:
88 |
89 | ```bash
90 | pnpm changeset
91 | # Select: major
92 | # Example: "Renamed `health()` to `getHealthV2()` (breaking API change)"
93 | ```
94 |
95 | **Minor (new features)**:
96 |
97 | ```bash
98 | pnpm changeset
99 | # Select: minor
100 | # Example: "Added support for multi-platform Docker images"
101 | ```
102 |
103 | **Patch (bug fixes, docs, chores)**:
104 |
105 | ```bash
106 | pnpm changeset
107 | # Select: patch
108 | # Example: "Fixed Docker publishing permissions issue"
109 | ```
110 |
111 | ### Validation Logic
112 |
113 | The custom validation script (`.github/scripts/version-and-release.js`) enforces:
114 |
115 | 1. **Commit type analysis**: Checks commit messages for `feat:` or `fix:` prefixes
116 | 2. **Changeset requirement**: Fails if feat/fix commits exist without changesets
117 | 3. **Version determination**: Aggregates changesets to determine final version bump
118 | 4. **Outputs**: Sets GitHub Actions outputs for downstream jobs
119 |
120 | **Example validation failure**:
121 |
122 | ```
123 | Error: Found feat/fix commits without changesets:
124 | - feat: add new tool for hotspot analysis
125 | - fix: resolve authentication timeout issue
126 |
127 | Please create changesets with: pnpm changeset
128 | ```
129 |
130 | ### CI/CD Integration
131 |
132 | **Main workflow (.github/workflows/main.yml)**:
133 |
134 | ```yaml
135 | - name: Version packages
136 | id: version
137 | env:
138 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
139 | run: |
140 | node .github/scripts/version-and-release.js
141 |
142 | - name: Commit version changes
143 | if: steps.version.outputs.changed == 'true'
144 | run: |
145 | git config --local user.email "${{ github.actor_id }}+${{ github.actor }}@users.noreply.github.com"
146 | git config --local user.name "${{ github.actor }}"
147 | git add package.json CHANGELOG.md .changeset
148 | git commit -m "chore(release): v${{ steps.version.outputs.version }} [skip actions]"
149 | git push origin main
150 | ```
151 |
152 | **PR workflow (.github/workflows/pr.yml)**:
153 |
154 | ```yaml
155 | - name: Check for changesets
156 | run: |
157 | node .github/scripts/version-and-release.js
158 | # Fails PR if changesets missing for feat/fix commits
159 | ```
160 |
161 | ## Consequences
162 |
163 | ### Positive
164 |
165 | - **Semantic Versioning**: Automatic version bumps based on change impact
166 | - **Human-Readable Changelogs**: Developers write clear summaries rather than relying on entries parsed from commit messages
167 | - **Change Documentation**: Every significant change documented before merge
168 | - **Parallel Development**: Multiple contributors can add changesets simultaneously
169 | - **GitHub Integration**: Custom formatter creates GitHub-friendly changelogs
170 | - **Release Automation**: Complete automation from changeset to GitHub release
171 | - **Flexibility**: Custom validation script enforces project-specific rules
172 | - **Monorepo Ready**: Changesets scales to monorepos (though not needed here)
173 | - **Review-Friendly**: Changesets visible in PR for review
174 | - **Version Planning**: Aggregate view of pending version bump
175 |
176 | ### Negative
177 |
178 | - **Extra Step**: Developers must remember to create changesets
179 | - **Learning Curve**: New contributors need to learn changeset workflow
180 | - **CI Complexity**: Custom validation script adds maintenance burden
181 | - **Commit Noise**: Creates `.changeset/*.md` files in version control
182 | - **Manual Intervention**: Sometimes requires manual conflict resolution in CHANGELOG.md
183 | - **Validation Strictness**: May block non-impactful PRs if validation is too strict
184 |
185 | ### Neutral
186 |
187 | - **Commit Message Format**: Still benefits from conventional commits for context
188 | - **Multiple Files**: Changesets create multiple small markdown files
189 | - **Git History**: Version bump commits separate from feature commits
190 | - **Changeset Cleanup**: `.changeset/*.md` files deleted after version bump
191 |
192 | ## Implementation
193 |
194 | ### Developer Workflow
195 |
196 | **Adding a changeset**:
197 |
198 | ```bash
199 | # 1. Make your code changes
200 | git add .
201 |
202 | # 2. Create a changeset (before committing)
203 | pnpm changeset
204 |
205 | # Interactive prompts:
206 | # - Select change type (major/minor/patch)
207 | # - Write summary: Clear, user-facing description
208 | # - Confirm
209 |
210 | # 3. Commit everything together
211 | git commit -m "feat: add new feature
212 |
213 | This adds support for...
214 |
215 | Closes #123"
216 |
217 | # Changeset is now part of the PR
218 | ```
219 |
220 | **Example changeset file** (`.changeset/cool-feature.md`):
221 |
222 | ```markdown
223 | ---
224 | 'sonarqube-mcp-server': minor
225 | ---
226 |
227 | Added support for security hotspot status updates through new MCP tool
228 | ```
229 |
230 | ### Maintainer Workflow
231 |
232 | **Releasing a version**:
233 |
234 | 1. Merge PRs with changesets to main
235 | 2. CI automatically:
236 | - Aggregates changesets
237 | - Bumps version in package.json
238 | - Updates CHANGELOG.md
239 | - Commits and pushes
240 | - Creates Git tag
241 | - Creates GitHub release
242 | - Publishes to registries
243 |
244 | **Manual release** (rare):
245 |
246 | ```bash
247 | # Generate version bump and changelog
248 | pnpm changeset version
249 |
250 | # Review changes
251 | git diff package.json CHANGELOG.md
252 |
253 | # Commit
254 | git add -A
255 | git commit -m "chore(release): version packages"
256 |
257 | # Create tag
258 | git tag -a "v$(node -p "require('./package.json').version")" -m "Release"
259 |
260 | # Push
261 | git push && git push --tags
262 | ```
263 |
264 | ### Handling Empty Changesets
265 |
266 | For non-impactful changes (docs, tests, refactoring), create an empty changeset:
267 |
268 | ```bash
269 | pnpm changeset --empty
270 | ```
271 |
272 | This satisfies validation without bumping version or adding changelog entry.
273 |
274 | ### Changeset Status
275 |
276 | Check pending changesets:
277 |
278 | ```bash
279 | pnpm changeset status
280 |
281 | # Output:
282 | # Changes to be included in next release:
283 | # minor: Added security hotspot tools
284 | # patch: Fixed authentication timeout
285 | # patch: Updated documentation
286 | #
287 | # Suggested version bump: minor (current: 1.10.18, next: 1.11.0)
288 | ```
289 |
290 | ## Examples
291 |
292 | ### Example 1: Adding a Feature
293 |
294 | **Pull request with changeset**:
295 |
296 | File: `.changeset/add-quality-gate-tool.md`
297 |
298 | ```markdown
299 | ---
300 | 'sonarqube-mcp-server': minor
301 | ---
302 |
303 | Added new `quality_gate_status` tool to check project quality gate status directly from the MCP server
304 | ```
305 |
306 | **Generated CHANGELOG.md entry**:
307 |
308 | ```markdown
309 | ## 1.11.0
310 |
311 | ### Minor Changes
312 |
313 | - abc1234: Added new `quality_gate_status` tool to check project quality gate status directly from the MCP server
314 | ```
315 |
316 | ### Example 2: Fixing a Bug
317 |
318 | **Pull request with changeset**:
319 |
320 | File: `.changeset/fix-docker-permissions.md`
321 |
322 | ```markdown
323 | ---
324 | 'sonarqube-mcp-server': patch
325 | ---
326 |
327 | Fixed Docker publishing workflow failing due to missing `packages:write` permission
328 | ```
329 |
330 | **Generated CHANGELOG.md entry**:
331 |
332 | ```markdown
333 | ## 1.10.19
334 |
335 | ### Patch Changes
336 |
337 | - def5678: Fixed Docker publishing workflow failing due to missing `packages:write` permission
338 | ```
339 |
340 | ### Example 3: Multiple Changes
341 |
342 | **Three PRs merged with changesets**:
343 |
344 | 1. `.changeset/feat-hotspots.md` (minor)
345 | 2. `.changeset/fix-auth.md` (patch)
346 | 3. `.changeset/docs-update.md` (patch)
347 |
348 | **Resulting version bump**: 1.10.18 → 1.11.0 (minor wins)
349 |
350 | **Generated CHANGELOG.md**:
351 |
352 | ```markdown
353 | ## 1.11.0
354 |
355 | ### Minor Changes
356 |
357 | - abc1234: Added security hotspot management tools with status update capabilities
358 |
359 | ### Patch Changes
360 |
361 | - def5678: Fixed authentication timeout in circuit breaker
362 | - ghi9012: Updated API documentation with new examples
363 | ```
364 |
365 | ## References
366 |
367 | - Changesets Documentation: https://github.com/changesets/changesets
368 | - Configuration: .changeset/config.json
369 | - Validation Script: .github/scripts/version-and-release.js
370 | - CI/CD Integration: .github/workflows/main.yml
371 | - Changelog Format: changelog-github-custom package
372 | - Package Scripts: package.json (changeset, changeset:status commands)
373 |
```
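The ADR repeatedly points to `.github/scripts/version-and-release.js`, which is not reproduced on this page. The sketch below shows the kind of gate it describes — fail when `feat:`/`fix:` commits land without a changeset — written as illustrative TypeScript; the commit range, environment variable, and file layout are assumptions, not the actual script.

```typescript
// Sketch of the changeset gate described in "Validation Logic" above; this is
// NOT .github/scripts/version-and-release.js. Commit range, env var, and paths
// are assumptions for illustration only.
import { execSync } from 'node:child_process';
import { readdirSync } from 'node:fs';

function hasPendingChangesets(): boolean {
  // Changesets are markdown files in .changeset/ other than README.md.
  return readdirSync('.changeset').some((f) => f.endsWith('.md') && f !== 'README.md');
}

function hasReleaseWorthyCommits(range: string): boolean {
  const subjects = execSync(`git log --pretty=%s ${range}`, { encoding: 'utf8' });
  return subjects.split('\n').some((s) => /^(feat|fix)(\(.+\))?!?:/.test(s));
}

const range = process.env.COMMIT_RANGE ?? 'origin/main..HEAD'; // assumed input
if (hasReleaseWorthyCommits(range) && !hasPendingChangesets()) {
  console.error('Found feat/fix commits without changesets. Run: pnpm changeset');
  process.exit(1);
}
```

The real script additionally aggregates the pending changesets to determine the next version bump and exposes the result as GitHub Actions outputs, as described above.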
--------------------------------------------------------------------------------
/src/monitoring/__tests__/circuit-breaker.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest';
2 | import { CircuitBreakerFactory } from '../circuit-breaker.js';
3 | import { getMetricsService, cleanupMetricsService } from '../metrics.js';
4 |
5 | describe('Circuit Breaker', () => {
6 | let mockFn: ReturnType<typeof vi.fn<(...args: unknown[]) => Promise<unknown>>>;
7 | let metricsService: ReturnType<typeof getMetricsService>;
8 |
9 | beforeEach(() => {
10 | // Reset circuit breaker factory
11 | CircuitBreakerFactory.reset();
12 | cleanupMetricsService();
13 | metricsService = getMetricsService();
14 |
15 | mockFn = vi.fn<(...args: unknown[]) => Promise<unknown>>();
16 | });
17 |
18 | afterEach(() => {
19 | CircuitBreakerFactory.reset();
20 | cleanupMetricsService();
21 | });
22 |
23 | describe('Basic functionality', () => {
24 | it('should execute function when circuit is closed', async () => {
25 | mockFn.mockResolvedValue('success');
26 |
27 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
28 | const result = await breaker.fire();
29 |
30 | expect(result).toBe('success');
31 | expect(mockFn).toHaveBeenCalledTimes(1);
32 | });
33 |
34 | it('should pass arguments to the wrapped function', async () => {
35 | mockFn.mockImplementation((...args: unknown[]) => Promise.resolve(`${args[0]}-${args[1]}`));
36 |
37 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
38 | const result = await breaker.fire('test', 123);
39 |
40 | expect(result).toBe('test-123');
41 | expect(mockFn).toHaveBeenCalledWith('test', 123);
42 | });
43 |
44 | it('should reuse the same breaker for the same name', () => {
45 | const breaker1 = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
46 | const breaker2 = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
47 |
48 | expect(breaker1).toBe(breaker2);
49 | });
50 |
51 | it('should create different breakers for different names', () => {
52 | const breaker1 = CircuitBreakerFactory.getBreaker('breaker-1', mockFn);
53 | const breaker2 = CircuitBreakerFactory.getBreaker('breaker-2', mockFn);
54 |
55 | expect(breaker1).not.toBe(breaker2);
56 | });
57 | });
58 |
59 | describe('Circuit opening behavior', () => {
60 | it('should open circuit after threshold failures', async () => {
61 | mockFn.mockRejectedValue(new Error('Service unavailable'));
62 |
63 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
64 | errorThresholdPercentage: 50,
65 | resetTimeout: 100,
66 | volumeThreshold: 2,
67 | });
68 |
69 | // First two failures should open the circuit
70 | await expect(breaker.fire()).rejects.toThrow('Service unavailable');
71 | await expect(breaker.fire()).rejects.toThrow('Service unavailable');
72 |
73 | // Circuit should now be open
74 | await expect(breaker.fire()).rejects.toThrow('Breaker is open');
75 |
76 | // Function should not be called when circuit is open
77 | expect(mockFn).toHaveBeenCalledTimes(2);
78 | });
79 |
80 | it('should not open circuit if failures are below threshold', async () => {
81 | let callCount = 0;
82 | mockFn.mockImplementation(() => {
83 | callCount++;
84 | if (callCount === 1) {
85 | return Promise.reject(new Error('Temporary failure'));
86 | }
87 | return Promise.resolve('success');
88 | });
89 |
90 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
91 | errorThresholdPercentage: 50,
92 | resetTimeout: 100,
93 | volumeThreshold: 3,
94 | });
95 |
96 | // One failure, two successes - should not open
97 | await expect(breaker.fire()).rejects.toThrow('Temporary failure');
98 | await expect(breaker.fire()).resolves.toBe('success');
99 | await expect(breaker.fire()).resolves.toBe('success');
100 |
101 | // Circuit should still be closed
102 | await expect(breaker.fire()).resolves.toBe('success');
103 | });
104 | });
105 |
106 | describe('Circuit recovery behavior', () => {
107 | it('should move to half-open state after timeout', async () => {
108 | mockFn.mockRejectedValueOnce(new Error('Failure 1'));
109 | mockFn.mockRejectedValueOnce(new Error('Failure 2'));
110 | mockFn.mockResolvedValue('recovered');
111 |
112 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
113 | errorThresholdPercentage: 50,
114 | resetTimeout: 50, // 50ms reset timeout
115 | volumeThreshold: 2,
116 | });
117 |
118 | // Open the circuit
119 | await expect(breaker.fire()).rejects.toThrow('Failure 1');
120 | await expect(breaker.fire()).rejects.toThrow('Failure 2');
121 |
122 | // Circuit is open
123 | await expect(breaker.fire()).rejects.toThrow('Breaker is open');
124 |
125 | // Wait for reset timeout
126 | await new Promise((resolve) => setTimeout(resolve, 60));
127 |
128 | // Circuit should be half-open, allowing one request
129 | await expect(breaker.fire()).resolves.toBe('recovered');
130 |
131 | // Circuit should be closed again
132 | await expect(breaker.fire()).resolves.toBe('recovered');
133 | });
134 |
135 | it('should re-open circuit if half-open test fails', async () => {
136 | mockFn.mockRejectedValue(new Error('Persistent failure'));
137 |
138 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
139 | errorThresholdPercentage: 50,
140 | resetTimeout: 50,
141 | volumeThreshold: 2,
142 | });
143 |
144 | // Open the circuit
145 | await expect(breaker.fire()).rejects.toThrow('Persistent failure');
146 | await expect(breaker.fire()).rejects.toThrow('Persistent failure');
147 |
148 | // Wait for reset timeout
149 | await new Promise((resolve) => setTimeout(resolve, 60));
150 |
151 | // Half-open test should fail and re-open circuit
152 | await expect(breaker.fire()).rejects.toThrow('Persistent failure');
153 |
154 | // Circuit should be open again
155 | await expect(breaker.fire()).rejects.toThrow('Breaker is open');
156 | });
157 | });
158 |
159 | describe('Metrics integration', () => {
160 | it('should track circuit breaker metrics', async () => {
161 | mockFn.mockResolvedValueOnce('success');
162 | mockFn.mockRejectedValueOnce(new Error('failure'));
163 | mockFn.mockResolvedValueOnce('success');
164 |
165 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
166 |
167 | await breaker.fire();
168 | await expect(breaker.fire()).rejects.toThrow('failure');
169 | await breaker.fire();
170 |
171 | const metrics = metricsService.getMetrics();
172 |
173 | // Check for circuit breaker metrics - the breaker tracks failures
174 | expect(metrics).toContain('mcp_circuit_breaker_failures_total{service="test-breaker"} 1');
175 | });
176 |
177 | it('should track circuit state changes', async () => {
178 | mockFn.mockRejectedValue(new Error('Service down'));
179 |
180 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
181 | errorThresholdPercentage: 50,
182 | resetTimeout: 50,
183 | volumeThreshold: 2,
184 | });
185 |
186 | // Open the circuit
187 | await expect(breaker.fire()).rejects.toThrow();
188 | await expect(breaker.fire()).rejects.toThrow();
189 |
190 | // Check metrics for open state
191 | const metrics = metricsService.getMetrics();
192 | expect(metrics).toContain('mcp_circuit_breaker_state{service="test-breaker"} 1');
193 | });
194 | });
195 |
196 | describe('Custom options', () => {
197 | it('should respect custom timeout', async () => {
198 | let timeoutId: NodeJS.Timeout;
199 | mockFn.mockImplementation(
200 | () =>
201 | new Promise((resolve) => {
202 | timeoutId = setTimeout(() => resolve('slow'), 200);
203 | })
204 | );
205 |
206 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
207 | timeout: 100, // 100ms timeout
208 | });
209 |
210 | // Should timeout
211 | await expect(breaker.fire()).rejects.toThrow('Timed out');
212 |
213 | // Clean up the timeout to prevent open handle
214 | if (timeoutId!) {
215 | clearTimeout(timeoutId);
216 | }
217 | });
218 |
219 | it('should respect custom error filter', async () => {
220 | // The errorFilter should return true for errors that should be counted
221 | mockFn.mockRejectedValueOnce(new Error('Network error'));
222 | mockFn.mockRejectedValueOnce(new Error('Timeout error'));
223 | mockFn.mockResolvedValue('success');
224 |
225 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn, {
226 | errorThresholdPercentage: 50,
227 | volumeThreshold: 2,
228 | // Only count network errors toward circuit opening
229 | errorFilter: (err: Error) => err.message.includes('Network'),
230 | });
231 |
232 | // Network error should count
233 | await expect(breaker.fire()).rejects.toThrow('Network error');
234 |
235 | // Timeout error should NOT count (filtered out)
236 | await expect(breaker.fire()).rejects.toThrow('Timeout error');
237 |
238 | // Circuit should still be closed because only 1 error counted
239 | await expect(breaker.fire()).resolves.toBe('success');
240 | });
241 | });
242 |
243 | describe('Error handling', () => {
244 | it('should handle synchronous errors', async () => {
245 | mockFn.mockImplementation(() => {
246 | throw new Error('Sync error');
247 | });
248 |
249 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
250 |
251 | await expect(breaker.fire()).rejects.toThrow('Sync error');
252 | });
253 |
254 | it('should handle different error types', async () => {
255 | const customError = { code: 'CUSTOM_ERROR', message: 'Custom error' };
256 | mockFn.mockRejectedValue(customError);
257 |
258 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
259 |
260 | await expect(breaker.fire()).rejects.toEqual(customError);
261 | });
262 | });
263 |
264 | describe('Concurrency', () => {
265 | it('should handle concurrent requests', async () => {
266 | let resolveCount = 0;
267 | mockFn.mockImplementation(async () => {
268 | await new Promise((resolve) => setTimeout(resolve, 10));
269 | resolveCount++;
270 | return `result-${resolveCount}`;
271 | });
272 |
273 | const breaker = CircuitBreakerFactory.getBreaker('test-breaker', mockFn);
274 |
275 | // Fire multiple concurrent requests
276 | const results = await Promise.all([breaker.fire(), breaker.fire(), breaker.fire()]);
277 |
278 | expect(results).toHaveLength(3);
279 | expect(mockFn).toHaveBeenCalledTimes(3);
280 | });
281 | });
282 | });
283 |
```
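`CircuitBreakerFactory` itself lives in `src/monitoring/circuit-breaker.ts`, which is not shown on this page; per ADR 0026 it is built on the `opossum` library. Below is a minimal sketch of the surface these tests rely on — the defaults and the omitted metrics wiring are assumptions, not the real implementation.

```typescript
// Sketch of the factory surface exercised by the tests above; NOT the real
// src/monitoring/circuit-breaker.ts. Defaults and metrics wiring are assumptions.
// Assumes the opossum dependency named in ADR 0026.
import CircuitBreaker from 'opossum';

type AsyncFn = (...args: unknown[]) => Promise<unknown>;

interface BreakerOptions {
  timeout?: number;
  errorThresholdPercentage?: number;
  resetTimeout?: number;
  volumeThreshold?: number;
  errorFilter?: (err: Error) => boolean;
}

export class CircuitBreakerFactory {
  private static readonly breakers = new Map<string, CircuitBreaker>();

  static getBreaker(name: string, fn: AsyncFn, options: BreakerOptions = {}): CircuitBreaker {
    const existing = this.breakers.get(name);
    if (existing) return existing; // same name -> same instance, as the tests expect

    const breaker = new CircuitBreaker(fn, {
      timeout: 10_000, // assumed defaults; overridable per call
      errorThresholdPercentage: 50,
      resetTimeout: 30_000,
      ...options,
    });
    this.breakers.set(name, breaker);
    return breaker;
  }

  static reset(): void {
    this.breakers.forEach((breaker) => breaker.shutdown());
    this.breakers.clear();
  }
}
```

With a shape like this, `breaker.fire('test', 123)` forwards its arguments to the wrapped function, and `reset()` gives each test a clean slate.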
--------------------------------------------------------------------------------
/src/schemas/issues.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod';
2 | import { stringToNumberTransform, parseJsonStringArray } from '../utils/transforms.js';
3 | import {
4 | severitySchema,
5 | severitiesSchema,
6 | statusSchema,
7 | resolutionSchema,
8 | typeSchema,
9 | cleanCodeAttributeCategoriesSchema,
10 | impactSeveritiesSchema,
11 | impactSoftwareQualitiesSchema,
12 | pullRequestNullableSchema,
13 | } from './common.js';
14 |
15 | /**
16 | * Schema for mark issue false positive tool
17 | */
18 | export const markIssueFalsePositiveToolSchema = {
19 | issue_key: z.string().describe('The key of the issue to mark as false positive'),
20 | comment: z
21 | .string()
22 | .optional()
23 | .describe('Optional comment explaining why this is a false positive'),
24 | };
25 |
26 | /**
27 |  * Schema for mark issue won't fix tool
28 | */
29 | export const markIssueWontFixToolSchema = {
30 | issue_key: z.string().describe("The key of the issue to mark as won't fix"),
31 | comment: z.string().optional().describe("Optional comment explaining why this won't be fixed"),
32 | };
33 |
34 | /**
35 | * Schema for mark issues false positive (bulk) tool
36 | */
37 | export const markIssuesFalsePositiveToolSchema = {
38 | issue_keys: z
39 | .array(z.string())
40 | .min(1, 'At least one issue key is required')
41 | .describe('Array of issue keys to mark as false positive'),
42 | comment: z
43 | .string()
44 | .optional()
45 | .describe('Optional comment explaining why these are false positives'),
46 | };
47 |
48 | /**
49 |  * Schema for mark issues won't fix (bulk) tool
50 | */
51 | export const markIssuesWontFixToolSchema = {
52 | issue_keys: z
53 | .array(z.string())
54 | .min(1, 'At least one issue key is required')
55 | .describe("Array of issue keys to mark as won't fix"),
56 | comment: z.string().optional().describe("Optional comment explaining why these won't be fixed"),
57 | };
58 |
59 | /**
60 | * Schema for add comment to issue tool
61 | */
62 | export const addCommentToIssueToolSchema = {
63 | issue_key: z
64 | .string()
65 | .min(1, 'Issue key is required')
66 | .describe('The key of the issue to add a comment to'),
67 | text: z
68 | .string()
69 | .min(1, 'Comment text is required')
70 | .describe('The comment text to add. Supports markdown formatting for rich text content'),
71 | };
72 |
73 | /**
74 | * Schema for assign issue tool
75 | */
76 | export const assignIssueToolSchema = {
77 | issueKey: z.string().min(1, 'Issue key is required').describe('The key of the issue to assign'),
78 | assignee: z
79 | .string()
80 | .optional()
81 | .describe('The username of the assignee. Leave empty to unassign the issue'),
82 | };
83 |
84 | /**
85 | * Schema for confirm issue tool
86 | */
87 | export const confirmIssueToolSchema = {
88 | issue_key: z.string().describe('The key of the issue to confirm'),
89 | comment: z
90 | .string()
91 | .optional()
92 | .describe('Optional comment explaining why this issue is confirmed'),
93 | };
94 |
95 | /**
96 | * Schema for unconfirm issue tool
97 | */
98 | export const unconfirmIssueToolSchema = {
99 | issue_key: z.string().describe('The key of the issue to unconfirm'),
100 | comment: z
101 | .string()
102 | .optional()
103 | .describe('Optional comment explaining why this issue needs further investigation'),
104 | };
105 |
106 | /**
107 | * Schema for resolve issue tool
108 | */
109 | export const resolveIssueToolSchema = {
110 | issue_key: z.string().describe('The key of the issue to resolve'),
111 | comment: z.string().optional().describe('Optional comment explaining how the issue was resolved'),
112 | };
113 |
114 | /**
115 | * Schema for reopen issue tool
116 | */
117 | export const reopenIssueToolSchema = {
118 | issue_key: z.string().describe('The key of the issue to reopen'),
119 | comment: z
120 | .string()
121 | .optional()
122 | .describe('Optional comment explaining why the issue is being reopened'),
123 | };
124 |
125 | /**
126 | * Schema for issues tool
127 | */
128 | export const issuesToolSchema = {
129 | // Component filters (backward compatible)
130 | project_key: z.string().optional().describe('Single project key for backward compatibility'), // Made optional to support projects array
131 | projects: z
132 | .union([z.array(z.string()), z.string()])
133 | .transform(parseJsonStringArray)
134 | .nullable()
135 | .optional()
136 | .describe('Filter by project keys'),
137 | component_keys: z
138 | .union([z.array(z.string()), z.string()])
139 | .transform(parseJsonStringArray)
140 | .nullable()
141 | .optional()
142 | .describe(
143 | 'Filter by component keys (file paths, directories, or modules). Use this to filter issues by specific files or folders'
144 | ),
145 | components: z
146 | .union([z.array(z.string()), z.string()])
147 | .transform(parseJsonStringArray)
148 | .nullable()
149 | .optional()
150 | .describe('Alias for component_keys - filter by file paths, directories, or modules'),
151 | on_component_only: z
152 | .union([z.boolean(), z.string().transform((val) => val === 'true')])
153 | .nullable()
154 | .optional()
155 | .describe('Return only issues on the specified components, not on their sub-components'),
156 | directories: z
157 | .union([z.array(z.string()), z.string()])
158 | .transform(parseJsonStringArray)
159 | .nullable()
160 | .optional()
161 | .describe('Filter by directory paths'),
162 | files: z
163 | .union([z.array(z.string()), z.string()])
164 | .transform(parseJsonStringArray)
165 | .nullable()
166 | .optional()
167 | .describe('Filter by specific file paths'),
168 | scopes: z
169 | .union([z.array(z.enum(['MAIN', 'TEST', 'OVERALL'])), z.string()])
170 | .transform((val) => {
171 | const parsed = parseJsonStringArray(val);
172 | // Validate that all values are valid scopes
173 | if (parsed && Array.isArray(parsed)) {
174 | return parsed.filter((v) => ['MAIN', 'TEST', 'OVERALL'].includes(v));
175 | }
176 | return parsed;
177 | })
178 | .nullable()
179 | .optional()
180 | .describe('Filter by issue scopes (MAIN, TEST, OVERALL)'),
181 |
182 | // Branch and PR support
183 | branch: z.string().nullable().optional(),
184 | pull_request: pullRequestNullableSchema,
185 |
186 | // Issue filters
187 | issues: z
188 | .union([z.array(z.string()), z.string()])
189 | .transform(parseJsonStringArray)
190 | .nullable()
191 | .optional(),
192 | severity: severitySchema, // Deprecated single value
193 | severities: severitiesSchema, // New array support
194 | statuses: statusSchema,
195 | resolutions: resolutionSchema,
196 | resolved: z
197 | .union([z.boolean(), z.string().transform((val) => val === 'true')])
198 | .nullable()
199 | .optional(),
200 | types: typeSchema,
201 |
202 | // Clean Code taxonomy (SonarQube 10.x+)
203 | clean_code_attribute_categories: cleanCodeAttributeCategoriesSchema,
204 | impact_severities: impactSeveritiesSchema,
205 | impact_software_qualities: impactSoftwareQualitiesSchema,
206 | issue_statuses: statusSchema, // New issue status values
207 |
208 | // Rules and tags
209 | rules: z
210 | .union([z.array(z.string()), z.string()])
211 | .transform(parseJsonStringArray)
212 | .nullable()
213 | .optional()
214 | .describe('Filter by rule keys'),
215 | tags: z
216 | .union([z.array(z.string()), z.string()])
217 | .transform(parseJsonStringArray)
218 | .nullable()
219 | .optional()
220 | .describe(
221 | 'Filter by issue tags. Essential for security audits, regression testing, and categorized analysis'
222 | ),
223 |
224 | // Date filters
225 | created_after: z.string().nullable().optional(),
226 | created_before: z.string().nullable().optional(),
227 | created_at: z.string().nullable().optional(),
228 | created_in_last: z.string().nullable().optional(),
229 |
230 | // Assignment
231 | assigned: z
232 | .union([z.boolean(), z.string().transform((val) => val === 'true')])
233 | .nullable()
234 | .optional()
235 | .describe('Filter to only assigned (true) or unassigned (false) issues'),
236 | assignees: z
237 | .union([z.array(z.string()), z.string()])
238 | .transform(parseJsonStringArray)
239 | .nullable()
240 | .optional()
241 | .describe(
242 | 'Filter by assignee logins. Critical for targeted clean-up sprints and workload analysis'
243 | ),
244 | author: z.string().nullable().optional().describe('Filter by single issue author'), // Single author
245 | authors: z
246 | .union([z.array(z.string()), z.string()])
247 | .transform(parseJsonStringArray)
248 | .nullable()
249 | .optional()
250 | .describe('Filter by multiple issue authors'), // Multiple authors
251 |
252 | // Security standards
253 | cwe: z
254 | .union([z.array(z.string()), z.string()])
255 | .transform(parseJsonStringArray)
256 | .nullable()
257 | .optional(),
258 | owasp_top10: z
259 | .union([z.array(z.string()), z.string()])
260 | .transform(parseJsonStringArray)
261 | .nullable()
262 | .optional(),
263 | owasp_top10_v2021: z
264 | .union([z.array(z.string()), z.string()])
265 | .transform(parseJsonStringArray)
266 | .nullable()
267 | .optional(), // New 2021 version
268 | sans_top25: z
269 | .union([z.array(z.string()), z.string()])
270 | .transform(parseJsonStringArray)
271 | .nullable()
272 | .optional(),
273 | sonarsource_security: z
274 | .union([z.array(z.string()), z.string()])
275 | .transform(parseJsonStringArray)
276 | .nullable()
277 | .optional(),
278 | sonarsource_security_category: z
279 | .union([z.array(z.string()), z.string()])
280 | .transform(parseJsonStringArray)
281 | .nullable()
282 | .optional(),
283 |
284 | // Languages
285 | languages: z
286 | .union([z.array(z.string()), z.string()])
287 | .transform(parseJsonStringArray)
288 | .nullable()
289 | .optional(),
290 |
291 | // Facets
292 | facets: z
293 | .union([z.array(z.string()), z.string()])
294 | .transform(parseJsonStringArray)
295 | .nullable()
296 | .optional()
297 | .describe(
298 | 'Enable faceted search for aggregations. Critical for dashboards. Available facets: severities, statuses, resolutions, rules, tags, types, authors, assignees, languages, etc.'
299 | ),
300 | facet_mode: z
301 | .enum(['effort', 'count'])
302 | .nullable()
303 | .optional()
304 | .describe(
305 | 'Mode for facet computation: count (number of issues) or effort (remediation effort)'
306 | ),
307 |
308 | // New code
309 | since_leak_period: z
310 | .union([z.boolean(), z.string().transform((val) => val === 'true')])
311 | .nullable()
312 | .optional(),
313 | in_new_code_period: z
314 | .union([z.boolean(), z.string().transform((val) => val === 'true')])
315 | .nullable()
316 | .optional(),
317 |
318 | // Sorting
319 | s: z.string().nullable().optional(), // Sort field
320 | asc: z
321 | .union([z.boolean(), z.string().transform((val) => val === 'true')])
322 | .nullable()
323 | .optional(), // Sort direction
324 |
325 | // Response optimization
326 | additional_fields: z
327 | .union([z.array(z.string()), z.string()])
328 | .transform(parseJsonStringArray)
329 | .nullable()
330 | .optional(),
331 |
332 | // Pagination
333 | page: z.string().optional().transform(stringToNumberTransform),
334 | page_size: z.string().optional().transform(stringToNumberTransform),
335 | };
336 |
```
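The union/transform fields above accept either native arrays or string-encoded values. A minimal sketch of that coercion, assuming a local `toArray` helper that approximates the `parseJsonStringArray` utility imported elsewhere in this file (the helper below is illustrative, not the repository's implementation):

```typescript
import { z } from 'zod';

// Hypothetical stand-in for parseJsonStringArray: accepts a real array,
// a JSON-encoded array, or a comma-separated string and returns string[].
const toArray = (val: string[] | string): string[] => {
  if (Array.isArray(val)) return val;
  try {
    const parsed = JSON.parse(val);
    return Array.isArray(parsed) ? parsed : [val];
  } catch {
    return val.split(',').map((s) => s.trim());
  }
};

// Two representative fields from the issues tool schema above.
const demoIssuesSchema = z.object({
  projects: z.union([z.array(z.string()), z.string()]).transform(toArray).nullable().optional(),
  resolved: z
    .union([z.boolean(), z.string().transform((v) => v === 'true')])
    .nullable()
    .optional(),
});

// Both calls normalize to { projects: ['my-app'], resolved: true }.
demoIssuesSchema.parse({ projects: ['my-app'], resolved: true });
demoIssuesSchema.parse({ projects: '["my-app"]', resolved: 'true' });
```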
--------------------------------------------------------------------------------
/scripts/test-monitoring-integration.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 | # Comprehensive integration test suite for monitoring endpoints
3 | # Tests health checks, metrics, distributed tracing, and circuit breaker functionality
4 |
5 | set -e # Exit on error
6 |
7 | # Colors for output
8 | RED='\033[0;31m'
9 | GREEN='\033[0;32m'
10 | YELLOW='\033[1;33m'
11 | BLUE='\033[0;34m'
12 | NC='\033[0m' # No Color
13 |
14 | echo -e "${GREEN}📊 SonarQube MCP Server - Monitoring Integration Tests${NC}"
15 | echo "======================================================"
16 |
17 | # Configuration
18 | NAMESPACE="${NAMESPACE:-sonarqube-mcp}"
19 | SERVICE_NAME="${SERVICE_NAME:-sonarqube-mcp}"
20 | PORT="${PORT:-3000}"
21 | BASE_URL="http://localhost:$PORT"
22 |
23 | # Test results
24 | TESTS_PASSED=0
25 | TESTS_FAILED=0
26 |
27 | # Function to check if a command exists
28 | command_exists() {
29 | command -v "$1" >/dev/null 2>&1
30 | }
31 |
32 | # Function to run a test
33 | run_test() {
34 | local test_name=$1
35 | local test_command=$2
36 |
37 | echo -ne " $test_name... "
38 |
39 | if eval "$test_command" >/dev/null 2>&1; then
40 | echo -e "${GREEN}✅ PASSED${NC}"
41 |     TESTS_PASSED=$((TESTS_PASSED + 1))  # avoid ((var++)): it returns non-zero when var is 0, aborting under set -e
42 | return 0
43 | else
44 | echo -e "${RED}❌ FAILED${NC}"
45 |     TESTS_FAILED=$((TESTS_FAILED + 1))  # same set -e consideration as above
46 | return 1
47 | fi
48 | }
49 |
50 | # Function to check endpoint response
51 | check_endpoint() {
52 | local endpoint=$1
53 | local expected_status=${2:-200}
54 | local description=$3
55 |
56 | response=$(curl -s -o /dev/null -w "%{http_code}" "$BASE_URL$endpoint")
57 |
58 | if [ "$response" = "$expected_status" ]; then
59 | return 0
60 | elif [ "$expected_status" = "200" ] && [ "$response" = "503" ]; then
61 | # For health endpoints, 503 might be acceptable in test environment
62 | return 0
63 | else
64 | echo " Expected: $expected_status, Got: $response"
65 | return 1
66 | fi
67 | }
68 |
69 | # Function to check JSON response
70 | check_json_response() {
71 | local endpoint=$1
72 | local json_path=$2
73 | local expected_value=$3
74 |
75 | actual_value=$(curl -s "$BASE_URL$endpoint" | jq -r "$json_path" 2>/dev/null)
76 |
77 | if [ "$actual_value" = "$expected_value" ]; then
78 | return 0
79 | else
80 | echo " Expected: $expected_value, Got: $actual_value"
81 | return 1
82 | fi
83 | }
84 |
85 | # Function to check metrics format
86 | check_metrics_format() {
87 | local metrics=$(curl -s "$BASE_URL/metrics")
88 |
89 | # Check for standard Prometheus metrics
90 | if echo "$metrics" | grep -q "^# HELP" && echo "$metrics" | grep -q "^# TYPE"; then
91 | return 0
92 | else
93 | return 1
94 | fi
95 | }
96 |
97 | # Function to test circuit breaker behavior
98 | test_circuit_breaker() {
99 | echo -e "\n${BLUE}🔌 Testing Circuit Breaker functionality...${NC}"
100 |
101 | # Note: In a real test environment, you would trigger failures to test circuit breaker
102 | # For now, we just check if circuit breaker metrics are exposed
103 |
104 | run_test "Circuit breaker metrics exposed" \
105 | "curl -s $BASE_URL/metrics | grep -q 'circuit_breaker'"
106 |
107 | run_test "Circuit breaker state metric" \
108 | "curl -s $BASE_URL/metrics | grep -q 'sonarqube_circuit_state'"
109 | }
110 |
111 | # Function to test health check details
112 | test_health_checks() {
113 | echo -e "\n${BLUE}🏥 Testing Health Check endpoints...${NC}"
114 |
115 | # Test basic health endpoint
116 | run_test "Health endpoint accessible" \
117 | "check_endpoint /health"
118 |
119 | # Test ready endpoint
120 | run_test "Ready endpoint accessible" \
121 | "check_endpoint /ready"
122 |
123 | # Test health endpoint returns JSON
124 | run_test "Health endpoint returns JSON" \
125 | "curl -s $BASE_URL/health | jq . >/dev/null"
126 |
127 | # Test health check structure
128 | run_test "Health check has status field" \
129 | "curl -s $BASE_URL/health | jq -e '.status' >/dev/null"
130 |
131 | # Test ready check structure
132 | run_test "Ready check has appropriate response" \
133 | "curl -s $BASE_URL/ready | grep -E '(ready|ok|degraded)' >/dev/null"
134 | }
135 |
136 | # Function to test metrics endpoint
137 | test_metrics() {
138 | echo -e "\n${BLUE}📈 Testing Metrics endpoint...${NC}"
139 |
140 | # Test metrics endpoint accessibility
141 | run_test "Metrics endpoint accessible" \
142 | "check_endpoint /metrics"
143 |
144 | # Test Prometheus format
145 | run_test "Metrics in Prometheus format" \
146 | "check_metrics_format"
147 |
148 | # Test for standard metrics
149 | run_test "Process metrics present" \
150 | "curl -s $BASE_URL/metrics | grep -q 'process_cpu_seconds_total'"
151 |
152 | run_test "Node.js metrics present" \
153 | "curl -s $BASE_URL/metrics | grep -q 'nodejs_'"
154 |
155 | # Test custom metrics
156 | run_test "HTTP request duration metric" \
157 | "curl -s $BASE_URL/metrics | grep -q 'http_request_duration_seconds'"
158 |
159 | run_test "HTTP requests total metric" \
160 | "curl -s $BASE_URL/metrics | grep -q 'http_requests_total'"
161 |
162 | # Test memory metrics
163 | run_test "Memory usage metrics" \
164 | "curl -s $BASE_URL/metrics | grep -q 'nodejs_external_memory_bytes'"
165 | }
166 |
167 | # Function to test OpenTelemetry integration
168 | test_opentelemetry() {
169 | echo -e "\n${BLUE}🔭 Testing OpenTelemetry integration...${NC}"
170 |
171 | # Check for tracing headers support
172 | trace_id=$(uuidgen | tr '[:upper:]' '[:lower:]' | tr -d '-')
173 |
174 | run_test "Service accepts trace headers" \
175 | "curl -s -H 'traceparent: 00-$trace_id-0000000000000001-01' $BASE_URL/health -o /dev/null"
176 |
177 | # Check for tracing metrics
178 | run_test "Tracing metrics exposed" \
179 | "curl -s $BASE_URL/metrics | grep -E '(trace|span)' >/dev/null || true"
180 | }
181 |
182 | # Function to test monitoring middleware
183 | test_monitoring_middleware() {
184 | echo -e "\n${BLUE}🛡️ Testing Monitoring Middleware...${NC}"
185 |
186 | # Make a few requests to generate metrics
187 | for i in {1..5}; do
188 | curl -s "$BASE_URL/health" >/dev/null
189 | curl -s "$BASE_URL/metrics" >/dev/null
190 | done
191 |
192 | # Check if request counts increased
193 | run_test "Request counter increments" \
194 | "curl -s $BASE_URL/metrics | grep 'http_requests_total' | grep -v '^#' | awk '{print \$2}' | awk '{s+=\$1} END {exit !(s>0)}'"
195 |
196 | # Test different HTTP methods tracking
197 | curl -X POST "$BASE_URL/health" >/dev/null 2>&1 || true
198 | run_test "Different HTTP methods tracked" \
199 | "curl -s $BASE_URL/metrics | grep 'http_requests_total' | grep -E 'method=\"(GET|POST)\"' >/dev/null"
200 | }
201 |
202 | # Function to test error tracking
203 | test_error_tracking() {
204 | echo -e "\n${BLUE}❌ Testing Error Tracking...${NC}"
205 |
206 | # Try to access non-existent endpoint
207 | curl -s "$BASE_URL/non-existent-endpoint" >/dev/null 2>&1 || true
208 |
209 | # Check if 404 errors are tracked
210 | run_test "404 errors tracked in metrics" \
211 | "curl -s $BASE_URL/metrics | grep 'http_requests_total' | grep 'status=\"404\"' >/dev/null || true"
212 | }
213 |
214 | # Function to test performance metrics
215 | test_performance_metrics() {
216 | echo -e "\n${BLUE}⚡ Testing Performance Metrics...${NC}"
217 |
218 | # Check for histogram metrics
219 | run_test "Request duration histogram" \
220 | "curl -s $BASE_URL/metrics | grep 'http_request_duration_seconds_bucket' >/dev/null"
221 |
222 | run_test "Request duration quantiles" \
223 |     "curl -s $BASE_URL/metrics | grep -E 'http_request_duration_seconds\{.*quantile=' >/dev/null || true"
224 |
225 | # Check for memory metrics
226 | run_test "Heap usage metrics" \
227 | "curl -s $BASE_URL/metrics | grep 'nodejs_heap_size_total_bytes' >/dev/null"
228 |
229 | run_test "GC metrics" \
230 | "curl -s $BASE_URL/metrics | grep 'nodejs_gc_duration_seconds' >/dev/null"
231 | }
232 |
233 | # Function to generate load for metrics
234 | generate_test_load() {
235 | echo -e "\n${BLUE}🔄 Generating test load...${NC}"
236 |
237 | endpoints=("/health" "/ready" "/metrics")
238 |
239 | for i in {1..20}; do
240 | endpoint=${endpoints[$((i % ${#endpoints[@]}))]}
241 | curl -s "$BASE_URL$endpoint" >/dev/null 2>&1 &
242 | done
243 |
244 | wait
245 | echo " Generated 20 requests across endpoints"
246 | }
247 |
248 | # Main execution
249 | echo -e "\n${YELLOW}📋 Checking prerequisites...${NC}"
250 |
251 | # Check if service is accessible
252 | if ! curl -s -o /dev/null "$BASE_URL/health" 2>/dev/null; then
253 | echo -e "${RED}❌ Service is not accessible at $BASE_URL${NC}"
254 | echo "Please ensure the service is running and accessible."
255 | echo ""
256 | echo "To run locally:"
257 | echo " npm run dev"
258 | echo ""
259 | echo "To test in Kubernetes:"
260 | echo " kubectl port-forward -n $NAMESPACE svc/$SERVICE_NAME $PORT:$PORT"
261 | exit 1
262 | fi
263 |
264 | echo -e "${GREEN}✅ Service is accessible${NC}"
265 |
266 | # Run all test suites
267 | echo -e "\n${YELLOW}🚀 Starting integration tests...${NC}"
268 |
269 | test_health_checks
270 | test_metrics
271 | test_circuit_breaker
272 | test_opentelemetry
273 | test_monitoring_middleware
274 | generate_test_load
275 | sleep 2 # Wait for metrics to update
276 | test_error_tracking
277 | test_performance_metrics
278 |
279 | # Additional integration tests
280 | echo -e "\n${BLUE}🔗 Testing Integration Scenarios...${NC}"
281 |
282 | # Test metric labels
283 | run_test "Metrics have proper labels" \
284 | "curl -s $BASE_URL/metrics | grep 'http_requests_total' | grep -E 'method=|status=|route=' >/dev/null"
285 |
286 | # Test metric naming conventions
287 | run_test "Metrics follow naming conventions" \
288 |   "curl -s $BASE_URL/metrics | grep -v '^#' | grep -E '^[a-z_]+(_[a-z]+)*(_total|_bytes|_seconds|_count)?\{' >/dev/null || true"
289 |
290 | # Test health check during high load
291 | echo -e "\n${BLUE}🏋️ Testing under load...${NC}"
292 | (
293 | for i in {1..50}; do
294 | curl -s "$BASE_URL/health" >/dev/null 2>&1 &
295 | done
296 | wait
297 | ) &
298 | LOAD_PID=$!
299 |
300 | sleep 1
301 | run_test "Health check responsive under load" \
302 | "curl -s --max-time 2 $BASE_URL/health >/dev/null"
303 |
304 | wait $LOAD_PID 2>/dev/null
305 |
306 | # Summary
307 | echo -e "\n======================================================"
308 | echo -e "${GREEN}📊 Test Summary${NC}"
309 | echo -e " Total tests: $((TESTS_PASSED + TESTS_FAILED))"
310 | echo -e " ${GREEN}Passed: $TESTS_PASSED${NC}"
311 | echo -e " ${RED}Failed: $TESTS_FAILED${NC}"
312 |
313 | if [ $TESTS_FAILED -eq 0 ]; then
314 | echo -e "\n${GREEN}✅ All monitoring integration tests passed!${NC}"
315 | echo -e "\nThe monitoring stack is properly integrated with:"
316 | echo " - Health and readiness checks"
317 | echo " - Prometheus metrics exposition"
318 | echo " - Request tracking and performance metrics"
319 | echo " - Error tracking"
320 | echo " - OpenTelemetry support"
321 | exit 0
322 | else
323 | echo -e "\n${RED}❌ Some tests failed. Please review the failures above.${NC}"
324 | exit 1
325 | fi
```
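The checks the script performs with curl can also be reproduced programmatically. A minimal TypeScript sketch, assuming Node 18+ (built-in fetch) and the script's default base URL:

```typescript
// Probe the same endpoints the script tests: /health for liveness and
// /metrics for Prometheus exposition format (# HELP / # TYPE lines).
const BASE_URL = process.env.BASE_URL ?? 'http://localhost:3000';

async function checkEndpoint(path: string, expected = 200): Promise<boolean> {
  const res = await fetch(`${BASE_URL}${path}`);
  // Mirror the script's leniency: 503 from health endpoints is tolerated in test environments.
  return res.status === expected || (expected === 200 && res.status === 503);
}

async function main(): Promise<void> {
  const healthOk = await checkEndpoint('/health');
  const metricsBody = await (await fetch(`${BASE_URL}/metrics`)).text();
  const looksLikePrometheus = metricsBody.includes('# HELP') && metricsBody.includes('# TYPE');
  console.log({ healthOk, looksLikePrometheus });
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
```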
--------------------------------------------------------------------------------
/src/__tests__/direct-schema-validation.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest';
2 | import { z } from 'zod';
3 | describe('Schema Validation by Direct Testing', () => {
4 | // Test specific schema transformation functions
5 | describe('Transformation Functions', () => {
6 | it('should transform string numbers to integers or null', () => {
7 | // Create a transformation function similar to the ones in index.ts
8 | const transformFn = (val?: string) => (val ? parseInt(val, 10) || null : null);
9 | // Valid number
10 | expect(transformFn('10')).toBe(10);
11 | // Empty string should return null
12 | expect(transformFn('')).toBe(null);
13 | // Invalid number should return null
14 | expect(transformFn('abc')).toBe(null);
15 | // Undefined should return null
16 | expect(transformFn(undefined)).toBe(null);
17 | });
18 | it('should transform string booleans to boolean values', () => {
19 | // Create a schema with boolean transformation
20 | const booleanSchema = z
21 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
22 | .nullable()
23 | .optional();
24 | // Test the transformation
25 | expect(booleanSchema.parse('true')).toBe(true);
26 | expect(booleanSchema.parse('false')).toBe(false);
27 | expect(booleanSchema.parse(true)).toBe(true);
28 | expect(booleanSchema.parse(false)).toBe(false);
29 | expect(booleanSchema.parse(null)).toBe(null);
30 | expect(booleanSchema.parse(undefined)).toBe(undefined);
31 | });
32 | it('should validate enum values', () => {
33 | // Create a schema with enum validation
34 | const severitySchema = z
35 | .enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER'])
36 | .nullable()
37 | .optional();
38 | // Test the validation
39 | expect(severitySchema.parse('MAJOR')).toBe('MAJOR');
40 | expect(severitySchema.parse('CRITICAL')).toBe('CRITICAL');
41 | expect(severitySchema.parse(null)).toBe(null);
42 | expect(severitySchema.parse(undefined)).toBe(undefined);
43 | // Invalid value should throw
44 | expect(() => severitySchema.parse('INVALID')).toThrow();
45 | });
46 | });
47 | // Test complex schema objects
48 | describe('Complex Schema Objects', () => {
49 | it('should validate issues schema parameters', () => {
50 | // Create a schema similar to issues schema in index.ts
51 | const statusEnumSchema = z.enum([
52 | 'OPEN',
53 | 'CONFIRMED',
54 | 'REOPENED',
55 | 'RESOLVED',
56 | 'CLOSED',
57 | 'TO_REVIEW',
58 | 'IN_REVIEW',
59 | 'REVIEWED',
60 | ]);
61 | const statusSchema = z.array(statusEnumSchema).nullable().optional();
62 | const resolutionEnumSchema = z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']);
63 | const resolutionSchema = z.array(resolutionEnumSchema).nullable().optional();
64 | const typeEnumSchema = z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']);
65 | const typeSchema = z.array(typeEnumSchema).nullable().optional();
66 | const issuesSchema = z.object({
67 | project_key: z.string(),
68 | severity: z.enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER']).nullable().optional(),
69 | page: z
70 | .string()
71 | .optional()
72 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
73 | page_size: z
74 | .string()
75 | .optional()
76 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
77 | statuses: statusSchema,
78 | resolutions: resolutionSchema,
79 | resolved: z
80 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
81 | .nullable()
82 | .optional(),
83 | types: typeSchema,
84 | rules: z.array(z.string()).nullable().optional(),
85 | tags: z.array(z.string()).nullable().optional(),
86 | created_after: z.string().nullable().optional(),
87 | created_before: z.string().nullable().optional(),
88 | created_at: z.string().nullable().optional(),
89 | created_in_last: z.string().nullable().optional(),
90 | assignees: z.array(z.string()).nullable().optional(),
91 | authors: z.array(z.string()).nullable().optional(),
92 | cwe: z.array(z.string()).nullable().optional(),
93 | languages: z.array(z.string()).nullable().optional(),
94 | owasp_top10: z.array(z.string()).nullable().optional(),
95 | sans_top25: z.array(z.string()).nullable().optional(),
96 | sonarsource_security: z.array(z.string()).nullable().optional(),
97 | on_component_only: z
98 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
99 | .nullable()
100 | .optional(),
101 | facets: z.array(z.string()).nullable().optional(),
102 | since_leak_period: z
103 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
104 | .nullable()
105 | .optional(),
106 | in_new_code_period: z
107 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
108 | .nullable()
109 | .optional(),
110 | });
111 | // Test with various parameter types
112 | const result = issuesSchema.parse({
113 | project_key: 'test-project',
114 | severity: 'MAJOR',
115 | page: '2',
116 | page_size: '10',
117 | statuses: ['OPEN', 'CONFIRMED'],
118 | resolved: 'true',
119 | types: ['BUG', 'VULNERABILITY'],
120 | rules: ['rule1', 'rule2'],
121 | tags: ['tag1', 'tag2'],
122 | created_after: '2023-01-01',
123 | on_component_only: 'true',
124 | since_leak_period: 'true',
125 | in_new_code_period: 'true',
126 | });
127 | // Check the transformations
128 | expect(result.project_key).toBe('test-project');
129 | expect(result.severity).toBe('MAJOR');
130 | expect(result.page).toBe(2);
131 | expect(result.page_size).toBe(10);
132 | expect(result.statuses).toEqual(['OPEN', 'CONFIRMED']);
133 | expect(result.resolved).toBe(true);
134 | expect(result.types).toEqual(['BUG', 'VULNERABILITY']);
135 | expect(result.on_component_only).toBe(true);
136 | expect(result.since_leak_period).toBe(true);
137 | expect(result.in_new_code_period).toBe(true);
138 | });
139 | it('should validate component measures schema parameters', () => {
140 | // Create a schema similar to component measures schema in index.ts
141 | const measuresComponentSchema = z.object({
142 | component: z.string(),
143 | metric_keys: z.array(z.string()),
144 | branch: z.string().optional(),
145 | pull_request: z.string().optional(),
146 | additional_fields: z.array(z.string()).optional(),
147 | period: z.string().optional(),
148 | page: z
149 | .string()
150 | .optional()
151 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
152 | page_size: z
153 | .string()
154 | .optional()
155 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
156 | });
157 | // Test with valid parameters
158 | const result = measuresComponentSchema.parse({
159 | component: 'test-component',
160 | metric_keys: ['complexity', 'coverage'],
161 | branch: 'main',
162 | additional_fields: ['metrics'],
163 | page: '2',
164 | page_size: '20',
165 | });
166 | // Check the transformations
167 | expect(result.component).toBe('test-component');
168 | expect(result.metric_keys).toEqual(['complexity', 'coverage']);
169 | expect(result.branch).toBe('main');
170 | expect(result.page).toBe(2);
171 | expect(result.page_size).toBe(20);
172 | // Test with invalid page values
173 | const result2 = measuresComponentSchema.parse({
174 | component: 'test-component',
175 | metric_keys: ['complexity', 'coverage'],
176 | page: 'invalid',
177 | page_size: 'invalid',
178 | });
179 | expect(result2.page).toBe(null);
180 | expect(result2.page_size).toBe(null);
181 | });
182 | it('should validate components measures schema parameters', () => {
183 | // Create a schema similar to components measures schema in index.ts
184 | const measuresComponentsSchema = z.object({
185 | component_keys: z.array(z.string()),
186 | metric_keys: z.array(z.string()),
187 | branch: z.string().optional(),
188 | pull_request: z.string().optional(),
189 | additional_fields: z.array(z.string()).optional(),
190 | period: z.string().optional(),
191 | page: z
192 | .string()
193 | .optional()
194 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
195 | page_size: z
196 | .string()
197 | .optional()
198 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
199 | });
200 | // Test with valid parameters
201 | const result = measuresComponentsSchema.parse({
202 | component_keys: ['comp-1', 'comp-2'],
203 | metric_keys: ['complexity', 'coverage'],
204 | branch: 'main',
205 | page: '2',
206 | page_size: '20',
207 | });
208 | // Check the transformations
209 | expect(result.component_keys).toEqual(['comp-1', 'comp-2']);
210 | expect(result.metric_keys).toEqual(['complexity', 'coverage']);
211 | expect(result.page).toBe(2);
212 | expect(result.page_size).toBe(20);
213 | });
214 | it('should validate measures history schema parameters', () => {
215 | // Create a schema similar to measures history schema in index.ts
216 | const measuresHistorySchema = z.object({
217 | component: z.string(),
218 | metrics: z.array(z.string()),
219 | from: z.string().optional(),
220 | to: z.string().optional(),
221 | branch: z.string().optional(),
222 | pull_request: z.string().optional(),
223 | page: z
224 | .string()
225 | .optional()
226 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
227 | page_size: z
228 | .string()
229 | .optional()
230 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
231 | });
232 | // Test with valid parameters
233 | const result = measuresHistorySchema.parse({
234 | component: 'test-component',
235 | metrics: ['complexity', 'coverage'],
236 | from: '2023-01-01',
237 | to: '2023-12-31',
238 | page: '3',
239 | page_size: '15',
240 | });
241 | // Check the transformations
242 | expect(result.component).toBe('test-component');
243 | expect(result.metrics).toEqual(['complexity', 'coverage']);
244 | expect(result.from).toBe('2023-01-01');
245 | expect(result.to).toBe('2023-12-31');
246 | expect(result.page).toBe(3);
247 | expect(result.page_size).toBe(15);
248 | });
249 | });
250 | });
251 |
```
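One quirk of the pagination transform exercised above is worth calling out: because it uses `||` rather than `??`, the string `'0'` is coerced to `null`, not `0`:

```typescript
// Same transform shape as in the tests above.
const toPage = (val?: string) => (val ? parseInt(val, 10) || null : null);

console.log(toPage('2')); // 2
console.log(toPage('abc')); // null (parseInt returns NaN, which is falsy)
console.log(toPage('')); // null
console.log(toPage('0')); // null, not 0: zero is falsy, so `|| null` discards it
```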
--------------------------------------------------------------------------------
/src/__tests__/standalone-handlers.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest';
2 | // Test the transformations used in handlers
3 | describe('Handler Function Transformations', () => {
4 | // Test parameter transformations for handlers
5 | describe('Schema Transformations', () => {
6 | describe('Page and Page Size Transformations', () => {
7 | it('should test transform for Projects tool', () => {
8 | const transform = (val: any) => (val ? parseInt(val, 10) || null : null);
9 | // Projects page parameter
10 | expect(transform('10')).toBe(10);
11 | expect(transform('invalid')).toBe(null);
12 | expect(transform(undefined)).toBe(null);
13 | expect(transform('')).toBe(null);
14 | // Projects page_size parameter
15 | expect(transform('20')).toBe(20);
16 | });
17 | it('should test transform for Metrics tool', () => {
18 | const transform = (val: any) => (val ? parseInt(val, 10) || null : null);
19 | // Metrics page parameter
20 | expect(transform('10')).toBe(10);
21 | expect(transform('invalid')).toBe(null);
22 | // Metrics page_size parameter
23 | expect(transform('20')).toBe(20);
24 | });
25 | it('should test transform for Issues tool', () => {
26 | const transform = (val: any) => (val ? parseInt(val, 10) || null : null);
27 | // Issues page parameter
28 | expect(transform('10')).toBe(10);
29 | expect(transform('invalid')).toBe(null);
30 | // Issues page_size parameter
31 | expect(transform('20')).toBe(20);
32 | });
33 | it('should test transform for Components Measures tool', () => {
34 | const transform = (val: any) => (val ? parseInt(val, 10) || null : null);
35 | // Components Measures page parameter
36 | expect(transform('10')).toBe(10);
37 | expect(transform('invalid')).toBe(null);
38 | // Components Measures page_size parameter
39 | expect(transform('20')).toBe(20);
40 | });
41 | it('should test transform for Measures History tool', () => {
42 | const transform = (val: any) => (val ? parseInt(val, 10) || null : null);
43 | // Measures History page parameter
44 | expect(transform('10')).toBe(10);
45 | expect(transform('invalid')).toBe(null);
46 | // Measures History page_size parameter
47 | expect(transform('20')).toBe(20);
48 | });
49 | });
50 | describe('Boolean Parameter Transformations', () => {
51 | it('should test boolean transform for resolved parameter', () => {
52 | const transform = (val: any) => val === 'true';
53 | expect(transform('true')).toBe(true);
54 | expect(transform('false')).toBe(false);
55 | expect(transform('someOtherValue')).toBe(false);
56 | });
57 | it('should test boolean transform for on_component_only parameter', () => {
58 | const transform = (val: any) => val === 'true';
59 | expect(transform('true')).toBe(true);
60 | expect(transform('false')).toBe(false);
61 | expect(transform('someOtherValue')).toBe(false);
62 | });
63 | it('should test boolean transform for since_leak_period parameter', () => {
64 | const transform = (val: any) => val === 'true';
65 | expect(transform('true')).toBe(true);
66 | expect(transform('false')).toBe(false);
67 | expect(transform('someOtherValue')).toBe(false);
68 | });
69 | it('should test boolean transform for in_new_code_period parameter', () => {
70 | const transform = (val: any) => val === 'true';
71 | expect(transform('true')).toBe(true);
72 | expect(transform('false')).toBe(false);
73 | expect(transform('someOtherValue')).toBe(false);
74 | });
75 | });
76 | });
77 | // These are mock tests for the handler implementations
78 | describe('Handler Implementation Mocks', () => {
79 | it('should mock metricsHandler implementation', () => {
80 | // Test the transform within the handler
81 | const nullToUndefined = (value: any) => (value === null ? undefined : value);
82 | // Mock params that would be processed by metricsHandler
83 | const params = { page: 2, page_size: 10 };
84 | // Verify transformations work correctly
85 | expect(nullToUndefined(params.page)).toBe(2);
86 | expect(nullToUndefined(params.page_size)).toBe(10);
87 | expect(nullToUndefined(null)).toBeUndefined();
88 | // Mock result structure
89 | const result = {
90 | content: [
91 | {
92 | type: 'text',
93 | text: JSON.stringify(
94 | {
95 | metrics: [{ key: 'test-metric', name: 'Test Metric' }],
96 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
97 | },
98 | null,
99 | 2
100 | ),
101 | },
102 | ],
103 | };
104 | expect(result.content).toBeDefined();
105 | expect(result.content[0]?.type).toBe('text');
106 | expect(JSON.parse(result.content[0]?.text ?? '{}').metrics).toBeDefined();
107 | });
108 | it('should mock issuesHandler implementation', () => {
109 | // Mock the mapToSonarQubeParams function within issuesHandler
110 | const nullToUndefined = (value: any) => (value === null ? undefined : value);
111 | const mapToSonarQubeParams = (params: any) => {
112 | return {
113 | projectKey: params.project_key,
114 | severity: nullToUndefined(params.severity),
115 | page: nullToUndefined(params.page),
116 | };
117 | };
118 | // Test with sample parameters
119 | const params = { project_key: 'test-project', severity: 'MAJOR', page: null };
120 | const result = mapToSonarQubeParams(params);
121 | // Verify transformations
122 | expect(result.projectKey).toBe('test-project');
123 | expect(result.severity).toBe('MAJOR');
124 | expect(result.page).toBeUndefined();
125 | // Mock the handler return structure
126 | const handlerResult = {
127 | content: [
128 | {
129 | type: 'text',
130 | text: JSON.stringify({
131 | issues: [{ key: 'test-issue', rule: 'test-rule', severity: 'MAJOR' }],
132 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
133 | }),
134 | },
135 | ],
136 | };
137 | expect(handlerResult.content[0]?.type).toBe('text');
138 | });
139 | it('should mock componentMeasuresHandler implementation', () => {
140 | // Mock the array transformation logic
141 | const params = {
142 | component: 'test-component',
143 | metric_keys: 'coverage',
144 | branch: 'main',
145 | };
146 | // Test array conversion logic
147 | const metricKeys = Array.isArray(params.metric_keys)
148 | ? params.metric_keys
149 | : [params.metric_keys];
150 | expect(metricKeys).toEqual(['coverage']);
151 | // Test with array input
152 | const paramsWithArray = {
153 | component: 'test-component',
154 | metric_keys: ['coverage', 'bugs'],
155 | branch: 'main',
156 | };
157 | const metricKeysFromArray = Array.isArray(paramsWithArray.metric_keys)
158 | ? paramsWithArray.metric_keys
159 | : [paramsWithArray.metric_keys];
160 | expect(metricKeysFromArray).toEqual(['coverage', 'bugs']);
161 | // Mock the handler return structure
162 | const handlerResult = {
163 | content: [
164 | {
165 | type: 'text',
166 | text: JSON.stringify({
167 | component: {
168 | key: 'test-component',
169 | measures: [{ metric: 'coverage', value: '85.4' }],
170 | },
171 | metrics: [{ key: 'coverage', name: 'Coverage' }],
172 | }),
173 | },
174 | ],
175 | };
176 | expect(handlerResult.content[0]?.type).toBe('text');
177 | });
178 | it('should mock componentsMeasuresHandler implementation', () => {
179 | // Mock the array transformation logic for components and metrics
180 | const params = {
181 | component_keys: 'test-component',
182 | metric_keys: 'coverage',
183 | page: '1',
184 | page_size: '10',
185 | };
186 | // Test component keys array conversion
187 | const componentKeys = Array.isArray(params.component_keys)
188 | ? params.component_keys
189 | : [params.component_keys];
190 | expect(componentKeys).toEqual(['test-component']);
191 | // Test metric keys array conversion
192 | const metricKeys = Array.isArray(params.metric_keys)
193 | ? params.metric_keys
194 | : [params.metric_keys];
195 | expect(metricKeys).toEqual(['coverage']);
196 | // Test null to undefined conversion
197 | const nullToUndefined = (value: any) => (value === null ? undefined : value);
198 | expect(nullToUndefined(null)).toBeUndefined();
199 | expect(nullToUndefined('value')).toBe('value');
200 | // Mock the handler return structure
201 | const handlerResult = {
202 | content: [
203 | {
204 | type: 'text',
205 | text: JSON.stringify({
206 | components: [
207 | {
208 | key: 'test-component',
209 | measures: [{ metric: 'coverage', value: '85.4' }],
210 | },
211 | ],
212 | metrics: [{ key: 'coverage', name: 'Coverage' }],
213 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
214 | }),
215 | },
216 | ],
217 | };
218 | expect(handlerResult.content[0]?.type).toBe('text');
219 | });
220 | it('should mock measuresHistoryHandler implementation', () => {
221 | // Mock the metrics array transformation logic
222 | const params = {
223 | component: 'test-component',
224 | metrics: 'coverage',
225 | from: '2023-01-01',
226 | to: '2023-12-31',
227 | };
228 | // Test metrics array conversion
229 | const metrics = Array.isArray(params.metrics) ? params.metrics : [params.metrics];
230 | expect(metrics).toEqual(['coverage']);
231 | // Test with array input
232 | const paramsWithArray = {
233 | component: 'test-component',
234 | metrics: ['coverage', 'bugs'],
235 | from: '2023-01-01',
236 | to: '2023-12-31',
237 | };
238 | const metricsFromArray = Array.isArray(paramsWithArray.metrics)
239 | ? paramsWithArray.metrics
240 | : [paramsWithArray.metrics];
241 | expect(metricsFromArray).toEqual(['coverage', 'bugs']);
242 | // Mock the handler return structure
243 | const handlerResult = {
244 | content: [
245 | {
246 | type: 'text',
247 | text: JSON.stringify({
248 | measures: [
249 | {
250 | metric: 'coverage',
251 | history: [{ date: '2023-01-01', value: '85.4' }],
252 | },
253 | ],
254 | paging: { pageIndex: 1, pageSize: 10, total: 1 },
255 | }),
256 | },
257 | ],
258 | };
259 | expect(handlerResult.content[0]?.type).toBe('text');
260 | });
261 | });
262 | });
263 |
```
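The two conversions these mocks exercise can be captured as small helpers. A sketch with illustrative names (not the repository's actual exports):

```typescript
// null → undefined for optional client parameters, and scalar-or-array → array,
// mirroring the transformations asserted in the handler mocks above.
const nullToUndefined = <T>(value: T | null): T | undefined =>
  value === null ? undefined : value;

const ensureArray = (value: string | string[]): string[] =>
  Array.isArray(value) ? value : [value];

console.log(nullToUndefined(null)); // undefined
console.log(nullToUndefined('MAJOR')); // 'MAJOR'
console.log(ensureArray('coverage')); // ['coverage']
console.log(ensureArray(['coverage', 'bugs'])); // ['coverage', 'bugs']
```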
--------------------------------------------------------------------------------
/src/__tests__/issues-new-parameters.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeEach, vi } from 'vitest';
2 | // Note: SearchIssuesRequestBuilderInterface stands in for the builder type from sonarqube-web-api-client
3 | type SearchIssuesRequestBuilderInterface = any;
4 | // Mock environment variables
5 | process.env.SONARQUBE_TOKEN = 'test-token';
6 | process.env.SONARQUBE_URL = 'http://localhost:9000';
7 | process.env.SONARQUBE_ORGANIZATION = 'test-org';
8 | // Mock search builder
9 | const mockSearchBuilder = {
10 | withProjects: vi.fn().mockReturnThis(),
11 | withComponents: vi.fn().mockReturnThis(),
12 | withDirectories: vi.fn().mockReturnThis(),
13 | withFiles: vi.fn().mockReturnThis(),
14 | withScopes: vi.fn().mockReturnThis(),
15 | onComponentOnly: vi.fn().mockReturnThis(),
16 | onBranch: vi.fn().mockReturnThis(),
17 | onPullRequest: vi.fn().mockReturnThis(),
18 | withIssues: vi.fn().mockReturnThis(),
19 | withSeverities: vi.fn().mockReturnThis(),
20 | withStatuses: vi.fn().mockReturnThis(),
21 | withResolutions: vi.fn().mockReturnThis(),
22 | onlyResolved: vi.fn().mockReturnThis(),
23 | onlyUnresolved: vi.fn().mockReturnThis(),
24 | withTypes: vi.fn().mockReturnThis(),
25 | withCleanCodeAttributeCategories: vi.fn().mockReturnThis(),
26 | withImpactSeverities: vi.fn().mockReturnThis(),
27 | withImpactSoftwareQualities: vi.fn().mockReturnThis(),
28 | withIssueStatuses: vi.fn().mockReturnThis(),
29 | withRules: vi.fn().mockReturnThis(),
30 | withTags: vi.fn().mockReturnThis(),
31 | createdAfter: vi.fn().mockReturnThis(),
32 | createdBefore: vi.fn().mockReturnThis(),
33 | createdAt: vi.fn().mockReturnThis(),
34 | createdInLast: vi.fn().mockReturnThis(),
35 | onlyAssigned: vi.fn().mockReturnThis(),
36 | onlyUnassigned: vi.fn().mockReturnThis(),
37 | assignedToAny: vi.fn().mockReturnThis(),
38 | byAuthor: vi.fn().mockReturnThis(),
39 | byAuthors: vi.fn().mockReturnThis(),
40 | withCwe: vi.fn().mockReturnThis(),
41 | withOwaspTop10: vi.fn().mockReturnThis(),
42 | withOwaspTop10v2021: vi.fn().mockReturnThis(),
43 | withSansTop25: vi.fn().mockReturnThis(),
44 | withSonarSourceSecurity: vi.fn().mockReturnThis(),
45 | withSonarSourceSecurityNew: vi.fn().mockReturnThis(),
46 | withLanguages: vi.fn().mockReturnThis(),
47 | withFacets: vi.fn().mockReturnThis(),
48 | withFacetMode: vi.fn().mockReturnThis(),
49 | sinceLeakPeriod: vi.fn().mockReturnThis(),
50 | inNewCodePeriod: vi.fn().mockReturnThis(),
51 | sortBy: vi.fn().mockReturnThis(),
52 | withAdditionalFields: vi.fn().mockReturnThis(),
53 | page: vi.fn().mockReturnThis(),
54 | pageSize: vi.fn().mockReturnThis(),
55 | execute: vi.fn(),
56 | } as unknown as SearchIssuesRequestBuilderInterface;
57 | // Mock the web API client
58 | vi.mock('sonarqube-web-api-client', () => ({
59 | SonarQubeClient: {
60 | withToken: vi.fn().mockReturnValue({
61 | issues: {
62 | search: vi.fn().mockReturnValue(mockSearchBuilder),
63 | },
64 | }),
65 | },
66 | }));
67 | import { IssuesDomain } from '../domains/issues.js';
68 | import type { IssuesParams, ISonarQubeClient } from '../types/index.js';
69 | // Note: IWebApiClient is an alias for ISonarQubeClient in these tests
70 | type IWebApiClient = ISonarQubeClient;
71 | describe('IssuesDomain new parameters', () => {
72 | let issuesDomain: IssuesDomain;
73 | beforeEach(() => {
74 | // Reset all mocks
75 | vi.clearAllMocks();
76 | // Reset execute mock to return default response
77 | mockSearchBuilder.execute.mockResolvedValue({
78 | issues: [],
79 | components: [],
80 | rules: [],
81 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
82 | });
83 | // Create mock web API client
84 | const mockWebApiClient = {
85 | issues: {
86 | search: vi.fn().mockReturnValue(mockSearchBuilder),
87 | },
88 | } as unknown as IWebApiClient;
89 | // Create issues domain instance
90 | issuesDomain = new IssuesDomain(mockWebApiClient as any, {} as any);
91 | });
92 | describe('directories parameter', () => {
93 | it('should call withDirectories when directories parameter is provided', async () => {
94 | const params: IssuesParams = {
95 | projectKey: 'test-project',
96 | directories: ['src/main/', 'src/test/'],
97 | page: 1,
98 | pageSize: 10,
99 | };
100 | await issuesDomain.getIssues(params);
101 | expect(mockSearchBuilder.withDirectories).toHaveBeenCalledWith(['src/main/', 'src/test/']);
102 | expect(mockSearchBuilder.withDirectories).toHaveBeenCalledTimes(1);
103 | });
104 | it('should not call withDirectories when directories parameter is not provided', async () => {
105 | const params: IssuesParams = {
106 | projectKey: 'test-project',
107 | page: 1,
108 | pageSize: 10,
109 | };
110 | await issuesDomain.getIssues(params);
111 | expect(mockSearchBuilder.withDirectories).not.toHaveBeenCalled();
112 | });
113 | it('should handle empty directories array', async () => {
114 | const params: IssuesParams = {
115 | projectKey: 'test-project',
116 | directories: [],
117 | page: 1,
118 | pageSize: 10,
119 | };
120 | await issuesDomain.getIssues(params);
121 | expect(mockSearchBuilder.withDirectories).toHaveBeenCalledWith([]);
122 | });
123 | });
124 | describe('files parameter', () => {
125 | it('should call withFiles when files parameter is provided', async () => {
126 | const params: IssuesParams = {
127 | projectKey: 'test-project',
128 | files: ['UserService.java', 'config.properties'],
129 | page: 1,
130 | pageSize: 10,
131 | };
132 | await issuesDomain.getIssues(params);
133 | expect(mockSearchBuilder.withFiles).toHaveBeenCalledWith([
134 | 'UserService.java',
135 | 'config.properties',
136 | ]);
137 | expect(mockSearchBuilder.withFiles).toHaveBeenCalledTimes(1);
138 | });
139 | it('should not call withFiles when files parameter is not provided', async () => {
140 | const params: IssuesParams = {
141 | projectKey: 'test-project',
142 | page: 1,
143 | pageSize: 10,
144 | };
145 | await issuesDomain.getIssues(params);
146 | expect(mockSearchBuilder.withFiles).not.toHaveBeenCalled();
147 | });
148 | it('should handle single file', async () => {
149 | const params: IssuesParams = {
150 | projectKey: 'test-project',
151 | files: ['App.java'],
152 | page: 1,
153 | pageSize: 10,
154 | };
155 | await issuesDomain.getIssues(params);
156 | expect(mockSearchBuilder.withFiles).toHaveBeenCalledWith(['App.java']);
157 | });
158 | });
159 | describe('scopes parameter', () => {
160 | it('should call withScopes when scopes parameter is provided', async () => {
161 | const params: IssuesParams = {
162 | projectKey: 'test-project',
163 | scopes: ['MAIN', 'TEST'],
164 | page: undefined,
165 | pageSize: undefined,
166 | };
167 | await issuesDomain.getIssues(params);
168 | expect(mockSearchBuilder.withScopes).toHaveBeenCalledWith(['MAIN', 'TEST']);
169 | expect(mockSearchBuilder.withScopes).toHaveBeenCalledTimes(1);
170 | });
171 | it('should handle single scope value', async () => {
172 | const params: IssuesParams = {
173 | projectKey: 'test-project',
174 | scopes: ['MAIN'],
175 | page: undefined,
176 | pageSize: undefined,
177 | };
178 | await issuesDomain.getIssues(params);
179 | expect(mockSearchBuilder.withScopes).toHaveBeenCalledWith(['MAIN']);
180 | });
181 | it('should handle all scope values', async () => {
182 | const params: IssuesParams = {
183 | projectKey: 'test-project',
184 | scopes: ['MAIN', 'TEST', 'OVERALL'],
185 | page: undefined,
186 | pageSize: undefined,
187 | };
188 | await issuesDomain.getIssues(params);
189 | expect(mockSearchBuilder.withScopes).toHaveBeenCalledWith(['MAIN', 'TEST', 'OVERALL']);
190 | });
191 | it('should not call withScopes when scopes parameter is not provided', async () => {
192 | const params: IssuesParams = {
193 | projectKey: 'test-project',
194 | page: 1,
195 | pageSize: 10,
196 | };
197 | await issuesDomain.getIssues(params);
198 | expect(mockSearchBuilder.withScopes).not.toHaveBeenCalled();
199 | });
200 | });
201 | describe('combined parameters', () => {
202 | it('should handle all three new parameters together', async () => {
203 | const params: IssuesParams = {
204 | projectKey: 'test-project',
205 | directories: ['src/main/java/', 'src/test/java/'],
206 | files: ['Application.java', 'pom.xml'],
207 | scopes: ['MAIN', 'TEST', 'OVERALL'],
208 | page: undefined,
209 | pageSize: undefined,
210 | };
211 | await issuesDomain.getIssues(params);
212 | expect(mockSearchBuilder.withDirectories).toHaveBeenCalledWith([
213 | 'src/main/java/',
214 | 'src/test/java/',
215 | ]);
216 | expect(mockSearchBuilder.withFiles).toHaveBeenCalledWith(['Application.java', 'pom.xml']);
217 | expect(mockSearchBuilder.withScopes).toHaveBeenCalledWith(['MAIN', 'TEST', 'OVERALL']);
218 | });
219 | it('should work with existing component filters', async () => {
220 | const params: IssuesParams = {
221 | projectKey: 'test-project',
222 | componentKeys: ['src/main/java/com/example/Service.java'],
223 | directories: ['src/main/java/com/example/'],
224 | files: ['Service.java'],
225 | scopes: ['MAIN'],
226 | page: undefined,
227 | pageSize: undefined,
228 | };
229 | await issuesDomain.getIssues(params);
230 | expect(mockSearchBuilder.withComponents).toHaveBeenCalledWith([
231 | 'src/main/java/com/example/Service.java',
232 | ]);
233 | expect(mockSearchBuilder.withDirectories).toHaveBeenCalledWith([
234 | 'src/main/java/com/example/',
235 | ]);
236 | expect(mockSearchBuilder.withFiles).toHaveBeenCalledWith(['Service.java']);
237 | expect(mockSearchBuilder.withScopes).toHaveBeenCalledWith(['MAIN']);
238 | });
239 | it('should work with all filtering types together', async () => {
240 | const params: IssuesParams = {
241 | projectKey: 'test-project',
242 | componentKeys: ['src/Service.java'],
243 | directories: ['src/'],
244 | files: ['Service.java', 'Controller.java'],
245 | scopes: ['MAIN'],
246 | severities: ['CRITICAL', 'BLOCKER'],
247 | statuses: ['OPEN'],
248 | tags: ['security'],
249 | page: undefined,
250 | pageSize: undefined,
251 | };
252 | await issuesDomain.getIssues(params);
253 | // Component filters
254 | expect(mockSearchBuilder.withProjects).toHaveBeenCalledWith(['test-project']);
255 | expect(mockSearchBuilder.withComponents).toHaveBeenCalledWith(['src/Service.java']);
256 | expect(mockSearchBuilder.withDirectories).toHaveBeenCalledWith(['src/']);
257 | expect(mockSearchBuilder.withFiles).toHaveBeenCalledWith(['Service.java', 'Controller.java']);
258 | expect(mockSearchBuilder.withScopes).toHaveBeenCalledWith(['MAIN']);
259 | // Issue filters
260 | expect(mockSearchBuilder.withSeverities).toHaveBeenCalledWith(['CRITICAL', 'BLOCKER']);
261 | expect(mockSearchBuilder.withStatuses).toHaveBeenCalledWith(['OPEN']);
262 | expect(mockSearchBuilder.withTags).toHaveBeenCalledWith(['security']);
263 | });
264 | });
265 | });
266 |
```
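A sketch of the conditional mapping these tests assert on. The interface and function below are illustrative, not the real IssuesDomain implementation (imported above from '../domains/issues.js'), but the observable behavior matches the expectations: each builder method is called only when its parameter is present, and an explicitly empty array is still passed through:

```typescript
// Illustrative subset of the search builder mocked above.
interface IssuesSearchBuilder {
  withDirectories(dirs: string[]): this;
  withFiles(files: string[]): this;
  withScopes(scopes: Array<'MAIN' | 'TEST' | 'OVERALL'>): this;
}

interface ComponentFilterParams {
  directories?: string[];
  files?: string[];
  scopes?: Array<'MAIN' | 'TEST' | 'OVERALL'>;
}

function applyComponentFilters(
  builder: IssuesSearchBuilder,
  params: ComponentFilterParams
): IssuesSearchBuilder {
  // undefined means "not provided"; an empty array is a deliberate (if unusual) filter.
  if (params.directories !== undefined) builder.withDirectories(params.directories);
  if (params.files !== undefined) builder.withFiles(params.files);
  if (params.scopes !== undefined) builder.withScopes(params.scopes);
  return builder;
}
```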
--------------------------------------------------------------------------------
/src/__tests__/utils/retry.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest';
2 | import type { MockedFunction } from 'vitest';
3 | import { withRetry, makeRetryable } from '../../utils/retry.js';
4 |
5 | // Mock logger to prevent console output during tests
6 | vi.mock('../../utils/logger.js', () => ({
7 | createLogger: () => ({
8 | warn: vi.fn(),
9 | debug: vi.fn(),
10 | info: vi.fn(),
11 | error: vi.fn(),
12 | }),
13 | }));
14 |
15 | describe('Retry Utilities', () => {
16 | let mockFn: MockedFunction<(...args: unknown[]) => Promise<unknown>>;
17 |
18 | beforeEach(() => {
19 | mockFn = vi.fn() as MockedFunction<(...args: unknown[]) => Promise<unknown>>;
20 | vi.clearAllMocks();
21 | });
22 |
23 | afterEach(() => {
24 | vi.clearAllTimers();
25 | });
26 |
27 | describe('withRetry', () => {
28 | it('should succeed on first attempt', async () => {
29 | mockFn.mockResolvedValue('success');
30 |
31 | const result = await withRetry(mockFn);
32 |
33 | expect(result).toBe('success');
34 | expect(mockFn).toHaveBeenCalledTimes(1);
35 | });
36 |
37 | it('should retry on retryable error and eventually succeed', async () => {
38 | mockFn
39 | .mockRejectedValueOnce(new Error('ECONNREFUSED'))
40 | .mockRejectedValueOnce(new Error('ETIMEDOUT'))
41 | .mockResolvedValue('success');
42 |
43 | const result = await withRetry(mockFn, { maxAttempts: 4 });
44 |
45 | expect(result).toBe('success');
46 | expect(mockFn).toHaveBeenCalledTimes(3);
47 | });
48 |
49 | it('should fail after max attempts with retryable error', async () => {
50 | mockFn.mockRejectedValue(new Error('ECONNREFUSED'));
51 |
52 | await expect(withRetry(mockFn, { maxAttempts: 2 })).rejects.toThrow('ECONNREFUSED');
53 | expect(mockFn).toHaveBeenCalledTimes(2);
54 | });
55 |
56 | it('should not retry on non-retryable error', async () => {
57 | mockFn.mockRejectedValue(new Error('Invalid input'));
58 |
59 | await expect(withRetry(mockFn)).rejects.toThrow('Invalid input');
60 | expect(mockFn).toHaveBeenCalledTimes(1);
61 | });
62 |
63 | it('should respect custom maxAttempts', async () => {
64 | mockFn.mockRejectedValue(new Error('ECONNREFUSED'));
65 |
66 | await expect(withRetry(mockFn, { maxAttempts: 1 })).rejects.toThrow('ECONNREFUSED');
67 | expect(mockFn).toHaveBeenCalledTimes(1);
68 | });
69 |
70 | it('should use exponential backoff with default settings', async () => {
71 | vi.useFakeTimers();
72 |
73 | mockFn
74 | .mockRejectedValueOnce(new Error('ECONNREFUSED'))
75 | .mockRejectedValueOnce(new Error('ECONNREFUSED'))
76 | .mockResolvedValue('success');
77 |
78 | const promise = withRetry(mockFn);
79 |
80 | // First attempt fails immediately
81 | await vi.advanceTimersByTimeAsync(0);
82 | expect(mockFn).toHaveBeenCalledTimes(1);
83 |
84 | // Wait for first retry delay (1000ms)
85 | await vi.advanceTimersByTimeAsync(1000);
86 | expect(mockFn).toHaveBeenCalledTimes(2);
87 |
88 | // Wait for second retry delay (2000ms)
89 | await vi.advanceTimersByTimeAsync(2000);
90 | expect(mockFn).toHaveBeenCalledTimes(3);
91 |
92 | const result = await promise;
93 | expect(result).toBe('success');
94 |
95 | vi.useRealTimers();
96 | });
97 |
98 | it('should respect custom delay settings', async () => {
99 | vi.useFakeTimers();
100 |
101 | mockFn.mockRejectedValueOnce(new Error('ECONNREFUSED')).mockResolvedValue('success');
102 |
103 | const promise = withRetry(mockFn, {
104 | initialDelay: 500,
105 | backoffMultiplier: 3,
106 | });
107 |
108 | await vi.advanceTimersByTimeAsync(0);
109 | expect(mockFn).toHaveBeenCalledTimes(1);
110 |
111 | await vi.advanceTimersByTimeAsync(500);
112 | expect(mockFn).toHaveBeenCalledTimes(2);
113 |
114 | const result = await promise;
115 | expect(result).toBe('success');
116 |
117 | vi.useRealTimers();
118 | });
119 |
120 | it('should respect maxDelay setting', async () => {
121 | vi.useFakeTimers();
122 |
123 | mockFn
124 | .mockRejectedValueOnce(new Error('ECONNREFUSED'))
125 | .mockRejectedValueOnce(new Error('ECONNREFUSED'))
126 | .mockResolvedValue('success');
127 |
128 | const promise = withRetry(mockFn, {
129 | initialDelay: 1000,
130 | backoffMultiplier: 10,
131 | maxDelay: 1500,
132 | });
133 |
134 | await vi.advanceTimersByTimeAsync(0);
135 | expect(mockFn).toHaveBeenCalledTimes(1);
136 |
137 | // First retry: 1000ms
138 | await vi.advanceTimersByTimeAsync(1000);
139 | expect(mockFn).toHaveBeenCalledTimes(2);
140 |
141 | // Second retry: should be capped at maxDelay (1500ms), not 10000ms
142 | await vi.advanceTimersByTimeAsync(1500);
143 | expect(mockFn).toHaveBeenCalledTimes(3);
144 |
145 | const result = await promise;
146 | expect(result).toBe('success');
147 |
148 | vi.useRealTimers();
149 | });
150 |
151 | it('should use custom shouldRetry function', async () => {
152 | // eslint-disable-next-line @typescript-eslint/no-unused-vars
153 | const customShouldRetry = vi.fn((error: Error, _attempt: number) =>
154 | error.message.includes('retry-me')
155 | );
156 |
157 | mockFn.mockRejectedValueOnce(new Error('retry-me please')).mockResolvedValue('success');
158 |
159 | const result = await withRetry(mockFn, { shouldRetry: customShouldRetry });
160 |
161 | expect(result).toBe('success');
162 | expect(mockFn).toHaveBeenCalledTimes(2);
163 | expect(customShouldRetry).toHaveBeenCalledWith(
164 | expect.objectContaining({ message: 'retry-me please' }),
165 | 1
166 | );
167 | });
168 |
169 | it('should not retry when custom shouldRetry returns false', async () => {
170 | const customShouldRetry = vi.fn(() => false);
171 |
172 | mockFn.mockRejectedValue(new Error('ECONNREFUSED'));
173 |
174 | await expect(withRetry(mockFn, { shouldRetry: customShouldRetry })).rejects.toThrow(
175 | 'ECONNREFUSED'
176 | );
177 | expect(mockFn).toHaveBeenCalledTimes(1);
178 | expect(customShouldRetry).toHaveBeenCalled();
179 | });
180 |
181 | describe('default shouldRetry behavior', () => {
182 | it('should retry on ECONNREFUSED', async () => {
183 | mockFn.mockRejectedValueOnce(new Error('ECONNREFUSED')).mockResolvedValue('success');
184 |
185 | const result = await withRetry(mockFn);
186 | expect(result).toBe('success');
187 | expect(mockFn).toHaveBeenCalledTimes(2);
188 | });
189 |
190 | it('should retry on ETIMEDOUT', async () => {
191 | mockFn.mockRejectedValueOnce(new Error('ETIMEDOUT')).mockResolvedValue('success');
192 |
193 | const result = await withRetry(mockFn);
194 | expect(result).toBe('success');
195 | expect(mockFn).toHaveBeenCalledTimes(2);
196 | });
197 |
198 | it('should retry on ENOTFOUND', async () => {
199 | mockFn.mockRejectedValueOnce(new Error('ENOTFOUND')).mockResolvedValue('success');
200 |
201 | const result = await withRetry(mockFn);
202 | expect(result).toBe('success');
203 | expect(mockFn).toHaveBeenCalledTimes(2);
204 | });
205 |
206 | it('should retry on ECONNRESET', async () => {
207 | mockFn.mockRejectedValueOnce(new Error('ECONNRESET')).mockResolvedValue('success');
208 |
209 | const result = await withRetry(mockFn);
210 | expect(result).toBe('success');
211 | expect(mockFn).toHaveBeenCalledTimes(2);
212 | });
213 |
214 | it('should retry on socket hang up', async () => {
215 | mockFn.mockRejectedValueOnce(new Error('socket hang up')).mockResolvedValue('success');
216 |
217 | const result = await withRetry(mockFn);
218 | expect(result).toBe('success');
219 | expect(mockFn).toHaveBeenCalledTimes(2);
220 | });
221 |
222 | it('should retry on 5xx errors', async () => {
223 | mockFn
224 | .mockRejectedValueOnce(new Error('HTTP 500 Internal Server Error'))
225 | .mockResolvedValue('success');
226 |
227 | const result = await withRetry(mockFn);
228 | expect(result).toBe('success');
229 | expect(mockFn).toHaveBeenCalledTimes(2);
230 | });
231 |
232 | it('should not retry on 4xx errors', async () => {
233 | mockFn.mockRejectedValue(new Error('HTTP 404 Not Found'));
234 |
235 | await expect(withRetry(mockFn)).rejects.toThrow('HTTP 404 Not Found');
236 | expect(mockFn).toHaveBeenCalledTimes(1);
237 | });
238 |
239 | it('should not retry on generic errors', async () => {
240 | mockFn.mockRejectedValue(new Error('Invalid input'));
241 |
242 | await expect(withRetry(mockFn)).rejects.toThrow('Invalid input');
243 | expect(mockFn).toHaveBeenCalledTimes(1);
244 | });
245 | });
246 | });
247 |
248 | describe('makeRetryable', () => {
249 | it('should create a retryable version of a function', async () => {
250 | const originalFn = vi
251 | .fn<() => Promise<string>>()
252 | .mockRejectedValueOnce(new Error('ECONNREFUSED') as never)
253 | .mockResolvedValue('success' as never);
254 |
255 | const retryableFn = makeRetryable(originalFn as (...args: unknown[]) => Promise<unknown>);
256 | const result = await retryableFn();
257 |
258 | expect(result).toBe('success');
259 | expect(originalFn).toHaveBeenCalledTimes(2);
260 | });
261 |
262 | it('should pass arguments correctly', async () => {
263 | const originalFn = vi
264 | .fn<(...args: unknown[]) => Promise<string>>()
265 | .mockResolvedValue('success' as never);
266 | const retryableFn = makeRetryable(originalFn as (...args: unknown[]) => Promise<unknown>);
267 |
268 | const result = await retryableFn('arg1', 'arg2', 123);
269 |
270 | expect(result).toBe('success');
271 | expect(originalFn).toHaveBeenCalledWith('arg1', 'arg2', 123);
272 | });
273 |
274 | it('should work with functions that have return types', async () => {
275 | const originalFn = vi.fn<(x: number) => Promise<string>>().mockResolvedValue('result');
276 |
277 | const retryableFn = makeRetryable(originalFn, { maxAttempts: 2 });
278 | const result = await retryableFn(42);
279 |
280 | expect(result).toBe('result');
281 | expect(originalFn).toHaveBeenCalledWith(42);
282 | });
283 |
284 | it('should use custom retry options', async () => {
285 | const originalFn = vi
286 | .fn<() => Promise<void>>()
287 | .mockRejectedValue(new Error('ECONNREFUSED') as never);
288 |
289 | const retryableFn = makeRetryable(originalFn as (...args: unknown[]) => Promise<unknown>, {
290 | maxAttempts: 1,
291 | });
292 |
293 | await expect(retryableFn()).rejects.toThrow('ECONNREFUSED');
294 | expect(originalFn).toHaveBeenCalledTimes(1);
295 | });
296 | });
297 |
298 | describe('error handling edge cases', () => {
299 | it('should handle non-Error objects by wrapping them in TypeError', async () => {
300 | mockFn.mockRejectedValue('string error');
301 |
302 | await expect(withRetry(mockFn)).rejects.toThrow(TypeError);
303 | expect(mockFn).toHaveBeenCalledTimes(1);
304 | });
305 |
306 | it('should handle null errors by wrapping them in TypeError', async () => {
307 | mockFn.mockRejectedValue(null);
308 |
309 | await expect(withRetry(mockFn)).rejects.toThrow(TypeError);
310 | expect(mockFn).toHaveBeenCalledTimes(1);
311 | });
312 |
313 | it('should handle undefined errors by wrapping them in TypeError', async () => {
314 | mockFn.mockRejectedValue(undefined);
315 |
316 | await expect(withRetry(mockFn)).rejects.toThrow(TypeError);
317 | expect(mockFn).toHaveBeenCalledTimes(1);
318 | });
319 | });
320 | });
321 |
```
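The retry tests above pin down the behaviour expected of `withRetry` and `makeRetryable`: transient network failures (ECONNREFUSED, socket hang up, HTTP 5xx) are retried, 4xx and generic errors surface immediately, non-Error rejections are wrapped in a `TypeError`, and `maxAttempts` caps the number of calls. The following is a minimal sketch consistent with those assertions; option names beyond `maxAttempts` and the exact retryable-error list are assumptions, and the repository's real utility may add backoff and logging.

```typescript
// Sketch only: a retry helper consistent with the behaviour asserted above.
// Names other than withRetry, makeRetryable, and maxAttempts are assumptions.

interface RetryOptions {
  maxAttempts?: number; // matches the option the tests pass to makeRetryable
  delayMs?: number; // hypothetical; the tests do not assert any delay
}

const RETRYABLE_PATTERNS = [/ECONNREFUSED/, /ETIMEDOUT/, /socket hang up/, /HTTP 5\d\d/];

function isRetryableError(error: Error): boolean {
  return RETRYABLE_PATTERNS.some((pattern) => pattern.test(error.message));
}

export async function withRetry<T>(fn: () => Promise<T>, options: RetryOptions = {}): Promise<T> {
  const maxAttempts = options.maxAttempts ?? 3;
  let lastError: Error | undefined;

  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return await fn();
    } catch (err) {
      // Non-Error rejections (strings, null, undefined) are wrapped in a TypeError,
      // matching the "error handling edge cases" tests.
      if (!(err instanceof Error)) {
        throw new TypeError(`Non-Error rejection: ${String(err)}`);
      }
      lastError = err;
      // 4xx and generic errors are not retried; neither is the final attempt.
      if (!isRetryableError(err) || attempt === maxAttempts) {
        throw err;
      }
    }
  }

  throw lastError ?? new Error('withRetry: exhausted attempts');
}

export function makeRetryable<A extends unknown[], R>(
  fn: (...args: A) => Promise<R>,
  options?: RetryOptions
): (...args: A) => Promise<R> {
  return (...args: A) => withRetry(() => fn(...args), options);
}
```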
--------------------------------------------------------------------------------
/src/__tests__/handlers/components-handler-integration.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
2 | import { handleSonarQubeComponents } from '../../handlers/components.js';
3 | import { ISonarQubeClient } from '../../types/index.js';
4 | import { resetDefaultClient } from '../../utils/client-factory.js';
5 | describe('Components Handler Integration', () => {
6 | let mockClient: ISonarQubeClient;
7 | let mockSearchBuilder: any;
8 | let mockTreeBuilder: any;
9 | beforeEach(() => {
10 | vi.clearAllMocks();
11 | resetDefaultClient();
12 | // Create mock builders
13 | mockSearchBuilder = {
14 | query: vi.fn().mockReturnThis(),
15 | qualifiers: vi.fn().mockReturnThis(),
16 | languages: vi.fn().mockReturnThis(),
17 | page: vi.fn().mockReturnThis(),
18 | pageSize: vi.fn().mockReturnThis(),
19 | execute: vi.fn(),
20 | };
21 | mockTreeBuilder = {
22 | component: vi.fn().mockReturnThis(),
23 | childrenOnly: vi.fn().mockReturnThis(),
24 | leavesOnly: vi.fn().mockReturnThis(),
25 | qualifiers: vi.fn().mockReturnThis(),
26 | sortByName: vi.fn().mockReturnThis(),
27 | sortByPath: vi.fn().mockReturnThis(),
28 | sortByQualifier: vi.fn().mockReturnThis(),
29 | page: vi.fn().mockReturnThis(),
30 | pageSize: vi.fn().mockReturnThis(),
31 | branch: vi.fn().mockReturnThis(),
32 | pullRequest: vi.fn().mockReturnThis(),
33 | execute: vi.fn(),
34 | };
35 | const mockWebApiClient = {
36 | components: {
37 | search: vi.fn().mockReturnValue(mockSearchBuilder),
38 | tree: vi.fn().mockReturnValue(mockTreeBuilder),
39 | show: vi.fn(),
40 | },
41 | };
42 | // Create mock client
43 | mockClient = {
44 | webApiClient: mockWebApiClient,
45 | organization: 'test-org',
46 | } as any;
47 | });
48 | afterEach(() => {
49 | vi.clearAllMocks();
50 | resetDefaultClient();
51 | });
52 | describe('Search Operation', () => {
53 | it('should handle component search with query', async () => {
54 | const mockSearchResult = {
55 | components: [
56 | { key: 'comp1', name: 'Component 1', qualifier: 'TRK' },
57 | { key: 'comp2', name: 'Component 2', qualifier: 'FIL' },
58 | ],
59 | paging: { pageIndex: 1, pageSize: 100, total: 2 },
60 | };
61 | mockSearchBuilder.execute.mockResolvedValue(mockSearchResult);
62 | const result = await handleSonarQubeComponents(
63 | { query: 'test', qualifiers: ['TRK', 'FIL'] },
64 | mockClient
65 | );
66 | expect(mockSearchBuilder.query).toHaveBeenCalledWith('test');
67 | expect(mockSearchBuilder.qualifiers).toHaveBeenCalledWith(['TRK', 'FIL']);
68 | expect(mockSearchBuilder.execute).toHaveBeenCalled();
69 | const firstContent = result.content[0]!;
70 | if ('text' in firstContent && typeof firstContent.text === 'string') {
71 | const content = JSON.parse(firstContent.text);
72 | expect(content.components).toHaveLength(2);
73 | expect(content.components[0].key).toBe('comp1');
74 | } else {
75 | throw new Error('Expected text content in first result item');
76 | }
77 | });
78 | it('should handle component search with language filter', async () => {
79 | const mockSearchResult = {
80 | components: [{ key: 'comp1', name: 'Component 1', qualifier: 'FIL' }],
81 | paging: { pageIndex: 1, pageSize: 100, total: 1 },
82 | };
83 | mockSearchBuilder.execute.mockResolvedValue(mockSearchResult);
84 | await handleSonarQubeComponents({ query: 'test', language: 'java' }, mockClient);
85 | expect(mockSearchBuilder.query).toHaveBeenCalledWith('test');
86 | expect(mockSearchBuilder.languages).toHaveBeenCalledWith(['java']);
87 | });
88 | it('should default to listing all projects when no specific operation', async () => {
89 | const mockSearchResult = {
90 | components: [{ key: 'proj1', name: 'Project 1', qualifier: 'TRK' }],
91 | paging: { pageIndex: 1, pageSize: 100, total: 1 },
92 | };
93 | mockSearchBuilder.execute.mockResolvedValue(mockSearchResult);
94 | await handleSonarQubeComponents({}, mockClient);
95 | expect(mockSearchBuilder.qualifiers).toHaveBeenCalledWith(['TRK']);
96 | });
97 | it('should handle pagination parameters', async () => {
98 | const mockSearchResult = {
99 | components: [],
100 | paging: { pageIndex: 2, pageSize: 50, total: 100 },
101 | };
102 | mockSearchBuilder.execute.mockResolvedValue(mockSearchResult);
103 | await handleSonarQubeComponents({ query: 'test', p: 2, ps: 50 }, mockClient);
104 | expect(mockSearchBuilder.page).toHaveBeenCalledWith(2);
105 | expect(mockSearchBuilder.pageSize).toHaveBeenCalledWith(50);
106 | });
107 | });
108 | describe('Tree Navigation Operation', () => {
109 | it('should handle component tree navigation', async () => {
110 | const mockTreeResult = {
111 | components: [
112 | { key: 'dir1', name: 'Directory 1', qualifier: 'DIR' },
113 | { key: 'file1', name: 'File 1', qualifier: 'FIL' },
114 | ],
115 | baseComponent: { key: 'project1', name: 'Project 1', qualifier: 'TRK' },
116 | paging: { pageIndex: 1, pageSize: 100, total: 2 },
117 | };
118 | mockTreeBuilder.execute.mockResolvedValue(mockTreeResult);
119 | const result = await handleSonarQubeComponents(
120 | {
121 | component: 'project1',
122 | strategy: 'children',
123 | qualifiers: ['DIR', 'FIL'],
124 | },
125 | mockClient
126 | );
127 | expect(mockTreeBuilder.component).toHaveBeenCalledWith('project1');
128 | expect(mockTreeBuilder.childrenOnly).toHaveBeenCalled();
129 | expect(mockTreeBuilder.qualifiers).toHaveBeenCalledWith(['DIR', 'FIL']);
130 | const firstContent = result.content[0]!;
131 | if ('text' in firstContent && typeof firstContent.text === 'string') {
132 | const content = JSON.parse(firstContent.text);
133 | expect(content.components).toHaveLength(2);
134 | expect(content.baseComponent.key).toBe('project1');
135 | } else {
136 | throw new Error('Expected text content in first result item');
137 | }
138 | });
139 | it('should handle tree navigation with branch', async () => {
140 | const mockTreeResult = {
141 | components: [],
142 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
143 | };
144 | mockTreeBuilder.execute.mockResolvedValue(mockTreeResult);
145 | await handleSonarQubeComponents(
146 | {
147 | component: 'project1',
148 | branch: 'develop',
149 | ps: 50,
150 | p: 2,
151 | },
152 | mockClient
153 | );
154 | expect(mockTreeBuilder.branch).toHaveBeenCalledWith('develop');
155 | expect(mockTreeBuilder.page).toHaveBeenCalledWith(2);
156 | expect(mockTreeBuilder.pageSize).toHaveBeenCalledWith(50);
157 | });
158 | it('should handle leaves strategy', async () => {
159 | const mockTreeResult = {
160 | components: [],
161 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
162 | };
163 | mockTreeBuilder.execute.mockResolvedValue(mockTreeResult);
164 | await handleSonarQubeComponents(
165 | {
166 | component: 'project1',
167 | strategy: 'leaves',
168 | },
169 | mockClient
170 | );
171 | expect(mockTreeBuilder.leavesOnly).toHaveBeenCalled();
172 | expect(mockTreeBuilder.childrenOnly).not.toHaveBeenCalled();
173 | });
174 | });
175 | describe('Show Component Operation', () => {
176 | it('should handle show component details', async () => {
177 | const mockShowResult = {
178 | component: { key: 'comp1', name: 'Component 1', qualifier: 'FIL' },
179 | ancestors: [
180 | { key: 'proj1', name: 'Project 1', qualifier: 'TRK' },
181 | { key: 'dir1', name: 'Directory 1', qualifier: 'DIR' },
182 | ],
183 | };
184 | (mockClient.webApiClient as any).components.show.mockResolvedValue(mockShowResult);
185 | const result = await handleSonarQubeComponents({ key: 'comp1' }, mockClient);
186 | expect((mockClient.webApiClient as any).components.show).toHaveBeenCalledWith('comp1');
187 | const firstContent = result.content[0]!;
188 | if ('text' in firstContent && typeof firstContent.text === 'string') {
189 | const content = JSON.parse(firstContent.text);
190 | expect(content.component.key).toBe('comp1');
191 | expect(content.ancestors).toHaveLength(2);
192 | } else {
193 | throw new Error('Expected text content in first result item');
194 | }
195 | });
196 | it('should handle show component with branch and PR', async () => {
197 | const mockShowResult = {
198 | component: { key: 'comp1', name: 'Component 1', qualifier: 'FIL' },
199 | ancestors: [],
200 | };
201 | (mockClient.webApiClient as any).components.show.mockResolvedValue(mockShowResult);
202 | await handleSonarQubeComponents(
203 | {
204 | key: 'comp1',
205 | branch: 'feature-branch',
206 | pullRequest: 'PR-123',
207 | },
208 | mockClient
209 | );
210 | // Note: branch and PR are passed to domain but not used by API
211 | expect((mockClient.webApiClient as any).components.show).toHaveBeenCalledWith('comp1');
212 | });
213 | });
214 | describe('Error Handling', () => {
215 | it('should handle search errors gracefully', async () => {
216 | mockSearchBuilder.execute.mockRejectedValue(new Error('Search API Error'));
217 | await expect(handleSonarQubeComponents({ query: 'test' }, mockClient)).rejects.toThrow(
218 | 'Search API Error'
219 | );
220 | });
221 | it('should handle tree errors gracefully', async () => {
222 | mockTreeBuilder.execute.mockRejectedValue(new Error('Tree API Error'));
223 | await expect(
224 | handleSonarQubeComponents({ component: 'project1' }, mockClient)
225 | ).rejects.toThrow('Tree API Error');
226 | });
227 | it('should handle show errors gracefully', async () => {
228 | (mockClient.webApiClient as any).components.show.mockRejectedValue(
229 | new Error('Show API Error')
230 | );
231 | await expect(handleSonarQubeComponents({ key: 'comp1' }, mockClient)).rejects.toThrow(
232 | 'Show API Error'
233 | );
234 | });
235 | });
236 | describe('Parameter Priority', () => {
237 | it('should prioritize show operation over tree operation', async () => {
238 | const mockShowResult = {
239 | component: { key: 'comp1', name: 'Component 1', qualifier: 'FIL' },
240 | ancestors: [],
241 | };
242 | (mockClient.webApiClient as any).components.show.mockResolvedValue(mockShowResult);
243 | await handleSonarQubeComponents(
244 | {
245 | key: 'comp1',
246 | component: 'project1', // This should be ignored
247 | query: 'test', // This should also be ignored
248 | },
249 | mockClient
250 | );
251 | expect((mockClient.webApiClient as any).components.show).toHaveBeenCalled();
252 | expect((mockClient.webApiClient as any).components.tree).not.toHaveBeenCalled();
253 | expect((mockClient.webApiClient as any).components.search).not.toHaveBeenCalled();
254 | });
255 | it('should prioritize tree operation over search operation', async () => {
256 | const mockTreeResult = {
257 | components: [],
258 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
259 | };
260 | mockTreeBuilder.execute.mockResolvedValue(mockTreeResult);
261 | await handleSonarQubeComponents(
262 | {
263 | component: 'project1',
264 | query: 'test', // This should be ignored when component is present
265 | },
266 | mockClient
267 | );
268 | expect((mockClient.webApiClient as any).components.tree).toHaveBeenCalled();
269 | expect((mockClient.webApiClient as any).components.search).not.toHaveBeenCalled();
270 | });
271 | });
272 | });
273 |
```
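The 'Parameter Priority' tests above document how `handleSonarQubeComponents` chooses between its three operations. Below is a small sketch of that dispatch, under the assumption that routing is based purely on which parameters are present; the helper name `selectComponentsOperation` is hypothetical.

```typescript
// Sketch of the parameter-priority routing the tests above imply:
// key wins over component, component wins over query, and an empty call lists projects.

interface ComponentsParams {
  key?: string; // show a single component
  component?: string; // navigate a component tree
  query?: string; // free-text search
  qualifiers?: string[];
  strategy?: 'children' | 'leaves' | 'all';
  branch?: string;
  pullRequest?: string;
  p?: number;
  ps?: number;
}

function selectComponentsOperation(params: ComponentsParams): 'show' | 'tree' | 'search' {
  if (params.key) return 'show';
  if (params.component) return 'tree';
  return 'search'; // default search is scoped to qualifier 'TRK' (projects)
}

// selectComponentsOperation({ key: 'comp1', component: 'project1', query: 'test' }) === 'show'
// selectComponentsOperation({ component: 'project1', query: 'test' }) === 'tree'
// selectComponentsOperation({}) === 'search'
```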
--------------------------------------------------------------------------------
/src/__tests__/zod-schema-transforms.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest';
2 | import { z } from 'zod';
3 | // Our focus is on testing the schema transformation functions that are used in index.ts
4 | describe('Zod Schema Transformation Tests', () => {
5 | describe('String to Number Transformations', () => {
6 | it('should transform valid string numbers to integers', () => {
7 | // This is the exact transformation used in index.ts
8 | const schema = z
9 | .string()
10 | .optional()
11 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
12 | // Test with a valid number string
13 | expect(schema.parse('10')).toBe(10);
14 | });
15 | it('should transform invalid string numbers to null', () => {
16 | // This is the exact transformation used in index.ts
17 | const schema = z
18 | .string()
19 | .optional()
20 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
21 | // Test with an invalid number string
22 | expect(schema.parse('abc')).toBe(null);
23 | });
24 | it('should transform empty string to null', () => {
25 | // This is the exact transformation used in index.ts
26 | const schema = z
27 | .string()
28 | .optional()
29 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
30 | // Test with an empty string
31 | expect(schema.parse('')).toBe(null);
32 | });
33 | it('should transform undefined to null', () => {
34 | // This is the exact transformation used in index.ts
35 | const schema = z
36 | .string()
37 | .optional()
38 | .transform((val: any) => (val ? parseInt(val, 10) || null : null));
39 | // Test with undefined
40 | expect(schema.parse(undefined)).toBe(null);
41 | });
42 | });
43 | describe('String to Boolean Transformations', () => {
44 | it('should transform "true" string to true boolean', () => {
45 | // This is the exact transformation used in index.ts
46 | const schema = z
47 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
48 | .nullable()
49 | .optional();
50 | // Test with "true" string
51 | expect(schema.parse('true')).toBe(true);
52 | });
53 | it('should transform "false" string to false boolean', () => {
54 | // This is the exact transformation used in index.ts
55 | const schema = z
56 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
57 | .nullable()
58 | .optional();
59 | // Test with "false" string
60 | expect(schema.parse('false')).toBe(false);
61 | });
62 | it('should pass through true boolean', () => {
63 | // This is the exact transformation used in index.ts
64 | const schema = z
65 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
66 | .nullable()
67 | .optional();
68 | // Test with true boolean
69 | expect(schema.parse(true)).toBe(true);
70 | });
71 | it('should pass through false boolean', () => {
72 | // This is the exact transformation used in index.ts
73 | const schema = z
74 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
75 | .nullable()
76 | .optional();
77 | // Test with false boolean
78 | expect(schema.parse(false)).toBe(false);
79 | });
80 | it('should pass through null', () => {
81 | // This is the exact transformation used in index.ts
82 | const schema = z
83 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
84 | .nullable()
85 | .optional();
86 | // Test with null
87 | expect(schema.parse(null)).toBe(null);
88 | });
89 | it('should pass through undefined', () => {
90 | // This is the exact transformation used in index.ts
91 | const schema = z
92 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
93 | .nullable()
94 | .optional();
95 | // Test with undefined
96 | expect(schema.parse(undefined)).toBe(undefined);
97 | });
98 | });
99 | describe('Complex Schema Combinations', () => {
100 | it('should transform string parameters in a complex schema', () => {
101 | // Create a schema similar to the ones in index.ts
102 | const statusEnumSchema = z.enum(['OPEN', 'CONFIRMED', 'REOPENED', 'RESOLVED', 'CLOSED']);
103 | const statusSchema = z.array(statusEnumSchema).nullable().optional();
104 | const resolutionEnumSchema = z.enum(['FALSE-POSITIVE', 'WONTFIX', 'FIXED', 'REMOVED']);
105 | const resolutionSchema = z.array(resolutionEnumSchema).nullable().optional();
106 | const typeEnumSchema = z.enum(['CODE_SMELL', 'BUG', 'VULNERABILITY', 'SECURITY_HOTSPOT']);
107 | const typeSchema = z.array(typeEnumSchema).nullable().optional();
108 | const issuesSchema = z.object({
109 | project_key: z.string(),
110 | severity: z.enum(['INFO', 'MINOR', 'MAJOR', 'CRITICAL', 'BLOCKER']).nullable().optional(),
111 | page: z
112 | .string()
113 | .optional()
114 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
115 | page_size: z
116 | .string()
117 | .optional()
118 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
119 | statuses: statusSchema,
120 | resolutions: resolutionSchema,
121 | resolved: z
122 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
123 | .nullable()
124 | .optional(),
125 | types: typeSchema,
126 | rules: z.array(z.string()).nullable().optional(),
127 | tags: z.array(z.string()).nullable().optional(),
128 | on_component_only: z
129 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
130 | .nullable()
131 | .optional(),
132 | since_leak_period: z
133 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
134 | .nullable()
135 | .optional(),
136 | in_new_code_period: z
137 | .union([z.boolean(), z.string().transform((val: any) => val === 'true')])
138 | .nullable()
139 | .optional(),
140 | });
141 | // Test with various parameter types
142 | const parsedParams = issuesSchema.parse({
143 | project_key: 'test-project',
144 | severity: 'MAJOR',
145 | page: '2',
146 | page_size: '10',
147 | statuses: ['OPEN', 'CONFIRMED'],
148 | resolved: 'true',
149 | types: ['BUG', 'VULNERABILITY'],
150 | rules: ['rule1', 'rule2'],
151 | tags: ['tag1', 'tag2'],
152 | on_component_only: 'true',
153 | since_leak_period: 'true',
154 | in_new_code_period: 'true',
155 | });
156 | // Check all the transformations
157 | expect(parsedParams.project_key).toBe('test-project');
158 | expect(parsedParams.severity).toBe('MAJOR');
159 | expect(parsedParams.page).toBe(2);
160 | expect(parsedParams.page_size).toBe(10);
161 | expect(parsedParams.statuses).toEqual(['OPEN', 'CONFIRMED']);
162 | expect(parsedParams.resolved).toBe(true);
163 | expect(parsedParams.types).toEqual(['BUG', 'VULNERABILITY']);
164 | expect(parsedParams.on_component_only).toBe(true);
165 | expect(parsedParams.since_leak_period).toBe(true);
166 | expect(parsedParams.in_new_code_period).toBe(true);
167 | });
168 | it('should transform component measures schema parameters', () => {
169 | // Create a schema similar to component measures schema in index.ts
170 | const measuresComponentSchema = z.object({
171 | component: z.string(),
172 | metric_keys: z.array(z.string()),
173 | branch: z.string().optional(),
174 | pull_request: z.string().optional(),
175 | additional_fields: z.array(z.string()).optional(),
176 | period: z.string().optional(),
177 | page: z
178 | .string()
179 | .optional()
180 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
181 | page_size: z
182 | .string()
183 | .optional()
184 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
185 | });
186 | // Test with valid parameters
187 | const parsedParams = measuresComponentSchema.parse({
188 | component: 'test-component',
189 | metric_keys: ['complexity', 'coverage'],
190 | branch: 'main',
191 | additional_fields: ['metrics'],
192 | page: '2',
193 | page_size: '20',
194 | });
195 | // Check the transformations
196 | expect(parsedParams.component).toBe('test-component');
197 | expect(parsedParams.metric_keys).toEqual(['complexity', 'coverage']);
198 | expect(parsedParams.branch).toBe('main');
199 | expect(parsedParams.page).toBe(2);
200 | expect(parsedParams.page_size).toBe(20);
201 | // Test with invalid page values
202 | const invalidParams = measuresComponentSchema.parse({
203 | component: 'test-component',
204 | metric_keys: ['complexity', 'coverage'],
205 | page: 'invalid',
206 | page_size: 'invalid',
207 | });
208 | expect(invalidParams.page).toBe(null);
209 | expect(invalidParams.page_size).toBe(null);
210 | });
211 | it('should transform components measures schema parameters', () => {
212 | // Create a schema similar to components measures schema in index.ts
213 | const measuresComponentsSchema = z.object({
214 | component_keys: z.array(z.string()),
215 | metric_keys: z.array(z.string()),
216 | branch: z.string().optional(),
217 | pull_request: z.string().optional(),
218 | additional_fields: z.array(z.string()).optional(),
219 | period: z.string().optional(),
220 | page: z
221 | .string()
222 | .optional()
223 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
224 | page_size: z
225 | .string()
226 | .optional()
227 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
228 | });
229 | // Test with valid parameters
230 | const parsedParams = measuresComponentsSchema.parse({
231 | component_keys: ['comp-1', 'comp-2'],
232 | metric_keys: ['complexity', 'coverage'],
233 | branch: 'main',
234 | page: '2',
235 | page_size: '20',
236 | });
237 | // Check the transformations
238 | expect(parsedParams.component_keys).toEqual(['comp-1', 'comp-2']);
239 | expect(parsedParams.metric_keys).toEqual(['complexity', 'coverage']);
240 | expect(parsedParams.page).toBe(2);
241 | expect(parsedParams.page_size).toBe(20);
242 | });
243 | it('should transform measures history schema parameters', () => {
244 | // Create a schema similar to measures history schema in index.ts
245 | const measuresHistorySchema = z.object({
246 | component: z.string(),
247 | metrics: z.array(z.string()),
248 | from: z.string().optional(),
249 | to: z.string().optional(),
250 | branch: z.string().optional(),
251 | pull_request: z.string().optional(),
252 | page: z
253 | .string()
254 | .optional()
255 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
256 | page_size: z
257 | .string()
258 | .optional()
259 | .transform((val: any) => (val ? parseInt(val, 10) || null : null)),
260 | });
261 | // Test with valid parameters
262 | const parsedParams = measuresHistorySchema.parse({
263 | component: 'test-component',
264 | metrics: ['complexity', 'coverage'],
265 | from: '2023-01-01',
266 | to: '2023-12-31',
267 | page: '3',
268 | page_size: '15',
269 | });
270 | // Check the transformations
271 | expect(parsedParams.component).toBe('test-component');
272 | expect(parsedParams.metrics).toEqual(['complexity', 'coverage']);
273 | expect(parsedParams.from).toBe('2023-01-01');
274 | expect(parsedParams.to).toBe('2023-12-31');
275 | expect(parsedParams.page).toBe(3);
276 | expect(parsedParams.page_size).toBe(15);
277 | });
278 | });
279 | });
280 |
```
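Both transforms exercised above can be read as two reusable Zod helpers. The sketch below restates them under assumed names (`stringToNumber`, `stringToBoolean`), since index.ts reportedly inlines the equivalent expressions. Note that the `parseInt(val, 10) || null` idiom also maps the string '0' to null, a case these tests do not cover.

```typescript
import { z } from 'zod';

// The two transforms exercised above, restated as helpers under assumed names.

// '10' -> 10, 'abc' -> null, '' -> null, undefined -> null.
// Because of `parseInt(val, 10) || null`, the string '0' also becomes null.
const stringToNumber = z
  .string()
  .optional()
  .transform((val) => (val ? parseInt(val, 10) || null : null));

// true/'true' -> true, false/'false' -> false; null and undefined pass through.
const stringToBoolean = z
  .union([z.boolean(), z.string().transform((val) => val === 'true')])
  .nullable()
  .optional();

// Usage: stringToNumber.parse('42') === 42; stringToBoolean.parse('false') === false
```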
--------------------------------------------------------------------------------
/src/__tests__/source-code.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import nock from 'nock';
2 | import {
3 | createSonarQubeClient,
4 | SonarQubeClient,
5 | SourceCodeParams,
6 | ScmBlameParams,
7 | } from '../sonarqube.js';
8 | import { handleSonarQubeGetSourceCode, handleSonarQubeGetScmBlame } from '../index.js';
9 |
10 | describe('SonarQube Source Code API', () => {
11 | const baseUrl = 'https://sonarcloud.io';
12 | const token = 'fake-token';
13 | let client: SonarQubeClient;
14 |
15 | // Helper function to mock raw source code API response
16 | const mockRawSourceResponse = (
17 | key: string,
18 | sourceCode: string,
19 | query?: Record<string, unknown>
20 | ) => {
21 | const queryMatcher = query ? query : { key };
22 | return nock(baseUrl)
23 | .get('/api/sources/raw')
24 | .query(queryMatcher as any)
25 | .reply(200, sourceCode);
26 | };
27 |
28 | beforeEach(() => {
29 | client = createSonarQubeClient(token, baseUrl) as SonarQubeClient;
30 | nock.disableNetConnect();
31 | });
32 |
33 | afterEach(() => {
34 | nock.cleanAll();
35 | nock.enableNetConnect();
36 | });
37 |
38 | describe('getSourceCode', () => {
39 | it('should return source code for a component', async () => {
40 | const params: SourceCodeParams = {
41 | key: 'my-project:src/main.js',
42 | };
43 |
44 | const mockResponse = {
45 | component: {
46 | key: 'my-project:src/main.js',
47 | qualifier: 'FIL',
48 | name: 'main.js',
49 | longName: 'my-project:src/main.js',
50 | },
51 | sources: [
52 | {
53 | line: 1,
54 | code: 'function main() {',
55 | issues: undefined,
56 | },
57 | {
58 | line: 2,
59 | code: ' console.log("Hello, world!");',
60 | issues: [
61 | {
62 | key: 'issue1',
63 | rule: 'javascript:S2228',
64 | severity: 'MINOR',
65 | component: 'my-project:src/main.js',
66 | project: 'my-project',
67 | line: 2,
68 | status: 'OPEN',
69 | message: 'Use a logger instead of console.log',
70 | effort: '5min',
71 | type: 'CODE_SMELL',
72 | },
73 | ],
74 | },
75 | {
76 | line: 3,
77 | code: '}',
78 | issues: undefined,
79 | },
80 | ],
81 | };
82 |
83 | // Mock the source code API call - raw endpoint returns plain text
84 | mockRawSourceResponse(params.key, 'function main() {\n console.log("Hello, world!");\n}');
85 |
86 | // Mock the issues API call
87 | nock(baseUrl)
88 | .get('/api/issues/search')
89 | .query(
90 | (queryObj) => queryObj.projects === params.key && queryObj.onComponentOnly === 'true'
91 | )
92 | .reply(200, {
93 | issues: [
94 | {
95 | key: 'issue1',
96 | rule: 'javascript:S1848',
97 | severity: 'MAJOR',
98 | component: 'my-project:src/main.js',
99 | project: 'my-project',
100 | line: 2,
101 | message: 'Use a logger instead of console.log',
102 | tags: ['bad-practice'],
103 | creationDate: '2021-01-01T00:00:00Z',
104 | updateDate: '2021-01-01T00:00:00Z',
105 | status: 'OPEN',
106 | effort: '5min',
107 | type: 'CODE_SMELL',
108 | },
109 | ],
110 | components: [],
111 | rules: [],
112 | paging: { pageIndex: 1, pageSize: 100, total: 1 },
113 | });
114 |
115 | const result = await client.getSourceCode(params);
116 |
117 | // The result should include the source code with issue annotations
118 | expect(result.component).toEqual(mockResponse.component);
119 | expect(result.sources.length).toBe(3);
120 |
121 | // Line 2 should have an issue associated with it
122 | expect(result.sources?.[1]?.line).toBe(2);
123 | expect(result.sources?.[1]?.code).toBe(' console.log("Hello, world!");');
124 | expect(result.sources?.[1]?.issues).toBeDefined();
125 | expect(result.sources?.[1]?.issues?.[0]?.message).toBe('Use a logger instead of console.log');
126 | });
127 |
128 | it('should handle errors in issues retrieval', async () => {
129 | const params: SourceCodeParams = {
130 | key: 'my-project:src/main.js',
131 | };
132 |
133 | const mockResponse = {
134 | component: {
135 | key: 'my-project:src/main.js',
136 | qualifier: 'FIL',
137 | name: 'main.js',
138 | longName: 'my-project:src/main.js',
139 | },
140 | sources: [
141 | {
142 | line: 1,
143 | code: 'function main() {',
144 | },
145 | ],
146 | };
147 |
148 | // Mock the source code API call - raw endpoint returns plain text
149 | mockRawSourceResponse(params.key, 'function main() {');
150 |
151 | // Mock a failed issues API call
152 | nock(baseUrl)
153 | .get('/api/issues/search')
154 | .query(
155 | (queryObj) => queryObj.projects === params.key && queryObj.onComponentOnly === 'true'
156 | )
157 | .replyWithError('Issues API error');
158 |
159 | const result = await client.getSourceCode(params);
160 |
161 | // Should return the source without annotations
162 | expect(result).toEqual(mockResponse);
163 | });
164 |
165 | it('should return source code without annotations when key is not provided', async () => {
166 | const params: SourceCodeParams = {
167 | key: '',
168 | };
169 |
170 | // Mock the source code API call - raw endpoint returns plain text
171 | mockRawSourceResponse('', 'function main() {', { key: '' } as any);
172 |
173 | const result = await client.getSourceCode(params);
174 |
175 | // Should return the source without annotations
176 | // When key is empty, component fields will be empty
177 | expect(result).toEqual({
178 | component: {
179 | key: '',
180 | qualifier: 'FIL',
181 | name: '',
182 | longName: '',
183 | },
184 | sources: [
185 | {
186 | line: 1,
187 | code: 'function main() {',
188 | },
189 | ],
190 | });
191 | });
192 |
193 | it('should return source code with line range', async () => {
194 | const params: SourceCodeParams = {
195 | key: 'my-project:src/main.js',
196 | from: 2,
197 | to: 2,
198 | };
199 |
200 | const mockResponse = {
201 | component: {
202 | key: 'my-project:src/main.js',
203 | qualifier: 'FIL',
204 | name: 'main.js',
205 | longName: 'my-project:src/main.js',
206 | },
207 | sources: [
208 | {
209 | line: 1,
210 | code: 'function main() {',
211 | },
212 | {
213 | line: 2,
214 | code: ' console.log("Hello, world!");',
215 | },
216 | {
217 | line: 3,
218 | code: '}',
219 | },
220 | ],
221 | };
222 |
223 | // Mock the raw source code API call - returns plain text with multiple lines
224 | mockRawSourceResponse(params.key, 'function main() {\n console.log("Hello, world!");\n}');
225 |
226 | // Mock the issues API call (no issues this time)
227 | nock(baseUrl)
228 | .get('/api/issues/search')
229 | .query(
230 | (queryObj) => queryObj.projects === params.key && queryObj.onComponentOnly === 'true'
231 | )
232 | .reply(200, {
233 | issues: [],
234 | components: [],
235 | rules: [],
236 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
237 | });
238 |
239 | const result = await client.getSourceCode(params);
240 |
241 | expect(result.component).toEqual(mockResponse.component);
242 | expect(result.sources.length).toBe(1);
243 | expect(result.sources?.[0]?.line).toBe(2);
244 | expect(result.sources?.[0]?.issues).toBeUndefined();
245 | });
246 |
247 | it('handler should return source code in the expected format', async () => {
248 | const params: SourceCodeParams = {
249 | key: 'my-project:src/main.js',
250 | };
251 |
252 | const mockResponse = {
253 | component: {
254 | key: 'my-project:src/main.js',
255 | qualifier: 'FIL',
256 | name: 'main.js',
257 | longName: 'my-project:src/main.js',
258 | },
259 | sources: [
260 | {
261 | line: 1,
262 | code: 'function main() {',
263 | },
264 | ],
265 | };
266 |
267 | // Mock the raw source code API call - returns plain text
268 | mockRawSourceResponse(params.key, 'function main() {');
269 |
270 | // Mock the issues API call
271 | nock(baseUrl)
272 | .get('/api/issues/search')
273 | .query(
274 | (queryObj) => queryObj.projects === params.key && queryObj.onComponentOnly === 'true'
275 | )
276 | .reply(200, {
277 | issues: [],
278 | components: [],
279 | rules: [],
280 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
281 | });
282 |
283 | const response = await handleSonarQubeGetSourceCode(params, client);
284 | expect(response).toHaveProperty('content');
285 | expect(response.content).toHaveLength(1);
286 | expect(response.content[0]?.type).toBe('text');
287 |
288 | const parsedContent = JSON.parse(response.content[0]?.text as string);
289 | expect(parsedContent.component).toEqual(mockResponse.component);
290 | });
291 | });
292 |
293 | describe('getScmBlame', () => {
294 | it('should return SCM blame information', async () => {
295 | const params: ScmBlameParams = {
296 | key: 'my-project:src/main.js',
297 | };
298 |
299 | const mockResponse = {
300 | component: {
301 | key: 'my-project:src/main.js',
302 | path: 'src/main.js',
303 | qualifier: 'FIL',
304 | name: 'main.js',
305 | language: 'js',
306 | },
307 | sources: {
308 | '1': {
309 | revision: 'abc123',
310 | date: '2021-01-01T00:00:00Z',
311 | author: 'developer',
312 | },
313 | '2': {
314 | revision: 'def456',
315 | date: '2021-01-02T00:00:00Z',
316 | author: 'another-dev',
317 | },
318 | '3': {
319 | revision: 'abc123',
320 | date: '2021-01-01T00:00:00Z',
321 | author: 'developer',
322 | },
323 | },
324 | };
325 |
326 | nock(baseUrl).get('/api/sources/scm').query({ key: params.key }).reply(200, mockResponse);
327 |
328 | const result = await client.getScmBlame(params);
329 |
330 | expect(result.component).toEqual(mockResponse.component);
331 | expect(Object.keys(result.sources).length).toBe(3);
332 | expect(result.sources?.['1']?.author).toBe('developer');
333 | expect(result.sources?.['2']?.author).toBe('another-dev');
334 | expect(result.sources?.['1']?.revision).toBe('abc123');
335 | });
336 |
337 | it('should return SCM blame for specific line range', async () => {
338 | const params: ScmBlameParams = {
339 | key: 'my-project:src/main.js',
340 | from: 2,
341 | to: 2,
342 | };
343 |
344 | const mockResponse = {
345 | component: {
346 | key: 'my-project:src/main.js',
347 | path: 'src/main.js',
348 | qualifier: 'FIL',
349 | name: 'main.js',
350 | language: 'js',
351 | },
352 | sources: {
353 | '2': {
354 | revision: 'def456',
355 | date: '2021-01-02T00:00:00Z',
356 | author: 'another-dev',
357 | },
358 | },
359 | };
360 |
361 | nock(baseUrl)
362 | .get('/api/sources/scm')
363 | .query({ key: params.key, from: params.from, to: params.to })
364 | .reply(200, mockResponse);
365 |
366 | const result = await client.getScmBlame(params);
367 |
368 | expect(result.component).toEqual(mockResponse.component);
369 | expect(Object.keys(result.sources).length).toBe(1);
370 | expect(Object.keys(result.sources)[0]).toBe('2');
371 | expect(result.sources?.['2']?.author).toBe('another-dev');
372 | });
373 |
374 | it('handler should return SCM blame in the expected format', async () => {
375 | const params: ScmBlameParams = {
376 | key: 'my-project:src/main.js',
377 | };
378 |
379 | const mockResponse = {
380 | component: {
381 | key: 'my-project:src/main.js',
382 | path: 'src/main.js',
383 | qualifier: 'FIL',
384 | name: 'main.js',
385 | language: 'js',
386 | },
387 | sources: {
388 | '1': {
389 | revision: 'abc123',
390 | date: '2021-01-01T00:00:00Z',
391 | author: 'developer',
392 | },
393 | },
394 | };
395 |
396 | nock(baseUrl).get('/api/sources/scm').query({ key: params.key }).reply(200, mockResponse);
397 |
398 | const response = await handleSonarQubeGetScmBlame(params, client);
399 | expect(response).toHaveProperty('content');
400 | expect(response.content).toHaveLength(1);
401 | expect(response.content[0]?.type).toBe('text');
402 |
403 | const parsedContent = JSON.parse(response.content[0]?.text as string);
404 | expect(parsedContent.component).toEqual(mockResponse.component);
405 | expect(parsedContent.sources?.['1']?.author).toBe('developer');
406 | });
407 | });
408 | });
409 |
```
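The getSourceCode tests above rely on the client fetching plain text from /api/sources/raw, splitting it into numbered lines, and attaching any matching issues from /api/issues/search. Here is a reduced sketch of that annotation step with simplified types; the real issue objects carry more fields (rule, severity, status, and so on), and the helper name is hypothetical.

```typescript
// Reduced sketch of the annotation step the getSourceCode tests exercise.

interface IssueLike {
  line?: number;
  message: string;
}

interface AnnotatedLine {
  line: number;
  code: string;
  issues?: IssueLike[];
}

function annotateSource(rawSource: string, issues: IssueLike[]): AnnotatedLine[] {
  return rawSource.split('\n').map((code, index) => {
    const line = index + 1;
    const lineIssues = issues.filter((issue) => issue.line === line);
    // Lines without issues omit the `issues` property, as the tests expect.
    return lineIssues.length > 0 ? { line, code, issues: lineIssues } : { line, code };
  });
}

// annotateSource('function main() {\n  console.log("hi");\n}', [{ line: 2, message: 'Use a logger' }])
// annotates only line 2; lines 1 and 3 have no issues attached.
```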
--------------------------------------------------------------------------------
/src/__tests__/domains/components-domain-full.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
2 | import { ComponentsDomain } from '../../domains/components.js';
3 | describe('ComponentsDomain Full Tests', () => {
4 | let domain: ComponentsDomain;
5 | let mockWebApiClient: any;
6 | const organization = 'test-org';
7 | beforeEach(() => {
8 | // Create mock builders
9 | const mockSearchBuilder = {
10 | query: vi.fn().mockReturnThis(),
11 | qualifiers: vi.fn().mockReturnThis(),
12 | languages: vi.fn().mockReturnThis(),
13 | page: vi.fn().mockReturnThis(),
14 | pageSize: vi.fn().mockReturnThis(),
15 | execute: vi.fn(),
16 | };
17 | const mockTreeBuilder = {
18 | component: vi.fn().mockReturnThis(),
19 | childrenOnly: vi.fn().mockReturnThis(),
20 | leavesOnly: vi.fn().mockReturnThis(),
21 | qualifiers: vi.fn().mockReturnThis(),
22 | sortByName: vi.fn().mockReturnThis(),
23 | sortByPath: vi.fn().mockReturnThis(),
24 | sortByQualifier: vi.fn().mockReturnThis(),
25 | page: vi.fn().mockReturnThis(),
26 | pageSize: vi.fn().mockReturnThis(),
27 | branch: vi.fn().mockReturnThis(),
28 | pullRequest: vi.fn().mockReturnThis(),
29 | execute: vi.fn(),
30 | };
31 | // Create mock web API client
32 | mockWebApiClient = {
33 | components: {
34 | search: vi.fn().mockReturnValue(mockSearchBuilder),
35 | tree: vi.fn().mockReturnValue(mockTreeBuilder),
36 | show: vi.fn(),
37 | },
38 | };
39 | domain = new ComponentsDomain(mockWebApiClient, organization);
40 | });
41 | afterEach(() => {
42 | vi.clearAllMocks();
43 | });
44 | describe('searchComponents', () => {
45 | it('should search components with all parameters', async () => {
46 | const mockResponse = {
47 | components: [
48 | { key: 'comp1', name: 'Component 1', qualifier: 'TRK' },
49 | { key: 'comp2', name: 'Component 2', qualifier: 'FIL' },
50 | ],
51 | paging: { pageIndex: 1, pageSize: 100, total: 2 },
52 | };
53 | const searchBuilder = mockWebApiClient.components.search();
54 | searchBuilder.execute.mockResolvedValue(mockResponse);
55 | const result = await domain.searchComponents({
56 | query: 'test',
57 | qualifiers: ['TRK', 'FIL'],
58 | language: 'java',
59 | page: 2,
60 | pageSize: 50,
61 | });
62 | expect(mockWebApiClient.components.search).toHaveBeenCalled();
63 | expect(searchBuilder.query).toHaveBeenCalledWith('test');
64 | expect(searchBuilder.qualifiers).toHaveBeenCalledWith(['TRK', 'FIL']);
65 | expect(searchBuilder.languages).toHaveBeenCalledWith(['java']);
66 | expect(searchBuilder.page).toHaveBeenCalledWith(2);
67 | expect(searchBuilder.pageSize).toHaveBeenCalledWith(50);
68 | expect(searchBuilder.execute).toHaveBeenCalled();
69 | expect(result).toEqual({
70 | components: mockResponse.components,
71 | paging: mockResponse.paging,
72 | });
73 | });
74 | it('should search components with minimal parameters', async () => {
75 | const mockResponse = {
76 | components: [],
77 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
78 | };
79 | const searchBuilder = mockWebApiClient.components.search();
80 | searchBuilder.execute.mockResolvedValue(mockResponse);
81 | const result = await domain.searchComponents();
82 | expect(mockWebApiClient.components.search).toHaveBeenCalled();
83 | expect(searchBuilder.query).not.toHaveBeenCalled();
84 | expect(searchBuilder.qualifiers).not.toHaveBeenCalled();
85 | expect(searchBuilder.languages).not.toHaveBeenCalled();
86 | expect(searchBuilder.execute).toHaveBeenCalled();
87 | expect(result).toEqual({
88 | components: [],
89 | paging: mockResponse.paging,
90 | });
91 | });
92 | it('should limit page size to maximum of 500', async () => {
93 | const mockResponse = {
94 | components: [],
95 | paging: { pageIndex: 1, pageSize: 500, total: 0 },
96 | };
97 | const searchBuilder = mockWebApiClient.components.search();
98 | searchBuilder.execute.mockResolvedValue(mockResponse);
99 | await domain.searchComponents({ pageSize: 1000 });
100 | expect(searchBuilder.pageSize).toHaveBeenCalledWith(500);
101 | });
102 | it('should handle search errors', async () => {
103 | const searchBuilder = mockWebApiClient.components.search();
104 | searchBuilder.execute.mockRejectedValue(new Error('Search failed'));
105 | await expect(domain.searchComponents({ query: 'test' })).rejects.toThrow('Search failed');
106 | });
107 | it('should handle missing paging in response', async () => {
108 | const mockResponse = {
109 | components: [{ key: 'comp1', name: 'Component 1', qualifier: 'TRK' }],
110 | // paging is missing
111 | };
112 | const searchBuilder = mockWebApiClient.components.search();
113 | searchBuilder.execute.mockResolvedValue(mockResponse);
114 | const result = await domain.searchComponents();
115 | expect(result.paging).toEqual({
116 | pageIndex: 1,
117 | pageSize: 100,
118 | total: 1,
119 | });
120 | });
121 | });
122 | describe('getComponentTree', () => {
123 | it('should get component tree with all parameters', async () => {
124 | const mockResponse = {
125 | components: [
126 | { key: 'dir1', name: 'Directory 1', qualifier: 'DIR' },
127 | { key: 'file1', name: 'File 1', qualifier: 'FIL' },
128 | ],
129 | baseComponent: { key: 'project1', name: 'Project 1', qualifier: 'TRK' },
130 | paging: { pageIndex: 1, pageSize: 100, total: 2 },
131 | };
132 | const treeBuilder = mockWebApiClient.components.tree();
133 | treeBuilder.execute.mockResolvedValue(mockResponse);
134 | const result = await domain.getComponentTree({
135 | component: 'project1',
136 | strategy: 'children',
137 | qualifiers: ['DIR', 'FIL'],
138 | sort: 'name',
139 | asc: true,
140 | page: 1,
141 | pageSize: 50,
142 | branch: 'develop',
143 | pullRequest: 'PR-123',
144 | });
145 | expect(mockWebApiClient.components.tree).toHaveBeenCalled();
146 | expect(treeBuilder.component).toHaveBeenCalledWith('project1');
147 | expect(treeBuilder.childrenOnly).toHaveBeenCalled();
148 | expect(treeBuilder.qualifiers).toHaveBeenCalledWith(['DIR', 'FIL']);
149 | expect(treeBuilder.sortByName).toHaveBeenCalled();
150 | expect(treeBuilder.page).toHaveBeenCalledWith(1);
151 | expect(treeBuilder.pageSize).toHaveBeenCalledWith(50);
152 | expect(treeBuilder.branch).toHaveBeenCalledWith('develop');
153 | expect(treeBuilder.pullRequest).toHaveBeenCalledWith('PR-123');
154 | expect(result).toEqual({
155 | components: mockResponse.components,
156 | baseComponent: mockResponse.baseComponent,
157 | paging: mockResponse.paging,
158 | });
159 | });
160 | it('should handle leaves strategy', async () => {
161 | const mockResponse = {
162 | components: [],
163 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
164 | };
165 | const treeBuilder = mockWebApiClient.components.tree();
166 | treeBuilder.execute.mockResolvedValue(mockResponse);
167 | await domain.getComponentTree({
168 | component: 'project1',
169 | strategy: 'leaves',
170 | });
171 | expect(treeBuilder.leavesOnly).toHaveBeenCalled();
172 | expect(treeBuilder.childrenOnly).not.toHaveBeenCalled();
173 | });
174 | it('should handle all strategy', async () => {
175 | const mockResponse = {
176 | components: [],
177 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
178 | };
179 | const treeBuilder = mockWebApiClient.components.tree();
180 | treeBuilder.execute.mockResolvedValue(mockResponse);
181 | await domain.getComponentTree({
182 | component: 'project1',
183 | strategy: 'all',
184 | });
185 | expect(treeBuilder.childrenOnly).not.toHaveBeenCalled();
186 | expect(treeBuilder.leavesOnly).not.toHaveBeenCalled();
187 | });
188 | it('should sort by path', async () => {
189 | const mockResponse = {
190 | components: [],
191 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
192 | };
193 | const treeBuilder = mockWebApiClient.components.tree();
194 | treeBuilder.execute.mockResolvedValue(mockResponse);
195 | await domain.getComponentTree({
196 | component: 'project1',
197 | sort: 'path',
198 | });
199 | expect(treeBuilder.sortByPath).toHaveBeenCalled();
200 | });
201 | it('should sort by qualifier', async () => {
202 | const mockResponse = {
203 | components: [],
204 | paging: { pageIndex: 1, pageSize: 100, total: 0 },
205 | };
206 | const treeBuilder = mockWebApiClient.components.tree();
207 | treeBuilder.execute.mockResolvedValue(mockResponse);
208 | await domain.getComponentTree({
209 | component: 'project1',
210 | sort: 'qualifier',
211 | });
212 | expect(treeBuilder.sortByQualifier).toHaveBeenCalled();
213 | });
214 | it('should limit page size to maximum of 500', async () => {
215 | const mockResponse = {
216 | components: [],
217 | paging: { pageIndex: 1, pageSize: 500, total: 0 },
218 | };
219 | const treeBuilder = mockWebApiClient.components.tree();
220 | treeBuilder.execute.mockResolvedValue(mockResponse);
221 | await domain.getComponentTree({
222 | component: 'project1',
223 | pageSize: 1000,
224 | });
225 | expect(treeBuilder.pageSize).toHaveBeenCalledWith(500);
226 | });
227 | it('should handle tree errors', async () => {
228 | const treeBuilder = mockWebApiClient.components.tree();
229 | treeBuilder.execute.mockRejectedValue(new Error('Tree failed'));
230 | await expect(domain.getComponentTree({ component: 'project1' })).rejects.toThrow(
231 | 'Tree failed'
232 | );
233 | });
234 | });
235 | describe('showComponent', () => {
236 | it('should show component details', async () => {
237 | const mockResponse = {
238 | component: { key: 'comp1', name: 'Component 1', qualifier: 'FIL' },
239 | ancestors: [
240 | { key: 'proj1', name: 'Project 1', qualifier: 'TRK' },
241 | { key: 'dir1', name: 'Directory 1', qualifier: 'DIR' },
242 | ],
243 | };
244 | mockWebApiClient.components.show.mockResolvedValue(mockResponse);
245 | const result = await domain.showComponent('comp1');
246 | expect(mockWebApiClient.components.show).toHaveBeenCalledWith('comp1');
247 | expect(result).toEqual({
248 | component: mockResponse.component,
249 | ancestors: mockResponse.ancestors,
250 | });
251 | });
252 | it('should show component with branch and PR (though not supported by API)', async () => {
253 | const mockResponse = {
254 | component: { key: 'comp1', name: 'Component 1', qualifier: 'FIL' },
255 | ancestors: [],
256 | };
257 | mockWebApiClient.components.show.mockResolvedValue(mockResponse);
258 | const result = await domain.showComponent('comp1', 'develop', 'PR-123');
259 | // Note: branch and pullRequest are not passed to API as it doesn't support them
260 | expect(mockWebApiClient.components.show).toHaveBeenCalledWith('comp1');
261 | expect(result).toEqual({
262 | component: mockResponse.component,
263 | ancestors: [],
264 | });
265 | });
266 | it('should handle missing ancestors', async () => {
267 | const mockResponse = {
268 | component: { key: 'comp1', name: 'Component 1', qualifier: 'FIL' },
269 | // ancestors is missing
270 | };
271 | mockWebApiClient.components.show.mockResolvedValue(mockResponse);
272 | const result = await domain.showComponent('comp1');
273 | expect(result.ancestors).toEqual([]);
274 | });
275 | it('should handle show errors', async () => {
276 | mockWebApiClient.components.show.mockRejectedValue(new Error('Show failed'));
277 | await expect(domain.showComponent('comp1')).rejects.toThrow('Show failed');
278 | });
279 | });
280 | describe('transformComponent', () => {
281 | it('should transform component with all fields', async () => {
282 | const mockResponse = {
283 | components: [
284 | {
285 | key: 'comp1',
286 | name: 'Component 1',
287 | qualifier: 'FIL',
288 | path: '/src/file.js',
289 | longName: 'Project :: src/file.js',
290 | enabled: true,
291 | },
292 | ],
293 | };
294 | const searchBuilder = mockWebApiClient.components.search();
295 | searchBuilder.execute.mockResolvedValue(mockResponse);
296 | const result = await domain.searchComponents();
297 | expect(result.components[0]).toEqual({
298 | key: 'comp1',
299 | name: 'Component 1',
300 | qualifier: 'FIL',
301 | path: '/src/file.js',
302 | longName: 'Project :: src/file.js',
303 | enabled: true,
304 | });
305 | });
306 | it('should transform component with minimal fields', async () => {
307 | const mockResponse = {
308 | components: [
309 | {
310 | key: 'comp1',
311 | name: 'Component 1',
312 | qualifier: 'TRK',
313 | // optional fields missing
314 | },
315 | ],
316 | };
317 | const searchBuilder = mockWebApiClient.components.search();
318 | searchBuilder.execute.mockResolvedValue(mockResponse);
319 | const result = await domain.searchComponents();
320 | expect(result.components[0]).toEqual({
321 | key: 'comp1',
322 | name: 'Component 1',
323 | qualifier: 'TRK',
324 | path: undefined,
325 | longName: undefined,
326 | enabled: undefined,
327 | });
328 | });
329 | });
330 | });
331 |
```
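Taken together, the ComponentsDomain tests above fix three rules for configuring the tree builder: `strategy` maps to `childrenOnly()`/`leavesOnly()` (with 'all' leaving both unset), `sort` maps to the corresponding `sortBy*()` method, and `pageSize` is clamped to 500. The sketch below restates that mapping against the builder surface the tests mock; the helper name `applyTreeOptions` is hypothetical.

```typescript
// Sketch of the builder-configuration rules the ComponentsDomain tests assert.
// The builder interface is reduced to the methods the tests mock.

interface TreeBuilderLike {
  childrenOnly(): this;
  leavesOnly(): this;
  sortByName(): this;
  sortByPath(): this;
  sortByQualifier(): this;
  pageSize(size: number): this;
}

interface TreeOptions {
  strategy?: 'children' | 'leaves' | 'all';
  sort?: 'name' | 'path' | 'qualifier';
  pageSize?: number;
}

const MAX_PAGE_SIZE = 500;

function applyTreeOptions(builder: TreeBuilderLike, opts: TreeOptions): TreeBuilderLike {
  if (opts.strategy === 'children') builder.childrenOnly();
  else if (opts.strategy === 'leaves') builder.leavesOnly();
  // 'all' (or an omitted strategy) sets neither filter.

  if (opts.sort === 'name') builder.sortByName();
  else if (opts.sort === 'path') builder.sortByPath();
  else if (opts.sort === 'qualifier') builder.sortByQualifier();

  if (opts.pageSize !== undefined) builder.pageSize(Math.min(opts.pageSize, MAX_PAGE_SIZE));
  return builder;
}
```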