This is page 5 of 5. Use http://codebase.md/stefan-nitu/mcp-xcode-server?lines=true&page={x} to view the full context. # Directory Structure ``` ├── .claude │ └── settings.local.json ├── .github │ └── workflows │ └── ci.yml ├── .gitignore ├── .vscode │ └── settings.json ├── CLAUDE.md ├── CONTRIBUTING.md ├── docs │ ├── ARCHITECTURE.md │ ├── ERROR-HANDLING.md │ └── TESTING-PHILOSOPHY.md ├── examples │ └── screenshot-demo.js ├── jest.config.cjs ├── jest.e2e.config.cjs ├── LICENSE ├── package-lock.json ├── package.json ├── README.md ├── scripts │ └── xcode-sync.swift ├── src │ ├── application │ │ └── ports │ │ ├── ArtifactPorts.ts │ │ ├── BuildPorts.ts │ │ ├── CommandPorts.ts │ │ ├── ConfigPorts.ts │ │ ├── LoggingPorts.ts │ │ ├── MappingPorts.ts │ │ ├── OutputFormatterPorts.ts │ │ ├── OutputParserPorts.ts │ │ └── SimulatorPorts.ts │ ├── cli.ts │ ├── config.ts │ ├── domain │ │ ├── errors │ │ │ └── DomainError.ts │ │ ├── services │ │ │ └── PlatformDetector.ts │ │ ├── shared │ │ │ └── Result.ts │ │ └── tests │ │ └── unit │ │ └── PlatformDetector.unit.test.ts │ ├── features │ │ ├── app-management │ │ │ ├── controllers │ │ │ │ └── InstallAppController.ts │ │ │ ├── domain │ │ │ │ ├── InstallRequest.ts │ │ │ │ └── InstallResult.ts │ │ │ ├── factories │ │ │ │ └── InstallAppControllerFactory.ts │ │ │ ├── index.ts │ │ │ ├── infrastructure │ │ │ │ └── AppInstallerAdapter.ts │ │ │ ├── tests │ │ │ │ ├── e2e │ │ │ │ │ ├── InstallAppController.e2e.test.ts │ │ │ │ │ └── InstallAppMCP.e2e.test.ts │ │ │ │ ├── integration │ │ │ │ │ └── InstallAppController.integration.test.ts │ │ │ │ └── unit │ │ │ │ ├── AppInstallerAdapter.unit.test.ts │ │ │ │ ├── InstallAppController.unit.test.ts │ │ │ │ ├── InstallAppUseCase.unit.test.ts │ │ │ │ ├── InstallRequest.unit.test.ts │ │ │ │ └── InstallResult.unit.test.ts │ │ │ └── use-cases │ │ │ └── InstallAppUseCase.ts │ │ ├── build │ │ │ ├── controllers │ │ │ │ └── BuildXcodeController.ts │ │ │ ├── domain │ │ │ │ ├── BuildDestination.ts │ │ │ │ ├── 
BuildIssue.ts │ │ │ │ ├── BuildRequest.ts │ │ │ │ ├── BuildResult.ts │ │ │ │ └── PlatformInfo.ts │ │ │ ├── factories │ │ │ │ └── BuildXcodeControllerFactory.ts │ │ │ ├── index.ts │ │ │ ├── infrastructure │ │ │ │ ├── BuildArtifactLocatorAdapter.ts │ │ │ │ ├── BuildDestinationMapperAdapter.ts │ │ │ │ ├── XcbeautifyFormatterAdapter.ts │ │ │ │ ├── XcbeautifyOutputParserAdapter.ts │ │ │ │ └── XcodeBuildCommandAdapter.ts │ │ │ ├── tests │ │ │ │ ├── e2e │ │ │ │ │ ├── BuildXcodeController.e2e.test.ts │ │ │ │ │ └── BuildXcodeMCP.e2e.test.ts │ │ │ │ ├── integration │ │ │ │ │ └── BuildXcodeController.integration.test.ts │ │ │ │ └── unit │ │ │ │ ├── BuildArtifactLocatorAdapter.unit.test.ts │ │ │ │ ├── BuildDestinationMapperAdapter.unit.test.ts │ │ │ │ ├── BuildIssue.unit.test.ts │ │ │ │ ├── BuildProjectUseCase.unit.test.ts │ │ │ │ ├── BuildRequest.unit.test.ts │ │ │ │ ├── BuildResult.unit.test.ts │ │ │ │ ├── BuildXcodeController.unit.test.ts │ │ │ │ ├── BuildXcodePresenter.unit.test.ts │ │ │ │ ├── PlatformInfo.unit.test.ts │ │ │ │ ├── XcbeautifyFormatterAdapter.unit.test.ts │ │ │ │ ├── XcbeautifyOutputParserAdapter.unit.test.ts │ │ │ │ └── XcodeBuildCommandAdapter.unit.test.ts │ │ │ └── use-cases │ │ │ └── BuildProjectUseCase.ts │ │ └── simulator │ │ ├── controllers │ │ │ ├── BootSimulatorController.ts │ │ │ ├── ListSimulatorsController.ts │ │ │ └── ShutdownSimulatorController.ts │ │ ├── domain │ │ │ ├── BootRequest.ts │ │ │ ├── BootResult.ts │ │ │ ├── ListSimulatorsRequest.ts │ │ │ ├── ListSimulatorsResult.ts │ │ │ ├── ShutdownRequest.ts │ │ │ ├── ShutdownResult.ts │ │ │ └── SimulatorState.ts │ │ ├── factories │ │ │ ├── BootSimulatorControllerFactory.ts │ │ │ ├── ListSimulatorsControllerFactory.ts │ │ │ └── ShutdownSimulatorControllerFactory.ts │ │ ├── index.ts │ │ ├── infrastructure │ │ │ ├── SimulatorControlAdapter.ts │ │ │ └── SimulatorLocatorAdapter.ts │ │ ├── tests │ │ │ ├── e2e │ │ │ │ ├── BootSimulatorController.e2e.test.ts │ │ │ │ ├── BootSimulatorMCP.e2e.test.ts │ │ 
│ │ ├── ListSimulatorsController.e2e.test.ts │ │ │ │ ├── ListSimulatorsMCP.e2e.test.ts │ │ │ │ ├── ShutdownSimulatorController.e2e.test.ts │ │ │ │ └── ShutdownSimulatorMCP.e2e.test.ts │ │ │ ├── integration │ │ │ │ ├── BootSimulatorController.integration.test.ts │ │ │ │ ├── ListSimulatorsController.integration.test.ts │ │ │ │ └── ShutdownSimulatorController.integration.test.ts │ │ │ └── unit │ │ │ ├── BootRequest.unit.test.ts │ │ │ ├── BootResult.unit.test.ts │ │ │ ├── BootSimulatorController.unit.test.ts │ │ │ ├── BootSimulatorUseCase.unit.test.ts │ │ │ ├── ListSimulatorsController.unit.test.ts │ │ │ ├── ListSimulatorsUseCase.unit.test.ts │ │ │ ├── ShutdownRequest.unit.test.ts │ │ │ ├── ShutdownResult.unit.test.ts │ │ │ ├── ShutdownSimulatorUseCase.unit.test.ts │ │ │ ├── SimulatorControlAdapter.unit.test.ts │ │ │ └── SimulatorLocatorAdapter.unit.test.ts │ │ └── use-cases │ │ ├── BootSimulatorUseCase.ts │ │ ├── ListSimulatorsUseCase.ts │ │ └── ShutdownSimulatorUseCase.ts │ ├── index.ts │ ├── infrastructure │ │ ├── repositories │ │ │ └── DeviceRepository.ts │ │ ├── services │ │ │ └── DependencyChecker.ts │ │ └── tests │ │ └── unit │ │ ├── DependencyChecker.unit.test.ts │ │ └── DeviceRepository.unit.test.ts │ ├── logger.ts │ ├── presentation │ │ ├── decorators │ │ │ └── DependencyCheckingDecorator.ts │ │ ├── formatters │ │ │ ├── ErrorFormatter.ts │ │ │ └── strategies │ │ │ ├── BuildIssuesStrategy.ts │ │ │ ├── DefaultErrorStrategy.ts │ │ │ ├── ErrorFormattingStrategy.ts │ │ │ └── OutputFormatterErrorStrategy.ts │ │ ├── interfaces │ │ │ ├── IDependencyChecker.ts │ │ │ ├── MCPController.ts │ │ │ └── MCPResponse.ts │ │ ├── presenters │ │ │ └── BuildXcodePresenter.ts │ │ └── tests │ │ └── unit │ │ ├── BuildIssuesStrategy.unit.test.ts │ │ ├── DefaultErrorStrategy.unit.test.ts │ │ ├── DependencyCheckingDecorator.unit.test.ts │ │ └── ErrorFormatter.unit.test.ts │ ├── shared │ │ ├── domain │ │ │ ├── AppPath.ts │ │ │ ├── DeviceId.ts │ │ │ ├── Platform.ts │ │ │ └── 
ProjectPath.ts │ │ ├── index.ts │ │ ├── infrastructure │ │ │ ├── ConfigProviderAdapter.ts │ │ │ └── ShellCommandExecutorAdapter.ts │ │ └── tests │ │ ├── mocks │ │ │ ├── promisifyExec.ts │ │ │ ├── selectiveExecMock.ts │ │ │ └── xcodebuildHelpers.ts │ │ ├── skipped │ │ │ ├── cli.e2e.test.skip │ │ │ ├── hook-e2e.test.skip │ │ │ ├── hook-path.e2e.test.skip │ │ │ └── hook.test.skip │ │ ├── types │ │ │ └── execTypes.ts │ │ ├── unit │ │ │ ├── AppPath.unit.test.ts │ │ │ ├── ConfigProviderAdapter.unit.test.ts │ │ │ ├── ProjectPath.unit.test.ts │ │ │ └── ShellCommandExecutorAdapter.unit.test.ts │ │ └── utils │ │ ├── gitResetTestArtifacts.ts │ │ ├── mockHelpers.ts │ │ ├── TestEnvironmentCleaner.ts │ │ ├── TestErrorInjector.ts │ │ ├── testHelpers.ts │ │ ├── TestProjectManager.ts │ │ └── TestSimulatorManager.ts │ ├── types.ts │ ├── utils │ │ ├── devices │ │ │ ├── Devices.ts │ │ │ ├── SimulatorApps.ts │ │ │ ├── SimulatorBoot.ts │ │ │ ├── SimulatorDevice.ts │ │ │ ├── SimulatorInfo.ts │ │ │ ├── SimulatorReset.ts │ │ │ └── SimulatorUI.ts │ │ ├── errors │ │ │ ├── index.ts │ │ │ └── xcbeautify-parser.ts │ │ ├── index.ts │ │ ├── LogManager.ts │ │ ├── LogManagerInstance.ts │ │ └── projects │ │ ├── SwiftBuild.ts │ │ ├── SwiftPackage.ts │ │ ├── SwiftPackageInfo.ts │ │ ├── Xcode.ts │ │ ├── XcodeArchive.ts │ │ ├── XcodeBuild.ts │ │ ├── XcodeErrors.ts │ │ ├── XcodeInfo.ts │ │ └── XcodeProject.ts │ └── utils.ts ├── test_artifacts │ ├── Test.xcworkspace │ │ ├── contents.xcworkspacedata │ │ └── xcuserdata │ │ └── stefan.xcuserdatad │ │ └── UserInterfaceState.xcuserstate │ ├── TestProjectSwiftTesting │ │ ├── TestProjectSwiftTesting │ │ │ ├── Assets.xcassets │ │ │ │ ├── AccentColor.colorset │ │ │ │ │ └── Contents.json │ │ │ │ ├── AppIcon.appiconset │ │ │ │ │ └── Contents.json │ │ │ │ └── Contents.json │ │ │ ├── ContentView.swift │ │ │ ├── Item.swift │ │ │ ├── TestProjectSwiftTesting.entitlements │ │ │ └── TestProjectSwiftTestingApp.swift │ │ ├── TestProjectSwiftTesting.xcodeproj │ │ │ ├── 
project.pbxproj │ │ │ ├── project.xcworkspace │ │ │ │ ├── contents.xcworkspacedata │ │ │ │ └── xcuserdata │ │ │ │ └── stefan.xcuserdatad │ │ │ │ └── UserInterfaceState.xcuserstate │ │ │ └── xcuserdata │ │ │ └── stefan.xcuserdatad │ │ │ └── xcschemes │ │ │ └── xcschememanagement.plist │ │ ├── TestProjectSwiftTestingTests │ │ │ └── TestProjectSwiftTestingTests.swift │ │ └── TestProjectSwiftTestingUITests │ │ ├── TestProjectSwiftTestingUITests.swift │ │ └── TestProjectSwiftTestingUITestsLaunchTests.swift │ ├── TestProjectWatchOS │ │ ├── TestProjectWatchOS │ │ │ ├── Assets.xcassets │ │ │ │ ├── AccentColor.colorset │ │ │ │ │ └── Contents.json │ │ │ │ ├── AppIcon.appiconset │ │ │ │ │ └── Contents.json │ │ │ │ └── Contents.json │ │ │ ├── ContentView.swift │ │ │ └── TestProjectWatchOSApp.swift │ │ ├── TestProjectWatchOS Watch App │ │ │ ├── Assets.xcassets │ │ │ │ ├── AccentColor.colorset │ │ │ │ │ └── Contents.json │ │ │ │ ├── AppIcon.appiconset │ │ │ │ │ └── Contents.json │ │ │ │ └── Contents.json │ │ │ ├── ContentView.swift │ │ │ └── TestProjectWatchOSApp.swift │ │ ├── TestProjectWatchOS Watch AppTests │ │ │ └── TestProjectWatchOS_Watch_AppTests.swift │ │ ├── TestProjectWatchOS Watch AppUITests │ │ │ ├── TestProjectWatchOS_Watch_AppUITests.swift │ │ │ └── TestProjectWatchOS_Watch_AppUITestsLaunchTests.swift │ │ ├── TestProjectWatchOS.xcodeproj │ │ │ ├── project.pbxproj │ │ │ └── project.xcworkspace │ │ │ └── contents.xcworkspacedata │ │ ├── TestProjectWatchOSTests │ │ │ └── TestProjectWatchOSTests.swift │ │ └── TestProjectWatchOSUITests │ │ ├── TestProjectWatchOSUITests.swift │ │ └── TestProjectWatchOSUITestsLaunchTests.swift │ ├── TestProjectXCTest │ │ ├── TestProjectXCTest │ │ │ ├── Assets.xcassets │ │ │ │ ├── AccentColor.colorset │ │ │ │ │ └── Contents.json │ │ │ │ ├── AppIcon.appiconset │ │ │ │ │ └── Contents.json │ │ │ │ └── Contents.json │ │ │ ├── ContentView.swift │ │ │ ├── Item.swift │ │ │ ├── TestProjectXCTest.entitlements │ │ │ └── TestProjectXCTestApp.swift │ 
│ ├── TestProjectXCTest.xcodeproj │ │ │ ├── project.pbxproj │ │ │ ├── project.xcworkspace │ │ │ │ ├── contents.xcworkspacedata │ │ │ │ └── xcuserdata │ │ │ │ └── stefan.xcuserdatad │ │ │ │ └── UserInterfaceState.xcuserstate │ │ │ └── xcuserdata │ │ │ └── stefan.xcuserdatad │ │ │ └── xcschemes │ │ │ └── xcschememanagement.plist │ │ ├── TestProjectXCTestTests │ │ │ └── TestProjectXCTestTests.swift │ │ └── TestProjectXCTestUITests │ │ ├── TestProjectXCTestUITests.swift │ │ └── TestProjectXCTestUITestsLaunchTests.swift │ ├── TestSwiftPackageSwiftTesting │ │ ├── .gitignore │ │ ├── Package.swift │ │ ├── Sources │ │ │ ├── TestSwiftPackageSwiftTesting │ │ │ │ └── TestSwiftPackageSwiftTesting.swift │ │ │ └── TestSwiftPackageSwiftTestingExecutable │ │ │ └── main.swift │ │ └── Tests │ │ └── TestSwiftPackageSwiftTestingTests │ │ └── TestSwiftPackageSwiftTestingTests.swift │ └── TestSwiftPackageXCTest │ ├── .gitignore │ ├── Package.swift │ ├── Sources │ │ ├── TestSwiftPackageXCTest │ │ │ └── TestSwiftPackageXCTest.swift │ │ └── TestSwiftPackageXCTestExecutable │ │ └── main.swift │ └── Tests │ └── TestSwiftPackageXCTestTests │ └── TestSwiftPackageXCTestTests.swift ├── tsconfig.json └── XcodeProjectModifier ├── Package.resolved ├── Package.swift └── Sources └── XcodeProjectModifier └── main.swift ``` # Files -------------------------------------------------------------------------------- /src/utils/projects/XcodeBuild.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { execAsync } from '../../utils.js'; 2 | import { execSync } from 'child_process'; 3 | import { createModuleLogger } from '../../logger.js'; 4 | import { Platform } from '../../types.js'; 5 | import { PlatformInfo } from '../../features/build/domain/PlatformInfo.js'; 6 | import { existsSync, mkdirSync, rmSync } from 'fs'; 7 | import path from 'path'; 8 | import { config } from '../../config.js'; 9 | import { LogManager } from '../LogManager.js'; 10 | import 
{ parseXcbeautifyOutput, formatParsedOutput } from '../errors/xcbeautify-parser.js'; 11 | 12 | const logger = createModuleLogger('XcodeBuild'); 13 | const logManager = new LogManager(); 14 | 15 | export interface BuildOptions { 16 | scheme?: string; 17 | configuration?: string; 18 | platform?: Platform; 19 | deviceId?: string; 20 | derivedDataPath?: string; 21 | } 22 | 23 | export interface TestOptions { 24 | scheme?: string; 25 | configuration?: string; 26 | platform?: Platform; 27 | deviceId?: string; 28 | testFilter?: string; 29 | testTarget?: string; 30 | } 31 | 32 | // Using unified xcbeautify parser for all error handling 33 | 34 | /** 35 | * Handles xcodebuild commands for Xcode projects 36 | */ 37 | export class XcodeBuild { 38 | /** 39 | * Validates that a scheme supports the requested platform 40 | * @throws Error if the platform is not supported 41 | */ 42 | private async validatePlatformSupport( 43 | projectPath: string, 44 | isWorkspace: boolean, 45 | scheme: string | undefined, 46 | platform: Platform 47 | ): Promise<void> { 48 | const projectFlag = isWorkspace ? 
'-workspace' : '-project'; 49 | 50 | let command = `xcodebuild -showBuildSettings ${projectFlag} "${projectPath}"`; 51 | 52 | if (scheme) { 53 | command += ` -scheme "${scheme}"`; 54 | } 55 | 56 | // Use a generic destination to check platform support 57 | const platformInfo = PlatformInfo.fromPlatform(platform); 58 | const destination = platformInfo.generateGenericDestination(); 59 | command += ` -destination '${destination}'`; 60 | 61 | try { 62 | logger.debug({ command }, 'Validating platform support'); 63 | // Just check if the command succeeds - we don't need the output 64 | await execAsync(command, { 65 | maxBuffer: 1024 * 1024, // 1MB is enough for validation 66 | timeout: 10000 // 10 second timeout for validation 67 | }); 68 | logger.debug({ platform, scheme }, 'Platform validation succeeded'); 69 | } catch (error: any) { 70 | const stderr = error.stderr || ''; 71 | const stdout = error.stdout || ''; 72 | 73 | // Check if error indicates platform mismatch 74 | if (stderr.includes('Available destinations for') || stdout.includes('Available destinations for')) { 75 | // Extract available platforms from the error message 76 | const availablePlatforms = this.extractAvailablePlatforms(stderr + stdout); 77 | throw new Error( 78 | `Platform '${platform}' is not supported by scheme '${scheme || 'default'}'. 
` + 79 | `Available platforms: ${availablePlatforms.join(', ')}` 80 | ); 81 | } 82 | 83 | // Some other error - let it pass through for now 84 | logger.warn({ error: error.message }, 'Platform validation check failed, continuing anyway'); 85 | } 86 | } 87 | 88 | /** 89 | * Extracts available platforms from xcodebuild error output 90 | */ 91 | private extractAvailablePlatforms(output: string): string[] { 92 | const platforms = new Set<string>(); 93 | const lines = output.split('\n'); 94 | 95 | for (const line of lines) { 96 | // Look for lines like "{ platform:watchOS" or "{ platform:iOS Simulator" 97 | const match = line.match(/\{ platform:([^,}]+)/); 98 | if (match) { 99 | let platform = match[1].trim(); 100 | // Normalize platform names 101 | if (platform.includes('Simulator')) { 102 | platform = platform.replace(' Simulator', ''); 103 | } 104 | platforms.add(platform); 105 | } 106 | } 107 | 108 | return Array.from(platforms); 109 | } 110 | /** 111 | * Build an Xcode project or workspace 112 | */ 113 | async build( 114 | projectPath: string, 115 | isWorkspace: boolean, 116 | options: BuildOptions = {} 117 | ): Promise<{ success: boolean; output: string; appPath?: string; logPath?: string; errors?: any[] }> { 118 | const { 119 | scheme, 120 | configuration = 'Debug', 121 | platform = Platform.iOS, 122 | deviceId, 123 | derivedDataPath = './DerivedData' 124 | } = options; 125 | 126 | // Validate platform support first 127 | await this.validatePlatformSupport(projectPath, isWorkspace, scheme, platform); 128 | 129 | const projectFlag = isWorkspace ? 
'-workspace' : '-project'; 130 | let command = `xcodebuild ${projectFlag} "${projectPath}"`; 131 | 132 | if (scheme) { 133 | command += ` -scheme "${scheme}"`; 134 | } 135 | 136 | command += ` -configuration "${configuration}"`; 137 | 138 | // Determine destination 139 | const platformInfo = PlatformInfo.fromPlatform(platform); 140 | let destination: string; 141 | if (deviceId) { 142 | destination = platformInfo.generateDestination(deviceId); 143 | } else { 144 | destination = platformInfo.generateGenericDestination(); 145 | } 146 | command += ` -destination '${destination}'`; 147 | 148 | command += ` -derivedDataPath "${derivedDataPath}" build`; 149 | 150 | // Pipe through xcbeautify for clean output 151 | command = `set -o pipefail && ${command} 2>&1 | xcbeautify`; 152 | 153 | logger.debug({ command }, 'Build command'); 154 | 155 | let output = ''; 156 | let exitCode = 0; 157 | const projectName = path.basename(projectPath, path.extname(projectPath)); 158 | 159 | try { 160 | const { stdout, stderr } = await execAsync(command, { 161 | maxBuffer: 50 * 1024 * 1024, 162 | shell: '/bin/bash' 163 | }); 164 | 165 | output = stdout + (stderr ? 
`\n${stderr}` : ''); 166 | 167 | // Try to find the built app using find command (more reliable than parsing output) 168 | let appPath: string | undefined; 169 | try { 170 | const { stdout: findOutput } = await execAsync( 171 | `find "${derivedDataPath}" -name "*.app" -type d | head -1` 172 | ); 173 | appPath = findOutput.trim() || undefined; 174 | 175 | if (appPath) { 176 | logger.info({ appPath }, 'Found app at path'); 177 | 178 | // Verify the app actually exists 179 | if (!existsSync(appPath)) { 180 | logger.error({ appPath }, 'App path does not exist!'); 181 | appPath = undefined; 182 | } 183 | } else { 184 | logger.warn({ derivedDataPath }, 'No app found in DerivedData'); 185 | } 186 | } catch (error: any) { 187 | logger.error({ error: error.message, derivedDataPath }, 'Error finding app path'); 188 | } 189 | 190 | logger.info({ projectPath, scheme, configuration, platform }, 'Build succeeded'); 191 | 192 | // Save the build output to logs 193 | const logPath = logManager.saveLog('build', output, projectName, { 194 | scheme, 195 | configuration, 196 | platform, 197 | exitCode, 198 | command 199 | }); 200 | 201 | return { 202 | success: true, 203 | output, 204 | appPath, 205 | logPath 206 | }; 207 | } catch (error: any) { 208 | logger.error({ error: error.message, projectPath }, 'Build failed'); 209 | 210 | output = (error.stdout || '') + (error.stderr ? 
`\n${error.stderr}` : ''); 211 | exitCode = error.code || 1; 212 | 213 | // Parse errors using the unified xcbeautify parser 214 | const parsed = parseXcbeautifyOutput(output); 215 | 216 | // Log for debugging 217 | if (parsed.errors.length === 0 && output.toLowerCase().includes('error:')) { 218 | logger.warn({ outputSample: output.substring(0, 500) }, 'Output contains "error:" but no errors were parsed'); 219 | } 220 | 221 | // Save the build output to logs 222 | const logPath = logManager.saveLog('build', output, projectName, { 223 | scheme, 224 | configuration, 225 | platform, 226 | exitCode, 227 | command, 228 | errors: parsed.errors, 229 | warnings: parsed.warnings 230 | }); 231 | 232 | // Save debug data with parsed errors 233 | if (parsed.errors.length > 0) { 234 | logManager.saveDebugData('build-errors', parsed.errors, projectName); 235 | logger.info({ errorCount: parsed.errors.length, warningCount: parsed.warnings.length }, 'Parsed errors'); 236 | } 237 | 238 | // Create error with parsed details 239 | const errorWithDetails = new Error(formatParsedOutput(parsed)) as any; 240 | errorWithDetails.output = output; 241 | errorWithDetails.parsed = parsed; 242 | errorWithDetails.logPath = logPath; 243 | 244 | throw errorWithDetails; 245 | } 246 | } 247 | 248 | /** 249 | * Run tests for an Xcode project 250 | */ 251 | async test( 252 | projectPath: string, 253 | isWorkspace: boolean, 254 | options: TestOptions = {} 255 | ): Promise<{ 256 | success: boolean; 257 | output: string; 258 | passed: number; 259 | failed: number; 260 | failingTests?: Array<{ identifier: string; reason: string }>; 261 | errors?: any[]; 262 | warnings?: any[]; 263 | logPath: string; 264 | }> { 265 | const { 266 | scheme, 267 | configuration = 'Debug', 268 | platform = Platform.iOS, 269 | deviceId, 270 | testFilter, 271 | testTarget 272 | } = options; 273 | 274 | // Create a unique result bundle path in DerivedData 275 | const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); 276 
| const derivedDataPath = config.getDerivedDataPath(projectPath); 277 | let resultBundlePath = path.join( 278 | derivedDataPath, 279 | 'Logs', 280 | 'Test', 281 | `Test-${scheme || 'tests'}-${timestamp}.xcresult` 282 | ); 283 | 284 | // Ensure result directory exists 285 | const resultDir = path.dirname(resultBundlePath); 286 | if (!existsSync(resultDir)) { 287 | mkdirSync(resultDir, { recursive: true }); 288 | } 289 | 290 | const projectFlag = isWorkspace ? '-workspace' : '-project'; 291 | let command = `xcodebuild ${projectFlag} "${projectPath}"`; 292 | 293 | if (scheme) { 294 | command += ` -scheme "${scheme}"`; 295 | } 296 | 297 | command += ` -configuration "${configuration}"`; 298 | 299 | // Determine destination 300 | const platformInfo = PlatformInfo.fromPlatform(platform); 301 | let destination: string; 302 | if (deviceId) { 303 | destination = platformInfo.generateDestination(deviceId); 304 | } else { 305 | destination = platformInfo.generateGenericDestination(); 306 | } 307 | command += ` -destination '${destination}'`; 308 | 309 | // Add test target/filter if provided 310 | if (testTarget) { 311 | command += ` -only-testing:${testTarget}`; 312 | } 313 | if (testFilter) { 314 | command += ` -only-testing:${testFilter}`; 315 | } 316 | 317 | // Disable parallel testing to avoid timeouts and multiple simulator instances 318 | command += ' -parallel-testing-enabled NO'; 319 | 320 | // Add result bundle path 321 | command += ` -resultBundlePath "${resultBundlePath}"`; 322 | 323 | command += ' test'; 324 | 325 | // Pipe through xcbeautify for clean output 326 | command = `set -o pipefail && ${command} 2>&1 | xcbeautify`; 327 | 328 | logger.debug({ command }, 'Test command'); 329 | 330 | // Use execAsync instead of spawn to ensure the xcresult is fully written when we get the result 331 | let output = ''; 332 | let code = 0; 333 | 334 | try { 335 | logger.info('Running tests...'); 336 | const { stdout, stderr } = await execAsync(command, { 337 | maxBuffer: 50 * 
1024 * 1024, // 50MB buffer for large test outputs 338 | timeout: 1800000, // 10 minute timeout for tests 339 | shell: '/bin/bash' 340 | }); 341 | 342 | output = stdout + (stderr ? '\n' + stderr : ''); 343 | } catch (error: any) { 344 | // Test failure is expected, capture the output 345 | output = (error.stdout || '') + (error.stderr ? '\n' + error.stderr : ''); 346 | code = error.code || 1; 347 | logger.debug({ code }, 'Tests completed with failures'); 348 | } 349 | 350 | // Parse compile errors and warnings using the central parser 351 | const parsed = parseXcbeautifyOutput(output); 352 | 353 | // Save the full test output to logs 354 | const projectName = path.basename(projectPath, path.extname(projectPath)); 355 | const logPath = logManager.saveLog('test', output, projectName, { 356 | scheme, 357 | configuration, 358 | platform, 359 | exitCode: code, 360 | command, 361 | errors: parsed.errors.length > 0 ? parsed.errors : undefined, 362 | warnings: parsed.warnings.length > 0 ? parsed.warnings : undefined 363 | }); 364 | logger.debug({ logPath }, 'Test output saved to log file'); 365 | 366 | // Parse the xcresult bundle for accurate test results 367 | let testResult = { 368 | passed: 0, 369 | failed: 0, 370 | success: false, 371 | failingTests: undefined as Array<{ identifier: string; reason: string }> | undefined, 372 | logPath 373 | }; 374 | 375 | // Try to extract the actual xcresult path from the output 376 | const resultMatch = output.match(/Test session results.*?\n\s*(.+\.xcresult)/); 377 | if (resultMatch) { 378 | resultBundlePath = resultMatch[1].trim(); 379 | logger.debug({ resultBundlePath }, 'Found xcresult path in output'); 380 | } 381 | 382 | // Also check for the "Writing result bundle at path" message 383 | const writingMatch = output.match(/Writing result bundle at path:\s*(.+\.xcresult)/); 384 | if (!resultMatch && writingMatch) { 385 | resultBundlePath = writingMatch[1].trim(); 386 | logger.debug({ resultBundlePath }, 'Found xcresult path from 
Writing message'); 387 | } 388 | 389 | try { 390 | // Check if xcresult exists and wait for it to be fully written 391 | // Wait for the xcresult bundle to be created and fully written (up to 10 seconds) 392 | let waitTime = 0; 393 | const maxWaitTime = 10000; 394 | const checkInterval = 200; 395 | 396 | // Check both that the directory exists and has the Info.plist file 397 | const isXcresultReady = () => { 398 | if (!existsSync(resultBundlePath)) { 399 | return false; 400 | } 401 | // Check if Info.plist exists inside the bundle, which indicates it's fully written 402 | const infoPlistPath = path.join(resultBundlePath, 'Info.plist'); 403 | return existsSync(infoPlistPath); 404 | }; 405 | 406 | while (!isXcresultReady() && waitTime < maxWaitTime) { 407 | await new Promise(resolve => setTimeout(resolve, checkInterval)); 408 | waitTime += checkInterval; 409 | } 410 | 411 | if (!isXcresultReady()) { 412 | logger.warn({ resultBundlePath, waitTime }, 'xcresult bundle not ready after waiting, using fallback parsing'); 413 | throw new Error('xcresult bundle not ready'); 414 | } 415 | 416 | // Give xcresulttool a moment to prepare for reading 417 | await new Promise(resolve => setTimeout(resolve, 300)); 418 | 419 | logger.debug({ resultBundlePath, waitTime }, 'xcresult bundle is ready'); 420 | 421 | let testReportJson; 422 | let totalPassed = 0; 423 | let totalFailed = 0; 424 | const failingTests: Array<{ identifier: string; reason: string }> = []; 425 | 426 | try { 427 | // Try the new format first (Xcode 16+) 428 | logger.debug({ resultBundlePath }, 'Attempting to parse xcresult with new format'); 429 | testReportJson = execSync( 430 | `xcrun xcresulttool get test-results summary --path "${resultBundlePath}"`, 431 | { encoding: 'utf8', maxBuffer: 50 * 1024 * 1024 } 432 | ); 433 | 434 | const summary = JSON.parse(testReportJson); 435 | logger.debug({ summary: { passedTests: summary.passedTests, failedTests: summary.failedTests } }, 'Got summary from xcresulttool'); 436 | 
437 | // The summary counts are not reliable for mixed XCTest/Swift Testing 438 | // We'll count from the detailed test nodes instead 439 | 440 | // Always get the detailed tests to count accurately 441 | try { 442 | const testsJson = execSync( 443 | `xcrun xcresulttool get test-results tests --path "${resultBundlePath}"`, 444 | { encoding: 'utf8', maxBuffer: 50 * 1024 * 1024 } 445 | ); 446 | const testsData = JSON.parse(testsJson); 447 | 448 | // Helper function to count tests and extract failing tests with reasons 449 | const processTestNodes = (node: any, parentName: string = ''): void => { 450 | if (!node) return; 451 | 452 | // Count test cases (including argument variations) 453 | if (node.nodeType === 'Test Case') { 454 | // Check if this test has argument variations 455 | let hasArguments = false; 456 | if (node.children && Array.isArray(node.children)) { 457 | for (const child of node.children) { 458 | if (child.nodeType === 'Arguments') { 459 | hasArguments = true; 460 | // Each argument variation is a separate test 461 | if (child.result === 'Passed') { 462 | totalPassed++; 463 | } else if (child.result === 'Failed') { 464 | totalFailed++; 465 | } 466 | } 467 | } 468 | } 469 | 470 | // If no arguments, count the test case itself 471 | if (!hasArguments) { 472 | if (node.result === 'Passed') { 473 | totalPassed++; 474 | } else if (node.result === 'Failed') { 475 | totalFailed++; 476 | 477 | // Extract failure information 478 | let testName = node.nodeIdentifier || node.name || parentName; 479 | let failureReason = ''; 480 | 481 | // Look for failure message in children 482 | if (node.children && Array.isArray(node.children)) { 483 | for (const child of node.children) { 484 | if (child.nodeType === 'Failure Message') { 485 | failureReason = child.details || child.name || 'Test failed'; 486 | break; 487 | } 488 | } 489 | } 490 | 491 | // Add test as an object with identifier and reason 492 | failingTests.push({ 493 | identifier: testName, 494 | reason: 
failureReason || 'Test failed (no details available)' 495 | }); 496 | } 497 | } 498 | } 499 | 500 | // Recurse through children 501 | if (node.children && Array.isArray(node.children)) { 502 | for (const child of node.children) { 503 | processTestNodes(child, node.name || parentName); 504 | } 505 | } 506 | }; 507 | 508 | // Parse the test nodes to count tests and extract failing test names with reasons 509 | if (testsData.testNodes && Array.isArray(testsData.testNodes)) { 510 | for (const testNode of testsData.testNodes) { 511 | processTestNodes(testNode); 512 | } 513 | } 514 | } catch (detailsError: any) { 515 | logger.debug({ error: detailsError.message }, 'Could not extract failing test details'); 516 | } 517 | 518 | } catch (newFormatError: any) { 519 | // Fall back to legacy format 520 | logger.debug('Falling back to legacy xcresulttool format'); 521 | testReportJson = execSync( 522 | `xcrun xcresulttool get test-report --legacy --format json --path "${resultBundlePath}"`, 523 | { encoding: 'utf8', maxBuffer: 50 * 1024 * 1024 } 524 | ); 525 | 526 | const testReport = JSON.parse(testReportJson); 527 | 528 | // Parse the legacy test report structure 529 | if (testReport.tests) { 530 | const countTests = (tests: any[]): void => { 531 | for (const test of tests) { 532 | if (test.subtests) { 533 | // This is a test suite, recurse into it 534 | countTests(test.subtests); 535 | } else if (test.testStatus) { 536 | // This is an actual test 537 | if (test.testStatus === 'Success') { 538 | totalPassed++; 539 | } else if (test.testStatus === 'Failure' || test.testStatus === 'Expected Failure') { 540 | totalFailed++; 541 | // Extract test name and failure details 542 | if (test.identifier) { 543 | const failureReason = test.failureMessage || test.message || 'Test failed (no details available)'; 544 | failingTests.push({ 545 | identifier: test.identifier, 546 | reason: failureReason 547 | }); 548 | } 549 | } 550 | } 551 | } 552 | }; 553 | 554 | 
countTests(testReport.tests); 555 | } 556 | } 557 | 558 | testResult = { 559 | passed: totalPassed, 560 | failed: totalFailed, 561 | success: totalFailed === 0 && code === 0, 562 | failingTests: failingTests.length > 0 ? failingTests : undefined, 563 | logPath 564 | }; 565 | 566 | // Save debug data for successful parsing 567 | logManager.saveDebugData('test-xcresult-parsed', { 568 | passed: totalPassed, 569 | failed: totalFailed, 570 | failingTests, 571 | resultBundlePath 572 | }, projectName); 573 | 574 | logger.info({ 575 | projectPath, 576 | ...testResult, 577 | exitCode: code, 578 | resultBundlePath 579 | }, 'Tests completed (parsed from xcresult)'); 580 | 581 | } catch (parseError: any) { 582 | logger.error({ 583 | error: parseError.message, 584 | resultBundlePath, 585 | xcresultExists: existsSync(resultBundlePath) 586 | }, 'Failed to parse xcresult bundle'); 587 | 588 | // Save debug info about the failure 589 | logManager.saveDebugData('test-xcresult-parse-error', { 590 | error: parseError.message, 591 | resultBundlePath, 592 | exists: existsSync(resultBundlePath) 593 | }, projectName); 594 | 595 | // If xcresulttool fails, try to parse counts from the text output 596 | const passedMatch = output.match(/Executed (\d+) tests?, with (\d+) failures?/); 597 | if (passedMatch) { 598 | const totalTests = parseInt(passedMatch[1], 10); 599 | const failures = parseInt(passedMatch[2], 10); 600 | testResult = { 601 | passed: totalTests - failures, 602 | failed: failures, 603 | success: failures === 0, 604 | failingTests: undefined, 605 | logPath 606 | }; 607 | } else { 608 | // Last resort fallback 609 | testResult = { 610 | passed: 0, 611 | failed: code === 0 ? 
0 : 1, 612 | success: code === 0, 613 | failingTests: undefined, 614 | logPath 615 | }; 616 | } 617 | } 618 | 619 | // Parse build errors from output 620 | // Errors are already parsed by xcbeautify parser 621 | 622 | const result = { 623 | ...testResult, 624 | success: code === 0 && testResult.failed === 0, 625 | output, 626 | errors: parsed.errors.length > 0 ? parsed.errors : undefined, 627 | warnings: parsed.warnings.length > 0 ? parsed.warnings : undefined 628 | }; 629 | 630 | // Clean up the result bundle if tests passed (keep failed results for debugging) 631 | if (result.success) { 632 | try { 633 | rmSync(resultBundlePath, { recursive: true, force: true }); 634 | } catch { 635 | // Ignore cleanup errors 636 | } 637 | } 638 | 639 | return result; 640 | } 641 | 642 | /** 643 | * Clean build artifacts 644 | */ 645 | async clean( 646 | projectPath: string, 647 | isWorkspace: boolean, 648 | options: { scheme?: string; configuration?: string } = {} 649 | ): Promise<void> { 650 | const { scheme, configuration = 'Debug' } = options; 651 | 652 | const projectFlag = isWorkspace ? 
'-workspace' : '-project'; 653 | let command = `xcodebuild ${projectFlag} "${projectPath}"`; 654 | 655 | if (scheme) { 656 | command += ` -scheme "${scheme}"`; 657 | } 658 | 659 | command += ` -configuration "${configuration}" clean`; 660 | 661 | logger.debug({ command }, 'Clean command'); 662 | 663 | try { 664 | await execAsync(command); 665 | logger.info({ projectPath }, 'Clean succeeded'); 666 | } catch (error: any) { 667 | logger.error({ error: error.message, projectPath }, 'Clean failed'); 668 | throw new Error(`Clean failed: ${error.message}`); 669 | } 670 | } 671 | } ``` -------------------------------------------------------------------------------- /src/utils/projects/SwiftBuild.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { execAsync } from '../../utils.js'; 2 | import { createModuleLogger } from '../../logger.js'; 3 | import path from 'path'; 4 | import { existsSync, readFileSync, unlinkSync } from 'fs'; 5 | import { tmpdir } from 'os'; 6 | import { XMLParser } from 'fast-xml-parser'; 7 | import { LogManager } from '../LogManager.js'; 8 | import { parseXcbeautifyOutput, Issue } from '../errors/xcbeautify-parser.js'; 9 | 10 | const logger = createModuleLogger('SwiftBuild'); 11 | const logManager = new LogManager(); 12 | 13 | export interface SwiftBuildOptions { 14 | configuration?: 'Debug' | 'Release'; 15 | product?: string; 16 | target?: string; 17 | } 18 | 19 | export interface SwiftRunOptions { 20 | executable?: string; 21 | arguments?: string[]; 22 | configuration?: 'Debug' | 'Release'; 23 | } 24 | 25 | export interface SwiftTestOptions { 26 | filter?: string; 27 | configuration?: 'Debug' | 'Release'; 28 | } 29 | 30 | /** 31 | * Handles Swift package commands (build, run, test) 32 | */ 33 | export class SwiftBuild { 34 | /** 35 | * Parse compile errors from Swift compiler output 36 | * @unused - Kept for potential future use 37 | */ 38 | private parseCompileErrors(output: string): 
Issue[] { 39 | const errors: Issue[] = []; 40 | const lines = output.split('\n'); 41 | 42 | // Swift compiler error format: 43 | // /path/to/file.swift:10:15: error: message here 44 | // /path/to/file.swift:20:8: warning: message here 45 | const errorRegex = /^(.+):(\d+):(\d+):\s+(error|warning):\s+(.+)$/; 46 | 47 | // Track unique errors (same as XcodeBuild to avoid duplicates) 48 | const seenErrors = new Set<string>(); 49 | 50 | for (const line of lines) { 51 | const match = line.match(errorRegex); 52 | if (match) { 53 | const [, file, lineNum, column, type, message] = match; 54 | 55 | // Create unique key to avoid duplicates 56 | const errorKey = `${file}:${lineNum}:${column}:${message}`; 57 | 58 | if (!seenErrors.has(errorKey)) { 59 | seenErrors.add(errorKey); 60 | errors.push({ 61 | file, 62 | line: parseInt(lineNum, 10), 63 | column: parseInt(column, 10), 64 | message, 65 | type: type as 'error' | 'warning', 66 | rawLine: line 67 | }); 68 | } 69 | } 70 | } 71 | 72 | return errors; 73 | } 74 | /** 75 | * Build a Swift package 76 | */ 77 | async build( 78 | packagePath: string, 79 | options: SwiftBuildOptions = {} 80 | ): Promise<{ success: boolean; output: string; logPath?: string; errors?: Issue[]; warnings?: Issue[] }> { 81 | const { configuration = 'Debug', product, target } = options; 82 | 83 | // Convert to lowercase for swift command 84 | const configFlag = configuration.toLowerCase(); 85 | let command = `swift build --package-path "${packagePath}" -c ${configFlag}`; 86 | 87 | if (product) { 88 | command += ` --product "${product}"`; 89 | } 90 | 91 | if (target) { 92 | command += ` --target "${target}"`; 93 | } 94 | 95 | logger.debug({ command }, 'Build command'); 96 | 97 | try { 98 | const { stdout, stderr } = await execAsync(command, { 99 | maxBuffer: 10 * 1024 * 1024 100 | }); 101 | 102 | const output = stdout + (stderr ? 
`\n${stderr}` : ''); 103 | 104 | // Save log 105 | const packageName = path.basename(packagePath); 106 | const logPath = logManager.saveLog('build', output, packageName, { 107 | configuration, 108 | product, 109 | target 110 | }); 111 | 112 | logger.info({ packagePath, configuration, logPath }, 'Build succeeded'); 113 | 114 | return { 115 | success: true, 116 | output, 117 | logPath 118 | }; 119 | } catch (error: any) { 120 | logger.error({ error: error.message, packagePath }, 'Build failed'); 121 | 122 | // Get full output 123 | const output = (error.stdout || '') + (error.stderr ? `\n${error.stderr}` : ''); 124 | 125 | // Save log 126 | const packageName = path.basename(packagePath); 127 | const logPath = logManager.saveLog('build', output, packageName, { 128 | configuration, 129 | product, 130 | target, 131 | exitCode: error.code || 1 132 | }); 133 | 134 | // Parse errors using unified xcbeautify parser 135 | const parsed = parseXcbeautifyOutput(output); 136 | const compileErrors = parsed.errors; 137 | const buildErrors: Issue[] = []; 138 | 139 | // Throw error with output for handler to parse 140 | const buildError: any = new Error(output); 141 | buildError.compileErrors = compileErrors; 142 | buildError.buildErrors = buildErrors; 143 | buildError.logPath = logPath; 144 | buildError.output = output; 145 | throw buildError; 146 | } 147 | } 148 | 149 | /** 150 | * Run a Swift package executable 151 | */ 152 | async run( 153 | packagePath: string, 154 | options: SwiftRunOptions = {} 155 | ): Promise<{ success: boolean; output: string; logPath?: string; errors?: Issue[]; warnings?: Issue[] }> { 156 | const { executable, arguments: args = [], configuration = 'Debug' } = options; 157 | 158 | // Convert to lowercase for swift command 159 | const configFlag = configuration.toLowerCase(); 160 | let command = `swift run --package-path "${packagePath}" -c ${configFlag}`; 161 | 162 | if (executable) { 163 | command += ` "${executable}"`; 164 | } 165 | 166 | if (args.length 
> 0) { 167 | command += ` ${args.map(arg => `"${arg}"`).join(' ')}`; 168 | } 169 | 170 | logger.debug({ command }, 'Run command'); 171 | 172 | try { 173 | const { stdout, stderr } = await execAsync(command, { 174 | maxBuffer: 10 * 1024 * 1024 175 | }); 176 | 177 | const output = stdout + (stderr ? `\n${stderr}` : ''); 178 | 179 | // Save log 180 | const packageName = path.basename(packagePath); 181 | const logPath = logManager.saveLog('run', output, packageName, { 182 | configuration, 183 | executable, 184 | arguments: args 185 | }); 186 | 187 | logger.info({ packagePath, executable, logPath }, 'Run succeeded'); 188 | 189 | return { 190 | success: true, 191 | output, 192 | logPath 193 | }; 194 | } catch (error: any) { 195 | logger.error({ error: error.message, packagePath }, 'Run failed'); 196 | 197 | // Get full output - for swift run, build output is in stderr, executable output is in stdout 198 | // We want to show them in chronological order: build first, then executable 199 | const output = (error.stderr || '') + (error.stdout ? 
`\n${error.stdout}` : ''); 200 | 201 | // Save log 202 | const packageName = path.basename(packagePath); 203 | const logPath = logManager.saveLog('run', output, packageName, { 204 | configuration, 205 | executable, 206 | arguments: args, 207 | exitCode: error.code || 1 208 | }); 209 | 210 | // Parse errors using unified xcbeautify parser 211 | const parsed = parseXcbeautifyOutput(output); 212 | const compileErrors = parsed.errors; 213 | const buildErrors: Issue[] = []; 214 | 215 | // Throw error with output for handler to parse 216 | const runError: any = new Error(output); 217 | runError.compileErrors = compileErrors; 218 | runError.buildErrors = buildErrors; 219 | runError.logPath = logPath; 220 | runError.output = output; 221 | throw runError; 222 | } 223 | } 224 | 225 | /** 226 | * Test a Swift package 227 | */ 228 | async test( 229 | packagePath: string, 230 | options: SwiftTestOptions = {} 231 | ): Promise<{ 232 | success: boolean; 233 | output: string; 234 | passed: number; 235 | failed: number; 236 | failingTests?: Array<{ identifier: string; reason: string }>; 237 | errors?: Issue[]; 238 | warnings?: Issue[]; 239 | logPath: string; 240 | }> { 241 | const { filter, configuration = 'Debug' } = options; 242 | 243 | 244 | // Convert to lowercase for swift command 245 | const configFlag = configuration.toLowerCase(); 246 | 247 | // Generate unique xunit output file in temp directory 248 | const xunitPath = path.join(tmpdir(), `test-${Date.now()}-${Math.random().toString(36).substring(7)}.xml`); 249 | const swiftTestingXunitPath = xunitPath.replace('.xml', '-swift-testing.xml'); 250 | 251 | let command = `swift test --package-path "${packagePath}" -c ${configFlag}`; 252 | 253 | if (filter) { 254 | command += ` --filter "${filter}"`; 255 | } 256 | 257 | // Add parallel and xunit output for better results 258 | command += ` --parallel --xunit-output "${xunitPath}"`; 259 | 260 | logger.debug({ command, xunitPath, swiftTestingXunitPath }, 'Test command'); 261 | 262 
| // Extract package name for logging 263 | const packageName = path.basename(packagePath); 264 | 265 | let testResult = { passed: 0, failed: 0, success: false, failingTests: undefined as Array<{ identifier: string; reason: string }> | undefined }; 266 | let output = ''; 267 | let exitCode = 0; 268 | 269 | try { 270 | const { stdout, stderr } = await execAsync(command, { 271 | maxBuffer: 10 * 1024 * 1024 272 | }); 273 | 274 | output = stdout + (stderr ? `\n${stderr}` : ''); 275 | 276 | // Parse XUnit files for test results 277 | const xunitResults = this.parseXunitFiles(xunitPath, swiftTestingXunitPath, output); 278 | 279 | // Use XUnit results if available 280 | if (xunitResults) { 281 | testResult = { ...testResult, ...xunitResults }; 282 | } else { 283 | // Fallback to console parsing if XUnit fails 284 | const parsedResults = this.parseTestOutput(output); 285 | testResult = { ...testResult, ...parsedResults }; 286 | } 287 | 288 | testResult.success = exitCode === 0 && testResult.failed === 0; 289 | 290 | // Clean up XUnit files 291 | this.cleanupXunitFiles(xunitPath, swiftTestingXunitPath); 292 | 293 | logger.info({ 294 | packagePath, 295 | passed: testResult.passed, 296 | failed: testResult.failed, 297 | failingTests: testResult.failingTests, 298 | source: xunitResults ? 'xunit' : 'console' 299 | }, 'Tests completed'); 300 | 301 | // Save the test output to logs 302 | const logPath = logManager.saveLog('test', output, packageName, { 303 | configuration, 304 | filter, 305 | exitCode, 306 | command, 307 | testResults: testResult 308 | }); 309 | 310 | return { 311 | ...testResult, 312 | output, 313 | logPath 314 | }; 315 | } catch (error: any) { 316 | logger.error({ error: error.message, packagePath }, 'Tests failed'); 317 | 318 | // Extract output from error 319 | output = (error.stdout || '') + (error.stderr ? 
`\n${error.stderr}` : ''); 320 | exitCode = error.code || 1; 321 | 322 | // Parse XUnit files for test results 323 | const xunitResults = this.parseXunitFiles(xunitPath, swiftTestingXunitPath, output); 324 | 325 | // Use XUnit results if available 326 | if (xunitResults) { 327 | testResult = { ...testResult, ...xunitResults }; 328 | } else { 329 | // Fallback to console parsing if XUnit fails 330 | const parsedResults = this.parseTestOutput(output); 331 | testResult = { ...testResult, ...parsedResults }; 332 | } 333 | 334 | // Clean up XUnit files 335 | this.cleanupXunitFiles(xunitPath, swiftTestingXunitPath); 336 | 337 | // Parse errors using unified xcbeautify parser 338 | const parsed = parseXcbeautifyOutput(output); 339 | 340 | // Save the test output to logs 341 | const logPath = logManager.saveLog('test', output, packageName, { 342 | configuration, 343 | filter, 344 | exitCode, 345 | command, 346 | testResults: testResult, 347 | errors: parsed.errors.length > 0 ? parsed.errors : undefined, 348 | warnings: parsed.warnings.length > 0 ? parsed.warnings : undefined 349 | }); 350 | 351 | return { 352 | ...testResult, 353 | success: false, 354 | output, 355 | errors: parsed.errors.length > 0 ? parsed.errors : undefined, 356 | warnings: parsed.warnings.length > 0 ? 
parsed.warnings : undefined, 357 | logPath 358 | }; 359 | } 360 | } 361 | 362 | /** 363 | * Parse test output from console 364 | */ 365 | private parseTestOutput(output: string): { passed?: number; failed?: number; failingTests?: Array<{ identifier: string; reason: string }> } { 366 | const result: { passed?: number; failed?: number; failingTests?: Array<{ identifier: string; reason: string }> } = {}; 367 | 368 | // Parse test counts 369 | const counts = this.parseTestCounts(output); 370 | if (counts) { 371 | result.passed = counts.passed; 372 | result.failed = counts.failed; 373 | } 374 | 375 | // Parse failing tests 376 | const failingTests = this.parseFailingTests(output); 377 | if (failingTests.length > 0) { 378 | result.failingTests = failingTests; 379 | } 380 | 381 | return result; 382 | } 383 | 384 | /** 385 | * Parse test counts from output 386 | */ 387 | private parseTestCounts(output: string): { passed: number; failed: number } | null { 388 | // XCTest format: "Executed 1 test, with 1 failure" 389 | // Look for the last occurrence to get the summary 390 | const xcTestMatches = [...output.matchAll(/Executed (\d+) test(?:s)?, with (\d+) failure/g)]; 391 | if (xcTestMatches.length > 0) { 392 | const lastMatch = xcTestMatches[xcTestMatches.length - 1]; 393 | const totalTests = parseInt(lastMatch[1], 10); 394 | const failures = parseInt(lastMatch[2], 10); 395 | 396 | // If we found XCTest results with actual tests, use them 397 | if (totalTests > 0) { 398 | return { 399 | passed: totalTests - failures, 400 | failed: failures 401 | }; 402 | } 403 | } 404 | 405 | // Swift Testing format: "✘ Test run with 1 test failed after..." or "✔ Test run with X tests passed after..." 406 | const swiftTestingMatch = output.match(/[✘✔] Test run with (\d+) test(?:s)? 
(passed|failed)/); 407 | if (swiftTestingMatch) { 408 | const testCount = parseInt(swiftTestingMatch[1], 10); 409 | const status = swiftTestingMatch[2]; 410 | 411 | // Only use Swift Testing results if we have actual tests 412 | if (testCount > 0) { 413 | if (status === 'failed') { 414 | return { passed: 0, failed: testCount }; 415 | } else { 416 | return { passed: testCount, failed: 0 }; 417 | } 418 | } 419 | } 420 | 421 | return null; 422 | } 423 | 424 | /** 425 | * Parse failing test details from output 426 | */ 427 | private parseFailingTests(output: string): Array<{ identifier: string; reason: string }> { 428 | const failingTests: Array<{ identifier: string; reason: string }> = []; 429 | 430 | // Parse XCTest failures 431 | const xcTestFailures = this.parseXCTestFailures(output); 432 | failingTests.push(...xcTestFailures); 433 | 434 | // Parse Swift Testing failures 435 | const swiftTestingFailures = this.parseSwiftTestingFailures(output); 436 | 437 | // Add Swift Testing failures, avoiding duplicates 438 | for (const failure of swiftTestingFailures) { 439 | if (!failingTests.some(t => t.identifier === failure.identifier)) { 440 | failingTests.push(failure); 441 | } 442 | } 443 | 444 | logger.debug({ failingTestsCount: failingTests.length, failingTests }, 'Parsed failing tests from console output'); 445 | return failingTests; 446 | } 447 | 448 | /** 449 | * Parse XCTest failure details 450 | */ 451 | private parseXCTestFailures(output: string): Array<{ identifier: string; reason: string }> { 452 | const failures: Array<{ identifier: string; reason: string }> = []; 453 | const pattern = /Test Case '-\[(\S+)\s+(\w+)\]' failed/g; 454 | let match; 455 | 456 | while ((match = pattern.exec(output)) !== null) { 457 | const className = match[1]; 458 | const methodName = match[2]; 459 | const identifier = `${className}.${methodName}`; 460 | const reason = this.extractXCTestFailureReason(output, className, methodName); 461 | 462 | failures.push({ identifier, reason }); 
463 | } 464 | 465 | return failures; 466 | } 467 | 468 | /** 469 | * Extract failure reason for a specific XCTest 470 | */ 471 | private extractXCTestFailureReason(output: string, className: string, testName: string): string { 472 | const lines = output.split('\n'); 473 | 474 | // Try both formats: full class name and just test name 475 | const patterns = [ 476 | `Test Case '-[${className} ${testName}]' failed`, 477 | `Test Case '-[${className.split('.').pop()} ${testName}]' failed` 478 | ]; 479 | 480 | for (const pattern of patterns) { 481 | for (let i = 0; i < lines.length; i++) { 482 | if (lines[i].includes(pattern)) { 483 | // Check the previous line for error details 484 | if (i > 0) { 485 | const prevLine = lines[i-1]; 486 | 487 | // XCTFail format: "error: ... : failed - <message>" 488 | if (prevLine.includes('failed -')) { 489 | const failedMatch = prevLine.match(/failed\s*-\s*(.+)$/); 490 | if (failedMatch) { 491 | return failedMatch[1].trim(); 492 | } 493 | } 494 | 495 | // XCTAssert format: may have the full error with escaped quotes 496 | if (prevLine.includes('error:')) { 497 | // Try to extract custom message after the last dash 498 | const customMessageMatch = prevLine.match(/\s-\s([^-]+)$/); 499 | if (customMessageMatch) { 500 | return customMessageMatch[1].trim(); 501 | } 502 | 503 | // Try to extract the assertion type 504 | if (prevLine.includes('XCTAssertEqual failed')) { 505 | // Clean up the XCTAssertEqual format 506 | const assertMatch = prevLine.match(/XCTAssertEqual failed:.*?-\s*(.+)$/); 507 | if (assertMatch) { 508 | return assertMatch[1].trim(); 509 | } 510 | // If no custom message, return a generic one 511 | return 'Values are not equal'; 512 | } 513 | 514 | // Generic error format: extract everything after "error: ... 
:" 515 | const errorMatch = prevLine.match(/error:\s*[^:]+:\s*(.+)$/); 516 | if (errorMatch) { 517 | let reason = errorMatch[1].trim(); 518 | // Clean up escaped quotes and format 519 | reason = reason.replace(/\\"/g, '"'); 520 | // Remove the redundant class/method prefix if present 521 | reason = reason.replace(new RegExp(`^-?\\[${className.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}[^\\]]*\\]\\s*:\\s*`, 'i'), ''); 522 | return reason.trim(); 523 | } 524 | } 525 | } 526 | break; 527 | } 528 | } 529 | } 530 | 531 | return 'Test failed'; 532 | } 533 | 534 | /** 535 | * Parse Swift Testing failure details 536 | */ 537 | private parseSwiftTestingFailures(output: string): Array<{ identifier: string; reason: string }> { 538 | const failures: Array<{ identifier: string; reason: string }> = []; 539 | const pattern = /✘ Test (\w+)\(\) (?:failed|recorded an issue)/g; 540 | let match; 541 | 542 | // Try to find the suite name from the output 543 | let suiteName: string | null = null; 544 | const suiteMatch = output.match(/◇ Suite (\w+) started\./); 545 | if (suiteMatch) { 546 | suiteName = suiteMatch[1]; 547 | } 548 | 549 | while ((match = pattern.exec(output)) !== null) { 550 | const testName = match[1]; 551 | 552 | // Build identifier with module.suite.test format to match XCTest 553 | let identifier = testName; 554 | const issuePattern = new RegExp(`✘ Test ${testName}\\(\\) recorded an issue at (\\w+)\\.swift`, 'm'); 555 | const issueMatch = output.match(issuePattern); 556 | if (issueMatch) { 557 | const fileName = issueMatch[1]; 558 | // If we have a suite name, use module.suite.test format 559 | // Otherwise fall back to module.test 560 | if (suiteName) { 561 | identifier = `${fileName}.${suiteName}.${testName}`; 562 | } else { 563 | identifier = `${fileName}.${testName}`; 564 | } 565 | } 566 | 567 | const reason = this.extractSwiftTestingFailureReason(output, testName); 568 | 569 | failures.push({ identifier, reason }); 570 | } 571 | 572 | return failures; 573 | } 574 | 
575 | /** 576 | * Extract failure reason for a specific Swift test 577 | */ 578 | private extractSwiftTestingFailureReason(output: string, testName: string): string { 579 | const lines = output.split('\n'); 580 | 581 | for (let i = 0; i < lines.length; i++) { 582 | const line = lines[i]; 583 | 584 | if (line.includes(`✘ Test ${testName}() recorded an issue`)) { 585 | // Extract the expectation failure message from the same line 586 | // Format: "✘ Test testFailingTest() recorded an issue at TestSwiftPackageSwiftTestingTests.swift:12:5: Expectation failed: 1 == 2" 587 | const issueMatch = line.match(/recorded an issue at .*?:\d+:\d+:\s*(.+)$/); 588 | if (issueMatch) { 589 | let reason = issueMatch[1]; 590 | 591 | // Check if there's a message on the following lines (marked with ↳) 592 | // Collect all lines between ↳ and the next ✘ marker 593 | const messageLines: string[] = []; 594 | let inMessage = false; 595 | 596 | for (let j = i + 1; j < lines.length && j < i + 20; j++) { 597 | const nextLine = lines[j]; 598 | 599 | // Stop when we hit the next test marker 600 | if (nextLine.includes('✘')) { 601 | break; 602 | } 603 | 604 | // Start capturing after we see ↳ (but skip comment lines) 605 | if (nextLine.includes('↳')) { 606 | if (!nextLine.includes('//')) { 607 | const messageMatch = nextLine.match(/↳\s*(.+)$/); 608 | if (messageMatch) { 609 | messageLines.push(messageMatch[1].trim()); 610 | inMessage = true; 611 | } 612 | } 613 | } else if (inMessage && nextLine.trim()) { 614 | // Capture continuation lines (indented lines without ↳) 615 | messageLines.push(nextLine.trim()); 616 | } 617 | } 618 | 619 | // If we found message lines, append them to the reason 620 | if (messageLines.length > 0) { 621 | reason = `${reason} - ${messageLines.join(' ')}`; 622 | } 623 | 624 | return reason; 625 | } 626 | // Fallback to simpler pattern 627 | const simpleMatch = line.match(/recorded an issue.*?:\s*(.+)$/); 628 | if (simpleMatch) { 629 | return simpleMatch[1]; 630 | } 631 | 
break; 632 | } else if (line.includes(`✘ Test ${testName}() failed`)) { 633 | // Check if there was an issue line before this 634 | if (i > 0 && lines[i-1].includes('recorded an issue')) { 635 | const issueMatch = lines[i-1].match(/recorded an issue.*?:\d+:\d+:\s*(.+)$/); 636 | if (issueMatch) { 637 | return issueMatch[1]; 638 | } 639 | } 640 | break; 641 | } 642 | } 643 | 644 | return 'Test failed'; 645 | } 646 | 647 | /** 648 | * Parse XUnit files from both XCTest and Swift Testing 649 | */ 650 | private parseXunitFiles(xunitPath: string, swiftTestingPath: string, consoleOutput: string): { 651 | passed: number; 652 | failed: number; 653 | failingTests?: Array<{ identifier: string; reason: string }>; 654 | } | null { 655 | try { 656 | const parser = new XMLParser({ 657 | ignoreAttributes: false, 658 | attributeNamePrefix: '@_' 659 | }); 660 | 661 | let totalPassed = 0; 662 | let totalFailed = 0; 663 | const allFailingTests: Array<{ identifier: string; reason: string }> = []; 664 | 665 | // Parse XCTest XUnit file 666 | if (existsSync(xunitPath)) { 667 | const xcTestXml = readFileSync(xunitPath, 'utf8'); 668 | const xcTestResult = parser.parse(xcTestXml); 669 | const xcTestSuite = xcTestResult.testsuites?.testsuite; 670 | 671 | if (xcTestSuite && xcTestSuite['@_tests']) { 672 | const totalTests = parseInt(xcTestSuite['@_tests'], 10); 673 | const failures = parseInt(xcTestSuite['@_failures'] || '0', 10); 674 | 675 | if (totalTests > 0) { 676 | totalPassed += totalTests - failures; 677 | totalFailed += failures; 678 | 679 | // Extract failing test identifiers (but not reasons - they're just "failed") 680 | const testcases = Array.isArray(xcTestSuite.testcase) 681 | ? xcTestSuite.testcase 682 | : xcTestSuite.testcase ? 
[xcTestSuite.testcase] : []; 683 | 684 | for (const testcase of testcases) { 685 | if (testcase && testcase.failure) { 686 | const className = testcase['@_classname'] || ''; 687 | const testName = testcase['@_name'] || ''; 688 | const identifier = `${className}.${testName}`; 689 | 690 | // Extract reason from console output 691 | const reason = this.extractXCTestFailureReason(consoleOutput, className, testName); 692 | allFailingTests.push({ identifier, reason }); 693 | } 694 | } 695 | } 696 | } 697 | } 698 | 699 | // Parse Swift Testing XUnit file 700 | if (existsSync(swiftTestingPath)) { 701 | const swiftTestingXml = readFileSync(swiftTestingPath, 'utf8'); 702 | const swiftTestingResult = parser.parse(swiftTestingXml); 703 | const swiftTestingSuite = swiftTestingResult.testsuites?.testsuite; 704 | 705 | if (swiftTestingSuite && swiftTestingSuite['@_tests']) { 706 | const totalTests = parseInt(swiftTestingSuite['@_tests'], 10); 707 | const failures = parseInt(swiftTestingSuite['@_failures'] || '0', 10); 708 | 709 | if (totalTests > 0) { 710 | totalPassed += totalTests - failures; 711 | totalFailed += failures; 712 | 713 | // Extract failing tests with full error messages 714 | const testcases = Array.isArray(swiftTestingSuite.testcase) 715 | ? swiftTestingSuite.testcase 716 | : swiftTestingSuite.testcase ? [swiftTestingSuite.testcase] : []; 717 | 718 | for (const testcase of testcases) { 719 | if (testcase && testcase.failure) { 720 | const className = testcase['@_classname'] || ''; 721 | const testName = testcase['@_name'] || ''; 722 | const identifier = `${className}.${testName}`; 723 | 724 | // Swift Testing XUnit includes the full error message! 
725 | const failureElement = testcase.failure; 726 | let reason = 'Test failed'; 727 | if (typeof failureElement === 'object' && failureElement['@_message']) { 728 | reason = failureElement['@_message']; 729 | // Decode HTML entities 730 | reason = reason 731 | .replace(/&/g, '&') 732 | .replace(/</g, '<') 733 | .replace(/>/g, '>') 734 | .replace(/"/g, '"') 735 | .replace(/ /g, '\n') 736 | .replace(/→/g, '→'); 737 | // Replace newlines with space for single-line display 738 | reason = reason.replace(/\n+/g, ' ').trim(); 739 | } 740 | 741 | allFailingTests.push({ identifier, reason }); 742 | } 743 | } 744 | } 745 | } 746 | } 747 | 748 | // Return results if we found any tests 749 | if (totalPassed > 0 || totalFailed > 0) { 750 | logger.debug({ 751 | totalPassed, 752 | totalFailed, 753 | failingTests: allFailingTests, 754 | xcTestExists: existsSync(xunitPath), 755 | swiftTestingExists: existsSync(swiftTestingPath) 756 | }, 'XUnit parsing successful'); 757 | 758 | return { 759 | passed: totalPassed, 760 | failed: totalFailed, 761 | failingTests: allFailingTests.length > 0 ? 
allFailingTests : undefined 762 | }; 763 | } 764 | 765 | return null; 766 | } catch (error: any) { 767 | logger.error({ error: error.message }, 'Failed to parse XUnit files'); 768 | return null; 769 | } 770 | } 771 | 772 | /** 773 | * Clean up XUnit files after parsing 774 | */ 775 | private cleanupXunitFiles(xunitPath: string, swiftTestingPath: string): void { 776 | try { 777 | if (existsSync(xunitPath)) { 778 | unlinkSync(xunitPath); 779 | } 780 | if (existsSync(swiftTestingPath)) { 781 | unlinkSync(swiftTestingPath); 782 | } 783 | } catch (error: any) { 784 | logger.debug({ error: error.message }, 'Failed to clean up XUnit files'); 785 | } 786 | } 787 | 788 | /** 789 | * Clean Swift package build artifacts 790 | */ 791 | async clean(packagePath: string): Promise<void> { 792 | const command = `swift package clean --package-path "${packagePath}"`; 793 | 794 | logger.debug({ command }, 'Clean command'); 795 | 796 | try { 797 | await execAsync(command); 798 | logger.info({ packagePath }, 'Clean succeeded'); 799 | } catch (error: any) { 800 | logger.error({ error: error.message, packagePath }, 'Clean failed'); 801 | throw new Error(`Clean failed: ${error.message}`); 802 | } 803 | } 804 | } ``` -------------------------------------------------------------------------------- /docs/TESTING-PHILOSOPHY.md: -------------------------------------------------------------------------------- ```markdown 1 | # Comprehensive Testing Philosophy 2 | 3 | > "The more your tests resemble the way your software is used, the more confidence they can give you." - Kent C. Dodds 4 | 5 | ## Table of Contents 6 | 1. [Fundamental Principles](#fundamental-principles) 7 | 2. [Testing Strategies](#testing-strategies) 8 | 3. [Test Quality Principles](#test-quality-principles) 9 | 4. [Advanced Testing Patterns](#advanced-testing-patterns) 10 | 5. [Testing Anti-Patterns](#testing-anti-patterns) 11 | 6. [Architecture-Specific Testing](#architecture-specific-testing) 12 | 7. 
[Practical Guidelines](#practical-guidelines) 13 | 8. [Jest TypeScript Mocking Best Practices](#jest-typescript-mocking-best-practices) 14 | 9. [Troubleshooting Jest TypeScript Issues](#troubleshooting-jest-typescript-issues) 15 | 10. [Implementation Checklist](#implementation-checklist) 16 | 17 | --- 18 | 19 | ## Fundamental Principles 20 | 21 | 22 | ### 1. Parse, Don't Validate - Type Safety at Boundaries 23 | 24 | **Principle**: Transform untrusted input into domain types at system boundaries. Once parsed, data is guaranteed valid throughout the system. 25 | 26 | #### ✅ Good Example - Parse at Boundary 27 | ```typescript 28 | // Parse raw input into domain type at the boundary 29 | export const bootSimulatorSchema = z.object({ 30 | deviceId: z.string().min(1, 'Device ID is required') 31 | }); 32 | 33 | export type BootSimulatorArgs = z.infer<typeof bootSimulatorSchema>; 34 | 35 | class BootSimulatorTool { 36 | async execute(args: any) { 37 | // Parse once at boundary 38 | const validated = bootSimulatorSchema.parse(args); 39 | // Now 'validated' is guaranteed to be valid BootSimulatorArgs 40 | // No need to check deviceId again anywhere in the system 41 | return this.bootDevice(validated); 42 | } 43 | } 44 | ``` 45 | 46 | #### ❌ Bad Example - Validate Throughout 47 | ```typescript 48 | class BootSimulatorTool { 49 | async execute(args: any) { 50 | // Checking validity everywhere = shotgun parsing 51 | if (!args.deviceId) throw new Error('No device ID'); 52 | return this.bootDevice(args); 53 | } 54 | 55 | private async bootDevice(args: any) { 56 | // Having to check again! 57 | if (!args.deviceId || args.deviceId.length === 0) { 58 | throw new Error('Invalid device ID'); 59 | } 60 | // ... 61 | } 62 | } 63 | ``` 64 | 65 | ### 2. Domain Primitives - Rich Types Over Primitives 66 | 67 | **Principle**: Use domain-specific types that enforce invariants at creation time. 
68 | 69 | #### ✅ Good Example - Domain Primitive 70 | ```typescript 71 | // DeviceId can only exist if valid 72 | class DeviceId { 73 | private constructor(private readonly value: string) {} 74 | 75 | static parse(input: string): DeviceId { 76 | if (!input || input.length === 0) { 77 | throw new Error('Device ID cannot be empty'); 78 | } 79 | if (!input.match(/^[A-F0-9-]+$/i)) { 80 | throw new Error('Invalid device ID format'); 81 | } 82 | return new DeviceId(input); 83 | } 84 | 85 | toString(): string { 86 | return this.value; 87 | } 88 | } 89 | 90 | // Usage - type safety throughout 91 | async bootDevice(deviceId: DeviceId) { 92 | // No need to validate - DeviceId guarantees validity 93 | await execAsync(`xcrun simctl boot "${deviceId}"`); 94 | } 95 | ``` 96 | 97 | #### ❌ Bad Example - Primitive Obsession 98 | ```typescript 99 | // Strings everywhere = no guarantees 100 | async bootDevice(deviceId: string) { 101 | // Have to validate everywhere 102 | if (!deviceId) throw new Error('Invalid device'); 103 | // Easy to pass wrong string 104 | await execAsync(`xcrun simctl boot "${deviceId}"`); 105 | } 106 | 107 | // Easy to mix up parameters 108 | function buildProject(projectPath: string, scheme: string, configuration: string) { 109 | // Oops, swapped parameters - no compile-time error! 110 | return build(configuration, projectPath, scheme); 111 | } 112 | ``` 113 | 114 | ### 3. Test Behavior, Not Implementation 115 | 116 | **Principle**: Test what your code does, not how it does it. 
117 | 118 | #### ✅ Good Example - Behavior Testing 119 | ```typescript 120 | test('boots a simulator device', async () => { 121 | const tool = new BootSimulatorTool(); 122 | const result = await tool.execute({ deviceId: 'iPhone 15' }); 123 | 124 | // Test the behavior/outcome 125 | expect(result.content[0].text).toContain('booted'); 126 | expect(result.content[0].text).toContain('iPhone 15'); 127 | }); 128 | 129 | test('handles already booted device gracefully', async () => { 130 | const tool = new BootSimulatorTool(); 131 | 132 | // First boot 133 | await tool.execute({ deviceId: 'iPhone 15' }); 134 | 135 | // Second boot should handle gracefully 136 | const result = await tool.execute({ deviceId: 'iPhone 15' }); 137 | expect(result.content[0].text).toContain('already booted'); 138 | }); 139 | ``` 140 | 141 | #### ❌ Bad Example - Implementation Testing 142 | ```typescript 143 | test('calls correct commands in sequence', async () => { 144 | const tool = new BootSimulatorTool(); 145 | await tool.execute({ deviceId: 'test-id' }); 146 | 147 | // Testing HOW it works, not WHAT it does 148 | expect(mockExecAsync).toHaveBeenCalledWith('xcrun simctl list devices --json'); 149 | expect(mockExecAsync).toHaveBeenCalledWith('xcrun simctl boot "test-id"'); 150 | expect(mockExecAsync).toHaveBeenCalledTimes(2); 151 | expect(mockExecAsync.mock.calls[0]).toHaveBeenCalledBefore(mockExecAsync.mock.calls[1]); 152 | }); 153 | ``` 154 | 155 | ## Testing Strategies 156 | 157 | ### The Testing Trophy (Not Pyramid) - Modern Approach 158 | 159 | Based on Kent C. Dodds' philosophy: **"Write tests. Not too many. 
Mostly integration."** 160 | 161 | ``` 162 | /\ 163 | /e2e\ <- 10%: Critical user paths 164 | /------\ 165 | / integ \ <- 60%: Component interactions (THE FOCUS) 166 | /----------\ 167 | / unit \ <- 25%: Complex logic, algorithms 168 | /--------------\ 169 | / static \ <- 5%: TypeScript, ESLint 170 | ``` 171 | 172 | **Why Trophy Over Pyramid**: 173 | - Integration tests provide the best confidence-to-effort ratio 174 | - Modern tools make integration tests fast 175 | - Unit tests often test implementation details 176 | - "The more your tests resemble the way your software is used, the more confidence they can give you" 177 | 178 | ### When to Use Each Test Type 179 | 180 | #### Static Testing (TypeScript, ESLint) 181 | - **Use for**: Type safety, code style, obvious errors 182 | - **Example**: TypeScript ensuring correct function signatures 183 | 184 | #### Unit Tests - Solitary 185 | - **Use for**: Pure functions, complex algorithms, data transformations 186 | - **Mock**: All dependencies 187 | - **Example**: Testing a sorting algorithm, parsing logic 188 | 189 | #### Unit Tests - Sociable (Kent Beck's Original Approach) 190 | - **Use for**: Testing small units with their real collaborators 191 | - **Mock**: Only awkward dependencies (network, filesystem) 192 | - **Example**: Testing a service with its real validator 193 | 194 | #### ✅ Good Sociable Unit Test 195 | ```typescript 196 | test('XcodeProject builds with real configuration', async () => { 197 | // Use real Configuration and ProjectParser 198 | const config = new Configuration({ scheme: 'MyApp' }); 199 | const parser = new ProjectParser(); 200 | const project = new XcodeProject('path/to/project', config, parser); 201 | 202 | // Only mock the subprocess boundary 203 | mockExecAsync.mockResolvedValue({ stdout: 'Build succeeded' }); 204 | 205 | const result = await project.build(); 206 | expect(result.success).toBe(true); 207 | }); 208 | ``` 209 | 210 | #### Integration Tests - Narrow (Recommended) 211 | - 
**Use for**: Testing specific integration points 212 | - **Mock**: External boundaries only (subprocess, network, filesystem) 213 | - **Focus**: Data flow between components 214 | 215 | #### ✅ Good Narrow Integration Test 216 | ```typescript 217 | test('device information flows correctly through tool chain', async () => { 218 | // Mock only external boundary 219 | mockExecAsync.mockResolvedValue({ 220 | stdout: JSON.stringify({ devices: deviceList }) 221 | }); 222 | 223 | // Test real component interaction 224 | const tool = new BootSimulatorTool(); // Uses real Devices, real SimulatorDevice 225 | const result = await tool.execute({ deviceId: 'iPhone 15' }); 226 | 227 | // Verify outcome, not implementation 228 | expect(result.content[0].text).toContain('iPhone 15'); 229 | }); 230 | ``` 231 | 232 | #### Integration Tests - Broad (Use Sparingly) 233 | - **Use for**: Critical paths that must work 234 | - **Mock**: Nothing - use real services 235 | - **Also called**: E2E tests, system tests 236 | 237 | #### End-to-End Tests 238 | - **Use for**: Critical user journeys, smoke tests 239 | - **Mock**: Nothing 240 | - **Example**: Actually booting a real simulator 241 | 242 | ### Contract Testing - API Boundaries 243 | 244 | **When to use**: When you have separate services/modules that communicate 245 | 246 | #### Consumer-Driven Contract Example 247 | ```typescript 248 | // Consumer defines what it needs 249 | const consumerContract = { 250 | getDevice: { 251 | request: { deviceId: 'string' }, 252 | response: { 253 | id: 'string', 254 | name: 'string', 255 | state: 'Booted' | 'Shutdown' 256 | } 257 | } 258 | }; 259 | 260 | // Provider verifies it can fulfill the contract 261 | test('Devices service fulfills consumer contract', async () => { 262 | const device = await devices.find('test-id'); 263 | expect(device).toMatchObject({ 264 | id: expect.any(String), 265 | name: expect.any(String), 266 | state: expect.stringMatching(/Booted|Shutdown/) 267 | }); 268 | }); 269 | ``` 
270 | 271 | ## Property-Based Testing 272 | 273 | **Use for**: Finding edge cases, testing invariants 274 | 275 | ### Example: Testing Invariants 276 | ```typescript 277 | import fc from 'fast-check'; 278 | 279 | test('device ID parsing is reversible', () => { 280 | fc.assert( 281 | fc.property(fc.string(), (input) => { 282 | try { 283 | const deviceId = DeviceId.parse(input); 284 | const serialized = deviceId.toString(); 285 | const reparsed = DeviceId.parse(serialized); 286 | // Invariant: parse → toString → parse = identity 287 | return reparsed.toString() === serialized; 288 | } catch { 289 | // Invalid inputs should consistently fail 290 | expect(() => DeviceId.parse(input)).toThrow(); 291 | return true; 292 | } 293 | }) 294 | ); 295 | }); 296 | ``` 297 | 298 | ## Anti-Patterns to Avoid 299 | 300 | ### 1. Testing Private Methods 301 | ```typescript 302 | // ❌ BAD: Testing internals 303 | test('private parseDeviceList works', () => { 304 | const devices = new Devices(); 305 | // @ts-ignore - accessing private method 306 | const parsed = devices.parseDeviceList(json); 307 | expect(parsed).toHaveLength(3); 308 | }); 309 | 310 | // ✅ GOOD: Test through public API 311 | test('finds devices from list', async () => { 312 | const devices = new Devices(); 313 | const device = await devices.find('iPhone 15'); 314 | expect(device).toBeDefined(); 315 | }); 316 | ``` 317 | 318 | ### 2. Excessive Mocking 319 | ```typescript 320 | // ❌ BAD: Mocking everything 321 | test('device boots', async () => { 322 | const mockDevice = { 323 | bootDevice: jest.fn(), 324 | open: jest.fn(), 325 | id: 'test', 326 | name: 'Test Device' 327 | }; 328 | const mockDevices = { 329 | find: jest.fn().mockResolvedValue(mockDevice) 330 | }; 331 | const tool = new BootSimulatorTool(mockDevices); 332 | // This tests nothing real! 
333 | }); 334 | 335 | // ✅ GOOD: Minimal mocking 336 | test('device boots', async () => { 337 | mockExecAsync.mockResolvedValue({ stdout: '' }); 338 | const tool = new BootSimulatorTool(); // Real components 339 | await tool.execute({ deviceId: 'iPhone 15' }); 340 | // Tests actual integration 341 | }); 342 | ``` 343 | 344 | ### 3. Snapshot Testing Without Thought 345 | ```typescript 346 | // ❌ BAD: Meaningless snapshot 347 | test('renders correctly', () => { 348 | const result = tool.execute(args); 349 | expect(result).toMatchSnapshot(); 350 | // What are we actually testing? 351 | }); 352 | 353 | // ✅ GOOD: Specific assertions 354 | test('returns success message with device name', async () => { 355 | const result = await tool.execute({ deviceId: 'iPhone 15' }); 356 | expect(result.content[0].text).toContain('Successfully booted'); 357 | expect(result.content[0].text).toContain('iPhone 15'); 358 | }); 359 | ``` 360 | 361 | ## Practical Guidelines for This Project 362 | 363 | ### 1. Test Categorization 364 | 365 | **Keep as E2E (10%)**: 366 | - Critical paths: build → run → test cycle 367 | - Simulator boot/shutdown with real devices 368 | - Actual Xcode project compilation 369 | 370 | **Convert to Integration (60%)**: 371 | - Tool composition tests (Tool → Service → Component) 372 | - Data flow tests 373 | - Error propagation tests 374 | 375 | **Convert to Unit (25%)**: 376 | - Validation logic 377 | - Parsing functions 378 | - Error message formatting 379 | - Configuration merging 380 | 381 | ### 2. Where to Mock 382 | 383 | **Always Mock**: 384 | - `execAsync` / `execSync` - subprocess calls 385 | - File system operations 386 | - Network requests 387 | - Time-dependent operations 388 | 389 | **Never Mock**: 390 | - Your own domain objects 391 | - Simple data transformations 392 | - Validation logic 393 | - Pure functions 394 | 395 | ### 3. 
MCP Controller Testing Guidelines 396 | 397 | #### What Controllers Are 398 | In our MCP server architecture, Controllers are the presentation layer that: 399 | - Define MCP tool metadata (name, description, input schema) 400 | - Orchestrate the flow: validate input → call use case → format output 401 | - Handle error presentation with consistent formatting 402 | 403 | #### Unit Tests for Controllers - ONLY Test the Contract 404 | Controller unit tests should ONLY verify: 405 | 1. **MCP Tool Metadata**: Name, description, and schema definition 406 | 2. **Error Formatting**: How errors are presented to users (❌ prefix, etc.) 407 | 3. **Success Formatting**: How success is presented (✅ prefix, etc.) 408 | 409 | **DO NOT TEST**: 410 | - How the controller calls the use case (implementation detail) 411 | - What parameters are passed to the use case 412 | - Whether the use case was called 413 | 414 | #### ✅ Good Controller Unit Test 415 | ```typescript 416 | describe('BuildXcodeController', () => { 417 | function createSUT() { 418 | const mockExecute = jest.fn(); 419 | const mockUseCase = { execute: mockExecute } as unknown as BuildProjectUseCase; 420 | const mockPresenter = {} as BuildXcodePresenter; 421 | return { sut: new BuildXcodeController(mockUseCase, mockPresenter), mockExecute }; 422 | } 423 | 424 | it('should define correct tool metadata', () => { 425 | const { sut } = createSUT(); 426 | expect(sut.name).toBe('build_xcode'); 427 | expect(sut.description).toBe('Build an Xcode project or workspace'); 428 | }); 429 | 430 | it('should format success with ✅ emoji', async () => { 431 | const { sut, mockExecute } = createSUT(); 432 | mockExecute.mockResolvedValue(BuildResult.succeeded(...)); 433 | 434 | const result = await sut.execute({...}); 435 | 436 | // Test WHAT the user sees, not HOW it's produced 437 | expect(result.content[0].text).toContain('✅'); 438 | expect(result.content[0].text).toContain('Build succeeded'); 439 | }); 440 | }); 441 | ``` 442 | 443 | #### ❌ Bad Controller Unit Test 444 | ```typescript 445 
| it('should call use case with correct parameters', async () => { 446 | const { sut, mockExecute } = createSUT(); 447 | 448 | await sut.execute({ platform: 'iOS' }); 449 | 450 | // Testing HOW, not WHAT - implementation detail! 451 | expect(mockExecute).toHaveBeenCalledWith( 452 | expect.objectContaining({ platform: Platform.iOS }) 453 | ); 454 | }); 455 | ``` 456 | 457 | #### Integration Tests for Controllers - Test Behavior 458 | Integration tests should verify the actual behavior with real components: 459 | ```typescript 460 | it('should filter simulators by name', async () => { 461 | // Mock only external boundary (shell command) 462 | mockExec.mockImplementation(...); 463 | 464 | // Use real controller with real use case and repository 465 | const controller = ListSimulatorsControllerFactory.create(); 466 | 467 | const result = await controller.execute({ name: 'iPhone 15' }); 468 | 469 | // Test actual behavior 470 | expect(result.content[0].text).toContain('iPhone 15 Pro'); 471 | expect(result.content[0].text).not.toContain('iPhone 14'); 472 | }); 473 | ``` 474 | 475 | ### 4. 
Test Naming Conventions 476 | 477 | #### File Naming Pattern 478 | 479 | **Standard**: `[ComponentName].[test-type].test.ts` 480 | 481 | ```bash 482 | # Unit tests - test a single unit in isolation 483 | XcbeautifyOutputParser.unit.test.ts 484 | DeviceValidator.unit.test.ts 485 | BuildCommandBuilder.unit.test.ts 486 | 487 | # Integration tests - test components working together 488 | BuildWorkflow.integration.test.ts 489 | DeviceManagement.integration.test.ts 490 | 491 | # E2E tests - test complete user scenarios 492 | BuildAndRun.e2e.test.ts 493 | SimulatorLifecycle.e2e.test.ts 494 | 495 | # Contract tests - verify API contracts 496 | DeviceService.contract.test.ts 497 | ``` 498 | 499 | **Why include test type in filename?** 500 | - Immediately clear what type of test without opening file 501 | - Can run specific test types: `jest *.unit.test.ts` 502 | - Different test types have different performance characteristics 503 | - Helps maintain proper test pyramid/trophy distribution 504 | 505 | #### Directory Structure 506 | 507 | ``` 508 | src/__tests__/ 509 | ├── unit/ 510 | │ ├── domain/ # Pure business logic 511 | │ ├── application/ # Use cases and orchestration 512 | │ ├── infrastructure/ # External adapters 513 | │ └── utils/ # Helper functions 514 | ├── integration/ # Component interactions 515 | ├── e2e/ # Full system tests 516 | └── contracts/ # API contract tests 517 | ``` 518 | 519 | #### Test Suite Organization 520 | 521 | ```typescript 522 | // Mirror your source code structure in describe blocks 523 | describe('XcbeautifyOutputParser', () => { // Class/module name 524 | describe('parseBuildOutput', () => { // Method name 525 | describe('when parsing errors', () => { // Scenario 526 | it('should extract file information from error line', () => {}); 527 | it('should handle errors without file paths', () => {}); 528 | }); 529 | 530 | describe('when parsing warnings', () => { 531 | it('should extract warning details', () => {}); 532 | }); 533 | }); 534 | }); 
535 | ``` 536 | 537 | #### Individual Test Naming 538 | 539 | **Pattern**: `should [expected behavior] when [condition]` 540 | 541 | ```typescript 542 | // ✅ GOOD: Clear behavior and condition 543 | it('should parse error with file information when line contains file path', () => {}); 544 | it('should return empty array when input is empty', () => {}); 545 | it('should throw ValidationError when device ID is invalid', () => {}); 546 | it('should deduplicate identical errors when parsing multi-arch output', () => {}); 547 | 548 | // ❌ BAD: Vague or implementation-focused 549 | it('works', () => {}); 550 | it('parses correctly', () => {}); 551 | it('calls parseError method', () => {}); 552 | it('test case 1', () => {}); 553 | ``` 554 | 555 | **Alternative patterns for specific scenarios**: 556 | 557 | ```typescript 558 | // Given-When-Then (BDD style) 559 | it('given a shutdown device, when boot is called, then device should be in booted state', () => {}); 560 | 561 | // Error scenarios - be specific about the error 562 | it('should throw InvalidPathError when project path does not exist', () => {}); 563 | it('should throw TimeoutError when device does not respond within 5 seconds', () => {}); 564 | 565 | // Edge cases - explain what makes it an edge case 566 | it('should handle empty array without throwing', () => {}); 567 | it('should process 10,000 items without memory overflow', () => {}); 568 | it('should correctly parse Unicode characters in file paths', () => {}); 569 | 570 | // Regression tests - reference the issue 571 | it('should not crash when device name contains spaces (fixes #123)', () => {}); 572 | ``` 573 | 574 | #### Mock and Test Data Naming 575 | 576 | ```typescript 577 | // Use descriptive prefixes 578 | const mockDeviceRepository = { find: jest.fn() }; 579 | const stubLogger = { log: () => {} }; 580 | const spyOnExecute = jest.spyOn(executor, 'execute'); 581 | const fakeDevice = { id: '123', name: 'Test Device' }; 582 | 583 | // Test data should 
describe the scenario 584 | const validConfiguration = createConfiguration({ valid: true }); 585 | const invalidConfiguration = createConfiguration({ valid: false }); 586 | const minimalDevice = createDevice(); // defaults only 587 | const bootedDevice = createBootedDevice(); 588 | const deviceWithError = createDeviceWithError('Boot failed'); 589 | 590 | // ❌ Avoid generic names 591 | const data = {}; // What kind of data? 592 | const obj = {}; // What object? 593 | const mock1 = {}; // Mock of what? 594 | const testDevice = {}; // All devices in tests are test devices 595 | ``` 596 | 597 | ## Testing Decision Tree 598 | 599 | ``` 600 | Is it a pure function? 601 | Yes → Unit test with examples 602 | No ↓ 603 | 604 | Does it integrate with external systems? 605 | Yes → Mock external boundary, integration test 606 | No ↓ 607 | 608 | Is it orchestrating multiple components? 609 | Yes → Integration test with real components 610 | No ↓ 611 | 612 | Is it a critical user path? 613 | Yes → E2E test 614 | No ↓ 615 | 616 | Is the logic complex? 617 | Yes → Unit test with sociable approach 618 | No → Maybe doesn't need a test 619 | ``` 620 | 621 | ## Measuring Test Quality 622 | 623 | ### Good Tests Are: 624 | 1. **Fast**: Run in milliseconds, not seconds 625 | 2. **Deterministic**: Same input → same output 626 | 3. **Isolated**: Can run in parallel 627 | 4. **Descriptive**: Clear what failed and why 628 | 5. **Maintainable**: Don't break on refactoring 629 | 630 | ### Red Flags: 631 | - Tests that break when refactoring 632 | - Tests with lots of mocks 633 | - Tests that are hard to understand 634 | - Tests that are slow 635 | - Tests that are flaky 636 | 637 | ## Implementation Checklist 638 | 639 | - [ ] Parse inputs at system boundaries using domain validation 640 | - [ ] Create domain primitives for core concepts (DeviceId, BundleId, etc.) 
641 | - [ ] Remove integration tests that test implementation 642 | - [ ] Convert E2E tests to integration tests where possible 643 | - [ ] Focus on behavior, not implementation 644 | - [ ] Use Kent C. Dodds' Testing Trophy approach 645 | - [ ] Mock only at system boundaries 646 | - [ ] Add property-based tests for invariants 647 | - [ ] Use contract tests for module boundaries 648 | 649 | ## Test Quality Principles 650 | 651 | ### SUT (System Under Test) Pattern 652 | 653 | **Principle**: Clearly identify and isolate the system being tested. Use factory methods to create the SUT and test data, making tests more maintainable and resistant to implementation changes. 654 | 655 | #### ✅ Good Example - SUT with Factory Methods 656 | ```typescript 657 | describe('XcbeautifyOutputParser', () => { 658 | // Factory method for creating the SUT 659 | function createSUT(): IOutputParser { 660 | return new XcbeautifyOutputParser(); 661 | } 662 | 663 | // Factory methods for test data 664 | function createErrorWithFileInfo( 665 | file = '/Users/project/App.swift', 666 | line = 10, 667 | column = 15, 668 | message = 'cannot find type' 669 | ) { 670 | return `❌ ${file}:${line}:${column}: error: ${message}`; 671 | } 672 | 673 | describe('parseBuildOutput', () => { 674 | let sut: IOutputParser; 675 | 676 | beforeEach(() => { 677 | sut = createSUT(); // Easy to modify creation logic 678 | }); 679 | 680 | it('should parse errors with file information', () => { 681 | // Arrange - using factory methods 682 | const output = createErrorWithFileInfo( 683 | '/Users/project/Main.swift', 684 | 25, 685 | 8, 686 | 'missing return' 687 | ); 688 | 689 | // Act - clear what's being tested 690 | const result = sut.parseBuildOutput(output); 691 | 692 | // Assert - focused on behavior 693 | expect(result.errors[0]).toMatchObject({ 694 | file: '/Users/project/Main.swift', 695 | line: 25, 696 | column: 8 697 | }); 698 | }); 699 | }); 700 | }); 701 | ``` 702 | 703 | #### ❌ Bad Example - Direct 
Instantiation 704 | ```typescript 705 | describe('XcbeautifyOutputParser', () => { 706 | let parser: XcbeautifyOutputParser; 707 | 708 | beforeEach(() => { 709 | // Hard to change if constructor changes 710 | parser = new XcbeautifyOutputParser(); 711 | }); 712 | 713 | it('parses errors', () => { 714 | // Inline test data - hard to reuse 715 | const output = '❌ /Users/project/App.swift:10:15: error: cannot find type'; 716 | 717 | // Not clear what's being tested 718 | const result = parser.parseBuildOutput(output); 719 | 720 | // Brittle assertions 721 | expect(result.errors[0].file).toBe('/Users/project/App.swift'); 722 | expect(result.errors[0].line).toBe(10); 723 | }); 724 | }); 725 | ``` 726 | 727 | #### Benefits of SUT Pattern 728 | 729 | 1. **Maintainability**: Change SUT creation in one place 730 | 2. **Clarity**: Clear distinction between SUT and collaborators 731 | 3. **Flexibility**: Easy to add constructor parameters 732 | 4. **Testability**: Can return mocks/stubs from factory when needed 733 | 5. **Documentation**: Factory method name describes what variant is created 734 | 735 | #### Factory Method Best Practices 736 | 737 | ```typescript 738 | // 1. Default values for common cases 739 | function createTestDevice(overrides = {}) { 740 | return { 741 | id: 'default-id', 742 | name: 'iPhone 15', 743 | state: 'Shutdown', 744 | ...overrides // Easy to customize 745 | }; 746 | } 747 | 748 | // 2. Descriptive factory names for specific scenarios 749 | function createBootedDevice() { 750 | return createTestDevice({ state: 'Booted' }); 751 | } 752 | 753 | function createErrorDevice() { 754 | return createTestDevice({ state: 'Error', error: 'Boot failed' }); 755 | } 756 | 757 | // 3. 
Factory for complex objects with builders 758 | function createParsedOutputWithErrors(errorCount = 1) { 759 | const errors = Array.from({ length: errorCount }, (_, i) => ({ 760 | type: 'error' as const, 761 | message: `Error ${i + 1}`, 762 | file: `/path/file${i}.swift`, 763 | line: i * 10, 764 | column: 5 765 | })); 766 | 767 | return { 768 | errors, 769 | warnings: [], 770 | summary: { 771 | totalErrors: errorCount, 772 | totalWarnings: 0, 773 | buildSucceeded: false 774 | } 775 | }; 776 | } 777 | ``` 778 | 779 | ### FIRST Principles 780 | 781 | Good tests follow the FIRST principles: 782 | 783 | #### **F - Fast** 784 | Tests should execute in milliseconds, not seconds. A test suite with 2000 tests at 200ms each takes 6.5 minutes - unacceptable for rapid feedback. 785 | 786 | ```typescript 787 | // ✅ FAST: In-memory, no I/O 788 | test('validates device ID format', () => { 789 | expect(() => DeviceId.parse('')).toThrow(); 790 | expect(() => DeviceId.parse('valid-id')).not.toThrow(); 791 | }); // ~1ms 792 | 793 | // ❌ SLOW: Network calls, file I/O 794 | test('fetches device from API', async () => { 795 | const device = await fetch('https://api.example.com/devices/123'); 796 | expect(device.name).toBe('iPhone'); 797 | }); // ~500ms 798 | ``` 799 | 800 | #### **I - Independent/Isolated** 801 | Tests should not depend on each other or execution order. 802 | 803 | ```typescript 804 | // ❌ BAD: Tests depend on shared state 805 | let counter = 0; 806 | test('first test', () => { 807 | counter++; 808 | expect(counter).toBe(1); 809 | }); 810 | test('second test', () => { 811 | expect(counter).toBe(1); // Fails if run alone! 812 | }); 813 | 814 | // ✅ GOOD: Each test is independent 815 | test('first test', () => { 816 | const counter = createCounter(); 817 | counter.increment(); 818 | expect(counter.value).toBe(1); 819 | }); 820 | ``` 821 | 822 | #### **R - Repeatable** 823 | Same input → same output, every time. 
824 | 825 | ```typescript 826 | // ❌ BAD: Time-dependent 827 | test('checks if weekend', () => { 828 | const isWeekend = checkWeekend(); 829 | expect(isWeekend).toBe(true); // Fails Monday-Friday! 830 | }); 831 | 832 | // ✅ GOOD: Deterministic 833 | test('checks if weekend', () => { 834 | const saturday = new Date('2024-01-06'); 835 | const isWeekend = checkWeekend(saturday); 836 | expect(isWeekend).toBe(true); 837 | }); 838 | ``` 839 | 840 | #### **S - Self-Validating** 841 | Tests must clearly pass or fail without human interpretation. 842 | 843 | ```typescript 844 | // ❌ BAD: Requires manual verification 845 | test('logs output correctly', () => { 846 | console.log(generateReport()); 847 | // Developer must manually check console output 848 | }); 849 | 850 | // ✅ GOOD: Automated assertion 851 | test('generates correct report', () => { 852 | const report = generateReport(); 853 | expect(report).toContain('Total: 100'); 854 | expect(report).toMatch(/Date: \d{4}-\d{2}-\d{2}/); 855 | }); 856 | ``` 857 | 858 | #### **T - Timely** 859 | Write tests alongside code, not after. 860 | 861 | ```typescript 862 | // TDD Cycle: Red → Green → Refactor 863 | // 1. Write failing test first 864 | test('parses valid device ID', () => { 865 | const id = DeviceId.parse('ABC-123'); 866 | expect(id.toString()).toBe('ABC-123'); 867 | }); 868 | 869 | // 2. Implement minimal code to pass 870 | // 3. Refactor while tests stay green 871 | ``` 872 | 873 | ### DRY vs DAMP in Tests 874 | 875 | **DRY (Don't Repeat Yourself)**: Avoid duplication in production code. 876 | **DAMP (Descriptive And Meaningful Phrases)**: Prioritize readability in test code. 
877 | 878 | #### When to Choose DAMP Over DRY 879 | 880 | ```typescript 881 | // ❌ Too DRY - Hard to understand test failures 882 | const testCases = [ 883 | ['input1', 'output1'], 884 | ['input2', 'output2'], 885 | ['input3', 'output3'] 886 | ]; 887 | 888 | testCases.forEach(([input, output]) => { 889 | test(`test ${input}`, () => { 890 | expect(process(input)).toBe(output); 891 | }); 892 | }); 893 | 894 | // ✅ DAMP - Clear and descriptive 895 | test('handles empty string input', () => { 896 | const result = parseDeviceId(''); 897 | expect(result).toBeNull(); 898 | expect(console.error).toHaveBeenCalledWith('Device ID cannot be empty'); 899 | }); 900 | 901 | test('handles valid UUID format', () => { 902 | const result = parseDeviceId('550e8400-e29b-41d4-a716-446655440000'); 903 | expect(result).toEqual({ 904 | type: 'uuid', 905 | value: '550e8400-e29b-41d4-a716-446655440000' 906 | }); 907 | }); 908 | 909 | test('handles device name format', () => { 910 | const result = parseDeviceId('iPhone 15 Pro'); 911 | expect(result).toEqual({ 912 | type: 'name', 913 | value: 'iPhone 15 Pro' 914 | }); 915 | }); 916 | ``` 917 | 918 | **Key Insight**: "DAMP not DRY" means tests should be easy to understand even if that means some code duplication. When a test fails, the reason should be immediately obvious. 
919 | 920 | #### When to Use beforeEach vs DAMP 921 | 922 | **Use beforeEach for:** 923 | - Technical housekeeping that doesn't affect test understanding (e.g., `jest.clearAllMocks()`) 924 | - Mock resets and cleanup operations 925 | - Setting up test infrastructure that's identical across all tests 926 | 927 | ```typescript 928 | // ✅ GOOD - beforeEach for technical housekeeping 929 | describe('ProjectPath', () => { 930 | beforeEach(() => { 931 | jest.clearAllMocks(); // Technical cleanup, not test logic 932 | }); 933 | 934 | it('should validate path exists', () => { 935 | // Test-specific setup visible here 936 | mockExistsSync.mockReturnValue(true); 937 | const result = ProjectPath.create('/path/to/project.xcodeproj'); 938 | expect(result).toBeDefined(); 939 | }); 940 | }); 941 | 942 | // ❌ BAD - Adding mockClear in every test 943 | describe('ProjectPath', () => { 944 | it('should validate path exists', () => { 945 | mockExistsSync.mockClear(); // Repetitive technical detail 946 | mockExistsSync.mockReturnValue(true); 947 | const result = ProjectPath.create('/path/to/project.xcodeproj'); 948 | expect(result).toBeDefined(); 949 | }); 950 | 951 | it('should reject invalid path', () => { 952 | mockExistsSync.mockClear(); // Same line in every test! 953 | mockExistsSync.mockReturnValue(false); 954 | // ... 
955 | }); 956 | }); 957 | ``` 958 | 959 | **Apply DAMP (avoid beforeEach) for:** 960 | - Test data setup that varies between tests 961 | - SUT (System Under Test) creation 962 | - Mock configurations specific to test scenarios 963 | - Anything that helps understand what the test is doing 964 | 965 | #### SUT Creation Pattern - DAMP Over DRY 966 | 967 | For simple SUTs (System Under Test), create them directly in each test for maximum clarity: 968 | 969 | ```typescript 970 | // ✅ GOOD - Create SUT in each test for clarity 971 | describe('XcbeautifyOutputParser', () => { 972 | function createSUT(): IOutputParser { 973 | return new XcbeautifyOutputParser(); 974 | } 975 | 976 | it('should parse error with file information', () => { 977 | // Everything the test needs is visible here 978 | const sut = createSUT(); 979 | const output = '❌ /path/file.swift:10:5: error message'; 980 | 981 | const result = sut.parseBuildOutput(output); 982 | 983 | expect(result.issues[0]).toEqual( 984 | BuildIssue.error('error message', '/path/file.swift', 10, 5) 985 | ); 986 | }); 987 | }); 988 | 989 | // ❌ BAD - Hidden setup in beforeEach 990 | describe('XcbeautifyOutputParser', () => { 991 | let sut: IOutputParser; 992 | 993 | beforeEach(() => { 994 | sut = createSUT(); // Setup hidden from test 995 | }); 996 | 997 | it('should parse error with file information', () => { 998 | // Have to look at beforeEach to understand setup 999 | const output = '❌ /path/file.swift:10:5: error message'; 1000 | const result = sut.parseBuildOutput(output); 1001 | // ... 
1002 | }); 1003 | }); 1004 | ``` 1005 | 1006 | #### SUT with Dependencies Pattern 1007 | 1008 | When the SUT needs mocked dependencies, return both from the factory: 1009 | 1010 | ```typescript 1011 | // ✅ GOOD - Factory returns SUT with its mocks 1012 | describe('XcodePlatformValidator', () => { 1013 | function createSUT() { 1014 | const mockExecute = jest.fn(); 1015 | const mockExecutor: ICommandExecutor = { execute: mockExecute }; 1016 | const sut = new XcodePlatformValidator(mockExecutor); 1017 | return { sut, mockExecute }; 1018 | } 1019 | 1020 | it('should validate platform support', async () => { 1021 | // Everything needed is created together 1022 | const { sut, mockExecute } = createSUT(); 1023 | 1024 | mockExecute.mockResolvedValue({ exitCode: 0, stdout: '', stderr: '' }); 1025 | 1026 | await sut.validate('/path', false, 'MyScheme', Platform.iOS); 1027 | 1028 | expect(mockExecute).toHaveBeenCalled(); 1029 | }); 1030 | }); 1031 | 1032 | // ❌ BAD - Separate mock creation leads to duplication 1033 | describe('XcodePlatformValidator', () => { 1034 | it('should validate platform support', async () => { 1035 | const mockExecute = jest.fn(); 1036 | const mockExecutor = { execute: mockExecute }; 1037 | const sut = new XcodePlatformValidator(mockExecutor); 1038 | // ... rest of test 1039 | }); 1040 | 1041 | it('should handle errors', async () => { 1042 | // Duplicating mock setup 1043 | const mockExecute = jest.fn(); 1044 | const mockExecutor = { execute: mockExecute }; 1045 | const sut = new XcodePlatformValidator(mockExecutor); 1046 | // ... rest of test 1047 | }); 1048 | }); 1049 | ``` 1050 | 1051 | **Why this approach?** 1052 | 1. **Complete visibility** - All setup is visible in the test 1053 | 2. **Self-contained tests** - Each test is independent and complete 1054 | 3. **Easy debugging** - When a test fails, everything is right there 1055 | 4. 
**Follows AAA pattern** - Arrange is explicit in each test 1056 | 1057 | ## Advanced Testing Patterns 1058 | 1059 | ### Mutation Testing - Test Your Tests 1060 | 1061 | Mutation testing injects faults into your code to verify that your tests catch them. It literally "tests your tests". 1062 | 1063 | #### How It Works 1064 | 1. Make small changes (mutations) to your code 1065 | 2. Run tests against mutated code 1066 | 3. Tests should fail ("kill the mutant") 1067 | 4. If tests pass, you have inadequate coverage 1068 | 1069 | #### Example Mutations 1070 | ```typescript 1071 | // Original code 1072 | function isAdult(age: number): boolean { 1073 | return age >= 18; 1074 | } 1075 | 1076 | // Mutations: 1077 | // 1. Change >= to > 1078 | return age > 18; // Tests should catch this 1079 | 1080 | // 2. Change 18 to 17 1081 | return age >= 17; // Tests should catch this 1082 | 1083 | // 3. Change >= to <= 1084 | return age <= 18; // Tests should catch this 1085 | ``` 1086 | 1087 | #### When to Use 1088 | - Mission-critical code 1089 | - Security-sensitive functions 1090 | - Core business logic 1091 | - After major refactoring 1092 | 1093 | ### Approval Testing (Golden Master) 1094 | 1095 | Capture existing behavior as a "golden master" and detect any changes. 
1096 | 1097 | #### When to Use Approval Tests 1098 | 1099 | ```typescript 1100 | // ✅ GOOD: Complex output that's hard to assert 1101 | test('generates PDF report', async () => { 1102 | const pdf = await generateReport(data); 1103 | expect(pdf).toMatchSnapshot(); 1104 | // or 1105 | expect(pdf).toMatchApprovedFile('report.approved.pdf'); 1106 | }); 1107 | 1108 | // ✅ GOOD: Legacy code characterization 1109 | test('existing calculator behavior', () => { 1110 | const results = []; 1111 | for (let i = 0; i < 100; i++) { 1112 | results.push(legacyCalculator.compute(i)); 1113 | } 1114 | expect(results).toMatchSnapshot(); 1115 | }); 1116 | 1117 | // ❌ BAD: Simple values 1118 | test('adds two numbers', () => { 1119 | expect(add(2, 2)).toMatchSnapshot(); // Just use toBe(4)! 1120 | }); 1121 | ``` 1122 | 1123 | #### Key Benefits 1124 | - Quick tests for legacy code 1125 | - Handles complex outputs (PDFs, images, reports) 1126 | - Makes changes clearly visible to reviewers 1127 | - Enables safe refactoring 1128 | 1129 | ### Fuzz Testing 1130 | 1131 | Automatically generate random, invalid, or unexpected inputs to find edge cases and security vulnerabilities. 
1132 | 1133 | #### Example Implementation 1134 | ```typescript 1135 | import fc from 'fast-check'; 1136 | 1137 | test('device ID parser handles any input safely', () => { 1138 | fc.assert( 1139 | fc.property(fc.string(), (input) => { 1140 | // Should never throw unhandled exception 1141 | try { 1142 | const result = parseDeviceId(input); 1143 | // If it returns a result, it should be valid 1144 | if (result) { 1145 | expect(result.id).toBeTruthy(); 1146 | expect(result.type).toMatch(/uuid|name/); 1147 | } 1148 | return true; 1149 | } catch (e) { 1150 | // Should only throw expected errors 1151 | expect(e.message).toMatch(/Invalid device ID|Empty input/); 1152 | return true; 1153 | } 1154 | }) 1155 | ); 1156 | }); 1157 | ``` 1158 | 1159 | #### What Fuzzing Finds 1160 | - Buffer overflows 1161 | - SQL injection vulnerabilities 1162 | - XSS vulnerabilities 1163 | - Race conditions 1164 | - Memory leaks 1165 | - Unexpected crashes 1166 | 1167 | ### Testing Async Code 1168 | 1169 | #### Common Pitfalls and Solutions 1170 | 1171 | ```typescript 1172 | // ❌ BAD: Not waiting for promise 1173 | test('async operation', () => { 1174 | doAsyncThing(); // Test passes before this completes! 1175 | expect(result).toBe(true); 1176 | }); 1177 | 1178 | // ❌ BAD: Mixing callbacks and promises 1179 | test('async operation', (done) => { 1180 | doAsyncThing().then(result => { 1181 | expect(result).toBe(true); 1182 | done(); // Easy to forget! 
1183 | }); 1184 | }); 1185 | 1186 | // ✅ GOOD: async/await 1187 | test('async operation', async () => { 1188 | const result = await doAsyncThing(); 1189 | expect(result).toBe(true); 1190 | }); 1191 | 1192 | // ✅ GOOD: Testing race conditions 1193 | test('handles concurrent requests', async () => { 1194 | const promises = [ 1195 | fetchUser('alice'), 1196 | fetchUser('bob'), 1197 | fetchUser('charlie') 1198 | ]; 1199 | 1200 | const results = await Promise.all(promises); 1201 | expect(results).toHaveLength(3); 1202 | expect(new Set(results.map(r => r.id)).size).toBe(3); // All unique 1203 | }); 1204 | 1205 | // ✅ GOOD: Testing timeouts 1206 | test('times out after 5 seconds', async () => { 1207 | jest.useFakeTimers(); 1208 | 1209 | const promise = fetchWithTimeout(url, 5000); 1210 | jest.advanceTimersByTime(5001); 1211 | 1212 | await expect(promise).rejects.toThrow('Timeout'); 1213 | jest.useRealTimers(); 1214 | }); 1215 | ``` 1216 | 1217 | ## Testing Anti-Patterns 1218 | 1219 | ### Common Test Smells 1220 | 1221 | #### 1. Mystery Guest 1222 | ```typescript 1223 | // ❌ BAD: External dependency hidden 1224 | test('processes user data', async () => { 1225 | const result = await processUser('user-123'); // What's user-123? 1226 | expect(result.name).toBe('Alice'); // Why Alice? 1227 | }); 1228 | 1229 | // ✅ GOOD: Self-contained test 1230 | test('processes user data', async () => { 1231 | const testUser = { 1232 | id: 'user-123', 1233 | name: 'Alice', 1234 | email: '[email protected]' 1235 | }; 1236 | await createTestUser(testUser); 1237 | 1238 | const result = await processUser(testUser.id); 1239 | expect(result.name).toBe(testUser.name); 1240 | }); 1241 | ``` 1242 | 1243 | #### 2. 
Eager Test 1244 | ```typescript 1245 | // ❌ BAD: Testing too much in one test 1246 | test('user workflow', async () => { 1247 | const user = await createUser(data); 1248 | expect(user.id).toBeDefined(); 1249 | 1250 | const updated = await updateUser(user.id, newData); 1251 | expect(updated.name).toBe(newData.name); 1252 | 1253 | const deleted = await deleteUser(user.id); 1254 | expect(deleted).toBe(true); 1255 | 1256 | const fetched = await getUser(user.id); 1257 | expect(fetched).toBeNull(); 1258 | }); 1259 | 1260 | // ✅ GOOD: Focused tests 1261 | test('creates user with valid data', async () => { 1262 | const user = await createUser(validData); 1263 | expect(user.id).toBeDefined(); 1264 | expect(user.name).toBe(validData.name); 1265 | }); 1266 | 1267 | test('updates existing user', async () => { 1268 | const user = await createTestUser(); 1269 | const updated = await updateUser(user.id, { name: 'New Name' }); 1270 | expect(updated.name).toBe('New Name'); 1271 | }); 1272 | ``` 1273 | 1274 | #### 3. Excessive Setup (General Fixture) 1275 | ```typescript 1276 | // ❌ BAD: Setting up everything for every test 1277 | beforeEach(() => { 1278 | createDatabase(); 1279 | seedUsers(100); 1280 | seedProducts(500); 1281 | seedOrders(1000); 1282 | setupMockServers(); 1283 | initializeCache(); 1284 | }); 1285 | 1286 | test('gets user by id', async () => { 1287 | // Only needs one user! 1288 | const user = await getUser('user-1'); 1289 | expect(user.name).toBe('User 1'); 1290 | }); 1291 | 1292 | // ✅ GOOD: Minimal setup 1293 | test('gets user by id', async () => { 1294 | const user = await createTestUser({ name: 'Test User' }); 1295 | const fetched = await getUser(user.id); 1296 | expect(fetched.name).toBe('Test User'); 1297 | }); 1298 | ``` 1299 | 1300 | #### 4. 
Assertion Roulette 1301 | ```typescript 1302 | // ❌ BAD: Multiple assertions without context 1303 | test('processes order', () => { 1304 | const order = processOrder(data); 1305 | expect(order.id).toBeDefined(); 1306 | expect(order.total).toBe(100); 1307 | expect(order.items).toHaveLength(3); 1308 | expect(order.status).toBe('pending'); 1309 | expect(order.customer).toBeDefined(); 1310 | }); 1311 | 1312 | // ✅ GOOD: Each assertion carries context explaining what it verifies 1313 | test('processes order', () => { 1314 | const order = processOrder(data); 1315 | 1316 | expect(order.id).toBeDefined(); // order was assigned an identifier 1317 | expect(order.total).toBe(100); // total reflects the summed item prices 1318 | expect(order.items).toHaveLength(3); // all submitted items were kept 1319 | expect(order.status).toBe('pending'); // new orders start as pending 1320 | expect(order.customer).toBeDefined(); // customer was linked to the order 1321 | }); 1322 | ``` 1323 | 1324 | #### 5. Test Code Duplication 1325 | ```typescript 1326 | // ❌ BAD: Copying setup code 1327 | test('test 1', () => { 1328 | const device = { 1329 | id: 'test-id', 1330 | name: 'iPhone', 1331 | state: 'Booted' 1332 | }; 1333 | // ... test logic 1334 | }); 1335 | 1336 | test('test 2', () => { 1337 | const device = { 1338 | id: 'test-id', 1339 | name: 'iPhone', 1340 | state: 'Booted' 1341 | }; 1342 | // ... test logic 1343 | }); 1344 | 1345 | // ✅ GOOD: Extract factory function 1346 | function createTestDevice(overrides = {}) { 1347 | return { 1348 | id: 'test-id', 1349 | name: 'iPhone', 1350 | state: 'Booted', 1351 | ...overrides 1352 | }; 1353 | } 1354 | 1355 | test('test 1', () => { 1356 | const device = createTestDevice(); 1357 | // ... test logic 1358 | }); 1359 | 1360 | test('test 2', () => { 1361 | const device = createTestDevice({ state: 'Shutdown' }); 1362 | // ... 
test logic 1363 | }); 1364 | ``` 1365 | 1366 | ## Architecture-Specific Testing 1367 | 1368 | ### Hexagonal Architecture (Ports & Adapters) 1369 | 1370 | #### Test Boundaries 1371 | ```typescript 1372 | // Domain (Hexagon Core) 1373 | class DeviceService { 1374 | constructor( 1375 | private deviceRepo: DeviceRepository, // Port 1376 | private notifier: NotificationService // Port 1377 | ) {} 1378 | 1379 | async bootDevice(id: string): Promise<void> { 1380 | const device = await this.deviceRepo.find(id); 1381 | if (!device) throw new Error('Device not found'); 1382 | 1383 | await device.boot(); 1384 | await this.deviceRepo.save(device); 1385 | await this.notifier.notify(`Device ${id} booted`); 1386 | } 1387 | } 1388 | 1389 | // Test at port boundary - mock adapters 1390 | test('boots device through service', async () => { 1391 | const mockRepo = { 1392 | find: jest.fn().mockResolvedValue(testDevice), 1393 | save: jest.fn().mockResolvedValue(void 0) 1394 | }; 1395 | const mockNotifier = { 1396 | notify: jest.fn().mockResolvedValue(void 0) 1397 | }; 1398 | 1399 | const service = new DeviceService(mockRepo, mockNotifier); 1400 | await service.bootDevice('test-id'); 1401 | 1402 | expect(mockRepo.save).toHaveBeenCalledWith( 1403 | expect.objectContaining({ state: 'Booted' }) 1404 | ); 1405 | expect(mockNotifier.notify).toHaveBeenCalled(); 1406 | }); 1407 | 1408 | // Contract test for adapter 1409 | test('repository adapter fulfills contract', async () => { 1410 | const repo = new MongoDeviceRepository(); 1411 | const device = await repo.find('test-id'); 1412 | 1413 | // Verify contract shape 1414 | expect(device).toMatchObject({ 1415 | id: expect.any(String), 1416 | name: expect.any(String), 1417 | boot: expect.any(Function) 1418 | }); 1419 | }); 1420 | ``` 1421 | 1422 | #### Key Benefits 1423 | - Test core logic without infrastructure 1424 | - Fast unit tests for business rules 1425 | - Contract tests ensure adapters comply 1426 | - Easy to swap implementations 1427 | 
1428 | ### Microservices Testing 1429 | 1430 | #### Testing Strategy Pyramid 1431 | ``` 1432 | /\ 1433 | /e2e\ <- Cross-service journeys 1434 | /------\ 1435 | /contract\ <- Service boundaries (Pact) 1436 | /----------\ 1437 | /integration \ <- Within service 1438 | /--------------\ 1439 | / unit \ <- Business logic 1440 | /------------------\ 1441 | ``` 1442 | 1443 | #### Consumer-Driven Contract Testing 1444 | ```typescript 1445 | // Consumer defines expectations 1446 | const deviceServiceContract = { 1447 | 'get device': { 1448 | request: { 1449 | method: 'GET', 1450 | path: '/devices/123' 1451 | }, 1452 | response: { 1453 | status: 200, 1454 | body: { 1455 | id: '123', 1456 | name: 'iPhone 15', 1457 | state: 'Booted' 1458 | } 1459 | } 1460 | } 1461 | }; 1462 | 1463 | // Provider verifies it can fulfill 1464 | test('device service fulfills contract', async () => { 1465 | const response = await request(app) 1466 | .get('/devices/123') 1467 | .expect(200); 1468 | 1469 | expect(response.body).toMatchObject({ 1470 | id: expect.any(String), 1471 | name: expect.any(String), 1472 | state: expect.stringMatching(/Booted|Shutdown/) 1473 | }); 1474 | }); 1475 | ``` 1476 | 1477 | ## Practical Guidelines 1478 | 1479 | ### Test Organization 1480 | 1481 | #### AAA Pattern (Arrange-Act-Assert) 1482 | ```typescript 1483 | test('should boot simulator when device exists', async () => { 1484 | // Arrange 1485 | const mockDevice = createMockDevice({ state: 'Shutdown' }); 1486 | const tool = new BootSimulatorTool(); 1487 | mockDevices.find.mockResolvedValue(mockDevice); 1488 | 1489 | // Act 1490 | const result = await tool.execute({ deviceId: 'iPhone-15' }); 1491 | 1492 | // Assert 1493 | expect(result.success).toBe(true); 1494 | expect(result.message).toContain('booted'); 1495 | }); 1496 | ``` 1497 | 1498 | #### Given-When-Then (BDD Style) 1499 | ```typescript 1500 | test('boots simulator successfully', async () => { 1501 | // Given a shutdown simulator exists 1502 | const device = 
givenAShutdownSimulator(); 1503 | 1504 | // When I boot the simulator 1505 | const result = await whenIBootSimulator(device.id); 1506 | 1507 | // Then the simulator should be booted 1508 | thenSimulatorShouldBeBooted(result); 1509 | }); 1510 | ``` 1511 | 1512 | ### Test Data Management 1513 | 1514 | #### Builder Pattern 1515 | ```typescript 1516 | class DeviceBuilder { 1517 | private device = { 1518 | id: 'default-id', 1519 | name: 'iPhone 15', 1520 | state: 'Shutdown', 1521 | platform: 'iOS' 1522 | }; 1523 | 1524 | withId(id: string): this { 1525 | this.device.id = id; 1526 | return this; 1527 | } 1528 | 1529 | withState(state: string): this { 1530 | this.device.state = state; 1531 | return this; 1532 | } 1533 | 1534 | booted(): this { 1535 | this.device.state = 'Booted'; 1536 | return this; 1537 | } 1538 | 1539 | build(): Device { 1540 | return { ...this.device }; 1541 | } 1542 | } 1543 | 1544 | // Usage 1545 | const device = new DeviceBuilder() 1546 | .withId('test-123') 1547 | .booted() 1548 | .build(); 1549 | ``` 1550 | 1551 | #### Object Mother Pattern 1552 | ```typescript 1553 | class DeviceMother { 1554 | static bootedIPhone(): Device { 1555 | return { 1556 | id: 'iphone-test', 1557 | name: 'iPhone 15 Pro', 1558 | state: 'Booted', 1559 | platform: 'iOS' 1560 | }; 1561 | } 1562 | 1563 | static shutdownAndroid(): Device { 1564 | return { 1565 | id: 'android-test', 1566 | name: 'Pixel 8', 1567 | state: 'Shutdown', 1568 | platform: 'Android' 1569 | }; 1570 | } 1571 | } 1572 | 1573 | // Usage 1574 | const device = DeviceMother.bootedIPhone(); 1575 | ``` 1576 | 1577 | ## Jest TypeScript Mocking Best Practices 1578 | 1579 | ### 1. Always Provide Explicit Type Signatures to jest.fn() 1580 | 1581 | **Principle**: TypeScript requires explicit function signatures for proper type inference with mocks. 
1582 | 1583 | #### ❌ Bad - Causes "type never" errors 1584 | ```typescript 1585 | const mockFunction = jest.fn(); 1586 | mockFunction.mockResolvedValue({ success: true }); // Error: type 'never' 1587 | ``` 1588 | 1589 | #### ✅ Good - Consistent Approach with @jest/globals 1590 | ```typescript 1591 | // Always import from @jest/globals for consistency 1592 | import { describe, it, expect, jest, beforeEach } from '@jest/globals'; 1593 | 1594 | // Use single type parameter with function signature 1595 | const mockFunction = jest.fn<() => Promise<{ success: boolean }>>(); 1596 | mockFunction.mockResolvedValue({ success: true }); // Works! 1597 | 1598 | // With parameters 1599 | const mockBuildProject = jest.fn<(options: BuildOptions) => Promise<BuildResult>>(); 1600 | 1601 | // Multiple parameters 1602 | const mockCallback = jest.fn<(error: Error | null, data?: string) => void>(); 1603 | 1604 | // Optional parameters 1605 | const mockExecute = jest.fn<(command: string, options?: ExecutionOptions) => Promise<ExecutionResult>>(); 1606 | ``` 1607 | 1608 | #### Using Interface Properties 1609 | ```typescript 1610 | // When mocking interface methods, use the property directly 1611 | const mockFindApp = jest.fn<IAppLocator['findApp']>(); 1612 | const mockSaveLog = jest.fn<ILogManager['saveLog']>(); 1613 | ``` 1614 | 1615 | #### Factory Pattern for Mocks 1616 | ```typescript 1617 | function createSUT() { 1618 | const mockExecute = jest.fn<(command: string, options?: ExecutionOptions) => Promise<ExecutionResult>>(); 1619 | const mockExecutor: ICommandExecutor = { 1620 | execute: mockExecute 1621 | }; 1622 | const sut = new MyService(mockExecutor); 1623 | return { sut, mockExecute }; // Return both for easy access in tests 1624 | } 1625 | ``` 1626 | 1627 | **Important**: 1628 | - Always import `jest` from `@jest/globals` for consistent type behavior 1629 | - Use single type parameter with complete function signature 1630 | - This approach avoids TypeScript errors and provides 
proper type inference 1631 | 1632 | ### 2. Handle instanceof Checks with Object.create() 1633 | 1634 | **Principle**: When code uses `instanceof` checks, create mocks that pass these checks. 1635 | 1636 | #### ❌ Bad - Plain object fails instanceof 1637 | ```typescript 1638 | const mockXcodeProject = { 1639 | buildProject: jest.fn() 1640 | }; 1641 | // Fails: if (!(project instanceof XcodeProject)) 1642 | ``` 1643 | 1644 | #### ✅ Good - Use Object.create with prototype 1645 | ```typescript 1646 | const mockBuildProject = jest.fn<(options: any) => Promise<any>>(); 1647 | const mockXcodeProject = Object.create(XcodeProject.prototype); 1648 | mockXcodeProject.buildProject = mockBuildProject; 1649 | // Passes: if (project instanceof XcodeProject) ✓ 1650 | ``` 1651 | 1652 | ### 3. Match Async vs Sync Return Types 1653 | 1654 | **Principle**: Use the correct mock method based on function return type. 1655 | 1656 | #### ❌ Bad - Mixing async/sync 1657 | ```typescript 1658 | const mockSync = jest.fn<() => string>(); 1659 | mockSync.mockResolvedValue('result'); // Wrong! Use mockReturnValue 1660 | 1661 | const mockAsync = jest.fn<() => Promise<string>>(); 1662 | mockAsync.mockReturnValue('result'); // Wrong! Use mockResolvedValue 1663 | ``` 1664 | 1665 | #### ✅ Good - Match the return type 1666 | ```typescript 1667 | // Synchronous 1668 | const mockSync = jest.fn<() => string>(); 1669 | mockSync.mockReturnValue('result'); 1670 | 1671 | // Asynchronous 1672 | const mockAsync = jest.fn<() => Promise<string>>(); 1673 | mockAsync.mockResolvedValue('result'); 1674 | ``` 1675 | 1676 | ### 4. Mock Module Imports Correctly 1677 | 1678 | **Principle**: Mock at module level and type the mocks properly. 
1679 | 1680 | ```typescript 1681 | // Mock the module 1682 | jest.mock('fs', () => ({ 1683 | existsSync: jest.fn() 1684 | })); 1685 | 1686 | // Import and type the mock 1687 | import { existsSync } from 'fs'; 1688 | const mockExistsSync = existsSync as jest.MockedFunction<typeof existsSync>; 1689 | 1690 | // Use in tests 1691 | beforeEach(() => { 1692 | mockExistsSync.mockReturnValue(true); 1693 | }); 1694 | ``` 1695 | 1696 | ### 5. Never Use Type Casting - Fix the Root Cause 1697 | 1698 | **Principle**: Type casting hides problems. Fix the types properly instead. 1699 | 1700 | #### ❌ Bad - Type casting 1701 | ```typescript 1702 | const mockFunction = jest.fn() as any; 1703 | const mockFunction = jest.fn() as jest.Mock; 1704 | ``` 1705 | 1706 | #### ✅ Good - Proper typing 1707 | ```typescript 1708 | type BuildFunction = (path: string) => Promise<BuildResult>; 1709 | const mockBuild = jest.fn<BuildFunction>(); 1710 | ``` 1711 | 1712 | ### 6. Sequential Mock Returns 1713 | 1714 | ```typescript 1715 | const mockExecAsync = jest.fn<(cmd: string) => Promise<{ stdout: string }>>(); 1716 | mockExecAsync 1717 | .mockResolvedValueOnce({ stdout: 'First call' }) 1718 | .mockResolvedValueOnce({ stdout: 'Second call' }) 1719 | .mockRejectedValueOnce(new Error('Third call fails')); 1720 | ``` 1721 | 1722 | ### Handling Flaky Tests 1723 | 1724 | #### Identifying Flaky Tests 1725 | 1. Run tests multiple times 1726 | 2. Track failure patterns 1727 | 3. Look for timing dependencies 1728 | 4. 
Check for shared state 1729 | 1730 | #### Common Causes and Fixes 1731 | ```typescript 1732 | // ❌ FLAKY: Race condition 1733 | test('concurrent operations', async () => { 1734 | startOperation1(); 1735 | startOperation2(); 1736 | await wait(100); // Arbitrary wait 1737 | expect(getResult()).toBe('complete'); 1738 | }); 1739 | 1740 | // ✅ FIXED: Proper synchronization 1741 | test('concurrent operations', async () => { 1742 | const op1 = startOperation1(); 1743 | const op2 = startOperation2(); 1744 | await Promise.all([op1, op2]); 1745 | expect(getResult()).toBe('complete'); 1746 | }); 1747 | 1748 | // ❌ FLAKY: External dependency 1749 | test('fetches weather', async () => { 1750 | const weather = await fetchWeather('London'); 1751 | expect(weather.temp).toBeGreaterThan(0); 1752 | }); 1753 | 1754 | // ✅ FIXED: Mock external service 1755 | test('fetches weather', async () => { 1756 | mockWeatherAPI.mockResolvedValue({ temp: 20, condition: 'sunny' }); 1757 | const weather = await fetchWeather('London'); 1758 | expect(weather.temp).toBe(20); 1759 | }); 1760 | ``` 1761 | 1762 | ## Troubleshooting Jest TypeScript Issues 1763 | 1764 | ### Common Problems and Solutions 1765 | 1766 | #### Problem: "Argument of type X is not assignable to parameter of type 'never'" 1767 | **Solution**: Add explicit type signature to jest.fn() 1768 | ```typescript 1769 | // Wrong 1770 | const mock = jest.fn(); 1771 | // Right 1772 | const mock = jest.fn<() => Promise<string>>(); 1773 | ``` 1774 | 1775 | #### Problem: "Expected 0-1 type arguments, but got 2" (TS2558) 1776 | **Solution**: You're using old Jest syntax. 
Use modern syntax: 1777 | ```typescript 1778 | // Wrong (old syntax) 1779 | jest.fn<any, any[]>() 1780 | // Right (modern syntax) 1781 | jest.fn<(...args: any[]) => any>() 1782 | ``` 1783 | 1784 | #### Problem: instanceof checks failing in tests 1785 | **Solution**: Use Object.create(ClassName.prototype) for the mock object 1786 | 1787 | #### Problem: Mock structure doesn't match actual implementation 1788 | **Solution**: Always verify the actual interface by reading the source code 1789 | 1790 | #### Problem: Validation errors not caught in tests 1791 | **Solution**: Use `await expect(...).rejects.toThrow()` for code that throws 1792 | 1793 | ## References 1794 | 1795 | ### Core Testing Philosophy 1796 | 1. "Parse, Don't Validate" - Alexis King 1797 | 2. "Domain Primitives" - Secure by Design (Dan Bergh Johnsson, Daniel Deogun, Daniel Sawano) 1798 | 3. "Write tests. Not too many. Mostly integration." - Kent C. Dodds 1799 | 4. "Test Behavior, Not Implementation" - Martin Fowler 1800 | 5. "Working Effectively with Legacy Code" - Michael Feathers 1801 | 1802 | ### Testing Techniques 1803 | 6. "Property-Based Testing" - QuickCheck (Koen Claessen and John Hughes) 1804 | 7. "Consumer-Driven Contracts" - Pact 1805 | 8. "The Art of Unit Testing" - Roy Osherove 1806 | 9. "Growing Object-Oriented Software, Guided by Tests" - Steve Freeman and Nat Pryce 1807 | 10. "xUnit Test Patterns" - Gerard Meszaros 1808 | 1809 | ### Modern Approaches 1810 | 11. "Testing Trophy" - Kent C. Dodds 1811 | 12. "Mutation Testing" - PITest, Stryker 1812 | 13. "Approval Tests" - Llewellyn Falco 1813 | 14. "Hexagonal Architecture" - Alistair Cockburn 1814 | 15. "FIRST Principles" - Clean Code (Robert C. Martin) ```