This is page 28 of 28. Use http://codebase.md/trycua/cua?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .cursorignore
├── .dockerignore
├── .editorconfig
├── .gitattributes
├── .github
│ ├── FUNDING.yml
│ ├── scripts
│ │ ├── get_pyproject_version.py
│ │ └── tests
│ │ ├── __init__.py
│ │ ├── README.md
│ │ └── test_get_pyproject_version.py
│ └── workflows
│ ├── bump-version.yml
│ ├── ci-lume.yml
│ ├── docker-publish-cua-linux.yml
│ ├── docker-publish-cua-windows.yml
│ ├── docker-publish-kasm.yml
│ ├── docker-publish-xfce.yml
│ ├── docker-reusable-publish.yml
│ ├── link-check.yml
│ ├── lint.yml
│ ├── npm-publish-cli.yml
│ ├── npm-publish-computer.yml
│ ├── npm-publish-core.yml
│ ├── publish-lume.yml
│ ├── pypi-publish-agent.yml
│ ├── pypi-publish-computer-server.yml
│ ├── pypi-publish-computer.yml
│ ├── pypi-publish-core.yml
│ ├── pypi-publish-mcp-server.yml
│ ├── pypi-publish-som.yml
│ ├── pypi-reusable-publish.yml
│ ├── python-tests.yml
│ ├── test-cua-models.yml
│ └── test-validation-script.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .prettierignore
├── .prettierrc.yaml
├── .vscode
│ ├── docs.code-workspace
│ ├── extensions.json
│ ├── launch.json
│ ├── libs-ts.code-workspace
│ ├── lume.code-workspace
│ ├── lumier.code-workspace
│ ├── py.code-workspace
│ └── settings.json
├── blog
│ ├── app-use.md
│ ├── assets
│ │ ├── composite-agents.png
│ │ ├── docker-ubuntu-support.png
│ │ ├── hack-booth.png
│ │ ├── hack-closing-ceremony.jpg
│ │ ├── hack-cua-ollama-hud.jpeg
│ │ ├── hack-leaderboard.png
│ │ ├── hack-the-north.png
│ │ ├── hack-winners.jpeg
│ │ ├── hack-workshop.jpeg
│ │ ├── hud-agent-evals.png
│ │ └── trajectory-viewer.jpeg
│ ├── bringing-computer-use-to-the-web.md
│ ├── build-your-own-operator-on-macos-1.md
│ ├── build-your-own-operator-on-macos-2.md
│ ├── cloud-windows-ga-macos-preview.md
│ ├── composite-agents.md
│ ├── computer-use-agents-for-growth-hacking.md
│ ├── cua-hackathon.md
│ ├── cua-playground-preview.md
│ ├── cua-vlm-router.md
│ ├── hack-the-north.md
│ ├── hud-agent-evals.md
│ ├── human-in-the-loop.md
│ ├── introducing-cua-cli.md
│ ├── introducing-cua-cloud-containers.md
│ ├── lume-to-containerization.md
│ ├── neurips-2025-cua-papers.md
│ ├── sandboxed-python-execution.md
│ ├── training-computer-use-models-trajectories-1.md
│ ├── trajectory-viewer.md
│ ├── ubuntu-docker-support.md
│ └── windows-sandbox.md
├── CONTRIBUTING.md
├── Development.md
├── Dockerfile
├── docs
│ ├── .env.example
│ ├── .gitignore
│ ├── content
│ │ └── docs
│ │ ├── agent-sdk
│ │ │ ├── agent-loops.mdx
│ │ │ ├── benchmarks
│ │ │ │ ├── index.mdx
│ │ │ │ ├── interactive.mdx
│ │ │ │ ├── introduction.mdx
│ │ │ │ ├── meta.json
│ │ │ │ ├── osworld-verified.mdx
│ │ │ │ ├── screenspot-pro.mdx
│ │ │ │ └── screenspot-v2.mdx
│ │ │ ├── callbacks
│ │ │ │ ├── agent-lifecycle.mdx
│ │ │ │ ├── cost-saving.mdx
│ │ │ │ ├── index.mdx
│ │ │ │ ├── logging.mdx
│ │ │ │ ├── meta.json
│ │ │ │ ├── pii-anonymization.mdx
│ │ │ │ └── trajectories.mdx
│ │ │ ├── chat-history.mdx
│ │ │ ├── custom-tools.mdx
│ │ │ ├── customizing-computeragent.mdx
│ │ │ ├── integrations
│ │ │ │ ├── hud.mdx
│ │ │ │ ├── meta.json
│ │ │ │ └── observability.mdx
│ │ │ ├── mcp-server
│ │ │ │ ├── client-integrations.mdx
│ │ │ │ ├── configuration.mdx
│ │ │ │ ├── index.mdx
│ │ │ │ ├── installation.mdx
│ │ │ │ ├── llm-integrations.mdx
│ │ │ │ ├── meta.json
│ │ │ │ ├── tools.mdx
│ │ │ │ └── usage.mdx
│ │ │ ├── message-format.mdx
│ │ │ ├── meta.json
│ │ │ ├── migration-guide.mdx
│ │ │ ├── prompt-caching.mdx
│ │ │ ├── supported-agents
│ │ │ │ ├── composed-agents.mdx
│ │ │ │ ├── computer-use-agents.mdx
│ │ │ │ ├── grounding-models.mdx
│ │ │ │ ├── human-in-the-loop.mdx
│ │ │ │ └── meta.json
│ │ │ ├── supported-model-providers
│ │ │ │ ├── cua-vlm-router.mdx
│ │ │ │ ├── index.mdx
│ │ │ │ └── local-models.mdx
│ │ │ ├── telemetry.mdx
│ │ │ └── usage-tracking.mdx
│ │ ├── cli-playbook
│ │ │ ├── commands.mdx
│ │ │ ├── index.mdx
│ │ │ └── meta.json
│ │ ├── computer-sdk
│ │ │ ├── cloud-vm-management.mdx
│ │ │ ├── commands.mdx
│ │ │ ├── computer-server
│ │ │ │ ├── Commands.mdx
│ │ │ │ ├── index.mdx
│ │ │ │ ├── meta.json
│ │ │ │ ├── REST-API.mdx
│ │ │ │ └── WebSocket-API.mdx
│ │ │ ├── computer-ui.mdx
│ │ │ ├── computers.mdx
│ │ │ ├── custom-computer-handlers.mdx
│ │ │ ├── meta.json
│ │ │ ├── sandboxed-python.mdx
│ │ │ └── tracing-api.mdx
│ │ ├── example-usecases
│ │ │ ├── form-filling.mdx
│ │ │ ├── gemini-complex-ui-navigation.mdx
│ │ │ ├── meta.json
│ │ │ ├── post-event-contact-export.mdx
│ │ │ └── windows-app-behind-vpn.mdx
│ │ ├── get-started
│ │ │ ├── meta.json
│ │ │ └── quickstart.mdx
│ │ ├── index.mdx
│ │ ├── macos-vm-cli-playbook
│ │ │ ├── lume
│ │ │ │ ├── cli-reference.mdx
│ │ │ │ ├── faq.md
│ │ │ │ ├── http-api.mdx
│ │ │ │ ├── index.mdx
│ │ │ │ ├── installation.mdx
│ │ │ │ ├── meta.json
│ │ │ │ └── prebuilt-images.mdx
│ │ │ ├── lumier
│ │ │ │ ├── building-lumier.mdx
│ │ │ │ ├── docker-compose.mdx
│ │ │ │ ├── docker.mdx
│ │ │ │ ├── index.mdx
│ │ │ │ ├── installation.mdx
│ │ │ │ └── meta.json
│ │ │ └── meta.json
│ │ └── meta.json
│ ├── next.config.mjs
│ ├── package-lock.json
│ ├── package.json
│ ├── pnpm-lock.yaml
│ ├── postcss.config.mjs
│ ├── public
│ │ └── img
│ │ ├── agent_gradio_ui.png
│ │ ├── agent.png
│ │ ├── bg-dark.jpg
│ │ ├── bg-light.jpg
│ │ ├── cli.png
│ │ ├── computer.png
│ │ ├── grounding-with-gemini3.gif
│ │ ├── hero.png
│ │ ├── laminar_trace_example.png
│ │ ├── som_box_threshold.png
│ │ └── som_iou_threshold.png
│ ├── README.md
│ ├── source.config.ts
│ ├── src
│ │ ├── app
│ │ │ ├── (home)
│ │ │ │ ├── [[...slug]]
│ │ │ │ │ └── page.tsx
│ │ │ │ └── layout.tsx
│ │ │ ├── api
│ │ │ │ ├── posthog
│ │ │ │ │ └── [...path]
│ │ │ │ │ └── route.ts
│ │ │ │ └── search
│ │ │ │ └── route.ts
│ │ │ ├── favicon.ico
│ │ │ ├── global.css
│ │ │ ├── layout.config.tsx
│ │ │ ├── layout.tsx
│ │ │ ├── llms.mdx
│ │ │ │ └── [[...slug]]
│ │ │ │ └── route.ts
│ │ │ ├── llms.txt
│ │ │ │ └── route.ts
│ │ │ ├── robots.ts
│ │ │ └── sitemap.ts
│ │ ├── assets
│ │ │ ├── discord-black.svg
│ │ │ ├── discord-white.svg
│ │ │ ├── logo-black.svg
│ │ │ └── logo-white.svg
│ │ ├── components
│ │ │ ├── analytics-tracker.tsx
│ │ │ ├── cookie-consent.tsx
│ │ │ ├── doc-actions-menu.tsx
│ │ │ ├── editable-code-block.tsx
│ │ │ ├── footer.tsx
│ │ │ ├── hero.tsx
│ │ │ ├── iou.tsx
│ │ │ ├── mermaid.tsx
│ │ │ └── page-feedback.tsx
│ │ ├── lib
│ │ │ ├── llms.ts
│ │ │ └── source.ts
│ │ ├── mdx-components.tsx
│ │ └── providers
│ │ └── posthog-provider.tsx
│ └── tsconfig.json
├── examples
│ ├── agent_examples.py
│ ├── agent_ui_examples.py
│ ├── browser_tool_example.py
│ ├── cloud_api_examples.py
│ ├── computer_examples_windows.py
│ ├── computer_examples.py
│ ├── computer_ui_examples.py
│ ├── computer-example-ts
│ │ ├── .env.example
│ │ ├── .gitignore
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ ├── pnpm-lock.yaml
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── helpers.ts
│ │ │ └── index.ts
│ │ └── tsconfig.json
│ ├── docker_examples.py
│ ├── evals
│ │ ├── hud_eval_examples.py
│ │ └── wikipedia_most_linked.txt
│ ├── pylume_examples.py
│ ├── sandboxed_functions_examples.py
│ ├── som_examples.py
│ ├── tracing_examples.py
│ ├── utils.py
│ └── winsandbox_example.py
├── img
│ ├── agent_gradio_ui.png
│ ├── agent.png
│ ├── cli.png
│ ├── computer.png
│ ├── logo_black.png
│ └── logo_white.png
├── libs
│ ├── kasm
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src
│ │ └── ubuntu
│ │ └── install
│ │ └── firefox
│ │ ├── custom_startup.sh
│ │ ├── firefox.desktop
│ │ └── install_firefox.sh
│ ├── lume
│ │ ├── .cursorignore
│ │ ├── CONTRIBUTING.md
│ │ ├── Development.md
│ │ ├── img
│ │ │ └── cli.png
│ │ ├── Package.resolved
│ │ ├── Package.swift
│ │ ├── README.md
│ │ ├── resources
│ │ │ └── lume.entitlements
│ │ ├── scripts
│ │ │ ├── build
│ │ │ │ ├── build-debug.sh
│ │ │ │ ├── build-release-notarized.sh
│ │ │ │ └── build-release.sh
│ │ │ └── install.sh
│ │ ├── src
│ │ │ ├── Commands
│ │ │ │ ├── Clone.swift
│ │ │ │ ├── Config.swift
│ │ │ │ ├── Create.swift
│ │ │ │ ├── Delete.swift
│ │ │ │ ├── Get.swift
│ │ │ │ ├── Images.swift
│ │ │ │ ├── IPSW.swift
│ │ │ │ ├── List.swift
│ │ │ │ ├── Logs.swift
│ │ │ │ ├── Options
│ │ │ │ │ └── FormatOption.swift
│ │ │ │ ├── Prune.swift
│ │ │ │ ├── Pull.swift
│ │ │ │ ├── Push.swift
│ │ │ │ ├── Run.swift
│ │ │ │ ├── Serve.swift
│ │ │ │ ├── Set.swift
│ │ │ │ └── Stop.swift
│ │ │ ├── ContainerRegistry
│ │ │ │ ├── ImageContainerRegistry.swift
│ │ │ │ ├── ImageList.swift
│ │ │ │ └── ImagesPrinter.swift
│ │ │ ├── Errors
│ │ │ │ └── Errors.swift
│ │ │ ├── FileSystem
│ │ │ │ ├── Home.swift
│ │ │ │ ├── Settings.swift
│ │ │ │ ├── VMConfig.swift
│ │ │ │ ├── VMDirectory.swift
│ │ │ │ └── VMLocation.swift
│ │ │ ├── LumeController.swift
│ │ │ ├── Main.swift
│ │ │ ├── Server
│ │ │ │ ├── Handlers.swift
│ │ │ │ ├── HTTP.swift
│ │ │ │ ├── Requests.swift
│ │ │ │ ├── Responses.swift
│ │ │ │ └── Server.swift
│ │ │ ├── Utils
│ │ │ │ ├── CommandRegistry.swift
│ │ │ │ ├── CommandUtils.swift
│ │ │ │ ├── Logger.swift
│ │ │ │ ├── NetworkUtils.swift
│ │ │ │ ├── Path.swift
│ │ │ │ ├── ProcessRunner.swift
│ │ │ │ ├── ProgressLogger.swift
│ │ │ │ ├── String.swift
│ │ │ │ └── Utils.swift
│ │ │ ├── Virtualization
│ │ │ │ ├── DarwinImageLoader.swift
│ │ │ │ ├── DHCPLeaseParser.swift
│ │ │ │ ├── ImageLoaderFactory.swift
│ │ │ │ └── VMVirtualizationService.swift
│ │ │ ├── VM
│ │ │ │ ├── DarwinVM.swift
│ │ │ │ ├── LinuxVM.swift
│ │ │ │ ├── VM.swift
│ │ │ │ ├── VMDetails.swift
│ │ │ │ ├── VMDetailsPrinter.swift
│ │ │ │ ├── VMDisplayResolution.swift
│ │ │ │ └── VMFactory.swift
│ │ │ └── VNC
│ │ │ ├── PassphraseGenerator.swift
│ │ │ └── VNCService.swift
│ │ └── tests
│ │ ├── Mocks
│ │ │ ├── MockVM.swift
│ │ │ ├── MockVMVirtualizationService.swift
│ │ │ └── MockVNCService.swift
│ │ ├── VM
│ │ │ └── VMDetailsPrinterTests.swift
│ │ ├── VMTests.swift
│ │ ├── VMVirtualizationServiceTests.swift
│ │ └── VNCServiceTests.swift
│ ├── lumier
│ │ ├── .dockerignore
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ └── src
│ │ ├── bin
│ │ │ └── entry.sh
│ │ ├── config
│ │ │ └── constants.sh
│ │ ├── hooks
│ │ │ └── on-logon.sh
│ │ └── lib
│ │ ├── utils.sh
│ │ └── vm.sh
│ ├── python
│ │ ├── agent
│ │ │ ├── .bumpversion.cfg
│ │ │ ├── agent
│ │ │ │ ├── __init__.py
│ │ │ │ ├── __main__.py
│ │ │ │ ├── adapters
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── cua_adapter.py
│ │ │ │ │ ├── huggingfacelocal_adapter.py
│ │ │ │ │ ├── human_adapter.py
│ │ │ │ │ ├── mlxvlm_adapter.py
│ │ │ │ │ └── models
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── generic.py
│ │ │ │ │ ├── internvl.py
│ │ │ │ │ ├── opencua.py
│ │ │ │ │ └── qwen2_5_vl.py
│ │ │ │ ├── agent.py
│ │ │ │ ├── callbacks
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ ├── budget_manager.py
│ │ │ │ │ ├── image_retention.py
│ │ │ │ │ ├── logging.py
│ │ │ │ │ ├── operator_validator.py
│ │ │ │ │ ├── pii_anonymization.py
│ │ │ │ │ ├── prompt_instructions.py
│ │ │ │ │ ├── telemetry.py
│ │ │ │ │ └── trajectory_saver.py
│ │ │ │ ├── cli.py
│ │ │ │ ├── computers
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ ├── cua.py
│ │ │ │ │ └── custom.py
│ │ │ │ ├── decorators.py
│ │ │ │ ├── human_tool
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── __main__.py
│ │ │ │ │ ├── server.py
│ │ │ │ │ └── ui.py
│ │ │ │ ├── integrations
│ │ │ │ │ └── hud
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── agent.py
│ │ │ │ │ └── proxy.py
│ │ │ │ ├── loops
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── anthropic.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ ├── composed_grounded.py
│ │ │ │ │ ├── gelato.py
│ │ │ │ │ ├── gemini.py
│ │ │ │ │ ├── generic_vlm.py
│ │ │ │ │ ├── glm45v.py
│ │ │ │ │ ├── gta1.py
│ │ │ │ │ ├── holo.py
│ │ │ │ │ ├── internvl.py
│ │ │ │ │ ├── model_types.csv
│ │ │ │ │ ├── moondream3.py
│ │ │ │ │ ├── omniparser.py
│ │ │ │ │ ├── openai.py
│ │ │ │ │ ├── opencua.py
│ │ │ │ │ ├── uiins.py
│ │ │ │ │ ├── uitars.py
│ │ │ │ │ └── uitars2.py
│ │ │ │ ├── proxy
│ │ │ │ │ ├── examples.py
│ │ │ │ │ └── handlers.py
│ │ │ │ ├── responses.py
│ │ │ │ ├── tools
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── browser_tool.py
│ │ │ │ ├── types.py
│ │ │ │ └── ui
│ │ │ │ ├── __init__.py
│ │ │ │ ├── __main__.py
│ │ │ │ └── gradio
│ │ │ │ ├── __init__.py
│ │ │ │ ├── app.py
│ │ │ │ └── ui_components.py
│ │ │ ├── benchmarks
│ │ │ │ ├── .gitignore
│ │ │ │ ├── contrib.md
│ │ │ │ ├── interactive.py
│ │ │ │ ├── models
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ └── gta1.py
│ │ │ │ ├── README.md
│ │ │ │ ├── ss-pro.py
│ │ │ │ ├── ss-v2.py
│ │ │ │ └── utils.py
│ │ │ ├── example.py
│ │ │ ├── pyproject.toml
│ │ │ ├── README.md
│ │ │ └── tests
│ │ │ ├── conftest.py
│ │ │ └── test_computer_agent.py
│ │ ├── bench-ui
│ │ │ ├── bench_ui
│ │ │ │ ├── __init__.py
│ │ │ │ ├── api.py
│ │ │ │ └── child.py
│ │ │ ├── examples
│ │ │ │ ├── folder_example.py
│ │ │ │ ├── gui
│ │ │ │ │ ├── index.html
│ │ │ │ │ ├── logo.svg
│ │ │ │ │ └── styles.css
│ │ │ │ ├── output_overlay.png
│ │ │ │ └── simple_example.py
│ │ │ ├── pyproject.toml
│ │ │ ├── README.md
│ │ │ └── tests
│ │ │ └── test_port_detection.py
│ │ ├── computer
│ │ │ ├── .bumpversion.cfg
│ │ │ ├── computer
│ │ │ │ ├── __init__.py
│ │ │ │ ├── computer.py
│ │ │ │ ├── diorama_computer.py
│ │ │ │ ├── helpers.py
│ │ │ │ ├── interface
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ ├── factory.py
│ │ │ │ │ ├── generic.py
│ │ │ │ │ ├── linux.py
│ │ │ │ │ ├── macos.py
│ │ │ │ │ ├── models.py
│ │ │ │ │ └── windows.py
│ │ │ │ ├── logger.py
│ │ │ │ ├── models.py
│ │ │ │ ├── providers
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ ├── cloud
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── provider.py
│ │ │ │ │ ├── docker
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── provider.py
│ │ │ │ │ ├── factory.py
│ │ │ │ │ ├── lume
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── provider.py
│ │ │ │ │ ├── lume_api.py
│ │ │ │ │ ├── lumier
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── provider.py
│ │ │ │ │ ├── types.py
│ │ │ │ │ └── winsandbox
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── provider.py
│ │ │ │ │ └── setup_script.ps1
│ │ │ │ ├── tracing_wrapper.py
│ │ │ │ ├── tracing.py
│ │ │ │ ├── ui
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── __main__.py
│ │ │ │ │ └── gradio
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── app.py
│ │ │ │ └── utils.py
│ │ │ ├── poetry.toml
│ │ │ ├── pyproject.toml
│ │ │ ├── README.md
│ │ │ └── tests
│ │ │ ├── conftest.py
│ │ │ └── test_computer.py
│ │ ├── computer-server
│ │ │ ├── .bumpversion.cfg
│ │ │ ├── computer_server
│ │ │ │ ├── __init__.py
│ │ │ │ ├── __main__.py
│ │ │ │ ├── browser.py
│ │ │ │ ├── cli.py
│ │ │ │ ├── diorama
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ ├── diorama_computer.py
│ │ │ │ │ ├── diorama.py
│ │ │ │ │ ├── draw.py
│ │ │ │ │ ├── macos.py
│ │ │ │ │ └── safezone.py
│ │ │ │ ├── handlers
│ │ │ │ │ ├── base.py
│ │ │ │ │ ├── factory.py
│ │ │ │ │ ├── generic.py
│ │ │ │ │ ├── linux.py
│ │ │ │ │ ├── macos.py
│ │ │ │ │ └── windows.py
│ │ │ │ ├── main.py
│ │ │ │ ├── server.py
│ │ │ │ ├── utils
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── wallpaper.py
│ │ │ │ └── watchdog.py
│ │ │ ├── examples
│ │ │ │ ├── __init__.py
│ │ │ │ └── usage_example.py
│ │ │ ├── pyproject.toml
│ │ │ ├── README.md
│ │ │ ├── run_server.py
│ │ │ ├── test_connection.py
│ │ │ └── tests
│ │ │ ├── conftest.py
│ │ │ └── test_server.py
│ │ ├── core
│ │ │ ├── .bumpversion.cfg
│ │ │ ├── core
│ │ │ │ ├── __init__.py
│ │ │ │ └── telemetry
│ │ │ │ ├── __init__.py
│ │ │ │ └── posthog.py
│ │ │ ├── poetry.toml
│ │ │ ├── pyproject.toml
│ │ │ ├── README.md
│ │ │ └── tests
│ │ │ ├── conftest.py
│ │ │ └── test_telemetry.py
│ │ ├── mcp-server
│ │ │ ├── .bumpversion.cfg
│ │ │ ├── build-extension.py
│ │ │ ├── CONCURRENT_SESSIONS.md
│ │ │ ├── desktop-extension
│ │ │ │ ├── cua-extension.mcpb
│ │ │ │ ├── desktop_extension.png
│ │ │ │ ├── manifest.json
│ │ │ │ ├── README.md
│ │ │ │ ├── requirements.txt
│ │ │ │ ├── run_server.sh
│ │ │ │ └── setup.py
│ │ │ ├── mcp_server
│ │ │ │ ├── __init__.py
│ │ │ │ ├── __main__.py
│ │ │ │ ├── server.py
│ │ │ │ └── session_manager.py
│ │ │ ├── pdm.lock
│ │ │ ├── pyproject.toml
│ │ │ ├── QUICK_TEST_COMMANDS.sh
│ │ │ ├── quick_test_local_option.py
│ │ │ ├── README.md
│ │ │ ├── scripts
│ │ │ │ ├── install_mcp_server.sh
│ │ │ │ └── start_mcp_server.sh
│ │ │ ├── test_mcp_server_local_option.py
│ │ │ └── tests
│ │ │ ├── conftest.py
│ │ │ └── test_mcp_server.py
│ │ ├── pylume
│ │ │ └── tests
│ │ │ ├── conftest.py
│ │ │ └── test_pylume.py
│ │ └── som
│ │ ├── .bumpversion.cfg
│ │ ├── LICENSE
│ │ ├── poetry.toml
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ ├── som
│ │ │ ├── __init__.py
│ │ │ ├── detect.py
│ │ │ ├── detection.py
│ │ │ ├── models.py
│ │ │ ├── ocr.py
│ │ │ ├── util
│ │ │ │ └── utils.py
│ │ │ └── visualization.py
│ │ └── tests
│ │ ├── conftest.py
│ │ └── test_omniparser.py
│ ├── qemu-docker
│ │ ├── linux
│ │ │ ├── Dockerfile
│ │ │ ├── README.md
│ │ │ └── src
│ │ │ ├── entry.sh
│ │ │ └── vm
│ │ │ ├── image
│ │ │ │ └── README.md
│ │ │ └── setup
│ │ │ ├── install.sh
│ │ │ ├── setup-cua-server.sh
│ │ │ └── setup.sh
│ │ ├── README.md
│ │ └── windows
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ └── src
│ │ ├── entry.sh
│ │ └── vm
│ │ ├── image
│ │ │ └── README.md
│ │ └── setup
│ │ ├── install.bat
│ │ ├── on-logon.ps1
│ │ ├── setup-cua-server.ps1
│ │ ├── setup-utils.psm1
│ │ └── setup.ps1
│ ├── typescript
│ │ ├── .gitignore
│ │ ├── .nvmrc
│ │ ├── agent
│ │ │ ├── examples
│ │ │ │ ├── playground-example.html
│ │ │ │ └── README.md
│ │ │ ├── package.json
│ │ │ ├── README.md
│ │ │ ├── src
│ │ │ │ ├── client.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── types.ts
│ │ │ ├── tests
│ │ │ │ └── client.test.ts
│ │ │ ├── tsconfig.json
│ │ │ ├── tsdown.config.ts
│ │ │ └── vitest.config.ts
│ │ ├── computer
│ │ │ ├── .editorconfig
│ │ │ ├── .gitattributes
│ │ │ ├── .gitignore
│ │ │ ├── LICENSE
│ │ │ ├── package.json
│ │ │ ├── README.md
│ │ │ ├── src
│ │ │ │ ├── computer
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── providers
│ │ │ │ │ │ ├── base.ts
│ │ │ │ │ │ ├── cloud.ts
│ │ │ │ │ │ └── index.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── interface
│ │ │ │ │ ├── base.ts
│ │ │ │ │ ├── factory.ts
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── linux.ts
│ │ │ │ │ ├── macos.ts
│ │ │ │ │ └── windows.ts
│ │ │ │ └── types.ts
│ │ │ ├── tests
│ │ │ │ ├── computer
│ │ │ │ │ └── cloud.test.ts
│ │ │ │ ├── interface
│ │ │ │ │ ├── factory.test.ts
│ │ │ │ │ ├── index.test.ts
│ │ │ │ │ ├── linux.test.ts
│ │ │ │ │ ├── macos.test.ts
│ │ │ │ │ └── windows.test.ts
│ │ │ │ └── setup.ts
│ │ │ ├── tsconfig.json
│ │ │ ├── tsdown.config.ts
│ │ │ └── vitest.config.ts
│ │ ├── core
│ │ │ ├── .editorconfig
│ │ │ ├── .gitattributes
│ │ │ ├── .gitignore
│ │ │ ├── LICENSE
│ │ │ ├── package.json
│ │ │ ├── README.md
│ │ │ ├── src
│ │ │ │ ├── index.ts
│ │ │ │ └── telemetry
│ │ │ │ ├── clients
│ │ │ │ │ ├── index.ts
│ │ │ │ │ └── posthog.ts
│ │ │ │ └── index.ts
│ │ │ ├── tests
│ │ │ │ └── telemetry.test.ts
│ │ │ ├── tsconfig.json
│ │ │ ├── tsdown.config.ts
│ │ │ └── vitest.config.ts
│ │ ├── cua-cli
│ │ │ ├── .gitignore
│ │ │ ├── .prettierrc
│ │ │ ├── bun.lock
│ │ │ ├── CLAUDE.md
│ │ │ ├── index.ts
│ │ │ ├── package.json
│ │ │ ├── README.md
│ │ │ ├── src
│ │ │ │ ├── auth.ts
│ │ │ │ ├── cli.ts
│ │ │ │ ├── commands
│ │ │ │ │ ├── auth.ts
│ │ │ │ │ └── sandbox.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── http.ts
│ │ │ │ ├── storage.ts
│ │ │ │ └── util.ts
│ │ │ └── tsconfig.json
│ │ ├── package.json
│ │ ├── pnpm-lock.yaml
│ │ ├── pnpm-workspace.yaml
│ │ └── README.md
│ └── xfce
│ ├── .dockerignore
│ ├── .gitignore
│ ├── Development.md
│ ├── Dockerfile
│ ├── Dockerfile.dev
│ ├── README.md
│ └── src
│ ├── scripts
│ │ ├── resize-display.sh
│ │ ├── start-computer-server.sh
│ │ ├── start-novnc.sh
│ │ ├── start-vnc.sh
│ │ └── xstartup.sh
│ ├── supervisor
│ │ └── supervisord.conf
│ └── xfce-config
│ ├── helpers.rc
│ ├── xfce4-power-manager.xml
│ └── xfce4-session.xml
├── LICENSE.md
├── Makefile
├── notebooks
│ ├── agent_nb.ipynb
│ ├── blog
│ │ ├── build-your-own-operator-on-macos-1.ipynb
│ │ └── build-your-own-operator-on-macos-2.ipynb
│ ├── composite_agents_docker_nb.ipynb
│ ├── computer_nb.ipynb
│ ├── computer_server_nb.ipynb
│ ├── customizing_computeragent.ipynb
│ ├── eval_osworld.ipynb
│ ├── ollama_nb.ipynb
│ ├── README.md
│ ├── sota_hackathon_cloud.ipynb
│ └── sota_hackathon.ipynb
├── package-lock.json
├── package.json
├── pnpm-lock.yaml
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│ ├── install-cli.ps1
│ ├── install-cli.sh
│ ├── playground-docker.sh
│ ├── playground.sh
│ ├── run-docker-dev.sh
│ └── typescript-typecheck.js
├── TESTING.md
├── tests
│ ├── agent_loop_testing
│ │ ├── agent_test.py
│ │ └── README.md
│ ├── pytest.ini
│ ├── shell_cmd.py
│ ├── test_files.py
│ ├── test_mcp_server_session_management.py
│ ├── test_mcp_server_streaming.py
│ ├── test_shell_bash.py
│ ├── test_telemetry.py
│ ├── test_tracing.py
│ ├── test_venv.py
│ └── test_watchdog.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/libs/lume/src/ContainerRegistry/ImageContainerRegistry.swift:
--------------------------------------------------------------------------------
```swift
1 | import ArgumentParser
2 | import CommonCrypto
3 | import Compression // Add this import
4 | import Darwin
5 | import Foundation
6 | import Swift
7 |
8 | // Computes the SHA-256 digest of this Data as a lowercase hex string.
9 | extension Data {
10 | func sha256String() -> String {
11 | let hash = self.withUnsafeBytes { (bytes: UnsafeRawBufferPointer) -> [UInt8] in
12 | var hash = [UInt8](repeating: 0, count: Int(CC_SHA256_DIGEST_LENGTH)) // 32-byte digest buffer
13 | CC_SHA256(bytes.baseAddress, CC_LONG(self.count), &hash) // CommonCrypto one-shot hash
14 | return hash
15 | }
16 | return hash.map { String(format: "%02x", $0) }.joined() // two lowercase hex chars per byte
17 | }
18 | }
19 |
20 | // Errors thrown by registry push and pull operations (despite the name, this enum also covers downloads).
21 | enum PushError: Error {
22 | case uploadInitiationFailed // could not start a blob upload session
23 | case blobUploadFailed // blob upload did not complete
24 | case manifestPushFailed // manifest upload was rejected
25 | case authenticationFailed // credentials were rejected
26 | case missingToken // no auth token available for the request
27 | case invalidURL // a registry URL could not be constructed
28 | case lz4NotFound // lz4 tool unavailable for (de)compression
29 | case invalidMediaType // layer media type not recognized
30 | case missingUncompressedSizeAnnotation // sparse-file handling requires the uncompressed-size annotation
31 | case fileCreationFailed(String) // could not create the sparse output file at the given path
32 | case reassemblySetupFailed(path: String, underlyingError: Error?) // preparing disk-part reassembly failed
33 | case missingPart(Int) // a numbered disk part was never collected
34 | case layerDownloadFailed(String) // layer download failed after retries
35 | case manifestFetchFailed // manifest fetch failed
36 | case insufficientPermissions(String) // registry denied access
37 | }
38 |
39 | // Placeholder error for contexts where an `Error?` slot must be filled but no underlying error exists.
40 | struct NoSpecificUnderlyingError: Error, CustomStringConvertible {
41 | var description: String { "No specific underlying error was provided." }
42 | }
43 |
44 | struct ChunkMetadata: Codable { // Digests and sizes for one disk chunk, before and after compression.
45 | let uncompressedDigest: String
46 | let uncompressedSize: UInt64
47 | let compressedDigest: String
48 | let compressedSize: Int
49 | }
50 |
51 | // One OCI manifest layer, with optional uncompressed-size metadata. Built in memory (not Codable).
52 | struct OCIManifestLayer {
53 | let mediaType: String
54 | let size: Int // size in bytes
55 | let digest: String
56 | let uncompressedSize: UInt64? // optional: only set when known
57 | let uncompressedContentDigest: String? // optional: only set when known
58 |
59 | init(
60 | mediaType: String, size: Int, digest: String, uncompressedSize: UInt64? = nil,
61 | uncompressedContentDigest: String? = nil
62 | ) {
63 | self.mediaType = mediaType
64 | self.size = size
65 | self.digest = digest
66 | self.uncompressedSize = uncompressedSize
67 | self.uncompressedContentDigest = uncompressedContentDigest
68 | }
69 | }
70 |
71 | struct OCIConfig: Codable { // Decodes the relevant parts of the OCI image config (config.json).
72 | struct Annotations: Codable {
73 | let uncompressedSize: String? // annotation value arrives as a string; may be absent
74 |
75 | enum CodingKeys: String, CodingKey {
76 | case uncompressedSize = "com.trycua.lume.disk.uncompressed_size"
77 | }
78 | }
79 | let annotations: Annotations? // the annotations block itself may be missing
80 | }
81 |
82 | struct Layer: Codable, Equatable { // One layer descriptor as it appears in a registry manifest.
83 | let mediaType: String
84 | let digest: String
85 | let size: Int // size in bytes
86 | }
87 |
88 | struct Manifest: Codable { // Image manifest: layer list plus an optional config descriptor.
89 | let layers: [Layer]
90 | let config: Layer? // config blob descriptor; may be absent
91 | let mediaType: String
92 | let schemaVersion: Int
93 | }
94 |
95 | struct RepositoryTag: Codable { // NOTE(review): identical shape to RepositoryTags below — consider consolidating.
96 | let name: String
97 | let tags: [String]
98 | }
99 |
100 | struct RepositoryList: Codable { // A list of repository names.
101 | let repositories: [String]
102 | }
103 |
104 | struct RepositoryTags: Codable { // Repository name with its tags. NOTE(review): duplicates RepositoryTag above.
105 | let name: String
106 | let tags: [String]
107 | }
108 |
109 | struct CachedImage { // Identifies one locally cached image: repository, image id, and manifest id.
110 | let repository: String
111 | let imageId: String
112 | let manifestId: String
113 | }
114 |
115 | struct ImageMetadata: Codable { // Pairs an image with a manifest id and timestamp — presumably persisted for cache bookkeeping; verify against cache code.
116 | let image: String
117 | let manifestId: String
118 | let timestamp: Date
119 | }
120 |
121 | // Actor that safely collects disk-part URLs reported by concurrent download tasks.
122 | actor DiskPartsCollector {
123 | // Tuples of (sequentialPartNum, url), in the order parts were added
124 | private var diskParts: [(Int, URL)] = []
125 | // Monotonic counter assigning part numbers in addPart call order
126 | private var partCounter = 0
127 |
128 | // Adds a part and returns its assigned sequential number (1-based).
129 | func addPart(url: URL) -> Int { // NOTE(review): with concurrent callers, numbering follows call/completion order — confirm that is intended
130 | partCounter += 1 // assign the next sequential number
131 | let partNum = partCounter
132 | diskParts.append((partNum, url)) // remember number alongside the part's URL
133 | return partNum // caller can use this for ordering/reassembly
134 | }
135 |
136 | // Parts sorted by their sequential part number (element 0 of each tuple)
137 | func getSortedParts() -> [(Int, URL)] {
138 | return diskParts.sorted { $0.0 < $1.0 }
139 | }
140 |
141 | // Total number of parts registered so far
142 | func getTotalParts() -> Int {
143 | return partCounter
144 | }
145 | }
146 |
147 | actor ProgressTracker { // Serializes download-progress bookkeeping and console reporting across concurrent tasks.
148 | private var totalBytes: Int64 = 0
149 | private var downloadedBytes: Int64 = 0
150 | private var progressLogger = ProgressLogger(threshold: 0.01)
151 | private var totalFiles: Int = 0
152 | private var completedFiles: Int = 0
153 |
154 | // Download speed tracking
155 | private var startTime: Date = Date()
156 | private var lastUpdateTime: Date = Date()
157 | private var lastUpdateBytes: Int64 = 0
158 | private var speedSamples: [Double] = []
159 | private var peakSpeed: Double = 0
160 | private var totalElapsedTime: TimeInterval = 0
161 |
162 | // Smoothing factor for exponential speed smoothing (0.3 = 30% weight on the newest sample)
163 | private var speedSmoothing: Double = 0.3
164 | private var smoothedSpeed: Double = 0
165 |
166 | func setTotal(_ total: Int64, files: Int) { // Resets timers and smoothing; call once before the first addProgress.
167 | totalBytes = total
168 | totalFiles = files
169 | startTime = Date()
170 | lastUpdateTime = startTime
171 | smoothedSpeed = 0
172 | }
173 |
174 | func addProgress(_ bytes: Int64) { // Records newly downloaded bytes and prints a throttled progress line.
175 | downloadedBytes += bytes
176 | let now = Date()
177 | let elapsed = now.timeIntervalSince(lastUpdateTime)
178 |
179 | // Show first progress update immediately, then throttle updates
180 | let shouldUpdate = (downloadedBytes <= bytes) || (elapsed >= 0.5) // first call: downloadedBytes == bytes
181 |
182 | if shouldUpdate {
183 | let currentSpeed = Double(downloadedBytes - lastUpdateBytes) / max(elapsed, 0.001) // clamp to avoid divide-by-zero
184 | speedSamples.append(currentSpeed)
185 |
186 | // Cap samples array to prevent memory growth
187 | if speedSamples.count > 20 {
188 | speedSamples.removeFirst(speedSamples.count - 20)
189 | }
190 |
191 | // Update peak speed
192 | peakSpeed = max(peakSpeed, currentSpeed)
193 |
194 | // Apply exponential smoothing to the speed
195 | if smoothedSpeed == 0 {
196 | smoothedSpeed = currentSpeed
197 | } else {
198 | smoothedSpeed = speedSmoothing * currentSpeed + (1 - speedSmoothing) * smoothedSpeed
199 | }
200 |
201 | // Calculate average speed over the last few samples
202 | let recentAvgSpeed = calculateAverageSpeed()
203 |
204 | // Calculate overall average
205 | let totalElapsed = now.timeIntervalSince(startTime)
206 | let overallAvgSpeed = totalElapsed > 0 ? Double(downloadedBytes) / totalElapsed : 0
207 |
208 | let progress = Double(downloadedBytes) / Double(totalBytes) // NOTE(review): NaN/inf if totalBytes is 0 — confirm setTotal always runs first with a positive total
209 | logSpeedProgress(
210 | current: progress,
211 | currentSpeed: currentSpeed,
212 | averageSpeed: recentAvgSpeed,
213 | smoothedSpeed: smoothedSpeed,
214 | overallSpeed: overallAvgSpeed,
215 | peakSpeed: peakSpeed,
216 | context: "Downloading Image"
217 | )
218 |
219 | // Update tracking variables
220 | lastUpdateTime = now
221 | lastUpdateBytes = downloadedBytes
222 | totalElapsedTime = totalElapsed
223 | }
224 | }
225 |
226 | private func calculateAverageSpeed() -> Double { // Weighted average of up to the 8 most recent samples; newer samples weigh more.
227 | guard !speedSamples.isEmpty else { return 0 }
228 |
229 | // Use weighted average giving more emphasis to recent samples
230 | var totalWeight = 0.0
231 | var weightedSum = 0.0
232 |
233 | let samples = speedSamples.suffix(min(8, speedSamples.count))
234 | for (index, speed) in samples.enumerated() {
235 | let weight = Double(index + 1) // later (newer) samples get larger weights
236 | weightedSum += speed * weight
237 | totalWeight += weight
238 | }
239 |
240 | return totalWeight > 0 ? weightedSum / totalWeight : 0
241 | }
242 |
243 | func getDownloadStats() -> DownloadStats { // Snapshot of the totals for the end-of-download summary.
244 | let avgSpeed = totalElapsedTime > 0 ? Double(downloadedBytes) / totalElapsedTime : 0
245 | return DownloadStats(
246 | totalBytes: totalBytes,
247 | downloadedBytes: downloadedBytes,
248 | elapsedTime: totalElapsedTime,
249 | averageSpeed: avgSpeed,
250 | peakSpeed: peakSpeed
251 | )
252 | }
253 |
254 | private func logSpeedProgress( // Prints one in-place progress line (bar, percent, speeds, ETA).
255 | current: Double,
256 | currentSpeed: Double,
257 | averageSpeed: Double,
258 | smoothedSpeed: Double,
259 | overallSpeed: Double,
260 | peakSpeed: Double,
261 | context: String
262 | ) {
263 | let progressPercent = Int(current * 100)
264 | let currentSpeedStr = formatByteSpeed(currentSpeed)
265 | let avgSpeedStr = formatByteSpeed(averageSpeed)
266 | let peakSpeedStr = formatByteSpeed(peakSpeed)
267 |
268 | // Calculate ETA based on the smoothed speed which is more stable
269 | // This provides a more realistic estimate that doesn't fluctuate as much
270 | let remainingBytes = totalBytes - downloadedBytes
271 | let speedForEta = max(smoothedSpeed, averageSpeed * 0.8) // Use the higher of smoothed or 80% of avg
272 | let etaSeconds = speedForEta > 0 ? Double(remainingBytes) / speedForEta : 0
273 | let etaStr = formatTimeRemaining(etaSeconds)
274 |
275 | let progressBar = createProgressBar(progress: current)
276 |
277 | print(
278 | "\r\(progressBar) \(progressPercent)% | Current: \(currentSpeedStr) | Avg: \(avgSpeedStr) | Peak: \(peakSpeedStr) | ETA: \(etaStr)    ",
279 | terminator: "")
280 | fflush(stdout) // force the carriage-return overwrite to appear immediately
281 | }
282 |
283 | private func createProgressBar(progress: Double, width: Int = 30) -> String { // Fixed-width bar of █ (done) and ░ (remaining).
284 | let completedWidth = Int(progress * Double(width))
285 | let remainingWidth = width - completedWidth
286 |
287 | let completed = String(repeating: "█", count: completedWidth)
288 | let remaining = String(repeating: "░", count: remainingWidth)
289 |
290 | return "[\(completed)\(remaining)]"
291 | }
292 |
293 | private func formatByteSpeed(_ bytesPerSecond: Double) -> String { // Scales to B/s…GB/s with one decimal place.
294 | let units = ["B/s", "KB/s", "MB/s", "GB/s"]
295 | var speed = bytesPerSecond
296 | var unitIndex = 0
297 |
298 | while speed > 1024 && unitIndex < units.count - 1 {
299 | speed /= 1024
300 | unitIndex += 1
301 | }
302 |
303 | return String(format: "%.1f %@", speed, units[unitIndex])
304 | }
305 |
306 | private func formatTimeRemaining(_ seconds: Double) -> String { // h:mm:ss, or m:ss under an hour; guards NaN/inf/non-positive inputs.
307 | if seconds.isNaN || seconds.isInfinite || seconds <= 0 {
308 | return "calculating..."
309 | }
310 |
311 | let hours = Int(seconds) / 3600
312 | let minutes = (Int(seconds) % 3600) / 60
313 | let secs = Int(seconds) % 60
314 |
315 | if hours > 0 {
316 | return String(format: "%d:%02d:%02d", hours, minutes, secs)
317 | } else {
318 | return String(format: "%d:%02d", minutes, secs)
319 | }
320 | }
321 | }
322 |
323 | struct DownloadStats { // Immutable download summary, produced by ProgressTracker.getDownloadStats().
324 | let totalBytes: Int64
325 | let downloadedBytes: Int64
326 | let elapsedTime: TimeInterval
327 | let averageSpeed: Double // bytes per second
328 | let peakSpeed: Double // bytes per second
329 |
330 | func formattedSummary() -> String { // Multi-line, human-readable report of the stats above.
331 | let bytesStr = ByteCountFormatter.string(fromByteCount: downloadedBytes, countStyle: .file)
332 | let avgSpeedStr = formatSpeed(averageSpeed)
333 | let peakSpeedStr = formatSpeed(peakSpeed)
334 | let timeStr = formatTime(elapsedTime)
335 |
336 | return """
337 | Download Statistics:
338 | - Total downloaded: \(bytesStr)
339 | - Elapsed time: \(timeStr)
340 | - Average speed: \(avgSpeedStr)
341 | - Peak speed: \(peakSpeedStr)
342 | """
343 | }
344 |
345 | private func formatSpeed(_ bytesPerSecond: Double) -> String { // e.g. "12.3 MB/s" via ByteCountFormatter.
346 | let formatter = ByteCountFormatter()
347 | formatter.countStyle = .file
348 | let bytesStr = formatter.string(fromByteCount: Int64(bytesPerSecond))
349 | return "\(bytesStr)/s"
350 | }
351 |
352 | private func formatTime(_ seconds: TimeInterval) -> String { // Spelled-out duration, largest nonzero unit first.
353 | let hours = Int(seconds) / 3600
354 | let minutes = (Int(seconds) % 3600) / 60
355 | let secs = Int(seconds) % 60
356 |
357 | if hours > 0 {
358 | return String(format: "%d hours, %d minutes, %d seconds", hours, minutes, secs)
359 | } else if minutes > 0 {
360 | return String(format: "%d minutes, %d seconds", minutes, secs)
361 | } else {
362 | return String(format: "%d seconds", secs)
363 | }
364 | }
365 | }
366 |
/// Immutable summary of a completed upload; mirrors `DownloadStats`.
struct UploadStats {
    let totalBytes: Int64
    let uploadedBytes: Int64
    let elapsedTime: TimeInterval
    let averageSpeed: Double
    let peakSpeed: Double

    /// Builds the multi-line human-readable summary printed after a push.
    func formattedSummary() -> String {
        let uploaded = ByteCountFormatter.string(
            fromByteCount: uploadedBytes, countStyle: .file)
        return """
            Upload Statistics:
            - Total uploaded: \(uploaded)
            - Elapsed time: \(formatTime(elapsedTime))
            - Average speed: \(formatSpeed(averageSpeed))
            - Peak speed: \(formatSpeed(peakSpeed))
            """
    }

    /// Formats a byte rate as "<size>/s" using file-style byte counting.
    private func formatSpeed(_ bytesPerSecond: Double) -> String {
        let formatter = ByteCountFormatter()
        formatter.countStyle = .file
        return formatter.string(fromByteCount: Int64(bytesPerSecond)) + "/s"
    }

    /// Spells out an elapsed duration in words, omitting leading zero units.
    private func formatTime(_ seconds: TimeInterval) -> String {
        let total = Int(seconds)
        let hours = total / 3600
        let minutes = (total % 3600) / 60
        let secs = total % 60

        if hours > 0 {
            return String(format: "%d hours, %d minutes, %d seconds", hours, minutes, secs)
        }
        if minutes > 0 {
            return String(format: "%d minutes, %d seconds", minutes, secs)
        }
        return String(format: "%d seconds", secs)
    }
}
407 |
/// Serialized counter used to bound the number of concurrent download tasks.
actor TaskCounter {
    private var value: Int = 0

    /// Records one more in-flight task.
    func increment() { value += 1 }

    /// Records the completion of one task.
    func decrement() { value -= 1 }

    /// Current number of in-flight tasks.
    func current() -> Int { value }
}
415 |
// Registry client that pulls (and caches) VM images from a GHCR-style OCI
// registry. Marked @unchecked Sendable: synchronization is manual —
// `downloadLock` guards `activeDownloads`; the remaining stored properties
// are immutable after init. TODO confirm no other mutable state is added.
class ImageContainerRegistry: @unchecked Sendable {
    private let registry: String  // registry host, e.g. "ghcr.io"
    private let organization: String  // registry namespace/owner
    private let downloadProgress = ProgressTracker()  // Renamed for clarity
    private let uploadProgress = UploadProgressTracker()  // Added upload tracker
    private let cacheDirectory: URL  // <configured cache root>/ghcr
    private let downloadLock = NSLock()  // guards activeDownloads
    private var activeDownloads: [String] = []  // digests currently being fetched
    private let cachingEnabled: Bool  // read once from settings at init

    // Constants for zero-skipping write logic
    private static let holeGranularityBytes = 4 * 1024 * 1024  // 4MB block size for checking zeros
    private static let zeroChunk = Data(count: holeGranularityBytes)
429 |
430 | // Add the createProgressBar function here as a private method
431 | private func createProgressBar(progress: Double, width: Int = 30) -> String {
432 | let completedWidth = Int(progress * Double(width))
433 | let remainingWidth = width - completedWidth
434 |
435 | let completed = String(repeating: "█", count: completedWidth)
436 | let remaining = String(repeating: "░", count: remainingWidth)
437 |
438 | return "[\(completed)\(remaining)]"
439 | }
440 |
441 | init(registry: String, organization: String) {
442 | self.registry = registry
443 | self.organization = organization
444 |
445 | // Get cache directory from settings
446 | let cacheDir = SettingsManager.shared.getCacheDirectory()
447 | let expandedCacheDir = (cacheDir as NSString).expandingTildeInPath
448 | self.cacheDirectory = URL(fileURLWithPath: expandedCacheDir)
449 | .appendingPathComponent("ghcr")
450 |
451 | // Get caching enabled setting
452 | self.cachingEnabled = SettingsManager.shared.isCachingEnabled()
453 |
454 | try? FileManager.default.createDirectory(
455 | at: cacheDirectory, withIntermediateDirectories: true)
456 |
457 | // Create organization directory
458 | let orgDir = cacheDirectory.appendingPathComponent(organization)
459 | try? FileManager.default.createDirectory(at: orgDir, withIntermediateDirectories: true)
460 | }
461 |
462 | private func getManifestIdentifier(_ manifest: Manifest, manifestDigest: String) -> String {
463 | // Use the manifest's own digest as the identifier
464 | return manifestDigest.replacingOccurrences(of: ":", with: "_")
465 | }
466 |
467 | private func getShortImageId(_ digest: String) -> String {
468 | // Take first 12 characters of the digest after removing the "sha256:" prefix
469 | let id = digest.replacingOccurrences(of: "sha256:", with: "")
470 | return String(id.prefix(12))
471 | }
472 |
473 | private func getImageCacheDirectory(manifestId: String) -> URL {
474 | return
475 | cacheDirectory
476 | .appendingPathComponent(organization)
477 | .appendingPathComponent(manifestId)
478 | }
479 |
480 | private func getCachedManifestPath(manifestId: String) -> URL {
481 | return getImageCacheDirectory(manifestId: manifestId).appendingPathComponent(
482 | "manifest.json")
483 | }
484 |
485 | private func getCachedLayerPath(manifestId: String, digest: String) -> URL {
486 | return getImageCacheDirectory(manifestId: manifestId).appendingPathComponent(
487 | digest.replacingOccurrences(of: ":", with: "_"))
488 | }
489 |
490 | private func setupImageCache(manifestId: String) throws {
491 | let cacheDir = getImageCacheDirectory(manifestId: manifestId)
492 | // Remove existing cache if it exists
493 | if FileManager.default.fileExists(atPath: cacheDir.path) {
494 | try FileManager.default.removeItem(at: cacheDir)
495 | // Ensure it's completely removed
496 | while FileManager.default.fileExists(atPath: cacheDir.path) {
497 | try? FileManager.default.removeItem(at: cacheDir)
498 | }
499 | }
500 | try FileManager.default.createDirectory(at: cacheDir, withIntermediateDirectories: true)
501 | }
502 |
503 | private func loadCachedManifest(manifestId: String) -> Manifest? {
504 | let manifestPath = getCachedManifestPath(manifestId: manifestId)
505 | guard let data = try? Data(contentsOf: manifestPath) else { return nil }
506 | return try? JSONDecoder().decode(Manifest.self, from: data)
507 | }
508 |
509 | private func validateCache(manifest: Manifest, manifestId: String) -> Bool {
510 | // Skip cache validation if caching is disabled
511 | if !cachingEnabled {
512 | return false
513 | }
514 |
515 | // Check if we have a reassembled image
516 | let reassembledCachePath = getImageCacheDirectory(manifestId: manifestId)
517 | .appendingPathComponent("disk.img.reassembled")
518 | if FileManager.default.fileExists(atPath: reassembledCachePath.path) {
519 | Logger.info("Found reassembled disk image in cache validation")
520 |
521 | // If we have a reassembled image, we only need to make sure the manifest matches
522 | guard let cachedManifest = loadCachedManifest(manifestId: manifestId),
523 | cachedManifest.layers == manifest.layers
524 | else {
525 | return false
526 | }
527 |
528 | // We have a reassembled image and the manifest matches
529 | return true
530 | }
531 |
532 | // If no reassembled image, check layer files
533 | // First check if manifest exists and matches
534 | guard let cachedManifest = loadCachedManifest(manifestId: manifestId),
535 | cachedManifest.layers == manifest.layers
536 | else {
537 | return false
538 | }
539 |
540 | // Then verify all layer files exist
541 | for layer in manifest.layers {
542 | let cachedLayer = getCachedLayerPath(manifestId: manifestId, digest: layer.digest)
543 | if !FileManager.default.fileExists(atPath: cachedLayer.path) {
544 | return false
545 | }
546 | }
547 |
548 | return true
549 | }
550 |
551 | private func saveManifest(_ manifest: Manifest, manifestId: String) throws {
552 | // Skip saving manifest if caching is disabled
553 | if !cachingEnabled {
554 | return
555 | }
556 |
557 | let manifestPath = getCachedManifestPath(manifestId: manifestId)
558 | try JSONEncoder().encode(manifest).write(to: manifestPath)
559 | }
560 |
561 | private func isDownloading(_ digest: String) -> Bool {
562 | downloadLock.lock()
563 | defer { downloadLock.unlock() }
564 | return activeDownloads.contains(digest)
565 | }
566 |
567 | private func markDownloadStarted(_ digest: String) {
568 | downloadLock.lock()
569 | if !activeDownloads.contains(digest) {
570 | activeDownloads.append(digest)
571 | }
572 | downloadLock.unlock()
573 | }
574 |
575 | private func markDownloadComplete(_ digest: String) {
576 | downloadLock.lock()
577 | activeDownloads.removeAll { $0 == digest }
578 | downloadLock.unlock()
579 | }
580 |
581 | private func waitForExistingDownload(_ digest: String, cachedLayer: URL) async throws {
582 | while isDownloading(digest) {
583 | try await Task.sleep(nanoseconds: 1_000_000_000) // Sleep for 1 second
584 | if FileManager.default.fileExists(atPath: cachedLayer.path) {
585 | return // File is now available
586 | }
587 | }
588 | }
589 |
590 | private func saveImageMetadata(image: String, manifestId: String) throws {
591 | // Skip saving metadata if caching is disabled
592 | if !cachingEnabled {
593 | return
594 | }
595 |
596 | let metadataPath = getImageCacheDirectory(manifestId: manifestId).appendingPathComponent(
597 | "metadata.json")
598 | let metadata = ImageMetadata(
599 | image: image,
600 | manifestId: manifestId,
601 | timestamp: Date()
602 | )
603 | try JSONEncoder().encode(metadata).write(to: metadataPath)
604 | }
605 |
    /// Removes cached entries for `image` other than `currentManifestId`,
    /// first salvaging any reassembled disk image (plus nvram.bin and
    /// config.json) into the current cache entry so expensive reassembly
    /// work is not thrown away. No-op when caching is disabled; entries
    /// without a readable metadata.json are left untouched.
    ///
    /// - Throws: Filesystem errors from directory listing, copy, or removal.
    private func cleanupOldVersions(currentManifestId: String, image: String) throws {
        // Skip cleanup if caching is disabled
        if !cachingEnabled {
            return
        }

        Logger.info(
            "Checking for old versions of image to clean up",
            metadata: [
                "image": image,
                "current_manifest_id": currentManifestId,
            ])

        let orgDir = cacheDirectory.appendingPathComponent(organization)
        guard FileManager.default.fileExists(atPath: orgDir.path) else { return }

        let contents = try FileManager.default.contentsOfDirectory(atPath: orgDir.path)
        for item in contents {
            // Never touch the entry we are currently populating.
            if item == currentManifestId { continue }

            let itemPath = orgDir.appendingPathComponent(item)
            let metadataPath = itemPath.appendingPathComponent("metadata.json")

            if let metadataData = try? Data(contentsOf: metadataPath),
                let metadata = try? JSONDecoder().decode(ImageMetadata.self, from: metadataData)
            {
                // Only entries recorded for the same image are candidates.
                if metadata.image == image {
                    // Before removing, check if there's a reassembled image we should preserve
                    let reassembledPath = itemPath.appendingPathComponent("disk.img.reassembled")
                    let nvramPath = itemPath.appendingPathComponent("nvram.bin")
                    let configPath = itemPath.appendingPathComponent("config.json")

                    // Preserve reassembled image if it exists
                    if FileManager.default.fileExists(atPath: reassembledPath.path) {
                        Logger.info(
                            "Preserving reassembled disk image during cleanup",
                            metadata: ["manifest_id": item])

                        // Ensure the current cache directory exists
                        let currentCacheDir = getImageCacheDirectory(manifestId: currentManifestId)
                        try FileManager.default.createDirectory(
                            at: currentCacheDir, withIntermediateDirectories: true)

                        // Move reassembled image to current cache directory
                        // (copy, and only if the destination is not already
                        // populated — first old entry wins).
                        let currentReassembledPath = currentCacheDir.appendingPathComponent(
                            "disk.img.reassembled")
                        if !FileManager.default.fileExists(atPath: currentReassembledPath.path) {
                            try FileManager.default.copyItem(
                                at: reassembledPath, to: currentReassembledPath)
                        }

                        // Also preserve nvram if it exists
                        if FileManager.default.fileExists(atPath: nvramPath.path) {
                            let currentNvramPath = currentCacheDir.appendingPathComponent(
                                "nvram.bin")
                            if !FileManager.default.fileExists(atPath: currentNvramPath.path) {
                                try FileManager.default.copyItem(
                                    at: nvramPath, to: currentNvramPath)
                            }
                        }

                        // Also preserve config if it exists
                        if FileManager.default.fileExists(atPath: configPath.path) {
                            let currentConfigPath = currentCacheDir.appendingPathComponent(
                                "config.json")
                            if !FileManager.default.fileExists(atPath: currentConfigPath.path) {
                                try FileManager.default.copyItem(
                                    at: configPath, to: currentConfigPath)
                            }
                        }
                    }

                    // Now remove the old directory
                    try FileManager.default.removeItem(at: itemPath)
                    Logger.info(
                        "Removed old version of image",
                        metadata: [
                            "image": image,
                            "old_manifest_id": item,
                        ])
                }
                continue
            }

            Logger.info(
                "Skipping cleanup check for item without metadata", metadata: ["item": item])
        }
    }
694 |
    /// Best-effort network tuning performed once before large downloads.
    ///
    /// NOTE(review): each access to `URLSessionConfiguration.default`
    /// returns a fresh configuration object, so mutating it here does not
    /// affect `URLSession.shared` or sessions created elsewhere — these
    /// three assignments appear to be no-ops. Confirm and, if the tuning is
    /// intended, apply it to the configuration actually used when the
    /// download session is constructed.
    private func optimizeNetworkSettings() {
        // Set global URLSession configuration properties for better performance
        URLSessionConfiguration.default.httpMaximumConnectionsPerHost = 10
        URLSessionConfiguration.default.httpShouldUsePipelining = true
        URLSessionConfiguration.default.timeoutIntervalForResource = 3600

        // Pre-warm DNS resolution by kicking off (and abandoning) a request
        // to the registry host; the response body is ignored.
        let preWarmTask = URLSession.shared.dataTask(with: URL(string: "https://\(self.registry)")!)
        preWarmTask.resume()
    }
705 |
    /// Pulls `image` (format "name:tag") from the registry and materializes a
    /// ready-to-run VM directory, reusing the local layer cache when enabled.
    ///
    /// - Parameters:
    ///   - image: Image reference, e.g. "macos-sequoia:latest".
    ///   - name: VM name; defaults to the image name without the tag.
    ///   - locationName: Named storage location, or a direct path (detected
    ///     by the presence of "/" or "\\"); nil means the default location.
    /// - Returns: The VM directory the image was extracted into.
    /// - Throws: `ValidationError` for malformed input, plus network or
    ///   filesystem errors from download and assembly.
    public func pull(
        image: String,
        name: String?,
        locationName: String? = nil
    ) async throws -> VMDirectory {
        guard !image.isEmpty else {
            throw ValidationError("Image name cannot be empty")
        }

        let home = Home()

        // Use provided name or derive from image
        let vmName = name ?? image.split(separator: ":").first.map(String.init) ?? ""

        // Determine if locationName is a direct path or a named storage location
        let vmDir: VMDirectory
        if let locationName = locationName,
            locationName.contains("/") || locationName.contains("\\")
        {
            // Direct path
            vmDir = try home.getVMDirectoryFromPath(vmName, storagePath: locationName)
        } else {
            // Named storage or default location
            vmDir = try home.getVMDirectory(vmName, storage: locationName)
        }

        // Optimize network early in the process
        optimizeNetworkSettings()

        // Parse image name and tag
        let components = image.split(separator: ":")
        guard components.count == 2, let tag = components.last else {
            throw ValidationError("Invalid image format. Expected format: name:tag")
        }

        let imageName = String(components.first!)
        let imageTag = String(tag)

        Logger.info(
            "Pulling image",
            metadata: [
                "image": image,
                "name": vmName,
                "location": locationName ?? "default",
                "registry": registry,
                "organization": organization,
            ])

        // Get anonymous token
        Logger.info("Getting registry authentication token")
        let token = try await getToken(
            repository: "\(self.organization)/\(imageName)", scopes: ["pull"])

        // Fetch manifest
        Logger.info("Fetching Image manifest")
        let (manifest, manifestDigest): (Manifest, String) = try await fetchManifest(
            repository: "\(self.organization)/\(imageName)",
            tag: imageTag,
            token: token
        )

        // Get manifest identifier using the manifest's own digest
        let manifestId = getManifestIdentifier(manifest, manifestDigest: manifestDigest)

        Logger.info(
            "Pulling image",
            metadata: [
                "repository": imageName,
                "manifest_id": manifestId,
            ])

        // Create temporary directory for the entire VM setup. All assembly
        // happens here; the result is moved into place only at the end.
        let tempVMDir = FileManager.default.temporaryDirectory.appendingPathComponent(
            "lume_vm_\(UUID().uuidString)")
        try FileManager.default.createDirectory(at: tempVMDir, withIntermediateDirectories: true)
        // NOTE(review): tempVMDir is moved to its final location below, so on
        // success this deferred removeItem targets a path that no longer
        // exists and fails harmlessly under try?.
        defer {
            try? FileManager.default.removeItem(at: tempVMDir)
        }

        // Check if caching is enabled and if we have a valid cached version
        Logger.info("Caching enabled: \(cachingEnabled)")
        if cachingEnabled && validateCache(manifest: manifest, manifestId: manifestId) {
            Logger.info("Using cached version of image")
            try await copyFromCache(manifest: manifest, manifestId: manifestId, to: tempVMDir)
        } else {
            // If caching is disabled, log it
            if !cachingEnabled {
                Logger.info("Caching is disabled, downloading fresh copy")
            } else {
                Logger.info("Cache miss or invalid cache, setting up new cache")
            }

            // Clean up old versions of this repository before setting up new cache if caching is enabled
            if cachingEnabled {
                try cleanupOldVersions(currentManifestId: manifestId, image: imageName)

                // Setup new cache directory
                try setupImageCache(manifestId: manifestId)
                // Save new manifest
                try saveManifest(manifest, manifestId: manifestId)

                // Save image metadata
                try saveImageMetadata(
                    image: imageName,
                    manifestId: manifestId
                )
            }

            // Create temporary directory for new downloads
            let tempDownloadDir = FileManager.default.temporaryDirectory.appendingPathComponent(
                UUID().uuidString)
            try FileManager.default.createDirectory(
                at: tempDownloadDir, withIntermediateDirectories: true)
            defer {
                try? FileManager.default.removeItem(at: tempDownloadDir)
            }

            // Set total size and file count (empty OCI layers are skipped).
            let totalFiles = manifest.layers.filter {
                $0.mediaType != "application/vnd.oci.empty.v1+json"
            }.count
            let totalSize = manifest.layers.reduce(0) { $0 + Int64($1.size) }
            await downloadProgress.setTotal(totalSize, files: totalFiles)

            // Process layers with limited concurrency
            Logger.info("Processing Image layers")
            Logger.info(
                "This may take several minutes depending on the image size and your internet connection. Please wait..."
            )

            // Add immediate progress indicator before starting downloads
            print(
                "[░░░░░░░░░░░░░░░░░░░░] 0% | Initializing downloads... | ETA: calculating... ")
            fflush(stdout)

            // Instantiate the collector that gathers disk-image parts.
            let diskPartsCollector = DiskPartsCollector()

            // Adaptive concurrency based on system capabilities
            let memoryConstrained = determineIfMemoryConstrained()
            let networkQuality = determineNetworkQuality()
            let maxConcurrentTasks = calculateOptimalConcurrency(
                memoryConstrained: memoryConstrained, networkQuality: networkQuality)

            Logger.info(
                "Using adaptive download configuration: Concurrency=\(maxConcurrentTasks), Memory-optimized=\(memoryConstrained)"
            )

            let counter = TaskCounter()
            var lz4LayerCount = 0  // Count lz4 layers found

            try await withThrowingTaskGroup(of: Int64.self) { group in
                for layer in manifest.layers {
                    if layer.mediaType == "application/vnd.oci.empty.v1+json" {
                        continue
                    }

                    // Throttle: wait for a slot before scheduling more work.
                    while await counter.current() >= maxConcurrentTasks {
                        _ = try await group.next()
                        await counter.decrement()
                    }

                    // Identify disk parts by media type
                    if layer.mediaType == "application/octet-stream+lz4" {
                        // --- Handle LZ4 Disk Part Layer ---
                        lz4LayerCount += 1  // Increment count
                        let currentPartNum = lz4LayerCount  // Use the current count as the logical number for logging

                        let cachedLayer = getCachedLayerPath(
                            manifestId: manifestId, digest: layer.digest)
                        let digest = layer.digest
                        let size = layer.size

                        if memoryConstrained
                            && FileManager.default.fileExists(atPath: cachedLayer.path)
                        {
                            // Memory-constrained fast path: reference the
                            // cached blob directly instead of copying it.
                            let collectorPartNum = await diskPartsCollector.addPart(
                                url: cachedLayer)
                            // Log using the sequential number from collector for clarity if needed, or the lz4LayerCount
                            Logger.info(
                                "Using cached lz4 layer (part \(currentPartNum)) directly: \(cachedLayer.lastPathComponent) -> Collector #\(collectorPartNum)"
                            )
                            await downloadProgress.addProgress(Int64(size))
                            continue
                        } else {
                            // Download/Copy Path (Task Group)
                            group.addTask { [self] in
                                await counter.increment()
                                let finalPath: URL
                                if FileManager.default.fileExists(atPath: cachedLayer.path) {
                                    let tempPartURL = tempDownloadDir.appendingPathComponent(
                                        "disk.img.part.\(UUID().uuidString)")
                                    try FileManager.default.copyItem(
                                        at: cachedLayer, to: tempPartURL)
                                    await downloadProgress.addProgress(Int64(size))
                                    finalPath = tempPartURL
                                } else {
                                    let tempPartURL = tempDownloadDir.appendingPathComponent(
                                        "disk.img.part.\(UUID().uuidString)")
                                    if isDownloading(digest) {
                                        // Another task is fetching this digest:
                                        // wait, then reuse its cached result.
                                        try await waitForExistingDownload(
                                            digest, cachedLayer: cachedLayer)
                                        if FileManager.default.fileExists(atPath: cachedLayer.path)
                                        {
                                            try FileManager.default.copyItem(
                                                at: cachedLayer, to: tempPartURL)
                                            await downloadProgress.addProgress(Int64(size))
                                            finalPath = tempPartURL
                                        } else {
                                            markDownloadStarted(digest)
                                            try await self.downloadLayer(
                                                repository: "\(self.organization)/\(imageName)",
                                                digest: digest, mediaType: layer.mediaType,
                                                token: token,
                                                to: tempPartURL, maxRetries: 5,
                                                progress: downloadProgress, manifestId: manifestId
                                            )
                                            finalPath = tempPartURL
                                        }
                                    } else {
                                        markDownloadStarted(digest)
                                        try await self.downloadLayer(
                                            repository: "\(self.organization)/\(imageName)",
                                            digest: digest, mediaType: layer.mediaType,
                                            token: token,
                                            to: tempPartURL, maxRetries: 5,
                                            progress: downloadProgress, manifestId: manifestId
                                        )
                                        finalPath = tempPartURL
                                    }
                                }
                                // Add to collector, get sequential number assigned by collector
                                let collectorPartNum = await diskPartsCollector.addPart(
                                    url: finalPath)
                                // Log using the sequential number from collector
                                Logger.info(
                                    "Assigned path for lz4 layer (part \(currentPartNum)): \(finalPath.lastPathComponent) -> Collector #\(collectorPartNum)"
                                )
                                await counter.decrement()
                                return Int64(size)
                            }
                        }
                    } else {
                        // --- Handle Non-Disk-Part Layer ---
                        let mediaType = layer.mediaType
                        let digest = layer.digest
                        let size = layer.size

                        // Determine output path based on media type
                        let outputURL: URL
                        switch mediaType {
                        case "application/vnd.oci.image.layer.v1.tar",
                            "application/octet-stream+gzip":  // Might be compressed disk.img single file?
                            outputURL = tempDownloadDir.appendingPathComponent("disk.img")
                        case "application/vnd.oci.image.config.v1+json":
                            outputURL = tempDownloadDir.appendingPathComponent("config.json")
                        case "application/octet-stream":  // Could be nvram or uncompressed single disk.img
                            // Heuristic: If a config.json already exists or is expected, assume this is nvram.
                            // This might need refinement if single disk images use octet-stream.
                            if manifest.config != nil {
                                outputURL = tempDownloadDir.appendingPathComponent("nvram.bin")
                            } else {
                                // Assume it's a single-file disk image if no config layer is present
                                outputURL = tempDownloadDir.appendingPathComponent("disk.img")
                            }
                        default:
                            Logger.info("Skipping unsupported layer media type: \(mediaType)")
                            continue  // Skip to the next layer
                        }

                        // Add task to download/copy the non-disk-part layer
                        group.addTask { [self] in
                            await counter.increment()
                            let cachedLayer = getCachedLayerPath(
                                manifestId: manifestId, digest: digest)

                            if FileManager.default.fileExists(atPath: cachedLayer.path) {
                                try FileManager.default.copyItem(at: cachedLayer, to: outputURL)
                                await downloadProgress.addProgress(Int64(size))
                            } else {
                                if isDownloading(digest) {
                                    try await waitForExistingDownload(
                                        digest, cachedLayer: cachedLayer)
                                    if FileManager.default.fileExists(atPath: cachedLayer.path) {
                                        try FileManager.default.copyItem(
                                            at: cachedLayer, to: outputURL)
                                        await downloadProgress.addProgress(Int64(size))
                                        await counter.decrement()  // Decrement before returning
                                        return Int64(size)
                                    }
                                }

                                markDownloadStarted(digest)
                                try await self.downloadLayer(
                                    repository: "\(self.organization)/\(imageName)",
                                    digest: digest, mediaType: mediaType, token: token,
                                    to: outputURL, maxRetries: 5,
                                    progress: downloadProgress, manifestId: manifestId
                                )
                                // Note: downloadLayer handles caching and marking download complete
                            }
                            await counter.decrement()
                            return Int64(size)
                        }
                    }
                }  // End for layer in manifest.layers

                // Wait for remaining tasks
                for try await _ in group {}
            }  // End TaskGroup

            // Display download statistics
            let stats = await downloadProgress.getDownloadStats()
            Logger.info("")  // New line after progress
            Logger.info(stats.formattedSummary())

            // Now that we've downloaded everything to the cache, use copyFromCache to create final VM files
            if cachingEnabled {
                Logger.info("Using copyFromCache method to properly preserve partition tables")
                try await copyFromCache(manifest: manifest, manifestId: manifestId, to: tempVMDir)
            } else {
                // Even if caching is disabled, we need to use copyFromCache to assemble the disk image
                // correctly with partition tables, then we'll clean up the cache afterward
                Logger.info("Caching disabled - using temporary cache to assemble VM files")
                try await copyFromCache(manifest: manifest, manifestId: manifestId, to: tempVMDir)
            }
        }

        // Only move to final location once everything is complete
        if FileManager.default.fileExists(atPath: vmDir.dir.path) {
            try FileManager.default.removeItem(at: URL(fileURLWithPath: vmDir.dir.path))
        }

        // Ensure parent directory exists
        try FileManager.default.createDirectory(
            at: URL(fileURLWithPath: vmDir.dir.path).deletingLastPathComponent(),
            withIntermediateDirectories: true)

        // Log the final destination
        Logger.info(
            "Moving files to VM directory",
            metadata: [
                "destination": vmDir.dir.path,
                "location": locationName ?? "default",
            ])

        // Move files to final location
        try FileManager.default.moveItem(at: tempVMDir, to: URL(fileURLWithPath: vmDir.dir.path))

        // If caching is disabled, clean up the cache entry
        if !cachingEnabled {
            Logger.info("Caching disabled - cleaning up temporary cache entry")
            try? cleanupCacheEntry(manifestId: manifestId)
        }

        Logger.info("Download complete: Files extracted to \(vmDir.dir.path)")
        Logger.info(
            "Note: Actual disk usage is significantly lower than reported size due to macOS sparse file system"
        )
        Logger.info(
            "Run 'lume run \(vmName)' to reduce the disk image file size by using macOS sparse file system"
        )
        return vmDir
    }
1071 |
1072 | // Helper function to clean up a specific cache entry
1073 | private func cleanupCacheEntry(manifestId: String) throws {
1074 | let cacheDir = getImageCacheDirectory(manifestId: manifestId)
1075 |
1076 | if FileManager.default.fileExists(atPath: cacheDir.path) {
1077 | Logger.info("Removing cache entry for manifest ID: \(manifestId)")
1078 | try FileManager.default.removeItem(at: cacheDir)
1079 | }
1080 | }
1081 |
    // Shared function to handle disk image creation - can be used by both cache hit and cache miss paths
    /// Creates a sparse disk image at `destinationURL` with apparent size
    /// `diskSize`, decompresses `sourceURL` into it at offset 0, then tries
    /// to re-sparsify via `cp -c` (APFS clonefile copy) and sets 0644 perms.
    ///
    /// NOTE(review): `outputHandle` is only closed on the success path — if
    /// `decompressChunkAndWriteSparse` or an earlier write throws, the handle
    /// leaks until deinit. Consider a defer-based close.
    /// NOTE(review): the "LUME_TEST_PATTERN" bytes written at offset 0 are
    /// presumably overwritten by the decompressed data, but the trailing
    /// pattern near `diskSize` persists unless data reaches the end —
    /// confirm this is intended.
    private func createDiskImageFromSource(
        sourceURL: URL,  // Source data to decompress
        destinationURL: URL,  // Where to create the disk image
        diskSize: UInt64  // Total size for the sparse file
    ) throws {
        Logger.info("Creating sparse disk image...")

        // Create empty destination file (replacing any previous one)
        if FileManager.default.fileExists(atPath: destinationURL.path) {
            try FileManager.default.removeItem(at: destinationURL)
        }
        guard FileManager.default.createFile(atPath: destinationURL.path, contents: nil) else {
            throw PullError.fileCreationFailed(destinationURL.path)
        }

        // Create sparse file by extending the empty file to the full size
        let outputHandle = try FileHandle(forWritingTo: destinationURL)
        try outputHandle.truncate(atOffset: diskSize)

        // Write test patterns at beginning and end
        Logger.info("Writing test patterns to verify writability...")
        let testPattern = "LUME_TEST_PATTERN".data(using: .utf8)!
        try outputHandle.seek(toOffset: 0)
        try outputHandle.write(contentsOf: testPattern)
        try outputHandle.seek(toOffset: diskSize - UInt64(testPattern.count))
        try outputHandle.write(contentsOf: testPattern)
        try outputHandle.synchronize()

        // Decompress the source data at offset 0
        Logger.info("Decompressing source data...")
        let bytesWritten = try decompressChunkAndWriteSparse(
            inputPath: sourceURL.path,
            outputHandle: outputHandle,
            startOffset: 0
        )
        Logger.info(
            "Decompressed \(ByteCountFormatter.string(fromByteCount: Int64(bytesWritten), countStyle: .file)) of data"
        )

        // Ensure data is written and close handle
        try outputHandle.synchronize()
        try outputHandle.close()

        // Run sync to flush filesystem
        let syncProcess = Process()
        syncProcess.executableURL = URL(fileURLWithPath: "/bin/sync")
        try syncProcess.run()
        syncProcess.waitUntilExit()

        // Optimize with cp -c (clone copy) to compact the sparse representation
        if FileManager.default.fileExists(atPath: "/bin/cp") {
            Logger.info("Optimizing sparse file representation...")
            let optimizedPath = destinationURL.path + ".optimized"

            let process = Process()
            process.executableURL = URL(fileURLWithPath: "/bin/cp")
            process.arguments = ["-c", destinationURL.path, optimizedPath]

            try process.run()
            process.waitUntilExit()

            if process.terminationStatus == 0 {
                // Get optimization results for logging
                let optimizedSize =
                    (try? FileManager.default.attributesOfItem(atPath: optimizedPath)[.size]
                        as? UInt64) ?? 0
                let originalUsage = getActualDiskUsage(path: destinationURL.path)
                let optimizedUsage = getActualDiskUsage(path: optimizedPath)

                Logger.info(
                    "Sparse optimization results: Before: \(ByteCountFormatter.string(fromByteCount: Int64(originalUsage), countStyle: .file)) actual usage, After: \(ByteCountFormatter.string(fromByteCount: Int64(optimizedUsage), countStyle: .file)) actual usage (Apparent size: \(ByteCountFormatter.string(fromByteCount: Int64(optimizedSize), countStyle: .file)))"
                )

                // Replace original with optimized
                try FileManager.default.removeItem(at: destinationURL)
                try FileManager.default.moveItem(
                    at: URL(fileURLWithPath: optimizedPath), to: destinationURL)
                Logger.info("Replaced with optimized sparse version")
            } else {
                // Optimization is best-effort; keep the unoptimized image.
                Logger.info("Sparse optimization failed, using original file")
                try? FileManager.default.removeItem(atPath: optimizedPath)
            }
        }

        // Set permissions to 0644
        let chmodProcess = Process()
        chmodProcess.executableURL = URL(fileURLWithPath: "/bin/chmod")
        chmodProcess.arguments = ["0644", destinationURL.path]
        try chmodProcess.run()
        chmodProcess.waitUntilExit()

        // Final sync
        let finalSyncProcess = Process()
        finalSyncProcess.executableURL = URL(fileURLWithPath: "/bin/sync")
        try finalSyncProcess.run()
        finalSyncProcess.waitUntilExit()
    }
1180 |
1181 | // Function to simulate cache pull behavior for freshly downloaded images
     | /// Replaces a freshly downloaded `disk.img` in `tempVMDir` with a clone of itself so the
     | /// image goes through the same reassembly/optimization path as an image pulled from cache.
     | ///
     | /// Cloning strategies are tried in order until one succeeds:
     | ///   1. `hdiutil convert` to UDRO and then back to UDRW (preserves the partition table),
     | ///   2. a raw `dd` block copy,
     | ///   3. `hdiutil attach -nomount` followed by `asr restore` from the attached device.
     | /// If all three fail, a plain file copy of the backup is used as a last resort. The result
     | /// is then sparse-optimized via `cp -c`, chmod'd to 0644, synced, verified with
     | /// `hdiutil imageinfo`, and the `disk.img.original` backup is removed.
     | ///
     | /// - Parameter tempVMDir: Directory expected to contain `disk.img`.
     | /// - Throws: File-system errors from moving/copying/removing items and from launching the
     | ///   external tools. Returns early without throwing when `disk.img` is missing or its
     | ///   size cannot be determined.
1182 | private func simulateCachePull(tempVMDir: URL) throws {
1183 | Logger.info("Simulating cache pull behavior for freshly downloaded image...")
1184 |
1185 | // Find disk.img in tempVMDir
1186 | let diskImgPath = tempVMDir.appendingPathComponent("disk.img")
1187 | guard FileManager.default.fileExists(atPath: diskImgPath.path) else {
1188 | Logger.info("No disk.img found to simulate cache pull behavior")
1189 | return
1190 | }
1191 |
1192 | // Get file attributes and size
1193 | let attributes = try FileManager.default.attributesOfItem(atPath: diskImgPath.path)
1194 | guard let diskSize = attributes[.size] as? UInt64, diskSize > 0 else {
1195 | Logger.error("Could not determine disk.img size for simulation")
1196 | return
1197 | }
     | // NOTE(review): `diskSize` only validates that the file is non-empty; the bound
     | // value itself is never read again below.
1198 |
1199 | Logger.info("Creating true disk image clone with partition table preserved...")
1200 |
1201 | // Create backup of original file
1202 | let backupPath = tempVMDir.appendingPathComponent("disk.img.original")
1203 | try FileManager.default.moveItem(at: diskImgPath, to: backupPath)
1204 |
1205 | // Let's first check if the original image has a partition table
1206 | Logger.info("Checking if source image has a partition table...")
1207 | let checkProcess = Process()
1208 | checkProcess.executableURL = URL(fileURLWithPath: "/usr/bin/hdiutil")
1209 | checkProcess.arguments = ["imageinfo", backupPath.path]
1210 |
1211 | let checkPipe = Pipe()
1212 | checkProcess.standardOutput = checkPipe
1213 |
1214 | try checkProcess.run()
1215 | checkProcess.waitUntilExit()
1216 |
1217 | let checkData = checkPipe.fileHandleForReading.readDataToEndOfFile()
1218 | let checkOutput = String(data: checkData, encoding: .utf8) ?? ""
1219 | Logger.info("Source image info: \(checkOutput)")
1220 |
1221 | // Try different methods in sequence until one works
1222 | var success = false
1223 |
1224 | // Method 1: Use hdiutil convert to convert the image while preserving all data
1225 | if !success {
1226 | Logger.info("Trying hdiutil convert...")
1227 | let tempPath = tempVMDir.appendingPathComponent("disk.img.temp")
1228 |
1229 | let convertProcess = Process()
1230 | convertProcess.executableURL = URL(fileURLWithPath: "/usr/bin/hdiutil")
1231 | convertProcess.arguments = [
1232 | "convert",
1233 | backupPath.path,
1234 | "-format", "UDRO", // Read-only first to preserve partition table
1235 | "-o", tempPath.path,
1236 | ]
1237 |
1238 | let convertOutPipe = Pipe()
1239 | let convertErrPipe = Pipe()
1240 | convertProcess.standardOutput = convertOutPipe
1241 | convertProcess.standardError = convertErrPipe
1242 |
1243 | do {
1244 | try convertProcess.run()
1245 | convertProcess.waitUntilExit()
1246 |
1247 | let errData = convertErrPipe.fileHandleForReading.readDataToEndOfFile()
1248 | let errOutput = String(data: errData, encoding: .utf8) ?? ""
1249 |
1250 | if convertProcess.terminationStatus == 0 {
1251 | Logger.info("hdiutil convert succeeded. Converting to writable format...")
1252 | // Now convert to writable format
1253 | let convertBackProcess = Process()
1254 | convertBackProcess.executableURL = URL(fileURLWithPath: "/usr/bin/hdiutil")
1255 | convertBackProcess.arguments = [
1256 | "convert",
1257 | tempPath.path,
1258 | "-format", "UDRW", // Read-write format
1259 | "-o", diskImgPath.path,
1260 | ]
1261 |
1262 | try convertBackProcess.run()
1263 | convertBackProcess.waitUntilExit()
1264 |
1265 | if convertBackProcess.terminationStatus == 0 {
1266 | Logger.info(
1267 | "Successfully converted to writable format with partition table")
1268 | success = true
1269 | } else {
1270 | Logger.error("hdiutil convert to writable format failed")
1271 | }
1272 |
1273 | // Clean up temporary image
1274 | try? FileManager.default.removeItem(at: tempPath)
1275 | } else {
1276 | Logger.error("hdiutil convert failed: \(errOutput)")
1277 | }
1278 | } catch {
1279 | Logger.error("Error executing hdiutil convert: \(error)")
1280 | }
1281 | }
1282 |
1283 | // Method 2: Try direct raw copy method
1284 | if !success {
1285 | Logger.info("Trying direct raw copy with dd...")
1286 |
1287 | // Create empty file first
1288 | FileManager.default.createFile(atPath: diskImgPath.path, contents: nil)
1289 |
1290 | let ddProcess = Process()
1291 | ddProcess.executableURL = URL(fileURLWithPath: "/bin/dd")
1292 | ddProcess.arguments = [
1293 | "if=\(backupPath.path)",
1294 | "of=\(diskImgPath.path)",
1295 | "bs=1m", // Large block size
1296 | "count=81920", // Ensure we copy everything (80GB+ should be sufficient)
1297 | ]
     | // NOTE(review): bs=1m with count=81920 caps this copy at 80 GiB; a disk.img larger
     | // than that would be silently truncated here — confirm against supported image sizes.
1298 |
1299 | let ddErrPipe = Pipe()
1300 | ddProcess.standardError = ddErrPipe
1301 |
1302 | do {
1303 | try ddProcess.run()
1304 | ddProcess.waitUntilExit()
1305 |
1306 | let errData = ddErrPipe.fileHandleForReading.readDataToEndOfFile()
1307 | let errOutput = String(data: errData, encoding: .utf8) ?? ""
1308 |
1309 | if ddProcess.terminationStatus == 0 {
1310 | Logger.info("Raw dd copy completed: \(errOutput)")
1311 | success = true
1312 | } else {
1313 | Logger.error("Raw dd copy failed: \(errOutput)")
1314 | }
1315 | } catch {
1316 | Logger.error("Error executing dd: \(error)")
1317 | }
1318 | }
1319 |
1320 | // Method 3: Use a more complex approach with disk mounting
1321 | if !success {
1322 | Logger.info("Trying advanced disk attach/detach approach...")
1323 |
1324 | // Mount the source disk image
1325 | let attachProcess = Process()
1326 | attachProcess.executableURL = URL(fileURLWithPath: "/usr/bin/hdiutil")
1327 | attachProcess.arguments = ["attach", backupPath.path, "-nomount"]
1328 |
1329 | let attachPipe = Pipe()
1330 | attachProcess.standardOutput = attachPipe
1331 |
1332 | try attachProcess.run()
1333 | attachProcess.waitUntilExit()
1334 |
1335 | let attachData = attachPipe.fileHandleForReading.readDataToEndOfFile()
1336 | let attachOutput = String(data: attachData, encoding: .utf8) ?? ""
1337 |
1338 | // Extract the disk device from output (/dev/diskN)
1339 | var diskDevice: String? = nil
1340 | if let diskMatch = attachOutput.range(
1341 | of: "/dev/disk[0-9]+", options: .regularExpression)
1342 | {
1343 | diskDevice = String(attachOutput[diskMatch])
1344 | }
1345 |
1346 | if let device = diskDevice {
1347 | Logger.info("Source disk attached at \(device)")
1348 |
1349 | // Create a bootable disk image clone
1350 | let createProcess = Process()
1351 | createProcess.executableURL = URL(fileURLWithPath: "/usr/sbin/asr")
1352 | createProcess.arguments = [
1353 | "restore",
1354 | "--source", device,
1355 | "--target", diskImgPath.path,
1356 | "--erase",
1357 | "--noprompt",
1358 | ]
1359 |
1360 | let createPipe = Pipe()
1361 | createProcess.standardOutput = createPipe
1362 |
1363 | do {
1364 | try createProcess.run()
1365 | createProcess.waitUntilExit()
1366 |
1367 | let createOutput =
1368 | String(
1369 | data: createPipe.fileHandleForReading.readDataToEndOfFile(),
1370 | encoding: .utf8) ?? ""
1371 | Logger.info("asr output: \(createOutput)")
1372 |
1373 | if createProcess.terminationStatus == 0 {
1374 | Logger.info("Successfully created bootable disk image clone!")
1375 | success = true
1376 | } else {
1377 | Logger.error("Failed to create bootable disk image clone")
1378 | }
1379 | } catch {
1380 | Logger.error("Error executing asr: \(error)")
1381 | }
1382 |
1383 | // Always detach the disk when done
1384 | let detachProcess = Process()
1385 | detachProcess.executableURL = URL(fileURLWithPath: "/usr/bin/hdiutil")
1386 | detachProcess.arguments = ["detach", device]
1387 | try? detachProcess.run()
1388 | detachProcess.waitUntilExit()
1389 | } else {
1390 | Logger.error("Failed to extract disk device from hdiutil attach output")
1391 | }
1392 | }
1393 |
1394 | // Fallback: If none of the methods worked, revert to our previous method just to ensure we have a usable image
1395 | if !success {
1396 | Logger.info("All specialized methods failed. Reverting to basic copy...")
1397 |
1398 | // If the disk image file exists (from a failed attempt), remove it
1399 | if FileManager.default.fileExists(atPath: diskImgPath.path) {
1400 | try FileManager.default.removeItem(at: diskImgPath)
1401 | }
1402 |
1403 | // Attempt a basic file copy which will at least give us something to work with
1404 | try FileManager.default.copyItem(at: backupPath, to: diskImgPath)
1405 | }
1406 |
1407 | // Optimize sparseness if possible
1408 | if FileManager.default.fileExists(atPath: "/bin/cp") {
1409 | Logger.info("Optimizing sparse file representation...")
1410 | let optimizedPath = diskImgPath.path + ".optimized"
1411 |
1412 | let process = Process()
1413 | process.executableURL = URL(fileURLWithPath: "/bin/cp")
1414 | process.arguments = ["-c", diskImgPath.path, optimizedPath]
1415 |
1416 | try process.run()
1417 | process.waitUntilExit()
1418 |
1419 | if process.terminationStatus == 0 {
1420 | let optimizedSize =
1421 | (try? FileManager.default.attributesOfItem(atPath: optimizedPath)[.size]
1422 | as? UInt64) ?? 0
1423 | let originalUsage = getActualDiskUsage(path: diskImgPath.path)
1424 | let optimizedUsage = getActualDiskUsage(path: optimizedPath)
1425 |
1426 | Logger.info(
1427 | "Sparse optimization results: Before: \(ByteCountFormatter.string(fromByteCount: Int64(originalUsage), countStyle: .file)) actual usage, After: \(ByteCountFormatter.string(fromByteCount: Int64(optimizedUsage), countStyle: .file)) actual usage (Apparent size: \(ByteCountFormatter.string(fromByteCount: Int64(optimizedSize), countStyle: .file)))"
1428 | )
1429 |
1430 | // Replace with optimized version
1431 | try FileManager.default.removeItem(at: diskImgPath)
1432 | try FileManager.default.moveItem(
1433 | at: URL(fileURLWithPath: optimizedPath), to: diskImgPath)
1434 | Logger.info("Replaced with optimized sparse version")
1435 | } else {
1436 | Logger.info("Sparse optimization failed, using original file")
1437 | try? FileManager.default.removeItem(atPath: optimizedPath)
1438 | }
1439 | }
1440 |
1441 | // Set permissions to 0644
1442 | let chmodProcess = Process()
1443 | chmodProcess.executableURL = URL(fileURLWithPath: "/bin/chmod")
1444 | chmodProcess.arguments = ["0644", diskImgPath.path]
1445 | try chmodProcess.run()
1446 | chmodProcess.waitUntilExit()
1447 |
1448 | // Final sync
1449 | let finalSyncProcess = Process()
1450 | finalSyncProcess.executableURL = URL(fileURLWithPath: "/bin/sync")
1451 | try finalSyncProcess.run()
1452 | finalSyncProcess.waitUntilExit()
1453 |
1454 | // Verify the final disk image
1455 | Logger.info("Verifying final disk image partition information...")
1456 | let verifyProcess = Process()
1457 | verifyProcess.executableURL = URL(fileURLWithPath: "/usr/bin/hdiutil")
1458 | verifyProcess.arguments = ["imageinfo", diskImgPath.path]
1459 |
1460 | let verifyOutputPipe = Pipe()
1461 | verifyProcess.standardOutput = verifyOutputPipe
1462 |
1463 | try verifyProcess.run()
1464 | verifyProcess.waitUntilExit()
1465 |
1466 | let verifyOutputData = verifyOutputPipe.fileHandleForReading.readDataToEndOfFile()
1467 | let verifyOutput = String(data: verifyOutputData, encoding: .utf8) ?? ""
1468 | Logger.info("Final disk image verification:\n\(verifyOutput)")
1469 |
1470 | // Clean up backup file
     | // NOTE(review): this throws if the backup cannot be removed even though disk.img is
     | // already usable at this point — confirm that failing hard here is intended.
1471 | try FileManager.default.removeItem(at: backupPath)
1472 |
1473 | Logger.info(
1474 | "Cache pull simulation completed successfully with partition table preservation")
1475 | }
1476 |
     | /// Materializes a VM directory at `destination` from the local content cache for `manifestId`.
     | ///
     | /// Fast path: if a pre-reassembled `disk.img.reassembled` exists in the cache it is copied
     | /// directly (together with cached — or layer-recovered — `nvram.bin` and `config.json`)
     | /// and the method returns early. Otherwise the cached lz4 disk-part layers are collected
     | /// via `DiskPartsCollector`, a sparse `disk.img` of the expected uncompressed size is
     | /// created, and each part is decompressed into place at its running offset. The expected
     | /// size comes from the OCI uncompressed-size annotation, falling back to `diskSize` in the
     | /// VM `config.json`; if neither is available, `PullError.missingUncompressedSizeAnnotation`
     | /// is thrown. When `cachingEnabled`, the reassembled image is saved back into the cache and
     | /// the individual part files (plus leftover `sha256_*` files) are cleaned up.
     | ///
     | /// - Parameters:
     | ///   - manifest: OCI manifest whose layers describe the cached artifacts.
     | ///   - manifestId: Cache key identifying the image's cache directory.
     | ///   - destination: VM directory to populate (`disk.img`, `nvram.bin`, `config.json`).
     | /// - Throws: `PullError` variants for setup/reassembly failures, plus file-system errors.
1477 | private func copyFromCache(manifest: Manifest, manifestId: String, to destination: URL)
1478 | async throws
1479 | {
1480 | Logger.info("Copying from cache...")
1481 |
1482 | // Define output URL and expected size variable scope here
1483 | let outputURL = destination.appendingPathComponent("disk.img")
1484 | var expectedTotalSize: UInt64? = nil // Use optional to handle missing config
1485 |
1486 | // Define the path for the reassembled cache image
1487 | let cacheDir = getImageCacheDirectory(manifestId: manifestId)
1488 | let reassembledCachePath = cacheDir.appendingPathComponent("disk.img.reassembled")
1489 | let nvramCachePath = cacheDir.appendingPathComponent("nvram.bin")
1490 |
1491 | // First check if we already have a reassembled image in the cache
1492 | if FileManager.default.fileExists(atPath: reassembledCachePath.path) {
1493 | Logger.info("Found reassembled disk image in cache, using it directly")
1494 |
1495 | // Copy reassembled disk image
1496 | try FileManager.default.copyItem(at: reassembledCachePath, to: outputURL)
1497 |
1498 | // Copy nvram if it exists
1499 | if FileManager.default.fileExists(atPath: nvramCachePath.path) {
1500 | try FileManager.default.copyItem(
1501 | at: nvramCachePath,
1502 | to: destination.appendingPathComponent("nvram.bin")
1503 | )
1504 | Logger.info("Using cached nvram.bin file")
1505 | } else {
1506 | // Look for nvram in layer cache if needed
1507 | let nvramLayers = manifest.layers.filter {
1508 | $0.mediaType == "application/octet-stream"
1509 | }
1510 | if let nvramLayer = nvramLayers.first {
1511 | let cachedNvram = getCachedLayerPath(
1512 | manifestId: manifestId, digest: nvramLayer.digest)
1513 | if FileManager.default.fileExists(atPath: cachedNvram.path) {
1514 | try FileManager.default.copyItem(
1515 | at: cachedNvram,
1516 | to: destination.appendingPathComponent("nvram.bin")
1517 | )
1518 | // Also save it to the dedicated nvram location for future use
1519 | try FileManager.default.copyItem(at: cachedNvram, to: nvramCachePath)
1520 | Logger.info("Recovered nvram.bin from layer cache")
1521 | }
1522 | }
1523 | }
1524 |
1525 | // Copy config if it exists
1526 | let configCachePath = cacheDir.appendingPathComponent("config.json")
1527 | if FileManager.default.fileExists(atPath: configCachePath.path) {
1528 | try FileManager.default.copyItem(
1529 | at: configCachePath,
1530 | to: destination.appendingPathComponent("config.json")
1531 | )
1532 | Logger.info("Using cached config.json file")
1533 | } else {
1534 | // Look for config in layer cache if needed
1535 | let configLayers = manifest.layers.filter {
1536 | $0.mediaType == "application/vnd.oci.image.config.v1+json"
1537 | }
1538 | if let configLayer = configLayers.first {
1539 | let cachedConfig = getCachedLayerPath(
1540 | manifestId: manifestId, digest: configLayer.digest)
1541 | if FileManager.default.fileExists(atPath: cachedConfig.path) {
1542 | try FileManager.default.copyItem(
1543 | at: cachedConfig,
1544 | to: destination.appendingPathComponent("config.json")
1545 | )
1546 | // Also save it to the dedicated config location for future use
1547 | try FileManager.default.copyItem(at: cachedConfig, to: configCachePath)
1548 | Logger.info("Recovered config.json from layer cache")
1549 | }
1550 | }
1551 | }
1552 |
1553 | Logger.info("Cache copy complete using reassembled image")
1554 | return
1555 | }
1556 |
1557 | // If we don't have a reassembled image, proceed with legacy part handling
1558 | Logger.info("No reassembled image found, using part-based reassembly")
1559 |
1560 | // Instantiate collector
1561 | let diskPartsCollector = DiskPartsCollector()
1562 | var lz4LayerCount = 0 // Count lz4 layers found
1563 | var hasNvram = false
1564 | var configPath: URL? = nil
1565 |
1566 | // First identify disk parts and non-disk files
1567 | for layer in manifest.layers {
1568 | let cachedLayer = getCachedLayerPath(manifestId: manifestId, digest: layer.digest)
1569 |
1570 | // Identify disk parts simply by media type
1571 | if layer.mediaType == "application/octet-stream+lz4" {
1572 | lz4LayerCount += 1 // Increment count
1573 |
1574 | // When caching is disabled, the file might not exist with the cache path name
1575 | // Check if the file exists before trying to use it
1576 | if !FileManager.default.fileExists(atPath: cachedLayer.path) {
1577 | Logger.info("Layer file not found in cache: \(cachedLayer.path) - skipping")
1578 | continue
1579 | }
1580 |
1581 | // Add to collector. It will assign the sequential part number.
1582 | let collectorPartNum = await diskPartsCollector.addPart(url: cachedLayer)
1583 | Logger.info(
1584 | "Adding cached lz4 layer (part \(lz4LayerCount)) -> Collector #\(collectorPartNum): \(cachedLayer.lastPathComponent)"
1585 | )
1586 | } else {
1587 | // --- Handle Non-Disk-Part Layer (from cache) ---
1588 | let fileName: String
1589 | switch layer.mediaType {
1590 | case "application/vnd.oci.image.config.v1+json":
1591 | fileName = "config.json"
1592 | configPath = cachedLayer
1593 | case "application/octet-stream":
1594 | // Assume nvram if config layer exists, otherwise assume single disk image
1595 | fileName = manifest.config != nil ? "nvram.bin" : "disk.img"
1596 | if fileName == "nvram.bin" {
1597 | hasNvram = true
1598 | }
1599 | case "application/vnd.oci.image.layer.v1.tar",
1600 | "application/octet-stream+gzip":
1601 | // Assume disk image for these types as well if encountered in cache scenario
1602 | fileName = "disk.img"
1603 | default:
1604 | Logger.info("Skipping unsupported cached layer media type: \(layer.mediaType)")
1605 | continue
1606 | }
1607 |
1608 | // When caching is disabled, the file might not exist with the cache path name
1609 | if !FileManager.default.fileExists(atPath: cachedLayer.path) {
1610 | Logger.info(
1611 | "Non-disk layer file not found in cache: \(cachedLayer.path) - skipping")
1612 | continue
1613 | }
1614 |
1615 | // Copy the non-disk file directly from cache to destination
1616 | try FileManager.default.copyItem(
1617 | at: cachedLayer,
1618 | to: destination.appendingPathComponent(fileName)
1619 | )
1620 | }
1621 | }
1622 |
1623 | // --- Safely retrieve parts AFTER loop ---
1624 | let diskPartSources = await diskPartsCollector.getSortedParts() // Sorted by assigned sequential number
1625 | let totalParts = await diskPartsCollector.getTotalParts() // Get total count from collector
1626 |
1627 | Logger.info("Found \(totalParts) lz4 disk parts in cache to reassemble.")
1628 | // --- End retrieving parts ---
1629 |
1630 | // Reassemble disk parts if needed
1631 | // Use the count from the collector
1632 | if !diskPartSources.isEmpty {
1633 | // Use totalParts from collector directly
1634 | Logger.info(
1635 | "Reassembling \(totalParts) disk image parts using sparse file technique...")
1636 |
1637 | // Get uncompressed size from cached config file (needs to be copied first)
1638 | let configURL = destination.appendingPathComponent("config.json")
1639 | // Parse config.json to get uncompressed size *before* reassembly
1640 | let uncompressedSize = getUncompressedSizeFromConfig(configPath: configURL)
1641 |
1642 | // Now also try to get disk size from VM config if OCI annotation not found
1643 | var vmConfigDiskSize: UInt64? = nil
1644 | if uncompressedSize == nil && FileManager.default.fileExists(atPath: configURL.path) {
1645 | do {
1646 | let configData = try Data(contentsOf: configURL)
1647 | let decoder = JSONDecoder()
1648 | if let vmConfig = try? decoder.decode(VMConfig.self, from: configData) {
1649 | vmConfigDiskSize = vmConfig.diskSize
1650 | if let size = vmConfigDiskSize {
1651 | Logger.info("Found diskSize from VM config.json: \(size) bytes")
1652 | }
1653 | }
1654 | } catch {
1655 | Logger.error("Failed to parse VM config.json for diskSize: \(error)")
1656 | }
1657 | }
1658 |
1659 | // Determine the size to use for the sparse file
1660 | // Use: annotation size > VM config diskSize > fallback (error)
1661 | if let size = uncompressedSize {
1662 | Logger.info("Using uncompressed size from annotation: \(size) bytes")
1663 | expectedTotalSize = size
1664 | } else if let size = vmConfigDiskSize {
1665 | Logger.info("Using diskSize from VM config: \(size) bytes")
1666 | expectedTotalSize = size
1667 | } else {
1668 | // If neither is found in cache scenario, throw error as we cannot determine the size
1669 | Logger.error(
1670 | "Missing both uncompressed size annotation and VM config diskSize for cached multi-part image."
1671 | + " Cannot reassemble."
1672 | )
1673 | throw PullError.missingUncompressedSizeAnnotation
1674 | }
1675 |
1676 | // Now that expectedTotalSize is guaranteed to be non-nil, proceed with setup
1677 | guard let sizeForTruncate = expectedTotalSize else {
1678 | // This should not happen due to the checks above, but safety first
1679 | let nilError: Error? = nil
1680 | // Use nil-coalescing to provide a default error, appeasing the compiler
1681 | throw PullError.reassemblySetupFailed(
1682 | path: outputURL.path, underlyingError: nilError ?? NoSpecificUnderlyingError())
1683 | }
1684 |
1685 | // If we have just one disk part, use the shared function
1686 | if totalParts == 1 {
1687 | // Single part - use shared function
1688 | let sourceURL = diskPartSources[0].1 // Get the first source URL (index 1 of the tuple)
1689 | try createDiskImageFromSource(
1690 | sourceURL: sourceURL,
1691 | destinationURL: outputURL,
1692 | diskSize: sizeForTruncate
1693 | )
1694 | } else {
1695 | // Multiple parts - we need to reassemble
1696 | // Wrap file handle setup and sparse file creation within this block
1697 | let outputHandle: FileHandle
1698 | do {
1699 | // Ensure parent directory exists
1700 | try FileManager.default.createDirectory(
1701 | at: outputURL.deletingLastPathComponent(), withIntermediateDirectories: true
1702 | )
1703 | // Explicitly create the file first, removing old one if needed
1704 | if FileManager.default.fileExists(atPath: outputURL.path) {
1705 | try FileManager.default.removeItem(at: outputURL)
1706 | }
1707 | guard FileManager.default.createFile(atPath: outputURL.path, contents: nil)
1708 | else {
1709 | throw PullError.fileCreationFailed(outputURL.path)
1710 | }
1711 | // Open handle for writing
1712 | outputHandle = try FileHandle(forWritingTo: outputURL)
1713 | // Set the file size (creates sparse file)
1714 | try outputHandle.truncate(atOffset: sizeForTruncate)
1715 | Logger.info(
1716 | "Sparse file initialized for cache reassembly with size: \(ByteCountFormatter.string(fromByteCount: Int64(sizeForTruncate), countStyle: .file))"
1717 | )
1718 | } catch {
1719 | Logger.error(
1720 | "Failed during setup for cached disk image reassembly: \(error.localizedDescription)",
1721 | metadata: ["path": outputURL.path])
1722 | throw PullError.reassemblySetupFailed(
1723 | path: outputURL.path, underlyingError: error)
1724 | }
1725 |
1726 | // Ensure handle is closed when exiting this scope
1727 | defer { try? outputHandle.close() }
1728 |
1729 | var reassemblyProgressLogger = ProgressLogger(threshold: 0.05)
1730 | var currentOffset: UInt64 = 0
1731 |
1732 | // Iterate from 1 up to the total number of parts found by the collector
1733 | for collectorPartNum in 1...totalParts {
1734 | // Find the source URL from our collected parts using the sequential collectorPartNum
1735 | guard
1736 | let sourceInfo = diskPartSources.first(where: { $0.0 == collectorPartNum })
1737 | else {
1738 | Logger.error(
1739 | "Missing required cached part number \(collectorPartNum) in collected parts during reassembly."
1740 | )
1741 | throw PullError.missingPart(collectorPartNum)
1742 | }
1743 | let sourceURL = sourceInfo.1 // Get URL from tuple
1744 |
1745 | // Log using the sequential collector part number
1746 | Logger.info(
1747 | "Decompressing part \(collectorPartNum) of \(totalParts) from cache: \(sourceURL.lastPathComponent) at offset \(currentOffset)..."
1748 | )
1749 |
1750 | // Always use the correct sparse decompression function
1751 | let decompressedBytesWritten = try decompressChunkAndWriteSparse(
1752 | inputPath: sourceURL.path,
1753 | outputHandle: outputHandle,
1754 | startOffset: currentOffset
1755 | )
1756 | currentOffset += decompressedBytesWritten
1757 | // Update progress (using sizeForTruncate which should be available)
1758 | reassemblyProgressLogger.logProgress(
1759 | current: Double(currentOffset) / Double(sizeForTruncate),
1760 | context: "Reassembling Cache")
1761 |
1762 | try outputHandle.synchronize() // Explicitly synchronize after each chunk
1763 | }
1764 |
1765 | // Finalize progress, close handle (done by defer)
1766 | reassemblyProgressLogger.logProgress(current: 1.0, context: "Reassembly Complete")
1767 |
1768 | // Add test patterns at the beginning and end of the file
     | // NOTE(review): these pattern writes overwrite real image bytes at offset 0 and at
     | // the very end of disk.img (boot-sector / backup-GPT region) — confirm this is
     | // intentional and harmless for the images produced here.
1769 | Logger.info("Writing test patterns to sparse file to verify integrity...")
1770 | let testPattern = "LUME_TEST_PATTERN".data(using: .utf8)!
1771 | try outputHandle.seek(toOffset: 0)
1772 | try outputHandle.write(contentsOf: testPattern)
1773 | try outputHandle.seek(toOffset: sizeForTruncate - UInt64(testPattern.count))
1774 | try outputHandle.write(contentsOf: testPattern)
1775 | try outputHandle.synchronize()
1776 |
1777 | // Ensure handle is properly synchronized before closing
1778 | try outputHandle.synchronize()
1779 |
1780 | // Close handle explicitly instead of relying on defer
1781 | try outputHandle.close()
1782 |
1783 | // Verify final size
1784 | let finalSize =
1785 | (try? FileManager.default.attributesOfItem(atPath: outputURL.path)[.size]
1786 | as? UInt64) ?? 0
1787 | Logger.info(
1788 | "Final disk image size from cache (before sparse file optimization): \(ByteCountFormatter.string(fromByteCount: Int64(finalSize), countStyle: .file))"
1789 | )
1790 |
1791 | // Use the calculated sizeForTruncate for comparison
1792 | if finalSize != sizeForTruncate {
1793 | Logger.info(
1794 | "Warning: Final reported size (\(finalSize) bytes) differs from expected size (\(sizeForTruncate) bytes), but this doesn't affect functionality"
1795 | )
1796 | }
1797 |
1798 | Logger.info("Disk image reassembly completed")
1799 |
1800 | // Optimize sparseness for cached reassembly if on macOS
1801 | if FileManager.default.fileExists(atPath: "/bin/cp") {
1802 | Logger.info("Optimizing sparse file representation for cached reassembly...")
1803 | let optimizedPath = outputURL.path + ".optimized"
1804 |
1805 | let process = Process()
1806 | process.executableURL = URL(fileURLWithPath: "/bin/cp")
1807 | process.arguments = ["-c", outputURL.path, optimizedPath]
1808 |
1809 | do {
1810 | try process.run()
1811 | process.waitUntilExit()
1812 |
1813 | if process.terminationStatus == 0 {
1814 | // Get size of optimized file
1815 | let optimizedSize =
1816 | (try? FileManager.default.attributesOfItem(atPath: optimizedPath)[
1817 | .size] as? UInt64) ?? 0
1818 | let originalUsage = getActualDiskUsage(path: outputURL.path)
1819 | let optimizedUsage = getActualDiskUsage(path: optimizedPath)
1820 |
1821 | Logger.info(
1822 | "Sparse optimization results for cache: Before: \(ByteCountFormatter.string(fromByteCount: Int64(originalUsage), countStyle: .file)) actual usage, After: \(ByteCountFormatter.string(fromByteCount: Int64(optimizedUsage), countStyle: .file)) actual usage (Apparent size: \(ByteCountFormatter.string(fromByteCount: Int64(optimizedSize), countStyle: .file)))"
1823 | )
1824 |
1825 | // Replace the original with the optimized version
1826 | try FileManager.default.removeItem(at: outputURL)
1827 | try FileManager.default.moveItem(
1828 | at: URL(fileURLWithPath: optimizedPath), to: outputURL)
1829 | Logger.info("Replaced cached reassembly with optimized sparse version")
1830 | } else {
1831 | Logger.info("Sparse optimization failed for cache, using original file")
1832 | try? FileManager.default.removeItem(atPath: optimizedPath)
1833 | }
1834 | } catch {
1835 | Logger.info(
1836 | "Error during sparse optimization for cache: \(error.localizedDescription)"
1837 | )
1838 | try? FileManager.default.removeItem(atPath: optimizedPath)
1839 | }
1840 | }
1841 |
1842 | // Set permissions to ensure consistency
1843 | let chmodProcess = Process()
1844 | chmodProcess.executableURL = URL(fileURLWithPath: "/bin/chmod")
1845 | chmodProcess.arguments = ["0644", outputURL.path]
1846 | try chmodProcess.run()
1847 | chmodProcess.waitUntilExit()
1848 | }
1849 |
1850 | // After successful reassembly, store the reassembled image in the cache
1851 | if cachingEnabled {
1852 | Logger.info("Saving reassembled disk image to cache for future use")
1853 |
1854 | // Copy the reassembled disk image to the cache
1855 | try FileManager.default.copyItem(at: outputURL, to: reassembledCachePath)
1856 |
1857 | // Clean up disk parts after successful reassembly
1858 | Logger.info("Cleaning up disk part files from cache")
1859 |
1860 | // Use an array to track unique file paths to avoid trying to delete the same file multiple times
1861 | var processedPaths: [String] = []
1862 |
1863 | for (_, partURL) in diskPartSources {
1864 | let path = partURL.path
1865 |
1866 | // Skip if we've already processed this exact path
1867 | if processedPaths.contains(path) {
1868 | Logger.info("Skipping duplicate part file: \(partURL.lastPathComponent)")
1869 | continue
1870 | }
1871 |
1872 | // Add to processed array
1873 | processedPaths.append(path)
1874 |
1875 | // Check if file exists before attempting to delete
1876 | if FileManager.default.fileExists(atPath: path) {
1877 | do {
1878 | try FileManager.default.removeItem(at: partURL)
1879 | Logger.info("Removed disk part: \(partURL.lastPathComponent)")
1880 | } catch {
1881 | Logger.info(
1882 | "Failed to remove disk part: \(partURL.lastPathComponent) - \(error.localizedDescription)"
1883 | )
1884 | }
1885 | } else {
1886 | Logger.info("Disk part already removed: \(partURL.lastPathComponent)")
1887 | }
1888 | }
1889 |
1890 | // Also save nvram if we have it
1891 | if hasNvram {
1892 | let srcNvram = destination.appendingPathComponent("nvram.bin")
1893 | if FileManager.default.fileExists(atPath: srcNvram.path) {
1894 | try? FileManager.default.copyItem(at: srcNvram, to: nvramCachePath)
1895 | }
1896 | }
1897 |
1898 | // Save config.json in the cache for future use if it exists
1899 | if let configPath = configPath {
1900 | let cacheConfigPath = cacheDir.appendingPathComponent("config.json")
1901 | try? FileManager.default.copyItem(at: configPath, to: cacheConfigPath)
1902 | }
1903 |
1904 | // Perform a final cleanup to catch any leftover part files
1905 | Logger.info("Performing final cleanup of any remaining part files")
1906 | do {
1907 | let cacheContents = try FileManager.default.contentsOfDirectory(
1908 | at: cacheDir, includingPropertiesForKeys: nil)
1909 |
1910 | for item in cacheContents {
1911 | let fileName = item.lastPathComponent
1912 | // Only remove sha256_ files that aren't the reassembled image, nvram or config
1913 | if fileName.starts(with: "sha256_") && fileName != "disk.img.reassembled"
1914 | && fileName != "nvram.bin" && fileName != "config.json"
1915 | && fileName != "manifest.json" && fileName != "metadata.json"
1916 | {
1917 | do {
1918 | try FileManager.default.removeItem(at: item)
1919 | Logger.info(
1920 | "Removed leftover file during final cleanup: \(fileName)")
1921 | } catch {
1922 | Logger.info(
1923 | "Failed to remove leftover file: \(fileName) - \(error.localizedDescription)"
1924 | )
1925 | }
1926 | }
1927 | }
1928 | } catch {
1929 | Logger.info("Error during final cleanup: \(error.localizedDescription)")
1930 | }
1931 | }
1932 | }
1933 |
1934 | Logger.info("Cache copy complete")
1935 | }
1936 |
    /// Obtains a bearer token from the registry's token endpoint for `repository`.
    ///
    /// - Parameters:
    ///   - repository: Repository path; percent-encoded before being placed in the scope.
    ///   - scopes: Actions to request (e.g. ["pull"], ["pull", "push"]); joined with ",".
    ///   - requireAllScopes: When true (push flows), a 401/403 is a hard permission
    ///     failure instead of triggering the pull-only retry or anonymous fallback below.
    /// - Returns: The token string, or "" when pull-only auth fails (public image assumed).
    /// - Throws: `PushError.insufficientPermissions`, `.authenticationFailed`, or `.missingToken`.
    private func getToken(
        repository: String, scopes: [String] = ["pull"], requireAllScopes: Bool = false
    ) async throws
        -> String
    {
        let encodedRepo =
            repository.addingPercentEncoding(withAllowedCharacters: .urlQueryAllowed) ?? repository

        // Build scope string from scopes array
        let scopeString = scopes.joined(separator: ",")

        Logger.info("Requesting token with scopes: \(scopeString) for repository: \(repository)")

        // Request both pull and push scope for uploads
        let url = URL(
            string:
                "https://\(self.registry)/token?scope=repository:\(encodedRepo):\(scopeString)&service=\(self.registry)"
        )!

        var request = URLRequest(url: url)
        request.httpMethod = "GET" // Token endpoint uses GET
        request.setValue("application/json", forHTTPHeaderField: "Accept")

        // *** Add Basic Authentication Header if credentials exist ***
        let (username, password) = getCredentialsFromEnvironment()
        if let username = username, let password = password, !username.isEmpty, !password.isEmpty {
            let authString = "\(username):\(password)"
            if let authData = authString.data(using: .utf8) {
                let base64Auth = authData.base64EncodedString()
                request.setValue("Basic \(base64Auth)", forHTTPHeaderField: "Authorization")
                Logger.info("Adding Basic Authentication header to token request")
            } else {
                Logger.error("Failed to encode credentials for Basic Auth")
            }
        } else {
            Logger.info("No credentials found in environment for token request")
            // Allow anonymous request for pull scope, but push scope likely requires auth
        }
        // *** End Basic Auth addition ***

        let (data, response) = try await URLSession.shared.data(for: request)

        // Check response status code *before* parsing JSON
        guard let httpResponse = response as? HTTPURLResponse else {
            throw PushError.authenticationFailed // Or a more generic network error
        }

        // Handle errors based on status code
        if httpResponse.statusCode != 200 {
            // Special handling for push operations that require all scopes:
            // surface a detailed permission error and abort.
            if requireAllScopes
                && (httpResponse.statusCode == 401 || httpResponse.statusCode == 403)
            {
                // Try to parse the error message from the response
                let errorResponse = try? JSONSerialization.jsonObject(with: data) as? [String: Any]
                let errors = errorResponse?["errors"] as? [[String: Any]]
                let errorMessage = errors?.first?["message"] as? String ?? "Permission denied"

                Logger.error("Push permission denied: \(errorMessage)")
                Logger.error(
                    "Your token does not have 'packages:write' permission to \(repository)")
                Logger.error("Make sure you have the appropriate access rights to the repository")

                // Check if this is an organization repository ("org/name" form)
                if repository.contains("/") {
                    let orgName = repository.split(separator: "/").first.map(String.init) ?? ""
                    if orgName != "" {
                        Logger.error("For organization repositories (\(orgName)), you must:")
                        Logger.error("1. Be a member of the organization with write access")
                        Logger.error("2. Have a token with 'write:packages' scope")
                        Logger.error(
                            "3. The organization must allow you to create/publish packages")
                    }
                }

                throw PushError.insufficientPermissions("Push permission denied: \(errorMessage)")
            }

            // If we get 403 and we're requesting both pull and push, retry with just pull
            // ONLY if requireAllScopes is false
            if httpResponse.statusCode == 403 && scopes.contains("push")
                && scopes.contains("pull") && !requireAllScopes
            {
                Logger.info("Permission denied for push scope, retrying with pull scope only")
                return try await getToken(repository: repository, scopes: ["pull"])
            }

            // For pull scope only, if authentication fails, assume this is a public image
            // and continue without a token (empty string)
            if scopes == ["pull"] {
                Logger.info(
                    "Authentication failed for pull scope, assuming public image and continuing without token"
                )
                return ""
            }

            // Handle other authentication failures
            let responseBody = String(data: data, encoding: .utf8) ?? "(Could not decode body)"
            Logger.error(
                "Token request failed with status code: \(httpResponse.statusCode). Response: \(responseBody)"
            )

            throw PushError.authenticationFailed
        }

        // Some registries return "token", others "access_token"; accept either key.
        let jsonResponse = try JSONSerialization.jsonObject(with: data) as? [String: Any]
        guard
            let token = jsonResponse?["token"] as? String ?? jsonResponse?["access_token"]
                as? String
        else {
            Logger.error("Token not found in registry response")
            throw PushError.missingToken
        }

        return token
    }
2053 |
2054 | private func fetchManifest(repository: String, tag: String, token: String) async throws -> (
2055 | Manifest, String
2056 | ) {
2057 | var request = URLRequest(
2058 | url: URL(string: "https://\(self.registry)/v2/\(repository)/manifests/\(tag)")!)
2059 |
2060 | // Only add Authorization header if token is not empty
2061 | if !token.isEmpty {
2062 | request.addValue("Bearer \(token)", forHTTPHeaderField: "Authorization")
2063 | }
2064 |
2065 | request.addValue("application/vnd.oci.image.manifest.v1+json", forHTTPHeaderField: "Accept")
2066 |
2067 | let (data, response) = try await URLSession.shared.data(for: request)
2068 | guard let httpResponse = response as? HTTPURLResponse,
2069 | httpResponse.statusCode == 200,
2070 | let digest = httpResponse.value(forHTTPHeaderField: "Docker-Content-Digest")
2071 | else {
2072 | throw PullError.manifestFetchFailed
2073 | }
2074 |
2075 | let manifest = try JSONDecoder().decode(Manifest.self, from: data)
2076 | return (manifest, digest)
2077 | }
2078 |
    /// Downloads one blob (layer) from the registry to `url`, retrying with
    /// backoff on failure, and mirrors it into the layer cache when a
    /// `manifestId` is provided.
    ///
    /// - Parameters:
    ///   - repository: Repository the blob belongs to.
    ///   - digest: Content digest used to build the blob URL.
    ///   - mediaType: Layer media type; sent as the Accept header.
    ///   - token: Bearer token; omitted from the request when empty (anonymous pull).
    ///   - url: Destination file location for the downloaded blob.
    ///   - maxRetries: Attempts before giving up.
    ///   - progress: Tracker credited with the response's expected content length.
    ///   - manifestId: When set, a copy of the blob is stored in the cache directory.
    /// - Throws: `PullError.layerDownloadFailed` or the last transport error.
    private func downloadLayer(
        repository: String,
        digest: String,
        mediaType: String,
        token: String,
        to url: URL,
        maxRetries: Int = 5,
        progress: isolated ProgressTracker,
        manifestId: String? = nil
    ) async throws {
        var lastError: Error?

        // Create a shared session configuration for all download attempts
        let config = URLSessionConfiguration.default
        config.timeoutIntervalForRequest = 60
        config.timeoutIntervalForResource = 3600
        config.waitsForConnectivity = true
        config.httpMaximumConnectionsPerHost = 6
        config.httpShouldUsePipelining = true
        config.requestCachePolicy = .reloadIgnoringLocalCacheData

        // Enable HTTP/2 when available
        if #available(macOS 13.0, *) {
            config.httpAdditionalHeaders = ["Connection": "keep-alive"]
        }

        // Check for TCP window size and optimize if possible
        if getTCPReceiveWindowSize() != nil {
            config.networkServiceType = .responsiveData
        }

        // Create one session to be reused across retries
        let session = URLSession(configuration: config)

        for attempt in 1...maxRetries {
            do {
                var request = URLRequest(
                    url: URL(string: "https://\(self.registry)/v2/\(repository)/blobs/\(digest)")!)

                // Only add Authorization header if token is not empty
                if !token.isEmpty {
                    request.addValue("Bearer \(token)", forHTTPHeaderField: "Authorization")
                }

                request.addValue(mediaType, forHTTPHeaderField: "Accept")
                request.timeoutInterval = 60

                // Add Accept-Encoding for compressed transfer if content isn't already compressed
                if !mediaType.contains("gzip") && !mediaType.contains("compressed") {
                    request.addValue("gzip, deflate", forHTTPHeaderField: "Accept-Encoding")
                }

                let (tempURL, response) = try await session.download(for: request)
                guard let httpResponse = response as? HTTPURLResponse,
                    httpResponse.statusCode == 200
                else {
                    throw PullError.layerDownloadFailed(digest)
                }

                try FileManager.default.createDirectory(
                    at: url.deletingLastPathComponent(), withIntermediateDirectories: true)
                try FileManager.default.moveItem(at: tempURL, to: url)
                // NOTE(review): expectedContentLength is -1 when the server omits
                // Content-Length, which would subtract from progress here — confirm
                // the registry always reports a length for blobs.
                progress.addProgress(Int64(httpResponse.expectedContentLength))

                // Always save a copy to the cache directory for use by copyFromCache,
                // even if caching is disabled
                if let manifestId = manifestId {
                    let cachedLayer = getCachedLayerPath(manifestId: manifestId, digest: digest)
                    // Make sure cache directory exists
                    try FileManager.default.createDirectory(
                        at: cachedLayer.deletingLastPathComponent(),
                        withIntermediateDirectories: true
                    )

                    if FileManager.default.fileExists(atPath: cachedLayer.path) {
                        try FileManager.default.removeItem(at: cachedLayer)
                    }
                    try FileManager.default.copyItem(at: url, to: cachedLayer)
                }

                // Mark download as complete regardless of caching
                markDownloadComplete(digest)
                return

            } catch {
                lastError = error
                if attempt < maxRetries {
                    // Linear backoff (attempt * 2s) plus random jitter before retrying
                    let baseDelay = Double(attempt) * 2
                    let jitter = Double.random(in: 0...1)
                    let delay = baseDelay + jitter
                    try await Task.sleep(nanoseconds: UInt64(delay * 1_000_000_000))

                    Logger.info("Retrying download (attempt \(attempt+1)/\(maxRetries)): \(digest)")
                }
            }
        }

        // All attempts failed; surface the last error (or a generic failure).
        throw lastError ?? PullError.layerDownloadFailed(digest)
    }
2179 |
2180 | // Function removed as it's not applicable to the observed manifest format
2181 | /*
2182 | private func extractPartInfo(from mediaType: String) -> (partNum: Int, total: Int)? {
2183 | let pattern = #"part\\.number=(\\d+);part\\.total=(\\d+)"#
2184 | guard let regex = try? NSRegularExpression(pattern: pattern),
2185 | let match = regex.firstMatch(
2186 | in: mediaType,
2187 | range: NSRange(mediaType.startIndex..., in: mediaType)
2188 | ),
2189 | let partNumRange = Range(match.range(at: 1), in: mediaType),
2190 | let totalRange = Range(match.range(at: 2), in: mediaType),
2191 | let partNum = Int(mediaType[partNumRange]),
2192 | let total = Int(mediaType[totalRange])
2193 | else {
2194 | return nil
2195 | }
2196 | return (partNum, total)
2197 | }
2198 | */
2199 |
2200 | private func listRepositories() async throws -> [String] {
2201 | var request = URLRequest(
2202 | url: URL(string: "https://\(registry)/v2/\(organization)/repositories/list")!)
2203 | request.setValue("application/json", forHTTPHeaderField: "Accept")
2204 |
2205 | let (data, response) = try await URLSession.shared.data(for: request)
2206 | guard let httpResponse = response as? HTTPURLResponse else {
2207 | throw PullError.manifestFetchFailed
2208 | }
2209 |
2210 | if httpResponse.statusCode == 404 {
2211 | return []
2212 | }
2213 |
2214 | guard httpResponse.statusCode == 200 else {
2215 | throw PullError.manifestFetchFailed
2216 | }
2217 |
2218 | let repoList = try JSONDecoder().decode(RepositoryList.self, from: data)
2219 | return repoList.repositories
2220 | }
2221 |
    /// Scans the local cache (`<cacheDirectory>/<organization>`) and returns the
    /// images found there, sorted by repository and then image ID.
    ///
    /// Each cache entry is resolved through its `metadata.json`. Entries without
    /// metadata are probed via `manifest.json` for logging/validation only; they
    /// are never added to the result because the repository name is unknown.
    /// - Throws: File-system errors while listing the organization directory.
    func getImages() async throws -> [CachedImage] {
        Logger.info("Scanning for cached images in \(cacheDirectory.path)")
        var images: [CachedImage] = []
        let orgDir = cacheDirectory.appendingPathComponent(organization)

        if FileManager.default.fileExists(atPath: orgDir.path) {
            let contents = try FileManager.default.contentsOfDirectory(atPath: orgDir.path)
            Logger.info("Found \(contents.count) items in cache directory")

            for item in contents {
                let itemPath = orgDir.appendingPathComponent(item)
                var isDirectory: ObjCBool = false

                // Each cached image is a directory; skip plain files.
                guard
                    FileManager.default.fileExists(
                        atPath: itemPath.path, isDirectory: &isDirectory),
                    isDirectory.boolValue
                else { continue }

                // First try to read metadata file
                let metadataPath = itemPath.appendingPathComponent("metadata.json")
                if let metadataData = try? Data(contentsOf: metadataPath),
                    let metadata = try? JSONDecoder().decode(ImageMetadata.self, from: metadataData)
                {
                    Logger.info(
                        "Found metadata for image",
                        metadata: [
                            "image": metadata.image,
                            "manifest_id": metadata.manifestId,
                        ])
                    images.append(
                        CachedImage(
                            repository: metadata.image,
                            // Short image ID: first 12 characters of the manifest ID.
                            imageId: String(metadata.manifestId.prefix(12)),
                            manifestId: metadata.manifestId
                        ))
                    continue
                }

                // Fallback to checking manifest if metadata doesn't exist
                Logger.info("No metadata found for \(item), checking manifest")
                let manifestPath = itemPath.appendingPathComponent("manifest.json")
                guard FileManager.default.fileExists(atPath: manifestPath.path),
                    let manifestData = try? Data(contentsOf: manifestPath),
                    let manifest = try? JSONDecoder().decode(Manifest.self, from: manifestData)
                else {
                    Logger.info("No valid manifest found for \(item)")
                    continue
                }

                let manifestId = item

                // Verify the manifest ID matches
                let currentManifestId = getManifestIdentifier(manifest, manifestDigest: "")
                Logger.info(
                    "Manifest check",
                    metadata: [
                        "item": item,
                        "current_manifest_id": currentManifestId,
                        "matches": "\(currentManifestId == manifestId)",
                    ])
                // NOTE(review): both outcomes of this check end the iteration without
                // appending to `images` — metadata-less entries are intentionally
                // skipped since the repository name cannot be recovered here.
                if currentManifestId == manifestId {
                    // Skip if we can't determine the repository name
                    // This should be rare since we now save metadata during pull
                    Logger.info("Skipping image without metadata: \(item)")
                    continue
                }
            }
        } else {
            Logger.info("Cache directory does not exist")
        }

        Logger.info("Found \(images.count) cached images")
        // Sort by repository first, then by image ID within a repository.
        return images.sorted {
            $0.repository == $1.repository ? $0.imageId < $1.imageId : $0.repository < $1.repository
        }
    }
2299 |
2300 | private func listRemoteImageTags(repository: String) async throws -> [String] {
2301 | var request = URLRequest(
2302 | url: URL(string: "https://\(registry)/v2/\(organization)/\(repository)/tags/list")!)
2303 | request.setValue("application/json", forHTTPHeaderField: "Accept")
2304 |
2305 | let (data, response) = try await URLSession.shared.data(for: request)
2306 | guard let httpResponse = response as? HTTPURLResponse else {
2307 | throw PullError.manifestFetchFailed
2308 | }
2309 |
2310 | if httpResponse.statusCode == 404 {
2311 | return []
2312 | }
2313 |
2314 | guard httpResponse.statusCode == 200 else {
2315 | throw PullError.manifestFetchFailed
2316 | }
2317 |
2318 | let repoTags = try JSONDecoder().decode(RepositoryTags.self, from: data)
2319 | return repoTags.tags
2320 | }
2321 |
    /// Determines an appropriate I/O chunk size from free memory reported by Mach.
    ///
    /// Tiers: <512MB free → 128KB minimum; "constrained" (per
    /// `determineIfMemoryConstrained`) → adaptive (free/1000) capped at 512KB;
    /// otherwise adaptive capped at 2MB. Falls back to 512KB when VM statistics
    /// are unavailable.
    private func getOptimalChunkSize() -> Int {
        // Try to get system memory info
        var stats = vm_statistics64_data_t()
        var size = mach_msg_type_number_t(
            MemoryLayout<vm_statistics64_data_t>.size / MemoryLayout<integer_t>.size)
        let hostPort = mach_host_self()

        // host_statistics64 wants an integer_t buffer, so rebind the struct pointer.
        let result = withUnsafeMutablePointer(to: &stats) { statsPtr in
            statsPtr.withMemoryRebound(to: integer_t.self, capacity: Int(size)) { ptr in
                host_statistics64(hostPort, HOST_VM_INFO64, ptr, &size)
            }
        }

        // Define chunk size parameters
        let safeMinimumChunkSize = 128 * 1024 // Reduced minimum for constrained systems
        let defaultChunkSize = 512 * 1024 // Standard default / minimum for non-constrained
        let constrainedCap = 512 * 1024 // Lower cap for constrained systems
        let standardCap = 2 * 1024 * 1024 // Standard cap for non-constrained systems

        // If we can't get memory info, return a reasonable default
        guard result == KERN_SUCCESS else {
            Logger.info(
                "Could not get VM statistics, using default chunk size: \(defaultChunkSize) bytes")
            return defaultChunkSize
        }

        // Calculate free memory in bytes
        // NOTE(review): assumes 4KB pages; Apple Silicon uses 16KB pages, so free
        // memory may be underestimated here — confirm intended.
        let pageSize = 4096 // Use a constant page size assumption
        let freeMemory = UInt64(stats.free_count) * UInt64(pageSize)
        let isConstrained = determineIfMemoryConstrained() // Check if generally constrained

        // Extremely constrained (< 512MB free) -> use absolute minimum
        if freeMemory < 536_870_912 { // 512MB
            Logger.info(
                "System extremely memory constrained (<512MB free), using minimum chunk size: \(safeMinimumChunkSize) bytes"
            )
            return safeMinimumChunkSize
        }

        // Generally constrained -> use adaptive size with lower cap
        if isConstrained {
            let adaptiveSize = min(
                max(Int(freeMemory / 1000), safeMinimumChunkSize), constrainedCap)
            Logger.info(
                "System memory constrained, using adaptive chunk size capped at \(constrainedCap) bytes: \(adaptiveSize) bytes"
            )
            return adaptiveSize
        }

        // Not constrained -> use original adaptive logic with standard cap
        let adaptiveSize = min(max(Int(freeMemory / 1000), defaultChunkSize), standardCap)
        Logger.info(
            "System has sufficient memory, using adaptive chunk size capped at \(standardCap) bytes: \(adaptiveSize) bytes"
        )
        return adaptiveSize
    }
2379 |
2380 | // Check if system is memory constrained for more aggressive memory management
2381 | private func determineIfMemoryConstrained() -> Bool {
2382 | var stats = vm_statistics64_data_t()
2383 | var size = mach_msg_type_number_t(
2384 | MemoryLayout<vm_statistics64_data_t>.size / MemoryLayout<integer_t>.size)
2385 | let hostPort = mach_host_self()
2386 |
2387 | let result = withUnsafeMutablePointer(to: &stats) { statsPtr in
2388 | statsPtr.withMemoryRebound(to: integer_t.self, capacity: Int(size)) { ptr in
2389 | host_statistics64(hostPort, HOST_VM_INFO64, ptr, &size)
2390 | }
2391 | }
2392 |
2393 | guard result == KERN_SUCCESS else {
2394 | // If we can't determine, assume constrained for safety
2395 | return true
2396 | }
2397 |
2398 | // Calculate free memory in bytes using a fixed page size
2399 | // Standard page size on macOS is 4KB or 16KB
2400 | let pageSize = 4096 // Use a constant instead of vm_kernel_page_size
2401 | let freeMemory = UInt64(stats.free_count) * UInt64(pageSize)
2402 |
2403 | // Consider memory constrained if less than 2GB free
2404 | return freeMemory < 2_147_483_648 // 2GB
2405 | }
2406 |
    /// Estimates network quality on a 1-5 scale (5 = excellent) by pinging the
    /// registry host three times and bucketing the average round-trip time.
    /// Returns the default of 3 (medium) when ping fails or its output cannot
    /// be parsed.
    private func determineNetworkQuality() -> Int {
        // Default quality is medium (3)
        var quality = 3

        // A simple ping test to determine network quality
        let process = Process()
        process.executableURL = URL(fileURLWithPath: "/sbin/ping")
        // -c 3: three probes; -q: quiet summary output only
        process.arguments = ["-c", "3", "-q", self.registry]

        let outputPipe = Pipe()
        process.standardOutput = outputPipe
        process.standardError = outputPipe

        do {
            try process.run()
            process.waitUntilExit()

            let outputData = try outputPipe.fileHandleForReading.readToEnd() ?? Data()
            if let output = String(data: outputData, encoding: .utf8) {
                // Check for average ping time: grab the "min/avg/" prefix of
                // ping's round-trip summary line.
                if let avgTimeRange = output.range(
                    of: "= [0-9.]+/([0-9.]+)/", options: .regularExpression)
                {
                    let avgSubstring = output[avgTimeRange]
                    // Second "/"-separated field of the match is the average.
                    if let avgString = avgSubstring.split(separator: "/").dropFirst().first,
                        let avgTime = Double(avgString)
                    {

                        // Classify network quality based on ping time
                        if avgTime < 50 {
                            quality = 5 // Excellent
                        } else if avgTime < 100 {
                            quality = 4 // Good
                        } else if avgTime < 200 {
                            quality = 3 // Average
                        } else if avgTime < 300 {
                            quality = 2 // Poor
                        } else {
                            quality = 1 // Very poor
                        }
                    }
                }
            }
        } catch {
            // Default to medium if ping fails
            Logger.info("Failed to determine network quality, using default settings")
        }

        return quality
    }
2458 |
2459 | // Helper method to calculate optimal concurrency based on system capabilities
2460 | private func calculateOptimalConcurrency(memoryConstrained: Bool, networkQuality: Int) -> Int {
2461 | // Base concurrency based on network quality (1-5)
2462 | let baseThreads = min(networkQuality * 2, 8)
2463 |
2464 | if memoryConstrained {
2465 | // Reduce concurrency for memory-constrained systems
2466 | return max(2, baseThreads / 2)
2467 | }
2468 |
2469 | // Physical cores available on the system
2470 | let cores = ProcessInfo.processInfo.processorCount
2471 |
2472 | // Adaptive approach: 1-2 threads per core depending on network quality
2473 | let threadsPerCore = (networkQuality >= 4) ? 2 : 1
2474 | let systemBasedThreads = min(cores * threadsPerCore, 12)
2475 |
2476 | // Take the larger of network-based and system-based concurrency
2477 | return max(baseThreads, systemBasedThreads)
2478 | }
2479 |
2480 | // Helper to get optimal TCP window size
2481 | private func getTCPReceiveWindowSize() -> Int? {
2482 | // Try to query system TCP window size
2483 | let process = Process()
2484 | process.executableURL = URL(fileURLWithPath: "/usr/sbin/sysctl")
2485 | process.arguments = ["net.inet.tcp.recvspace"]
2486 |
2487 | let outputPipe = Pipe()
2488 | process.standardOutput = outputPipe
2489 |
2490 | do {
2491 | try process.run()
2492 | process.waitUntilExit()
2493 |
2494 | let outputData = try outputPipe.fileHandleForReading.readToEnd() ?? Data()
2495 | if let output = String(data: outputData, encoding: .utf8),
2496 | let valueStr = output.split(separator: ":").last?.trimmingCharacters(
2497 | in: .whitespacesAndNewlines),
2498 | let value = Int(valueStr)
2499 | {
2500 | return value
2501 | }
2502 | } catch {
2503 | // Ignore errors, we'll use defaults
2504 | }
2505 |
2506 | return nil
2507 | }
2508 |
2509 | // Add helper to check media type and get decompress command
2510 | private func getDecompressionCommand(for mediaType: String) -> String? {
2511 | // Determine appropriate decompression command based on layer media type
2512 | Logger.info("Determining decompression command for media type: \(mediaType)")
2513 |
2514 | // For the specific format that appears in our GHCR repository, skip decompression attempts
2515 | // These files are labeled +lzfse but aren't actually in Apple Archive format
2516 | if mediaType.contains("+lzfse;part.number=") {
2517 | Logger.info("Detected LZFSE part file, using direct copy instead of decompression")
2518 | return nil
2519 | }
2520 |
2521 | // Check for LZFSE or Apple Archive format anywhere in the media type string
2522 | // The format may include part information like: application/octet-stream+lzfse;part.number=1;part.total=38
2523 | if mediaType.contains("+lzfse") || mediaType.contains("+aa") {
2524 | // Apple Archive format requires special handling
2525 | if let aaPath = findExecutablePath(for: "aa") {
2526 | Logger.info("Found Apple Archive tool at: \(aaPath)")
2527 | return "apple_archive:\(aaPath)"
2528 | } else {
2529 | Logger.error(
2530 | "Apple Archive tool (aa) not found in PATH, falling back to default path")
2531 |
2532 | // Check if the default path exists
2533 | let defaultPath = "/usr/bin/aa"
2534 | if FileManager.default.isExecutableFile(atPath: defaultPath) {
2535 | Logger.info("Default Apple Archive tool exists at: \(defaultPath)")
2536 | } else {
2537 | Logger.error("Default Apple Archive tool not found at: \(defaultPath)")
2538 | }
2539 |
2540 | return "apple_archive:/usr/bin/aa"
2541 | }
2542 | } else {
2543 | Logger.info(
2544 | "Unsupported media type: \(mediaType) - only Apple Archive (+lzfse/+aa) is supported"
2545 | )
2546 | return nil
2547 | }
2548 | }
2549 |
2550 | // Helper to find executables (optional, or hardcode paths)
2551 | private func findExecutablePath(for executableName: String) -> String? {
2552 | let pathEnv =
2553 | ProcessInfo.processInfo.environment["PATH"]
2554 | ?? "/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/opt/homebrew/bin"
2555 | let paths = pathEnv.split(separator: ":")
2556 | for path in paths {
2557 | let executablePath = URL(fileURLWithPath: String(path)).appendingPathComponent(
2558 | executableName
2559 | ).path
2560 | if FileManager.default.isExecutableFile(atPath: executablePath) {
2561 | return executablePath
2562 | }
2563 | }
2564 | return nil
2565 | }
2566 |
2567 | // Helper function to extract uncompressed disk size from config.json
2568 | private func getUncompressedSizeFromConfig(configPath: URL) -> UInt64? {
2569 | guard FileManager.default.fileExists(atPath: configPath.path) else {
2570 | Logger.info("Config file not found: \(configPath.path)")
2571 | return nil
2572 | }
2573 |
2574 | do {
2575 | let configData = try Data(contentsOf: configPath)
2576 | let decoder = JSONDecoder()
2577 | let ociConfig = try decoder.decode(OCIConfig.self, from: configData)
2578 |
2579 | if let sizeString = ociConfig.annotations?.uncompressedSize,
2580 | let size = UInt64(sizeString)
2581 | {
2582 | Logger.info("Found uncompressed disk size annotation: \(size) bytes")
2583 | return size
2584 | } else {
2585 | Logger.info("No uncompressed disk size annotation found in config.json")
2586 | return nil
2587 | }
2588 | } catch {
2589 | Logger.error("Failed to parse config.json for uncompressed size: \(error)")
2590 | return nil
2591 | }
2592 | }
2593 |
2594 | // Helper function to find formatted file with potential extensions
2595 | private func findFormattedFile(tempFormatted: URL) -> URL? {
2596 | // Check for the exact path first
2597 | if FileManager.default.fileExists(atPath: tempFormatted.path) {
2598 | return tempFormatted
2599 | }
2600 |
2601 | // Check with .dmg extension
2602 | let dmgPath = tempFormatted.path + ".dmg"
2603 | if FileManager.default.fileExists(atPath: dmgPath) {
2604 | return URL(fileURLWithPath: dmgPath)
2605 | }
2606 |
2607 | // Check with .sparseimage extension
2608 | let sparsePath = tempFormatted.path + ".sparseimage"
2609 | if FileManager.default.fileExists(atPath: sparsePath) {
2610 | return URL(fileURLWithPath: sparsePath)
2611 | }
2612 |
2613 | // Try to find any file with the same basename
2614 | do {
2615 | let files = try FileManager.default.contentsOfDirectory(
2616 | at: tempFormatted.deletingLastPathComponent(),
2617 | includingPropertiesForKeys: nil)
2618 | if let matchingFile = files.first(where: {
2619 | $0.lastPathComponent.starts(with: tempFormatted.lastPathComponent)
2620 | }) {
2621 | return matchingFile
2622 | }
2623 | } catch {
2624 | Logger.error("Failed to list directory contents: \(error)")
2625 | }
2626 |
2627 | return nil
2628 | }
2629 |
    /// Decompresses an LZFSE-compressed disk image by piping
    /// `compression_tool -decode` into `dd conv=sparse`, preserving sparseness
    /// in the output.
    ///
    /// - Parameters:
    ///   - inputPath: Path to the compressed image.
    ///   - outputPath: Destination path; when nil the input file is replaced in
    ///     place (a `.compressed.bak` copy of the original is kept).
    /// - Returns: true on success. The output is validated by checking the
    ///   0x55AA boot-sector signature at bytes 510-511.
    @discardableResult
    private func decompressLZFSEImage(inputPath: String, outputPath: String? = nil) -> Bool {
        Logger.info("Attempting to decompress LZFSE compressed disk image using sparse pipe...")

        let finalOutputPath = outputPath ?? inputPath // If outputPath is nil, we'll overwrite input
        let tempFinalPath = finalOutputPath + ".ddsparse.tmp" // Temporary name during dd operation

        // Ensure the temporary file doesn't exist from a previous failed run
        try? FileManager.default.removeItem(atPath: tempFinalPath)

        // Process 1: compression_tool
        let process1 = Process()
        process1.executableURL = URL(fileURLWithPath: "/usr/bin/compression_tool")
        process1.arguments = [
            "-decode",
            "-i", inputPath,
            "-o", "/dev/stdout", // Write to standard output
        ]

        // Process 2: dd
        let process2 = Process()
        process2.executableURL = URL(fileURLWithPath: "/bin/dd")
        process2.arguments = [
            "if=/dev/stdin", // Read from standard input
            "of=\(tempFinalPath)", // Write to the temporary final path
            "conv=sparse", // Use sparse conversion
            "bs=1m", // Use a reasonable block size (e.g., 1MB)
        ]

        // Create pipes
        let pipe = Pipe() // Connects process1 stdout to process2 stdin
        let errorPipe1 = Pipe()
        let errorPipe2 = Pipe()

        process1.standardOutput = pipe
        process1.standardError = errorPipe1

        process2.standardInput = pipe
        process2.standardError = errorPipe2

        do {
            Logger.info("Starting decompression pipe: compression_tool | dd conv=sparse...")
            // Start processes
            try process1.run()
            try process2.run()

            // Close the write end of the pipe for process2 to prevent hanging
            // This might not be strictly necessary if process1 exits cleanly, but safer.
            // Note: Accessing fileHandleForWriting after run can be tricky.
            // We rely on process1 exiting to signal EOF to process2.

            process1.waitUntilExit()
            process2.waitUntilExit() // Wait for dd to finish processing the stream

            // --- Check for errors ---
            let errorData1 = errorPipe1.fileHandleForReading.readDataToEndOfFile()
            if !errorData1.isEmpty,
                let errorString = String(data: errorData1, encoding: .utf8)?.trimmingCharacters(
                    in: .whitespacesAndNewlines), !errorString.isEmpty
            {
                Logger.error("compression_tool stderr: \(errorString)")
            }
            let errorData2 = errorPipe2.fileHandleForReading.readDataToEndOfFile()
            if !errorData2.isEmpty,
                let errorString = String(data: errorData2, encoding: .utf8)?.trimmingCharacters(
                    in: .whitespacesAndNewlines), !errorString.isEmpty
            {
                // dd often reports blocks in/out to stderr, filter that if needed, but log for now
                Logger.info("dd stderr: \(errorString)")
            }

            // Check termination statuses
            let status1 = process1.terminationStatus
            let status2 = process2.terminationStatus

            if status1 != 0 || status2 != 0 {
                Logger.error(
                    "Pipe command failed. compression_tool status: \(status1), dd status: \(status2)"
                )
                try? FileManager.default.removeItem(atPath: tempFinalPath) // Clean up failed attempt
                return false
            }

            // --- Validation ---
            if FileManager.default.fileExists(atPath: tempFinalPath) {
                let fileSize =
                    (try? FileManager.default.attributesOfItem(atPath: tempFinalPath)[.size]
                        as? UInt64) ?? 0
                let actualUsage = getActualDiskUsage(path: tempFinalPath)
                Logger.info(
                    "Piped decompression successful - Allocated: \(ByteCountFormatter.string(fromByteCount: Int64(fileSize), countStyle: .file)), Actual Usage: \(ByteCountFormatter.string(fromByteCount: Int64(actualUsage), countStyle: .file))"
                )

                // Basic header validation: MBR/boot-sector signature 0x55AA at offset 510
                var isValid = false
                if let fileHandle = FileHandle(forReadingAtPath: tempFinalPath) {
                    if let data = try? fileHandle.read(upToCount: 512), data.count >= 512,
                        data[510] == 0x55 && data[511] == 0xAA
                    {
                        isValid = true
                    }
                    // Ensure handle is closed regardless of validation outcome
                    try? fileHandle.close()
                } else {
                    Logger.error(
                        "Validation Error: Could not open decompressed file handle for reading.")
                }

                if isValid {
                    Logger.info("Decompressed file appears to be a valid disk image.")

                    // Move the final file into place
                    // If outputPath was nil, we need to replace the original inputPath
                    if outputPath == nil {
                        // Backup original only if it's different from the temp path
                        if inputPath != tempFinalPath {
                            try? FileManager.default.copyItem(
                                at: URL(fileURLWithPath: inputPath),
                                to: URL(fileURLWithPath: inputPath + ".compressed.bak"))
                            try? FileManager.default.removeItem(at: URL(fileURLWithPath: inputPath))
                        }
                        try FileManager.default.moveItem(
                            at: URL(fileURLWithPath: tempFinalPath),
                            to: URL(fileURLWithPath: inputPath))
                        Logger.info("Replaced original file with sparsely decompressed version.")
                    } else {
                        // If outputPath was specified, move it there (overwrite if needed)
                        try? FileManager.default.removeItem(
                            at: URL(fileURLWithPath: finalOutputPath)) // Remove existing if overwriting
                        try FileManager.default.moveItem(
                            at: URL(fileURLWithPath: tempFinalPath),
                            to: URL(fileURLWithPath: finalOutputPath))
                        Logger.info("Moved sparsely decompressed file to: \(finalOutputPath)")
                    }
                    return true
                } else {
                    Logger.error(
                        "Validation failed: Decompressed file header is invalid or file couldn't be read. Cleaning up."
                    )
                    try? FileManager.default.removeItem(atPath: tempFinalPath)
                    return false
                }
            } else {
                Logger.error(
                    "Piped decompression failed: Output file '\(tempFinalPath)' not found after dd completed."
                )
                return false
            }

        } catch {
            Logger.error("Error running decompression pipe command: \(error)")
            try? FileManager.default.removeItem(atPath: tempFinalPath) // Clean up on error
            return false
        }
    }
2786 |
// Helper function to get actual disk usage of a file
/// Returns the number of bytes a file actually occupies on disk (as opposed
/// to its apparent size), by shelling out to `du -k` and scaling the result.
/// Returns 0 if `du` cannot be run or its output cannot be parsed.
private func getActualDiskUsage(path: String) -> UInt64 {
    let process = Process()
    process.executableURL = URL(fileURLWithPath: "/usr/bin/du")
    process.arguments = ["-k", path]  // -k reports usage in 1024-byte blocks

    let stdoutPipe = Pipe()
    process.standardOutput = stdoutPipe

    do {
        try process.run()
        process.waitUntilExit()

        let outputData = stdoutPipe.fileHandleForReading.readDataToEndOfFile()
        // `du` prints "<blocks>\t<path>"; the leading field is the block count.
        if let text = String(data: outputData, encoding: .utf8),
            let blocks = UInt64(text.split(separator: "\t").first ?? "0")
        {
            return blocks * 1024  // KiB blocks -> bytes
        }
    } catch {
        Logger.error("Failed to get actual disk usage: \(error)")
    }

    return 0
}
2812 |
// New push method
/// Pushes a VM directory (config.json, nvram.bin and a chunked, lz4-compressed
/// disk.img) to the OCI registry as a multi-layer image.
///
/// Intermediate artifacts (compressed chunks plus per-chunk metadata JSON) are
/// cached under `<vmDirPath>/.lume_push_cache`, so an interrupted push can be
/// resumed without recompressing or re-uploading chunks that already exist.
///
/// - Parameters:
///   - vmDirPath: Path of the VM directory containing the files to push.
///   - imageName: Repository name (without organization) to push to.
///   - tags: One or more tags; the same manifest is pushed once per tag.
///   - chunkSizeMb: Size of each uncompressed disk chunk, in MiB.
///   - verbose: When true, logs a line as each chunk finishes.
///   - dryRun: When true, compresses and caches locally but never contacts the
///     registry (no auth, no uploads, no manifest push).
///   - reassemble: When true (honored only together with `dryRun`), rebuilds the
///     disk from the cached chunks and verifies it against the original.
/// - Throws: `PushError` for authentication/upload/file-creation failures, plus
///   any file-system errors encountered while reading the VM files.
public func push(
    vmDirPath: String,
    imageName: String,
    tags: [String],
    chunkSizeMb: Int = 512,
    verbose: Bool = false,
    dryRun: Bool = false,
    reassemble: Bool = false
) async throws {
    Logger.info(
        "Pushing VM to registry",
        metadata: [
            "vm_path": vmDirPath,
            "imageName": imageName,
            "tags": "\(tags.joined(separator: ", "))", // Log all tags
            "registry": registry,
            "organization": organization,
            "chunk_size": "\(chunkSizeMb)MB",
            "dry_run": "\(dryRun)",
            "reassemble": "\(reassemble)",
        ])

    // Check for credentials if not in dry-run mode
    if !dryRun {
        let (username, token) = getCredentialsFromEnvironment()
        if username == nil || token == nil {
            Logger.error(
                "Missing GitHub credentials. Please set GITHUB_USERNAME and GITHUB_TOKEN environment variables"
            )
            Logger.error(
                "Your token must have 'packages:read' and 'packages:write' permissions")
            throw PushError.authenticationFailed
        }

        Logger.info("Using GitHub credentials from environment variables")
    }

    // Remove tag parsing here, imageName is now passed directly
    // let components = image.split(separator: ":") ...
    // let imageTag = String(tag)

    // Get authentication token only if not in dry-run mode
    var token: String = ""
    if !dryRun {
        Logger.info("Getting registry authentication token")
        token = try await getToken(
            repository: "\(self.organization)/\(imageName)",
            scopes: ["pull", "push"],
            requireAllScopes: true) // Require push scope, don't fall back to pull-only
    } else {
        Logger.info("Dry run mode: skipping authentication token request")
    }

    // Create working directory inside the VM folder for caching/resuming
    let workDir = URL(fileURLWithPath: vmDirPath).appendingPathComponent(".lume_push_cache")
    try FileManager.default.createDirectory(at: workDir, withIntermediateDirectories: true)
    Logger.info("Using push cache directory: \(workDir.path)")

    // Get VM files that need to be pushed using vmDirPath
    let diskPath = URL(fileURLWithPath: vmDirPath).appendingPathComponent("disk.img")
    let configPath = URL(fileURLWithPath: vmDirPath).appendingPathComponent("config.json")
    let nvramPath = URL(fileURLWithPath: vmDirPath).appendingPathComponent("nvram.bin")

    // Accumulates every OCI layer (config, nvram, disk chunks) for the manifest.
    var layers: [OCIManifestLayer] = []
    var uncompressedDiskSize: UInt64? = nil

    // Process config.json
    let cachedConfigPath = workDir.appendingPathComponent("config.json")
    var configDigest: String? = nil
    var configSize: Int? = nil

    if FileManager.default.fileExists(atPath: cachedConfigPath.path) {
        Logger.info("Using cached config.json")
        do {
            let configData = try Data(contentsOf: cachedConfigPath)
            configDigest = "sha256:" + configData.sha256String()
            configSize = configData.count
            // Try to get uncompressed disk size from cached config
            if let vmConfig = try? JSONDecoder().decode(VMConfig.self, from: configData) {
                uncompressedDiskSize = vmConfig.diskSize
                Logger.info(
                    "Found disk size in cached config: \(uncompressedDiskSize ?? 0) bytes")
            }
        } catch {
            Logger.error("Failed to read cached config.json: \(error). Will re-process.")
            // Force re-processing by leaving configDigest nil
        }
    } else if FileManager.default.fileExists(atPath: configPath.path) {
        Logger.info("Processing config.json")
        let configData = try Data(contentsOf: configPath)
        configDigest = "sha256:" + configData.sha256String()
        configSize = configData.count
        try configData.write(to: cachedConfigPath) // Save to cache
        // Try to get uncompressed disk size from original config
        if let vmConfig = try? JSONDecoder().decode(VMConfig.self, from: configData) {
            uncompressedDiskSize = vmConfig.diskSize
            Logger.info("Found disk size in config: \(uncompressedDiskSize ?? 0) bytes")
        }
    }

    if var digest = configDigest, let size = configSize { // Use 'var' to modify if uploaded
        if !dryRun {
            // Upload only if not in dry-run mode and blob doesn't exist
            if !(try await blobExists(
                repository: "\(self.organization)/\(imageName)", digest: digest, token: token))
            {
                Logger.info("Uploading config.json blob")
                let configData = try Data(contentsOf: cachedConfigPath) // Read from cache for upload
                digest = try await uploadBlobFromData(
                    repository: "\(self.organization)/\(imageName)",
                    data: configData,
                    token: token
                )
            } else {
                Logger.info("Config blob already exists on registry")
            }
        }
        // Add config layer
        layers.append(
            OCIManifestLayer(
                mediaType: "application/vnd.oci.image.config.v1+json",
                size: size,
                digest: digest
            ))
    }

    // Process nvram.bin
    let cachedNvramPath = workDir.appendingPathComponent("nvram.bin")
    var nvramDigest: String? = nil
    var nvramSize: Int? = nil

    if FileManager.default.fileExists(atPath: cachedNvramPath.path) {
        Logger.info("Using cached nvram.bin")
        do {
            let nvramData = try Data(contentsOf: cachedNvramPath)
            nvramDigest = "sha256:" + nvramData.sha256String()
            nvramSize = nvramData.count
        } catch {
            Logger.error("Failed to read cached nvram.bin: \(error). Will re-process.")
        }
    } else if FileManager.default.fileExists(atPath: nvramPath.path) {
        Logger.info("Processing nvram.bin")
        let nvramData = try Data(contentsOf: nvramPath)
        nvramDigest = "sha256:" + nvramData.sha256String()
        nvramSize = nvramData.count
        try nvramData.write(to: cachedNvramPath) // Save to cache
    }

    if var digest = nvramDigest, let size = nvramSize { // Use 'var'
        if !dryRun {
            // Upload only if not in dry-run mode and blob doesn't exist
            if !(try await blobExists(
                repository: "\(self.organization)/\(imageName)", digest: digest, token: token))
            {
                Logger.info("Uploading nvram.bin blob")
                let nvramData = try Data(contentsOf: cachedNvramPath) // Read from cache
                digest = try await uploadBlobFromData(
                    repository: "\(self.organization)/\(imageName)",
                    data: nvramData,
                    token: token
                )
            } else {
                Logger.info("NVRAM blob already exists on registry")
            }
        }
        // Add nvram layer
        layers.append(
            OCIManifestLayer(
                mediaType: "application/octet-stream",
                size: size,
                digest: digest
            ))
    }

    // Process disk.img
    if FileManager.default.fileExists(atPath: diskPath.path) {
        let diskAttributes = try FileManager.default.attributesOfItem(atPath: diskPath.path)
        let diskSize = diskAttributes[.size] as? UInt64 ?? 0
        let actualDiskSize = uncompressedDiskSize ?? diskSize
        Logger.info(
            "Processing disk.img in chunks",
            metadata: [
                "disk_path": diskPath.path, "disk_size": "\(diskSize) bytes",
                "actual_size": "\(actualDiskSize) bytes", "chunk_size": "\(chunkSizeMb)MB",
            ])
        let chunksDir = workDir.appendingPathComponent("disk.img.parts")
        try FileManager.default.createDirectory(
            at: chunksDir, withIntermediateDirectories: true)
        let chunkSizeBytes = chunkSizeMb * 1024 * 1024
        // Ceiling division: the last chunk may be shorter than chunkSizeBytes.
        let totalChunks = Int((diskSize + UInt64(chunkSizeBytes) - 1) / UInt64(chunkSizeBytes))
        Logger.info("Splitting disk into \(totalChunks) chunks")
        // NOTE(review): this handle appears unused by the workers below (each
        // task opens its own FileHandle); kept as-is to preserve behavior.
        let fileHandle = try FileHandle(forReadingFrom: diskPath)
        defer { try? fileHandle.close() }
        var pushedDiskLayers: [(index: Int, layer: OCIManifestLayer)] = []
        var diskChunks: [(index: Int, path: URL, digest: String)] = []

        // Process chunks concurrently, throttled to maxConcurrency in-flight
        // tasks: once the window is full, one result is drained before the
        // next task is added.
        try await withThrowingTaskGroup(of: (Int, OCIManifestLayer, URL, String).self) {
            group in
            let maxConcurrency = 4
            for chunkIndex in 0..<totalChunks {
                if chunkIndex >= maxConcurrency {
                    if let res = try await group.next() {
                        pushedDiskLayers.append((res.0, res.1))
                        diskChunks.append((res.0, res.2, res.3))
                    }
                }
                group.addTask { [token, verbose, dryRun, organization, imageName] in
                    let chunkIndex = chunkIndex
                    let chunkPath = chunksDir.appendingPathComponent("chunk.\(chunkIndex)")
                    let metadataPath = chunksDir.appendingPathComponent(
                        "chunk_metadata.\(chunkIndex).json")
                    var layer: OCIManifestLayer? = nil
                    var finalCompressedDigest: String? = nil
                    // Resume path: reuse the cached compressed chunk when both
                    // the chunk file and its metadata are present and readable.
                    if FileManager.default.fileExists(atPath: metadataPath.path),
                        FileManager.default.fileExists(atPath: chunkPath.path)
                    {
                        do {
                            let metadataData = try Data(contentsOf: metadataPath)
                            let metadata = try JSONDecoder().decode(
                                ChunkMetadata.self, from: metadataData)
                            Logger.info(
                                "Resuming chunk \(chunkIndex + 1)/\(totalChunks) from cache")
                            finalCompressedDigest = metadata.compressedDigest
                            if !dryRun {
                                if !(try await self.blobExists(
                                    repository: "\(organization)/\(imageName)",
                                    digest: metadata.compressedDigest, token: token))
                                {
                                    Logger.info("Uploading cached chunk \(chunkIndex + 1) blob")
                                    _ = try await self.uploadBlobFromPath(
                                        repository: "\(organization)/\(imageName)",
                                        path: chunkPath, digest: metadata.compressedDigest,
                                        token: token)
                                } else {
                                    Logger.info(
                                        "Chunk \(chunkIndex + 1) blob already exists on registry"
                                    )
                                }
                            }
                            layer = OCIManifestLayer(
                                mediaType: "application/octet-stream+lz4",
                                size: metadata.compressedSize,
                                digest: metadata.compressedDigest,
                                uncompressedSize: metadata.uncompressedSize,
                                uncompressedContentDigest: metadata.uncompressedDigest)
                        } catch {
                            Logger.info(
                                "Failed to load cached metadata/chunk for index \(chunkIndex): \(error). Re-processing."
                            )
                            finalCompressedDigest = nil
                            layer = nil
                        }
                    }
                    // Fresh path: read, hash, compress and cache this chunk,
                    // then upload unless in dry-run mode.
                    if layer == nil {
                        Logger.info("Processing chunk \(chunkIndex + 1)/\(totalChunks)")
                        let localFileHandle = try FileHandle(forReadingFrom: diskPath)
                        defer { try? localFileHandle.close() }
                        try localFileHandle.seek(toOffset: UInt64(chunkIndex * chunkSizeBytes))
                        let chunkData =
                            try localFileHandle.read(upToCount: chunkSizeBytes) ?? Data()
                        let uncompressedSize = UInt64(chunkData.count)
                        let uncompressedDigest = "sha256:" + chunkData.sha256String()
                        let compressedData =
                            try (chunkData as NSData).compressed(using: .lz4) as Data
                        let compressedSize = compressedData.count
                        let compressedDigest = "sha256:" + compressedData.sha256String()
                        try compressedData.write(to: chunkPath)
                        let metadata = ChunkMetadata(
                            uncompressedDigest: uncompressedDigest,
                            uncompressedSize: uncompressedSize,
                            compressedDigest: compressedDigest, compressedSize: compressedSize)
                        let metadataData = try JSONEncoder().encode(metadata)
                        try metadataData.write(to: metadataPath)
                        finalCompressedDigest = compressedDigest
                        if !dryRun {
                            if !(try await self.blobExists(
                                repository: "\(organization)/\(imageName)",
                                digest: compressedDigest, token: token))
                            {
                                Logger.info("Uploading processed chunk \(chunkIndex + 1) blob")
                                _ = try await self.uploadBlobFromPath(
                                    repository: "\(organization)/\(imageName)", path: chunkPath,
                                    digest: compressedDigest, token: token)
                            } else {
                                Logger.info(
                                    "Chunk \(chunkIndex + 1) blob already exists on registry (processed fresh)"
                                )
                            }
                        }
                        layer = OCIManifestLayer(
                            mediaType: "application/octet-stream+lz4", size: compressedSize,
                            digest: compressedDigest, uncompressedSize: uncompressedSize,
                            uncompressedContentDigest: uncompressedDigest)
                    }
                    guard let finalLayer = layer, let finalDigest = finalCompressedDigest else {
                        throw PushError.blobUploadFailed
                    }
                    if verbose {
                        Logger.info("Finished chunk \(chunkIndex + 1)/\(totalChunks)")
                    }
                    return (chunkIndex, finalLayer, chunkPath, finalDigest)
                }
            }
            // Drain the remaining in-flight tasks.
            for try await (index, layer, path, digest) in group {
                pushedDiskLayers.append((index, layer))
                diskChunks.append((index, path, digest))
            }
        }
        // Results arrive in completion order; sort back into chunk order so
        // manifest layers match the on-disk byte order.
        layers.append(
            contentsOf: pushedDiskLayers.sorted { $0.index < $1.index }.map { $0.layer })
        diskChunks.sort { $0.index < $1.index }
        Logger.info("All disk chunks processed successfully")

        // --- Calculate Total Upload Size & Initialize Tracker ---
        if !dryRun {
            var totalUploadSizeBytes: Int64 = 0
            var totalUploadFiles: Int = 0
            // Add config size if it exists
            if let size = configSize {
                totalUploadSizeBytes += Int64(size)
                totalUploadFiles += 1
            }
            // Add nvram size if it exists
            if let size = nvramSize {
                totalUploadSizeBytes += Int64(size)
                totalUploadFiles += 1
            }
            // Add sizes of all compressed disk chunks
            let allChunkSizes = diskChunks.compactMap {
                try? FileManager.default.attributesOfItem(atPath: $0.path.path)[.size] as? Int64
                ?? 0
            }
            totalUploadSizeBytes += allChunkSizes.reduce(0, +)
            totalUploadFiles += totalChunks // Use totalChunks calculated earlier

            if totalUploadSizeBytes > 0 {
                Logger.info(
                    "Initializing upload progress: \(totalUploadFiles) files, total size: \(ByteCountFormatter.string(fromByteCount: totalUploadSizeBytes, countStyle: .file))"
                )
                await uploadProgress.setTotal(totalUploadSizeBytes, files: totalUploadFiles)
                // Print initial progress bar
                print(
                    "[░░░░░░░░░░░░░░░░░░░░] 0% (0/\(totalUploadFiles)) | Initializing upload... | ETA: calculating... "
                )
                fflush(stdout)
            } else {
                Logger.info("No files marked for upload.")
            }
        }
        // --- End Size Calculation & Init ---

        // Perform reassembly verification if requested in dry-run mode
        if dryRun && reassemble {
            Logger.info("=== REASSEMBLY MODE ===")
            Logger.info("Reassembling chunks to verify integrity...")
            let reassemblyDir = workDir.appendingPathComponent("reassembly")
            try FileManager.default.createDirectory(
                at: reassemblyDir, withIntermediateDirectories: true)
            let reassembledFile = reassemblyDir.appendingPathComponent("reassembled_disk.img")

            // Pre-allocate a sparse file with the correct size
            Logger.info(
                "Pre-allocating sparse file of \(ByteCountFormatter.string(fromByteCount: Int64(actualDiskSize), countStyle: .file))..."
            )
            if FileManager.default.fileExists(atPath: reassembledFile.path) {
                try FileManager.default.removeItem(at: reassembledFile)
            }
            guard FileManager.default.createFile(atPath: reassembledFile.path, contents: nil)
            else {
                throw PushError.fileCreationFailed(reassembledFile.path)
            }

            let outputHandle = try FileHandle(forWritingTo: reassembledFile)
            defer { try? outputHandle.close() }

            // Set the file size without writing data (creates a sparse file)
            try outputHandle.truncate(atOffset: actualDiskSize)

            // Add test patterns at start and end to verify writability
            let testPattern = "LUME_TEST_PATTERN".data(using: .utf8)!
            try outputHandle.seek(toOffset: 0)
            try outputHandle.write(contentsOf: testPattern)
            try outputHandle.seek(toOffset: actualDiskSize - UInt64(testPattern.count))
            try outputHandle.write(contentsOf: testPattern)
            try outputHandle.synchronize()

            Logger.info("Test patterns written to sparse file. File is ready for writing.")

            // Track reassembly progress
            var reassemblyProgressLogger = ProgressLogger(threshold: 0.05)
            var currentOffset: UInt64 = 0

            // Process each chunk in order
            for (index, cachedChunkPath, _) in diskChunks.sorted(by: { $0.index < $1.index }) {
                Logger.info(
                    "Decompressing & writing part \(index + 1)/\(diskChunks.count): \(cachedChunkPath.lastPathComponent) at offset \(currentOffset)..."
                )

                // Always seek to the correct position
                try outputHandle.seek(toOffset: currentOffset)

                // Decompress and write the chunk
                let decompressedBytesWritten = try decompressChunkAndWriteSparse(
                    inputPath: cachedChunkPath.path,
                    outputHandle: outputHandle,
                    startOffset: currentOffset
                )

                currentOffset += decompressedBytesWritten
                reassemblyProgressLogger.logProgress(
                    current: Double(currentOffset) / Double(actualDiskSize),
                    context: "Reassembling"
                )

                // Ensure data is written before processing next part
                try outputHandle.synchronize()
            }

            // Finalize progress
            reassemblyProgressLogger.logProgress(current: 1.0, context: "Reassembly Complete")
            Logger.info("") // Newline

            // Close handle before post-processing
            try outputHandle.close()

            // Optimize sparseness if on macOS
            // (`cp -c` clones via APFS when possible, compacting sparse runs.)
            let optimizedFile = reassemblyDir.appendingPathComponent("optimized_disk.img")
            if FileManager.default.fileExists(atPath: "/bin/cp") {
                Logger.info("Optimizing sparse file representation...")

                let process = Process()
                process.executableURL = URL(fileURLWithPath: "/bin/cp")
                process.arguments = ["-c", reassembledFile.path, optimizedFile.path]

                do {
                    try process.run()
                    process.waitUntilExit()

                    if process.terminationStatus == 0 {
                        // Get sizes of original and optimized files
                        let optimizedSize =
                            (try? FileManager.default.attributesOfItem(
                                atPath: optimizedFile.path)[.size] as? UInt64) ?? 0
                        let originalUsage = getActualDiskUsage(path: reassembledFile.path)
                        let optimizedUsage = getActualDiskUsage(path: optimizedFile.path)

                        Logger.info(
                            "Sparse optimization results: Before: \(ByteCountFormatter.string(fromByteCount: Int64(originalUsage), countStyle: .file)) actual usage, After: \(ByteCountFormatter.string(fromByteCount: Int64(optimizedUsage), countStyle: .file)) actual usage (Apparent size: \(ByteCountFormatter.string(fromByteCount: Int64(optimizedSize), countStyle: .file)))"
                        )

                        // Replace original with optimized version
                        try FileManager.default.removeItem(at: reassembledFile)
                        try FileManager.default.moveItem(
                            at: optimizedFile, to: reassembledFile)
                        Logger.info("Using sparse-optimized file for verification")
                    } else {
                        Logger.info(
                            "Sparse optimization failed, using original file for verification")
                        try? FileManager.default.removeItem(at: optimizedFile)
                    }
                } catch {
                    Logger.info(
                        "Error during sparse optimization: \(error.localizedDescription)")
                    try? FileManager.default.removeItem(at: optimizedFile)
                }
            }

            // Verification step
            Logger.info("Verifying reassembled file...")
            let originalSize = diskSize
            let originalDigest = calculateSHA256(filePath: diskPath.path)
            let reassembledAttributes = try FileManager.default.attributesOfItem(
                atPath: reassembledFile.path)
            let reassembledSize = reassembledAttributes[.size] as? UInt64 ?? 0
            let reassembledDigest = calculateSHA256(filePath: reassembledFile.path)

            // Check actual disk usage
            let originalActualSize = getActualDiskUsage(path: diskPath.path)
            let reassembledActualSize = getActualDiskUsage(path: reassembledFile.path)

            // Report results
            Logger.info("Results:")
            Logger.info(
                "  Original size: \(ByteCountFormatter.string(fromByteCount: Int64(originalSize), countStyle: .file)) (\(originalSize) bytes)"
            )
            Logger.info(
                "  Reassembled size: \(ByteCountFormatter.string(fromByteCount: Int64(reassembledSize), countStyle: .file)) (\(reassembledSize) bytes)"
            )
            Logger.info("  Original digest: \(originalDigest)")
            Logger.info("  Reassembled digest: \(reassembledDigest)")
            Logger.info(
                "  Original: Apparent size: \(ByteCountFormatter.string(fromByteCount: Int64(originalSize), countStyle: .file)), Actual disk usage: \(ByteCountFormatter.string(fromByteCount: Int64(originalActualSize), countStyle: .file))"
            )
            Logger.info(
                "  Reassembled: Apparent size: \(ByteCountFormatter.string(fromByteCount: Int64(reassembledSize), countStyle: .file)), Actual disk usage: \(ByteCountFormatter.string(fromByteCount: Int64(reassembledActualSize), countStyle: .file))"
            )

            // Determine if verification was successful
            if originalDigest == reassembledDigest {
                Logger.info("✅ VERIFICATION SUCCESSFUL: Files are identical")
            } else {
                Logger.info("❌ VERIFICATION FAILED: Files differ")

                if originalSize != reassembledSize {
                    Logger.info(
                        "  Size mismatch: Original \(originalSize) bytes, Reassembled \(reassembledSize) bytes"
                    )
                }

                // Check sparse file characteristics
                Logger.info("Attempting to identify differences...")
                Logger.info(
                    "NOTE: This might be a sparse file issue. The content may be identical, but sparse regions"
                )
                Logger.info(
                    "      may be handled differently between the original and reassembled files."
                )

                if originalActualSize > 0 {
                    let diffPercentage =
                        ((Double(reassembledActualSize) - Double(originalActualSize))
                        / Double(originalActualSize)) * 100.0
                    Logger.info(
                        "  Disk usage difference: \(String(format: "%.2f", diffPercentage))%")

                    if diffPercentage < -40 {
                        Logger.info(
                            "  ⚠️ WARNING: Reassembled disk uses significantly less space (>40% difference)."
                        )
                        Logger.info(
                            "  This indicates sparse regions weren't properly preserved and may affect VM functionality."
                        )
                    } else if diffPercentage < -10 {
                        Logger.info(
                            "  ⚠️ WARNING: Reassembled disk uses less space (10-40% difference)."
                        )
                        Logger.info(
                            "  Some sparse regions may not be properly preserved but VM might still function correctly."
                        )
                    } else if diffPercentage > 10 {
                        Logger.info(
                            "  ⚠️ WARNING: Reassembled disk uses more space (>10% difference).")
                        Logger.info(
                            "  This is unusual and may indicate improper sparse file handling.")
                    } else {
                        Logger.info(
                            "  ✓ Disk usage difference is minimal (<10%). VM likely to function correctly."
                        )
                    }
                }

                // Offer recovery option
                if originalDigest != reassembledDigest {
                    Logger.info("")
                    Logger.info("===== ATTEMPTING RECOVERY ACTION =====")
                    Logger.info(
                        "Since verification failed, trying direct copy as a fallback method.")

                    let fallbackFile = reassemblyDir.appendingPathComponent("fallback_disk.img")
                    Logger.info("Creating fallback disk image at: \(fallbackFile.path)")

                    // Try rsync first
                    let rsyncProcess = Process()
                    rsyncProcess.executableURL = URL(fileURLWithPath: "/usr/bin/rsync")
                    rsyncProcess.arguments = [
                        "-aS", "--progress", diskPath.path, fallbackFile.path,
                    ]

                    do {
                        try rsyncProcess.run()
                        rsyncProcess.waitUntilExit()

                        if rsyncProcess.terminationStatus == 0 {
                            Logger.info(
                                "Direct copy completed with rsync. Fallback image available at: \(fallbackFile.path)"
                            )
                        } else {
                            // Try cp -c as fallback
                            Logger.info("Rsync failed. Attempting with cp -c command...")
                            let cpProcess = Process()
                            cpProcess.executableURL = URL(fileURLWithPath: "/bin/cp")
                            cpProcess.arguments = ["-c", diskPath.path, fallbackFile.path]

                            try cpProcess.run()
                            cpProcess.waitUntilExit()

                            if cpProcess.terminationStatus == 0 {
                                Logger.info(
                                    "Direct copy completed with cp -c. Fallback image available at: \(fallbackFile.path)"
                                )
                            } else {
                                Logger.info("All recovery attempts failed.")
                            }
                        }
                    } catch {
                        Logger.info(
                            "Error during recovery attempts: \(error.localizedDescription)")
                        Logger.info("All recovery attempts failed.")
                    }
                }
            }

            Logger.info("Reassembled file is available at: \(reassembledFile.path)")
        }
    }

    // --- Manifest Creation & Push ---
    let manifest = createManifest(
        layers: layers,
        configLayerIndex: layers.firstIndex(where: {
            $0.mediaType == "application/vnd.oci.image.config.v1+json"
        }),
        uncompressedDiskSize: uncompressedDiskSize
    )

    // Push manifest only if not in dry-run mode
    if !dryRun {
        Logger.info("Pushing manifest(s)") // Updated log
        // Serialize the manifest dictionary to Data first
        let manifestData = try JSONSerialization.data(
            withJSONObject: manifest, options: [.prettyPrinted, .sortedKeys])

        // Loop through tags to push the same manifest data
        for tag in tags {
            Logger.info("Pushing manifest for tag: \(tag)")
            try await pushManifest(
                repository: "\(self.organization)/\(imageName)",
                tag: tag, // Use the current tag from the loop
                manifest: manifestData, // Pass the serialized Data
                token: token // Token should be in scope here now
            )
        }
    }

    // Print final upload summary if not dry run
    if !dryRun {
        let stats = await uploadProgress.getUploadStats()
        Logger.info("\n\(stats.formattedSummary())") // Add newline for separation
    }

    // Clean up cache directory only on successful non-dry-run push
}
3456 |
/// Assembles an OCI image manifest dictionary from the collected layers.
///
/// - Parameters:
///   - layers: All manifest layers in their final order.
///   - configLayerIndex: Index into `layers` of the config layer, if any; that
///     layer is mirrored into the manifest's top-level "config" entry.
///   - uncompressedDiskSize: Total uncompressed disk size, recorded as a
///     manifest-level annotation when known.
/// - Returns: A JSON-serializable `[String: Any]` manifest.
private func createManifest(
    layers: [OCIManifestLayer], configLayerIndex: Int?, uncompressedDiskSize: UInt64?
) -> [String: Any] {
    // Serialize each layer, attaching uncompressed-size annotations when present.
    var layerDicts: [[String: Any]] = []
    for layer in layers {
        var entry: [String: Any] = [
            "mediaType": layer.mediaType,
            "size": layer.size,
            "digest": layer.digest,
        ]

        if let uncompressedSize = layer.uncompressedSize {
            var layerAnnotations: [String: String] = [
                "org.trycua.lume.uncompressed-size": "\(uncompressedSize)"
            ]
            if let contentDigest = layer.uncompressedContentDigest {
                layerAnnotations["org.trycua.lume.uncompressed-content-digest"] = contentDigest
            }
            entry["annotations"] = layerAnnotations
        }

        layerDicts.append(entry)
    }

    var manifest: [String: Any] = [
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "layers": layerDicts,
    ]

    // Mirror the config layer into the dedicated "config" slot when present.
    if let index = configLayerIndex {
        let configLayer = layers[index]
        manifest["config"] = [
            "mediaType": configLayer.mediaType,
            "size": configLayer.size,
            "digest": configLayer.digest,
        ]
    }

    // Manifest-level annotations: upload timestamp plus optional disk size.
    var manifestAnnotations: [String: String] = [
        "org.trycua.lume.upload-time": ISO8601DateFormatter().string(from: Date())
    ]
    if let diskSize = uncompressedDiskSize {
        manifestAnnotations["org.trycua.lume.uncompressed-disk-size"] = "\(diskSize)"
    }
    manifest["annotations"] = manifestAnnotations

    return manifest
}
3507 |
/// Uploads an in-memory blob to the registry, skipping the transfer when a
/// blob with the same sha256 digest already exists (registries are
/// content-addressed, so an existing digest means identical bytes).
/// - Returns: The blob's "sha256:..." digest.
private func uploadBlobFromData(repository: String, data: Data, token: String) async throws
    -> String
{
    let digest = "sha256:" + data.sha256String()

    // Deduplicate by digest before transferring anything.
    guard !(try await blobExists(repository: repository, digest: digest, token: token)) else {
        Logger.info("Blob already exists: \(digest)")
        return digest
    }

    // Two-step OCI upload: open an upload session, then send the bytes.
    let sessionURL = try await startBlobUpload(repository: repository, token: token)
    try await uploadBlob(url: sessionURL, data: data, digest: digest, token: token)

    // Record the transferred bytes for the progress display.
    await uploadProgress.addProgress(Int64(data.count))

    return digest
}
3531 |
/// Uploads a blob from a file on disk, skipping the transfer when the registry
/// already stores the given digest.
///
/// The file is memory-mapped (`.mappedIfSafe`) instead of being read eagerly,
/// so large compressed disk chunks (up to `chunkSizeMb`, 512 MiB by default)
/// do not have to be fully resident in memory during the upload.
///
/// - Parameters:
///   - repository: "<organization>/<image>" repository path on the registry.
///   - path: Local file whose contents form the blob.
///   - digest: Pre-computed "sha256:..." digest of the file's contents.
///   - token: Registry bearer token.
/// - Returns: The blob digest (unchanged from `digest`).
private func uploadBlobFromPath(repository: String, path: URL, digest: String, token: String)
    async throws -> String
{
    // Content-addressed dedupe: skip the upload when the digest is present.
    if try await blobExists(repository: repository, digest: digest, token: token) {
        Logger.info("Blob already exists: \(digest)")
        return digest
    }

    // Initiate upload
    let uploadURL = try await startBlobUpload(repository: repository, token: token)

    // Map the file rather than copying it into memory; Foundation silently
    // falls back to a normal read when mapping would be unsafe.
    let data = try Data(contentsOf: path, options: .mappedIfSafe)

    // Upload blob
    try await uploadBlob(url: uploadURL, data: data, digest: digest, token: token)

    // Report progress
    await uploadProgress.addProgress(Int64(data.count))

    return digest
}
3555 |
/// Checks via an HTTP HEAD request whether the registry already stores the
/// blob with the given digest. Any non-200 response (or a non-HTTP response)
/// is reported as "does not exist".
private func blobExists(repository: String, digest: String, token: String) async throws -> Bool
{
    let endpoint = URL(string: "https://\(registry)/v2/\(repository)/blobs/\(digest)")!
    var headRequest = URLRequest(url: endpoint)
    headRequest.httpMethod = "HEAD"
    headRequest.setValue("Bearer \(token)", forHTTPHeaderField: "Authorization")

    let (_, response) = try await URLSession.shared.data(for: headRequest)

    guard let httpResponse = response as? HTTPURLResponse else {
        return false
    }
    return httpResponse.statusCode == 200
}
3571 |
3572 | private func startBlobUpload(repository: String, token: String) async throws -> URL {
3573 | let url = URL(string: "https://\(registry)/v2/\(repository)/blobs/uploads/")!
3574 | var request = URLRequest(url: url)
3575 | request.httpMethod = "POST"
3576 | request.setValue("Bearer \(token)", forHTTPHeaderField: "Authorization")
3577 | request.setValue("0", forHTTPHeaderField: "Content-Length") // Explicitly set Content-Length to 0 for POST
3578 |
3579 | let (_, response) = try await URLSession.shared.data(for: request)
3580 |
3581 | guard let httpResponse = response as? HTTPURLResponse,
3582 | httpResponse.statusCode == 202,
3583 | let locationString = httpResponse.value(forHTTPHeaderField: "Location")
3584 | else {
3585 | // Log response details on failure
3586 | let responseBody =
3587 | String(
3588 | data: (try? await URLSession.shared.data(for: request).0) ?? Data(),
3589 | encoding: .utf8) ?? "(No Body)"
3590 | Logger.error(
3591 | "Failed to initiate blob upload. Status: \( (response as? HTTPURLResponse)?.statusCode ?? 0 ). Headers: \( (response as? HTTPURLResponse)?.allHeaderFields ?? [:] ). Body: \(responseBody)"
3592 | )
3593 | throw PushError.uploadInitiationFailed
3594 | }
3595 |
3596 | // Construct the base URL for the registry
3597 | guard let baseRegistryURL = URL(string: "https://\(registry)") else {
3598 | Logger.error("Failed to create base registry URL from: \(registry)")
3599 | throw PushError.invalidURL
3600 | }
3601 |
3602 | // Create the final upload URL, resolving the location against the base URL
3603 | guard let uploadURL = URL(string: locationString, relativeTo: baseRegistryURL) else {
3604 | Logger.error(
3605 | "Failed to create absolute upload URL from location: \(locationString) relative to base: \(baseRegistryURL.absoluteString)"
3606 | )
3607 | throw PushError.invalidURL
3608 | }
3609 |
3610 | Logger.info("Blob upload initiated. Upload URL: \(uploadURL.absoluteString)")
3611 | return uploadURL.absoluteURL // Ensure it's absolute
3612 | }
3613 |
3614 | private func uploadBlob(url: URL, data: Data, digest: String, token: String) async throws {
3615 | var components = URLComponents(url: url, resolvingAgainstBaseURL: true)!
3616 |
3617 | // Add digest parameter
3618 | var queryItems = components.queryItems ?? []
3619 | queryItems.append(URLQueryItem(name: "digest", value: digest))
3620 | components.queryItems = queryItems
3621 |
3622 | guard let uploadURL = components.url else {
3623 | throw PushError.invalidURL
3624 | }
3625 |
3626 | var request = URLRequest(url: uploadURL)
3627 | request.httpMethod = "PUT"
3628 | request.setValue("Bearer \(token)", forHTTPHeaderField: "Authorization")
3629 | request.setValue("application/octet-stream", forHTTPHeaderField: "Content-Type")
3630 | request.setValue("\(data.count)", forHTTPHeaderField: "Content-Length")
3631 | request.httpBody = data
3632 |
3633 | let (_, response) = try await URLSession.shared.data(for: request)
3634 |
3635 | guard let httpResponse = response as? HTTPURLResponse, httpResponse.statusCode == 201 else {
3636 | throw PushError.blobUploadFailed
3637 | }
3638 | }
3639 |
3640 | private func pushManifest(repository: String, tag: String, manifest: Data, token: String)
3641 | async throws
3642 | {
3643 | let url = URL(string: "https://\(registry)/v2/\(repository)/manifests/\(tag)")!
3644 | var request = URLRequest(url: url)
3645 | request.httpMethod = "PUT"
3646 | request.setValue("Bearer \(token)", forHTTPHeaderField: "Authorization")
3647 | request.setValue(
3648 | "application/vnd.oci.image.manifest.v1+json", forHTTPHeaderField: "Content-Type")
3649 | request.httpBody = manifest
3650 |
3651 | let (_, response) = try await URLSession.shared.data(for: request)
3652 |
3653 | guard let httpResponse = response as? HTTPURLResponse, httpResponse.statusCode == 201 else {
3654 | throw PushError.manifestPushFailed
3655 | }
3656 | }
3657 |
3658 | private func getCredentialsFromEnvironment() -> (String?, String?) {
3659 | let username =
3660 | ProcessInfo.processInfo.environment["GITHUB_USERNAME"]
3661 | ?? ProcessInfo.processInfo.environment["GHCR_USERNAME"]
3662 | let password =
3663 | ProcessInfo.processInfo.environment["GITHUB_TOKEN"]
3664 | ?? ProcessInfo.processInfo.environment["GHCR_TOKEN"]
3665 | return (username, password)
3666 | }
3667 |
3668 | // Add these helper methods for dry-run and reassemble implementation
3669 |
3670 | // NEW Helper function using Compression framework and sparse writing
    /// Decompresses one LZ4 chunk file and writes it sparsely into `outputHandle`.
    ///
    /// Output is consumed in blocks of `Self.holeGranularityBytes`; a block that
    /// compares equal to `Self.zeroChunk` is skipped (only the write offset
    /// advances), leaving a hole in the output file on filesystems that support
    /// sparse files.
    ///
    /// - Parameters:
    ///   - inputPath: Path of the LZ4-compressed chunk on disk.
    ///   - outputHandle: Open handle for the output file being reassembled.
    ///   - startOffset: Absolute offset in the output where this chunk begins.
    /// - Returns: Total decompressed byte count (written bytes plus skipped zero
    ///   bytes). Returns 0 — without throwing — when the chunk file is missing.
    ///   NOTE(review): that silently tolerates a missing chunk; confirm callers
    ///   intend this rather than an error.
    private func decompressChunkAndWriteSparse(
        inputPath: String, outputHandle: FileHandle, startOffset: UInt64
    ) throws -> UInt64 {
        guard FileManager.default.fileExists(atPath: inputPath) else {
            Logger.error("Compressed chunk not found at: \(inputPath)")
            return 0 // Or throw an error
        }

        // Map the compressed file instead of reading it; the closure below feeds
        // it to the decompressor incrementally without a second copy.
        let sourceData = try Data(
            contentsOf: URL(fileURLWithPath: inputPath), options: .alwaysMapped)
        var currentWriteOffset = startOffset
        var totalDecompressedBytes: UInt64 = 0
        var sourceReadOffset = 0 // Keep track of how much compressed data we've provided

        // Use the initializer with the readingFrom closure: the filter pulls up
        // to `length` compressed bytes per call; returning nil signals EOF.
        let filter = try InputFilter(.decompress, using: .lz4) { (length: Int) -> Data? in
            let bytesAvailable = sourceData.count - sourceReadOffset
            if bytesAvailable == 0 {
                return nil // No more data
            }
            let bytesToRead = min(length, bytesAvailable)
            let chunk = sourceData.subdata(in: sourceReadOffset..<sourceReadOffset + bytesToRead)
            sourceReadOffset += bytesToRead
            return chunk
        }

        // Process the decompressed output by reading from the filter,
        // one hole-granularity block at a time.
        while let decompressedData = try filter.readData(ofLength: Self.holeGranularityBytes) {
            if decompressedData.isEmpty { break } // End of stream

            // Check if the chunk is all zeros (only full-size blocks can match
            // the precomputed zero block).
            if decompressedData.count == Self.holeGranularityBytes
                && decompressedData == Self.zeroChunk
            {
                // It's a zero chunk, just advance the offset, don't write
                currentWriteOffset += UInt64(decompressedData.count)
            } else {
                // Not a zero chunk (or a partial chunk at the end), write it
                try outputHandle.seek(toOffset: currentWriteOffset)
                try outputHandle.write(contentsOf: decompressedData)
                currentWriteOffset += UInt64(decompressedData.count)
            }
            totalDecompressedBytes += UInt64(decompressedData.count)
        }

        // No explicit finalize needed when initialized with source data

        return totalDecompressedBytes
    }
3720 |
3721 | // Helper function to calculate SHA256 hash of a file
3722 | private func calculateSHA256(filePath: String) -> String {
3723 | guard FileManager.default.fileExists(atPath: filePath) else {
3724 | return "file-not-found"
3725 | }
3726 |
3727 | let process = Process()
3728 | process.executableURL = URL(fileURLWithPath: "/usr/bin/shasum")
3729 | process.arguments = ["-a", "256", filePath]
3730 |
3731 | let outputPipe = Pipe()
3732 | process.standardOutput = outputPipe
3733 |
3734 | do {
3735 | try process.run()
3736 | process.waitUntilExit()
3737 |
3738 | if let data = try outputPipe.fileHandleForReading.readToEnd(),
3739 | let output = String(data: data, encoding: .utf8)
3740 | {
3741 | return output.components(separatedBy: " ").first ?? "hash-calculation-failed"
3742 | }
3743 | } catch {
3744 | Logger.error("SHA256 calculation failed: \(error)")
3745 | }
3746 |
3747 | return "hash-calculation-failed"
3748 | }
3749 | }
3750 |
/// Tracks byte/file progress for an image upload and renders a one-line console
/// progress bar with smoothed speed and ETA estimates. Actor isolation makes it
/// safe to report progress from concurrent upload tasks.
actor UploadProgressTracker {
    private var totalBytes: Int64 = 0
    private var uploadedBytes: Int64 = 0 // Renamed
    private var progressLogger = ProgressLogger(threshold: 0.01)
    private var totalFiles: Int = 0 // Keep track of total items
    private var completedFiles: Int = 0 // Keep track of completed items

    // Upload speed tracking
    private var startTime: Date = Date()
    private var lastUpdateTime: Date = Date()
    private var lastUpdateBytes: Int64 = 0
    private var speedSamples: [Double] = []
    private var peakSpeed: Double = 0
    private var totalElapsedTime: TimeInterval = 0

    // Smoothing factor for the exponential moving average of speed
    private var speedSmoothing: Double = 0.3
    private var smoothedSpeed: Double = 0

    /// Resets all counters for a new upload of `total` bytes across `files` items.
    func setTotal(_ total: Int64, files: Int) {
        totalBytes = total
        totalFiles = files
        startTime = Date()
        lastUpdateTime = startTime
        uploadedBytes = 0 // Reset uploaded bytes
        completedFiles = 0 // Reset completed files
        // BUGFIX: lastUpdateBytes was never reset, so reusing the tracker made
        // the first speed sample (uploadedBytes - stale value) negative/garbage.
        lastUpdateBytes = 0
        smoothedSpeed = 0
        speedSamples = []
        peakSpeed = 0
        totalElapsedTime = 0
    }

    /// Records one completed item of `bytes` bytes and (rate-limited) re-renders
    /// the progress line.
    func addProgress(_ bytes: Int64) {
        uploadedBytes += bytes
        completedFiles += 1 // Increment completed files count
        let now = Date()
        let elapsed = now.timeIntervalSince(lastUpdateTime)

        // Show first progress update immediately, then throttle updates to
        // every 0.5s, always rendering the final update.
        let shouldUpdate =
            (uploadedBytes <= bytes) || (elapsed >= 0.5) || (completedFiles == totalFiles)

        if shouldUpdate && totalBytes > 0 { // Ensure totalBytes is set
            let currentSpeed = Double(uploadedBytes - lastUpdateBytes) / max(elapsed, 0.001)
            speedSamples.append(currentSpeed)

            // Cap samples array
            if speedSamples.count > 20 {
                speedSamples.removeFirst(speedSamples.count - 20)
            }

            peakSpeed = max(peakSpeed, currentSpeed)

            // Apply exponential smoothing
            if smoothedSpeed == 0 {
                smoothedSpeed = currentSpeed
            } else {
                smoothedSpeed = speedSmoothing * currentSpeed + (1 - speedSmoothing) * smoothedSpeed
            }

            let recentAvgSpeed = calculateAverageSpeed()
            let totalElapsed = now.timeIntervalSince(startTime)
            let overallAvgSpeed = totalElapsed > 0 ? Double(uploadedBytes) / totalElapsed : 0

            let progress = totalBytes > 0 ? Double(uploadedBytes) / Double(totalBytes) : 1.0 // Avoid division by zero
            logSpeedProgress(
                current: progress,
                currentSpeed: currentSpeed,
                averageSpeed: recentAvgSpeed,
                smoothedSpeed: smoothedSpeed,
                overallSpeed: overallAvgSpeed,
                peakSpeed: peakSpeed,
                context: "Uploading Image" // Changed context
            )

            lastUpdateTime = now
            lastUpdateBytes = uploadedBytes
            totalElapsedTime = totalElapsed
        }
    }

    /// Recency-weighted average over the last (up to) 8 speed samples:
    /// newer samples get linearly larger weights.
    private func calculateAverageSpeed() -> Double {
        guard !speedSamples.isEmpty else { return 0 }
        var totalWeight = 0.0
        var weightedSum = 0.0
        let samples = speedSamples.suffix(min(8, speedSamples.count))
        for (index, speed) in samples.enumerated() {
            let weight = Double(index + 1)
            weightedSum += speed * weight
            totalWeight += weight
        }
        return totalWeight > 0 ? weightedSum / totalWeight : 0
    }

    // Use the UploadStats struct
    /// Snapshot of overall progress for end-of-upload reporting.
    func getUploadStats() -> UploadStats {
        let avgSpeed = totalElapsedTime > 0 ? Double(uploadedBytes) / totalElapsedTime : 0
        return UploadStats(
            totalBytes: totalBytes,
            uploadedBytes: uploadedBytes, // Renamed
            elapsedTime: totalElapsedTime,
            averageSpeed: avgSpeed,
            peakSpeed: peakSpeed
        )
    }

    /// Renders the in-place (carriage-return) progress line. Only the weighted
    /// average speed and the ETA are shown; the other speed figures are kept in
    /// the signature for future use.
    private func logSpeedProgress(
        current: Double,
        currentSpeed: Double,
        averageSpeed: Double,
        smoothedSpeed: Double,
        overallSpeed: Double,
        peakSpeed: Double,
        context: String
    ) {
        let progressPercent = Int(current * 100)
        // let currentSpeedStr = formatByteSpeed(currentSpeed) // Removed unused
        let avgSpeedStr = formatByteSpeed(averageSpeed)
        // let peakSpeedStr = formatByteSpeed(peakSpeed) // Removed unused
        let remainingBytes = totalBytes - uploadedBytes
        // ETA uses the smoothed speed, floored at 80% of the recent average so a
        // momentary stall doesn't balloon the estimate.
        let speedForEta = max(smoothedSpeed, averageSpeed * 0.8)
        let etaSeconds = speedForEta > 0 ? Double(remainingBytes) / speedForEta : 0
        let etaStr = formatTimeRemaining(etaSeconds)
        let progressBar = createProgressBar(progress: current)
        let fileProgress = "(\(completedFiles)/\(totalFiles))" // Add file count

        print(
            "\r\(progressBar) \(progressPercent)% \(fileProgress) | Speed: \(avgSpeedStr) (Avg) | ETA: \(etaStr)     ", // Simplified output
            terminator: "")
        fflush(stdout)
    }

    // Helper methods (createProgressBar, formatByteSpeed, formatTimeRemaining) remain the same
    /// Renders a fixed-width textual progress bar, e.g. [████░░░░].
    private func createProgressBar(progress: Double, width: Int = 30) -> String {
        let completedWidth = Int(progress * Double(width))
        let remainingWidth = width - completedWidth
        let completed = String(repeating: "█", count: completedWidth)
        let remaining = String(repeating: "░", count: remainingWidth)
        return "[\(completed)\(remaining)]"
    }
    /// Formats bytes/second with a binary-scaled unit (B/s … GB/s), one decimal.
    private func formatByteSpeed(_ bytesPerSecond: Double) -> String {
        let units = ["B/s", "KB/s", "MB/s", "GB/s"]
        var speed = bytesPerSecond
        var unitIndex = 0
        while speed > 1024 && unitIndex < units.count - 1 {
            speed /= 1024
            unitIndex += 1
        }
        return String(format: "%.1f %@", speed, units[unitIndex])
    }
    /// Formats seconds as H:MM:SS (or M:SS under an hour); non-finite or
    /// non-positive inputs render as "calculating...".
    private func formatTimeRemaining(_ seconds: Double) -> String {
        if seconds.isNaN || seconds.isInfinite || seconds <= 0 { return "calculating..." }
        let hours = Int(seconds) / 3600
        let minutes = (Int(seconds) % 3600) / 60
        let secs = Int(seconds) % 60
        if hours > 0 {
            return String(format: "%d:%02d:%02d", hours, minutes, secs)
        } else {
            return String(format: "%d:%02d", minutes, secs)
        }
    }
}
3913 |
```