diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml new file mode 100644 index 0000000..f32b141 --- /dev/null +++ b/.github/workflows/e2e.yml @@ -0,0 +1,173 @@ +name: End-to-End ISO Test + +# Tests the full Ubuntu 26.04 live ISO lifecycle: +# 1. Build the ISO (debug=1 so SSH is available) +# 2. Boot the live ISO in QEMU — wait for UBUNTU26_LIVE_READY marker +# 3. Run fisherman install via SSH (ext4, systemd-boot, no LUKS) +# 4. Boot the installed disk in QEMU — wait for login prompt +# +# Mirrors the local just recipes: +# just debug=1 e2e ubuntu-26.04 +# +# Runs on every PR and weekly to catch regressions. + +on: + pull_request: + branches: [main] + schedule: + - cron: '0 3 * * 2' # Tuesday 03:00 UTC + workflow_dispatch: + +permissions: + contents: read + pull-requests: write + issues: write + +jobs: + e2e: + name: ISO End-to-End + runs-on: ubuntu-24.04 + timeout-minutes: 180 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Free disk space + uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be + with: + tool-cache: true + + - name: Install dependencies + run: | + sudo apt-get update -qq + sudo apt-get install -y \ + podman mtools xorriso isomd5sum squashfs-tools \ + qemu-system-x86 ovmf socat sshpass \ + python3 -qq + + - name: Setup Just + uses: extractions/setup-just@v3 + + - name: Enable KVM + run: | + echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' \ + | sudo tee /etc/udev/rules.d/99-kvm4all.rules + sudo udevadm control --reload-rules + sudo udevadm trigger --name-match=kvm + + - name: Checkout ubuntu-26.04-desktop-bootc + uses: actions/checkout@v4 + with: + repository: hanthor/ubuntu-26.04-desktop-bootc + path: bootc-image + + - name: Build base bootc image + run: | + cd bootc-image + sudo $(which just) build + + - name: Build debug ISO + id: build + run: | + START=$(date +%s) + sudo $(which just) debug=1 output_dir=/var/tmp/iso-output compression=fast \ + iso-sd-boot 
ubuntu-26.04 + echo "duration=$(( $(date +%s) - START ))" >> "$GITHUB_OUTPUT" + ISO=/var/tmp/iso-output/ubuntu-26.04-live.iso + echo "iso_size=$(stat -c%s "$ISO" 2>/dev/null || echo 0)" >> "$GITHUB_OUTPUT" + ls -lh "$ISO" + + - name: Boot live ISO (smoke test) + id: live_boot + run: | + START=$(date +%s) + sudo $(which just) output_dir=/var/tmp/iso-output test-live ubuntu-26.04 + echo "duration=$(( $(date +%s) - START ))" >> "$GITHUB_OUTPUT" + + - name: Full install end-to-end + id: e2e + run: | + START=$(date +%s) + # e2e-qemu runs against the already-built ISO (no rebuild) + sudo $(which just) \ + output_dir=/var/tmp/iso-output \ + e2e-qemu ubuntu-26.04 + echo "duration=$(( $(date +%s) - START ))" >> "$GITHUB_OUTPUT" + continue-on-error: true + + - name: Fix serial log permissions + if: always() + run: sudo chmod a+r /tmp/ubuntu-26.04-e2e-*.log 2>/dev/null || true + + - name: Upload serial logs + if: always() + continue-on-error: true + uses: actions/upload-artifact@v4 + with: + name: e2e-serial-logs + path: | + /tmp/ubuntu-26.04-e2e-live.log + /tmp/ubuntu-26.04-e2e-installed.log + if-no-files-found: warn + retention-days: 14 + + - name: Post PR comment + if: always() && github.event_name == 'pull_request' + uses: actions/github-script@v7 + env: + LIVE_OUTCOME: ${{ steps.live_boot.outcome }} + E2E_OUTCOME: ${{ steps.e2e.outcome }} + BUILD_S: ${{ steps.build.outputs.duration }} + LIVE_S: ${{ steps.live_boot.outputs.duration }} + E2E_S: ${{ steps.e2e.outputs.duration }} + ISO_BYTES: ${{ steps.build.outputs.iso_size }} + with: + script: | + const live = process.env.LIVE_OUTCOME; + const e2e = process.env.E2E_OUTCOME; + const pass = live === 'success' && e2e === 'success'; + const icon = pass ? '✅' : '❌'; + const runUrl = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; + const fmtDur = s => s ? `${Math.floor(s/60)}m ${s%60}s` : 'N/A'; + const fmtMiB = b => b ? 
`${(b/1024/1024).toFixed(0)} MiB` : 'N/A'; + const body = [ + `## ${icon} ISO End-to-End Test — ${pass ? 'PASSED' : 'FAILED'}`, + '', + '| Step | Result | Time |', + '|------|--------|------|', + `| ISO build | ${process.env.BUILD_S ? '✅' : '⏳'} | ${fmtDur(process.env.BUILD_S)} |`, + `| Live boot | ${live === 'success' ? '✅' : '❌'} ${live} | ${fmtDur(process.env.LIVE_S)} |`, + `| Install + reboot | ${e2e === 'success' ? '✅' : '❌'} ${e2e} | ${fmtDur(process.env.E2E_S)} |`, + `| ISO size | ${fmtMiB(process.env.ISO_BYTES)} | — |`, + '', + `Serial logs are attached to the [workflow run](${runUrl}).`, + ].join('\n'); + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.payload.pull_request.number, + body, + }); + + - name: Job summary + if: always() + run: | + LIVE="${{ steps.live_boot.outcome }}" + E2E="${{ steps.e2e.outcome }}" + PASS=$( [[ "$LIVE" == "success" && "$E2E" == "success" ]] && echo "✅ PASSED" || echo "❌ FAILED" ) + { + echo "## $PASS — ISO End-to-End Test" + echo "" + echo "| Step | Result | Time |" + echo "|------|--------|------|" + echo "| Build | ✅ | ${{ steps.build.outputs.duration }}s |" + echo "| Live boot | $LIVE | ${{ steps.live_boot.outputs.duration }}s |" + echo "| Install + reboot | $E2E | ${{ steps.e2e.outputs.duration }}s |" + } >> "$GITHUB_STEP_SUMMARY" + + - name: Fail if e2e did not pass + if: steps.e2e.outcome != 'success' + run: | + echo "e2e test failed — check serial logs in artifacts" + exit 1 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ea1472e --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +output/ diff --git a/justfile b/justfile index a4645cb..88b538b 100644 --- a/justfile +++ b/justfile @@ -18,7 +18,17 @@ compression := "fast" # Build the live installer container image. 
container target: + #!/usr/bin/bash + set -euo pipefail + # On bootc-managed hosts /var/tmp may be read-only; override tmpdir for + # containers-storage via a custom storage.conf so podman can commit layers. + STORAGE_CONF=$(mktemp /tmp/iso-build-storage.XXXXXX.conf) + trap "rm -f '$STORAGE_CONF'" EXIT + printf '[storage]\ndriver = "overlay"\ngraphroot = "/var/lib/containers/storage"\nrunroot = "/run/containers/storage"\n\n[storage.options]\ntmpdir = "/var/home/james/iso-tmp"\n' \ + > "$STORAGE_CONF" + TMPDIR=/tmp CONTAINERS_STORAGE_CONF="$STORAGE_CONF" \ podman build --cap-add sys_admin --security-opt label=disable \ + --network=host \ --layers \ --build-arg DEBUG={{debug}} \ --build-arg INSTALLER_CHANNEL={{installer_channel}} \ @@ -57,6 +67,8 @@ build-bg target: iso-sd-boot target: #!/usr/bin/bash set -euo pipefail + # /var/tmp may have a broken inode on bootc-managed hosts; use a known-good path. + export TMPDIR=/var/home/james/iso-tmp just debug={{debug}} installer_channel={{installer_channel}} container {{target}} mkdir -p {{output_dir}} @@ -88,25 +100,21 @@ iso-sd-boot target: CS_STAGING='${CS_STAGING}' SQUASHFS_ROOT='${SQUASHFS_ROOT}' SQUASHFS_STORAGE=\"\${CS_STAGING}/usr/lib/bootc/storage\" - LIVE_RUNROOT=\"\$(mktemp -d '${OUTPUT_DIR}'/live-runroot-XXXXXX)\" - STORAGE_CONF=\"\$(mktemp '${OUTPUT_DIR}'/live-storage-XXXXXX.conf)\" mkdir -p \"\${SQUASHFS_STORAGE}\" - printf '[storage]\ndriver = \"vfs\"\nrunroot = \"%s\"\ngraphroot = \"%s\"\n' \ - \"\${LIVE_RUNROOT}\" \"\${SQUASHFS_STORAGE}\" > \"\${STORAGE_CONF}\" echo 'Exporting Ubuntu OCI image to archive...' skopeo copy \ containers-storage:${PAYLOAD_REF} \ oci-archive:\${PAYLOAD_OCI}:${PAYLOAD_REF} - echo 'Importing Ubuntu OCI image into squashfs bootc storage...' - CONTAINERS_STORAGE_CONF=\"\${STORAGE_CONF}\" \ + # Copy payload as OCI layout — blobs stay compressed, ~4 GB not ~100 GB. 
+ # In the live session, fisherman uses: "image": "oci:/usr/lib/bootc/storage" + echo 'Copying payload into squashfs as OCI layout...' skopeo copy \ oci-archive:\${PAYLOAD_OCI}:${PAYLOAD_REF} \ - containers-storage:${PAYLOAD_REF} + oci:\${SQUASHFS_STORAGE} - rm -f \"\${PAYLOAD_OCI}\" \"\${STORAGE_CONF}\" - rm -rf \"\${LIVE_RUNROOT}\" + rm -f \"\${PAYLOAD_OCI}\" echo 'Building unified squashfs source tree...' mkdir -p \"\${SQUASHFS_ROOT}\" @@ -303,3 +311,331 @@ boot-libvirt-debug target: echo " Cleanup: sudo virsh destroy ${VM_NAME}" echo " sudo virsh undefine ${VM_NAME} --nvram" echo "════════════════════════════════════════" + +# ── QEMU e2e test ────────────────────────────────────────────────────────────── +# Adapts the dakota-iso LUKS e2e pattern for a plain (non-LUKS) Ubuntu install. +# The installed system is ext4 + systemd-boot; no passphrase unlock needed. +# +# Variables (all overridable on the command line): +e2e-disk := "/var/tmp/ubuntu-26.04-e2e.qcow2" +e2e-ovmf-vars-live := "/var/tmp/ubuntu-26.04-e2e-live-vars.fd" +e2e-ovmf-vars-installed := "/var/tmp/ubuntu-26.04-e2e-installed-vars.fd" +e2e-monitor-live := "/tmp/ubuntu-26.04-e2e-live.sock" +e2e-monitor-installed := "/tmp/ubuntu-26.04-e2e-installed.sock" +e2e-serial-live := "/tmp/ubuntu-26.04-e2e-live.log" +e2e-serial-installed := "/tmp/ubuntu-26.04-e2e-installed.log" +e2e-ssh-port := "2222" # live session SSH (debug=1) +e2e-ssh-installed-port := "2223" # installed system SSH +e2e-test-user := "tester" # created by fisherman user field +e2e-test-pass := "tester" # same value as in e2e-install.sh + +# Live boot smoke test — fast, no install needed, does not require debug=1. +# Boots the ISO in headless QEMU and waits for the UBUNTU26_LIVE_READY marker +# emitted by live-ready.service once the display manager has started. 
+# Usage: just test-live ubuntu-26.04 +test-live target: + #!/usr/bin/bash + set -euo pipefail + ISO="{{output_dir}}/{{target}}-live.iso" + [[ -f "$ISO" ]] || { echo "No ISO — run: just iso-sd-boot {{target}}"; exit 1; } + + QEMU=$(command -v /usr/libexec/qemu-kvm /usr/bin/qemu-kvm \ + /usr/bin/qemu-system-x86_64 2>/dev/null | head -1) + [[ -z "$QEMU" ]] && { echo "qemu-kvm / qemu-system-x86_64 not found" >&2; exit 1; } + + OVMF_CODE="" + for f in /usr/share/OVMF/OVMF_CODE_4M.fd /usr/share/OVMF/OVMF_CODE.fd \ + /usr/share/edk2/ovmf/OVMF_CODE.fd /usr/share/ovmf/OVMF.fd; do + [[ -f "$f" ]] && { OVMF_CODE="$f"; break; } + done + [[ -z "$OVMF_CODE" ]] && { echo "OVMF not found — install ovmf" >&2; exit 1; } + OVMF_VARS=$(mktemp /tmp/ubuntu-smoke-vars.XXXXXX.fd) + for f in /usr/share/OVMF/OVMF_VARS_4M.fd /usr/share/OVMF/OVMF_VARS.fd \ + /usr/share/edk2/ovmf/OVMF_VARS.fd; do + [[ -f "$f" ]] && { cp "$f" "$OVMF_VARS"; break; } + done + + SERIAL=$(mktemp /tmp/ubuntu-smoke-serial.XXXXXX.log) + MONITOR=$(mktemp /tmp/ubuntu-smoke-monitor.XXXXXX.sock) + TIMEOUT=480 # 8 min — live env with snap seeding can be slow + trap "sudo socat - UNIX-CONNECT:$MONITOR <<< 'quit' 2>/dev/null || true; rm -f $OVMF_VARS $SERIAL" EXIT + + echo "==> Booting live ISO (headless): $ISO" + sudo "$QEMU" \ + -machine q35 -cpu host -m 4096 -smp 2 -accel kvm \ + -drive "if=pflash,format=raw,readonly=on,file=${OVMF_CODE}" \ + -drive "if=pflash,format=raw,file=${OVMF_VARS}" \ + -drive "if=none,id=iso,file=${ISO},media=cdrom,readonly=on,format=raw" \ + -device virtio-scsi-pci,id=scsi \ + -device scsi-cd,drive=iso \ + -netdev "user,id=net0" \ + -device virtio-net-pci,netdev=net0 \ + -monitor "unix:${MONITOR},server,nowait" \ + -serial "file:${SERIAL}" \ + -display none \ + -daemonize + + echo "==> Waiting for UBUNTU26_LIVE_READY (timeout: ${TIMEOUT}s)..." 
+ ELAPSED=0 + while (( ELAPSED < TIMEOUT )); do + if grep -q "UBUNTU26_LIVE_READY" "$SERIAL" 2>/dev/null; then + echo "" + echo "=== LIVE SMOKE TEST PASSED (${ELAPSED}s) ===" + FAILS=$(grep -E '\[FAILED\] Failed to start|Kernel panic' "$SERIAL" || true) + [[ -n "$FAILS" ]] && echo "WARNING — failures in serial log:" && echo "$FAILS" + exit 0 + fi + sleep 3; (( ELAPSED += 3 )) + printf "." + done + echo "" + echo "=== LIVE SMOKE TEST FAILED (timeout after ${TIMEOUT}s) ===" + echo "--- last 50 lines of serial ---" + tail -50 "$SERIAL" 2>/dev/null || true + exit 1 + +# Full end-to-end: build ISO → boot live → fisherman install → boot installed. +# Requires debug=1 so SSH is available in the live session. +# Usage: just debug=1 e2e ubuntu-26.04 +e2e target: + #!/usr/bin/bash + set -euo pipefail + if [[ "{{debug}}" != "1" ]]; then + echo "ERROR: e2e requires debug=1 (SSH is needed for the fisherman install step)" + echo " Run: just debug=1 e2e {{target}}" + exit 1 + fi + echo "=== Step 1: Build ISO (debug=1) ===" + just debug=1 output_dir={{output_dir}} compression={{compression}} iso-sd-boot {{target}} + echo "=== Step 2: QEMU end-to-end ===" + sudo rm -f "{{e2e-disk}}" \ + "{{e2e-ovmf-vars-live}}" "{{e2e-ovmf-vars-installed}}" \ + "{{e2e-monitor-live}}" "{{e2e-monitor-installed}}" \ + "{{e2e-serial-live}}" "{{e2e-serial-installed}}" + just output_dir={{output_dir}} e2e-qemu {{target}} + +# Run the QEMU e2e test against an already-built ISO (skips the rebuild). +# Expects the ISO at {{output_dir}}/{{target}}-live.iso. +e2e-qemu target: + #!/usr/bin/bash + set -euo pipefail + just output_dir={{output_dir}} e2e-boot-live {{target}} + just output_dir={{output_dir}} e2e-install {{target}} + just output_dir={{output_dir}} e2e-boot-installed {{target}} + just e2e-verify-bootc {{target}} + +# Boot the live ISO in QEMU (daemonized) with a blank install disk attached. +# Waits for UBUNTU26_LIVE_READY marker then polls SSH until the session is ready. 
+e2e-boot-live target: + #!/usr/bin/bash + set -euo pipefail + # sshpass may live in Linuxbrew on dev machines; not in root PATH under sudo + export PATH="/home/linuxbrew/.linuxbrew/bin:${PATH:-/usr/local/bin:/usr/bin:/bin}" + ISO="{{output_dir}}/{{target}}-live.iso" + [[ -f "$ISO" ]] || { echo "No ISO — run: just debug=1 iso-sd-boot {{target}}" >&2; exit 1; } + + QEMU=$(command -v /usr/libexec/qemu-kvm /usr/bin/qemu-kvm \ + /usr/bin/qemu-system-x86_64 2>/dev/null | head -1) + [[ -z "$QEMU" ]] && { echo "qemu-kvm / qemu-system-x86_64 not found" >&2; exit 1; } + + OVMF_CODE="" + for f in /usr/share/OVMF/OVMF_CODE_4M.fd /usr/share/OVMF/OVMF_CODE.fd \ + /usr/share/edk2/ovmf/OVMF_CODE.fd /usr/share/ovmf/OVMF.fd; do + [[ -f "$f" ]] && { OVMF_CODE="$f"; break; } + done + [[ -z "$OVMF_CODE" ]] && { echo "OVMF not found" >&2; exit 1; } + for f in /usr/share/OVMF/OVMF_VARS_4M.fd /usr/share/OVMF/OVMF_VARS.fd \ + /usr/share/edk2/ovmf/OVMF_VARS.fd; do + [[ -f "$f" ]] && { cp "$f" "{{e2e-ovmf-vars-live}}"; break; } + done + + [[ -f "{{e2e-disk}}" ]] || qemu-img create -f qcow2 "{{e2e-disk}}" 30G + + echo "==> Booting live ISO: $ISO" + sudo "$QEMU" \ + -machine q35 -cpu host -m 4096 -smp 2 -accel kvm \ + -drive "if=pflash,format=raw,readonly=on,file=${OVMF_CODE}" \ + -drive "if=pflash,format=raw,file={{e2e-ovmf-vars-live}}" \ + -drive "if=none,id=iso,file=${ISO},media=cdrom,readonly=on,format=raw" \ + -device virtio-scsi-pci,id=scsi \ + -device scsi-cd,drive=iso \ + -drive "if=none,id=disk,file={{e2e-disk}},format=qcow2" \ + -device virtio-blk-pci,drive=disk \ + -netdev "user,id=net0,hostfwd=tcp::{{e2e-ssh-port}}-:22" \ + -device virtio-net-pci,netdev=net0 \ + -monitor "unix:{{e2e-monitor-live}},server,nowait" \ + -serial "file:{{e2e-serial-live}}" \ + -display none \ + -daemonize + echo "==> Live QEMU started (monitor: {{e2e-monitor-live}})" + + echo "==> Waiting for UBUNTU26_LIVE_READY (up to 8 min)..." 
+ for i in $(seq 1 160); do + if grep -q "UBUNTU26_LIVE_READY" "{{e2e-serial-live}}" 2>/dev/null; then + echo " ready (${i} × 3s)" + break + fi + [[ "$i" -eq 160 ]] && { + echo "TIMEOUT: live env not ready after 8 min" + tail -40 "{{e2e-serial-live}}" || true + exit 1 + } + sleep 3 + printf "." + done + + SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -o ConnectTimeout=5 -o PreferredAuthentications=password" + # Wait up to 10 min — snap seeding on the live session can take 5+ min. + # Use nc to gate on port open first so we don't waste retry budget on auth. + echo "==> Waiting for SSH on port {{e2e-ssh-port}} (up to 10 min)..." + SSH_READY=0 + for i in $(seq 1 120); do + if nc -z 127.0.0.1 {{e2e-ssh-port}} 2>/dev/null; then + if sshpass -p live ssh $SSH_OPTS liveuser@127.0.0.1 -p {{e2e-ssh-port}} true 2>/dev/null; then + echo " SSH ready (${i} × 5s = $((i*5))s after marker)" + SSH_READY=1 + break + fi + fi + sleep 5 + done + [[ $SSH_READY -eq 1 ]] || { echo "ERROR: SSH timed out after 10 min"; exit 1; } + +# Run fisherman install via SSH into the live QEMU VM, then shut down. +# The bootc payload image lives at /usr/lib/bootc/storage in the squashfs +# (separate from the VFS store used for flatpaks at /var/lib/containers/storage). +# We set CONTAINERS_STORAGE_CONF so fisherman + bootc find the image there. +e2e-install target: + #!/usr/bin/bash + set -euo pipefail + export PATH="/home/linuxbrew/.linuxbrew/bin:${PATH:-/usr/local/bin:/usr/bin:/bin}" + SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ + -o LogLevel=ERROR -o ConnectTimeout=10 -o PreferredAuthentications=password \ + -o ServerAliveInterval=30 -o ServerAliveCountMax=20" + SSH="sshpass -p live ssh $SSH_OPTS liveuser@127.0.0.1 -p {{e2e-ssh-port}}" + SCP="sshpass -p live scp $SSH_OPTS -P {{e2e-ssh-port}}" + + echo "==> Running fisherman install (takes several minutes)..." 
+    # SCP the install script and patched fisherman binary (has the oci: fix
+    # that makes composeFsBackend work with oci:/usr/lib/bootc/storage).
+    $SCP "{{justfile_directory()}}/ubuntu-26.04/src/e2e-install.sh" \
+        liveuser@127.0.0.1:/tmp/e2e-install.sh
+    [[ -f "/var/home/james/iso-output/fisherman-patched" ]] && \
+        $SCP "/var/home/james/iso-output/fisherman-patched" \
+        liveuser@127.0.0.1:/tmp/fisherman-patched || true
+    $SSH 'sudo bash /tmp/e2e-install.sh'
+    echo "==> Install complete."
+
+    # BLS entries are patched by e2e-install.sh (for any missing console kargs).
+
+    echo "==> Shutting down live QEMU..."
+    echo "system_powerdown" | sudo socat - "UNIX-CONNECT:{{e2e-monitor-live}}" 2>/dev/null || true
+    sleep 8
+    echo "quit" | sudo socat - "UNIX-CONNECT:{{e2e-monitor-live}}" 2>/dev/null || true
+
+# Boot the installed disk (no ISO) in QEMU, daemonized — no login step needed.
+# Waits for the BOOTC_STATUS_END first-boot marker on the serial console, then
+# returns; QEMU keeps running so e2e-verify-bootc can read the serial log.
+e2e-boot-installed target: + #!/usr/bin/bash + set -euo pipefail + [[ -f "{{e2e-disk}}" ]] || { echo "No install disk — run e2e-install first" >&2; exit 1; } + + QEMU=$(command -v /usr/libexec/qemu-kvm /usr/bin/qemu-kvm \ + /usr/bin/qemu-system-x86_64 2>/dev/null | head -1) + [[ -z "$QEMU" ]] && { echo "qemu-kvm / qemu-system-x86_64 not found" >&2; exit 1; } + + OVMF_CODE="" + for f in /usr/share/OVMF/OVMF_CODE_4M.fd /usr/share/OVMF/OVMF_CODE.fd \ + /usr/share/edk2/ovmf/OVMF_CODE.fd /usr/share/ovmf/OVMF.fd; do + [[ -f "$f" ]] && { OVMF_CODE="$f"; break; } + done + [[ -z "$OVMF_CODE" ]] && { echo "OVMF not found" >&2; exit 1; } + for f in /usr/share/OVMF/OVMF_VARS_4M.fd /usr/share/OVMF/OVMF_VARS.fd \ + /usr/share/edk2/ovmf/OVMF_VARS.fd; do + [[ -f "$f" ]] && { cp "$f" "{{e2e-ovmf-vars-installed}}"; break; } + done + + echo "==> Booting installed disk: {{e2e-disk}}" + sudo "$QEMU" \ + -machine q35 -cpu host -m 4096 -smp 2 -accel kvm \ + -drive "if=pflash,format=raw,readonly=on,file=${OVMF_CODE}" \ + -drive "if=pflash,format=raw,file={{e2e-ovmf-vars-installed}}" \ + -drive "if=none,id=disk,file={{e2e-disk}},format=qcow2" \ + -device virtio-blk-pci,drive=disk \ + -netdev "user,id=net0,hostfwd=tcp::{{e2e-ssh-installed-port}}-:22" \ + -device virtio-net-pci,netdev=net0 \ + -monitor "unix:{{e2e-monitor-installed}},server,nowait" \ + -serial "file:{{e2e-serial-installed}}" \ + -display none \ + -daemonize + echo "==> Installed QEMU started (SSH :{{e2e-ssh-installed-port}}, monitor: {{e2e-monitor-installed}})" + + # Wait for BOOTC_STATUS_END — emitted by bootc-status-report.service on first boot + TIMEOUT=420 + echo "==> Waiting for BOOTC_STATUS_END in serial log (timeout: ${TIMEOUT}s)..." + ELAPSED=0 + while (( ELAPSED < TIMEOUT )); do + if sudo grep -q "BOOTC_STATUS_END" "{{e2e-serial-installed}}" 2>/dev/null; then + echo "" + echo " bootc-status-report.service completed after ${ELAPSED}s." 
+            FAILS=$(sudo grep -E '\[FAILED\] Failed to start|Kernel panic' \
+                "{{e2e-serial-installed}}" || true)
+            [[ -n "$FAILS" ]] && echo "WARNING — failures:" && echo "$FAILS"
+            exit 0
+        fi
+        sleep 3; (( ELAPSED += 3 ))
+        printf "."
+    done
+    echo ""
+    echo "FAILED: BOOTC_STATUS_END not seen after ${TIMEOUT}s"
+    sudo tail -50 "{{e2e-serial-installed}}" 2>/dev/null || true
+    echo "quit" | sudo socat - "UNIX-CONNECT:{{e2e-monitor-installed}}" 2>/dev/null || true
+    exit 1
+
+# Extract bootc status from the serial log (written by bootc-status-report.service),
+# verify the deployment, then gracefully shut down the installed QEMU.
+e2e-verify-bootc target:
+    #!/usr/bin/bash
+    set -euo pipefail
+    echo "==> Extracting bootc status from serial log..."
+    SERIAL="{{e2e-serial-installed}}"
+
+    # Extract the JSON between the markers.
+    # Kernel log lines look like: [   52.050412] sh[2309]: {json...}
+    # Strip the timestamp+pid prefix then find the JSON object.
+    BOOTC_JSON=$(sudo awk '/BOOTC_STATUS_BEGIN/{p=1;next}/BOOTC_STATUS_END/{p=0}p' \
+        "$SERIAL" 2>/dev/null \
+        | tr -d '\r' \
+        | sed 's/^\[[ 0-9.]*\] [a-zA-Z0-9_@:.-]*\[.*\]: //' \
+        | grep '^{' | head -1)
+
+    if [[ -z "$BOOTC_JSON" ]]; then
+        echo "ERROR: no bootc status JSON found in serial log"
+        echo "--- last 50 lines of serial ---"
+        sudo tail -50 "$SERIAL" 2>/dev/null || true
+        echo "quit" | sudo socat - "UNIX-CONNECT:{{e2e-monitor-installed}}" 2>/dev/null || true
+        exit 1
+    fi
+
+    echo ""
+    echo "=== bootc status (from serial log) ==="
+    echo "$BOOTC_JSON" | python3 -m json.tool 2>/dev/null || echo "$BOOTC_JSON"
+    echo ""
+
+    VERIFY="{{justfile_directory()}}/ubuntu-26.04/src/verify-bootc-status.py"
+    if echo "$BOOTC_JSON" | python3 "$VERIFY"; then
+        EXIT_CODE=0
+    else
+        EXIT_CODE=1
+    fi
+
+    echo ""
+    echo "==> Shutting down installed QEMU..."
+ echo "system_powerdown" | sudo socat - "UNIX-CONNECT:{{e2e-monitor-installed}}" 2>/dev/null || true + sleep 8 + echo "quit" | sudo socat - "UNIX-CONNECT:{{e2e-monitor-installed}}" 2>/dev/null || true + + if [[ $EXIT_CODE -eq 0 ]]; then + echo "=== e2e PASSED: installed system booted and bootc status verified ===" + else + echo "=== e2e FAILED: bootc status verification did not pass ===" + fi + exit $EXIT_CODE diff --git a/ubuntu-26.04/Containerfile b/ubuntu-26.04/Containerfile index 044b218..c8991e6 100644 --- a/ubuntu-26.04/Containerfile +++ b/ubuntu-26.04/Containerfile @@ -51,3 +51,14 @@ ENV INSTALLER_CHANNEL=${INSTALLER_CHANNEL} # only deltas are downloaded on rebuild. CI always starts cold. RUN --mount=type=cache,target=/var/cache/flatpak-dl,id=ubuntu2604-flatpak \ chmod +x /tmp/src/install-flatpaks.sh && /tmp/src/install-flatpaks.sh + +# ── Snap pre-seed layer ──────────────────────────────────────────────────────── +# Download snap packages into /var/lib/snapd/seed/ so snapd installs them +# offline on the first live-session boot. tuna-installer then copies the full +# /var/lib/snapd/ tree to the installed system (fisherman PR #25), giving the +# user pre-installed snaps without re-downloading on first boot. +# +# `snap` is already in the base image via ubuntu-desktop-minimal; no apt-get +# needed here (and apt-get cannot be used — see note above about dpkg). +RUN --mount=type=cache,target=/var/cache/snap-dl,id=ubuntu2604-snap \ + chmod +x /tmp/src/install-snaps.sh && /tmp/src/install-snaps.sh diff --git a/ubuntu-26.04/src/e2e-install.sh b/ubuntu-26.04/src/e2e-install.sh new file mode 100755 index 0000000..5a1dd0e --- /dev/null +++ b/ubuntu-26.04/src/e2e-install.sh @@ -0,0 +1,72 @@ +#!/usr/bin/bash +# e2e-install.sh — runs on the live guest via SSH during the e2e test. +# Uses fisherman with composeFsBackend=true and oci:/usr/lib/bootc/storage. +# +# With the fisherman fix (PR #25 + skopeoExportOCI oci: fix), the flow is: +# 1. 
podman pull oci:/usr/lib/bootc/storage → VFS store on target disk +# 2. skopeo copy oci:/usr/lib/bootc/storage → oci-cache on target disk +# 3. podman --root containers-root run oci:cache → bootc install (composefs) +# +# The CONTAINERS_STORAGE_CONF override redirects the VFS graphroot to +# /var/tmp which fisherman bind-mounts to the target disk before pulling. + +set -euo pipefail + +FISHERMAN=$(find /var/lib/flatpak/app/org.bootcinstaller.Installer \ + -name fisherman -type f 2>/dev/null | head -1) +[[ -z "$FISHERMAN" ]] && FISHERMAN=/usr/local/bin/fisherman +# Use the patched fisherman binary if available (has the oci: source ref fix) +[[ -f /tmp/fisherman-patched ]] && FISHERMAN=/tmp/fisherman-patched +echo "Using fisherman: $FISHERMAN" + +# Redirect VFS graphroot to /var/tmp so it lands on the target disk after +# fisherman mounts the scratch (avoids filling the live overlay tmpfs). +cat > /tmp/e2e-storage.conf << 'EOF' +[storage] +driver = "vfs" +graphroot = "/var/tmp/e2e-containers-storage" +runroot = "/run/containers/storage" +EOF + +# Fisherman recipe — composeFsBackend:true uses skopeoExportOCI which now +# handles oci: source refs correctly (copies directly, no containers-storage +# name lookup). +cat > /tmp/e2e-recipe.json << 'EOF' +{ + "disk": "/dev/vda", + "filesystem": "xfs", + "composeFsBackend": true, + "bootloader": "systemd", + "selinuxDisabled": true, + "unifiedStorage": false, + "hostname": "ubuntu-e2e-test", + "image": "oci:/usr/lib/bootc/storage", + "flatpaks": [], + "encryption": {"type": "none"} +} +EOF + +echo "==> Running fisherman install (composeFsBackend=true)..." +CONTAINERS_STORAGE_CONF=/tmp/e2e-storage.conf "$FISHERMAN" /tmp/e2e-recipe.json +echo "==> Install complete." + +# BLS entries are in the EFI partition (vda1 FAT32), NOT the root partition (vda2 XFS). 
+# Always mount vda1 — fisherman's composefs systemd-boot layout: +# vda1 = EFI System (2G FAT32, has loader/entries/*.conf) +# vda2 = Linux root (28G XFS, no boot entries here) +EFI_TMP=$(mktemp -d) +mount /dev/vda1 "$EFI_TMP" +trap "umount '$EFI_TMP' 2>/dev/null || true; rmdir '$EFI_TMP'" EXIT +COUNT=0 +for entry in "${EFI_TMP}/loader/entries/"*.conf \ + "${EFI_TMP}/EFI/loader/entries/"*.conf; do + [[ -f "$entry" ]] || continue + PATCH="" + grep -q "console=ttyS0" "$entry" || PATCH="$PATCH console=ttyS0,115200" + grep -q "systemd.wants=ssh" "$entry" || PATCH="$PATCH systemd.wants=ssh.service" + if [[ -n "$PATCH" ]]; then + sed -i "s|^options .*|&${PATCH}|" "$entry" + (( ++COUNT )) # pre-increment: evaluates to new value (≥1), never exits non-zero + fi +done +echo "==> Patched ${COUNT} BLS entry/entries (console + SSH kargs)." diff --git a/ubuntu-26.04/src/etc/bootc-installer/recipe.json b/ubuntu-26.04/src/etc/bootc-installer/recipe.json index 0dcd45d..40e7b28 100644 --- a/ubuntu-26.04/src/etc/bootc-installer/recipe.json +++ b/ubuntu-26.04/src/etc/bootc-installer/recipe.json @@ -3,6 +3,7 @@ "distro_name": "Ubuntu 26.04", "imgref": "ghcr.io/hanthor/ubuntu-26.04-desktop-bootc:latest", "local_imgref": "containers-storage:ghcr.io/hanthor/ubuntu-26.04-desktop-bootc:latest", + "snaps": ["bare", "core24", "gnome-46-2404", "gtk-common-themes", "mesa-2404", "snapd-desktop-integration", "firefox", "thunderbird", "snap-store", "firmware-updater"], "tour": { "welcome": { "title": "Installing Ubuntu 26.04", diff --git a/ubuntu-26.04/src/flatpaks b/ubuntu-26.04/src/flatpaks index ed21ad8..1c78d28 100644 --- a/ubuntu-26.04/src/flatpaks +++ b/ubuntu-26.04/src/flatpaks @@ -1,43 +1,40 @@ -# Ubuntu 26.04 system flatpaks -# Installed into the live squashfs at build time. -# Copied offline to the target by tuna-installer during installation. 
+# Ubuntu 26.04 live ISO — system Flatpak applications # -# Note: org.bootcinstaller.Installer is installed separately via the GitHub -# Releases bundle (not on Flathub) earlier in install-flatpaks.sh. +# Mirrors the app set that ubuntu-desktop adds on top of ubuntu-desktop-minimal, +# delivered as Flatpaks instead of debs so they install cleanly onto the +# composefs read-only sysroot and are copied to the installed system by +# tuna-installer (CopyFlatpaks). # -# Format: one app ID per line, resolved from the flathub remote. -com.github.tchx84.Flatseal -com.mattjakeman.ExtensionManager -com.ranfdev.DistroShelf -io.github.flattool.Ignition -io.github.flattool.Warehouse -io.github.kolunmi.Bazaar -io.gitlab.adhami3310.Impression -io.missioncenter.MissionCenter -it.mijorus.smile -org.gnome.Calculator -org.gnome.Calendar -org.gnome.Characters -org.gnome.Connections -org.gnome.Contacts +# Already in ubuntu-desktop-minimal — NOT listed here: +# baobab, gnome-calculator, gnome-characters, gnome-clocks, +# gnome-control-center, gnome-disk-utility, gnome-font-viewer, +# gnome-text-editor, papers (doc viewer), ptyxis (terminal), +# resources (system monitor), seahorse +# +# Snaps (handled by src/snaps — NOT listed here): +# firefox, thunderbird, snap-store, firmware-updater +# +# No Flatpak available — skipped: +# usb-creator-gtk (Ubuntu-specific USB creator, no Flatpak equivalent) +# gst-*-thumbnailer (system thumbnailer services, not user-facing apps) + +# Office +org.libreoffice.LibreOffice + +# Productivity / utilities org.gnome.DejaDup org.gnome.FileRoller -org.gnome.Firmware -org.gnome.Logs -org.gnome.Loupe -org.gnome.Maps -org.gnome.NautilusPreviewer -org.gnome.Papers -org.gnome.Showtime +org.gnome.Calendar org.gnome.SimpleScan + +# Media +org.gnome.Shotwell +org.gnome.Showtime +org.gnome.Music + +# Internet / network +com.transmissionbt.Transmission +org.remmina.Remmina + +# Camera org.gnome.Snapshot -org.gnome.SoundRecorder -org.gnome.TextEditor -org.gnome.Weather 
-org.gnome.baobab -org.gnome.clocks -org.gnome.font-viewer -org.gtk.Gtk3theme.adw-gtk3 -org.gtk.Gtk3theme.adw-gtk3-dark -org.mozilla.firefox -page.tesk.Refine diff --git a/ubuntu-26.04/src/install-snaps.sh b/ubuntu-26.04/src/install-snaps.sh new file mode 100755 index 0000000..6808b8d --- /dev/null +++ b/ubuntu-26.04/src/install-snaps.sh @@ -0,0 +1,130 @@ +#!/usr/bin/bash +# install-snaps.sh — pre-seed snap packages into /var/lib/snapd/seed/ +# +# Reads /tmp/src/snaps (name + channel, one per line) and downloads each snap +# into /var/lib/snapd/seed/ so snapd installs them offline on the first live +# session boot. `snap download` talks directly to the store API — no running +# snapd daemon is required. +# +# Build cache: /var/cache/snap-dl/ — persisted via --mount=type=cache so +# only changed snaps are re-fetched on subsequent builds. +# +# Called by: Containerfile RUN layer (after dpkg db is restored, apt works) + +set -exo pipefail + +SNAP_CACHE="/var/cache/snap-dl" +SEED_DIR="/var/lib/snapd/seed" +SNAPS_DIR="$SEED_DIR/snaps" +ASSERT_DIR="$SEED_DIR/assertions" + +mkdir -p "$SNAPS_DIR" "$ASSERT_DIR" "$SNAP_CACHE/snaps" + +# overlayfs inside a Podman build doesn't support O_TMPFILE. +# Use a subdirectory of the bind-mounted cache volume as TMPDIR. +mkdir -p "${SNAP_CACHE}/tmp" +export TMPDIR="${SNAP_CACHE}/tmp" + +# ── Restore build cache (warm start) ───────────────────────────────────────── +cached=$(find "$SNAP_CACHE/snaps" -maxdepth 1 -name "*.snap" 2>/dev/null | wc -l) +if [[ $cached -gt 0 ]]; then + echo "==> Restoring $cached cached snap(s)..." + cp "$SNAP_CACHE/snaps/"*.snap "$SNAPS_DIR/" 2>/dev/null || true + cp "$SNAP_CACHE/snaps/"*.assert "$ASSERT_DIR/" 2>/dev/null || true +fi + +WORK=$(mktemp -d) + +# ── Download helper ─────────────────────────────────────────────────────────── +download_snap() { + local name="$1" channel="${2:-stable}" + + # Already present (any revision) — skip re-download. 
+ if find "$SNAPS_DIR" -maxdepth 1 -name "${name}_*.snap" 2>/dev/null | grep -q .; then + echo " -> $name: already cached" + return 0 + fi + + echo " -> snap download $name --channel=$channel" + if ! snap download --channel="$channel" --target-directory="$WORK" "$name"; then + echo "WARNING: snap download failed for $name (channel: $channel) — skipping" >&2 + return 0 + fi + + mv "$WORK/${name}_"*.snap "$SNAPS_DIR/" 2>/dev/null || true + mv "$WORK/${name}_"*.assert "$ASSERT_DIR/" 2>/dev/null || true +} + +# ── Parse snaps file ────────────────────────────────────────────────────────── +# Format: (channel required; comments and blank lines ignored) +while IFS= read -r line; do + # Strip inline comments and leading/trailing whitespace + line="${line%%#*}" + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" ]] && continue + + name="${line%% *}" + rest="${line#* }" + channel="$( [[ "$rest" != "$name" ]] && echo "$rest" || echo "stable" )" + channel="${channel#"${channel%%[![:space:]]*}"}" # ltrim + channel="${channel%"${channel##*[![:space:]]}"}" # rtrim + + download_snap "$name" "$channel" +done < /tmp/src/snaps + +rm -rf "$WORK" + +# ── Generate seed.yaml ──────────────────────────────────────────────────────── +SEED_YAML="$SEED_DIR/seed.yaml" +printf 'snaps:\n' > "$SEED_YAML" + +for snapfile in "$SNAPS_DIR/"*.snap; do + [[ -f "$snapfile" ]] || continue + base=$(basename "$snapfile" .snap) # e.g. firefox_8107 + name="${base%_*}" # e.g. firefox + assertfile="$ASSERT_DIR/${base}.assert" + + # Extract snap-id from the snap-declaration stanza in the .assert file. + # Assertion format is key: value; snap-declaration precedes snap-revision. 
+    snap_id=""
+    if [[ -f "$assertfile" ]]; then
+        snap_id=$(awk '
+            /^type: snap-declaration/ { in_decl=1 }
+            in_decl && /^snap-id:/ { print $2; exit }
+        ' "$assertfile")
+    fi
+
+    if [[ -z "$snap_id" ]]; then
+        echo "WARNING: could not extract snap-id for $name — seed entry may be incomplete" >&2
+    fi
+
+    # Recover the channel from the snaps file for the seed.yaml entry.
+    channel="latest/stable"
+    while IFS= read -r line; do
+        line="${line%%#*}"
+        line="${line#"${line%%[![:space:]]*}"}"; entry_name="${line%% *}"  # ltrim first, as the main parse loop does
+        [[ "$entry_name" == "$name" ]] || continue
+        rest="${line#* }"
+        [[ "$rest" != "$entry_name" ]] && channel="$rest" && break
+    done < /tmp/src/snaps
+    channel="${channel#"${channel%%[![:space:]]*}"}"
+    channel="${channel%"${channel##*[![:space:]]}"}"
+
+    printf '  - name: %s\n' "$name" >> "$SEED_YAML"
+    printf '    channel: %s\n' "$channel" >> "$SEED_YAML"
+    printf '    id: %s\n' "${snap_id:-placeholder}" >> "$SEED_YAML"
+    printf '    file: %s\n' "$(basename "$snapfile")" >> "$SEED_YAML"
+done
+
+echo "==> Snap seed (seed.yaml):"
+cat "$SEED_YAML"
+echo ""
+echo "==> Seed sizes:"
+du -sh "$SNAPS_DIR" "$ASSERT_DIR"
+
+# ── Save to build cache ──────────────────────────────────────────────────────
+cp "$SNAPS_DIR/"*.snap "$SNAP_CACHE/snaps/" 2>/dev/null || true
+cp "$ASSERT_DIR/"*.assert "$SNAP_CACHE/snaps/" 2>/dev/null || true
+
+echo "install-snaps.sh: done"
diff --git a/ubuntu-26.04/src/snaps b/ubuntu-26.04/src/snaps
new file mode 100644
index 0000000..fe702d2
--- /dev/null
+++ b/ubuntu-26.04/src/snaps
@@ -0,0 +1,31 @@
+# Ubuntu 26.04 live ISO — pre-seeded snap packages
+#
+# Mirrors the snap set shipped by the official Ubuntu 26.04 Desktop ISO.
+# Source: germinate output for ubuntu.resolute/desktop
+#   https://people.canonical.com/~ubuntu-archive/germinate-output/ubuntu.resolute/desktop
+#
+# Format:
+#   Channel is required — Ubuntu 26.04 uses per-snap ubuntu-26.04 tracks that
+#   receive Ubuntu-curated updates rather than generic upstream stable.
+#
+# Excluded intentionally:
+#   snapd                    — managed by apt, not seeded as a snap
+#   ubuntu-desktop-bootstrap — Ubuntu's first-boot OOBE; we use gnome-initial-setup
+#   desktop-security-center  — AppArmor prompting UI; AppArmor is masked in live session
+#   prompting-client         — AppArmor prompting client; same reason
+
+# ── Platform / base snaps ────────────────────────────────────────────────────
+# Required at runtime by app snaps; must be in the seed so apps can launch
+# without downloading content snaps after the live session starts.
+bare stable
+core24 stable
+gnome-46-2404 stable/ubuntu-26.04
+gtk-common-themes stable/ubuntu-26.04
+mesa-2404 stable/ubuntu-26.04
+snapd-desktop-integration stable/ubuntu-26.04
+
+# ── User-facing apps ─────────────────────────────────────────────────────────
+firefox stable/ubuntu-26.04
+thunderbird stable/ubuntu-26.04
+snap-store 2/stable/ubuntu-26.04
+firmware-updater 1/stable/ubuntu-26.04
diff --git a/ubuntu-26.04/src/verify-bootc-status.py b/ubuntu-26.04/src/verify-bootc-status.py
new file mode 100644
index 0000000..d055328
--- /dev/null
+++ b/ubuntu-26.04/src/verify-bootc-status.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+"""
+verify-bootc-status.py — reads bootc status JSON from stdin, verifies
+the deployment is healthy, and prints a summary. Exits 0 on success.
+"""
+import sys
+import json
+
+try:
+    data = json.load(sys.stdin)
+except json.JSONDecodeError as exc:
+    print(f"FAIL: could not parse bootc status JSON: {exc}", file=sys.stderr)
+    sys.exit(1)
+
+status = data.get("status", {})
+booted = status.get("booted")
+staged = status.get("staged")
+rollback = status.get("rollback")
+
+if not booted:
+    print("FAIL: status.booted is null — system is not a bootc deployment", file=sys.stderr)
+    sys.exit(1)
+
+img = booted.get("image", {}).get("image", {}).get("image", "(unknown)")
+digest = booted.get("image", {}).get("imageDigest", "")
+digest_short = digest[:19] + "..." if len(digest) > 19 else digest
+
+print(f"  Booted image: {img}")
+print(f"  Image digest: {digest_short}")
+print(f"  Staged:       {'yes — ' + str(staged) if staged else 'none'}")
+print(f"  Rollback:     {'yes' if rollback else 'none'}")
+print("  bootc status: VERIFIED OK ✓")