From b6a6a81f9507a77df139f26a41f6c9dfe3d663bf Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Tue, 8 Aug 2023 14:34:03 -0400 Subject: [PATCH 1/2] integration-test: deflake log test Wait for at least one log and increase the wait time 10x. --- test/integration-test/src/tests/log.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/test/integration-test/src/tests/log.rs b/test/integration-test/src/tests/log.rs index 65ad34eba..c51f7e96a 100644 --- a/test/integration-test/src/tests/log.rs +++ b/test/integration-test/src/tests/log.rs @@ -70,12 +70,16 @@ async fn log() { let mut logs = 0; let records = loop { - tokio::time::sleep(std::time::Duration::from_millis(10)).await; + tokio::time::sleep(std::time::Duration::from_millis(100)).await; let records = captured_logs.lock().unwrap(); - if records.len() == logs { + let len = records.len(); + if len == 0 { + continue; + } + if len == logs { break records; } - logs = records.len(); + logs = len; }; let mut records = records.iter(); From 82a77bc83d865af56914545625d97fa1f65fe733 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Mon, 7 Aug 2023 16:57:22 -0400 Subject: [PATCH 2/2] integration-test: Implement running on VMs Implements running integration tests on multiple VMs with arbitrary kernel images using `cargo xtask integration-test vm ...`. This changes our coverage from 6.2 to 6.1 and 6.4. --- .github/workflows/ci.yml | 118 +++++---- Cargo.toml | 3 + bpf/aya-log-ebpf/src/lib.rs | 19 +- init/Cargo.toml | 10 + init/src/main.rs | 166 ++++++++++++ test/README.md | 22 +- test/cloud-localds | 264 ------------------- test/run.sh | 241 ----------------- xtask/src/main.rs | 12 - xtask/src/run.rs | 498 +++++++++++++++++++++++++++++++----- 10 files changed, 709 insertions(+), 644 deletions(-) create mode 100644 init/Cargo.toml create mode 100644 init/src/main.rs delete mode 100755 test/cloud-localds delete mode 100755 test/run.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e2ab4cee7..e9eece290 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -139,8 +139,14 @@ jobs: --target ${{ matrix.target }} \ -Z build-std=core - build-integration-test: - runs-on: ubuntu-22.04 + run-integration-test: + strategy: + fail-fast: false + matrix: + runner: + - macos-12 + - ubuntu-22.04 + runs-on: ${{ matrix.runner }} steps: - uses: actions/checkout@v3 with: @@ -150,13 +156,12 @@ jobs: with: toolchain: nightly components: rust-src + targets: aarch64-unknown-linux-musl,x86_64-unknown-linux-musl - uses: Swatinem/rust-cache@v2 - - name: bpf-linker - run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git - - - name: Install dependencies + - name: Install prerequisites + if: runner.os == 'Linux' # ubuntu-22.04 comes with clang 14[0] which doesn't include support for signed and 64bit # enum values which was added in clang 15[1]. 
# @@ -171,63 +176,78 @@ jobs: set -euxo pipefail wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc echo deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main | sudo tee /etc/apt/sources.list.d/llvm.list - sudo apt-get update - sudo apt-get -y install clang gcc-multilib llvm + sudo apt update + sudo apt -y install clang gcc-multilib llvm locate qemu-system-{arm,x86} - - name: Build + - name: bpf-linker + if: runner.os == 'Linux' + run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git + + - name: Install prerequisites + if: runner.os == 'macOS' + # The clang shipped on macOS doesn't support BPF, so we need LLVM from brew. + # + # We also need LLVM for bpf-linker, see comment below. run: | set -euxo pipefail - mkdir -p integration-test-binaries - # See https://doc.rust-lang.org/cargo/reference/profiles.html for the - # names of the builtin profiles. Note that dev builds "debug" targets. - cargo xtask build-integration-test --cargo-arg=--profile=dev | xargs -I % cp % integration-test-binaries/dev - cargo xtask build-integration-test --cargo-arg=--profile=release | xargs -I % cp % integration-test-binaries/release - - - uses: actions/upload-artifact@v3 - with: - name: integration-test-binaries - path: integration-test-binaries + brew install qemu dpkg pkg-config llvm + echo /usr/local/opt/llvm/bin >> $GITHUB_PATH - run-integration-test: - runs-on: macos-latest - needs: ["build-integration-test"] - steps: - - uses: actions/checkout@v3 - with: - sparse-checkout: | - test/run.sh - test/cloud-localds + - name: bpf-linker + if: runner.os == 'macOS' + # NB: rustc doesn't ship libLLVM.so on macOS, so disable proxying (default feature). + run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git --no-default-features - - name: Install Pre-requisites + - name: Download debian kernels + if: runner.arch == 'ARM64' run: | - brew install qemu gnu-getopt coreutils cdrtools - - - name: Cache tmp files - uses: actions/cache@v3 - with: - path: | - .tmp/*.qcow2 - .tmp/test_rsa - .tmp/test_rsa.pub - key: tmp-files-${{ hashFiles('test/run.sh') }} - - - uses: actions/download-artifact@v3 - with: - name: integration-test-binaries - path: integration-test-binaries - - - name: Run integration tests + set -euxo pipefail + mkdir -p test/.tmp/debian-kernels/arm64 + # NB: a 4.19 kernel image for arm64 was not available. + # TODO: enable tests on kernels before 6.0. + # linux-image-5.10.0-23-cloud-arm64-unsigned_5.10.179-3_arm64.deb \ + printf '%s\0' \ + linux-image-6.1.0-10-cloud-arm64-unsigned_6.1.38-2_arm64.deb \ + linux-image-6.4.0-1-cloud-arm64-unsigned_6.4.4-2_arm64.deb \ + | xargs -0 -t -P0 -I {} wget -nd -q -P test/.tmp/debian-kernels/arm64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{} + + - name: Download debian kernels + if: runner.arch == 'X64' run: | set -euxo pipefail - find integration-test-binaries -type f -exec chmod +x {} \; - test/run.sh integration-test-binaries + mkdir -p test/.tmp/debian-kernels/amd64 + # TODO: enable tests on kernels before 6.0. 
+ # linux-image-4.19.0-21-cloud-amd64-unsigned_4.19.249-2_amd64.deb \ + # linux-image-5.10.0-23-cloud-amd64-unsigned_5.10.179-3_amd64.deb \ + printf '%s\0' \ + linux-image-6.1.0-10-cloud-amd64-unsigned_6.1.38-2_amd64.deb \ + linux-image-6.4.0-1-cloud-amd64-unsigned_6.4.4-2_amd64.deb \ + | xargs -0 -t -P0 -I {} wget -nd -q -P test/.tmp/debian-kernels/amd64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{} + + - name: Alias gtar as tar + if: runner.os == 'macOS' + # macOS tar doesn't support --wildcards which we use below. + run: mkdir tar-is-gtar && ln -s "$(which gtar)" tar-is-gtar/tar && echo "$PWD"/tar-is-gtar >> $GITHUB_PATH + + - name: Extract debian kernels + run: | + set -euxo pipefail + find test/.tmp -name '*.deb' -print0 | xargs -t -0 -I {} \ + sh -c "dpkg --fsys-tarfile {} | tar -C test/.tmp --wildcards --extract '*vmlinuz*' --file -" + + - name: Run integration tests + run: find test/.tmp -name 'vmlinuz-*' | xargs -t cargo xtask integration-test vm # Provides a single status check for the entire build workflow. # This is used for merge automation, like Mergify, since GH actions # has no concept of "when all status checks pass". # https://docs.mergify.com/conditions/#validating-all-status-checks build-workflow-complete: - needs: ["lint", "build-test-aya", "build-test-aya-bpf", "run-integration-test"] + needs: + - lint + - build-test-aya + - build-test-aya-bpf + - run-integration-test runs-on: ubuntu-latest steps: - name: Build Complete diff --git a/Cargo.toml b/Cargo.toml index 0a95461b8..4a4bdbca7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "aya-log-parser", "aya-obj", "aya-tool", + "init", "test/integration-test", "xtask", @@ -29,6 +30,7 @@ default-members = [ "aya-log-parser", "aya-obj", "aya-tool", + "init", # test/integration-test is omitted; including it in this list causes `cargo test` to run its # tests, and that doesn't work unless they've been built with `cargo xtask`. "xtask", @@ -72,6 +74,7 @@ lazy_static = { version = "1", default-features = false } libc = { version = "0.2.105", default-features = false } log = { version = "0.4", default-features = false } netns-rs = { version = "0.1", default-features = false } +nix = { version = "0.26.2", default-features = false } num_enum = { version = "0.6", default-features = false } object = { version = "0.31", default-features = false } parking_lot = { version = "0.12.0", default-features = false } diff --git a/bpf/aya-log-ebpf/src/lib.rs b/bpf/aya-log-ebpf/src/lib.rs index 2933509cf..5962c239b 100644 --- a/bpf/aya-log-ebpf/src/lib.rs +++ b/bpf/aya-log-ebpf/src/lib.rs @@ -1,10 +1,9 @@ #![no_std] #![warn(clippy::cast_lossless, clippy::cast_sign_loss)] -use aya_bpf::{ - macros::map, - maps::{PerCpuArray, PerfEventByteArray}, -}; +#[cfg(target_arch = "bpf")] +use aya_bpf::macros::map; +use aya_bpf::maps::{PerCpuArray, PerfEventByteArray}; pub use aya_log_common::{write_record_header, Level, WriteToBuf, LOG_BUF_CAPACITY}; pub use aya_log_ebpf_macros::{debug, error, info, log, trace, warn}; @@ -15,11 +14,19 @@ pub struct LogBuf { } #[doc(hidden)] -#[map] +// This cfg_attr prevents compilation failures on macOS where the generated section name doesn't +// meet mach-o's requirements. We wouldn't ordinarily build this crate for macOS, but we do so +// because the integration-test crate depends on this crate transitively. See comment in +// test/integration-test/Cargo.toml. 
+#[cfg_attr(target_arch = "bpf", map)]
 pub static mut AYA_LOG_BUF: PerCpuArray<LogBuf> = PerCpuArray::with_max_entries(1, 0);
 
 #[doc(hidden)]
-#[map]
+// This cfg_attr prevents compilation failures on macOS where the generated section name doesn't
+// meet mach-o's requirements. We wouldn't ordinarily build this crate for macOS, but we do so
+// because the integration-test crate depends on this crate transitively. See comment in
+// test/integration-test/Cargo.toml.
+#[cfg_attr(target_arch = "bpf", map)]
 pub static mut AYA_LOGS: PerfEventByteArray = PerfEventByteArray::new(0);
 
 #[doc(hidden)]
diff --git a/init/Cargo.toml b/init/Cargo.toml
new file mode 100644
index 000000000..6adf153e8
--- /dev/null
+++ b/init/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "init"
+version = "0.1.0"
+authors = ["Tamir Duberstein "]
+edition = "2021"
+publish = false
+
+[dependencies]
+anyhow = { workspace = true, features = ["std"] }
+nix = { workspace = true, features = ["fs", "mount", "reboot"] }
diff --git a/init/src/main.rs b/init/src/main.rs
new file mode 100644
index 000000000..89253de7d
--- /dev/null
+++ b/init/src/main.rs
@@ -0,0 +1,166 @@
+//! init is the first process started by the kernel.
+//!
+//! This implementation creates the minimal mounts required to run BPF programs, runs all binaries
+//! in /bin, prints a final message ("init: success|failure"), and powers off the machine.
+
+use anyhow::Context as _;
+
+#[derive(Debug)]
+struct Errors(Vec<anyhow::Error>);
+
+impl std::fmt::Display for Errors {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self(errors) = self;
+        for (i, error) in errors.iter().enumerate() {
+            if i != 0 {
+                writeln!(f)?;
+            }
+            write!(f, "{:?}", error)?;
+        }
+        Ok(())
+    }
+}
+
+impl std::error::Error for Errors {}
+
+fn run() -> anyhow::Result<()> {
+    const RXRXRX: nix::sys::stat::Mode = nix::sys::stat::Mode::empty()
+        .union(nix::sys::stat::Mode::S_IRUSR)
+        .union(nix::sys::stat::Mode::S_IXUSR)
+        .union(nix::sys::stat::Mode::S_IRGRP)
+        .union(nix::sys::stat::Mode::S_IXGRP)
+        .union(nix::sys::stat::Mode::S_IROTH)
+        .union(nix::sys::stat::Mode::S_IXOTH);
+
+    struct Mount {
+        source: &'static str,
+        target: &'static str,
+        fstype: &'static str,
+        flags: nix::mount::MsFlags,
+        data: Option<&'static str>,
+        target_mode: Option<nix::sys::stat::Mode>,
+    }
+
+    for Mount {
+        source,
+        target,
+        fstype,
+        flags,
+        data,
+        target_mode,
+    } in [
+        Mount {
+            source: "proc",
+            target: "/proc",
+            fstype: "proc",
+            flags: nix::mount::MsFlags::empty(),
+            data: None,
+            target_mode: Some(RXRXRX),
+        },
+        Mount {
+            source: "sysfs",
+            target: "/sys",
+            fstype: "sysfs",
+            flags: nix::mount::MsFlags::empty(),
+            data: None,
+            target_mode: Some(RXRXRX),
+        },
+        Mount {
+            source: "debugfs",
+            target: "/sys/kernel/debug",
+            fstype: "debugfs",
+            flags: nix::mount::MsFlags::empty(),
+            data: None,
+            target_mode: None,
+        },
+        Mount {
+            source: "bpffs",
+            target: "/sys/fs/bpf",
+            fstype: "bpf",
+            flags: nix::mount::MsFlags::empty(),
+            data: None,
+            target_mode: None,
+        },
+    ] {
+        match target_mode {
+            None => {
+                // Must exist.
+                let nix::sys::stat::FileStat { st_mode, .. } = nix::sys::stat::stat(target)
+                    .with_context(|| format!("stat({target}) failed"))?;
+                let s_flag = nix::sys::stat::SFlag::from_bits_truncate(st_mode);
+
+                if !s_flag.contains(nix::sys::stat::SFlag::S_IFDIR) {
+                    anyhow::bail!("{target} is not a directory");
+                }
+            }
+            Some(target_mode) => {
+                // Must not exist. 
+                nix::unistd::mkdir(target, target_mode)
+                    .with_context(|| format!("mkdir({target}) failed"))?;
+            }
+        }
+        nix::mount::mount(Some(source), target, Some(fstype), flags, data).with_context(|| {
+            format!("mount({source}, {target}, {fstype}, {flags:?}, {data:?}) failed")
+        })?;
+    }
+
+    // By contract we run everything in /bin and assume they're rust test binaries.
+    //
+    // If the user requested command line arguments, they're named init.arg={}.
+
+    // Read kernel parameters from /proc/cmdline. They're space separated on a single line.
+    let cmdline = std::fs::read_to_string("/proc/cmdline")
+        .with_context(|| "read_to_string(/proc/cmdline) failed")?;
+    let args = cmdline
+        .split_whitespace()
+        .filter_map(|parameter| {
+            parameter
+                .strip_prefix("init.arg=")
+                .map(std::ffi::OsString::from)
+        })
+        .collect::<Vec<_>>();
+
+    // Iterate files in /bin.
+    let read_dir = std::fs::read_dir("/bin").context("read_dir(/bin) failed")?;
+    let errors = read_dir
+        .filter_map(|entry| {
+            match (|| {
+                let entry = entry.context("read_dir(/bin) failed")?;
+                let path = entry.path();
+                let status = std::process::Command::new(&path)
+                    .args(&args)
+                    .status()
+                    .with_context(|| format!("failed to execute {}", path.display()))?;
+
+                if status.code() == Some(0) {
+                    Ok(())
+                } else {
+                    Err(anyhow::anyhow!("{} failed: {status:?}", path.display()))
+                }
+            })() {
+                Ok(()) => None,
+                Err(err) => Some(err),
+            }
+        })
+        .collect::<Vec<_>>();
+    if errors.is_empty() {
+        Ok(())
+    } else {
+        Err(Errors(errors).into())
+    }
+}
+
+fn main() {
+    match run() {
+        Ok(()) => {
+            println!("init: success");
+        }
+        Err(err) => {
+            println!("{err:?}");
+            println!("init: failure");
+        }
+    }
+    let how = nix::sys::reboot::RebootMode::RB_POWER_OFF;
+    let _: std::convert::Infallible = nix::sys::reboot::reboot(how)
+        .unwrap_or_else(|err| panic!("reboot({how:?}) failed: {err:?}"));
+}
diff --git a/test/README.md b/test/README.md
index a9521c0b6..82e41da00 100644
--- a/test/README.md
+++ b/test/README.md
@@ -3,21 +3,15 @@ Aya Integration Tests
 
 The aya integration test suite is a set of tests to ensure that
 common usage behaviours work on real Linux distros
 
-## Prerequisites
-
-### Linux
-
-To run locally all you need is:
-1. Rust nightly
-1. `cargo install bpf-linker`
+## Prerequisites
 
-### Other OSs
+You'll need:
 
-1. A POSIX shell
-1. `rustup target add x86_64-unknown-linux-musl`
+1. `rustup toolchain install nightly`
+1. `rustup target add {aarch64,x86_64}-unknown-linux-musl`
 1. `cargo install bpf-linker`
-1. Install `qemu` and `cloud-init-utils` package
-   - or any package that provides `cloud-localds`
+1. 
(virtualized only) `qemu` ## Usage @@ -26,15 +20,13 @@ From the root of this repository: ### Native ``` -cargo xtask integration-test +cargo xtask integration-test local ``` ### Virtualized ``` -mkdir -p integration-test-binaries -cargo xtask build-integration-test | xargs -I % cp % integration-test-binaries -./test/run.sh integration-test-binaries +cargo xtask integration-test vm ``` ### Writing an integration test diff --git a/test/cloud-localds b/test/cloud-localds deleted file mode 100755 index 3a28129f0..000000000 --- a/test/cloud-localds +++ /dev/null @@ -1,264 +0,0 @@ -#!/bin/bash - -VERBOSITY=0 -TEMP_D="" -DEF_DISK_FORMAT="raw" -DEF_FILESYSTEM="iso9660" -CR=" -" - -error() { echo "$@" 1>&2; } -fail() { [ $# -eq 0 ] || error "$@"; exit 1; } - -Usage() { - cat < my-meta-data - * ${0##*/} my-seed.img my-user-data my-meta-data - * kvm -net nic -net user,hostfwd=tcp::2222-:22 \\ - -drive file=disk1.img,if=virtio -drive file=my-seed.img,if=virtio - * ssh -p 2222 ubuntu@localhost -EOF -} - -bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; } -cleanup() { - [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" -} - -debug() { - local level=${1}; shift; - [ "${level}" -gt "${VERBOSITY}" ] && return - error "${@}" -} - -has_cmd() { - command -v "$1" >/dev/null 2>&1 -} - -short_opts="hH:i:d:f:m:N:o:V:v" -long_opts="disk-format:,dsmode:,filesystem:,help,hostname:,interfaces:," -long_opts="${long_opts}network-config:,output:,vendor-data:,verbose" -getopt_out=$(getopt -n "${0##*/}" \ - -o "${short_opts}" -l "${long_opts}" -- "$@") && - eval set -- "${getopt_out}" || - bad_Usage - -## <> -output="" -userdata="" -metadata="" -vendordata="" -filesystem="" -diskformat=$DEF_DISK_FORMAT -interfaces=_unset -dsmode="" -hostname="" -ncname="network-config" - - -while [ $# -ne 0 ]; do - cur=${1}; next=${2}; - case "$cur" in - -h|--help) Usage ; exit 0;; - -d|--disk-format) diskformat=$next; shift;; - -f|--filesystem) filesystem=$next; shift;; - -H|--hostname) hostname=$next; shift;; - -i|--interfaces) interfaces=$next; shift;; - -N|--network-config) netcfg=$next; shift;; - -m|--dsmode) dsmode=$next; shift;; - -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; - -V|--vendor-data) vendordata="$next";; - --) shift; break;; - esac - shift; -done - -## check arguments here -## how many args do you expect? -echo $1 -echo $2 -echo $3 -[ $# -ge 2 ] || bad_Usage "must provide output, userdata" -[ $# -le 3 ] || bad_Usage "confused by additional args" - -output=$1 -userdata=$2 -metadata=$3 - -if [ -n "$metadata" ]; then - [ "$interfaces" = "_unset" -a -z "$dsmode" -a -z "$hostname" ] || - fail "metadata is incompatible with:" \ - "--interfaces, --hostname, --dsmode" -fi - -case "$diskformat" in - tar|tar-seed-local|tar-seed-net) - if [ "${filesystem:-tar}" != "tar" ]; then - fail "diskformat=tar is incompatible with filesystem" - fi - filesystem="$diskformat" - ;; - tar*) - fail "supported 'tar' formats are tar, tar-seed-local, tar-seed-net" -esac - -if [ -z "$filesystem" ]; then - filesystem="$DEF_FILESYSTEM" -fi -if [ "$filesystem" = "iso" ]; then - filesystem="iso9660" -fi - -case "$filesystem" in - tar*) - has_cmd tar || - fail "missing 'tar'. Required for --filesystem=$filesystem";; - vfat) - has_cmd mkfs.vfat || - fail "missing 'mkfs.vfat'. Required for --filesystem=vfat." - has_cmd mcopy || - fail "missing 'mcopy'. Required for --filesystem=vfat." - ;; - iso9660) - has_cmd mkisofs || - fail "missing 'mkisofs'. Required for --filesystem=iso9660." 
- ;; - *) fail "unknown filesystem $filesystem";; -esac - -case "$diskformat" in - tar*|raw) :;; - *) has_cmd "qemu-img" || - fail "missing 'qemu-img'. Required for --disk-format=$diskformat." -esac - -[ "$interfaces" = "_unset" -o -r "$interfaces" ] || - fail "$interfaces: not a readable file" - -TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") || - fail "failed to make tempdir" -trap cleanup EXIT - -files=( "${TEMP_D}/user-data" "${TEMP_D}/meta-data" ) -if [ -n "$metadata" ]; then - cp "$metadata" "$TEMP_D/meta-data" || fail "$metadata: failed to copy" -else - instance_id="iid-local01" - iface_data="" - [ "$interfaces" != "_unset" ] && - iface_data=$(sed ':a;N;$!ba;s/\n/\\n/g' "$interfaces") - - # write json formatted user-data (json is a subset of yaml) - mdata="" - for kv in "instance-id:$instance_id" "local-hostname:$hostname" \ - "interfaces:${iface_data}" "dsmode:$dsmode"; do - key=${kv%%:*} - val=${kv#*:} - [ -n "$val" ] || continue - mdata="${mdata:+${mdata},${CR}}\"$key\": \"$val\"" - done - printf "{\n%s\n}\n" "$mdata" > "${TEMP_D}/meta-data" -fi - -if [ -n "$netcfg" ]; then - cp "$netcfg" "${TEMP_D}/$ncname" || - fail "failed to copy network config" - files[${#files[@]}]="$TEMP_D/$ncname" -fi - -if [ -n "$vendordata" ]; then - cp "$vendordata" "${TEMP_D}/vendor-data" || - fail "failed to copy vendor data" - files[${#files[@]}]="$TEMP_D/vendor-data" -fi - -files_rel=( ) -for f in "${files[@]}"; do - files_rel[${#files_rel[@]}]="${f#${TEMP_D}/}" -done - -if [ "$userdata" = "-" ]; then - cat > "$TEMP_D/user-data" || fail "failed to read from stdin" -else - cp "$userdata" "$TEMP_D/user-data" || fail "$userdata: failed to copy" -fi - -## alternatively, create a vfat filesystem with same files -img="$TEMP_D/seed-data" -tar_opts=( --owner=root --group=root ) - -case "$filesystem" in - tar) - tar "${tar_opts[@]}" -C "${TEMP_D}" -cf "$img" "${files_rel[@]}" || - fail "failed to create tarball of ${files_rel[*]}" - ;; - tar-seed-local|tar-seed-net) - if [ "$filesystem" = "tar-seed-local" ]; then - path="var/lib/cloud/seed/nocloud" - else - path="var/lib/cloud/seed/nocloud-net" - fi - mkdir -p "${TEMP_D}/${path}" || - fail "failed making path for seed files" - mv "${files[@]}" "${TEMP_D}/$path" || - fail "failed moving files" - tar "${tar_opts[@]}" -C "${TEMP_D}" -cf "$img" "${path}" || - fail "failed to create tarball with $path" - ;; - iso9660) - mkisofs -output "$img" -volid cidata \ - -joliet -rock "${files[@]}" > "$TEMP_D/err" 2>&1 || - { cat "$TEMP_D/err" 1>&2; fail "failed to mkisofs"; } - ;; - vfat) - truncate -s 128K "$img" || fail "failed truncate image" - out=$(mkfs.vfat -n cidata "$img" 2>&1) || - { error "failed: mkfs.vfat -n cidata $img"; error "$out"; } - mcopy -oi "$img" "${files[@]}" :: || - fail "failed to copy user-data, meta-data to img" - ;; -esac - -[ "$output" = "-" ] && output="$TEMP_D/final" -if [ "${diskformat#tar}" != "$diskformat" -o "$diskformat" = "raw" ]; then - cp "$img" "$output" || - fail "failed to copy image to $output" -else - qemu-img convert -f raw -O "$diskformat" "$img" "$output" || - fail "failed to convert to disk format $diskformat" -fi - -[ "$output" != "$TEMP_D/final" ] || { cat "$output" && output="-"; } || - fail "failed to write to -" - -debug 1 "wrote ${output} with filesystem=$filesystem and diskformat=$diskformat" -# vi: ts=4 noexpandtab diff --git a/test/run.sh b/test/run.sh deleted file mode 100755 index 26c989189..000000000 --- a/test/run.sh +++ /dev/null @@ -1,241 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -if [ "$(uname -s)" = 
"Darwin" ]; then - PATH="$(dirname "$(brew list gnu-getopt | grep "bin/getopt$")"):$PATH" - export PATH -fi - -AYA_SOURCE_DIR="$(realpath "$(dirname "$0")"/..)" - -# Temporary directory for tests to use. -AYA_TMPDIR="${AYA_SOURCE_DIR}/.tmp" - -# Directory for VM images -AYA_IMGDIR=${AYA_TMPDIR} - -if [ -z "${AYA_BUILD_TARGET}" ]; then - AYA_BUILD_TARGET=$(rustc -vV | sed -n 's|host: ||p') -fi - -AYA_HOST_ARCH=$(uname -m) -if [ "${AYA_HOST_ARCH}" = "arm64" ]; then - AYA_HOST_ARCH="aarch64" -fi - -if [ -z "${AYA_GUEST_ARCH}" ]; then - AYA_GUEST_ARCH="${AYA_HOST_ARCH}" -fi - -if [ "${AYA_GUEST_ARCH}" = "aarch64" ]; then - if [ -z "${AARCH64_UEFI}" ]; then - AARCH64_UEFI="$(brew list qemu -1 -v | grep edk2-aarch64-code.fd)" - fi -fi - -if [ -z "$AYA_MUSL_TARGET" ]; then - AYA_MUSL_TARGET=${AYA_GUEST_ARCH}-unknown-linux-musl -fi - -# Test Image -if [ -z "${AYA_TEST_IMAGE}" ]; then - AYA_TEST_IMAGE="fedora38" -fi - -case "${AYA_TEST_IMAGE}" in - fedora*) AYA_SSH_USER="fedora";; - centos*) AYA_SSH_USER="centos";; -esac - -download_images() { - mkdir -p "${AYA_IMGDIR}" - case $1 in - fedora37) - if [ ! -f "${AYA_IMGDIR}/fedora37.${AYA_GUEST_ARCH}.qcow2" ]; then - IMAGE="Fedora-Cloud-Base-37-1.7.${AYA_GUEST_ARCH}.qcow2" - IMAGE_URL="https://download.fedoraproject.org/pub/fedora/linux/releases/37/Cloud/${AYA_GUEST_ARCH}/images" - echo "Downloading: ${IMAGE}, this may take a while..." - curl -o "${AYA_IMGDIR}/fedora37.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}" - fi - ;; - fedora38) - if [ ! -f "${AYA_IMGDIR}/fedora38.${AYA_GUEST_ARCH}.qcow2" ]; then - IMAGE="Fedora-Cloud-Base-38_Beta-1.3.${AYA_GUEST_ARCH}.qcow2" - IMAGE_URL="https://fr2.rpmfind.net/linux/fedora/linux/releases/test/38_Beta/Cloud/${AYA_GUEST_ARCH}/images" - echo "Downloading: ${IMAGE}, this may take a while..." - curl -o "${AYA_IMGDIR}/fedora38.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}" - fi - ;; - centos8) - if [ ! -f "${AYA_IMGDIR}/centos8.${AYA_GUEST_ARCH}.qcow2" ]; then - IMAGE="CentOS-8-GenericCloud-8.4.2105-20210603.0.${AYA_GUEST_ARCH}.qcow2" - IMAGE_URL="https://cloud.centos.org/centos/8/${AYA_GUEST_ARCH}/images" - echo "Downloading: ${IMAGE}, this may take a while..." - curl -o "${AYA_IMGDIR}/centos8.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}" - fi - ;; - *) - echo "$1 is not a recognized image name" - return 1 - ;; - esac -} - -start_vm() { - download_images "${AYA_TEST_IMAGE}" - # prepare config - cat > "${AYA_TMPDIR}/metadata.yaml" < "${AYA_TMPDIR}/ssh_config" < "${AYA_TMPDIR}/user-data.yaml" < actual_cores - nr_cpus=8 - fi - fi - ;; - *) - echo "${AYA_GUEST_ARCH} is not supported" - return 1 - ;; - esac - - if [ ! -f "${AYA_IMGDIR}/vm.qcow2" ]; then - echo "Creating VM image" - qemu-img create -F qcow2 -f qcow2 -o backing_file="${AYA_IMGDIR}/${AYA_TEST_IMAGE}.${AYA_GUEST_ARCH}.qcow2" "${AYA_IMGDIR}/vm.qcow2" || return 1 - else - echo "Reusing existing VM image" - fi - $QEMU \ - -machine "${machine}" \ - -cpu "${cpu}" \ - -m 3G \ - -smp "${nr_cpus}" \ - -display none \ - -monitor none \ - -daemonize \ - -pidfile "${AYA_TMPDIR}/vm.pid" \ - -device virtio-net-pci,netdev=net0 \ - -netdev user,id=net0,hostfwd=tcp::2222-:22 \ - "${uefi[@]}" \ - -drive if=virtio,format=qcow2,file="${AYA_IMGDIR}/vm.qcow2" \ - -drive if=virtio,format=raw,file="${AYA_TMPDIR}/seed.img" || return 1 - - trap cleanup_vm EXIT - echo "Waiting for SSH on port 2222..." - retry=0 - max_retries=300 - while ! 
ssh -q -F "${AYA_TMPDIR}/ssh_config" -o ConnectTimeout=1 -i "${AYA_TMPDIR}/test_rsa" "${AYA_SSH_USER}"@localhost -p 2222 echo "Hello VM"; do - retry=$((retry+1)) - if [ ${retry} -gt ${max_retries} ]; then - echo "Unable to connect to VM" - return 1 - fi - sleep 1 - done - - echo "VM launched" - exec_vm uname -a - echo "Enabling testing repositories" - exec_vm sudo dnf config-manager --set-enabled updates-testing - exec_vm sudo dnf config-manager --set-enabled updates-testing-modular -} - -scp_vm() { - local=$1 - remote=$(basename "$1") - scp -q -F "${AYA_TMPDIR}/ssh_config" \ - -i "${AYA_TMPDIR}/test_rsa" \ - -P 2222 "${local}" \ - "${AYA_SSH_USER}@localhost:${remote}" -} - -rsync_vm() { - rsync -a -e "ssh -p 2222 -F ${AYA_TMPDIR}/ssh_config -i ${AYA_TMPDIR}/test_rsa" "$1" "$AYA_SSH_USER"@localhost: -} - -exec_vm() { - ssh -q -F "${AYA_TMPDIR}/ssh_config" \ - -i "${AYA_TMPDIR}/test_rsa" \ - -p 2222 \ - "${AYA_SSH_USER}"@localhost \ - "$@" -} - -stop_vm() { - if [ -f "${AYA_TMPDIR}/vm.pid" ]; then - echo "Stopping VM forcefully" - kill -9 "$(cat "${AYA_TMPDIR}/vm.pid")" - rm "${AYA_TMPDIR}/vm.pid" - fi -} - -cleanup_vm() { - if ! stop_vm; then - rm -f "${AYA_IMGDIR}/vm.qcow2" - fi -} - -start_vm -trap cleanup_vm EXIT - -# make sure we always use fresh sources (also see comment at the end) -rsync_vm "$*" - -exec_vm "find $* -type f -executable -print0 | xargs -0 -I {} sudo {} --test-threads=1" - -# we rm and sync but it doesn't seem to work reliably - I guess we could sleep a -# few seconds after but ain't nobody got time for that. Instead we also rm -# before rsyncing. -exec_vm "rm -rf $*; sync" diff --git a/xtask/src/main.rs b/xtask/src/main.rs index 3f93d6928..01105c003 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -19,7 +19,6 @@ pub struct XtaskOptions { enum Subcommand { Codegen(codegen::Options), Docs, - BuildIntegrationTest(run::BuildOptions), IntegrationTest(run::Options), PublicApi(public_api::Options), } @@ -45,17 +44,6 @@ fn main() -> Result<()> { match command { Subcommand::Codegen(opts) => codegen::codegen(opts, libbpf_dir), Subcommand::Docs => docs::docs(metadata), - Subcommand::BuildIntegrationTest(opts) => { - let binaries = run::build(opts)?; - let mut stdout = std::io::stdout(); - for (_name, binary) in binaries { - use std::{io::Write as _, os::unix::ffi::OsStrExt as _}; - - stdout.write_all(binary.as_os_str().as_bytes())?; - stdout.write_all("\n".as_bytes())?; - } - Ok(()) - } Subcommand::IntegrationTest(opts) => run::run(opts), Subcommand::PublicApi(opts) => public_api::public_api(opts, metadata), } diff --git a/xtask/src/run.rs b/xtask/src/run.rs index 6742a26b6..9b80b6eff 100644 --- a/xtask/src/run.rs +++ b/xtask/src/run.rs @@ -1,47 +1,66 @@ use std::{ + env::consts::{ARCH, OS}, ffi::OsString, fmt::Write as _, - io::BufReader, - path::PathBuf, - process::{Child, Command, Stdio}, + fs::{copy, create_dir_all, metadata, File}, + io::{BufRead as _, BufReader, ErrorKind, Write as _}, + path::{Path, PathBuf}, + process::{Child, Command, Output, Stdio}, }; use anyhow::{anyhow, bail, Context as _, Result}; use cargo_metadata::{Artifact, CompilerMessage, Message, Target}; use clap::Parser; -use xtask::AYA_BUILD_INTEGRATION_BPF; +use xtask::{exec, AYA_BUILD_INTEGRATION_BPF}; -#[derive(Debug, Parser)] -pub struct BuildOptions { - /// Arguments to pass to `cargo build`. - #[clap(long)] - pub cargo_arg: Vec, +#[derive(Parser)] +enum Environment { + /// Runs the integration tests locally. + Local { + /// The command used to wrap your application. 
+        #[clap(short, long, default_value = "sudo -E")]
+        runner: String,
+    },
+    /// Runs the integration tests in a VM.
+    VM {
+        /// The kernel images to use.
+        ///
+        /// You can download some images with:
+        ///
+        ///     wget --accept-regex '.*/linux-image-[0-9\.-]+-cloud-.*-unsigned*' \
+        ///         --recursive ftp://ftp.us.debian.org/debian/pool/main/l/linux/
+        ///
+        /// You can then extract them with:
+        ///
+        ///     find . -name '*.deb' -print0 \
+        ///         | xargs -0 -I {} sh -c "dpkg --fsys-tarfile {} \
+        ///         | tar --wildcards --extract '*vmlinuz*' --file -"
+        #[clap(required = true)]
+        kernel_image: Vec<PathBuf>,
+    },
 }
 
-#[derive(Debug, Parser)]
+#[derive(Parser)]
 pub struct Options {
-    #[command(flatten)]
-    pub build_options: BuildOptions,
-    /// The command used to wrap your application.
-    #[clap(short, long, default_value = "sudo -E")]
-    pub runner: String,
+    #[clap(subcommand)]
+    environment: Environment,
     /// Arguments to pass to your application.
-    #[clap(last = true)]
-    pub run_args: Vec<OsString>,
+    #[clap(global = true, last = true)]
+    run_args: Vec<OsString>,
 }
 
-/// Build the project
-pub fn build(opts: BuildOptions) -> Result<Vec<(String, PathBuf)>> {
-    let BuildOptions { cargo_arg } = opts;
+pub fn build<F>(target: Option<&str>, f: F) -> Result<Vec<(String, PathBuf)>>
+where
+    F: FnOnce(&mut Command) -> &mut Command,
+{
+    // Always use rust-lld and -Zbuild-std in case we're cross-compiling.
     let mut cmd = Command::new("cargo");
-    cmd.env(AYA_BUILD_INTEGRATION_BPF, "true")
-        .args([
-            "build",
-            "--tests",
-            "--message-format=json",
-            "--package=integration-test",
-        ])
-        .args(cargo_arg);
+    cmd.args(["build", "--message-format=json"]);
+    if let Some(target) = target {
+        let config = format!("target.{target}.linker = \"rust-lld\"");
+        cmd.args(["--target", target, "--config", &config]);
+    }
+    f(&mut cmd);
 
     let mut child = cmd
         .stdout(Stdio::piped())
@@ -83,40 +102,405 @@ pub fn build(opts: BuildOptions) -> Result<Vec<(String, PathBuf)>> {
     Ok(executables)
 }
 
-/// Build and run the project
+#[derive(Debug)]
+struct Errors(Vec<anyhow::Error>);
+
+impl std::fmt::Display for Errors {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self(errors) = self;
+        for (i, error) in errors.iter().enumerate() {
+            if i != 0 {
+                writeln!(f)?;
+            }
+            write!(f, "{:?}", error)?;
+        }
+        Ok(())
+    }
+}
+
+impl std::error::Error for Errors {}
+
+/// Build and run the project.
 pub fn run(opts: Options) -> Result<()> {
     let Options {
-        build_options,
-        runner,
+        environment,
         run_args,
     } = opts;
 
-    let binaries = build(build_options).context("error while building userspace application")?;
-    let mut args = runner.trim().split_terminator(' ');
-    let runner = args.next().ok_or(anyhow!("no first argument"))?;
-    let args = args.collect::<Vec<_>>();
-
-    let mut failures = String::new();
-    for (name, binary) in binaries {
-        let mut cmd = Command::new(runner);
-        let cmd = cmd
-            .args(args.iter())
-            .arg(binary)
-            .args(run_args.iter())
-            .arg("--test-threads=1");
-
-        println!("{name} running {cmd:?}");
-
-        let status = cmd
-            .status()
-            .with_context(|| format!("failed to run {cmd:?}"))?;
-        if status.code() != Some(0) {
-            writeln!(&mut failures, "{name} failed: {status:?}").context("String write failed")? 
-        }
+    type Binary = (String, PathBuf);
+    fn binaries(target: Option<&str>) -> Result<Vec<(&'static str, Vec<Binary>)>> {
+        ["dev", "release"]
+            .into_iter()
+            .map(|profile| {
+                let binaries = build(target, |cmd| {
+                    cmd.env(AYA_BUILD_INTEGRATION_BPF, "true").args([
+                        "--package",
+                        "integration-test",
+                        "--tests",
+                        "--profile",
+                        profile,
+                    ])
+                })?;
+                anyhow::Ok((profile, binaries))
+            })
+            .collect()
     }
-    if failures.is_empty() {
-        Ok(())
-    } else {
-        Err(anyhow!("failures:\n{}", failures))
+
+    // Use --test-threads=1 to prevent tests from interacting with shared
+    // kernel state due to the lack of inter-test isolation.
+    let default_args = [OsString::from("--test-threads=1")];
+    let run_args = default_args.iter().chain(run_args.iter());
+
+    match environment {
+        Environment::Local { runner } => {
+            let mut args = runner.trim().split_terminator(' ');
+            let runner = args.next().ok_or(anyhow!("no first argument"))?;
+            let args = args.collect::<Vec<_>>();
+
+            let binaries = binaries(None)?;
+
+            let mut failures = String::new();
+            for (profile, binaries) in binaries {
+                for (name, binary) in binaries {
+                    let mut cmd = Command::new(runner);
+                    let cmd = cmd.args(args.iter()).arg(binary).args(run_args.clone());
+
+                    println!("{profile}:{name} running {cmd:?}");
+
+                    let status = cmd
+                        .status()
+                        .with_context(|| format!("failed to run {cmd:?}"))?;
+                    if status.code() != Some(0) {
+                        writeln!(&mut failures, "{profile}:{name} failed: {status:?}")
+                            .context("String write failed")?
+                    }
+                }
+            }
+            if failures.is_empty() {
+                Ok(())
+            } else {
+                Err(anyhow!("failures:\n{}", failures))
+            }
+        }
+        Environment::VM { kernel_image } => {
+            // The user has asked us to run the tests on a VM. This is involved; strap in.
+            //
+            // We need tools to build the initramfs; we use gen_init_cpio from the Linux repository,
+            // taking care to cache it.
+            //
+            // Then we iterate the kernel images, using the `file` program to guess the target
+            // architecture. We then build the init program and our test binaries for that
+            // architecture, and use gen_init_cpio to build an initramfs containing the test
+            // binaries. We're almost ready to run the VM.
+            //
+            // We consult our OS, our architecture, and the target architecture to determine if
+            // hardware acceleration is available, and then start QEMU with the provided kernel
+            // image and the initramfs we built.
+            //
+            // We consume the output of QEMU, looking for the output of our init program. This is
+            // the only way to distinguish success from failure. We batch up the errors across all
+            // VM images and report to the user. The end.
+            let cache_dir = Path::new("test/.tmp");
+            create_dir_all(cache_dir).context("failed to create cache dir")?;
+            let gen_init_cpio = cache_dir.join("gen_init_cpio");
+            if !gen_init_cpio
+                .try_exists()
+                .context("failed to check existence of gen_init_cpio")?
+            {
+                let mut curl = Command::new("curl");
+                curl.args([
+                    "-sfSL",
+                    "https://raw.githubusercontent.com/torvalds/linux/master/usr/gen_init_cpio.c",
+                ]);
+                let mut curl_child = curl
+                    .stdout(Stdio::piped())
+                    .spawn()
+                    .with_context(|| format!("failed to spawn {curl:?}"))?;
+                let Child { stdout, .. } = &mut curl_child;
+                let curl_stdout = stdout.take().unwrap();
+
+                let mut clang = Command::new("clang");
+                let clang = exec(
+                    clang
+                        .args(["-g", "-O2", "-x", "c", "-", "-o"])
+                        .arg(&gen_init_cpio)
+                        .stdin(curl_stdout),
+                );
+
+                let output = curl_child
+                    .wait_with_output()
+                    .with_context(|| format!("failed to wait for {curl:?}"))?;
+                let Output { status, .. 
} = &output; + if status.code() != Some(0) { + bail!("{curl:?} failed: {output:?}") + } + + // Check the result of clang *after* checking curl; in case the download failed, + // only curl's output will be useful. + clang?; + } + + let mut errors = Vec::new(); + for kernel_image in kernel_image { + // Guess the guest architecture. + let mut cmd = Command::new("file"); + let output = cmd + .arg("--brief") + .arg(&kernel_image) + .output() + .with_context(|| format!("failed to run {cmd:?}"))?; + let Output { status, .. } = &output; + if status.code() != Some(0) { + bail!("{cmd:?} failed: {output:?}") + } + let Output { stdout, .. } = output; + + // Now parse the output of the file command, which looks something like + // + // - Linux kernel ARM64 boot executable Image, little-endian, 4K pages + // + // - Linux kernel x86 boot executable bzImage, version 6.1.0-10-cloud-amd64 [..] + + let stdout = String::from_utf8(stdout) + .with_context(|| format!("invalid UTF-8 in {cmd:?} stdout"))?; + let (_, stdout) = stdout + .split_once("Linux kernel") + .ok_or_else(|| anyhow!("failed to parse {cmd:?} stdout: {stdout}"))?; + let (guest_arch, _) = stdout + .split_once("boot executable") + .ok_or_else(|| anyhow!("failed to parse {cmd:?} stdout: {stdout}"))?; + let guest_arch = guest_arch.trim(); + + let (guest_arch, machine, cpu) = match guest_arch { + "ARM64" => ("aarch64", Some("virt"), Some("cortex-a57")), + "x86" => ("x86_64", Some("q35"), Some("qemu64")), + guest_arch => (guest_arch, None, None), + }; + + let target = format!("{guest_arch}-unknown-linux-musl"); + + // Build our init program. The contract is that it will run anything it finds in /bin. + let init = build(Some(&target), |cmd| { + cmd.args(["--package", "init", "--profile", "release"]) + }) + .context("building init program failed")?; + + let init = match &*init { + [(name, init)] => { + if name != "init" { + bail!("expected init program to be named init, found {name}") + } + init + } + init => bail!("expected exactly one init program, found {init:?}"), + }; + + let binaries = binaries(Some(&target))?; + + let tmp_dir = tempfile::tempdir().context("tempdir failed")?; + + let initrd_image = tmp_dir.path().join("qemu-initramfs.img"); + let initrd_image_file = File::create(&initrd_image).with_context(|| { + format!("failed to create {} for writing", initrd_image.display()) + })?; + + let mut gen_init_cpio = Command::new(&gen_init_cpio); + let mut gen_init_cpio_child = gen_init_cpio + .arg("-") + .stdin(Stdio::piped()) + .stdout(initrd_image_file) + .spawn() + .with_context(|| format!("failed to spawn {gen_init_cpio:?}"))?; + let Child { stdin, .. 
} = &mut gen_init_cpio_child; + let mut stdin = stdin.take().unwrap(); + + use std::os::unix::ffi::OsStrExt as _; + + // Send input into gen_init_cpio which looks something like + // + // file /init path-to-init 0755 0 0 + // dir /bin 0755 0 0 + // file /bin/foo path-to-foo 0755 0 0 + // file /bin/bar path-to-bar 0755 0 0 + + for bytes in [ + "file /init ".as_bytes(), + init.as_os_str().as_bytes(), + " 0755 0 0\n".as_bytes(), + "dir /bin 0755 0 0\n".as_bytes(), + ] { + stdin.write_all(bytes).expect("write"); + } + + for (profile, binaries) in binaries { + for (name, binary) in binaries { + let name = format!("{}-{}", profile, name); + let path = tmp_dir.path().join(&name); + copy(&binary, &path).with_context(|| { + format!("copy({}, {}) failed", binary.display(), path.display()) + })?; + for bytes in [ + "file /bin/".as_bytes(), + name.as_bytes(), + " ".as_bytes(), + path.as_os_str().as_bytes(), + " 0755 0 0\n".as_bytes(), + ] { + stdin.write_all(bytes).expect("write"); + } + } + } + // Must explicitly close to signal EOF. + drop(stdin); + + let output = gen_init_cpio_child + .wait_with_output() + .with_context(|| format!("failed to wait for {gen_init_cpio:?}"))?; + let Output { status, .. } = &output; + if status.code() != Some(0) { + bail!("{gen_init_cpio:?} failed: {output:?}") + } + + copy(&initrd_image, "/tmp/initrd.img").context("copy failed")?; + + let mut qemu = Command::new(format!("qemu-system-{guest_arch}")); + if let Some(machine) = machine { + qemu.args(["-machine", machine]); + } + if guest_arch == ARCH { + match OS { + "linux" => match metadata("/dev/kvm") { + Ok(metadata) => { + use std::os::unix::fs::FileTypeExt as _; + if metadata.file_type().is_char_device() { + qemu.args(["-accel", "kvm"]); + } + } + Err(error) => { + if error.kind() != ErrorKind::NotFound { + Err(error).context("failed to check existence of /dev/kvm")?; + } + } + }, + "macos" => { + qemu.args(["-accel", "hvf"]); + } + os => bail!("unsupported OS: {os}"), + } + } else if let Some(cpu) = cpu { + qemu.args(["-cpu", cpu]); + } + let console = OsString::from("ttyS0"); + let kernel_args = std::iter::once(("console", &console)) + .chain(run_args.clone().map(|run_arg| ("init.arg", run_arg))) + .enumerate() + .fold(OsString::new(), |mut acc, (i, (k, v))| { + if i != 0 { + acc.push(" "); + } + acc.push(k); + acc.push("="); + acc.push(v); + acc + }); + qemu.args(["-no-reboot", "-nographic", "-m", "512M", "-smp", "2"]) + .arg("-append") + .arg(kernel_args) + .arg("-kernel") + .arg(&kernel_image) + .arg("-initrd") + .arg(&initrd_image); + if guest_arch == "aarch64" { + match OS { + "linux" => { + let mut cmd = Command::new("locate"); + let output = cmd + .arg("QEMU_EFI.fd") + .output() + .with_context(|| format!("failed to run {cmd:?}"))?; + let Output { status, .. } = &output; + if status.code() != Some(0) { + bail!("{qemu:?} failed: {output:?}") + } + let Output { stdout, .. } = output; + let bios = String::from_utf8(stdout) + .with_context(|| format!("failed to parse output of {cmd:?}"))?; + qemu.args(["-bios", bios.trim()]); + } + "macos" => { + let mut cmd = Command::new("brew"); + let output = cmd + .args(["list", "qemu", "-1", "-v"]) + .output() + .with_context(|| format!("failed to run {cmd:?}"))?; + let Output { status, .. } = &output; + if status.code() != Some(0) { + bail!("{qemu:?} failed: {output:?}") + } + let Output { stdout, .. 
} = output; + let output = String::from_utf8(stdout) + .with_context(|| format!("failed to parse output of {cmd:?}"))?; + const NAME: &str = "edk2-aarch64-code.fd"; + let bios = output.lines().find(|line| line.contains(NAME)).ok_or_else( + || anyhow!("failed to find {NAME} in output of {cmd:?}: {output}"), + )?; + qemu.args(["-bios", bios.trim()]); + } + os => bail!("unsupported OS: {os}"), + }; + } + let mut qemu_child = qemu + .stdout(Stdio::piped()) + .spawn() + .with_context(|| format!("failed to spawn {qemu:?}"))?; + let Child { stdout, .. } = &mut qemu_child; + let stdout = stdout.take().unwrap(); + let stdout = BufReader::new(stdout); + + let mut outcome = None; + for line in stdout.lines() { + let line = + line.with_context(|| format!("failed to read line from {qemu:?}"))?; + println!("{}", line); + // The init program will print "init: success" or "init: failure" to indicate + // the outcome of running the binaries it found in /bin. + if let Some(line) = line.strip_prefix("init: ") { + let previous = match line { + "success" => outcome.replace(Ok(())), + "failure" => outcome.replace(Err(())), + line => bail!("unexpected init output: {}", line), + }; + if let Some(previous) = previous { + bail!("multiple exit status: previous={previous:?}, current={line}"); + } + // Try to get QEMU to exit on kernel panic; otherwise it might hang indefinitely. + if line.contains("end Kernel panic") { + qemu_child.kill().context("failed to kill {qemu:?}")?; + } + } + } + + let output = qemu_child + .wait_with_output() + .with_context(|| format!("failed to wait for {qemu:?}"))?; + let Output { status, .. } = &output; + if status.code() != Some(0) { + bail!("{qemu:?} failed: {output:?}") + } + + let outcome = outcome.ok_or(anyhow!("init did not exit"))?; + match outcome { + Ok(()) => {} + Err(()) => { + errors.push(anyhow!("VM binaries failed on {}", kernel_image.display())) + } + } + } + if errors.is_empty() { + Ok(()) + } else { + Err(Errors(errors).into()) + } + } } }
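
Note (illustration only, not part of the patch series): the VM runner and the init program above communicate through the kernel command line. xtask appends each extra test argument as an `init.arg=` entry to QEMU's `-append` string, and init recovers the arguments from `/proc/cmdline` before passing them to every test binary in `/bin`. The sketch below round-trips that encoding; the names `encode_kernel_args` and `decode_init_args` are invented for the example and assume space-free argument values, just like the real kernel command line.

```rust
// Hypothetical round-trip of the init.arg= kernel-parameter encoding used by the patch.

fn encode_kernel_args<'a>(run_args: impl IntoIterator<Item = &'a str>) -> String {
    // Mirrors the fold in xtask: "console=ttyS0" first, then one init.arg= per test argument.
    std::iter::once(("console", "ttyS0"))
        .chain(run_args.into_iter().map(|arg| ("init.arg", arg)))
        .map(|(k, v)| format!("{k}={v}"))
        .collect::<Vec<_>>()
        .join(" ")
}

fn decode_init_args(cmdline: &str) -> Vec<String> {
    // Mirrors init: split /proc/cmdline on whitespace and keep only init.arg= values.
    cmdline
        .split_whitespace()
        .filter_map(|parameter| parameter.strip_prefix("init.arg="))
        .map(String::from)
        .collect()
}

fn main() {
    let kernel_args = encode_kernel_args(["--test-threads=1"]);
    assert_eq!(kernel_args, "console=ttyS0 init.arg=--test-threads=1");
    assert_eq!(decode_init_args(&kernel_args), ["--test-threads=1"]);
    println!("kernel command line: {kernel_args}");
}
```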