From ce63a44d1143b193af74dba32c0a042a6c47e6e2 Mon Sep 17 00:00:00 2001 From: Alex Huszagh Date: Wed, 15 Jun 2022 09:19:35 -0500 Subject: [PATCH 1/4] Support temporary data directories and files. Create and robustly delete temporary data, which is stored in the user data directory under `${data_dir}/cross-rs/tmp`. Therefore, if program execution is ever halted, deleting the directory will automatically clean up any remaining data. A termination handler cleans up the temporary data on exit by clearing a stack of temporary files/directories, and we have 2 resource handlers that cleanup the temporary data when the object is dropped. The new installer for the termination hook is `install_termination_hook`, which together with the existing `install_panic_hook` installs both the panic and termination handlers. To get the directory containing all temporary data, use `cross::temp::dir()`. An example of using temporary directories and files is: ```rust { // these are all safe due to single-threaded execution let tmpdir1 = unsafe { temp::TempDir::new() }?; let tmpdir2 = unsafe { temp::TempDir::new() }?; let tmpfile1 = unsafe { temp::TempFile::new() }?; let tmpfile2 = unsafe { temp::TempFile::new() }?; for entry in fs::read_dir(tmpdir1.path()) { ... } for entry in fs::read_dir(tmpdir2.path()) { ... } } // cleanup tmpfile2 -> tmpfile1 -> tmpdir2 -> tmpdir1 ``` Note that only these 2 wrappers are provided, since it guarantees the temporary file and directory stack stays ordered and resources are cleaned up as desired. 
--- Cargo.lock | 111 ++++++++++++++++++++++++++++++++++ Cargo.toml | 3 + src/bin/commands/clean.rs | 49 +++++++++++++++ src/bin/commands/mod.rs | 2 + src/bin/cross-util.rs | 6 ++ src/bin/cross.rs | 2 + src/errors.rs | 26 ++++++++ src/lib.rs | 3 +- src/temp.rs | 121 ++++++++++++++++++++++++++++++++++++++ 9 files changed, 322 insertions(+), 1 deletion(-) create mode 100644 src/bin/commands/clean.rs create mode 100644 src/temp.rs diff --git a/Cargo.lock b/Cargo.lock index 0c663fd5b..0d1059934 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -156,6 +156,8 @@ dependencies = [ "clap", "color-eyre", "const-sha1", + "ctrlc", + "directories", "dunce", "eyre", "home", @@ -169,6 +171,7 @@ dependencies = [ "serde_json", "shell-escape", "shell-words", + "tempfile", "thiserror", "toml", "walkdir", @@ -176,6 +179,36 @@ dependencies = [ "winapi", ] +[[package]] +name = "ctrlc" +version = "3.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" +dependencies = [ + "nix", + "winapi", +] + +[[package]] +name = "directories" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "dunce" version = "1.0.2" @@ -198,6 +231,26 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + +[[package]] +name = "getrandom" +version = "0.2.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + [[package]] name = "gimli" version = "0.26.1" @@ -250,6 +303,15 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "itoa" version = "1.0.2" @@ -375,6 +437,26 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "redox_syscall" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redox_users" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +dependencies = [ + "getrandom", + "redox_syscall", + "thiserror", +] + [[package]] name = "regex" version = "1.5.6" @@ -392,6 +474,15 @@ version = "0.6.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -518,6 +609,20 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + 
"winapi", +] + [[package]] name = "termcolor" version = "1.1.3" @@ -642,6 +747,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "which" version = "4.2.5" diff --git a/Cargo.toml b/Cargo.toml index d260c51d8..2766e3727 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,9 @@ serde_json = "1" serde_ignored = "0.1.2" shell-words = "1.1.0" const-sha1 = "0.2.0" +ctrlc = { version = "3.2.2", features = ["termination"] } +directories = "4.0.1" +tempfile = "3.3.0" [target.'cfg(not(windows))'.dependencies] nix = { version = "0.24", default-features = false, features = ["user"] } diff --git a/src/bin/commands/clean.rs b/src/bin/commands/clean.rs new file mode 100644 index 000000000..e3c78c302 --- /dev/null +++ b/src/bin/commands/clean.rs @@ -0,0 +1,49 @@ +use std::fs; + +use super::images::RemoveImages; +use clap::Args; + +#[derive(Args, Debug)] +pub struct Clean { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Force removal of images. + #[clap(short, long)] + pub force: bool, + /// Remove local (development) images. + #[clap(short, long)] + pub local: bool, + /// Remove images. Default is a dry run. + #[clap(short, long)] + pub execute: bool, + /// Container engine (such as docker or podman). 
+ #[clap(long)] + pub engine: Option, +} + +impl Clean { + pub fn run(self, engine: cross::docker::Engine) -> cross::Result<()> { + let tempdir = cross::temp::dir()?; + match self.execute { + true => { + if tempdir.exists() { + fs::remove_dir_all(tempdir)?; + } + } + false => println!("fs::remove_dir_all({})", tempdir.display()), + } + + let remove_images = RemoveImages { + targets: vec![], + verbose: self.verbose, + force: self.force, + local: self.local, + execute: self.execute, + engine: None, + }; + remove_images.run(engine)?; + + Ok(()) + } +} diff --git a/src/bin/commands/mod.rs b/src/bin/commands/mod.rs index f2c4a675a..4fc9ea479 100644 --- a/src/bin/commands/mod.rs +++ b/src/bin/commands/mod.rs @@ -1,3 +1,5 @@ +mod clean; mod images; +pub use self::clean::*; pub use self::images::*; diff --git a/src/bin/cross-util.rs b/src/bin/cross-util.rs index b5746fcd1..e9ed18cb9 100644 --- a/src/bin/cross-util.rs +++ b/src/bin/cross-util.rs @@ -26,6 +26,8 @@ enum Commands { /// List cross images in local storage. #[clap(subcommand)] Images(commands::Images), + /// Clean all cross data in local storage. 
+ Clean(commands::Clean), } fn is_toolchain(toolchain: &str) -> cross::Result { @@ -57,6 +59,10 @@ pub fn main() -> cross::Result<()> { let engine = get_container_engine(args.engine(), args.verbose())?; args.run(engine)?; } + Commands::Clean(args) => { + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; + args.run(engine)?; + } } Ok(()) diff --git a/src/bin/cross.rs b/src/bin/cross.rs index 3c8ab6ce9..f2f8294ea 100644 --- a/src/bin/cross.rs +++ b/src/bin/cross.rs @@ -2,6 +2,8 @@ pub fn main() -> cross::Result<()> { cross::install_panic_hook()?; + cross::install_termination_hook()?; + let status = cross::run()?; let code = status .code() diff --git a/src/errors.rs b/src/errors.rs index 5f80b43c9..83970e7de 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,13 +1,39 @@ +use crate::temp; + +use std::sync::atomic::{AtomicBool, Ordering}; + pub use color_eyre::Section; pub use eyre::Context; pub use eyre::Result; +pub static mut TERMINATED: AtomicBool = AtomicBool::new(false); + pub fn install_panic_hook() -> Result<()> { color_eyre::config::HookBuilder::new() .display_env_section(false) .install() } +/// # Safety +/// Safe as long as we have single-threaded execution. +unsafe fn termination_handler() { + // we can't warn the user here, since locks aren't signal-safe. + // we can delete files, since fdopendir is thread-safe, and + // `openat`, `unlinkat`, and `lstat` are signal-safe. + // https://man7.org/linux/man-pages/man7/signal-safety.7.html + if !TERMINATED.swap(true, Ordering::SeqCst) && temp::has_tempfiles() { + temp::clean(); + } + + // EOWNERDEAD, seems to be the same on linux, macos, and bash on windows. + std::process::exit(130); +} + +pub fn install_termination_hook() -> Result<()> { + // SAFETY: safe since single-threaded execution. 
+ ctrlc::set_handler(|| unsafe { termination_handler() }).map_err(Into::into) +} + #[derive(Debug, thiserror::Error)] pub enum CommandError { #[error("`{command}` failed with {status}")] diff --git a/src/lib.rs b/src/lib.rs index 55746e605..56a5e3739 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,6 +29,7 @@ mod id; mod interpreter; mod rustc; mod rustup; +pub mod temp; use std::env; use std::io::{self, Write}; @@ -44,7 +45,7 @@ use self::cross_toml::CrossToml; use self::errors::Context; use self::rustc::{TargetList, VersionMetaExt}; -pub use self::errors::{install_panic_hook, Result}; +pub use self::errors::{install_panic_hook, install_termination_hook, Result}; pub use self::extensions::{CommandExt, OutputExt}; pub use self::file::ToUtf8; diff --git a/src/temp.rs b/src/temp.rs new file mode 100644 index 000000000..e0d36dfe8 --- /dev/null +++ b/src/temp.rs @@ -0,0 +1,121 @@ +use std::fs; +use std::path::{Path, PathBuf}; + +use crate::errors::Result; + +// open temporary directories and files so we ensure we cleanup on exit. +static mut FILES: Vec = vec![]; +static mut DIRS: Vec = vec![]; + +fn data_dir() -> Option { + directories::BaseDirs::new().map(|d| d.data_dir().to_path_buf()) +} + +pub fn dir() -> Result { + data_dir() + .map(|p| p.join("cross-rs").join("tmp")) + .ok_or(eyre::eyre!("unable to get data directory")) +} + +pub(crate) fn has_tempfiles() -> bool { + // SAFETY: safe, since we only check if the stack is empty. + unsafe { !FILES.is_empty() || !DIRS.is_empty() } +} + +/// # Safety +/// Safe as long as we have single-threaded execution. +pub(crate) unsafe fn clean() { + // don't expose FILES or DIRS outside this module, + // so we can only add or remove from the stack using + // our wrappers, guaranteeing add/remove in order. + FILES.clear(); + DIRS.clear(); +} + +/// # Safety +/// Safe as long as we have single-threaded execution. 
+unsafe fn push_tempfile() -> Result<&'static Path> { + let parent = dir()?; + fs::create_dir_all(&parent).ok(); + let file = tempfile::NamedTempFile::new_in(&parent)?; + FILES.push(file); + Ok(FILES.last().unwrap().path()) +} + +/// # Safety +/// Safe as long as we have single-threaded execution. +unsafe fn pop_tempfile() -> Option { + FILES.pop() +} + +#[derive(Debug)] +pub struct TempFile { + path: &'static Path, +} + +impl TempFile { + /// # Safety + /// Safe as long as we have single-threaded execution. + pub unsafe fn new() -> Result { + Ok(Self { + path: push_tempfile()?, + }) + } + + pub fn path(&self) -> &'static Path { + self.path + } +} + +impl Drop for TempFile { + fn drop(&mut self) { + // SAFETY: safe if we only modify the stack via `TempFile`. + unsafe { + pop_tempfile(); + } + } +} + +/// # Safety +/// Safe as long as we have single-threaded execution. +unsafe fn push_tempdir() -> Result<&'static Path> { + let parent = dir()?; + fs::create_dir_all(&parent).ok(); + let dir = tempfile::TempDir::new_in(&parent)?; + DIRS.push(dir); + Ok(DIRS.last().unwrap().path()) +} + +/// # Safety +/// Safe as long as we have single-threaded execution. +unsafe fn pop_tempdir() -> Option { + DIRS.pop() +} + +#[derive(Debug)] +pub struct TempDir { + path: &'static Path, +} + +impl TempDir { + /// # Safety + /// Safe as long as we have single-threaded execution. + pub unsafe fn new() -> Result { + Ok(Self { + path: push_tempdir()?, + }) + } + + pub fn path(&self) -> &'static Path { + self.path + } +} + +impl Drop for TempDir { + fn drop(&mut self) { + // SAFETY: safe if we only modify the stack via `TempDir`. + unsafe { + pop_tempdir(); + } + } +} From 2096d857c910ccf0ee3d28ba1e404276f9024b3b Mon Sep 17 00:00:00 2001 From: Alex Huszagh Date: Tue, 21 Jun 2022 20:59:57 -0500 Subject: [PATCH 2/4] Add comprehensive support for remote docker. 
This supports the volume-based structure, and uses some nice optimizations to ensure that only the desired toolchain and cargo items are copied over. It also uses drops to ensure scoped deletion of resources, to avoid complex logic ensuring their cleanup. It also supports persistent data volumes, through `cross-util`. In order to set up a persistent data volume, use: ```bash cross-util volumes create ``` This will create a persistent data volume specific for your current toolchain, and will be shared by all targets. This volume is specific for the toolchain version and channel, so a new volume will be created for each toolchain. Make sure you provide your `DOCKER_HOST` or correct engine type to ensure these are being made on the remote host. Then, run your command as before: ```bash CROSS_REMOTE=1 cross build --target arm-unknown-linux-gnueabihf ``` Finally, you can clean up the generated volume using: ```bash cross-util volumes remove ``` A few other utilities are present in `cross-util`: - `volumes list`: list all volumes created by cross. - `volumes remove-all`: remove all volumes created by cross. - `volumes prune`: prune all volumes unassociated with a container. - `containers list`: list all active containers created by cross. - `containers remove-all`: remove all active containers created by cross. - `clean`: clean all temporary data (images, containers, volumes, temp data) created by cross. The initial implementation was done by Marc Schreiber, schrieveslaach. A few more environment variables exist to fine-tune performance, as well as handle private dependencies. - `CROSS_REMOTE_COPY_REGISTRY`: copy the cargo registry - `CROSS_REMOTE_COPY_CACHE`: copy cache directories, including the target directory. Both of these generally lead to substantial performance penalties, but can enable the use of private SSH dependencies. In either case, the use of persistent data volumes is highly recommended. Fixes #248. Fixes #273. Closes #449. 
--- CHANGELOG.md | 1 + Cargo.toml | 1 + src/bin/commands/clean.rs | 34 +- src/bin/commands/containers.rs | 473 ++++++++++++++++++++ src/bin/commands/images.rs | 26 +- src/bin/commands/mod.rs | 2 + src/bin/cross-util.rs | 26 +- src/docker/custom.rs | 21 +- src/docker/engine.rs | 15 +- src/docker/local.rs | 12 +- src/docker/mod.rs | 40 +- src/docker/remote.rs | 749 ++++++++++++++++++++++++++++++++ src/docker/shared.rs | 55 ++- src/file.rs | 1 + src/lib.rs | 35 +- src/rustc.rs | 90 ++++ src/rustup.rs | 34 +- xtask/src/build_docker_image.rs | 12 +- xtask/src/main.rs | 18 +- xtask/src/target_info.rs | 16 +- xtask/src/util.rs | 10 +- 21 files changed, 1541 insertions(+), 130 deletions(-) create mode 100644 src/bin/commands/containers.rs create mode 100644 src/docker/remote.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index c6c127911..be7e3b1fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). - #803 - added `CROSS_CUSTOM_TOOLCHAIN` to disable automatic installation of components for use with tools like `cargo-bisect-rustc` - #795 - added images for additional toolchains maintained by cross-rs. - #792 - added `CROSS_CONTAINER_IN_CONTAINER` environment variable to replace `CROSS_DOCKER_IN_DOCKER`. +- #785 - added support for remote container engines through data volumes through setting the `CROSS_REMOTE` environment variable. also adds in utility commands to create and remove persistent data volumes. - #782 - added `build-std` config option, which builds the rust standard library from source if enabled. - #678 - Add optional `target.{target}.dockerfile[.file]`, `target.{target}.dockerfile.context` and `target.{target}.dockerfile.build-args` to invoke docker/podman build before using an image. - #678 - Add `target.{target}.pre-build` config for running commands before building the image. 
diff --git a/Cargo.toml b/Cargo.toml index 2766e3727..8f2e0f858 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,7 @@ shell-words = "1.1.0" const-sha1 = "0.2.0" ctrlc = { version = "3.2.2", features = ["termination"] } directories = "4.0.1" +walkdir = { version = "2", optional = true } tempfile = "3.3.0" [target.'cfg(not(windows))'.dependencies] diff --git a/src/bin/commands/clean.rs b/src/bin/commands/clean.rs index e3c78c302..fec49a683 100644 --- a/src/bin/commands/clean.rs +++ b/src/bin/commands/clean.rs @@ -1,6 +1,7 @@ use std::fs; -use super::images::RemoveImages; +use super::containers::*; +use super::images::*; use clap::Args; #[derive(Args, Debug)] @@ -31,9 +32,21 @@ impl Clean { fs::remove_dir_all(tempdir)?; } } - false => println!("fs::remove_dir_all({})", tempdir.display()), + false => println!( + "fs::remove_dir_all({})", + cross::pretty_path(&tempdir, |_| false) + ), } + // containers -> images -> volumes -> prune to ensure no conflicts. + let remove_containers = RemoveAllContainers { + verbose: self.verbose, + force: self.force, + execute: self.execute, + engine: None, + }; + remove_containers.run(engine.clone())?; + let remove_images = RemoveImages { targets: vec![], verbose: self.verbose, @@ -42,7 +55,22 @@ impl Clean { execute: self.execute, engine: None, }; - remove_images.run(engine)?; + remove_images.run(engine.clone())?; + + let remove_volumes = RemoveAllVolumes { + verbose: self.verbose, + force: self.force, + execute: self.execute, + engine: None, + }; + remove_volumes.run(engine.clone())?; + + let prune_volumes = PruneVolumes { + verbose: self.verbose, + execute: self.execute, + engine: None, + }; + prune_volumes.run(engine)?; Ok(()) } diff --git a/src/bin/commands/containers.rs b/src/bin/commands/containers.rs new file mode 100644 index 000000000..fefda5062 --- /dev/null +++ b/src/bin/commands/containers.rs @@ -0,0 +1,473 @@ +use atty::Stream; +use clap::{Args, Subcommand}; +use cross::{docker, CommandExt}; + +#[derive(Args, Debug)] 
+pub struct ListVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +impl ListVolumes { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { + list_volumes(self, &engine) + } +} + +#[derive(Args, Debug)] +pub struct RemoveAllVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Force removal of volumes. + #[clap(short, long)] + pub force: bool, + /// Remove volumes. Default is a dry run. + #[clap(short, long)] + pub execute: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +impl RemoveAllVolumes { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { + remove_all_volumes(self, &engine) + } +} + +#[derive(Args, Debug)] +pub struct PruneVolumes { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Remove volumes. Default is a dry run. + #[clap(short, long)] + pub execute: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +impl PruneVolumes { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { + prune_volumes(self, &engine) + } +} + +#[derive(Args, Debug)] +pub struct CreateVolume { + /// If cross is running inside a container. + #[clap(short, long)] + pub docker_in_docker: bool, + /// If we should copy the cargo registry to the volume. + #[clap(short, long)] + pub copy_registry: bool, + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). 
+ #[clap(long)] + pub engine: Option, +} + +impl CreateVolume { + pub fn run(self, engine: docker::Engine, channel: Option<&str>) -> cross::Result<()> { + create_persistent_volume(self, &engine, channel) + } +} + +#[derive(Args, Debug)] +pub struct RemoveVolume { + /// Triple for the target platform. + #[clap(long)] + pub target: String, + /// If cross is running inside a container. + #[clap(short, long)] + pub docker_in_docker: bool, + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +impl RemoveVolume { + pub fn run(self, engine: docker::Engine, channel: Option<&str>) -> cross::Result<()> { + remove_persistent_volume(self, &engine, channel) + } +} + +#[derive(Subcommand, Debug)] +pub enum Volumes { + /// List cross data volumes in local storage. + List(ListVolumes), + /// Remove cross data volumes in local storage. + RemoveAll(RemoveAllVolumes), + /// Prune volumes not used by any container. + Prune(PruneVolumes), + /// Create a persistent data volume for the current toolchain. + Create(CreateVolume), + /// Remove a persistent data volume for the current toolchain. 
+ Remove(RemoveVolume), +} + +impl Volumes { + pub fn run(self, engine: docker::Engine, toolchain: Option<&str>) -> cross::Result<()> { + match self { + Volumes::List(args) => args.run(engine), + Volumes::RemoveAll(args) => args.run(engine), + Volumes::Prune(args) => args.run(engine), + Volumes::Create(args) => args.run(engine, toolchain), + Volumes::Remove(args) => args.run(engine, toolchain), + } + } + + pub fn engine(&self) -> Option<&str> { + match self { + Volumes::List(l) => l.engine.as_deref(), + Volumes::RemoveAll(l) => l.engine.as_deref(), + Volumes::Prune(l) => l.engine.as_deref(), + Volumes::Create(l) => l.engine.as_deref(), + Volumes::Remove(l) => l.engine.as_deref(), + } + } + + pub fn verbose(&self) -> bool { + match self { + Volumes::List(l) => l.verbose, + Volumes::RemoveAll(l) => l.verbose, + Volumes::Prune(l) => l.verbose, + Volumes::Create(l) => l.verbose, + Volumes::Remove(l) => l.verbose, + } + } +} + +#[derive(Args, Debug)] +pub struct ListContainers { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +impl ListContainers { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { + list_containers(self, &engine) + } +} + +#[derive(Args, Debug)] +pub struct RemoveAllContainers { + /// Provide verbose diagnostic output. + #[clap(short, long)] + pub verbose: bool, + /// Force removal of containers. + #[clap(short, long)] + pub force: bool, + /// Remove containers. Default is a dry run. + #[clap(short, long)] + pub execute: bool, + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, +} + +impl RemoveAllContainers { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { + remove_all_containers(self, &engine) + } +} + +#[derive(Subcommand, Debug)] +pub enum Containers { + /// List cross containers in local storage. 
+ List(ListContainers), + /// Stop and remove cross containers in local storage. + RemoveAll(RemoveAllContainers), +} + +impl Containers { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { + match self { + Containers::List(args) => args.run(engine), + Containers::RemoveAll(args) => args.run(engine), + } + } + + pub fn engine(&self) -> Option<&str> { + match self { + Containers::List(l) => l.engine.as_deref(), + Containers::RemoveAll(l) => l.engine.as_deref(), + } + } + + pub fn verbose(&self) -> bool { + match self { + Containers::List(l) => l.verbose, + Containers::RemoveAll(l) => l.verbose, + } + } +} + +fn get_cross_volumes(engine: &docker::Engine, verbose: bool) -> cross::Result> { + let stdout = docker::subcommand(engine, "volume") + .arg("list") + .args(&["--format", "{{.Name}}"]) + // handles simple regex: ^ for start of line. + .args(&["--filter", "name=^cross-"]) + .run_and_get_stdout(verbose)?; + + let mut volumes: Vec = stdout.lines().map(|s| s.to_string()).collect(); + volumes.sort(); + + Ok(volumes) +} + +pub fn list_volumes( + ListVolumes { verbose, .. }: ListVolumes, + engine: &docker::Engine, +) -> cross::Result<()> { + get_cross_volumes(engine, verbose)? + .iter() + .for_each(|line| println!("{}", line)); + + Ok(()) +} + +pub fn remove_all_volumes( + RemoveAllVolumes { + verbose, + force, + execute, + .. + }: RemoveAllVolumes, + engine: &docker::Engine, +) -> cross::Result<()> { + let volumes = get_cross_volumes(engine, verbose)?; + + let mut command = docker::subcommand(engine, "volume"); + command.arg("rm"); + if force { + command.arg("--force"); + } + command.args(&volumes); + if volumes.is_empty() { + Ok(()) + } else if execute { + command.run(verbose, false).map_err(Into::into) + } else { + eprintln!("Note: this is a dry run. to remove the volumes, pass the `--execute` flag."); + command.print_verbose(true); + Ok(()) + } +} + +pub fn prune_volumes( + PruneVolumes { + verbose, execute, .. 
+ }: PruneVolumes, + engine: &docker::Engine, +) -> cross::Result<()> { + let mut command = docker::subcommand(engine, "volume"); + command.args(&["prune", "--force"]); + if execute { + command.run(verbose, false).map_err(Into::into) + } else { + eprintln!("Note: this is a dry run. to prune the volumes, pass the `--execute` flag."); + command.print_verbose(true); + Ok(()) + } +} + +pub fn create_persistent_volume( + CreateVolume { + docker_in_docker, + copy_registry, + verbose, + .. + }: CreateVolume, + engine: &docker::Engine, + channel: Option<&str>, +) -> cross::Result<()> { + // we only need a triple that needs docker: the actual target doesn't matter. + let triple = cross::Host::X86_64UnknownLinuxGnu.triple(); + let (target, metadata, dirs) = + docker::get_package_info(engine, triple, channel, docker_in_docker, verbose)?; + let container = docker::remote::unique_container_identifier(&target, &metadata, &dirs)?; + let volume = docker::remote::unique_toolchain_identifier(&dirs.sysroot)?; + + if docker::remote::volume_exists(engine, &volume, verbose)? 
{ + eyre::bail!("Error: volume {volume} already exists."); + } + + docker::subcommand(engine, "volume") + .args(&["create", &volume]) + .run_and_get_status(verbose, false)?; + + // stop the container if it's already running + let state = docker::remote::container_state(engine, &container, verbose)?; + if !state.is_stopped() { + eprintln!("Warning: container {container} was running."); + docker::remote::container_stop(engine, &container, verbose)?; + } + if state.exists() { + eprintln!("Warning: container {container} was exited."); + docker::remote::container_rm(engine, &container, verbose)?; + } + + // create a dummy running container to copy data over + let mount_prefix = docker::remote::MOUNT_PREFIX; + let mut docker = docker::subcommand(engine, "run"); + docker.args(&["--name", &container]); + docker.args(&["-v", &format!("{}:{}", volume, mount_prefix)]); + docker.arg("-d"); + if atty::is(Stream::Stdin) && atty::is(Stream::Stdout) && atty::is(Stream::Stderr) { + docker.arg("-t"); + } + docker.arg(docker::UBUNTU_BASE); + // ensure the process never exits until we stop it + docker.args(&["sh", "-c", "sleep infinity"]); + docker.run_and_get_status(verbose, false)?; + + docker::remote::copy_volume_container_xargo( + engine, + &container, + &dirs.xargo, + &target, + mount_prefix.as_ref(), + verbose, + )?; + docker::remote::copy_volume_container_cargo( + engine, + &container, + &dirs.cargo, + mount_prefix.as_ref(), + copy_registry, + verbose, + )?; + docker::remote::copy_volume_container_rust( + engine, + &container, + &dirs.sysroot, + &target, + mount_prefix.as_ref(), + true, + verbose, + )?; + + docker::remote::container_stop(engine, &container, verbose)?; + docker::remote::container_rm(engine, &container, verbose)?; + + Ok(()) +} + +pub fn remove_persistent_volume( + RemoveVolume { + target, + docker_in_docker, + verbose, + .. 
+ }: RemoveVolume, + engine: &docker::Engine, + channel: Option<&str>, +) -> cross::Result<()> { + let (_, _, dirs) = + docker::get_package_info(engine, &target, channel, docker_in_docker, verbose)?; + let volume = docker::remote::unique_toolchain_identifier(&dirs.sysroot)?; + + if !docker::remote::volume_exists(engine, &volume, verbose)? { + eyre::bail!("Error: volume {volume} does not exist."); + } + + docker::remote::volume_rm(engine, &volume, verbose)?; + + Ok(()) +} + +fn get_cross_containers(engine: &docker::Engine, verbose: bool) -> cross::Result> { + let stdout = docker::subcommand(engine, "ps") + .arg("-a") + .args(&["--format", "{{.Names}}: {{.State}}"]) + // handles simple regex: ^ for start of line. + .args(&["--filter", "name=^cross-"]) + .run_and_get_stdout(verbose)?; + + let mut containers: Vec = stdout.lines().map(|s| s.to_string()).collect(); + containers.sort(); + + Ok(containers) +} + +pub fn list_containers( + ListContainers { verbose, .. }: ListContainers, + engine: &docker::Engine, +) -> cross::Result<()> { + get_cross_containers(engine, verbose)? + .iter() + .for_each(|line| println!("{}", line)); + + Ok(()) +} + +pub fn remove_all_containers( + RemoveAllContainers { + verbose, + force, + execute, + .. 
+ }: RemoveAllContainers, + engine: &docker::Engine, +) -> cross::Result<()> { + let containers = get_cross_containers(engine, verbose)?; + let mut running = vec![]; + let mut stopped = vec![]; + for container in containers.iter() { + // cannot fail, formatted as {{.Names}}: {{.State}} + let (name, state) = container.split_once(':').unwrap(); + let name = name.trim(); + let state = docker::remote::ContainerState::new(state.trim())?; + if state.is_stopped() { + stopped.push(name); + } else { + running.push(name); + } + } + + let mut commands = vec![]; + if !running.is_empty() { + let mut stop = docker::subcommand(engine, "stop"); + stop.args(&running); + commands.push(stop); + } + + if !(stopped.is_empty() && running.is_empty()) { + let mut rm = docker::subcommand(engine, "rm"); + if force { + rm.arg("--force"); + } + rm.args(&running); + rm.args(&stopped); + commands.push(rm); + } + if execute { + for mut command in commands { + command.run(verbose, false)?; + } + } else { + eprintln!("Note: this is a dry run. 
to remove the containers, pass the `--execute` flag."); + for command in commands { + command.print_verbose(true); + } + } + + Ok(()) +} diff --git a/src/bin/commands/images.rs b/src/bin/commands/images.rs index 860f0e661..4dfaa95e2 100644 --- a/src/bin/commands/images.rs +++ b/src/bin/commands/images.rs @@ -1,9 +1,9 @@ use clap::{Args, Subcommand}; -use cross::CommandExt; +use cross::{docker, CommandExt}; // known image prefixes, with their registry // the docker.io registry can also be implicit -const GHCR_IO: &str = cross::docker::CROSS_IMAGE; +const GHCR_IO: &str = docker::CROSS_IMAGE; const RUST_EMBEDDED: &str = "rustembedded/cross"; const DOCKER_IO: &str = "docker.io/rustembedded/cross"; const IMAGE_PREFIXES: &[&str] = &[GHCR_IO, DOCKER_IO, RUST_EMBEDDED]; @@ -19,7 +19,7 @@ pub struct ListImages { } impl ListImages { - pub fn run(self, engine: cross::docker::Engine) -> cross::Result<()> { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { list_images(self, &engine) } } @@ -46,7 +46,7 @@ pub struct RemoveImages { } impl RemoveImages { - pub fn run(self, engine: cross::docker::Engine) -> cross::Result<()> { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { if self.targets.is_empty() { remove_all_images(self, &engine) } else { @@ -64,7 +64,7 @@ pub enum Images { } impl Images { - pub fn run(self, engine: cross::docker::Engine) -> cross::Result<()> { + pub fn run(self, engine: docker::Engine) -> cross::Result<()> { match self { Images::List(args) => args.run(engine), Images::Remove(args) => args.run(engine), @@ -120,11 +120,11 @@ fn is_local_image(tag: &str) -> bool { } fn get_cross_images( - engine: &cross::docker::Engine, + engine: &docker::Engine, verbose: bool, local: bool, ) -> cross::Result> { - let stdout = cross::docker::subcommand(engine, "images") + let stdout = docker::subcommand(engine, "images") .arg("--format") .arg("{{.Repository}}:{{.Tag}} {{.ID}}") .run_and_get_stdout(verbose)?; @@ -174,7 +174,7 @@ fn 
get_image_target(image: &Image) -> cross::Result { pub fn list_images( ListImages { verbose, .. }: ListImages, - engine: &cross::docker::Engine, + engine: &docker::Engine, ) -> cross::Result<()> { get_cross_images(engine, verbose, true)? .iter() @@ -184,13 +184,13 @@ pub fn list_images( } fn remove_images( - engine: &cross::docker::Engine, + engine: &docker::Engine, images: &[&str], verbose: bool, force: bool, execute: bool, ) -> cross::Result<()> { - let mut command = cross::docker::subcommand(engine, "rmi"); + let mut command = docker::subcommand(engine, "rmi"); if force { command.arg("--force"); } @@ -200,7 +200,7 @@ fn remove_images( } else if execute { command.run(verbose, false).map_err(Into::into) } else { - eprintln!("note: this is a dry run. to remove the images, pass the `--execute` flag."); + eprintln!("Note: this is a dry run. to remove the images, pass the `--execute` flag."); command.print_verbose(true); Ok(()) } @@ -214,7 +214,7 @@ pub fn remove_all_images( execute, .. }: RemoveImages, - engine: &cross::docker::Engine, + engine: &docker::Engine, ) -> cross::Result<()> { let images = get_cross_images(engine, verbose, local)?; let ids: Vec<&str> = images.iter().map(|i| i.id.as_ref()).collect(); @@ -230,7 +230,7 @@ pub fn remove_target_images( execute, .. 
}: RemoveImages, - engine: &cross::docker::Engine, + engine: &docker::Engine, ) -> cross::Result<()> { let images = get_cross_images(engine, verbose, local)?; let mut ids = vec![]; diff --git a/src/bin/commands/mod.rs b/src/bin/commands/mod.rs index 4fc9ea479..30a1c771a 100644 --- a/src/bin/commands/mod.rs +++ b/src/bin/commands/mod.rs @@ -1,5 +1,7 @@ mod clean; +mod containers; mod images; pub use self::clean::*; +pub use self::containers::*; pub use self::images::*; diff --git a/src/bin/cross-util.rs b/src/bin/cross-util.rs index e9ed18cb9..10b705bbf 100644 --- a/src/bin/cross-util.rs +++ b/src/bin/cross-util.rs @@ -1,6 +1,7 @@ #![deny(missing_debug_implementations, rust_2018_idioms)] use clap::{CommandFactory, Parser, Subcommand}; +use cross::docker; mod commands; @@ -23,9 +24,15 @@ struct CliHidden { #[derive(Subcommand, Debug)] enum Commands { - /// List cross images in local storage. + /// Work with cross images in local storage. #[clap(subcommand)] Images(commands::Images), + /// Work with cross volumes in local storage. + #[clap(subcommand)] + Volumes(commands::Volumes), + /// Work with cross containers in local storage. + #[clap(subcommand)] + Containers(commands::Containers), /// Clean all cross data in local storage. Clean(commands::Clean), } @@ -39,16 +46,13 @@ fn is_toolchain(toolchain: &str) -> cross::Result { } } -fn get_container_engine( - engine: Option<&str>, - verbose: bool, -) -> cross::Result { +fn get_container_engine(engine: Option<&str>, verbose: bool) -> cross::Result { let engine = if let Some(ce) = engine { which::which(ce)? } else { - cross::docker::get_container_engine()? + docker::get_container_engine()? 
}; - cross::docker::Engine::from_path(engine, verbose) + docker::Engine::from_path(engine, verbose) } pub fn main() -> cross::Result<()> { @@ -59,6 +63,14 @@ pub fn main() -> cross::Result<()> { let engine = get_container_engine(args.engine(), args.verbose())?; args.run(engine)?; } + Commands::Volumes(args) => { + let engine = get_container_engine(args.engine(), args.verbose())?; + args.run(engine, cli.toolchain.as_deref())?; + } + Commands::Containers(args) => { + let engine = get_container_engine(args.engine(), args.verbose())?; + args.run(engine)?; + } Commands::Clean(args) => { let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; args.run(engine)?; diff --git a/src/docker/custom.rs b/src/docker/custom.rs index 6bdafed0d..f46e0e83e 100644 --- a/src/docker/custom.rs +++ b/src/docker/custom.rs @@ -5,7 +5,7 @@ use crate::docker::Engine; use crate::{config::Config, docker, CargoMetadata, Target}; use crate::{errors::*, file, CommandExt, ToUtf8}; -use super::{image_name, parse_docker_opts}; +use super::{image_name, parse_docker_opts, path_hash}; #[derive(Debug, PartialEq, Eq)] pub enum Dockerfile<'a> { @@ -51,7 +51,7 @@ impl<'a> Dockerfile<'a> { ), ]); - let image_name = self.image_name(target_triple, metadata); + let image_name = self.image_name(target_triple, metadata)?; docker_build.args(["--tag", &image_name]); for (key, arg) in build_args.into_iter() { @@ -102,32 +102,25 @@ impl<'a> Dockerfile<'a> { Ok(image_name) } - pub fn image_name(&self, target_triple: &Target, metadata: &CargoMetadata) -> String { + pub fn image_name(&self, target_triple: &Target, metadata: &CargoMetadata) -> Result { match self { Dockerfile::File { name: Some(name), .. 
- } => name.to_string(), - _ => format!( + } => Ok(name.to_string()), + _ => Ok(format!( "cross-custom-{package_name}:{target_triple}-{path_hash}{custom}", package_name = metadata .workspace_root .file_name() .expect("workspace_root can't end in `..`") .to_string_lossy(), - path_hash = format!( - "{}", - const_sha1::sha1(&const_sha1::ConstBuffer::from_slice( - metadata.workspace_root.to_string_lossy().as_bytes() - )) - ) - .get(..5) - .expect("sha1 is expected to be at least 5 characters long"), + path_hash = path_hash(&metadata.workspace_root)?, custom = if matches!(self, Self::File { .. }) { "" } else { "-pre-build" } - ), + )), } } diff --git a/src/docker/engine.rs b/src/docker/engine.rs index aa5771090..d325d6e86 100644 --- a/src/docker/engine.rs +++ b/src/docker/engine.rs @@ -2,6 +2,7 @@ use std::env; use std::path::{Path, PathBuf}; use std::process::Command; +use crate::config::bool_from_envvar; use crate::errors::*; use crate::extensions::CommandExt; @@ -20,6 +21,7 @@ pub enum EngineType { pub struct Engine { pub kind: EngineType, pub path: PathBuf, + pub is_remote: bool, } impl Engine { @@ -32,7 +34,18 @@ impl Engine { pub fn from_path(path: PathBuf, verbose: bool) -> Result { let kind = get_engine_type(&path, verbose)?; - Ok(Engine { path, kind }) + let is_remote = env::var("CROSS_REMOTE") + .map(|s| bool_from_envvar(&s)) + .unwrap_or_default(); + Ok(Engine { + path, + kind, + is_remote, + }) + } + + pub fn needs_remote(&self) -> bool { + self.is_remote && self.kind == EngineType::Podman } } diff --git a/src/docker/local.rs b/src/docker/local.rs index d65987fda..01c3240b8 100644 --- a/src/docker/local.rs +++ b/src/docker/local.rs @@ -6,13 +6,14 @@ use super::shared::*; use crate::cargo::CargoMetadata; use crate::errors::Result; use crate::extensions::CommandExt; -use crate::file::ToUtf8; +use crate::file::{PathExt, ToUtf8}; use crate::{Config, Target}; use atty::Stream; use eyre::Context; #[allow(clippy::too_many_arguments)] // TODO: refactor pub(crate) fn 
run( + engine: &Engine, target: &Target, args: &[String], metadata: &CargoMetadata, @@ -23,13 +24,12 @@ pub(crate) fn run( docker_in_docker: bool, cwd: &Path, ) -> Result { - let engine = Engine::new(verbose)?; - let dirs = Directories::create(&engine, metadata, cwd, sysroot, docker_in_docker, verbose)?; + let dirs = Directories::create(engine, metadata, cwd, sysroot, docker_in_docker, verbose)?; let mut cmd = cargo_safe_command(uses_xargo); cmd.args(args); - let mut docker = subcommand(&engine, "run"); + let mut docker = subcommand(engine, "run"); docker.args(&["--userns", "host"]); docker_envvars(&mut docker, config, target)?; @@ -76,7 +76,7 @@ pub(crate) fn run( if let Some(ref nix_store) = dirs.nix_store { docker.args(&[ "-v", - &format!("{}:{}:Z", nix_store.to_utf8()?, nix_store.to_utf8()?), + &format!("{}:{}:Z", nix_store.to_utf8()?, nix_store.as_posix()?), ]); } @@ -88,7 +88,7 @@ pub(crate) fn run( } let mut image = image_name(config, target)?; if needs_custom_image(target, config) { - image = custom_image_build(target, config, metadata, dirs, &engine, verbose) + image = custom_image_build(target, config, metadata, dirs, engine, verbose) .wrap_err("when building custom image")? 
} diff --git a/src/docker/mod.rs b/src/docker/mod.rs index ab431d41c..f55a97802 100644 --- a/src/docker/mod.rs +++ b/src/docker/mod.rs @@ -1,6 +1,7 @@ mod custom; mod engine; mod local; +pub mod remote; mod shared; pub use self::engine::*; @@ -15,6 +16,7 @@ use crate::{Config, Target}; #[allow(clippy::too_many_arguments)] // TODO: refactor pub fn run( + engine: &Engine, target: &Target, args: &[String], metadata: &CargoMetadata, @@ -25,15 +27,31 @@ pub fn run( docker_in_docker: bool, cwd: &Path, ) -> Result { - local::run( - target, - args, - metadata, - config, - uses_xargo, - sysroot, - verbose, - docker_in_docker, - cwd, - ) + if engine.is_remote { + remote::run( + engine, + target, + args, + metadata, + config, + uses_xargo, + sysroot, + verbose, + docker_in_docker, + cwd, + ) + } else { + local::run( + engine, + target, + args, + metadata, + config, + uses_xargo, + sysroot, + verbose, + docker_in_docker, + cwd, + ) + } } diff --git a/src/docker/remote.rs b/src/docker/remote.rs new file mode 100644 index 000000000..9521edfcf --- /dev/null +++ b/src/docker/remote.rs @@ -0,0 +1,749 @@ +use std::io::Read; +use std::path::{Path, PathBuf}; +use std::process::ExitStatus; +use std::{env, fs}; + +use super::engine::Engine; +use super::shared::*; +use crate::cargo::CargoMetadata; +use crate::config::{bool_from_envvar, Config}; +use crate::errors::Result; +use crate::extensions::CommandExt; +use crate::file::{self, PathExt, ToUtf8}; +use crate::rustc::{self, VersionMetaExt}; +use crate::rustup; +use crate::temp; +use crate::{Host, Target}; +use atty::Stream; + +// the mount directory for the data volume. 
+pub const MOUNT_PREFIX: &str = "/cross"; + +struct DeleteVolume<'a>(&'a Engine, &'a VolumeId, bool); + +impl<'a> Drop for DeleteVolume<'a> { + fn drop(&mut self) { + if let VolumeId::Discard(id) = self.1 { + volume_rm(self.0, id, self.2).ok(); + } + } +} + +struct DeleteContainer<'a>(&'a Engine, &'a str, bool); + +impl<'a> Drop for DeleteContainer<'a> { + fn drop(&mut self) { + container_stop(self.0, self.1, self.2).ok(); + container_rm(self.0, self.1, self.2).ok(); + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum ContainerState { + Created, + Running, + Paused, + Restarting, + Dead, + Exited, + DoesNotExist, +} + +impl ContainerState { + pub fn new(state: &str) -> Result { + match state { + "created" => Ok(ContainerState::Created), + "running" => Ok(ContainerState::Running), + "paused" => Ok(ContainerState::Paused), + "restarting" => Ok(ContainerState::Restarting), + "dead" => Ok(ContainerState::Dead), + "exited" => Ok(ContainerState::Exited), + "" => Ok(ContainerState::DoesNotExist), + _ => eyre::bail!("unknown container state: got {state}"), + } + } + + pub fn is_stopped(&self) -> bool { + matches!(self, Self::Exited | Self::DoesNotExist) + } + + pub fn exists(&self) -> bool { + !matches!(self, Self::DoesNotExist) + } +} + +#[derive(Debug)] +enum VolumeId { + Keep(String), + Discard(String), +} + +impl VolumeId { + fn create(engine: &Engine, toolchain: &str, container: &str, verbose: bool) -> Result { + if volume_exists(engine, toolchain, verbose)? 
{ + Ok(Self::Keep(toolchain.to_string())) + } else { + Ok(Self::Discard(container.to_string())) + } + } +} + +impl AsRef for VolumeId { + fn as_ref(&self) -> &str { + match self { + Self::Keep(s) => s, + Self::Discard(s) => s, + } + } +} + +fn create_volume_dir( + engine: &Engine, + container: &str, + dir: &Path, + verbose: bool, +) -> Result { + // make our parent directory if needed + subcommand(engine, "exec") + .arg(container) + .args(&["sh", "-c", &format!("mkdir -p '{}'", dir.as_posix()?)]) + .run_and_get_status(verbose, false) + .map_err(Into::into) +} + +// copy files for a docker volume, for remote host support +fn copy_volume_files( + engine: &Engine, + container: &str, + src: &Path, + dst: &Path, + verbose: bool, +) -> Result { + subcommand(engine, "cp") + .arg("-a") + .arg(src.to_utf8()?) + .arg(format!("{container}:{}", dst.as_posix()?)) + .run_and_get_status(verbose, false) + .map_err(Into::into) +} + +fn is_cachedir_tag(path: &Path) -> Result { + let mut buffer = [b'0'; 43]; + let mut file = fs::OpenOptions::new().read(true).open(path)?; + file.read_exact(&mut buffer)?; + + Ok(&buffer == b"Signature: 8a477f597d28d172789f06886806bc55") +} + +fn is_cachedir(entry: &fs::DirEntry) -> bool { + // avoid any cached directories when copying + // see https://bford.info/cachedir/ + if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { + let path = entry.path().join("CACHEDIR.TAG"); + path.exists() && is_cachedir_tag(&path).unwrap_or(false) + } else { + false + } +} + +// copy files for a docker volume, for remote host support +fn copy_volume_files_nocache( + engine: &Engine, + container: &str, + src: &Path, + dst: &Path, + verbose: bool, +) -> Result { + // avoid any cached directories when copying + // see https://bford.info/cachedir/ + // SAFETY: safe, single-threaded execution. + let tempdir = unsafe { temp::TempDir::new()? 
}; + let temppath = tempdir.path(); + copy_dir(src, temppath, 0, |e, _| is_cachedir(e))?; + copy_volume_files(engine, container, temppath, dst, verbose) +} + +pub fn copy_volume_container_xargo( + engine: &Engine, + container: &str, + xargo_dir: &Path, + target: &Target, + mount_prefix: &Path, + verbose: bool, +) -> Result<()> { + // only need to copy the rustlib files for our current target. + let triple = target.triple(); + let relpath = Path::new("lib").join("rustlib").join(&triple); + let src = xargo_dir.join(&relpath); + let dst = mount_prefix.join("xargo").join(&relpath); + if Path::new(&src).exists() { + create_volume_dir(engine, container, dst.parent().unwrap(), verbose)?; + copy_volume_files(engine, container, &src, &dst, verbose)?; + } + + Ok(()) +} + +pub fn copy_volume_container_cargo( + engine: &Engine, + container: &str, + cargo_dir: &Path, + mount_prefix: &Path, + copy_registry: bool, + verbose: bool, +) -> Result<()> { + let dst = mount_prefix.join("cargo"); + let copy_registry = env::var("CROSS_REMOTE_COPY_REGISTRY") + .map(|s| bool_from_envvar(&s)) + .unwrap_or(copy_registry); + + if copy_registry { + copy_volume_files(engine, container, cargo_dir, &dst, verbose)?; + } else { + // can copy a limit subset of files: the rest is present. + create_volume_dir(engine, container, &dst, verbose)?; + for entry in fs::read_dir(cargo_dir)? { + let file = entry?; + let basename = file.file_name().to_utf8()?.to_string(); + if !basename.starts_with('.') && !matches!(basename.as_ref(), "git" | "registry") { + copy_volume_files(engine, container, &file.path(), &dst, verbose)?; + } + } + } + + Ok(()) +} + +// recursively copy a directory into another +fn copy_dir(src: &Path, dst: &Path, depth: u32, skip: Skip) -> Result<()> +where + Skip: Copy + Fn(&fs::DirEntry, u32) -> bool, +{ + for entry in fs::read_dir(src)? 
{ + let file = entry?; + if skip(&file, depth) { + continue; + } + + let src_path = file.path(); + let dst_path = dst.join(file.file_name()); + if file.file_type()?.is_file() { + fs::copy(&src_path, &dst_path)?; + } else { + fs::create_dir(&dst_path).ok(); + copy_dir(&src_path, &dst_path, depth + 1, skip)?; + } + } + + Ok(()) +} + +// copy over files needed for all targets in the toolchain that should never change +fn copy_volume_container_rust_base( + engine: &Engine, + container: &str, + sysroot: &Path, + mount_prefix: &Path, + verbose: bool, +) -> Result<()> { + // the rust toolchain is quite large, but most of it isn't needed + // we need the bin, libexec, and etc directories, and part of the lib directory. + let dst = mount_prefix.join("rust"); + let rustlib = Path::new("lib").join("rustlib"); + create_volume_dir(engine, container, &dst.join(&rustlib), verbose)?; + for basename in ["bin", "libexec", "etc"] { + let file = sysroot.join(basename); + copy_volume_files(engine, container, &file, &dst, verbose)?; + } + + // the lib directories are rather large, so we want only a subset. + // now, we use a temp directory for everything else in the libdir + // we can pretty safely assume we don't have symlinks here. + + // first, copy the shared libraries inside lib, all except rustlib. + // SAFETY: safe, single-threaded execution. + let tempdir = unsafe { temp::TempDir::new()? 
}; + let temppath = tempdir.path(); + fs::create_dir_all(&temppath.join(&rustlib))?; + copy_dir(&sysroot.join("lib"), &temppath.join("lib"), 0, |e, d| { + d == 0 && e.file_name() == "rustlib" + })?; + + // next, copy the src/etc directories inside rustlib + copy_dir( + &sysroot.join(&rustlib), + &temppath.join(&rustlib), + 0, + |e, d| d == 0 && !(e.file_name() == "src" || e.file_name() == "etc"), + )?; + copy_volume_files(engine, container, &temppath.join("lib"), &dst, verbose)?; + + Ok(()) +} + +fn copy_volume_container_rust_manifest( + engine: &Engine, + container: &str, + sysroot: &Path, + mount_prefix: &Path, + verbose: bool, +) -> Result<()> { + // copy over all the manifest files in rustlib + // these are small text files containing names/paths to toolchains + let dst = mount_prefix.join("rust"); + let rustlib = Path::new("lib").join("rustlib"); + + // SAFETY: safe, single-threaded execution. + let tempdir = unsafe { temp::TempDir::new()? }; + let temppath = tempdir.path(); + fs::create_dir_all(&temppath.join(&rustlib))?; + copy_dir( + &sysroot.join(&rustlib), + &temppath.join(&rustlib), + 0, + |e, d| d != 0 || e.file_type().map(|t| !t.is_file()).unwrap_or(true), + )?; + copy_volume_files(engine, container, &temppath.join("lib"), &dst, verbose)?; + + Ok(()) +} + +// copy over the toolchain for a specific triple +pub fn copy_volume_container_rust_triple( + engine: &Engine, + container: &str, + sysroot: &Path, + triple: &str, + mount_prefix: &Path, + skip_exists: bool, + verbose: bool, +) -> Result<()> { + // copy over the files for a specific triple + let dst = mount_prefix.join("rust"); + let rustlib = Path::new("lib").join("rustlib"); + let dst_rustlib = dst.join(&rustlib); + let src_toolchain = sysroot.join(&rustlib).join(triple); + let dst_toolchain = dst_rustlib.join(triple); + + // skip if the toolchain already exists. for the host toolchain + // or the first run of the target toolchain, we know it doesn't exist. 
+ let mut skip = false; + if skip_exists { + skip = subcommand(engine, "exec") + .arg(container) + .args(&[ + "bash", + "-c", + &format!("[[ -d '{}' ]]", dst_toolchain.as_posix()?), + ]) + .run_and_get_status(verbose, true)? + .success(); + } + if !skip { + copy_volume_files(engine, container, &src_toolchain, &dst_rustlib, verbose)?; + } + if !skip && skip_exists { + // this means we have a persistent data volume and we have a + // new target, meaning we might have new manifests as well. + copy_volume_container_rust_manifest(engine, container, sysroot, mount_prefix, verbose)?; + } + + Ok(()) +} + +pub fn copy_volume_container_rust( + engine: &Engine, + container: &str, + sysroot: &Path, + target: &Target, + mount_prefix: &Path, + skip_target: bool, + verbose: bool, +) -> Result<()> { + let target_triple = target.triple(); + let image_triple = Host::X86_64UnknownLinuxGnu.triple(); + + copy_volume_container_rust_base(engine, container, sysroot, mount_prefix, verbose)?; + copy_volume_container_rust_manifest(engine, container, sysroot, mount_prefix, verbose)?; + copy_volume_container_rust_triple( + engine, + container, + sysroot, + image_triple, + mount_prefix, + false, + verbose, + )?; + if !skip_target && target_triple != image_triple { + copy_volume_container_rust_triple( + engine, + container, + sysroot, + target_triple, + mount_prefix, + false, + verbose, + )?; + } + + Ok(()) +} + +fn run_and_get_status(engine: &Engine, args: &[&str], verbose: bool) -> Result { + command(engine) + .args(args) + .run_and_get_status(verbose, true) + .map_err(Into::into) +} + +pub fn volume_create(engine: &Engine, volume: &str, verbose: bool) -> Result { + run_and_get_status(engine, &["volume", "create", volume], verbose) +} + +pub fn volume_rm(engine: &Engine, volume: &str, verbose: bool) -> Result { + run_and_get_status(engine, &["volume", "rm", volume], verbose) +} + +pub fn volume_exists(engine: &Engine, volume: &str, verbose: bool) -> Result { + command(engine) + 
.args(&["volume", "inspect", volume]) + .run_and_get_output(verbose) + .map(|output| output.status.success()) + .map_err(Into::into) +} + +pub fn container_stop(engine: &Engine, container: &str, verbose: bool) -> Result { + run_and_get_status(engine, &["stop", container], verbose) +} + +pub fn container_rm(engine: &Engine, container: &str, verbose: bool) -> Result { + run_and_get_status(engine, &["rm", container], verbose) +} + +pub fn container_state(engine: &Engine, container: &str, verbose: bool) -> Result { + let stdout = command(engine) + .args(&["ps", "-a"]) + .args(&["--filter", &format!("name={container}")]) + .args(&["--format", "{{.State}}"]) + .run_and_get_stdout(verbose)?; + ContainerState::new(stdout.trim()) +} + +pub fn unique_toolchain_identifier(sysroot: &Path) -> Result { + // try to get the commit hash for the currently toolchain, if possible + // if not, get the default rustc and use the path hash for uniqueness + let commit_hash = if let Some(version) = rustup::rustc_version_string(sysroot)? 
{ + rustc::hash_from_version_string(&version, 1) + } else { + rustc::version_meta()?.commit_hash() + }; + + let toolchain_name = sysroot.file_name().unwrap().to_utf8()?; + let toolchain_hash = path_hash(sysroot)?; + Ok(format!( + "cross-{toolchain_name}-{toolchain_hash}-{commit_hash}" + )) +} + +// unique identifier for a given project +pub fn unique_container_identifier( + target: &Target, + metadata: &CargoMetadata, + dirs: &Directories, +) -> Result { + let workspace_root = &metadata.workspace_root; + let package = metadata + .packages + .iter() + .find(|p| p.manifest_path.parent().unwrap() == workspace_root) + .unwrap_or_else(|| { + metadata + .packages + .get(0) + .expect("should have at least 1 package") + }); + + let name = &package.name; + let triple = target.triple(); + let toolchain_id = unique_toolchain_identifier(&dirs.sysroot)?; + let project_hash = path_hash(&package.manifest_path)?; + Ok(format!("{toolchain_id}-{triple}-{name}-{project_hash}")) +} + +fn mount_path(val: &Path, verbose: bool) -> Result { + let host_path = file::canonicalize(val)?; + canonicalize_mount_path(&host_path, verbose) +} + +#[allow(clippy::too_many_arguments)] // TODO: refactor +pub(crate) fn run( + engine: &Engine, + target: &Target, + args: &[String], + metadata: &CargoMetadata, + config: &Config, + uses_xargo: bool, + sysroot: &Path, + verbose: bool, + docker_in_docker: bool, + cwd: &Path, +) -> Result { + let dirs = Directories::create(engine, metadata, cwd, sysroot, docker_in_docker, verbose)?; + + let mut cmd = cargo_safe_command(uses_xargo); + cmd.args(args); + + let mount_prefix = MOUNT_PREFIX; + + // the logic is broken into the following steps + // 1. get our unique identifiers and cleanup from a previous run. + // 2. if not using persistent volumes, create a data volume + // 3. start our container with the mounted data volume and all envvars + // 4. 
copy data into the data volume + // with persistent data volumes, copy just copy crate data and + // if not present, the toolchain for the current target. + // otherwise, copy the entire toolchain, cargo, and crate data + // if `CROSS_REMOTE_COPY_CACHE`, copy over the target dir as well + // 5. create symlinks for all mounted data + // ensure the paths are the same as local cross + // 6. execute our cargo command inside the container + // 7. copy data from target dir back to host + // 8. stop container and delete data volume + // + // we use structs that wrap the resources to ensure they're dropped + // in the correct order even on error, to ensure safe cleanup + + // 1. get our unique identifiers and cleanup from a previous run. + // this can happen if we didn't gracefully exit before + let toolchain_id = unique_toolchain_identifier(&dirs.sysroot)?; + let container = unique_container_identifier(target, metadata, &dirs)?; + let volume = VolumeId::create(engine, &toolchain_id, &container, verbose)?; + let state = container_state(engine, &container, verbose)?; + if !state.is_stopped() { + eprintln!("Warning: container {container} was running."); + container_stop(engine, &container, verbose)?; + } + if state.exists() { + eprintln!("Warning: container {container} was exited."); + container_rm(engine, &container, verbose)?; + } + if let VolumeId::Discard(ref id) = volume { + if volume_exists(engine, id, verbose)? { + eprintln!("Warning: temporary volume {container} existed."); + volume_rm(engine, id, verbose)?; + } + } + + // 2. create our volume to copy all our data over to + if let VolumeId::Discard(ref id) = volume { + volume_create(engine, id, verbose)?; + } + let _volume_deletter = DeleteVolume(engine, &volume, verbose); + + // 3. 
create our start container command here + let mut docker = subcommand(engine, "run"); + docker.args(&["--userns", "host"]); + docker.args(&["--name", &container]); + docker.args(&["-v", &format!("{}:{mount_prefix}", volume.as_ref())]); + docker_envvars(&mut docker, config, target)?; + + let mut volumes = vec![]; + let mount_volumes = docker_mount( + &mut docker, + metadata, + config, + target, + cwd, + verbose, + |_, val, verbose| mount_path(val, verbose), + |(src, dst)| volumes.push((src, dst)), + )?; + + docker_seccomp(&mut docker, engine.kind, target, metadata, verbose)?; + + // Prevent `bin` from being mounted inside the Docker container. + docker.args(&["-v", &format!("{mount_prefix}/cargo/bin")]); + + // When running inside NixOS or using Nix packaging we need to add the Nix + // Store to the running container so it can load the needed binaries. + if let Some(ref nix_store) = dirs.nix_store { + volumes.push((nix_store.to_utf8()?.to_string(), nix_store.to_path_buf())) + } + + docker.arg("-d"); + if atty::is(Stream::Stdin) && atty::is(Stream::Stdout) && atty::is(Stream::Stderr) { + docker.arg("-t"); + } + + docker + .arg(&image_name(config, target)?) + // ensure the process never exits until we stop it + .args(&["sh", "-c", "sleep infinity"]) + .run_and_get_status(verbose, true)?; + let _container_deletter = DeleteContainer(engine, &container, verbose); + + // 4. 
copy all mounted volumes over + let copy_cache = env::var("CROSS_REMOTE_COPY_CACHE") + .map(|s| bool_from_envvar(&s)) + .unwrap_or_default(); + let copy = |src, dst: &PathBuf| { + if copy_cache { + copy_volume_files(engine, &container, src, dst, verbose) + } else { + copy_volume_files_nocache(engine, &container, src, dst, verbose) + } + }; + let mount_prefix_path = mount_prefix.as_ref(); + if let VolumeId::Discard(_) = volume { + copy_volume_container_xargo( + engine, + &container, + &dirs.xargo, + target, + mount_prefix_path, + verbose, + )?; + copy_volume_container_cargo( + engine, + &container, + &dirs.cargo, + mount_prefix_path, + false, + verbose, + )?; + copy_volume_container_rust( + engine, + &container, + &dirs.sysroot, + target, + mount_prefix_path, + false, + verbose, + )?; + } else { + // need to copy over the target triple if it hasn't been previously copied + copy_volume_container_rust_triple( + engine, + &container, + &dirs.sysroot, + target.triple(), + mount_prefix_path, + true, + verbose, + )?; + } + let mount_root = if mount_volumes { + // cannot panic: absolute unix path, must have root + let rel_mount_root = dirs.mount_root.strip_prefix("/").unwrap(); + let mount_root = mount_prefix_path.join(rel_mount_root); + if rel_mount_root != PathBuf::new() { + create_volume_dir(engine, &container, mount_root.parent().unwrap(), verbose)?; + } + mount_root + } else { + mount_prefix_path.join("project") + }; + copy(&dirs.host_root, &mount_root)?; + + let mut copied = vec![ + (&dirs.xargo, mount_prefix_path.join("xargo")), + (&dirs.cargo, mount_prefix_path.join("cargo")), + (&dirs.sysroot, mount_prefix_path.join("rust")), + (&dirs.host_root, mount_root.clone()), + ]; + let mut to_symlink = vec![]; + let target_dir = file::canonicalize(&dirs.target)?; + let target_dir = if let Ok(relpath) = target_dir.strip_prefix(&dirs.host_root) { + // target dir is in the project, just symlink it in + let target_dir = mount_root.join(relpath); + 
to_symlink.push((target_dir.clone(), "/target".to_string())); + target_dir + } else { + // outside project, need to copy the target data over + // only do if we're copying over cached files. + let target_dir = mount_prefix_path.join("target"); + if copy_cache { + copy(&dirs.target, &target_dir)?; + } else { + create_volume_dir(engine, &container, &target_dir, verbose)?; + } + + copied.push((&dirs.target, target_dir.clone())); + target_dir + }; + for (src, dst) in volumes.iter() { + let src: &Path = src.as_ref(); + if let Some((psrc, pdst)) = copied.iter().find(|(p, _)| src.starts_with(p)) { + // path has already been copied over + let relpath = src.strip_prefix(psrc).unwrap(); + to_symlink.push((pdst.join(relpath), dst.as_posix()?)); + } else { + let rel_dst = dst.strip_prefix("/").unwrap(); + let mount_dst = mount_prefix_path.join(rel_dst); + if rel_dst != PathBuf::new() { + create_volume_dir(engine, &container, mount_dst.parent().unwrap(), verbose)?; + } + copy(src, &mount_dst)?; + } + } + + // 5. create symlinks for copied data + let mut symlink = vec!["set -e pipefail".to_string()]; + if verbose { + symlink.push("set -x".to_string()); + } + symlink.push(format!( + "chown -R {uid}:{gid} {mount_prefix}/*", + uid = user_id(), + gid = group_id(), + )); + // need a simple script to add symlinks, but not override existing files. 
+ symlink.push(format!( + "prefix=\"{mount_prefix}\" + +symlink_recurse() {{ + for f in \"${{1}}\"/*; do + dst=${{f#\"$prefix\"}} + if [ -f \"${{dst}}\" ]; then + echo \"invalid: got unexpected file at ${{dst}}\" 1>&2 + exit 1 + elif [ -d \"${{dst}}\" ]; then + symlink_recurse \"${{f}}\" + else + ln -s \"${{f}}\" \"${{dst}}\" + fi + done +}} + +symlink_recurse \"${{prefix}}\" +" + )); + for (src, dst) in to_symlink { + symlink.push(format!("ln -s \"{}\" \"{}\"", src.as_posix()?, dst)); + } + subcommand(engine, "exec") + .arg(&container) + .args(&["sh", "-c", &symlink.join("\n")]) + .run_and_get_status(verbose, false) + .map_err::(Into::into)?; + + // 6. execute our cargo command inside the container + let mut docker = subcommand(engine, "exec"); + docker_user_id(&mut docker, engine.kind); + docker_cwd(&mut docker, metadata, &dirs, cwd, mount_volumes)?; + docker.arg(&container); + docker.args(&["sh", "-c", &format!("PATH=$PATH:/rust/bin {:?}", cmd)]); + let status = docker + .run_and_get_status(verbose, false) + .map_err(Into::into); + + // 7. 
copy data from our target dir back to host + subcommand(engine, "cp") + .arg("-a") + .arg(&format!("{container}:{}", target_dir.as_posix()?)) + .arg(&dirs.target.parent().unwrap()) + .run_and_get_status(verbose, false) + .map_err::(Into::into)?; + + status +} diff --git a/src/docker/shared.rs b/src/docker/shared.rs index ac3b51a17..febe1f1c7 100644 --- a/src/docker/shared.rs +++ b/src/docker/shared.rs @@ -5,15 +5,18 @@ use std::{env, fs}; use super::custom::Dockerfile; use super::engine::*; -use crate::cargo::CargoMetadata; +use crate::cargo::{cargo_metadata_with_args, CargoMetadata}; use crate::config::Config; use crate::errors::*; use crate::extensions::{CommandExt, SafeCommand}; use crate::file::{self, write_file, PathExt, ToUtf8}; use crate::id; +use crate::rustc::{self, VersionMetaExt}; use crate::Target; pub const CROSS_IMAGE: &str = "ghcr.io/cross-rs"; +// note: this is the most common base image for our images +pub const UBUNTU_BASE: &str = "ubuntu:16.04"; const DOCKER_IMAGES: &[&str] = &include!(concat!(env!("OUT_DIR"), "/docker-images.rs")); // secured profile based off the docker documentation for denied syscalls: @@ -29,6 +32,7 @@ pub struct Directories { pub target: PathBuf, pub nix_store: Option, pub host_root: PathBuf, + // both mount fields are WSL paths on windows: they already are POSIX paths pub mount_root: PathBuf, pub mount_cwd: PathBuf, pub sysroot: PathBuf, @@ -130,7 +134,12 @@ fn create_target_dir(path: &Path) -> Result<()> { } pub fn command(engine: &Engine) -> Command { - Command::new(&engine.path) + let mut command = Command::new(&engine.path); + if engine.needs_remote() { + // if we're using podman and not podman-remote, need `--remote`. 
+ command.arg("--remote"); + } + command } pub fn subcommand(engine: &Engine, subcommand: &str) -> Command { @@ -139,8 +148,28 @@ pub fn subcommand(engine: &Engine, subcommand: &str) -> Command { command } +pub fn get_package_info( + engine: &Engine, + target: &str, + channel: Option<&str>, + docker_in_docker: bool, + verbose: bool, +) -> Result<(Target, CargoMetadata, Directories)> { + let target_list = rustc::target_list(false)?; + let target = Target::from(target, &target_list); + let metadata = cargo_metadata_with_args(None, None, verbose)? + .ok_or(eyre::eyre!("unable to get project metadata"))?; + let cwd = std::env::current_dir()?; + let host_meta = rustc::version_meta()?; + let host = host_meta.host(); + let sysroot = rustc::get_sysroot(&host, &target, channel, verbose)?.1; + let dirs = Directories::create(engine, &metadata, &cwd, &sysroot, docker_in_docker, verbose)?; + + Ok((target, metadata, dirs)) +} + /// Register binfmt interpreters -pub(crate) fn register(target: &Target, verbose: bool) -> Result<()> { +pub(crate) fn register(engine: &Engine, target: &Target, verbose: bool) -> Result<()> { let cmd = if target.is_windows() { // https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.html "mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc && \ @@ -150,12 +179,11 @@ pub(crate) fn register(target: &Target, verbose: bool) -> Result<()> { binfmt-support qemu-user-static" }; - let engine = Engine::new(verbose)?; - subcommand(&engine, "run") + subcommand(engine, "run") .args(&["--userns", "host"]) .arg("--privileged") .arg("--rm") - .arg("ubuntu:16.04") + .arg(UBUNTU_BASE) .args(&["sh", "-c", cmd]) .run(verbose, false) .map_err(Into::into) @@ -457,7 +485,7 @@ pub(crate) fn custom_image_build( ) .wrap_err("when pre-building") .with_note(|| format!("CROSS_CMD={}", pre_build.join("\n")))?; - image = custom.image_name(target, metadata); + image = custom.image_name(target, metadata)?; } } Ok(image) @@ -581,6 +609,19 @@ impl MountFinder { } } +fn 
path_digest(path: &Path) -> Result { + let buffer = const_sha1::ConstBuffer::from_slice(path.to_utf8()?.as_bytes()); + Ok(const_sha1::sha1(&buffer)) +} + +pub fn path_hash(path: &Path) -> Result { + Ok(path_digest(path)? + .to_string() + .get(..5) + .expect("sha1 is expected to be at least 5 characters long") + .to_string()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/file.rs b/src/file.rs index 7d369f322..b6e0df6ab 100644 --- a/src/file.rs +++ b/src/file.rs @@ -75,6 +75,7 @@ fn read_(path: &Path) -> Result { pub fn canonicalize(path: impl AsRef) -> Result { _canonicalize(path.as_ref()) + .wrap_err_with(|| format!("when canonicalizing path `{:?}`", path.as_ref())) } fn _canonicalize(path: &Path) -> Result { diff --git a/src/lib.rs b/src/lib.rs index 56a5e3739..cbf23ecb5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,7 +27,7 @@ mod extensions; mod file; mod id; mod interpreter; -mod rustc; +pub mod rustc; mod rustup; pub mod temp; @@ -43,11 +43,11 @@ use serde::Deserialize; pub use self::cargo::{cargo_command, cargo_metadata_with_args, CargoMetadata, Subcommand}; use self::cross_toml::CrossToml; use self::errors::Context; -use self::rustc::{TargetList, VersionMetaExt}; pub use self::errors::{install_panic_hook, install_termination_hook, Result}; pub use self::extensions::{CommandExt, OutputExt}; -pub use self::file::ToUtf8; +pub use self::file::{pretty_path, ToUtf8}; +pub use self::rustc::{TargetList, VersionMetaExt}; pub const CROSS_LABEL_DOMAIN: &str = "org.cross-rs"; @@ -116,7 +116,7 @@ impl Host { } /// Returns the [`Target`] as target triple string - fn triple(&self) -> &str { + pub fn triple(&self) -> &str { match self { Host::X86_64AppleDarwin => "x86_64-apple-darwin", Host::Aarch64AppleDarwin => "aarch64-apple-darwin", @@ -315,7 +315,7 @@ impl std::fmt::Display for Target { } impl Target { - fn from(triple: &str, target_list: &TargetList) -> Target { + pub fn from(triple: &str, target_list: &TargetList) -> Target { if 
target_list.contains(triple) { Target::new_built_in(triple) } else { @@ -362,8 +362,7 @@ pub fn run() -> Result { .iter() .any(|a| a == "--verbose" || a == "-v" || a == "-vv"); - let host_version_meta = - rustc_version::version_meta().wrap_err("couldn't fetch the `rustc` version")?; + let host_version_meta = rustc::version_meta()?; let cwd = std::env::current_dir()?; if let Some(metadata) = cargo_metadata_with_args(None, Some(&args), verbose)? { let host = host_version_meta.host(); @@ -384,22 +383,8 @@ pub fn run() -> Result { }; if image_exists && host.is_supported(Some(&target)) { - let mut sysroot = rustc::sysroot(&host, &target, verbose)?; - let default_toolchain = sysroot - .file_name() - .and_then(|file_name| file_name.to_str()) - .ok_or_else(|| eyre::eyre!("couldn't get toolchain name"))?; - let toolchain = if let Some(channel) = args.channel { - [channel] - .iter() - .map(|c| c.as_str()) - .chain(default_toolchain.splitn(2, '-').skip(1)) - .collect::>() - .join("-") - } else { - default_toolchain.to_string() - }; - sysroot.set_file_name(&toolchain); + let (toolchain, sysroot) = + rustc::get_sysroot(&host, &target, args.channel.as_deref(), verbose)?; let mut is_nightly = toolchain.contains("nightly"); let installed_toolchains = rustup::installed_toolchains(verbose)?; @@ -495,15 +480,17 @@ pub fn run() -> Result { if target.needs_docker() && args.subcommand.map(|sc| sc.needs_docker()).unwrap_or(false) { + let engine = docker::Engine::new(verbose)?; if host_version_meta.needs_interpreter() && needs_interpreter && target.needs_interpreter() && !interpreter::is_registered(&target)? { - docker::register(&target, verbose)? + docker::register(&engine, &target, verbose)? 
} return docker::run( + &engine, &target, &filtered_args, &metadata, diff --git a/src/rustc.rs b/src/rustc.rs index 1cdfaf2d8..e04aca3e3 100644 --- a/src/rustc.rs +++ b/src/rustc.rs @@ -21,6 +21,7 @@ impl TargetList { pub trait VersionMetaExt { fn host(&self) -> Host; fn needs_interpreter(&self) -> bool; + fn commit_hash(&self) -> String; } impl VersionMetaExt for VersionMeta { @@ -31,6 +32,48 @@ impl VersionMetaExt for VersionMeta { fn needs_interpreter(&self) -> bool { self.semver < Version::new(1, 19, 0) } + + fn commit_hash(&self) -> String { + self.commit_hash + .as_ref() + .map(|x| short_commit_hash(x)) + .unwrap_or_else(|| hash_from_version_string(&self.short_version_string, 2)) + } +} + +fn short_commit_hash(hash: &str) -> String { + // short version hashes are always 9 digits + // https://github.com/rust-lang/cargo/pull/10579 + const LENGTH: usize = 9; + + hash.get(..LENGTH) + .unwrap_or_else(|| panic!("commit hash must be at least {LENGTH} characters long")) + .to_string() +} + +pub fn hash_from_version_string(version: &str, index: usize) -> String { + let is_hash = |x: &str| x.chars().all(|c| c.is_digit(16)); + let is_date = |x: &str| x.chars().all(|c| matches!(c, '-' | '0'..='9')); + + // the version can be one of two forms: + // multirust channel string: `"1.61.0 (fe5b13d68 2022-05-18)"` + // short version string: `"rustc 1.61.0 (fe5b13d68 2022-05-18)"` + // want to extract the commit hash if we can, if not, just hash the string. + if let Some((commit, date)) = version + .splitn(index + 1, ' ') + .nth(index) + .and_then(|meta| meta.strip_prefix('(')) + .and_then(|meta| meta.strip_suffix(')')) + .and_then(|meta| meta.split_once(' ')) + { + if is_hash(commit) && is_date(date) { + return short_commit_hash(commit); + } + } + + // fallback: can't extract the hash. just create a hash of the version string. 
+ let buffer = const_sha1::ConstBuffer::from_slice(version.as_bytes()); + short_commit_hash(&const_sha1::sha1(&buffer).to_string()) } pub fn rustc_command() -> Command { @@ -60,3 +103,50 @@ pub fn sysroot(host: &Host, target: &Target, verbose: bool) -> Result { Ok(PathBuf::from(stdout)) } + +pub fn get_sysroot( + host: &Host, + target: &Target, + channel: Option<&str>, + verbose: bool, +) -> Result<(String, PathBuf)> { + let mut sysroot = sysroot(host, target, verbose)?; + let default_toolchain = sysroot + .file_name() + .and_then(|file_name| file_name.to_str()) + .ok_or_else(|| eyre::eyre!("couldn't get toolchain name"))?; + let toolchain = if let Some(channel) = channel { + [channel] + .iter() + .cloned() + .chain(default_toolchain.splitn(2, '-').skip(1)) + .collect::>() + .join("-") + } else { + default_toolchain.to_string() + }; + sysroot.set_file_name(&toolchain); + + Ok((toolchain, sysroot)) +} + +pub fn version_meta() -> Result { + rustc_version::version_meta().wrap_err("couldn't fetch the `rustc` version") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn hash_from_rustc() { + assert_eq!( + hash_from_version_string("1.61.0 (fe5b13d68 2022-05-18)", 1), + "fe5b13d68" + ); + assert_eq!( + hash_from_version_string("rustc 1.61.0 (fe5b13d68 2022-05-18)", 2), + "fe5b13d68" + ); + } +} diff --git a/src/rustup.rs b/src/rustup.rs index f4c93a798..ed54412d4 100644 --- a/src/rustup.rs +++ b/src/rustup.rs @@ -1,4 +1,4 @@ -use std::path::Path; +use std::path::{Path, PathBuf}; use std::process::Command; use rustc_version::{Channel, Version}; @@ -123,25 +123,35 @@ fn rustc_channel(version: &Version) -> Result { } } -pub fn rustc_version(toolchain_path: &Path) -> Result> { - let path = toolchain_path.join("lib/rustlib/multirust-channel-manifest.toml"); +fn multirust_channel_manifest_path(toolchain_path: &Path) -> PathBuf { + toolchain_path.join("lib/rustlib/multirust-channel-manifest.toml") +} + +pub fn rustc_version_string(toolchain_path: &Path) -> Result> { 
+ let path = multirust_channel_manifest_path(toolchain_path); if path.exists() { let contents = std::fs::read(&path).wrap_err_with(|| format!("couldn't open file `{path:?}`"))?; let manifest: toml::value::Table = toml::from_slice(&contents)?; - if let Some(rust_version) = manifest + return Ok(manifest .get("pkg") .and_then(|pkg| pkg.get("rust")) .and_then(|rust| rust.get("version")) .and_then(|version| version.as_str()) - { - // Field is `"{version} ({commit} {date})"` - if let Some((version, meta)) = rust_version.split_once(' ') { - let version = Version::parse(version) - .wrap_err_with(|| format!("invalid rust version found in {path:?}"))?; - let channel = rustc_channel(&version)?; - return Ok(Some((version, channel, meta.to_owned()))); - } + .map(|version| version.to_string())); + } + Ok(None) +} + +pub fn rustc_version(toolchain_path: &Path) -> Result> { + let path = multirust_channel_manifest_path(toolchain_path); + if let Some(rust_version) = rustc_version_string(toolchain_path)? { + // Field is `"{version} ({commit} {date})"` + if let Some((version, meta)) = rust_version.split_once(' ') { + let version = Version::parse(version) + .wrap_err_with(|| format!("invalid rust version found in {path:?}"))?; + let channel = rustc_channel(&version)?; + return Ok(Some((version, channel, meta.to_owned()))); } } Ok(None) diff --git a/xtask/src/build_docker_image.rs b/xtask/src/build_docker_image.rs index bfc4e1d1e..c09a0e65d 100644 --- a/xtask/src/build_docker_image.rs +++ b/xtask/src/build_docker_image.rs @@ -1,8 +1,8 @@ -use std::{path::Path, process::Command}; +use std::path::Path; use clap::Args; use color_eyre::Section; -use cross::{CommandExt, ToUtf8}; +use cross::{docker, CommandExt, ToUtf8}; use std::fmt::Write; #[derive(Args, Debug)] @@ -15,14 +15,14 @@ pub struct BuildDockerImage { #[clap(long)] tag: Option, /// Repository name for image. 
- #[clap(long, default_value = cross::docker::CROSS_IMAGE)] + #[clap(long, default_value = docker::CROSS_IMAGE)] repository: String, /// Newline separated labels #[clap(long, env = "LABELS")] labels: Option, /// Provide verbose diagnostic output. #[clap(short, long)] - verbose: bool, + pub verbose: bool, /// Print but do not execute the build commands. #[clap(long)] dry_run: bool, @@ -95,7 +95,7 @@ pub fn build_docker_image( mut targets, .. }: BuildDockerImage, - engine: &Path, + engine: &docker::Engine, ) -> cross::Result<()> { let metadata = cross::cargo_metadata_with_args( Some(Path::new(env!("CARGO_MANIFEST_DIR"))), @@ -144,7 +144,7 @@ pub fn build_docker_image( if gha && targets.len() > 1 { println!("::group::Build {target}"); } - let mut docker_build = Command::new(engine); + let mut docker_build = docker::command(engine); docker_build.args(&["buildx", "build"]); docker_build.current_dir(&docker_root); diff --git a/xtask/src/main.rs b/xtask/src/main.rs index 74a7dac0a..89ad69997 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -7,10 +7,9 @@ pub mod install_git_hooks; pub mod target_info; pub mod util; -use std::path::PathBuf; - use ci::CiJob; use clap::{CommandFactory, Parser, Subcommand}; +use cross::docker; use util::ImageTarget; use self::build_docker_image::BuildDockerImage; @@ -66,11 +65,11 @@ pub fn main() -> cross::Result<()> { let cli = Cli::parse(); match cli.command { Commands::TargetInfo(args) => { - let engine = get_container_engine(args.engine.as_deref())?; + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; target_info::target_info(args, &engine)?; } Commands::BuildDockerImage(args) => { - let engine = get_container_engine(args.engine.as_deref())?; + let engine = get_container_engine(args.engine.as_deref(), args.verbose)?; build_docker_image::build_docker_image(args, &engine)?; } Commands::InstallGitHooks(args) => { @@ -88,10 +87,11 @@ pub fn main() -> cross::Result<()> { Ok(()) } -fn get_container_engine(engine: 
Option<&str>) -> Result { - if let Some(ce) = engine { - which::which(ce) +fn get_container_engine(engine: Option<&str>, verbose: bool) -> cross::Result { + let engine = if let Some(ce) = engine { + which::which(ce)? } else { - cross::docker::get_container_engine() - } + docker::get_container_engine()? + }; + docker::Engine::from_path(engine, verbose) } diff --git a/xtask/src/target_info.rs b/xtask/src/target_info.rs index 8ed54d3f1..d5493d08e 100644 --- a/xtask/src/target_info.rs +++ b/xtask/src/target_info.rs @@ -1,12 +1,8 @@ -use std::{ - collections::BTreeMap, - path::Path, - process::{Command, Stdio}, -}; +use std::{collections::BTreeMap, process::Stdio}; use crate::util::{format_repo, pull_image}; use clap::Args; -use cross::CommandExt; +use cross::{docker, CommandExt}; // Store raw text data in the binary so we don't need a data directory // when extracting all targets, or running our target info script. @@ -18,7 +14,7 @@ pub struct TargetInfo { targets: Vec, /// Provide verbose diagnostic output. #[clap(short, long)] - verbose: bool, + pub verbose: bool, /// Image registry. #[clap(long, default_value_t = String::from("ghcr.io"))] registry: String, @@ -34,7 +30,7 @@ pub struct TargetInfo { } fn image_info( - engine: &Path, + engine: &docker::Engine, target: &crate::ImageTarget, image: &str, tag: &str, @@ -45,7 +41,7 @@ fn image_info( pull_image(engine, image, verbose)?; } - let mut command = Command::new(engine); + let mut command = docker::command(engine); command.arg("run"); command.arg("--rm"); command.args(&["-e", &format!("TARGET={}", target.triplet)]); @@ -73,7 +69,7 @@ pub fn target_info( tag, .. 
}: TargetInfo, - engine: &Path, + engine: &docker::Engine, ) -> cross::Result<()> { let matrix = crate::util::get_matrix(); let test_map: BTreeMap = matrix diff --git a/xtask/src/util.rs b/xtask/src/util.rs index 598b68d0b..1aa7e6720 100644 --- a/xtask/src/util.rs +++ b/xtask/src/util.rs @@ -1,7 +1,4 @@ -use std::path::Path; -use std::process::Command; - -use cross::CommandExt; +use cross::{docker, CommandExt}; use once_cell::sync::OnceCell; use serde::Deserialize; @@ -84,9 +81,8 @@ pub fn format_repo(registry: &str, repository: &str) -> String { output } -pub fn pull_image(engine: &Path, image: &str, verbose: bool) -> cross::Result<()> { - let mut command = Command::new(engine); - command.arg("pull"); +pub fn pull_image(engine: &docker::Engine, image: &str, verbose: bool) -> cross::Result<()> { + let mut command = docker::subcommand(engine, "pull"); command.arg(image); let out = command.run_and_get_output(verbose)?; command.status_result(verbose, out.status, Some(&out))?; From b11b7bad728c4aaf271e3d2d22a9f6e0df634b55 Mon Sep 17 00:00:00 2001 From: Alex Huszagh Date: Wed, 22 Jun 2022 10:26:41 -0500 Subject: [PATCH 3/4] Add support for `cargo clean` on remote hosts. Required to patch #724 without deleting the entire volume for persistent data volumes. A few changes were required: the entire `/cross` mount prefix must be owned by the user so `/cross/target` can be removed. Next, we use the full path to the mounted target directory, rather than the symlink, since `cargo clean` would just delete the symlink. Finally, we've added `cargo clean` to a list of known subcommands, and it only needs docker if we use a remote host. 
--- src/bin/cross-util.rs | 2 +- src/cargo.rs | 14 ++++++-- src/docker/engine.rs | 16 +++++---- src/docker/remote.rs | 78 ++++++++++++++++++++++++++++++------------- src/lib.rs | 21 +++++++++--- xtask/src/main.rs | 2 +- 6 files changed, 95 insertions(+), 38 deletions(-) diff --git a/src/bin/cross-util.rs b/src/bin/cross-util.rs index 10b705bbf..5f3919faa 100644 --- a/src/bin/cross-util.rs +++ b/src/bin/cross-util.rs @@ -52,7 +52,7 @@ fn get_container_engine(engine: Option<&str>, verbose: bool) -> cross::Result cross::Result<()> { diff --git a/src/cargo.rs b/src/cargo.rs index 860c4bed6..8001f3466 100644 --- a/src/cargo.rs +++ b/src/cargo.rs @@ -19,11 +19,20 @@ pub enum Subcommand { Clippy, Metadata, List, + Clean, } impl Subcommand { - pub fn needs_docker(self) -> bool { - !matches!(self, Subcommand::Other | Subcommand::List) + pub fn needs_docker(self, is_remote: bool) -> bool { + match self { + Subcommand::Other | Subcommand::List => false, + Subcommand::Clean if !is_remote => false, + _ => true, + } + } + + pub fn needs_host(self, is_remote: bool) -> bool { + self == Subcommand::Clean && is_remote } pub fn needs_interpreter(self) -> bool { @@ -40,6 +49,7 @@ impl<'a> From<&'a str> for Subcommand { match s { "b" | "build" => Subcommand::Build, "c" | "check" => Subcommand::Check, + "clean" => Subcommand::Clean, "doc" => Subcommand::Doc, "r" | "run" => Subcommand::Run, "rustc" => Subcommand::Rustc, diff --git a/src/docker/engine.rs b/src/docker/engine.rs index d325d6e86..7c1f6924c 100644 --- a/src/docker/engine.rs +++ b/src/docker/engine.rs @@ -25,18 +25,16 @@ pub struct Engine { } impl Engine { - pub fn new(verbose: bool) -> Result { + pub fn new(is_remote: Option, verbose: bool) -> Result { let path = get_container_engine() .map_err(|_| eyre::eyre!("no container engine found")) .with_suggestion(|| "is docker or podman installed?")?; - Self::from_path(path, verbose) + Self::from_path(path, is_remote, verbose) } - pub fn from_path(path: PathBuf, verbose: bool) -> 
Result { + pub fn from_path(path: PathBuf, is_remote: Option, verbose: bool) -> Result { let kind = get_engine_type(&path, verbose)?; - let is_remote = env::var("CROSS_REMOTE") - .map(|s| bool_from_envvar(&s)) - .unwrap_or_default(); + let is_remote = is_remote.unwrap_or_else(Self::is_remote); Ok(Engine { path, kind, @@ -47,6 +45,12 @@ impl Engine { pub fn needs_remote(&self) -> bool { self.is_remote && self.kind == EngineType::Podman } + + pub fn is_remote() -> bool { + env::var("CROSS_REMOTE") + .map(|s| bool_from_envvar(&s)) + .unwrap_or_default() + } } // determine if the container engine is docker. this fixes issues with diff --git a/src/docker/remote.rs b/src/docker/remote.rs index 9521edfcf..21094dc5b 100644 --- a/src/docker/remote.rs +++ b/src/docker/remote.rs @@ -146,6 +146,19 @@ fn is_cachedir(entry: &fs::DirEntry) -> bool { } } +fn container_path_exists( + engine: &Engine, + container: &str, + path: &Path, + verbose: bool, +) -> Result { + Ok(subcommand(engine, "exec") + .arg(container) + .args(&["bash", "-c", &format!("[[ -d '{}' ]]", path.as_posix()?)]) + .run_and_get_status(verbose, true)? + .success()) +} + // copy files for a docker volume, for remote host support fn copy_volume_files_nocache( engine: &Engine, @@ -329,15 +342,7 @@ pub fn copy_volume_container_rust_triple( // or the first run of the target toolchain, we know it doesn't exist. let mut skip = false; if skip_exists { - skip = subcommand(engine, "exec") - .arg(container) - .args(&[ - "bash", - "-c", - &format!("[[ -d '{}' ]]", dst_toolchain.as_posix()?), - ]) - .run_and_get_status(verbose, true)? 
- .success(); + skip = container_path_exists(engine, container, &dst_toolchain, verbose)?; } if !skip { copy_volume_files(engine, container, &src_toolchain, &dst_rustlib, verbose)?; @@ -490,9 +495,6 @@ pub(crate) fn run( ) -> Result { let dirs = Directories::create(engine, metadata, cwd, sysroot, docker_in_docker, verbose)?; - let mut cmd = cargo_safe_command(uses_xargo); - cmd.args(args); - let mount_prefix = MOUNT_PREFIX; // the logic is broken into the following steps @@ -654,10 +656,7 @@ pub(crate) fn run( let mut to_symlink = vec![]; let target_dir = file::canonicalize(&dirs.target)?; let target_dir = if let Ok(relpath) = target_dir.strip_prefix(&dirs.host_root) { - // target dir is in the project, just symlink it in - let target_dir = mount_root.join(relpath); - to_symlink.push((target_dir.clone(), "/target".to_string())); - target_dir + mount_root.join(relpath) } else { // outside project, need to copy the target data over // only do if we're copying over cached files. @@ -687,13 +686,43 @@ pub(crate) fn run( } } + // `clean` doesn't handle symlinks: it will just unlink the target + // directory, so we should just substitute it our target directory + // for it. we'll still have the same end behavior + let mut final_args = vec![]; + let mut iter = args.iter().cloned(); + let mut has_target_dir = false; + let target_dir_string = target_dir.to_utf8()?.to_string(); + while let Some(arg) = iter.next() { + if arg == "--target-dir" { + has_target_dir = true; + final_args.push(arg); + if iter.next().is_some() { + final_args.push(target_dir_string.clone()); + } + } else if arg.starts_with("--target-dir=") { + has_target_dir = true; + if arg.split_once('=').is_some() { + final_args.push(format!("--target-dir={target_dir_string}")); + } + } else { + final_args.push(arg); + } + } + if !has_target_dir { + final_args.push("--target-dir".to_string()); + final_args.push(target_dir_string); + } + let mut cmd = cargo_safe_command(uses_xargo); + cmd.args(final_args); + // 5. 
create symlinks for copied data let mut symlink = vec!["set -e pipefail".to_string()]; if verbose { symlink.push("set -x".to_string()); } symlink.push(format!( - "chown -R {uid}:{gid} {mount_prefix}/*", + "chown -R {uid}:{gid} {mount_prefix}", uid = user_id(), gid = group_id(), )); @@ -738,12 +767,15 @@ symlink_recurse \"${{prefix}}\" .map_err(Into::into); // 7. copy data from our target dir back to host - subcommand(engine, "cp") - .arg("-a") - .arg(&format!("{container}:{}", target_dir.as_posix()?)) - .arg(&dirs.target.parent().unwrap()) - .run_and_get_status(verbose, false) - .map_err::(Into::into)?; + // this might not exist if we ran `clean`. + if container_path_exists(engine, &container, &target_dir, verbose)? { + subcommand(engine, "cp") + .arg("-a") + .arg(&format!("{container}:{}", target_dir.as_posix()?)) + .arg(&dirs.target.parent().unwrap()) + .run_and_get_status(verbose, false) + .map_err::(Into::into)?; + } status } diff --git a/src/lib.rs b/src/lib.rs index cbf23ecb5..d3fb72d33 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -478,9 +478,13 @@ pub fn run() -> Result { filtered_args.push("-Zbuild-std".to_string()); } - if target.needs_docker() && args.subcommand.map(|sc| sc.needs_docker()).unwrap_or(false) - { - let engine = docker::Engine::new(verbose)?; + let is_remote = docker::Engine::is_remote(); + let needs_docker = args + .subcommand + .map(|sc| sc.needs_docker(is_remote)) + .unwrap_or(false); + if target.needs_docker() && needs_docker { + let engine = docker::Engine::new(Some(is_remote), verbose)?; if host_version_meta.needs_interpreter() && needs_interpreter && target.needs_interpreter() @@ -489,7 +493,7 @@ pub fn run() -> Result { docker::register(&engine, &target, verbose)? 
} - return docker::run( + let status = docker::run( &engine, &target, &filtered_args, @@ -500,7 +504,14 @@ pub fn run() -> Result { verbose, args.docker_in_docker, &cwd, - ); + )?; + let needs_host = args + .subcommand + .map(|sc| sc.needs_host(is_remote)) + .unwrap_or(false); + if !(status.success() && needs_host) { + return Ok(status); + } } } } diff --git a/xtask/src/main.rs b/xtask/src/main.rs index 89ad69997..b593a7e25 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -93,5 +93,5 @@ fn get_container_engine(engine: Option<&str>, verbose: bool) -> cross::Result Date: Wed, 22 Jun 2022 16:02:45 -0500 Subject: [PATCH 4/4] Add file fingerprint to copy/remove only required files. Updates the persistent volume using a fingerprint of all files in the project, skipping any cache directories by default. If the file modified date has changed, or the file has been added, copy it to the volume and update it. If the file has been removed, then remove it from the host. To avoid a massive command-line argument, we copy a file containing each changed file on a line to the container, and then remove each file by running a script on the container. 
--- src/docker/remote.rs | 220 ++++++++++++++++++++++++++++++++++++++++++- src/temp.rs | 16 ++-- 2 files changed, 226 insertions(+), 10 deletions(-) diff --git a/src/docker/remote.rs b/src/docker/remote.rs index 21094dc5b..afd1eb9b8 100644 --- a/src/docker/remote.rs +++ b/src/docker/remote.rs @@ -1,7 +1,8 @@ -use std::io::Read; +use std::collections::BTreeMap; +use std::io::{self, BufRead, Read, Write}; use std::path::{Path, PathBuf}; use std::process::ExitStatus; -use std::{env, fs}; +use std::{env, fs, time}; use super::engine::Engine; use super::shared::*; @@ -394,6 +395,209 @@ pub fn copy_volume_container_rust( Ok(()) } +type FingerprintMap = BTreeMap; + +fn parse_project_fingerprint(path: &Path) -> Result { + let epoch = time::SystemTime::UNIX_EPOCH; + let file = fs::OpenOptions::new().read(true).open(path)?; + let reader = io::BufReader::new(file); + let mut result = BTreeMap::new(); + for line in reader.lines() { + let line = line?; + let (timestamp, relpath) = line + .split_once('\t') + .ok_or_else(|| eyre::eyre!("unable to parse fingerprint line '{line}'"))?; + let modified = epoch + time::Duration::from_millis(timestamp.parse::()?); + result.insert(relpath.to_string(), modified); + } + + Ok(result) +} + +fn write_project_fingerprint(path: &Path, fingerprint: &FingerprintMap) -> Result<()> { + let epoch = time::SystemTime::UNIX_EPOCH; + let mut file = fs::OpenOptions::new() + .write(true) + .truncate(true) + .create(true) + .open(path)?; + for (relpath, modified) in fingerprint { + let timestamp = modified.duration_since(epoch)?.as_millis() as u64; + writeln!(file, "{timestamp}\t{relpath}")?; + } + + Ok(()) +} + +fn read_dir_fingerprint( + home: &Path, + path: &Path, + map: &mut FingerprintMap, + copy_cache: bool, +) -> Result<()> { + let epoch = time::SystemTime::UNIX_EPOCH; + for entry in fs::read_dir(path)? { + let file = entry?; + let file_type = file.file_type()?; + // only parse known files types: 0 or 1 of these tests can pass. 
+ if file_type.is_dir() { + if copy_cache || !is_cachedir(&file) { + read_dir_fingerprint(home, &path.join(file.file_name()), map, copy_cache)?; + } + } else if file_type.is_file() || file_type.is_symlink() { + // we're mounting to the same location, so this should fine + // we need to round the modified date to millis. + let modified = file.metadata()?.modified()?; + let millis = modified.duration_since(epoch)?.as_millis() as u64; + let rounded = epoch + time::Duration::from_millis(millis); + let relpath = file.path().strip_prefix(home)?.as_posix()?; + map.insert(relpath, rounded); + } + } + + Ok(()) +} + +fn get_project_fingerprint(home: &Path, copy_cache: bool) -> Result { + let mut result = BTreeMap::new(); + read_dir_fingerprint(home, home, &mut result, copy_cache)?; + Ok(result) +} + +fn get_fingerprint_difference<'a, 'b>( + previous: &'a FingerprintMap, + current: &'b FingerprintMap, +) -> (Vec<&'b str>, Vec<&'a str>) { + // this can be added or updated + let changed: Vec<&str> = current + .iter() + .filter(|(ref k, ref v1)| { + previous + .get(&k.to_string()) + .map(|ref v2| v1 != v2) + .unwrap_or(true) + }) + .map(|(k, _)| k.as_str()) + .collect(); + let removed: Vec<&str> = previous + .iter() + .filter(|(ref k, _)| !current.contains_key(&k.to_string())) + .map(|(k, _)| k.as_str()) + .collect(); + (changed, removed) +} + +// copy files for a docker volume, for remote host support +// provides a list of files relative to src. +fn copy_volume_file_list( + engine: &Engine, + container: &str, + src: &Path, + dst: &Path, + files: &[&str], + verbose: bool, +) -> Result { + // SAFETY: safe, single-threaded execution. + let tempdir = unsafe { temp::TempDir::new()? 
}; + let temppath = tempdir.path(); + for file in files { + let src_path = src.join(file); + let dst_path = temppath.join(file); + fs::create_dir_all(dst_path.parent().expect("must have parent"))?; + fs::copy(&src_path, &dst_path)?; + } + copy_volume_files(engine, container, temppath, dst, verbose) +} + +// removed files from a docker volume, for remote host support +// provides a list of files relative to src. +fn remove_volume_file_list( + engine: &Engine, + container: &str, + dst: &Path, + files: &[&str], + verbose: bool, +) -> Result { + const PATH: &str = "/tmp/remove_list"; + let mut script = vec![]; + if verbose { + script.push("set -x".to_string()); + } + script.push(format!( + "cat \"{PATH}\" | while read line; do + rm -f \"${{line}}\" +done + +rm \"{PATH}\" +" + )); + + // SAFETY: safe, single-threaded execution. + let mut tempfile = unsafe { temp::TempFile::new()? }; + for file in files { + writeln!(tempfile.file(), "{}", dst.join(file).as_posix()?)?; + } + + // need to avoid having hundreds of files on the command, so + // just provide a single file name. 
+ subcommand(engine, "cp") + .arg(tempfile.path()) + .arg(format!("{container}:{PATH}")) + .run_and_get_status(verbose, true)?; + + subcommand(engine, "exec") + .arg(container) + .args(&["sh", "-c", &script.join("\n")]) + .run_and_get_status(verbose, true) + .map_err(Into::into) +} + +fn copy_volume_container_project( + engine: &Engine, + container: &str, + src: &Path, + dst: &Path, + volume: &VolumeId, + copy_cache: bool, + verbose: bool, +) -> Result<()> { + let copy_all = || { + if copy_cache { + copy_volume_files(engine, container, src, dst, verbose) + } else { + copy_volume_files_nocache(engine, container, src, dst, verbose) + } + }; + match volume { + VolumeId::Keep(_) => { + let parent = temp::dir()?; + fs::create_dir_all(&parent)?; + let fingerprint = parent.join(container); + let current = get_project_fingerprint(src, copy_cache)?; + if fingerprint.exists() { + let previous = parse_project_fingerprint(&fingerprint)?; + let (changed, removed) = get_fingerprint_difference(&previous, ¤t); + write_project_fingerprint(&fingerprint, ¤t)?; + + if !changed.is_empty() { + copy_volume_file_list(engine, container, src, dst, &changed, verbose)?; + } + if !removed.is_empty() { + remove_volume_file_list(engine, container, dst, &removed, verbose)?; + } + } else { + write_project_fingerprint(&fingerprint, ¤t)?; + copy_all()?; + } + } + VolumeId::Discard(_) => { + copy_all()?; + } + } + + Ok(()) +} + fn run_and_get_status(engine: &Engine, args: &[&str], verbose: bool) -> Result { command(engine) .args(args) @@ -645,7 +849,15 @@ pub(crate) fn run( } else { mount_prefix_path.join("project") }; - copy(&dirs.host_root, &mount_root)?; + copy_volume_container_project( + engine, + &container, + &dirs.host_root, + &mount_root, + &volume, + copy_cache, + verbose, + )?; let mut copied = vec![ (&dirs.xargo, mount_prefix_path.join("xargo")), @@ -692,7 +904,7 @@ pub(crate) fn run( let mut final_args = vec![]; let mut iter = args.iter().cloned(); let mut has_target_dir = false; - let 
target_dir_string = target_dir.to_utf8()?.to_string(); + let target_dir_string = target_dir.as_posix()?; while let Some(arg) = iter.next() { if arg == "--target-dir" { has_target_dir = true; diff --git a/src/temp.rs b/src/temp.rs index e0d36dfe8..12f24ac73 100644 --- a/src/temp.rs +++ b/src/temp.rs @@ -34,12 +34,12 @@ pub(crate) unsafe fn clean() { /// # Safety /// Safe as long as we have single-threaded execution. -unsafe fn push_tempfile() -> Result<&'static Path> { +unsafe fn push_tempfile() -> Result<&'static mut tempfile::NamedTempFile> { let parent = dir()?; fs::create_dir_all(&parent).ok(); let file = tempfile::NamedTempFile::new_in(&parent)?; FILES.push(file); - Ok(FILES.last().unwrap().path()) + Ok(FILES.last_mut().unwrap()) } /// # Safety @@ -50,7 +50,7 @@ unsafe fn pop_tempfile() -> Option { #[derive(Debug)] pub struct TempFile { - path: &'static Path, + file: &'static mut tempfile::NamedTempFile, } impl TempFile { @@ -58,12 +58,16 @@ impl TempFile { /// Safe as long as we have single-threaded execution. pub unsafe fn new() -> Result { Ok(Self { - path: push_tempfile()?, + file: push_tempfile()?, }) } - pub fn path(&self) -> &'static Path { - self.path + pub fn file(&mut self) -> &mut tempfile::NamedTempFile { + self.file + } + + pub fn path(&self) -> &Path { + self.file.path() } }