diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index b031d0a7..62474297 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -842,7 +842,7 @@ async fn testing(opts: &TestingOpts) -> Result<()> {
         TestingOpts::Run => crate::integrationtest::run_tests(),
         TestingOpts::RunIMA => crate::integrationtest::test_ima(),
         TestingOpts::FilterTar => {
-            crate::tar::filter_tar(std::io::stdin(), std::io::stdout()).map(|_| {})
+            crate::tar::filter_tar(std::io::stdin(), std::io::stdout(), false).map(|_| {})
         }
     }
 }
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 47c8ad69..00c884a8 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -853,6 +853,16 @@ impl ImageImporter {
         let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref);
         let base_commit = import.ostree_commit_layer.commit.clone().unwrap();
 
+        let root_is_transient = {
+            let rootf = self
+                .repo
+                .read_commit(&base_commit, gio::Cancellable::NONE)?
+                .0;
+            let rootf = rootf.downcast_ref::<ostree::RepoFile>().unwrap();
+            crate::ostree_prepareroot::overlayfs_root_enabled(rootf)?
+        };
+        tracing::debug!("Base rootfs is transient: {root_is_transient}");
+
         let ostree_ref = ref_for_image(&target_imgref.imgref)?;
 
         let mut layer_commits = Vec::new();
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index 7d6a47ca..e6271e12 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -39,6 +39,7 @@ pub mod ima;
 pub mod keyfileext;
 pub(crate) mod logging;
 pub mod mountutil;
+pub mod ostree_prepareroot;
 pub mod refescape;
 #[doc(hidden)]
 pub mod repair;
diff --git a/lib/src/ostree_prepareroot.rs b/lib/src/ostree_prepareroot.rs
new file mode 100644
index 00000000..ec83ffb2
--- /dev/null
+++ b/lib/src/ostree_prepareroot.rs
@@ -0,0 +1,54 @@
+//! Logic related to parsing ostree-prepare-root.conf.
+//!
+
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::keyfileext::KeyFileExt;
+use anyhow::{Context, Result};
+use camino::Utf8Path;
+use ostree::prelude::FileExt;
+use ostree::{gio, glib};
+
+pub(crate) const CONF_PATH: &str = "ostree/prepare-root.conf";
+
+pub(crate) fn load_config(root: &ostree::RepoFile) -> Result<Option<glib::KeyFile>> {
+    let cancellable = gio::Cancellable::NONE;
+    let kf = glib::KeyFile::new();
+    for path in ["etc", "usr/lib"].into_iter().map(Utf8Path::new) {
+        let path = &path.join(CONF_PATH);
+        let f = root.resolve_relative_path(&path);
+        if !f.query_exists(cancellable) {
+            continue;
+        }
+        let contents = f.load_contents(cancellable)?.0;
+        let contents = String::from_utf8(contents)?;
+        kf.load_from_data(&contents, glib::KeyFileFlags::NONE)
+            .with_context(|| format!("Parsing {path}"))?;
+        tracing::debug!("Loaded {path}");
+        return Ok(Some(kf));
+    }
+    tracing::debug!("No {CONF_PATH} found");
+    Ok(None)
+}
+
+/// Query whether the target root has the `root.transient` key
+/// which sets up a transient overlayfs.
+pub(crate) fn overlayfs_root_enabled(root: &ostree::RepoFile) -> Result<bool> {
+    if let Some(config) = load_config(root)? {
+        overlayfs_enabled_in_config(&config)
+    } else {
+        Ok(false)
+    }
+}
+
+/// Query whether the config uses an overlayfs model (composefs or plain overlayfs).
+pub fn overlayfs_enabled_in_config(config: &glib::KeyFile) -> Result<bool> {
+    let root_transient = config
+        .optional_bool("root", "transient")?
+        .unwrap_or_default();
+    let required_composefs = config
+        .optional_string("composefs", "enabled")?
+        .map(|s| s.as_str() == "yes")
+        .unwrap_or_default();
+    Ok(root_transient || required_composefs)
+}
diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs
index 5c5e07d7..1bb1a426 100644
--- a/lib/src/tar/write.rs
+++ b/lib/src/tar/write.rs
@@ -26,6 +26,10 @@ use std::sync::Arc;
 use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
 use tracing::instrument;
 
+// Exclude things in https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems/
+// from being placed in the rootfs.
+const EXCLUDED_TOPLEVEL_PATHS: &[&str] = &["run", "tmp", "proc", "sys", "dev"];
+
 /// Copy a tar entry to a new tar archive, optionally using a different filesystem path.
 pub(crate) fn copy_entry(
     entry: tar::Entry<impl std::io::Read>,
@@ -62,6 +66,8 @@ pub struct WriteTarOptions {
     /// Enable SELinux labeling from the base commit
     /// Requires the `base` option.
     pub selinux: bool,
+    /// Allow content not in /usr; this should be paired with ostree rootfs.transient = true
+    pub allow_nonusr: bool,
 }
 
 /// The result of writing a tar stream.
@@ -97,13 +103,16 @@ fn sepolicy_from_base(repo: &ostree::Repo, base: &str) -> Result<tempfile::TempDir> {
     Filtered(&'a str),
     Normal(Utf8PathBuf),
 }
 
-fn normalize_validate_path(path: &Utf8Path) -> Result<NormalizedPathResult<'_>> {
+fn normalize_validate_path(
+    path: &Utf8Path,
+    allow_nonusr: bool,
+) -> Result<NormalizedPathResult<'_>> {
     // This converts e.g. `foo//bar/./baz` into `foo/bar/baz`.
     let mut components = path
         .components()
@@ -140,7 +149,13 @@ fn normalize_validate_path(path: &Utf8Path) -> Result<NormalizedPathResult<'_>>
                 "var" => {
                     ret.push("usr/share/factory/var");
                 }
-                o => return Ok(NormalizedPathResult::Filtered(o)),
+                o if EXCLUDED_TOPLEVEL_PATHS.contains(&o) => {
+                    return Ok(NormalizedPathResult::Filtered(part));
+                }
+                _ if allow_nonusr => ret.push(part),
+                _ => {
+                    return Ok(NormalizedPathResult::Filtered(part));
+                }
             }
         } else {
             ret.push(part);
@@ -165,6 +180,7 @@ fn normalize_validate_path(path: &Utf8Path) -> Result<NormalizedPathResult<'_>>
 pub(crate) fn filter_tar(
     src: impl std::io::Read,
     dest: impl std::io::Write,
+    allow_nonusr: bool,
 ) -> Result<BTreeMap<String, u32>> {
     let src = std::io::BufReader::new(src);
     let mut src = tar::Archive::new(src);
@@ -241,7 +257,7 @@ pub(crate) fn filter_tar(
             }
         }
 
-        let normalized = match normalize_validate_path(path)? {
+        let normalized = match normalize_validate_path(path, allow_nonusr)? {
             NormalizedPathResult::Filtered(path) => {
                 if let Some(v) = filtered.get_mut(path) {
                     *v += 1;
@@ -263,6 +279,7 @@ pub(crate) fn filter_tar(
 async fn filter_tar_async(
     src: impl AsyncRead + Send + 'static,
     mut dest: impl AsyncWrite + Send + Unpin,
+    allow_nonusr: bool,
 ) -> Result<BTreeMap<String, u32>> {
     let (tx_buf, mut rx_buf) = tokio::io::duplex(8192);
     // The source must be moved to the heap so we know it is stable for passing to the worker thread
@@ -270,7 +287,7 @@ async fn filter_tar_async(
     let tar_transformer = tokio::task::spawn_blocking(move || {
         let mut src = tokio_util::io::SyncIoBridge::new(src);
         let dest = tokio_util::io::SyncIoBridge::new(tx_buf);
-        let r = filter_tar(&mut src, dest);
+        let r = filter_tar(&mut src, dest, allow_nonusr);
         // Pass ownership of the input stream back to the caller - see below.
         (r, src)
     });
@@ -345,7 +362,7 @@ pub async fn write_tar(
     let mut child_stdout = r.stdout.take().unwrap();
     let mut child_stderr = r.stderr.take().unwrap();
     // Copy the filtered tar stream to child stdin
-    let filtered_result = filter_tar_async(src, child_stdin);
+    let filtered_result = filter_tar_async(src, child_stdin, options.allow_nonusr);
     let output_copier = async move {
         // Gather stdout/stderr to buffers
         let mut child_stdout_buf = String::new();
@@ -401,31 +418,40 @@ mod tests {
 
     #[test]
     fn test_normalize_path() {
-        let valid = &[
+        let valid_all = &[
             ("/usr/bin/blah", "./usr/bin/blah"),
             ("usr/bin/blah", "./usr/bin/blah"),
             ("usr///share/.//blah", "./usr/share/blah"),
-            ("./", "."),
             ("var/lib/blah", "./usr/share/factory/var/lib/blah"),
             ("./var/lib/blah", "./usr/share/factory/var/lib/blah"),
+            ("./", "."),
         ];
-        for &(k, v) in valid {
-            let r = normalize_validate_path(k.into()).unwrap();
+        let valid_nonusr = &[("boot", "./boot"), ("opt/puppet/blah", "./opt/puppet/blah")];
+        for &(k, v) in valid_all {
+            let r = normalize_validate_path(k.into(), false).unwrap();
+            let r2 = normalize_validate_path(k.into(), true).unwrap();
+            assert_eq!(r, r2);
             match r {
-                NormalizedPathResult::Filtered(o) => {
-                    panic!("Case {} should not be filtered as {}", k, o)
-                }
-                NormalizedPathResult::Normal(p) => {
-                    assert_eq!(v, p.as_str());
-                }
+                NormalizedPathResult::Normal(r) => assert_eq!(r, v),
+                NormalizedPathResult::Filtered(o) => panic!("Should not have filtered {o}"),
             }
         }
-        let filtered = &[("/boot/vmlinuz", "boot")];
-        for &(k, v) in filtered {
-            match normalize_validate_path(k.into()).unwrap() {
-                NormalizedPathResult::Filtered(f) => {
-                    assert_eq!(v, f);
-                }
+        for &(k, v) in valid_nonusr {
+            let strict = normalize_validate_path(k.into(), false).unwrap();
+            assert!(
+                matches!(strict, NormalizedPathResult::Filtered(_)),
+                "Incorrect filter for {k}"
+            );
+            let nonusr = normalize_validate_path(k.into(), true).unwrap();
+            match nonusr {
+                NormalizedPathResult::Normal(r) => assert_eq!(r, v),
+                NormalizedPathResult::Filtered(o) => panic!("Should not have filtered {o}"),
+            }
+        }
+        let filtered = &["/run/blah", "/sys/foo", "/dev/somedev"];
+        for &k in filtered {
+            match normalize_validate_path(k.into(), true).unwrap() {
+                NormalizedPathResult::Filtered(_) => {}
                 NormalizedPathResult::Normal(_) => {
                     panic!("{} should be filtered", k)
                 }
@@ -433,7 +459,8 @@ mod tests {
         }
         let errs = &["usr/foo/../../bar"];
         for &k in errs {
-            assert!(normalize_validate_path(k.into()).is_err());
+            assert!(normalize_validate_path(k.into(), true).is_err());
+            assert!(normalize_validate_path(k.into(), false).is_err());
         }
     }
 
@@ -451,7 +478,7 @@ mod tests {
         let _ = rootfs_tar.into_inner()?;
         let mut dest = Vec::new();
        let src = tokio::io::BufReader::new(tokio::fs::File::open(rootfs_tar_path).await?);
-        filter_tar_async(src, &mut dest).await?;
+        filter_tar_async(src, &mut dest, false).await?;
         let dest = dest.as_slice();
         let mut final_tar = tar::Archive::new(Cursor::new(dest));
         let destdir = &tempd.path().join("destdir");
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index d363283e..cc6957e4 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -377,7 +377,8 @@ async fn test_tar_write() -> Result<()> {
     let tmpvarlog = tmproot.open_dir("var/log")?;
     tmpvarlog.write("foo.log", "foolog")?;
     tmpvarlog.write("bar.log", "barlog")?;
-    tmproot.create_dir("boot")?;
+    tmproot.create_dir("run")?;
+    tmproot.write("run/somefile", "somestate")?;
     let tmptar = "testlayer.tar";
     cmd!(sh, "tar cf {tmptar} -C tmproot .").run()?;
     let src = fixture.dir.open(tmptar)?;
@@ -393,7 +394,7 @@ async fn test_tar_write() -> Result<()> {
         .run()?;
     assert_eq!(r.filtered.len(), 1);
     assert!(r.filtered.get("var").is_none());
-    assert_eq!(*r.filtered.get("boot").unwrap(), 1);
+    assert_eq!(*r.filtered.get("run").unwrap(), 2);
 
     Ok(())
 }
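
Reviewer note, not part of the patch: a minimal sketch of how the new public helper and the allow_nonusr plumbing fit together. It assumes the library is consumed as the ostree_ext crate and borrows glib through the ostree crate, as the new module above does; the config contents and the program itself are illustrative only.

// Sketch only (hypothetical consumer code): exercises the new public
// overlayfs_enabled_in_config() helper against an in-memory prepare-root.conf.
use ostree::glib;

fn main() -> anyhow::Result<()> {
    // Illustrative contents for usr/lib/ostree/prepare-root.conf (CONF_PATH above).
    let conf = "[root]\ntransient = true\n";
    let kf = glib::KeyFile::new();
    kf.load_from_data(conf, glib::KeyFileFlags::NONE)?;
    // True when root.transient=true or composefs.enabled=yes is configured.
    let enabled = ostree_ext::ostree_prepareroot::overlayfs_enabled_in_config(&kf)?;
    // ImageImporter feeds this result into WriteTarOptions::allow_nonusr, so layered
    // content outside /usr (e.g. /opt) is kept, while API filesystems such as
    // /run, /proc, /sys, /dev and /tmp are still filtered out.
    println!("transient root / composefs enabled: {enabled}");
    Ok(())
}

The importer only flips allow_nonusr when the base commit's prepare-root.conf opts into a transient or composefs root, so the strict /usr-only filtering remains the default behavior.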