diff --git a/Cargo.lock b/Cargo.lock
index 2227fc7..d93dbb0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -268,6 +268,15 @@ dependencies = [
  "clap_derive",
 ]
 
+[[package]]
+name = "clap-num"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e063d263364859dc54fb064cedb7c122740cd4733644b14b176c097f51e8ab7"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "clap_builder"
 version = "4.5.10"
@@ -493,6 +502,7 @@ dependencies = [
  "byte-unit",
  "chrono",
  "clap",
+ "clap-num",
  "comfy-table",
  "const-hex",
  "file-mode",
diff --git a/Taskfile.yaml b/Taskfile.yaml
index e770dfd..29313d7 100644
--- a/Taskfile.yaml
+++ b/Taskfile.yaml
@@ -13,17 +13,25 @@ tasks:
       - rm -rf oci/{{.image | replace ":" "/" }}/image_new/
       - ./target/release/docker-repack oci/{{.image | replace ":" "/" }}/ --target-size=500MB --exclude='*.pyc' --split-file-threshold=25MB
       - skopeo copy --override-arch=arm64 --override-os=linux oci:oci/{{.image | replace ":" "/" }}/image_new/ docker-daemon:foo:abc
-#      - docker run -it --entrypoint=bash foo:abc
+      # - docker run -it --entrypoint=bash foo:abc
 
-  run-and-load:
+  run-and-export:
     vars:
-      image: 'nvidia/cuda:12.5.1-cudnn-devel-ubuntu20.04'
-#      image: 'python:3.11'
+      # image: 'nvidia/cuda:12.5.1-cudnn-devel-ubuntu20.04'
+      image: 'python:3.11'
+      compression_level: 9
+      image_path: 'oci/{{.image | replace ":" "/" }}'
+      image_slug: '{{.image | replace ":" "-" | replace "/" "-" }}'
+      tmp_dir:
+        sh: mktemp -d
     cmds:
      - cargo build --profile=release
      - rm -rf oci/{{.image | replace ":" "/" }}/image_new/
-      - ./target/release/docker-repack oci/{{.image | replace ":" "/" }}/ repack {{.CLI_ARGS}}
-      - skopeo copy --override-arch=arm64 --override-os=linux oci:oci/{{.image | replace ":" "/" }}/image_new/ docker-daemon:foo:abc
+      - ./target/release/docker-repack oci/{{.image | replace ":" "/" }}/ repack --compression={{.compression_level}} {{.CLI_ARGS}}
+      - skopeo copy --override-arch=arm64 --override-os=linux oci:{{.image_path }}/image_new/ docker://orfal/split:repacked-{{.image_slug}}
+      - skopeo copy --override-arch=arm64 --override-os=linux oci:{{.image_path }}/image/ docker://orfal/split:original-{{.image_slug}}
+      - skopeo copy --override-arch=arm64 --override-os=linux oci:{{.image_path }}/image/ dir://{{.tmp_dir}} --dest-compress --dest-compress-format=zstd --dest-compress-level={{.compression_level}}
+      - skopeo copy --override-arch=arm64 --override-os=linux dir://{{.tmp_dir}} docker://orfal/split:zstd-{{.image_slug}}
 
   test-all:
     cmds:
@@ -42,7 +50,7 @@ tasks:
 
   sync:
     cmds:
-      - for: {var: IMAGES}
+      - for: { var: IMAGES }
        cmd: |
          mkdir -p oci/{{.ITEM | replace ":" "/" }}/image/
          skopeo copy --override-arch=arm64 --override-os=linux docker://docker.io/{{.ITEM}} oci:oci/{{.ITEM | replace ":" "/" }}/image/
diff --git a/docker-repack/Cargo.toml b/docker-repack/Cargo.toml
index e70e377..a7bf0ed 100644
--- a/docker-repack/Cargo.toml
+++ b/docker-repack/Cargo.toml
@@ -26,3 +26,4 @@ strum_macros = "0.26.4"
 globset = { version = "0.4.14", features = ["serde"] }
 file-mode = "0.1.2"
 comfy-table = { version = "7.1.1", default-features = false }
+clap-num = "1.1.1"
diff --git a/docker-repack/src/file_combiner.rs b/docker-repack/src/file_combiner.rs
index 115edc6..dbe951b 100644
--- a/docker-repack/src/file_combiner.rs
+++ b/docker-repack/src/file_combiner.rs
@@ -1,7 +1,7 @@
 use crate::image_parser::{TarItem, TarItemChunk};
 use std::fmt::Write;
 
-const SCRIPT: &'static str = include_str!("./combine_files.sh");
+const SCRIPT: &str = include_str!("./combine_files.sh");
 
 pub fn generate_combining_script(
     chunked_files: &Vec<(&TarItem, Vec)>,
diff --git a/docker-repack/src/image_parser/compression.rs b/docker-repack/src/image_parser/compression.rs
deleted file mode 100644
index 31bbd99..0000000
--- a/docker-repack/src/image_parser/compression.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-// use crate::image_parser::TarItem;
-// use anyhow::Context;
-// use byte_unit::{Byte, UnitType};
-// use pack_it_up::Pack;
-// use std::fmt::{Display, Formatter};
-// use std::io::Read;
-// use tar::Entry;
-//
-//
-// pub struct ZstdCompressor<'a> {
-//     compressor: Compressor<'a>,
-//     compression_buffer: Vec,
-//     data_buffer: Vec,
-// }
-//
-// impl ZstdCompressor<'_> {
-//     pub fn new() -> Self {
-//         let compressor = Compressor::new(ZSTD_TEST_COMPRESSION_LEVEL).unwrap();
-//         Self {
-//             compressor,
-//             compression_buffer: Vec::new(),
-//             data_buffer: Vec::new(),
-//         }
-//     }
-//
-//     pub fn compress(
-//         &mut self,
-//         tar_item: TarItem,
-//         entry: &mut Entry,
-//     ) -> anyhow::Result {
-//         self.data_buffer.clear();
-//         self.data_buffer.reserve(entry.size() as usize);
-//
-//         entry
-//             .read_to_end(&mut self.data_buffer)
-//             .context("Error reading entry")?;
-//         let buffer_len = zstd_safe::compress_bound(self.data_buffer.len());
-//         self.compression_buffer.clear();
-//         self.compression_buffer.reserve(buffer_len);
-//
-//         let compressed_size = self
-//             .compressor
-//             .compress_to_buffer(&self.data_buffer, &mut self.compression_buffer)
-//             .context("Error compressing zstd buffer")? as u64;
-//
-//         let compression_ratio =
-//             ((tar_item.raw_size as f64 / compressed_size as f64) * 100f64) as u64;
-//         Ok(CompressionResult {
-//             tar_item,
-//             compressed_size,
-//             compression_ratio,
-//         })
-//     }
-// }
-//
-// #[derive(Debug, Clone)]
-// pub struct CompressionResult {
-//     pub tar_item: TarItem,
-//     pub compressed_size: u64,
-//     pub compression_ratio: u64,
-// }
-//
-// impl Display for CompressionResult {
-//     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-//         let compressed_size =
-//             Byte::from(self.compressed_size).get_appropriate_unit(UnitType::Decimal);
-//         let raw_size = Byte::from(self.tar_item.raw_size).get_appropriate_unit(UnitType::Decimal);
-//         write!(f, "CompressedTarItem {{ path: {:?}, raw_size: {:#.1}, compressed_size: {:#.1}, ratio: {:#.1}% }}", self.tar_item.path, raw_size, compressed_size, self.compression_ratio)
-//     }
-// }
-//
-// impl Pack for &CompressionResult {
-//     fn size(&self) -> usize {
-//         self.compressed_size as usize
-//     }
-// }
diff --git a/docker-repack/src/image_parser/image_reader.rs b/docker-repack/src/image_parser/image_reader.rs
index 52f1691..6f93c8e 100644
--- a/docker-repack/src/image_parser/image_reader.rs
+++ b/docker-repack/src/image_parser/image_reader.rs
@@ -1,11 +1,18 @@
 use crate::image_parser::layer_reader::Layer;
 use oci_spec::image::{ImageConfiguration, ImageIndex, ImageManifest};
+use std::fmt::{Display, Formatter};
 use std::fs::File;
 use std::path::{Path, PathBuf};
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]
 pub struct SourceLayerID(pub usize);
 
+impl Display for SourceLayerID {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "Layer {:<2}", self.0)
+    }
+}
+
 pub struct ImageReader {
     layers: Vec,
     pub config: ImageConfiguration,
diff --git a/docker-repack/src/image_parser/image_writer.rs b/docker-repack/src/image_parser/image_writer.rs
index ecde680..b5bf3e1 100644
--- a/docker-repack/src/image_parser/image_writer.rs
+++ b/docker-repack/src/image_parser/image_writer.rs
@@ -4,7 +4,7 @@ use crate::image_parser::layer_writer::{LayerType, LayerWriter, WrittenLayer};
 use crate::image_parser::{utils, HashAndSize, HashedWriter, ImageReader};
 use anyhow::bail;
 use chrono::Utc;
-use indicatif::{MultiProgress, ProgressDrawTarget};
+use indicatif::MultiProgress;
 use itertools::Itertools;
 use oci_spec::image::{
     Descriptor, HistoryBuilder, ImageIndexBuilder, ImageManifestBuilder, MediaType,
@@ -12,24 +12,32 @@ use oci_spec::image::{
 use rayon::prelude::*;
 use serde_json::json;
 use std::collections::HashMap;
+use std::fmt::{Display, Formatter};
 use std::fs::File;
 use std::io::{BufReader, BufWriter, Read};
 use std::ops::Range;
 use std::path::{Path, PathBuf};
-use byte_unit::{Byte, UnitType};
 use tar::Entry;
-
-const ZSTD_OUTPUT_LEVEL: i32 = 19;
+use zstd::zstd_safe::CompressionLevel;
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 pub struct NewLayerID(usize);
 
+impl Display for NewLayerID {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "New layer {:<2}", self.0)
+    }
+}
+
+pub type PathKey<'a> = (SourceLayerID, &'a str, Range);
+pub type PathValue = (NewLayerID, Option);
+
 pub struct ImageWriter<'a> {
     directory: PathBuf,
     blobs_dir: PathBuf,
     temp_dir: PathBuf,
     layers: Vec,
-    paths: HashMap<(SourceLayerID, &'a str, Range), (NewLayerID, Option)>,
+    paths: HashMap, PathValue>,
 }
 
 impl<'a> ImageWriter<'a> {
@@ -58,7 +66,7 @@
     ) -> anyhow::Result<(NewLayerID, &LayerWriter)> {
         let layer_id = NewLayerID(self.layers.len());
         let path = self.temp_dir.join(format!("{name}-{}.tar", layer_id.0));
-        let layer = LayerWriter::create_layer(path, type_)?;
+        let layer = LayerWriter::create_layer(layer_id, path, type_)?;
         self.layers.push(layer);
         Ok((layer_id, &self.layers[layer_id.0]))
     }
@@ -66,7 +74,7 @@
     pub fn add_layer_paths(
         &mut self,
         name: &'static str,
-        paths: impl Iterator, Option)>,
+        paths: impl Iterator, Option)>,
         type_: LayerType,
     ) -> anyhow::Result<()> {
         let (layer_id, _) = self.create_new_layer(name, type_)?;
@@ -182,34 +190,31 @@
     }
 
     pub fn finish_writing_layers(&mut self) -> anyhow::Result> {
-        let finished_layers: Result, _> = self
-            .layers
-            .drain(0..)
-            .into_iter()
-            .map(|layer| layer.finish())
-            .collect();
+        let finished_layers: Result, _> =
+            self.layers.drain(0..).map(|layer| layer.finish()).collect();
 
-        Ok(finished_layers?)
+        finished_layers
     }
 
     pub fn compress_layers(
         &self,
         progress: &MultiProgress,
         finished_layers: Vec,
+        compression_level: CompressionLevel,
     ) -> anyhow::Result> {
         let compressed_layers: Result, _> = finished_layers
             .into_par_iter()
             .map(|layer| {
-                compress_layer(&progress, self.blobs_dir.clone(), &layer, ZSTD_OUTPUT_LEVEL)
+                compress_layer(progress, self.blobs_dir.clone(), &layer, compression_level)
                     .map(|v| (layer, v))
             })
             .collect();
-        Ok(compressed_layers?)
+        compressed_layers
     }
 
     pub fn write_index(
         self,
-        finished_layers: &Vec<(WrittenLayer, HashAndSize)>,
+        finished_layers: &[(WrittenLayer, HashAndSize)],
         mut image: ImageReader,
     ) -> anyhow::Result<()> {
         let root_fs = image.config.rootfs_mut();
@@ -292,7 +297,12 @@ fn compress_layer(
     encoder.set_pledged_src_size(Some(input_size))?;
     let buf_reader = BufReader::new(input_file);
 
-    let mut progress_reader = progress_reader(progress, input_size, buf_reader);
+    let mut progress_reader = progress_reader(
+        progress,
+        input_size,
+        buf_reader,
+        format!("Compressing {}", layer.id),
+    );
 
     std::io::copy(&mut progress_reader, &mut encoder)?;
 
diff --git a/docker-repack/src/image_parser/layer_reader.rs b/docker-repack/src/image_parser/layer_reader.rs
index 41edb2c..b849499 100644
--- a/docker-repack/src/image_parser/layer_reader.rs
+++ b/docker-repack/src/image_parser/layer_reader.rs
@@ -10,22 +10,25 @@ pub struct Layer {
     pub id: SourceLayerID,
     pub path: PathBuf,
     pub size: u64,
-    // pub digest: String,
 }
 
 pub fn progress_reader(
     progress: &MultiProgress,
     size: u64,
     file: BufReader,
+    message: String,
 ) -> ProgressBarIter> {
     progress
         .add(
             ProgressBar::new(size)
                 .with_style(
-                    ProgressStyle::with_template("{wide_bar} {binary_bytes}/{binary_total_bytes}")
-                        .unwrap(),
+                    ProgressStyle::with_template(
+                        "{msg:>10} {wide_bar} {binary_bytes}/{binary_total_bytes}",
+                    )
+                    .unwrap(),
                 )
-                .with_finish(ProgressFinish::AndClear),
+                .with_finish(ProgressFinish::AndClear)
+                .with_message(message),
         )
         .wrap_read(file)
 }
@@ -39,7 +42,12 @@ impl Layer {
         let file = BufReader::new(file);
         let writer = match progress {
             None => ProgressBar::hidden().wrap_read(file),
-            Some(multi_progress) => progress_reader(multi_progress, self.size, file),
+            Some(multi_progress) => progress_reader(
+                multi_progress,
+                self.size,
+                file,
+                format!("Reading {}", self.id),
+            ),
         };
 
         let decoder = GzDecoder::new(writer);
diff --git a/docker-repack/src/image_parser/layer_writer.rs b/docker-repack/src/image_parser/layer_writer.rs
index 98055be..56b04b0 100644
--- a/docker-repack/src/image_parser/layer_writer.rs
+++ b/docker-repack/src/image_parser/layer_writer.rs
@@ -1,3 +1,4 @@
+use crate::image_parser::image_writer::NewLayerID;
 use crate::image_parser::{HashAndSize, HashedWriter};
 use byte_unit::{Byte, UnitType};
 use std::fmt::{Display, Formatter};
@@ -17,24 +18,30 @@ pub enum LayerType {
 }
 
 pub struct LayerWriter {
+    id: NewLayerID,
     path: PathBuf,
     archive_writer: Mutex>>>,
     index_writer: Mutex>,
     entries: AtomicUsize,
-    type_: LayerType
+    type_: LayerType,
 }
 
 impl LayerWriter {
-    pub fn create_layer(path: PathBuf, type_: LayerType) -> anyhow::Result {
+    pub fn create_layer(
+        id: NewLayerID,
+        path: PathBuf,
+        type_: LayerType,
+    ) -> anyhow::Result {
         let writer = HashedWriter::new(File::create(&path)?);
         let writer = Builder::new(BufWriter::new(writer));
         let index_writer = BufWriter::new(File::create(path.with_extension("index.txt"))?);
 
         Ok(LayerWriter {
+            id,
             path,
             archive_writer: Mutex::new(writer),
             index_writer: Mutex::new(index_writer),
             entries: 0.into(),
-            type_
+            type_,
         })
     }
@@ -60,10 +67,7 @@
     }
 
     #[inline(always)]
-    pub fn write_new_directory(
-        &self,
-        path: &Path,
-    ) -> anyhow::Result<()> {
+    pub fn write_new_directory(&self, path: &Path) -> anyhow::Result<()> {
         self.write_index(&(0..0), path, None, EntryType::Directory)?;
         let mut header = Header::new_gnu();
         header.set_entry_type(EntryType::Directory);
@@ -139,6 +143,7 @@ impl LayerWriter {
         let inner = inner.into_inner()?;
         let (_, hash) = inner.into_inner();
         Ok(WrittenLayer {
+            id: self.id,
             type_: self.type_,
             path: self.path,
             hash,
@@ -149,6 +154,7 @@ impl LayerWriter {
 
 #[derive(Debug)]
 pub struct WrittenLayer {
+    pub id: NewLayerID,
     pub type_: LayerType,
     pub path: PathBuf,
     pub hash: HashAndSize,
diff --git a/docker-repack/src/image_parser/mod.rs b/docker-repack/src/image_parser/mod.rs
index e00f11f..544055b 100644
--- a/docker-repack/src/image_parser/mod.rs
+++ b/docker-repack/src/image_parser/mod.rs
@@ -16,6 +16,6 @@ pub use hashed_writer::{HashAndSize, HashedWriter};
 pub use image_reader::ImageReader;
 pub use image_writer::ImageWriter;
 pub use layer_contents::LayerContents;
+pub use layer_writer::LayerType;
 pub use packing::LayerPacker;
 pub use tar_item::*;
-pub use layer_writer::LayerType;
\ No newline at end of file
diff --git a/docker-repack/src/image_parser/packing.rs b/docker-repack/src/image_parser/packing.rs
index a4f34fc..1ab3ace 100644
--- a/docker-repack/src/image_parser/packing.rs
+++ b/docker-repack/src/image_parser/packing.rs
@@ -188,7 +188,7 @@ impl<'a: 'b, 'b> LayerPacker<'a, 'b> {
                     Some(split_file.to_path.to_path_buf()),
                 )]
                 .into_iter(),
-                LayerType::Files
+                LayerType::Files,
             )?;
         }
 
diff --git a/docker-repack/src/image_parser/tar_item.rs b/docker-repack/src/image_parser/tar_item.rs
index fa1d26c..bc9ac73 100644
--- a/docker-repack/src/image_parser/tar_item.rs
+++ b/docker-repack/src/image_parser/tar_item.rs
@@ -2,6 +2,7 @@ use crate::image_parser::image_reader::SourceLayerID;
 use crate::image_parser::utils::byte_range_chunks;
 use anyhow::bail;
 use byte_unit::{Byte, UnitType};
+use const_hex::Buffer;
 use itertools::Itertools;
 use sha2::{Digest, Sha256};
 use std::cmp::Ordering;
@@ -10,7 +11,6 @@
 use std::io::Read;
 use std::ops::Range;
 use std::path::PathBuf;
 use tar::{Entry, EntryType};
-use const_hex::Buffer;
 
 pub type TarItemKey<'a> = (SourceLayerID, &'a PathBuf);
diff --git a/docker-repack/src/main.rs b/docker-repack/src/main.rs
index fe531d3..dc024ce 100644
--- a/docker-repack/src/main.rs
+++ b/docker-repack/src/main.rs
@@ -1,18 +1,26 @@
-use crate::image_parser::{ImageReader, ImageWriter, LayerContents, LayerPacker, LayerType, TarItem};
+use crate::image_parser::{
+    ImageReader, ImageWriter, LayerContents, LayerPacker, LayerType, TarItem,
+};
 use anyhow::Context;
 use byte_unit::{Byte, UnitType};
 use clap::{Parser, Subcommand};
+use clap_num::number_range;
+use comfy_table::Table;
 use file_mode::User;
-use indicatif::{MultiProgress, ProgressDrawTarget};
+use globset::GlobSet;
+use indicatif::MultiProgress;
 use itertools::{Either, Itertools};
 use rayon::prelude::*;
 use std::path::{Path, PathBuf};
-use globset::GlobSet;
-use comfy_table::Table;
-
+use zstd::zstd_safe::CompressionLevel;
 mod file_combiner;
 mod image_parser;
 
+fn parse_compression_level(s: &str) -> Result {
+    let range = zstd::compression_level_range();
+    number_range(s, *range.start(), *range.end())
+}
+
 #[derive(Parser, Debug)]
 #[command(version, about, long_about = None)]
 struct Args {
@@ -30,10 +38,12 @@ enum Command {
         target_size: Byte,
         #[arg(short, long)]
         split_file_threshold: Option,
+        #[arg(short, long, value_parser=parse_compression_level, default_value="7")]
+        compression: CompressionLevel,
     },
     LargestFiles {
         #[arg(short, long, default_value = "10")]
-        limit: usize
+        limit: usize,
     },
 }
 
@@ -47,28 +57,61 @@ fn main() -> anyhow::Result<()> {
     let progress = MultiProgress::new();
 
     match args.command {
-        Command::Repack { target_size, split_file_threshold } => repack(progress, image, new_image_dir, target_size, split_file_threshold, exclude),
-        Command::LargestFiles { limit } => largest_files(progress, image, exclude, limit)
+        Command::Repack {
+            target_size,
+            split_file_threshold,
+            compression,
+        } => repack(
+            progress,
+            image,
+            new_image_dir,
+            target_size,
+            split_file_threshold,
+            exclude,
+            compression,
+        ),
+        Command::LargestFiles { limit } => largest_files(progress, image, exclude, limit),
     }
 }
 
-fn largest_files(progress: MultiProgress, image: ImageReader, exclude: Option, limit: usize) -> anyhow::Result<()> {
+fn largest_files(
+    progress: MultiProgress,
+    image: ImageReader,
+    exclude: Option,
+    limit: usize,
+) -> anyhow::Result<()> {
     let layer_contents = get_layer_contents(&progress, &image, exclude)?;
     let sorted_by_size = layer_contents
         .into_inner()
         .into_values()
         .sorted_by_key(|item| item.size)
-        .rev().take(limit);
+        .rev()
+        .take(limit);
 
     let mut table = Table::new();
-    table.set_header(["Path", "Size"]).add_rows(
-        sorted_by_size
-            .map(|item| [format!("{}", item.path.display()), format!("{:#.1}", Byte::from(item.size).get_appropriate_unit(UnitType::Decimal))])
-    );
+    table
+        .set_header(["Path", "Size"])
+        .add_rows(sorted_by_size.map(|item| {
+            [
+                format!("{}", item.path.display()),
+                format!(
+                    "{:#.1}",
+                    Byte::from(item.size).get_appropriate_unit(UnitType::Decimal)
+                ),
+            ]
+        }));
     println!("{table}");
     Ok(())
 }
 
-fn repack(progress: MultiProgress, image: ImageReader, output_dir: PathBuf, target_size: Byte, split_file_threshold: Option, exclude: Option) -> anyhow::Result<()> {
+fn repack(
+    progress: MultiProgress,
+    image: ImageReader,
+    output_dir: PathBuf,
+    target_size: Byte,
+    split_file_threshold: Option,
+    exclude: Option,
+    compression_level: CompressionLevel,
+) -> anyhow::Result<()> {
     let mut image_writer = ImageWriter::new(output_dir)?;
     let layer_contents = get_layer_contents(&progress, &image, exclude)?;
@@ -130,7 +173,7 @@ fn repack(progress: MultiProgress, image: ImageReader, output_dir: PathBuf, targ
         vec![]
     };
 
-    packer.add_chunked_items(chunked_files.iter().map(|i| i.1.iter()).flatten())?;
+    packer.add_chunked_items(chunked_files.iter().flat_map(|i| i.1.iter()))?;
     println!("{packer}");
 
     packer.create_layers(&mut image_writer)?;
@@ -140,7 +183,7 @@ fn repack(progress: MultiProgress, image: ImageReader, output_dir: PathBuf, targ
         let script = file_combiner::generate_combining_script(&chunked_files)?;
         let index = file_combiner::generate_combining_index(&chunked_files)?;
         let repack_dir = Path::new(".docker-repack/");
-        layer.write_new_directory(&repack_dir)?;
+        layer.write_new_directory(repack_dir)?;
         layer.write_new_file_with_data(
             &repack_dir.join("combine-files.sh"),
             file_mode::Mode::empty()
@@ -155,7 +198,7 @@ fn repack(progress: MultiProgress, image: ImageReader, output_dir: PathBuf, targ
         )?;
     }
 
-    image.layers().into_par_iter().try_for_each(|layer| {
+    for layer in image.layers().iter() {
         let mut archive = layer.get_progress_reader(Some(&progress))?;
         for entry in archive.entries()? {
             let mut entry = entry?;
@@ -165,16 +208,31 @@ fn repack(progress: MultiProgress, image: ImageReader, output_dir: PathBuf, targ
                 split_file_threshold.map(|f| f.as_u64()),
             )?;
         }
-        Ok::<_, anyhow::Error>(())
-    })?;
+    }
+    // image.layers().into_par_iter().try_for_each(|layer| {
+    //     let mut archive = layer.get_progress_reader(Some(&progress))?;
+    //     for entry in archive.entries()? {
+    //         let mut entry = entry?;
+    //         image_writer.add_entry(
+    //             layer.id,
+    //             &mut entry,
+    //             split_file_threshold.map(|f| f.as_u64()),
+    //         )?;
+    //     }
+    //     Ok::<_, anyhow::Error>(())
+    // })?;
     let finished_layers = image_writer.finish_writing_layers()?;
-    let compressed_layers = image_writer.compress_layers(&progress, finished_layers)?;
-    let sorted_layers = compressed_layers.into_iter().sorted_by_key(|(layer, size)| (layer.type_, size.size)).collect_vec();
+    let compressed_layers =
+        image_writer.compress_layers(&progress, finished_layers, compression_level)?;
+    let sorted_layers = compressed_layers
+        .into_iter()
+        .sorted_by_key(|(layer, size)| (layer.type_, size.size))
+        .collect_vec();
     image_writer.write_index(&sorted_layers, image)?;
 
     for (layer, hash_and_size) in sorted_layers {
         println!(
-            "{layer} - compressed: {} / Size: {}",
+            "{layer} - compressed: {} / Size: {:#.1}",
             hash_and_size.raw_hash(),
             Byte::from(hash_and_size.size).get_appropriate_unit(UnitType::Decimal)
         );
@@ -183,12 +241,16 @@ fn repack(progress: MultiProgress, image: ImageReader, output_dir: PathBuf, targ
     Ok(())
 }
 
-fn get_layer_contents(progress: &MultiProgress, image: &ImageReader, exclude: Option) -> anyhow::Result {
+fn get_layer_contents(
+    progress: &MultiProgress,
+    image: &ImageReader,
+    exclude: Option,
+) -> anyhow::Result {
     let all_operations: Result, anyhow::Error> = image
         .layers()
         .into_par_iter()
        .map(|layer| {
-            let mut archive = layer.get_progress_reader(Some(&progress))?;
+            let mut archive = layer.get_progress_reader(Some(progress))?;
             let items = archive
                 .entries()
                 .unwrap()
@@ -228,4 +290,4 @@ fn create_glob_set(exclude: Option>) -> anyhow::Result
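
Note on the new `--compression` flag added in `main.rs` above: the `repack` subcommand now validates the requested zstd level with `clap-num` against the range reported by the linked zstd library, defaulting to level 7. The snippet below is a minimal, self-contained sketch of that parser wiring; the `Cli` struct and `main` harness are illustrative assumptions for demonstration, not the crate's actual CLI definition.

```rust
use clap::Parser;
use clap_num::number_range;
use zstd::zstd_safe::CompressionLevel;

/// Reject levels outside what the linked zstd build supports (same helper as in the diff).
fn parse_compression_level(s: &str) -> Result<CompressionLevel, String> {
    let range = zstd::compression_level_range();
    number_range(s, *range.start(), *range.end())
}

/// Illustrative stand-in for the repack subcommand's arguments.
#[derive(Parser, Debug)]
struct Cli {
    /// zstd level used when compressing the repacked layers.
    #[arg(short, long, value_parser = parse_compression_level, default_value = "7")]
    compression: CompressionLevel,
}

fn main() {
    // `--compression=19` parses; an out-of-range value is rejected by clap
    // with a range error before any image data is read.
    let cli = Cli::parse();
    println!("compressing with zstd level {}", cli.compression);
}
```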