diff --git a/rafs/src/builder/core/context.rs b/rafs/src/builder/core/context.rs index 4bc6d0e1e3d..2681e3a6c8e 100644 --- a/rafs/src/builder/core/context.rs +++ b/rafs/src/builder/core/context.rs @@ -38,7 +38,7 @@ use crate::builder::{ }; use crate::metadata::chunk::ChunkWrapper; use crate::metadata::layout::v5::RafsV5BlobTable; -use crate::metadata::layout::v6::{RafsV6BlobTable, EROFS_BLOCK_SIZE, EROFS_INODE_SLOT_SIZE}; +use crate::metadata::layout::v6::{RafsV6BlobTable, EROFS_BLOCK_SIZE_4096, EROFS_INODE_SLOT_SIZE}; use crate::metadata::layout::RafsBlobTable; use crate::metadata::{Inode, RAFS_DEFAULT_CHUNK_SIZE}; use crate::metadata::{RafsSuperFlags, RafsVersion}; @@ -970,11 +970,11 @@ impl BootstrapContext { layered, inode_map: HashMap::new(), nodes: Vec::new(), - offset: EROFS_BLOCK_SIZE, + offset: EROFS_BLOCK_SIZE_4096, writer, v6_available_blocks: vec![ VecDeque::new(); - EROFS_BLOCK_SIZE as usize / EROFS_INODE_SLOT_SIZE + EROFS_BLOCK_SIZE_4096 as usize / EROFS_INODE_SLOT_SIZE ], }) } @@ -991,17 +991,17 @@ impl BootstrapContext { // If found it, return the offset where we can store data. // If not, return 0. pub(crate) fn allocate_available_block(&mut self, size: u64) -> u64 { - if size >= EROFS_BLOCK_SIZE { + if size >= EROFS_BLOCK_SIZE_4096 { return 0; } let min_idx = div_round_up(size, EROFS_INODE_SLOT_SIZE as u64) as usize; - let max_idx = div_round_up(EROFS_BLOCK_SIZE, EROFS_INODE_SLOT_SIZE as u64) as usize; + let max_idx = div_round_up(EROFS_BLOCK_SIZE_4096, EROFS_INODE_SLOT_SIZE as u64) as usize; for idx in min_idx..max_idx { let blocks = &mut self.v6_available_blocks[idx]; if let Some(mut offset) = blocks.pop_front() { - offset += EROFS_BLOCK_SIZE - (idx * EROFS_INODE_SLOT_SIZE) as u64; + offset += EROFS_BLOCK_SIZE_4096 - (idx * EROFS_INODE_SLOT_SIZE) as u64; self.append_available_block(offset + (min_idx * EROFS_INODE_SLOT_SIZE) as u64); return offset; } @@ -1012,8 +1012,8 @@ impl BootstrapContext { // Append the block that `offset` belongs to corresponding deque. pub(crate) fn append_available_block(&mut self, offset: u64) { - if offset % EROFS_BLOCK_SIZE != 0 { - let avail = EROFS_BLOCK_SIZE - offset % EROFS_BLOCK_SIZE; + if offset % EROFS_BLOCK_SIZE_4096 != 0 { + let avail = EROFS_BLOCK_SIZE_4096 - offset % EROFS_BLOCK_SIZE_4096; let idx = avail as usize / EROFS_INODE_SLOT_SIZE; self.v6_available_blocks[idx].push_back(round_down_4k(offset)); } diff --git a/rafs/src/builder/core/node.rs b/rafs/src/builder/core/node.rs index c03286ee061..bed39fe57d2 100644 --- a/rafs/src/builder/core/node.rs +++ b/rafs/src/builder/core/node.rs @@ -235,15 +235,19 @@ impl Node { return Ok(0); } else if self.is_symlink() { if let Some(symlink) = self.info.symlink.as_ref() { - self.inode - .set_digest(RafsDigest::from_buf(symlink.as_bytes(), ctx.digester)); + if self.inode.is_v5() { + self.inode + .set_digest(RafsDigest::from_buf(symlink.as_bytes(), ctx.digester)); + } return Ok(0); } else { return Err(Error::msg("inode's symblink is invalid.")); } } else if self.is_special() { - self.inode - .set_digest(RafsDigest::hasher(ctx.digester).digest_finalize()); + if self.inode.is_v5() { + self.inode + .set_digest(RafsDigest::hasher(ctx.digester).digest_finalize()); + } return Ok(0); } @@ -581,7 +585,7 @@ impl Node { // calculate it later by ourself. if !self.is_dir() { self.inode.set_size(meta.st_size()); - self.set_inode_blocks(); + self.v5_set_inode_blocks(); } self.info = Arc::new(info); @@ -736,23 +740,6 @@ impl Node { &self.info.target } - /// Calculate and set `i_blocks` for inode. 
- /// - /// In order to support repeatable build, we can't reuse `i_blocks` from source filesystems, - /// so let's calculate it by ourself for stable `i_block`. - /// - /// Normal filesystem includes the space occupied by Xattr into the directory size, - /// let's follow the normal behavior. - pub fn set_inode_blocks(&mut self) { - // Set inode blocks for RAFS v5 inode, v6 will calculate it at runtime. - if let InodeWrapper::V5(_) = self.inode { - self.inode.set_blocks(div_round_up( - self.inode.size() + self.info.xattrs.aligned_size_v5() as u64, - 512, - )); - } - } - /// Set symlink target for the node. pub fn set_symlink(&mut self, symlink: OsString) { let mut info = self.info.deref().clone(); diff --git a/rafs/src/builder/core/v5.rs b/rafs/src/builder/core/v5.rs index ba0c72d7a67..35c0e3497ec 100644 --- a/rafs/src/builder/core/v5.rs +++ b/rafs/src/builder/core/v5.rs @@ -8,7 +8,7 @@ use std::mem::size_of; use anyhow::{bail, Context, Result}; use nydus_utils::digest::{DigestHasher, RafsDigest}; -use nydus_utils::{root_tracer, timing_tracer, try_round_up_4k}; +use nydus_utils::{div_round_up, root_tracer, timing_tracer, try_round_up_4k}; use super::bootstrap::STARGZ_DEFAULT_BLOCK_SIZE; use super::node::Node; @@ -102,7 +102,24 @@ impl Node { // Safe to unwrap() because we have u32 for child count. self.inode.set_size(try_round_up_4k(d_size).unwrap()); } - self.set_inode_blocks(); + self.v5_set_inode_blocks(); + } + + /// Calculate and set `i_blocks` for inode. + /// + /// In order to support repeatable build, we can't reuse `i_blocks` from source filesystems, + /// so let's calculate it by ourself for stable `i_block`. + /// + /// Normal filesystem includes the space occupied by Xattr into the directory size, + /// let's follow the normal behavior. + pub fn v5_set_inode_blocks(&mut self) { + // Set inode blocks for RAFS v5 inode, v6 will calculate it at runtime. + if let InodeWrapper::V5(_) = self.inode { + self.inode.set_blocks(div_round_up( + self.inode.size() + self.info.xattrs.aligned_size_v5() as u64, + 512, + )); + } } } diff --git a/rafs/src/builder/core/v6.rs b/rafs/src/builder/core/v6.rs index 3347cbb9261..bbec79309e6 100644 --- a/rafs/src/builder/core/v6.rs +++ b/rafs/src/builder/core/v6.rs @@ -22,7 +22,7 @@ use crate::metadata::inode::new_v6_inode; use crate::metadata::layout::v6::{ align_offset, calculate_nid, RafsV6BlobTable, RafsV6Device, RafsV6Dirent, RafsV6InodeChunkAddr, RafsV6InodeChunkHeader, RafsV6OndiskInode, RafsV6SuperBlock, RafsV6SuperBlockExt, - EROFS_BLOCK_SIZE, EROFS_DEVTABLE_OFFSET, EROFS_INODE_CHUNK_BASED, EROFS_INODE_FLAT_INLINE, + EROFS_BLOCK_SIZE_4096, EROFS_DEVTABLE_OFFSET, EROFS_INODE_CHUNK_BASED, EROFS_INODE_FLAT_INLINE, EROFS_INODE_FLAT_PLAIN, EROFS_INODE_SLOT_SIZE, EROFS_SUPER_BLOCK_SIZE, EROFS_SUPER_OFFSET, }; use crate::metadata::RafsStore; @@ -162,8 +162,8 @@ impl Node { for child in tree.children.iter() { let len = child.node.name().as_bytes().len() + size_of::(); // erofs disk format requires dirent to be aligned with 4096. 
- if (d_size % EROFS_BLOCK_SIZE) + len as u64 > EROFS_BLOCK_SIZE { - d_size = div_round_up(d_size as u64, EROFS_BLOCK_SIZE) * EROFS_BLOCK_SIZE; + if (d_size % EROFS_BLOCK_SIZE_4096) + len as u64 > EROFS_BLOCK_SIZE_4096 { + d_size = div_round_up(d_size as u64, EROFS_BLOCK_SIZE_4096) * EROFS_BLOCK_SIZE_4096; } d_size += len as u64; } @@ -217,7 +217,7 @@ impl Node { // // let inode_size = self.v6_size_with_xattr(); - let tail: u64 = d_size % EROFS_BLOCK_SIZE; + let tail: u64 = d_size % EROFS_BLOCK_SIZE_4096; // We use a simple inline strategy here: // If the inode size with xattr + tail data size <= EROFS_BLOCK_SIZE, @@ -229,7 +229,7 @@ impl Node { // since it contain only single blocks with some unused space, the available space can only // be smaller than EROFS_BLOCK_SIZE, therefore we can't use our used blocks to store the // inode plus the tail data bigger than EROFS_BLOCK_SIZE. - let should_inline = tail != 0 && (inode_size + tail) <= EROFS_BLOCK_SIZE; + let should_inline = tail != 0 && (inode_size + tail) <= EROFS_BLOCK_SIZE_4096; // If should inline, we first try to allocate space for the inode together with tail data // using used blocks. @@ -240,10 +240,11 @@ impl Node { self.v6_datalayout = if should_inline { self.v6_offset = bootstrap_ctx.allocate_available_block(inode_size + tail); if self.v6_offset == 0 { - let available = EROFS_BLOCK_SIZE - bootstrap_ctx.offset % EROFS_BLOCK_SIZE; + let available = + EROFS_BLOCK_SIZE_4096 - bootstrap_ctx.offset % EROFS_BLOCK_SIZE_4096; if available < inode_size + tail { bootstrap_ctx.append_available_block(bootstrap_ctx.offset); - bootstrap_ctx.align_offset(EROFS_BLOCK_SIZE); + bootstrap_ctx.align_offset(EROFS_BLOCK_SIZE_4096); } self.v6_offset = bootstrap_ctx.offset; @@ -252,7 +253,7 @@ impl Node { if d_size != tail { bootstrap_ctx.append_available_block(bootstrap_ctx.offset); - bootstrap_ctx.align_offset(EROFS_BLOCK_SIZE); + bootstrap_ctx.align_offset(EROFS_BLOCK_SIZE_4096); } self.v6_dirents_offset = bootstrap_ctx.offset; bootstrap_ctx.offset += round_down_4k(d_size); @@ -269,10 +270,10 @@ impl Node { } bootstrap_ctx.append_available_block(bootstrap_ctx.offset); - bootstrap_ctx.align_offset(EROFS_BLOCK_SIZE); + bootstrap_ctx.align_offset(EROFS_BLOCK_SIZE_4096); self.v6_dirents_offset = bootstrap_ctx.offset; bootstrap_ctx.offset += d_size; - bootstrap_ctx.align_offset(EROFS_BLOCK_SIZE); + bootstrap_ctx.align_offset(EROFS_BLOCK_SIZE_4096); EROFS_INODE_FLAT_PLAIN }; @@ -336,7 +337,7 @@ impl Node { for (offset, name, file_type) in self.v6_dirents.iter() { let len = name.len() + size_of::(); // write to bootstrap when it will exceed EROFS_BLOCK_SIZE - if used + len as u64 > EROFS_BLOCK_SIZE { + if used + len as u64 > EROFS_BLOCK_SIZE_4096 { for (entry, name) in dirents.iter_mut() { trace!("{:?} nameoff {}", name, nameoff); entry.set_name_offset(nameoff as u16); @@ -363,7 +364,7 @@ impl Node { nameoff = 0; used = 0; // track where we're going to write. 
- dirent_off += EROFS_BLOCK_SIZE; + dirent_off += EROFS_BLOCK_SIZE_4096; } trace!( @@ -527,7 +528,7 @@ impl Node { impl BuildContext { pub fn v6_block_size(&self) -> u64 { - EROFS_BLOCK_SIZE + EROFS_BLOCK_SIZE_4096 } pub fn v6_block_addr(&self, offset: u64) -> Result { @@ -609,7 +610,7 @@ impl Bootstrap { let blob_table_size = blob_table.size() as u64; let blob_table_offset = align_offset( (EROFS_DEVTABLE_OFFSET as u64) + devtable_len as u64, - EROFS_BLOCK_SIZE as u64, + EROFS_BLOCK_SIZE_4096 as u64, ); let blob_table_entries = blobs.len(); assert!(blob_table_entries < u8::MAX as usize); @@ -639,11 +640,11 @@ impl Bootstrap { // When using nid 0 as root nid, // the root directory will not be shown by glibc's getdents/readdir. // Because in some OS, ino == 0 represents corresponding file is deleted. - let orig_meta_addr = bootstrap_ctx.nodes[0].v6_offset - EROFS_BLOCK_SIZE; + let orig_meta_addr = bootstrap_ctx.nodes[0].v6_offset - EROFS_BLOCK_SIZE_4096; let meta_addr = if blob_table_size > 0 { align_offset( blob_table_offset + blob_table_size + prefetch_table_size as u64, - EROFS_BLOCK_SIZE as u64, + EROFS_BLOCK_SIZE_4096 as u64, ) } else { orig_meta_addr @@ -733,7 +734,7 @@ impl Bootstrap { .writer .seek_to_end() .context("failed to seek to bootstrap's end for chunk table")?; - assert_eq!(pos % EROFS_BLOCK_SIZE, 0); + assert_eq!(pos % EROFS_BLOCK_SIZE_4096, 0); let mut devtable: Vec = Vec::new(); let mut block_count = 0u32; let mut inlined_chunk_digest = true; @@ -834,7 +835,7 @@ impl Bootstrap { .writer .seek_to_end() .context("failed to seek to bootstrap's end for chunk table")?; - let padding = align_offset(pos, EROFS_BLOCK_SIZE as u64) - pos; + let padding = align_offset(pos, EROFS_BLOCK_SIZE_4096 as u64) - pos; bootstrap_ctx .writer .write_all(&WRITE_PADDING_DATA[0..padding as usize]) diff --git a/rafs/src/builder/tarball.rs b/rafs/src/builder/tarball.rs index ced521c409f..de72514dad2 100644 --- a/rafs/src/builder/tarball.rs +++ b/rafs/src/builder/tarball.rs @@ -66,7 +66,7 @@ impl Read for TarReader { } } -pub(crate) struct TarballTreeBuilder<'a> { +struct TarballTreeBuilder<'a> { ty: ConversionType, layer_idx: u16, ctx: &'a mut BuildContext, @@ -100,7 +100,7 @@ impl<'a> TarballTreeBuilder<'a> { let file = OpenOptions::new() .read(true) .open(self.ctx.source_path.clone()) - .with_context(|| "can not open source file for conversion")?; + .context("tarball: can not open source file for conversion")?; let reader = match self.ty { ConversionType::TarToRafs => TarReader::File(file), @@ -156,12 +156,12 @@ impl<'a> TarballTreeBuilder<'a> { // Generate RAFS node for each tar entry, and optionally adding missing parents. 
let entries = tar .entries() - .with_context(|| "failed to read entries from tar")?; + .context("tarball: failed to read entries from tar")?; for entry in entries { - let mut entry = entry.with_context(|| "failed to read entry from tar")?; + let mut entry = entry.context("tarball: failed to read entry from tar")?; let path = entry .path() - .with_context(|| "failed to to get path from tar entry")?; + .context("tarball: failed to to get path from tar entry")?; let path = PathBuf::from("/").join(path); let path = path.components().as_path(); if !self.is_special_files(path) { @@ -194,18 +194,24 @@ impl<'a> TarballTreeBuilder<'a> { let header = entry.header(); let entry_type = header.entry_type(); if entry_type.is_gnu_longname() { - return Err(anyhow!("unsupported gnu_longname from tar header")); + return Err(anyhow!("tarball: unsupported gnu_longname from tar header")); } else if entry_type.is_gnu_longlink() { - return Err(anyhow!("unsupported gnu_longlink from tar header")); + return Err(anyhow!("tarball: unsupported gnu_longlink from tar header")); } else if entry_type.is_pax_local_extensions() { - return Err(anyhow!("unsupported pax_local_extensions from tar header")); + return Err(anyhow!( + "tarball: unsupported pax_local_extensions from tar header" + )); } else if entry_type.is_pax_global_extensions() { - return Err(anyhow!("unsupported pax_global_extensions from tar header")); + return Err(anyhow!( + "tarball: unsupported pax_global_extensions from tar header" + )); } else if entry_type.is_contiguous() { - return Err(anyhow!("unsupported contiguous entry type from tar header")); + return Err(anyhow!( + "tarball: unsupported contiguous entry type from tar header" + )); } else if entry_type.is_gnu_sparse() { return Err(anyhow!( - "unsupported gnu sparse file extension from tar header" + "tarball: unsupported gnu sparse file extension from tar header" )); } @@ -226,12 +232,12 @@ impl<'a> TarballTreeBuilder<'a> { { let major = header .device_major() - .with_context(|| "failed to get device major from tar entry")? - .ok_or_else(|| anyhow!("failed to get major device from tar entry"))?; + .context("tarball: failed to get device major from tar entry")? + .ok_or_else(|| anyhow!("tarball: failed to get major device from tar entry"))?; let minor = header .device_minor() - .with_context(|| "failed to get device major from tar entry")? - .ok_or_else(|| anyhow!("failed to get minor device from tar entry"))?; + .context("tarball: failed to get device major from tar entry")? + .ok_or_else(|| anyhow!("tarball: failed to get minor device from tar entry"))?; makedev(major as u64, minor as u64) as u32 } else { u32::MAX @@ -241,11 +247,11 @@ impl<'a> TarballTreeBuilder<'a> { let (symlink, symlink_size) = if entry_type.is_symlink() { let symlink_link_path = entry .link_name() - .with_context(|| "failed to get target path for tar symlink entry")? - .ok_or_else(|| anyhow!("failed to get symlink target tor tar entry"))?; + .context("tarball: failed to get target path for tar symlink entry")? 
+ .ok_or_else(|| anyhow!("tarball: failed to get symlink target tor tar entry"))?; let symlink_size = symlink_link_path.as_os_str().byte_size(); if symlink_size > u16::MAX as usize { - bail!("symlink target from tar entry is too big"); + bail!("tarball: symlink target from tar entry is too big"); } file_size = symlink_size as u64; flags |= RafsV5InodeFlags::SYMLINK; @@ -261,7 +267,7 @@ impl<'a> TarballTreeBuilder<'a> { if entry_type.is_file() { child_count = div_round_up(file_size, self.ctx.chunk_size as u64); if child_count > RAFS_MAX_CHUNKS_PER_BLOB as u64 { - bail!("file size 0x{:x} is too big", file_size); + bail!("tarball: file size 0x{:x} is too big", file_size); } } @@ -271,8 +277,8 @@ impl<'a> TarballTreeBuilder<'a> { if entry_type.is_hard_link() { let link_path = entry .link_name() - .with_context(|| "failed to get target path for tar symlink entry")? - .ok_or_else(|| anyhow!("failed to get symlink target tor tar entry"))?; + .context("tarball: failed to get target path for tar symlink entry")? + .ok_or_else(|| anyhow!("tarball: failed to get symlink target tor tar entry"))?; let link_path = PathBuf::from("/").join(link_path); let link_path = link_path.components().as_path(); if let Some((_ino, _index)) = self.path_inode_map.get(link_path) { @@ -280,7 +286,7 @@ impl<'a> TarballTreeBuilder<'a> { index = *_index; } else { bail!( - "unknown target {} for hardlink {}", + "tarball: unknown target {} for hardlink {}", link_path.display(), path.as_ref().display() ); @@ -306,7 +312,7 @@ impl<'a> TarballTreeBuilder<'a> { } Err(e) => { return Err(anyhow!( - "failed to parse PaxExtension from tar header, {}", + "tarball: failed to parse PaxExtension from tar header, {}", e )) } @@ -393,8 +399,8 @@ impl<'a> TarballTreeBuilder<'a> { } // Update inode.i_blocks for RAFS v5. - if !entry_type.is_dir() { - node.set_inode_blocks(); + if self.ctx.fs_version == RafsVersion::V5 && !entry_type.is_dir() { + node.v5_set_inode_blocks(); } Ok(node) @@ -413,7 +419,7 @@ impl<'a> TarballTreeBuilder<'a> { }; if uid > u32::MAX as u64 || gid > u32::MAX as u64 { bail!( - "uid {:x} or gid {:x} from tar entry is out of range", + "tarball: uid {:x} or gid {:x} from tar entry is out of range", uid, gid ); @@ -425,7 +431,7 @@ impl<'a> TarballTreeBuilder<'a> { fn get_mode(header: &Header) -> Result { let mode = header .mode() - .with_context(|| "failed to get permission/mode from tar entry")?; + .context("tarball: failed to get permission/mode from tar entry")?; let ty = match header.entry_type() { EntryType::Regular | EntryType::Link => libc::S_IFREG, EntryType::Directory => libc::S_IFDIR, @@ -433,7 +439,7 @@ impl<'a> TarballTreeBuilder<'a> { EntryType::Block => libc::S_IFBLK, EntryType::Char => libc::S_IFCHR, EntryType::Fifo => libc::S_IFIFO, - _ => bail!("unsupported tar entry type"), + _ => bail!("tarball: unsupported tar entry type"), }; Ok((mode & !libc::S_IFMT as u32) | ty as u32) } @@ -444,14 +450,14 @@ impl<'a> TarballTreeBuilder<'a> { } else { path.file_name().ok_or_else(|| { anyhow!( - "failed to get file name from tar entry with path {}", + "tarball: failed to get file name from tar entry with path {}", path.display() ) })? 
}; if name.len() > u16::MAX as usize { bail!( - "file name {} from tar entry is too long", + "tarball: file name {} from tar entry is too long", name.to_str().unwrap_or_default() ); } @@ -460,10 +466,24 @@ impl<'a> TarballTreeBuilder<'a> { fn make_lost_dirs>(&mut self, path: P, nodes: &mut Vec) -> Result<()> { if let Some(parent_path) = path.as_ref().parent() { - if !self.path_inode_map.contains_key(parent_path) { - self.make_lost_dirs(parent_path, nodes)?; - let node = self.create_directory(parent_path, nodes.len())?; - nodes.push(node); + match self.path_inode_map.get(parent_path) { + Some((i, idx)) => { + if !nodes[*idx].is_dir() { + bail!( + "tarball: existing inode is not a directory {} {} {}", + i, + nodes.len(), + nodes[*idx].is_dir() + ); + } + } + None => { + if !self.path_inode_map.contains_key(parent_path) { + self.make_lost_dirs(parent_path, nodes)?; + let node = self.create_directory(parent_path, nodes.len())?; + nodes.push(node); + } + } } } @@ -565,17 +585,22 @@ impl Builder for TarballBuilder { let layer_idx = u16::from(bootstrap_ctx.layered); let mut blob_writer = match self.ty { ConversionType::EStargzToRafs - | ConversionType::TargzToRafs - | ConversionType::TarToRafs | ConversionType::EStargzToRef - | ConversionType::TargzToRef => { + | ConversionType::TargzToRafs + | ConversionType::TargzToRef + | ConversionType::TarToRafs => { if let Some(blob_stor) = ctx.blob_storage.clone() { ArtifactWriter::new(blob_stor)? } else { - return Err(anyhow!("missing configuration for target path")); + return Err(anyhow!("tarball: missing configuration for target path")); } } - _ => return Err(anyhow!("unsupported image conversion type '{}'", self.ty)), + _ => { + return Err(anyhow!( + "tarball: unsupported image conversion type '{}'", + self.ty + )) + } }; let mut tree_builder = diff --git a/rafs/src/metadata/direct_v6.rs b/rafs/src/metadata/direct_v6.rs index 8c71777e03e..04f3c417444 100644 --- a/rafs/src/metadata/direct_v6.rs +++ b/rafs/src/metadata/direct_v6.rs @@ -40,7 +40,7 @@ use crate::metadata::layout::v5::RafsV5ChunkInfo; use crate::metadata::layout::v6::{ rafsv6_load_blob_extra_info, recover_namespace, RafsV6BlobTable, RafsV6Dirent, RafsV6InodeChunkAddr, RafsV6InodeCompact, RafsV6InodeExtended, RafsV6OndiskInode, - RafsV6XattrEntry, RafsV6XattrIbodyHeader, EROFS_BLOCK_SIZE, EROFS_INODE_CHUNK_BASED, + RafsV6XattrEntry, RafsV6XattrIbodyHeader, EROFS_BLOCK_SIZE_4096, EROFS_INODE_CHUNK_BASED, EROFS_INODE_FLAT_INLINE, EROFS_INODE_FLAT_PLAIN, EROFS_INODE_SLOT_SIZE, EROFS_I_DATALAYOUT_BITS, EROFS_I_VERSION_BIT, EROFS_I_VERSION_BITS, }; @@ -100,7 +100,7 @@ impl DirectSuperBlockV6 { /// Create a new instance of `DirectSuperBlockV6`. pub fn new(meta: &RafsSuperMeta) -> Self { let state = DirectMappingState::new(meta); - let meta_offset = meta.meta_blkaddr as usize * EROFS_BLOCK_SIZE as usize; + let meta_offset = meta.meta_blkaddr as usize * EROFS_BLOCK_SIZE_4096 as usize; let info = DirectCachedInfo { meta_offset, root_ino: meta.root_nid as Inode, @@ -170,8 +170,11 @@ impl DirectSuperBlockV6 { let file = clone_file(r.as_raw_fd())?; let md = file.metadata()?; let len = md.len(); - let md_range = - MetaRange::new(EROFS_BLOCK_SIZE as u64, len - EROFS_BLOCK_SIZE as u64, true)?; + let md_range = MetaRange::new( + EROFS_BLOCK_SIZE_4096 as u64, + len - EROFS_BLOCK_SIZE_4096 as u64, + true, + )?; // Validate blob table layout as blob_table_start and blob_table_offset is read from bootstrap. 
let old_state = self.state.load(); @@ -227,7 +230,7 @@ impl DirectSuperBlockV6 { let mut v6_chunk = RafsV6InodeChunkAddr::new(); v6_chunk.set_blob_index(chunk.blob_index()); v6_chunk.set_blob_ci_index(chunk.id()); - v6_chunk.set_block_addr((chunk.uncompressed_offset() / EROFS_BLOCK_SIZE) as u32); + v6_chunk.set_block_addr((chunk.uncompressed_offset() / EROFS_BLOCK_SIZE_4096) as u32); chunk_map.insert(v6_chunk, idx); } @@ -238,7 +241,7 @@ impl DirectSuperBlockV6 { impl RafsSuperInodes for DirectSuperBlockV6 { fn get_max_ino(&self) -> Inode { // The maximum inode number supported by RAFSv6 is smaller than limit of fuse-backend-rs. - (0xffff_ffffu64) * EROFS_BLOCK_SIZE / EROFS_INODE_SLOT_SIZE as u64 + (0xffff_ffffu64) * EROFS_BLOCK_SIZE_4096 / EROFS_INODE_SLOT_SIZE as u64 } /// Find inode offset by ino from inode table and mmap to OndiskInode. @@ -323,7 +326,7 @@ impl OndiskInodeWrapper { offset: usize, ) -> Result { let inode = DirectSuperBlockV6::disk_inode(state, offset)?; - let blocks_count = div_round_up(inode.size(), EROFS_BLOCK_SIZE); + let blocks_count = div_round_up(inode.size(), EROFS_BLOCK_SIZE_4096); Ok(OndiskInodeWrapper { mapping, @@ -358,7 +361,7 @@ impl OndiskInodeWrapper { index: usize, ) -> RafsResult<&'a RafsV6Dirent> { let offset = self.data_block_offset(inode, block_index)?; - if size_of::() * (index + 1) >= EROFS_BLOCK_SIZE as usize { + if size_of::() * (index + 1) >= EROFS_BLOCK_SIZE_4096 as usize { Err(RafsError::InvalidImageData) } else if let Some(offset) = offset.checked_add(size_of::() * index) { state @@ -386,7 +389,7 @@ impl OndiskInodeWrapper { let buf: &[u8] = match index.cmp(&(max_entries - 1)) { Ordering::Less => { let next_de = self.get_entry(state, inode, block_index, index + 1)?; - if next_de.e_nameoff as u64 >= EROFS_BLOCK_SIZE { + if next_de.e_nameoff as u64 >= EROFS_BLOCK_SIZE_4096 { return Err(RafsError::InvalidImageData); } let len = next_de.e_nameoff.checked_sub(de.e_nameoff).ok_or_else(|| { @@ -407,7 +410,7 @@ impl OndiskInodeWrapper { } Ordering::Equal => { let base = de.e_nameoff as u64; - if base >= EROFS_BLOCK_SIZE { + if base >= EROFS_BLOCK_SIZE_4096 { return Err(RafsError::InvalidImageData); } @@ -416,12 +419,12 @@ impl OndiskInodeWrapper { // Because the other blocks should be fully used, while the last may not. 
let block_count = self.blocks_count() as usize; let len = match block_count.cmp(&(block_index + 1)) { - Ordering::Greater => (EROFS_BLOCK_SIZE - base) as usize, + Ordering::Greater => (EROFS_BLOCK_SIZE_4096 - base) as usize, Ordering::Equal => { - if self.size() % EROFS_BLOCK_SIZE == 0 { - EROFS_BLOCK_SIZE as usize + if self.size() % EROFS_BLOCK_SIZE_4096 == 0 { + EROFS_BLOCK_SIZE_4096 as usize } else { - (self.size() % EROFS_BLOCK_SIZE - base) as usize + (self.size() % EROFS_BLOCK_SIZE_4096 - base) as usize } } Ordering::Less => return Err(RafsError::InvalidImageData), @@ -486,7 +489,7 @@ impl OndiskInodeWrapper { if base.checked_add(index).is_none() || base + index > u32::MAX as usize { Err(RafsError::InvalidImageData) } else { - Ok((base + index) * EROFS_BLOCK_SIZE as usize) + Ok((base + index) * EROFS_BLOCK_SIZE_4096 as usize) } } @@ -671,7 +674,9 @@ impl OndiskInodeWrapper { .get_entry(&state, inode, block_index, 0) .map_err(err_invalidate_data)?; let name_offset = head_entry.e_nameoff as usize; - if name_offset as u64 >= EROFS_BLOCK_SIZE || name_offset % size_of::() != 0 { + if name_offset as u64 >= EROFS_BLOCK_SIZE_4096 + || name_offset % size_of::() != 0 + { Err(enoent!(format!( "v6: invalid e_nameoff {} from directory entry", name_offset @@ -689,7 +694,7 @@ impl RafsInode for OndiskInodeWrapper { let max_inode = self.mapping.get_max_ino(); if self.ino() > max_inode - || self.offset > (u32::MAX as usize) * EROFS_BLOCK_SIZE as usize + || self.offset > (u32::MAX as usize) * EROFS_BLOCK_SIZE_4096 as usize || inode.nlink() == 0 || self.get_name_size() as usize > (RAFS_MAX_NAME + 1) { @@ -989,7 +994,7 @@ impl RafsInode for OndiskInodeWrapper { fn get_symlink(&self) -> Result { let state = self.state(); let inode = self.disk_inode(&state); - if inode.size() > EROFS_BLOCK_SIZE { + if inode.size() > EROFS_BLOCK_SIZE_4096 { return Err(einval!(format!( "v6: invalid symlink size {}", inode.size() diff --git a/rafs/src/metadata/layout/v6.rs b/rafs/src/metadata/layout/v6.rs index b208d401322..fd3c60edff7 100644 --- a/rafs/src/metadata/layout/v6.rs +++ b/rafs/src/metadata/layout/v6.rs @@ -29,9 +29,9 @@ use crate::{impl_bootstrap_converter, impl_pub_getter_setter, RafsIoReader, Rafs /// EROFS metadata slot size. pub const EROFS_INODE_SLOT_SIZE: usize = 1 << EROFS_INODE_SLOT_BITS; /// Bits of EROFS logical block size. -pub const EROFS_BLOCK_BITS: u8 = 12; +pub const EROFS_BLOCK_BITS_12: u8 = 12; /// EROFS logical block size. -pub const EROFS_BLOCK_SIZE: u64 = 1u64 << EROFS_BLOCK_BITS; +pub const EROFS_BLOCK_SIZE_4096: u64 = 1u64 << EROFS_BLOCK_BITS_12; /// Offset of EROFS super block. pub const EROFS_SUPER_OFFSET: u16 = 1024; @@ -151,19 +151,19 @@ impl RafsV6SuperBlock { /// Validate the Rafs v6 super block. 
pub fn validate(&self, meta_size: u64) -> Result<()> { - if meta_size < EROFS_BLOCK_SIZE { + if meta_size < EROFS_BLOCK_SIZE_4096 { return Err(einval!(format!( "invalid Rafs v6 metadata size: {}", meta_size ))); } - if meta_size & (EROFS_BLOCK_SIZE - 1) != 0 { + if meta_size & (EROFS_BLOCK_SIZE_4096 - 1) != 0 { return Err(einval!(format!( "invalid Rafs v6 metadata size: bootstrap size {} is not aligned", meta_size ))); } - let meta_addr = u32::from_le(self.s_meta_blkaddr) as u64 * EROFS_BLOCK_SIZE; + let meta_addr = u32::from_le(self.s_meta_blkaddr) as u64 * EROFS_BLOCK_SIZE_4096; if meta_addr > meta_size { return Err(einval!(format!( "invalid Rafs v6 meta block address 0x{:x}, meta file size 0x{:x}", @@ -185,7 +185,7 @@ impl RafsV6SuperBlock { ))); } - if self.s_blkszbits != EROFS_BLOCK_BITS { + if self.s_blkszbits != EROFS_BLOCK_BITS_12 { return Err(einval!(format!( "invalid block size bits {} in Rafsv6 superblock", self.s_blkszbits @@ -296,8 +296,8 @@ impl RafsV6SuperBlock { /// Set EROFS meta block address. pub fn set_meta_addr(&mut self, meta_addr: u64) { - assert!((meta_addr / EROFS_BLOCK_SIZE) <= u32::MAX as u64); - self.s_meta_blkaddr = u32::to_le((meta_addr / EROFS_BLOCK_SIZE) as u32); + assert!((meta_addr / EROFS_BLOCK_SIZE_4096) <= u32::MAX as u64); + self.s_meta_blkaddr = u32::to_le((meta_addr / EROFS_BLOCK_SIZE_4096) as u32); } /// Get device table offset. @@ -313,15 +313,17 @@ impl RafsStore for RafsV6SuperBlock { // This method must be called before RafsV6SuperBlockExt::store(), otherwise data written by // RafsV6SuperBlockExt::store() will be overwritten. fn store(&self, w: &mut dyn RafsIoWrite) -> Result { - debug_assert!(((EROFS_SUPER_OFFSET + EROFS_SUPER_BLOCK_SIZE) as u64) < EROFS_BLOCK_SIZE); + debug_assert!( + ((EROFS_SUPER_OFFSET + EROFS_SUPER_BLOCK_SIZE) as u64) < EROFS_BLOCK_SIZE_4096 + ); w.write_all(&[0u8; EROFS_SUPER_OFFSET as usize])?; w.write_all(self.as_ref())?; w.write_all( - &[0u8; (EROFS_BLOCK_SIZE as usize + &[0u8; (EROFS_BLOCK_SIZE_4096 as usize - (EROFS_SUPER_OFFSET + EROFS_SUPER_BLOCK_SIZE) as usize)], )?; - Ok(EROFS_BLOCK_SIZE as usize) + Ok(EROFS_BLOCK_SIZE_4096 as usize) } } @@ -332,7 +334,7 @@ impl Default for RafsV6SuperBlock { s_magic: u32::to_le(EROFS_SUPER_MAGIC_V1), s_checksum: 0, s_feature_compat: u32::to_le(EROFS_FEATURE_COMPAT_RAFS_V6), - s_blkszbits: EROFS_BLOCK_BITS, + s_blkszbits: EROFS_BLOCK_BITS_12, s_extslots: 0u8, s_root_nid: 0, s_inos: 0, @@ -390,7 +392,7 @@ impl RafsV6SuperBlockExt { pub fn load(&mut self, r: &mut RafsIoReader) -> Result<()> { r.seek_to_offset((EROFS_SUPER_OFFSET + EROFS_SUPER_BLOCK_SIZE) as u64)?; r.read_exact(self.as_mut())?; - r.seek_to_offset(EROFS_BLOCK_SIZE as u64)?; + r.seek_to_offset(EROFS_BLOCK_SIZE_4096 as u64)?; Ok(()) } @@ -420,7 +422,7 @@ impl RafsV6SuperBlockExt { let chunk_size = u32::from_le(self.s_chunk_size) as u64; if !chunk_size.is_power_of_two() - || !(EROFS_BLOCK_SIZE..=RAFS_MAX_CHUNK_SIZE).contains(&chunk_size) + || !(EROFS_BLOCK_SIZE_4096..=RAFS_MAX_CHUNK_SIZE).contains(&chunk_size) { return Err(einval!("invalid chunk size in Rafs v6 extended superblock")); } @@ -429,8 +431,8 @@ impl RafsV6SuperBlockExt { let blob_offset = self.blob_table_offset(); let blob_size = self.blob_table_size() as u64; - if blob_offset & (EROFS_BLOCK_SIZE - 1) != 0 - || blob_offset < EROFS_BLOCK_SIZE + if blob_offset & (EROFS_BLOCK_SIZE_4096 - 1) != 0 + || blob_offset < EROFS_BLOCK_SIZE_4096 || blob_offset < devslot_end || blob_size % size_of::() as u64 != 0 || blob_offset.checked_add(blob_size).is_none() @@ -447,8 
+449,8 @@ impl RafsV6SuperBlockExt { if self.chunk_table_size() > 0 { let chunk_tbl_offset = self.chunk_table_offset(); let chunk_tbl_size = self.chunk_table_size(); - if chunk_tbl_offset < EROFS_BLOCK_SIZE - || chunk_tbl_offset % EROFS_BLOCK_SIZE != 0 + if chunk_tbl_offset < EROFS_BLOCK_SIZE_4096 + || chunk_tbl_offset % EROFS_BLOCK_SIZE_4096 != 0 || chunk_tbl_offset < devslot_end || chunk_tbl_size % size_of::() as u64 != 0 || chunk_tbl_offset.checked_add(chunk_tbl_size).is_none() @@ -473,7 +475,7 @@ impl RafsV6SuperBlockExt { if self.prefetch_table_size() > 0 && self.prefetch_table_offset() != 0 { let tbl_offset = self.prefetch_table_offset(); let tbl_size = self.prefetch_table_size() as u64; - if tbl_offset < EROFS_BLOCK_SIZE + if tbl_offset < EROFS_BLOCK_SIZE_4096 || tbl_size % size_of::() as u64 != 0 || tbl_offset < devslot_end || tbl_offset.checked_add(tbl_size).is_none() @@ -581,9 +583,9 @@ impl RafsV6SuperBlockExt { impl RafsStore for RafsV6SuperBlockExt { fn store(&self, w: &mut dyn RafsIoWrite) -> Result { w.write_all(self.as_ref())?; - w.seek_offset(EROFS_BLOCK_SIZE as u64)?; + w.seek_offset(EROFS_BLOCK_SIZE_4096 as u64)?; - Ok(EROFS_BLOCK_SIZE as usize - (EROFS_SUPER_OFFSET + EROFS_SUPER_BLOCK_SIZE) as usize) + Ok(EROFS_BLOCK_SIZE_4096 as usize - (EROFS_SUPER_OFFSET + EROFS_SUPER_BLOCK_SIZE) as usize) } } @@ -1043,7 +1045,7 @@ impl RafsV6Dirent { /// Set name offset of the dirent. pub fn set_name_offset(&mut self, offset: u16) { - assert!(offset < EROFS_BLOCK_SIZE as u16); + assert!(offset < EROFS_BLOCK_SIZE_4096 as u16); self.e_nameoff = u16::to_le(offset); } @@ -1078,8 +1080,8 @@ impl RafsV6InodeChunkHeader { pub fn new(chunk_size: u64) -> Self { assert!(chunk_size.is_power_of_two()); let chunk_bits = chunk_size.trailing_zeros() as u16; - assert!(chunk_bits >= EROFS_BLOCK_BITS as u16); - let chunk_bits = chunk_bits - EROFS_BLOCK_BITS as u16; + assert!(chunk_bits >= EROFS_BLOCK_BITS_12 as u16); + let chunk_bits = chunk_bits - EROFS_BLOCK_BITS_12 as u16; assert!(chunk_bits <= EROFS_CHUNK_FORMAT_SIZE_MASK); let format = EROFS_CHUNK_FORMAT_INDEXES_FLAG | chunk_bits; @@ -1492,7 +1494,7 @@ impl RafsV6Blob { let c_size = u32::from_le(self.chunk_size) as u64; if c_size.count_ones() != 1 - || !(EROFS_BLOCK_SIZE..=RAFS_MAX_CHUNK_SIZE).contains(&c_size) + || !(EROFS_BLOCK_SIZE_4096..=RAFS_MAX_CHUNK_SIZE).contains(&c_size) || c_size != chunk_size as u64 { error!( diff --git a/service/src/blob_cache.rs b/service/src/blob_cache.rs index ce8040ca93b..2afa936fbe9 100644 --- a/service/src/blob_cache.rs +++ b/service/src/blob_cache.rs @@ -16,7 +16,7 @@ use nydus_api::{ BlobCacheEntry, BlobCacheList, BlobCacheObjectId, ConfigV2, BLOB_CACHE_TYPE_DATA_BLOB, BLOB_CACHE_TYPE_META_BLOB, }; -use nydus_rafs::metadata::layout::v6::{EROFS_BLOCK_BITS, EROFS_BLOCK_SIZE}; +use nydus_rafs::metadata::layout::v6::{EROFS_BLOCK_BITS_12, EROFS_BLOCK_SIZE_4096}; use nydus_rafs::metadata::{RafsBlobExtraInfo, RafsSuper}; use nydus_storage::cache::BlobCache; use nydus_storage::device::BlobInfo; @@ -460,7 +460,7 @@ impl MetaBlob { e })?; let size = md.len(); - if size % EROFS_BLOCK_SIZE != 0 || (size >> EROFS_BLOCK_BITS) > u32::MAX as u64 { + if size % EROFS_BLOCK_SIZE_4096 != 0 || (size >> EROFS_BLOCK_BITS_12) > u32::MAX as u64 { return Err(einval!(format!( "blob_cache: metadata blob size (0x{:x}) is invalid", size @@ -475,7 +475,7 @@ impl MetaBlob { /// Get number of blocks in unit of EROFS_BLOCK_SIZE. 
pub fn blocks(&self) -> u32 { - (self.size >> EROFS_BLOCK_BITS) as u32 + (self.size >> EROFS_BLOCK_BITS_12) as u32 } /// Read data from the cached metadata blob in asynchronous mode. diff --git a/service/src/block_device.rs b/service/src/block_device.rs index ae099f5c465..3b3cc261a4a 100644 --- a/service/src/block_device.rs +++ b/service/src/block_device.rs @@ -16,7 +16,7 @@ use std::io::Result; use std::sync::Arc; use dbs_allocator::{Constraint, IntervalTree, NodeState, Range}; -use nydus_rafs::metadata::layout::v6::EROFS_BLOCK_BITS; +use nydus_rafs::metadata::layout::v6::EROFS_BLOCK_BITS_12; use tokio_uring::buf::IoBufMut; use crate::blob_cache::{BlobCacheMgr, BlobConfig, DataBlob, MetaBlob}; @@ -108,7 +108,7 @@ impl BlockDevice { ranges.update(&range, BlockRange::Hole); } - let blocks = blob_info.uncompressed_size() >> EROFS_BLOCK_BITS; + let blocks = blob_info.uncompressed_size() >> EROFS_BLOCK_BITS_12; if blocks > u32::MAX as u64 || blocks + extra_info.mapped_blkaddr as u64 > u32::MAX as u64 { @@ -163,7 +163,7 @@ impl BlockDevice { mut buf: T, ) -> (Result, T) { if start.checked_add(blocks).is_none() - || (blocks as u64) << EROFS_BLOCK_BITS > buf.bytes_total() as u64 + || (blocks as u64) << EROFS_BLOCK_BITS_12 > buf.bytes_total() as u64 { return ( Err(einval!("block_device: invalid parameters to read()")), @@ -171,7 +171,7 @@ impl BlockDevice { ); } - let total_size = (blocks as usize) << EROFS_BLOCK_BITS; + let total_size = (blocks as usize) << EROFS_BLOCK_BITS_12; let mut pos = 0; while blocks > 0 { let (range, node) = match self.ranges.get_superset(&Range::new_point(start)) { @@ -189,7 +189,7 @@ impl BlockDevice { if let NodeState::Valued(r) = node { let count = min(range.max as u32 - start + 1, blocks); - let sz = (count as usize) << EROFS_BLOCK_BITS as usize; + let sz = (count as usize) << EROFS_BLOCK_BITS_12 as usize; let mut s = buf.slice(pos..pos + sz); let (res, s) = match r { BlockRange::Hole => { @@ -197,11 +197,11 @@ impl BlockDevice { (Ok(sz), s) } BlockRange::MetaBlob(m) => { - m.async_read((start as u64) << EROFS_BLOCK_BITS, s).await + m.async_read((start as u64) << EROFS_BLOCK_BITS_12, s).await } BlockRange::DataBlob(d) => { let offset = start as u64 - range.min; - d.async_read(offset << EROFS_BLOCK_BITS, s).await + d.async_read(offset << EROFS_BLOCK_BITS_12, s).await } }; diff --git a/service/src/block_nbd.rs b/service/src/block_nbd.rs index 63ca6f7709a..f8ba8be4610 100644 --- a/service/src/block_nbd.rs +++ b/service/src/block_nbd.rs @@ -21,7 +21,7 @@ use std::thread::JoinHandle; use bytes::{Buf, BufMut}; use mio::Waker; use nydus_api::{BlobCacheEntry, BuildTimeInfo}; -use nydus_rafs::metadata::layout::v6::{EROFS_BLOCK_BITS, EROFS_BLOCK_SIZE}; +use nydus_rafs::metadata::layout::v6::{EROFS_BLOCK_BITS_12, EROFS_BLOCK_SIZE_4096}; use nydus_storage::utils::alloc_buf; use tokio::sync::broadcast::{channel, Sender}; use tokio_uring::buf::IoBuf; @@ -85,7 +85,11 @@ impl NbdService { error!("block_nbd: failed to open NBD device {}", nbd_path); e })?; - nbd_ioctl(nbd_dev.as_raw_fd(), NBD_SET_BLOCK_SIZE, EROFS_BLOCK_SIZE)?; + nbd_ioctl( + nbd_dev.as_raw_fd(), + NBD_SET_BLOCK_SIZE, + EROFS_BLOCK_SIZE_4096, + )?; nbd_ioctl(nbd_dev.as_raw_fd(), NBD_SET_BLOCKS, device.blocks() as u64)?; nbd_ioctl(nbd_dev.as_raw_fd(), NBD_SET_TIMEOUT, 60)?; nbd_ioctl(nbd_dev.as_raw_fd(), NBD_CLEAR_SOCK, 0)?; @@ -225,8 +229,8 @@ impl NbdWorker { let mut code = NBD_OK; let mut data_buf = alloc_buf(len as usize); if magic != NBD_REQUEST_MAGIC - || pos % EROFS_BLOCK_SIZE != 0 - || len as u64 % 
EROFS_BLOCK_SIZE != 0 + || pos % EROFS_BLOCK_SIZE_4096 != 0 + || len as u64 % EROFS_BLOCK_SIZE_4096 != 0 { warn!( "block_nbd: invalid request magic 0x{:x}, type {}, pos 0x{:x}, len 0x{:x}", @@ -234,8 +238,8 @@ impl NbdWorker { ); code = NBD_EINVAL; } else if ty == NBD_CMD_READ { - let start = (pos >> EROFS_BLOCK_BITS) as u32; - let count = len >> EROFS_BLOCK_BITS; + let start = (pos >> EROFS_BLOCK_BITS_12) as u32; + let count = len >> EROFS_BLOCK_BITS_12; let (res, buf) = device.async_read(start, count, data_buf).await; data_buf = buf; match res { diff --git a/src/bin/nydus-image/inspect.rs b/src/bin/nydus-image/inspect.rs index 2f491a32ec6..d4ab7462b5e 100644 --- a/src/bin/nydus-image/inspect.rs +++ b/src/bin/nydus-image/inspect.rs @@ -217,13 +217,12 @@ impl RafsInspector { } let child_inode = dir_inode.get_child_by_name(&child_name)?; - let mut chunks = Vec::>::new(); - // only reg_file can get and print chunk info if !child_inode.is_reg() { return Ok(RafsInodeWalkAction::Break); } + let mut chunks = Vec::>::new(); let chunk_count = child_inode.get_chunk_count(); for idx in 0..chunk_count { let cur_chunk = child_inode.get_chunk_info(idx)?; diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs index d9d65534917..952fa1a701e 100644 --- a/src/bin/nydus-image/main.rs +++ b/src/bin/nydus-image/main.rs @@ -671,6 +671,12 @@ impl Command { .unwrap_or_default() .parse()?; let blob_data_size = Self::get_blob_size(matches, conversion_type)?; + let features = Features::try_from( + matches + .get_one::("features") + .map(|s| s.as_str()) + .unwrap_or_default(), + )?; match conversion_type { ConversionType::DirectoryToRafs => { @@ -776,12 +782,6 @@ impl Command { } } - let features = Features::try_from( - matches - .get_one::("features") - .map(|s| s.as_str()) - .unwrap_or_default(), - )?; if features.is_enabled(Feature::BlobToc) && version == RafsVersion::V5 { bail!("`--features blob-toc` can't be used with `--version 5` "); }
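// For reference: a minimal, self-contained sketch of the two calculations this
// patch touches, assuming only the div_round_up() helper and the constants shown
// in the diff above. The concrete sizes in main() are hypothetical example values,
// not taken from the patch.

const EROFS_BLOCK_BITS_12: u8 = 12;
const EROFS_BLOCK_SIZE_4096: u64 = 1u64 << EROFS_BLOCK_BITS_12;

fn div_round_up(n: u64, d: u64) -> u64 {
    debug_assert!(d != 0);
    (n + d - 1) / d
}

fn main() {
    // The renamed constants encode the EROFS logical block size directly:
    // 12 bits, i.e. 4096-byte blocks.
    assert_eq!(EROFS_BLOCK_SIZE_4096, 4096);

    // RAFS v5 i_blocks, as computed by v5_set_inode_blocks(): the inode size plus
    // the v5-aligned xattr size, expressed in 512-byte sectors and rounded up,
    // so repeated builds produce a stable value.
    let inode_size: u64 = 5000; // hypothetical file size
    let xattrs_aligned_size_v5: u64 = 128; // hypothetical aligned xattr size
    let i_blocks = div_round_up(inode_size + xattrs_aligned_size_v5, 512);
    assert_eq!(i_blocks, 11); // ceil(5128 / 512) == 11
}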