Introduce blob meta format v2 #796

Merged
merged 7 commits on Oct 26, 2022
8 changes: 8 additions & 0 deletions rafs/src/metadata/chunk.rs
@@ -176,6 +176,14 @@ impl ChunkWrapper {
         }
     }
 
+    /// Check whether the chunk is compressed or not.
+    pub fn is_compressed(&self) -> bool {
+        match self {
+            ChunkWrapper::V5(c) => c.flags.contains(BlobChunkFlags::COMPRESSED),
+            ChunkWrapper::V6(c) => c.flags.contains(BlobChunkFlags::COMPRESSED),
+        }
+    }
+
     #[allow(clippy::too_many_arguments)]
     /// Set a group of chunk information fields.
     pub fn set_chunk_info(

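Note: this accessor exists because blob meta v2 records per-chunk compression state (see the add_v2 call in context.rs below). A minimal consumer-side sketch of why that bit matters, with hypothetical stand-in functions rather than code from this PR: a chunk stored uncompressed can be served as-is, skipping the decompressor.

    fn chunk_data(raw: Vec<u8>, is_compressed: bool) -> Vec<u8> {
        if is_compressed {
            decompress_stub(raw) // stand-in for a real decompress call
        } else {
            raw // chunk was stored uncompressed; serve it directly
        }
    }

    fn decompress_stub(raw: Vec<u8>) -> Vec<u8> {
        raw // placeholder so the sketch compiles
    }

    fn main() {
        let stored = vec![1, 2, 3];
        assert_eq!(chunk_data(stored.clone(), false), stored);
    }
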
4 changes: 2 additions & 2 deletions rafs/src/metadata/layout/v6.rs
@@ -15,7 +15,7 @@ use std::sync::Arc;
 use lazy_static::lazy_static;
 
 use nydus_storage::device::{BlobFeatures, BlobInfo};
-use nydus_storage::meta::{BlobMetaHeaderOndisk, BLOB_FEATURE_4K_ALIGNED};
+use nydus_storage::meta::{BlobMetaHeaderOndisk, BLOB_META_FEATURE_4K_ALIGNED};
 use nydus_storage::RAFS_MAX_CHUNK_SIZE;
 use nydus_utils::{compress, digest, round_up, ByteSize};
 
@@ -1381,7 +1381,7 @@ impl RafsV6Blob {
         }
 
         // for now the uncompressed data chunk of v6 image is 4k aligned.
-        if u32::from_le(self.meta_features) & BLOB_FEATURE_4K_ALIGNED == 0 {
+        if u32::from_le(self.meta_features) & BLOB_META_FEATURE_4K_ALIGNED == 0 {
            error!("RafsV6Blob: idx {} invalid meta_features", blob_index);
            return false;
        }

7 changes: 4 additions & 3 deletions src/bin/nydus-image/builder/stargz.rs
@@ -19,7 +19,7 @@ use nydus_rafs::metadata::layout::v5::{RafsV5ChunkInfo, RafsV5Inode, RafsV5Inode
 use nydus_rafs::metadata::layout::RafsXAttrs;
 use nydus_rafs::metadata::{Inode, RafsVersion};
 use nydus_storage::device::BlobChunkFlags;
-use nydus_storage::meta::BlobMetaHeaderOndisk;
+use nydus_storage::meta::{BlobMetaHeaderOndisk, BLOB_META_FEATURE_CHUNK_INFO_V2};
 use nydus_storage::{RAFS_MAX_CHUNKS_PER_BLOB, RAFS_MAX_CHUNK_SIZE};
 use nydus_utils::compact::makedev;
 use nydus_utils::digest::{self, Algorithm, DigestHasher, RafsDigest};
@@ -666,6 +666,7 @@ impl StargzBuilder {
         if ctx.fs_version == RafsVersion::V6 {
             let mut header = BlobMetaHeaderOndisk::default();
             header.set_4k_aligned(true);
+            header.set_chunk_info_v2(ctx.blob_meta_features & BLOB_META_FEATURE_CHUNK_INFO_V2 != 0);
             blob_ctx.blob_meta_header = header;
             blob_ctx.set_meta_info_enabled(true);
         } else {
@@ -715,7 +716,7 @@ impl StargzBuilder {
             if !chunk_map.contains_key(chunk.inner.id()) {
                 let chunk_index = blob_ctx.alloc_chunk_index()?;
                 chunk.inner.set_index(chunk_index);
-                blob_ctx.add_chunk_meta_info(&chunk.inner)?;
+                blob_ctx.add_chunk_meta_info(&chunk.inner, 0)?;
                 chunk_map.insert(*chunk.inner.id(), chunk_index);
             } else {
                 bail!("stargz unexpected duplicated data chunk");
@@ -801,7 +802,7 @@ impl Builder for StargzBuilder {
         build_bootstrap(ctx, bootstrap_mgr, &mut bootstrap_ctx, blob_mgr, tree)?;
 
         // Generate node chunks and digest
-        let mut blob_ctx = BlobContext::new(ctx.blob_id.clone(), 0);
+        let mut blob_ctx = BlobContext::new(ctx.blob_id.clone(), 0, ctx.blob_meta_features);
         self.generate_nodes(ctx, &mut bootstrap_ctx, &mut blob_ctx, blob_mgr)?;
 
         // Dump blob meta

15 changes: 7 additions & 8 deletions src/bin/nydus-image/core/blob.rs
@@ -6,7 +6,7 @@ use std::io::Write;
 
 use anyhow::{Context, Result};
 use nydus_rafs::metadata::RAFS_MAX_CHUNK_SIZE;
-use nydus_storage::meta::{BlobChunkInfoOndisk, BlobMetaHeaderOndisk};
+use nydus_storage::meta::{BlobMetaChunkArray, BlobMetaHeaderOndisk};
 use nydus_utils::{compress, try_round_up_4k};
 use sha2::Digest;
 
@@ -75,15 +75,10 @@ impl Blob {
 
     fn dump_meta_data_raw(
         pos: u64,
-        blob_meta_info: &[BlobChunkInfoOndisk],
+        blob_meta_info: &BlobMetaChunkArray,
         compressor: compress::Algorithm,
     ) -> Result<(std::borrow::Cow<[u8]>, BlobMetaHeaderOndisk)> {
-        let data = unsafe {
-            std::slice::from_raw_parts(
-                blob_meta_info.as_ptr() as *const u8,
-                blob_meta_info.len() * std::mem::size_of::<BlobChunkInfoOndisk>(),
-            )
-        };
+        let data = blob_meta_info.as_byte_slice();
         let (buf, compressed) = compress::compress(data, compressor)
             .with_context(|| "failed to compress blob chunk info array".to_string())?;
 
@@ -98,6 +93,10 @@
         header.set_ci_compressed_size(buf.len() as u64);
         header.set_ci_uncompressed_size(data.len() as u64);
         header.set_4k_aligned(true);
+        match blob_meta_info {
+            BlobMetaChunkArray::V1(_) => header.set_chunk_info_v2(false),
+            BlobMetaChunkArray::V2(_) => header.set_chunk_info_v2(true),
+        }
 
         Ok((buf, header))
     }

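The refactor above replaces an open-coded unsafe cast with BlobMetaChunkArray::as_byte_slice, so the raw-bytes view lives next to the entry types it reinterprets and works for either chunk-info version. A minimal sketch of the underlying idea, assuming a plain slice of fixed-size on-disk entries (the crate's actual implementation may differ):

    fn as_byte_slice<T: Sized>(entries: &[T]) -> &[u8] {
        // SAFETY: reinterprets an initialized, contiguous slice as bytes;
        // size_of_val returns the slice's total byte length.
        unsafe {
            std::slice::from_raw_parts(entries.as_ptr() as *const u8, std::mem::size_of_val(entries))
        }
    }

    fn main() {
        let entries = [0u64; 4];
        assert_eq!(as_byte_slice(&entries).len(), 32);
    }
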
5 changes: 3 additions & 2 deletions src/bin/nydus-image/core/blob_compact.rs
@@ -164,7 +164,7 @@ impl ChunkSet {
            new_chunk.set_blob_index(new_blob_idx);
            new_chunk.set_compressed_offset(new_blob_ctx.compressed_offset);
            new_chunk.set_uncompressed_offset(new_blob_ctx.uncompressed_offset);
-            new_blob_ctx.add_chunk_meta_info(&new_chunk)?;
+            new_blob_ctx.add_chunk_meta_info(&new_chunk, 0)?;
            // insert change ops
            chunks_change.push((chunk.clone(), new_chunk));
 
@@ -520,7 +520,8 @@ impl BlobCompactor {
            }
            State::Rebuild(cs) => {
                let blob_storage = ArtifactStorage::FileDir(PathBuf::from(dir));
-                let mut blob_ctx = BlobContext::new(String::from(""), 0);
+                let mut blob_ctx =
+                    BlobContext::new(String::from(""), 0, build_ctx.blob_meta_features);
                blob_ctx.set_meta_info_enabled(self.is_v6());
                let blob_idx = self.new_blob_mgr.alloc_index()?;
                let new_chunks = cs.dump(

90 changes: 44 additions & 46 deletions src/bin/nydus-image/core/context.rs
@@ -28,7 +28,9 @@ use nydus_rafs::metadata::{Inode, RAFS_DEFAULT_CHUNK_SIZE};
 use nydus_rafs::metadata::{RafsSuperFlags, RafsVersion};
 use nydus_rafs::{RafsIoReader, RafsIoWrite};
 use nydus_storage::device::{BlobFeatures, BlobInfo};
-use nydus_storage::meta::{BlobChunkInfoOndisk, BlobMetaHeaderOndisk};
+use nydus_storage::meta::{
+    BlobMetaChunkArray, BlobMetaHeaderOndisk, BLOB_META_FEATURE_CHUNK_INFO_V2,
+};
 use nydus_utils::{compress, digest, div_round_up, round_down_4k};
 
 use super::chunk_dict::{ChunkDict, HashChunkDict};
@@ -329,8 +331,7 @@ pub struct BlobContext {
     /// Whether to generate blob metadata information.
     pub blob_meta_info_enabled: bool,
     /// Data chunks stored in the data blob, for v6.
-    /// TODO: zran
-    pub blob_meta_info: Vec<BlobChunkInfoOndisk>,
+    pub blob_meta_info: BlobMetaChunkArray,
     /// Blob metadata header stored in the data blob, for v6
     pub blob_meta_header: BlobMetaHeaderOndisk,
 
@@ -351,37 +352,20 @@ pub struct BlobContext {
     pub chunk_source: ChunkSource,
 }
 
-impl Clone for BlobContext {
-    fn clone(&self) -> Self {
-        Self {
-            blob_id: self.blob_id.clone(),
-            blob_hash: self.blob_hash.clone(),
-            blob_prefetch_size: self.blob_prefetch_size,
-            blob_meta_info_enabled: self.blob_meta_info_enabled,
-            blob_meta_info: self.blob_meta_info.clone(),
-            blob_meta_header: self.blob_meta_header,
-
-            compressed_blob_size: self.compressed_blob_size,
-            uncompressed_blob_size: self.uncompressed_blob_size,
-
-            compressed_offset: self.compressed_offset,
-            uncompressed_offset: self.uncompressed_offset,
-
-            chunk_count: self.chunk_count,
-            chunk_size: self.chunk_size,
-            chunk_source: self.chunk_source.clone(),
-        }
-    }
-}
-
 impl BlobContext {
-    pub fn new(blob_id: String, blob_offset: u64) -> Self {
+    pub fn new(blob_id: String, blob_offset: u64, features: u32) -> Self {
+        let blob_meta_info = if features & BLOB_META_FEATURE_CHUNK_INFO_V2 != 0 {
+            BlobMetaChunkArray::new_v2()
+        } else {
+            BlobMetaChunkArray::new_v1()
+        };
+
         Self {
             blob_id,
             blob_hash: Sha256::new(),
             blob_prefetch_size: 0,
             blob_meta_info_enabled: false,
-            blob_meta_info: Vec::new(),
+            blob_meta_info,
             blob_meta_header: BlobMetaHeaderOndisk::default(),
 
             compressed_blob_size: 0,
@@ -397,7 +381,7 @@ impl BlobContext {
     }
 
     pub fn from(ctx: &BuildContext, blob: &BlobInfo, chunk_source: ChunkSource) -> Self {
-        let mut blob_ctx = Self::new(blob.blob_id().to_owned(), 0);
+        let mut blob_ctx = Self::new(blob.blob_id().to_owned(), 0, blob.meta_flags());
 
         blob_ctx.blob_prefetch_size = blob.prefetch_size();
         blob_ctx.chunk_count = blob.chunk_count();
@@ -422,6 +406,9 @@ impl BlobContext {
                .blob_meta_header
                .set_ci_uncompressed_size(blob.meta_ci_uncompressed_size());
            blob_ctx.blob_meta_header.set_4k_aligned(true);
+            blob_ctx
+                .blob_meta_header
+                .set_chunk_info_v2(blob.meta_flags() & BLOB_META_FEATURE_CHUNK_INFO_V2 != 0);
            blob_ctx.blob_meta_info_enabled = true;
        }
 
@@ -447,24 +434,31 @@
        self.blob_meta_info_enabled = enable;
    }
 
-    pub fn add_chunk_meta_info(&mut self, chunk: &ChunkWrapper) -> Result<()> {
-        if !self.blob_meta_info_enabled {
-            return Ok(());
-        }
-
-        debug_assert!(chunk.index() as usize == self.blob_meta_info.len());
-        let mut meta = BlobChunkInfoOndisk::default();
-        meta.set_compressed_offset(chunk.compressed_offset());
-        meta.set_compressed_size(chunk.compressed_size());
-        meta.set_uncompressed_offset(chunk.uncompressed_offset());
-        meta.set_uncompressed_size(chunk.uncompressed_size());
-        trace!(
-            "chunk uncompressed {} size {}",
-            meta.uncompressed_offset(),
-            meta.uncompressed_size()
-        );
-        self.blob_meta_info.push(meta);
-
+    pub fn add_chunk_meta_info(&mut self, chunk: &ChunkWrapper, data: u64) -> Result<()> {
+        if self.blob_meta_info_enabled {
+            assert_eq!(chunk.index() as usize, self.blob_meta_info.len());
+            match &self.blob_meta_info {
+                BlobMetaChunkArray::V1(_) => {
+                    self.blob_meta_info.add_v1(
+                        chunk.compressed_offset(),
+                        chunk.compressed_size(),
+                        chunk.uncompressed_offset(),
+                        chunk.uncompressed_size(),
+                    );
+                }
+                BlobMetaChunkArray::V2(_) => {
+                    self.blob_meta_info.add_v2(
+                        chunk.compressed_offset(),
+                        chunk.compressed_size(),
+                        chunk.uncompressed_offset(),
+                        chunk.uncompressed_size(),
+                        chunk.is_compressed(),
+                        data,
+                    );
+                }
+            }
+        }
+
        Ok(())
    }
@@ -521,7 +515,8 @@ impl BlobManager {
    }
 
    fn new_blob_ctx(ctx: &BuildContext) -> Result<BlobContext> {
-        let mut blob_ctx = BlobContext::new(ctx.blob_id.clone(), ctx.blob_offset);
+        let mut blob_ctx =
+            BlobContext::new(ctx.blob_id.clone(), ctx.blob_offset, ctx.blob_meta_features);
        blob_ctx.set_chunk_size(ctx.chunk_size);
        blob_ctx.set_meta_info_enabled(ctx.fs_version == RafsVersion::V6);
 
@@ -831,6 +826,7 @@ pub struct BuildContext {
    /// Storage writing blob to single file or a directory.
    pub blob_storage: Option<ArtifactStorage>,
    pub blob_meta_storage: Option<ArtifactStorage>,
+    pub blob_meta_features: u32,
    pub inline_bootstrap: bool,
    pub has_xattr: bool,
}
@@ -870,6 +866,7 @@ impl BuildContext {
            prefetch,
            blob_storage,
            blob_meta_storage,
+            blob_meta_features: 0,
            inline_bootstrap,
            has_xattr: false,
        }
@@ -904,6 +901,7 @@ impl Default for BuildContext {
            prefetch: Prefetch::default(),
            blob_storage: None,
            blob_meta_storage: None,
+            blob_meta_features: 0,
            has_xattr: true,
            inline_bootstrap: false,
        }

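BlobContext::new now picks the chunk-info array version from the blob meta feature bits, and BuildContext carries those bits so every construction site (builder, compactor, stargz) agrees on the format. A simplified, self-contained sketch of that gating pattern; the entry layouts and the constant's value are invented for illustration (the real definitions live in nydus_storage::meta):

    const BLOB_META_FEATURE_CHUNK_INFO_V2: u32 = 0x2; // value assumed

    enum MetaChunkArray {
        V1(Vec<[u8; 16]>), // stand-in for the fixed-size v1 on-disk entry
        V2(Vec<[u8; 24]>), // stand-in for the wider v2 entry with extra fields
    }

    fn new_array(features: u32) -> MetaChunkArray {
        if features & BLOB_META_FEATURE_CHUNK_INFO_V2 != 0 {
            MetaChunkArray::V2(Vec::new())
        } else {
            MetaChunkArray::V1(Vec::new())
        }
    }

    fn main() {
        assert!(matches!(new_array(BLOB_META_FEATURE_CHUNK_INFO_V2), MetaChunkArray::V2(_)));
        assert!(matches!(new_array(0), MetaChunkArray::V1(_)));
    }
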
3 changes: 2 additions & 1 deletion src/bin/nydus-image/core/node.rs
@@ -514,7 +514,8 @@ impl Node {
            is_compressed,
        )?;
 
-        blob_ctx.add_chunk_meta_info(&chunk)?;
+        // TODO: figure correct value for data
+        blob_ctx.add_chunk_meta_info(&chunk, 0)?;
        blob_mgr.layered_chunk_dict.add_chunk(chunk.clone());
        self.chunks.push(NodeChunk {
            source: ChunkSource::Build,