Skip to content

Commit

Permalink
builder: fix invalid compressed offset in image.blob toc entry
Browse files Browse the repository at this point in the history
The `blob_ctx.compressed_offset` has already been advanced to the end of the
compressed blob by the tar2rafs blob dump, so we shouldn't use it as the
initial compressed_offset; doing so generates an invalid TOC entry.

Signed-off-by: Yan Song <imeoer@linux.alibaba.com>
  • Loading branch information
imeoer committed Feb 21, 2023
1 parent aea56eb commit 079d50f
Show file tree
Hide file tree
Showing 4 changed files with 19 additions and 13 deletions.
2 changes: 1 addition & 1 deletion src/bin/nydus-image/core/blob.rs
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ impl Blob {
toc::TOC_ENTRY_BLOB_RAW,
compress::Algorithm::None,
blob_digest,
blob_ctx.compressed_offset,
blob_ctx.compressed_offset(),
blob_ctx.compressed_blob_size,
blob_ctx.uncompressed_blob_size,
)?;
Expand Down
8 changes: 4 additions & 4 deletions src/bin/nydus-image/core/blob_compact.rs
Original file line number Diff line number Diff line change
Expand Up @@ -158,23 +158,23 @@ impl ChunkSet {
// file offset field is useless
new_chunk.set_index(new_blob_ctx.chunk_count);
new_chunk.set_blob_index(new_blob_idx);
new_chunk.set_compressed_offset(new_blob_ctx.compressed_offset);
new_chunk.set_uncompressed_offset(new_blob_ctx.uncompressed_offset);
new_chunk.set_compressed_offset(new_blob_ctx.current_compressed_offset);
new_chunk.set_uncompressed_offset(new_blob_ctx.current_uncompressed_offset);
new_blob_ctx.add_chunk_meta_info(&new_chunk, None)?;
// insert change ops
chunks_change.push((chunk.clone(), new_chunk));

new_blob_ctx.blob_hash.update(&buf);
new_blob_ctx.chunk_count += 1;
new_blob_ctx.compressed_offset += chunk.compressed_size() as u64;
new_blob_ctx.current_compressed_offset += chunk.compressed_size() as u64;
new_blob_ctx.compressed_blob_size += chunk.compressed_size() as u64;

let aligned_size = if aligned_chunk {
try_round_up_4k(chunk.uncompressed_size()).unwrap()
} else {
chunk.uncompressed_size() as u64
};
new_blob_ctx.uncompressed_offset += aligned_size;
new_blob_ctx.current_uncompressed_offset += aligned_size;
new_blob_ctx.uncompressed_blob_size += aligned_size;
}
new_blob_ctx.blob_id = format!("{:x}", new_blob_ctx.blob_hash.clone().finalize());
Expand Down
14 changes: 10 additions & 4 deletions src/bin/nydus-image/core/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -377,8 +377,8 @@ pub struct BlobContext {
pub uncompressed_blob_size: u64,

/// Current blob offset cursor for writing to disk file.
pub compressed_offset: u64,
pub uncompressed_offset: u64,
pub current_compressed_offset: u64,
pub current_uncompressed_offset: u64,

/// The number of counts in a blob by the index of blob table.
pub chunk_count: u32,
Expand Down Expand Up @@ -429,8 +429,8 @@ impl BlobContext {
compressed_blob_size: 0,
uncompressed_blob_size: 0,

compressed_offset: blob_offset,
uncompressed_offset: 0,
current_compressed_offset: blob_offset,
current_uncompressed_offset: 0,

chunk_count: 0,
chunk_size: RAFS_DEFAULT_CHUNK_SIZE as u32,
Expand Down Expand Up @@ -690,6 +690,12 @@ impl BlobContext {
self.blob_hash.update(header.as_bytes());
Ok(header)
}

/// Get the starting offset of this blob's compressed data.
///
/// `current_compressed_offset` is the write cursor: it begins at the blob's
/// initial offset and is advanced by each chunk's compressed size, in step
/// with `compressed_blob_size`. It is therefore always >=
/// `compressed_blob_size`, so the subtraction below safely recovers the
/// initial offset without underflow.
pub fn compressed_offset(&self) -> u64 {
self.current_compressed_offset - self.compressed_blob_size
}
}

/// BlobManager stores all blob related information during build.
Expand Down
8 changes: 4 additions & 4 deletions src/bin/nydus-image/core/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -581,9 +581,9 @@ impl Node {
} else {
uncompressed_size
};
let pre_uncompressed_offset = blob_ctx.uncompressed_offset;
let pre_uncompressed_offset = blob_ctx.current_uncompressed_offset;
blob_ctx.uncompressed_blob_size = pre_uncompressed_offset + aligned_chunk_size as u64;
blob_ctx.uncompressed_offset += aligned_chunk_size as u64;
blob_ctx.current_uncompressed_offset += aligned_chunk_size as u64;
chunk.set_uncompressed_offset(pre_uncompressed_offset);
chunk.set_uncompressed_size(uncompressed_size);

Expand All @@ -595,12 +595,12 @@ impl Node {
let (compressed, is_compressed) = compress::compress(chunk_data, ctx.compressor)
.with_context(|| format!("failed to compress node file {:?}", self.path))?;
let compressed_size = compressed.len() as u32;
let pre_compressed_offset = blob_ctx.compressed_offset;
let pre_compressed_offset = blob_ctx.current_compressed_offset;
blob_writer
.write_all(&compressed)
.context("failed to write blob")?;
blob_ctx.blob_hash.update(&compressed);
blob_ctx.compressed_offset += compressed_size as u64;
blob_ctx.current_compressed_offset += compressed_size as u64;
blob_ctx.compressed_blob_size += compressed_size as u64;

chunk.set_compressed_offset(pre_compressed_offset);
Expand Down

0 comments on commit 079d50f

Please sign in to comment.