Skip to content

Commit

Permalink
storage: prepare to support multiple blob meta formats
Browse files Browse the repository at this point in the history
Change the blob meta implementation to prepare for supporting
multiple blob meta formats.

Signed-off-by: Jiang Liu <gerry@linux.alibaba.com>
  • Loading branch information
jiangliu committed Oct 17, 2022
1 parent e5d459a commit 36a2ed4
Show file tree
Hide file tree
Showing 3 changed files with 589 additions and 380 deletions.
2 changes: 1 addition & 1 deletion src/bin/nydus-image/core/blob.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ impl Blob {
blob_meta_info: &BlobMetaChunkArray,
compressor: compress::Algorithm,
) -> Result<(std::borrow::Cow<[u8]>, BlobMetaHeaderOndisk)> {
let data = blob_meta_info.as_u8_slice();
let data = blob_meta_info.as_byte_slice();
let (buf, compressed) = compress::compress(data, compressor)
.with_context(|| "failed to compress blob chunk info array".to_string())?;

Expand Down
188 changes: 188 additions & 0 deletions storage/src/meta/chunk_info_v1.rs
Original file line number Diff line number Diff line change
Expand Up @@ -85,3 +85,191 @@ impl BlobMetaChunkInfo for BlobChunkInfoV1Ondisk {
self.compressed_size() != self.uncompressed_size()
}
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::meta::{BlobMetaChunkArray, BlobMetaInfo, BlobMetaState};
    use nydus_utils::filemap::FileMapState;
    use std::mem::ManuallyDrop;
    use std::sync::Arc;

    /// Exercise the accessor round-trips on `BlobChunkInfoV1Ondisk` and check
    /// the decoding of a hand-crafted legacy bit pattern.
    #[test]
    fn test_new_chunk_on_disk() {
        let mut chunk = BlobChunkInfoV1Ondisk::default();

        // A default (all-zero) chunk decodes to offset 0 but size 1, and its
        // aligned uncompressed end is one 4 KiB page.
        // NOTE(review): size 1 from a zeroed struct suggests sizes are stored
        // as `size - 1` in the packed fields — confirm against the v1 layout.
        assert_eq!(chunk.compressed_offset(), 0);
        assert_eq!(chunk.compressed_size(), 1);
        assert_eq!(chunk.compressed_end(), 1);
        assert_eq!(chunk.uncompressed_offset(), 0);
        assert_eq!(chunk.uncompressed_size(), 1);
        assert_eq!(chunk.aligned_uncompressed_end(), 0x1000);

        // Small values round-trip through the setters unchanged.
        chunk.set_compressed_offset(0x1000);
        chunk.set_compressed_size(0x100);
        assert_eq!(chunk.compressed_offset(), 0x1000);
        assert_eq!(chunk.compressed_size(), 0x100);

        chunk.set_uncompressed_offset(0x1000);
        chunk.set_uncompressed_size(0x100);
        assert_eq!(chunk.uncompressed_offset(), 0x1000);
        assert_eq!(chunk.uncompressed_size(), 0x100);

        // Values near the top of the encodable range also round-trip:
        // compressed offset up to 40 bits, sizes up to 0x1000000.
        chunk.set_compressed_offset(0xffffffffff);
        chunk.set_compressed_size(0x1000000);
        assert_eq!(chunk.compressed_offset(), 0xffffffffff);
        assert_eq!(chunk.compressed_size(), 0x1000000);

        // Uncompressed offset is 4 KiB-aligned (low 12 bits not stored).
        chunk.set_uncompressed_offset(0xffffffff000);
        chunk.set_uncompressed_size(0x1000000);
        assert_eq!(chunk.uncompressed_offset(), 0xffffffff000);
        assert_eq!(chunk.uncompressed_size(), 0x1000000);

        // For testing old format compatibility.
        // Hand-built bit patterns for the legacy layout; decoded values below
        // pin the expected field extraction (sizes decode as `stored + 1`,
        // uncompressed offset is stored in 4 KiB units).
        let chunk = BlobChunkInfoV1Ondisk {
            uncomp_info: 0xffff_ffff_f100_0000,
            comp_info: 0xffff_f0ff_ffff_ffff,
        };
        assert_eq!(chunk.uncompressed_size(), 0x000f_ffff + 1);
        assert_eq!(chunk.uncompressed_offset(), 0xffff_1000 * 0x1000);
        assert_eq!(chunk.compressed_size(), 0x000f_ffff + 1);
        assert_eq!(chunk.compressed_offset(), 0x00ff_ffff_ffff);
    }

    /// Binary search over a chunk array that contains a hole between the two
    /// chunks: lookups inside either chunk succeed, lookups in the hole or
    /// past the end fail.
    #[test]
    fn test_get_chunk_index_with_hole() {
        // Two chunks covering uncompressed ranges [0, 0x2000) and
        // [0x100000, 0x102000); the gap in between is a hole.
        // NOTE(review): ranges inferred from the assertions below — the exact
        // bit packing of `uncomp_info`/`comp_info` lives in the v1 layout.
        let state = BlobMetaState {
            blob_index: 0,
            compressed_size: 0,
            uncompressed_size: 0,
            chunk_info_array: ManuallyDrop::new(BlobMetaChunkArray::V1(vec![
                BlobChunkInfoV1Ondisk {
                    uncomp_info: 0x01ff_f000_0000_0000,
                    comp_info: 0x00ff_f000_0000_0000,
                },
                BlobChunkInfoV1Ondisk {
                    uncomp_info: 0x01ff_f000_0010_0000,
                    comp_info: 0x00ff_f000_0010_0000,
                },
            ])),
            _filemap: FileMapState::default(),
            is_stargz: false,
        };

        // Addresses at the start and last byte of chunk 0 resolve to index 0.
        assert_eq!(
            state
                .chunk_info_array
                .get_chunk_index_nocheck(0, false)
                .unwrap(),
            0
        );
        assert_eq!(
            state
                .chunk_info_array
                .get_chunk_index_nocheck(0x1fff, false)
                .unwrap(),
            0
        );
        // Addresses at the start and last byte of chunk 1 resolve to index 1.
        assert_eq!(
            state
                .chunk_info_array
                .get_chunk_index_nocheck(0x100000, false)
                .unwrap(),
            1
        );
        assert_eq!(
            state
                .chunk_info_array
                .get_chunk_index_nocheck(0x101fff, false)
                .unwrap(),
            1
        );
        // Addresses in the hole (0x2000..0x100000) or past the last chunk
        // must error out.
        state
            .chunk_info_array
            .get_chunk_index_nocheck(0x2000, false)
            .unwrap_err();
        state
            .chunk_info_array
            .get_chunk_index_nocheck(0xfffff, false)
            .unwrap_err();
        state
            .chunk_info_array
            .get_chunk_index_nocheck(0x102000, false)
            .unwrap_err();
    }

    /// End-to-end checks of `BlobMetaInfo::get_chunks_uncompressed`: range
    /// queries return the covering chunk set, and out-of-range or
    /// hole-crossing requests fail.
    #[test]
    fn test_get_chunks() {
        // Five chunks; the first three are contiguous in uncompressed space,
        // then a hole, then two more starting at 0x100000.
        // NOTE(review): geometry inferred from the assertions below.
        let state = BlobMetaState {
            blob_index: 1,
            compressed_size: 0x6001,
            uncompressed_size: 0x102001,
            chunk_info_array: ManuallyDrop::new(BlobMetaChunkArray::V1(vec![
                BlobChunkInfoV1Ondisk {
                    uncomp_info: 0x0100_0000_0000_0000,
                    comp_info: 0x00ff_f000_0000_0000,
                },
                BlobChunkInfoV1Ondisk {
                    uncomp_info: 0x01ff_f000_0000_2000,
                    comp_info: 0x01ff_f000_0000_1000,
                },
                BlobChunkInfoV1Ondisk {
                    uncomp_info: 0x01ff_f000_0000_4000,
                    comp_info: 0x00ff_f000_0000_3000,
                },
                BlobChunkInfoV1Ondisk {
                    uncomp_info: 0x01ff_f000_0010_0000,
                    comp_info: 0x00ff_f000_0000_4000,
                },
                BlobChunkInfoV1Ondisk {
                    uncomp_info: 0x01ff_f000_0010_2000,
                    comp_info: 0x00ff_f000_0000_5000,
                },
            ])),
            _filemap: FileMapState::default(),
            is_stargz: false,
        };
        let info = BlobMetaInfo {
            state: Arc::new(state),
        };

        // A request fully inside chunk 0 returns exactly that chunk, with
        // the blob index propagated from the state.
        let vec = info.get_chunks_uncompressed(0x0, 0x1001, 0).unwrap();
        assert_eq!(vec.len(), 1);
        assert_eq!(vec[0].blob_index(), 1);
        assert_eq!(vec[0].id(), 0);
        assert_eq!(vec[0].compressed_offset(), 0);
        assert_eq!(vec[0].compressed_size(), 0x1000);
        assert_eq!(vec[0].uncompressed_offset(), 0);
        assert_eq!(vec[0].uncompressed_size(), 0x1001);
        assert!(vec[0].is_compressed());

        // A request spanning chunks 0 and 1 returns both; chunk 1 has equal
        // compressed/uncompressed size, so it reports as not compressed.
        let vec = info.get_chunks_uncompressed(0x0, 0x4000, 0).unwrap();
        assert_eq!(vec.len(), 2);
        assert_eq!(vec[1].blob_index(), 1);
        assert_eq!(vec[1].id(), 1);
        assert_eq!(vec[1].compressed_offset(), 0x1000);
        assert_eq!(vec[1].compressed_size(), 0x2000);
        assert_eq!(vec[1].uncompressed_offset(), 0x2000);
        assert_eq!(vec[1].uncompressed_size(), 0x2000);
        assert!(!vec[1].is_compressed());

        // One byte past 0x4000 pulls in the third chunk as well.
        let vec = info.get_chunks_uncompressed(0x0, 0x4001, 0).unwrap();
        assert_eq!(vec.len(), 3);

        // A request inside the region after the hole hits chunk 3 only.
        let vec = info.get_chunks_uncompressed(0x100000, 0x2000, 0).unwrap();
        assert_eq!(vec.len(), 1);

        // Requests that cross the hole, exceed the blob's uncompressed size,
        // or start past the last chunk must all fail.
        assert!(info.get_chunks_uncompressed(0x0, 0x6001, 0).is_err());
        assert!(info.get_chunks_uncompressed(0x0, 0xfffff, 0).is_err());
        assert!(info.get_chunks_uncompressed(0x0, 0x100000, 0).is_err());
        assert!(info.get_chunks_uncompressed(0x0, 0x104000, 0).is_err());
        assert!(info.get_chunks_uncompressed(0x0, 0x104001, 0).is_err());
        assert!(info.get_chunks_uncompressed(0x100000, 0x2001, 0).is_err());
        assert!(info.get_chunks_uncompressed(0x100000, 0x4000, 0).is_err());
        assert!(info.get_chunks_uncompressed(0x100000, 0x4001, 0).is_err());
        // An absurdly large size must not overflow the range check.
        assert!(info
            .get_chunks_uncompressed(0x102000, 0xffff_ffff_ffff_ffff, 0)
            .is_err());
        assert!(info.get_chunks_uncompressed(0x104000, 0x1, 0).is_err());
    }
}
Loading

0 comments on commit 36a2ed4

Please sign in to comment.