Skip to content

Commit

Permalink
Merge pull request #1125 from dragonflyoss/dev/v2.3
Browse files Browse the repository at this point in the history
Prepare for exposing nydus images as block devices
  • Loading branch information
imeoer authored Mar 3, 2023
2 parents bca1b8a + 02d1df3 commit 04e4349
Show file tree
Hide file tree
Showing 2 changed files with 128 additions and 112 deletions.
38 changes: 22 additions & 16 deletions api/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -130,18 +130,14 @@ impl ConfigV2 {
/// Get cache working directory.
pub fn get_cache_working_directory(&self) -> Result<String> {
let cache = self.get_cache_config()?;
match cache.cache_type.as_str() {
"blobcache" | "filecache" => {
if let Some(c) = cache.file_cache.as_ref() {
return Ok(c.work_dir.clone());
}
if cache.is_filecache() {
if let Some(c) = cache.file_cache.as_ref() {
return Ok(c.work_dir.clone());
}
"fscache" => {
if let Some(c) = cache.fs_cache.as_ref() {
return Ok(c.work_dir.clone());
}
} else if cache.is_fscache() {
if let Some(c) = cache.fs_cache.as_ref() {
return Ok(c.work_dir.clone());
}
_ => {}
}

Err(Error::new(
Expand Down Expand Up @@ -634,25 +630,35 @@ impl CacheConfigV2 {
true
}

/// Check whether the cache type is `filecache`.
///
/// The type name `blobcache` is also accepted and treated as equivalent to
/// `filecache` (both map to the same `file_cache` configuration section).
pub fn is_filecache(&self) -> bool {
    self.cache_type == "blobcache" || self.cache_type == "filecache"
}

/// Check whether the cache type is `fscache`.
pub fn is_fscache(&self) -> bool {
    matches!(self.cache_type.as_str(), "fscache")
}

/// Get configuration information for file cache.
pub fn get_filecache_config(&self) -> Result<&FileCacheConfig> {
if self.cache_type != "blobcache" && self.cache_type != "filecache" {
Err(einval!("cache type is not 'filecache'"))
} else {
if self.is_filecache() {
self.file_cache
.as_ref()
.ok_or_else(|| einval!("no configuration information for filecache"))
} else {
Err(einval!("cache type is not 'filecache'"))
}
}

/// Get configuration information for fscache.
pub fn get_fscache_config(&self) -> Result<&FsCacheConfig> {
if self.cache_type != "fscache" {
Err(einval!("cache type is not 'fscache'"))
} else {
if self.is_fscache() {
self.fs_cache
.as_ref()
.ok_or_else(|| einval!("no configuration information for fscache"))
} else {
Err(einval!("cache type is not 'fscache'"))
}
}
}
Expand Down
202 changes: 106 additions & 96 deletions src/bin/nydus-image/core/bootstrap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -539,51 +539,15 @@ impl Bootstrap {
// | | |devslot | | | | |
// +---+---------+------------+-------------+----------------------------------------------+

// get devt_slotoff
let mut devtable: Vec<RafsV6Device> = Vec::new();
let blobs = blob_table.get_all();
let mut block_count = 0u32;
let mut inlined_chunk_digest = true;
for entry in blobs.iter() {
let mut devslot = RafsV6Device::new();
// blob id is String, which is processed by sha256.finalize().
if entry.blob_id().is_empty() {
bail!(" blob id is empty");
} else if entry.blob_id().len() > 64 {
bail!(format!(
"blob id length is bigger than 64 bytes, blob id {:?}",
entry.blob_id()
));
} else if entry.uncompressed_size() / EROFS_BLOCK_SIZE > u32::MAX as u64 {
bail!(format!(
"uncompressed blob size (0x:{:x}) is too big",
entry.uncompressed_size()
));
}
if !entry.has_feature(BlobFeatures::INLINED_CHUNK_DIGEST) {
inlined_chunk_digest = false;
}
let cnt = (entry.uncompressed_size() / EROFS_BLOCK_SIZE) as u32;
assert!(block_count.checked_add(cnt).is_some());
block_count += cnt;
let id = entry.blob_id();
let id = id.as_bytes();
let mut blob_id = [0u8; 64];
blob_id[..id.len()].copy_from_slice(id);
devslot.set_blob_id(&blob_id);
devslot.set_blocks(cnt);
devslot.set_mapped_blkaddr(0);
devtable.push(devslot);
}

let devtable_len = devtable.len() * size_of::<RafsV6Device>();
let devtable_len = blobs.len() * size_of::<RafsV6Device>();
let blob_table_size = blob_table.size() as u64;
let blob_table_offset = align_offset(
(EROFS_DEVTABLE_OFFSET as u64) + devtable_len as u64,
EROFS_BLOCK_SIZE as u64,
);
let blob_table_entries = blobs.len();
assert!(blob_table_entries < u16::MAX as usize);
assert!(blob_table_entries < u8::MAX as usize);
trace!(
"devtable len {} blob table offset {} blob table size {}",
devtable_len,
Expand Down Expand Up @@ -620,61 +584,27 @@ impl Bootstrap {
orig_meta_addr
};

// get devt_slotoff
let root_nid = calculate_nid(
bootstrap_ctx.nodes[0].v6_offset + (meta_addr - orig_meta_addr),
meta_addr,
);

// Dump superblock
let mut sb = RafsV6SuperBlock::new();
sb.set_inos(bootstrap_ctx.nodes.len() as u64);
sb.set_blocks(block_count);
sb.set_root_nid(root_nid as u16);
sb.set_meta_addr(meta_addr);
sb.set_extra_devices(blob_table_entries as u16);
sb.store(bootstrap_ctx.writer.as_mut())
.context("failed to store SB")?;

// Dump extended superblock
// Prepare extended super block
let ext_sb_offset = bootstrap_ctx.writer.seek_current(0)?;
let mut ext_sb = RafsV6SuperBlockExt::new();
ext_sb.set_compressor(ctx.compressor);
ext_sb.set_digester(ctx.digester);
ext_sb.set_chunk_size(ctx.chunk_size);
ext_sb.set_blob_table_offset(blob_table_offset);
ext_sb.set_blob_table_size(blob_table_size as u32);
// we need to write extended_sb until chunk table is dumped.
if ctx.explicit_uidgid {
ext_sb.set_explicit_uidgid();
}
if inlined_chunk_digest {
ext_sb.set_inlined_chunk_digest();
}

// dump devtslot
bootstrap_ctx
.writer
.seek_offset(EROFS_DEVTABLE_OFFSET as u64)
.context("failed to seek devtslot")?;
for slot in devtable.iter() {
slot.store(bootstrap_ctx.writer.as_mut())
.context("failed to store device slot")?;
}

// Dump blob table
bootstrap_ctx
.writer
.seek_offset(blob_table_offset as u64)
.context("failed seek for extended blob table offset")?;
blob_table
.store(bootstrap_ctx.writer.as_mut())
.context("failed to store extended blob table")?;

// collect all chunks in this bootstrap.
// HashChunkDict cannot be used here, because there will be duplicate chunks between layers,
// but there is no deduplication during the actual construction.
// Each layer uses the corresponding chunk in the blob of its own layer.
// If HashChunkDict is used here, it will cause duplication. The chunks are removed, resulting in incomplete chunk info.
// If HashChunkDict is used here, it will cause duplication. The chunks are removed,
// resulting in incomplete chunk info.
let mut chunk_cache = BTreeMap::new();

// Dump bootstrap
Expand All @@ -696,12 +626,14 @@ impl Bootstrap {
"dump_bootstrap",
Result<()>
)?;
Self::rafsv6_align_to_block(bootstrap_ctx)?;

// `Node` offset might be updated during above inodes dumping. So `get_prefetch_table` after it.
let prefetch_table = ctx
.prefetch
.get_rafsv6_prefetch_table(&bootstrap_ctx.nodes, meta_addr);
if let Some(mut pt) = prefetch_table {
assert!(pt.len() * size_of::<u32>() <= prefetch_table_size as usize);
// Device slots are very close to extended super block.
ext_sb.set_prefetch_table_offset(prefetch_table_offset);
ext_sb.set_prefetch_table_size(prefetch_table_size);
Expand All @@ -712,18 +644,12 @@ impl Bootstrap {
pt.store(bootstrap_ctx.writer.as_mut()).unwrap();
}

// append chunk info table.
// align chunk table to EROFS_BLOCK_SIZE firstly.
let pos = bootstrap_ctx
// TODO: get rid of the chunk info array.
// Dump chunk info array.
let chunk_table_offset = bootstrap_ctx
.writer
.seek_to_end()
.context("failed to seek to bootstrap's end for chunk table")?;
let padding = align_offset(pos, EROFS_BLOCK_SIZE as u64) - pos;
bootstrap_ctx
.writer
.write_all(&WRITE_PADDING_DATA[0..padding as usize])
.context("failed to write 0 to padding of bootstrap's end for chunk table")?;
let chunk_table_offset = pos + padding;
let mut chunk_table_size: u64 = 0;
for (_, chunk) in chunk_cache.iter() {
let chunk_size = chunk
Expand All @@ -736,40 +662,124 @@ impl Bootstrap {
"chunk_table offset {} size {}",
chunk_table_offset, chunk_table_size
);
Self::rafsv6_align_to_block(bootstrap_ctx)?;

// Prepare device slots.
let pos = bootstrap_ctx
.writer
.seek_to_end()
.context("failed to seek to bootstrap's end for chunk table")?;
assert_eq!(pos % EROFS_BLOCK_SIZE, 0);
let mut mapped_blkaddr = Self::align_mapped_blkaddr((pos / EROFS_BLOCK_SIZE) as u32);
let mut devtable: Vec<RafsV6Device> = Vec::new();
let mut block_count = 0u32;
let mut inlined_chunk_digest = true;
for entry in blobs.iter() {
let mut devslot = RafsV6Device::new();
// blob id is String, which is processed by sha256.finalize().
if entry.blob_id().is_empty() {
bail!(" blob id is empty");
} else if entry.blob_id().len() > 64 {
bail!(format!(
"blob id length is bigger than 64 bytes, blob id {:?}",
entry.blob_id()
));
} else if entry.uncompressed_size() / EROFS_BLOCK_SIZE > u32::MAX as u64 {
bail!(format!(
"uncompressed blob size (0x:{:x}) is too big",
entry.uncompressed_size()
));
}
if !entry.has_feature(BlobFeatures::INLINED_CHUNK_DIGEST) {
inlined_chunk_digest = false;
}
let cnt = (entry.uncompressed_size() / EROFS_BLOCK_SIZE) as u32;
assert!(block_count.checked_add(cnt).is_some());
block_count += cnt;
let id = entry.blob_id();
let id = id.as_bytes();
let mut blob_id = [0u8; 64];
blob_id[..id.len()].copy_from_slice(id);
devslot.set_blob_id(&blob_id);
devslot.set_blocks(cnt);
devslot.set_mapped_blkaddr(mapped_blkaddr);
devtable.push(devslot);

mapped_blkaddr = Self::align_mapped_blkaddr(mapped_blkaddr + cnt);
}

// EROFS does not have inode table, so we lose the chance to decide if this
// image has xattr. So we have to rewrite extended super block.
// Dump super block
let mut sb = RafsV6SuperBlock::new();
sb.set_inos(bootstrap_ctx.nodes.len() as u64);
sb.set_blocks(block_count);
sb.set_root_nid(root_nid as u16);
sb.set_meta_addr(meta_addr);
sb.set_extra_devices(blob_table_entries as u16);
bootstrap_ctx.writer.seek(SeekFrom::Start(0))?;
sb.store(bootstrap_ctx.writer.as_mut())
.context("failed to store SB")?;

// Dump extended super block.
if ctx.explicit_uidgid {
ext_sb.set_explicit_uidgid();
}
if ctx.has_xattr {
ext_sb.set_has_xattr();
}
if inlined_chunk_digest {
ext_sb.set_inlined_chunk_digest();
}
bootstrap_ctx.writer.seek(SeekFrom::Start(ext_sb_offset))?;
ext_sb
.store(bootstrap_ctx.writer.as_mut())
.context("failed to store extended SB")?;
.context("failed to store extended super block")?;

// Flush remaining data in BufWriter to file
// Dump device slots.
bootstrap_ctx
.writer
.seek_offset(EROFS_DEVTABLE_OFFSET as u64)
.context("failed to seek devtslot")?;
for slot in devtable.iter() {
slot.store(bootstrap_ctx.writer.as_mut())
.context("failed to store device slot")?;
}

// Dump blob table
bootstrap_ctx
.writer
.seek_offset(blob_table_offset as u64)
.context("failed seek for extended blob table offset")?;
blob_table
.store(bootstrap_ctx.writer.as_mut())
.context("failed to store extended blob table")?;

Ok(())
}

fn rafsv6_align_to_block(bootstrap_ctx: &mut BootstrapContext) -> Result<()> {
bootstrap_ctx
.writer
.flush()
.context("failed to flush bootstrap")?;
let pos = bootstrap_ctx
.writer
.seek_to_end()
.context("failed to seek to bootstrap's end")?;
debug!(
"align bootstrap to 4k {}",
align_offset(pos, EROFS_BLOCK_SIZE as u64)
);
.context("failed to seek to bootstrap's end for chunk table")?;
let padding = align_offset(pos, EROFS_BLOCK_SIZE as u64) - pos;
bootstrap_ctx
.writer
.write_all(&WRITE_PADDING_DATA[0..padding as usize])
.context("failed to write 0 to padding of bootstrap's end")?;
.context("failed to write 0 to padding of bootstrap's end for chunk table")?;
bootstrap_ctx
.writer
.flush()
.context("failed to flush bootstrap")?;

Ok(())
}

/// Round `addr` up to the next multiple of the device-mapping alignment,
/// where the alignment is 0x20_0000 bytes expressed as a count of EROFS blocks.
fn align_mapped_blkaddr(addr: u32) -> u32 {
    // TODO: define a const in nydus-service for 0x20_0000
    let alignment = (0x20_0000u64 / EROFS_BLOCK_SIZE) as u32;
    let aligned_units = (addr + alignment - 1) / alignment;
    aligned_units * alignment
}
}

0 comments on commit 04e4349

Please sign in to comment.