From c6d2065c0caed46ce26cf5050d05c592d82d78ea Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Fri, 17 Mar 2023 23:44:41 +0800
Subject: [PATCH 1/2] utils: introduce mechanism to generate Merkle tree for verity

Introduce a mechanism to generate Merkle trees for verity.

Signed-off-by: Jiang Liu
---
 utils/src/filemap.rs |  22 +++
 utils/src/lib.rs     |   8 +
 utils/src/verity.rs  | 434 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 464 insertions(+)
 create mode 100644 utils/src/verity.rs

diff --git a/utils/src/filemap.rs b/utils/src/filemap.rs
index 78d9fbd6b94..0d92ae0ea5a 100644
--- a/utils/src/filemap.rs
+++ b/utils/src/filemap.rs
@@ -149,6 +149,28 @@ impl FileMapState {
         Ok(unsafe { std::slice::from_raw_parts(start as *const T, count) })
     }
 
+    /// Get a mutable slice of `T` at `offset` with `count` entries.
+    pub fn get_slice_mut<T>(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
+        let start = self.base.wrapping_add(offset);
+        if count.checked_mul(size_of::<T>()).is_none() {
+            bail_einval!("count 0x{count:x} to get_slice_mut() is too big");
+        }
+        let size = count * size_of::<T>();
+        if size.checked_add(start as usize).is_none() {
+            bail_einval!(
+                "invalid parameter to get_slice_mut(), offset 0x{offset:x}, count 0x{count:x}"
+            );
+        }
+        let end = start.wrapping_add(size);
+        if start > end || start < self.base || end < self.base || end > self.end {
+            bail_einval!(
+                "invalid range in get_slice_mut(), base 0x{:p}, start 0x{start:p}, end 0x{end:p}",
+                self.base
+            );
+        }
+        Ok(unsafe { std::slice::from_raw_parts_mut(start as *mut T, count) })
+    }
+
     /// Check whether the range [offset, offset + size) is valid and return the start address.
     pub fn validate_range(&self, offset: usize, size: usize) -> Result<*const u8> {
         let start = self.base.wrapping_add(offset);
diff --git a/utils/src/lib.rs b/utils/src/lib.rs
index 9294cf76951..a8c124fec44 100644
--- a/utils/src/lib.rs
+++ b/utils/src/lib.rs
@@ -33,6 +33,7 @@ pub mod mpmc;
 pub mod reader;
 pub mod trace;
 pub mod types;
+pub mod verity;
 
 /// Round up and divide the value `n` by `d`.
 pub fn div_round_up(n: u64, d: u64) -> u64 {
@@ -48,6 +49,13 @@ pub fn round_up(n: u64, d: u64) -> u64 {
     (n + d - 1) / d * d
 }
 
+/// Round up the value `n` to be a multiple of `d`.
+pub fn round_up_usize(n: usize, d: usize) -> usize {
+    debug_assert!(d != 0);
+    debug_assert!(d.is_power_of_two());
+    (n + d - 1) / d * d
+}
+
 /// Overflow can fail this rounder if the base value is large enough with 4095 added.
 pub fn try_round_up_4k<U: TryFrom<u64>, T: Into<u64>>(x: T) -> Option<U> {
     let t = 4095u64;
diff --git a/utils/src/verity.rs b/utils/src/verity.rs
new file mode 100644
index 00000000000..e7c877c463c
--- /dev/null
+++ b/utils/src/verity.rs
@@ -0,0 +1,434 @@
+// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//! Utilities to generate Merkle trees for data integrity verification.
+
+use std::fs::File;
+use std::io::Result;
+use std::mem::size_of;
+use std::sync::Mutex;
+
+use crate::digest::{Algorithm, DigestData, RafsDigest};
+use crate::div_round_up;
+use crate::filemap::FileMapState;
+
+const NON_EXIST_ENTRY_DIGEST: RafsDigest = RafsDigest {
+    data: [
+        173, 127, 172, 178, 88, 111, 198, 233, 102, 192, 4, 215, 209, 209, 107, 2, 79, 88, 5, 255,
+        124, 180, 124, 122, 133, 218, 189, 139, 72, 137, 44, 167,
+    ],
+};
+
+/// Struct to maintain and compute Merkle tree topology and layout.
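+///
+/// The tree is stored top-down: the page holding the highest level comes first,
+/// followed by the pages of the next lower level. With 4096-byte pages and
+/// 32-byte SHA-256 digests, one digest page covers 128 pages of the level
+/// below, so e.g. 129 data pages need a two-level tree of three pages in
+/// total: one top-level page at offset 0 and two level-1 pages at offset 4096.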
+pub struct MerkleTree {
+    digest_algo: Algorithm,
+    digest_per_page: u32,
+    digest_size: usize,
+    data_pages: u32,
+    page_size: u32,
+    max_levels: u32,
+}
+
+impl MerkleTree {
+    /// Create a new instance of `MerkleTree`.
+    pub fn new(page_size: u32, data_pages: u32, digest_algo: Algorithm) -> Self {
+        assert_eq!(page_size, 4096);
+        assert_eq!(digest_algo, Algorithm::Sha256);
+        let digest_size = 32;
+        let digest_shift = u32::trailing_zeros(page_size / digest_size);
+        let digest_per_page = 1u32 << digest_shift;
+
+        let mut max_levels = 0;
+        let mut tmp_pages = data_pages as u64;
+        while tmp_pages > 1 {
+            tmp_pages = div_round_up(tmp_pages, digest_per_page as u64);
+            max_levels += 1;
+        }
+
+        MerkleTree {
+            digest_algo,
+            digest_per_page,
+            digest_size: digest_size as usize,
+            page_size,
+            data_pages,
+            max_levels,
+        }
+    }
+
+    /// Get the digest algorithm used to generate the Merkle tree.
+    pub fn digest_algorithm(&self) -> Algorithm {
+        self.digest_algo
+    }
+
+    /// Get the height of the Merkle tree; 0 means there is only a root digest for one data page.
+    pub fn max_levels(&self) -> u32 {
+        self.max_levels
+    }
+
+    /// Get the number of pages needed to store digests at the specified Merkle tree level.
+    pub fn level_pages(&self, mut level: u32) -> u32 {
+        if level > self.max_levels {
+            0
+        } else {
+            let mut pages = self.data_pages as u64;
+            while level > 0 && pages > 0 {
+                pages = div_round_up(pages, self.digest_per_page as u64);
+                level -= 1;
+            }
+            pages as u32
+        }
+    }
+
+    /// Get the number of digest entries at the specified Merkle tree level.
+    pub fn level_entries(&self, level: u32) -> u32 {
+        if self.data_pages == 0 || level > self.max_levels {
+            0
+        } else {
+            self.level_index(level, self.data_pages - 1) + 1
+        }
+    }
+
+    /// Get the entry index at the specified level covering the data page with index `page_index`.
+    pub fn level_index(&self, mut level: u32, mut page_index: u32) -> u32 {
+        if level <= 1 {
+            page_index
+        } else {
+            level -= 1;
+            while level > 0 {
+                page_index /= self.digest_per_page;
+                level -= 1;
+            }
+            page_index
+        }
+    }
+
+    /// Get the base position of the digest array for the specified Merkle tree level.
+    pub fn level_base(&self, level: u32) -> u64 {
+        if level >= self.max_levels {
+            0
+        } else {
+            let mut offset = 0;
+            let mut curr = self.max_levels;
+            while curr > level {
+                let pages = self.level_pages(curr);
+                offset += pages as u64 * self.page_size as u64;
+                curr -= 1;
+            }
+            offset
+        }
+    }
+
+    /// Get the total number of pages needed to store the Merkle tree.
+    pub fn total_pages(&self) -> u32 {
+        let mut pages = 0;
+        for idx in 1..=self.max_levels {
+            pages += self.level_pages(idx);
+        }
+        pages
+    }
+}
+
+/// Merkle tree generator for data integrity verification.
+pub struct VerityGenerator {
+    mkl_tree: MerkleTree,
+    file_map: Mutex<FileMapState>,
+    root_digest: RafsDigest,
+}
+
+impl VerityGenerator {
+    /// Create a new instance of [VerityGenerator].
+    pub fn new(file: File, offset: u64, data_pages: u32) -> Result<Self> {
+        let mkl_tree = MerkleTree::new(4096, data_pages, Algorithm::Sha256);
+        let total_size = mkl_tree.total_pages() as usize * 4096;
+        let file_map = if data_pages > 1 {
+            if offset.checked_add(total_size as u64).is_none() {
+                return Err(einval!(format!(
+                    "verity data offset 0x{:x} and size 0x{:x} are too big",
+                    offset, total_size
+                )));
+            }
+
+            let md = file.metadata()?;
+            if md.len() < total_size as u64 + offset {
+                file.set_len(total_size as u64 + offset)?;
+            }
+            FileMapState::new(file, offset as libc::off_t, total_size, true)?
+        } else {
+            FileMapState::default()
+        };
+
+        Ok(VerityGenerator {
+            mkl_tree,
+            file_map: Mutex::new(file_map),
+            root_digest: NON_EXIST_ENTRY_DIGEST,
+        })
+    }
+
+    /// Initialize all digest values.
+    pub fn initialize(&mut self) -> Result<()> {
+        let total_size = self.mkl_tree.total_pages() as usize * 4096;
+        let mut offset = 0;
+        let mut map = self.file_map.lock().unwrap();
+
+        while offset < total_size {
+            let digest = map.get_mut::<DigestData>(offset)?;
+            digest.copy_from_slice(&NON_EXIST_ENTRY_DIGEST.data);
+            offset += size_of::<DigestData>();
+        }
+
+        Ok(())
+    }
+
+    /// Set the digest value for the Merkle tree entry at `level` with `index`.
+    ///
+    /// Digests for data pages must be set by calling this method. It can also be used to set
+    /// digest values for intermediate digest pages.
+    pub fn set_digest(&mut self, level: u32, index: u32, digest: &[u8]) -> Result<()> {
+        let digest_size = self.mkl_tree.digest_size;
+        if digest.len() != digest_size {
+            return Err(einval!(format!(
+                "size of digest data is not {}",
+                digest_size
+            )));
+        }
+
+        // Handle the special case of a zero-level Merkle tree.
+        if self.mkl_tree.data_pages == 1 && level == 1 && index == 0 {
+            self.root_digest.data.copy_from_slice(digest);
+            return Ok(());
+        }
+
+        if level > self.mkl_tree.max_levels() || level == 0 {
+            return Err(einval!(format!(
+                "level {} is out of range, max {}",
+                level,
+                self.mkl_tree.max_levels()
+            )));
+        } else if index >= self.mkl_tree.level_entries(level) {
+            return Err(einval!(format!(
+                "index {} is out of range, max {}",
+                index,
+                self.mkl_tree.level_entries(level) - 1
+            )));
+        }
+
+        let base = self.mkl_tree.level_base(level) as usize;
+        let offset = base + index as usize * digest_size;
+        let mut guard = self.file_map.lock().unwrap();
+        let buf = guard.get_mut::<DigestData>(offset)?;
+        buf.copy_from_slice(digest);
+
+        Ok(())
+    }
+
+    /// Generate digest values from lower-level digest pages.
+    pub fn generate_level_digests(&mut self, level: u32) -> Result<()> {
+        assert!(level > 1 && level <= self.mkl_tree.max_levels);
+        let page_size = self.mkl_tree.page_size as usize;
+        let count = self.mkl_tree.level_entries(level) as usize;
+        let mut digest_base = self.mkl_tree.level_base(level) as usize;
+        let mut data_base = self.mkl_tree.level_base(level - 1) as usize;
+        let mut guard = self.file_map.lock().unwrap();
+
+        for _ in 0..count {
+            let data = guard.get_slice::<u8>(data_base, page_size)?;
+            let digest = RafsDigest::from_buf(data, self.mkl_tree.digest_algo);
+            let buf = guard.get_mut::<DigestData>(digest_base)?;
+            buf.copy_from_slice(digest.as_ref());
+            data_base += page_size;
+            digest_base += self.mkl_tree.digest_size;
+        }
+
+        Ok(())
+    }
+
+    /// Generate the Merkle root digest.
+    ///
+    /// The returned Merkle tree root digest will be:
+    /// - `NON_EXIST_ENTRY_DIGEST` if there is no data page
+    /// - the digest of the data page if there is only one data page
+    /// - the digest of the top intermediate digest page if there is more than one data page
+    pub fn generate_root_digest(&mut self) -> Result<RafsDigest> {
+        if self.mkl_tree.max_levels == 0 {
+            Ok(self.root_digest)
+        } else {
+            let guard = self.file_map.lock().unwrap();
+            let data = guard.get_slice::<u8>(0, self.mkl_tree.page_size as usize)?;
+            Ok(RafsDigest::from_buf(data, self.mkl_tree.digest_algo))
+        }
+    }
+
+    /// Generate all intermediate and root digests for the Merkle tree.
+    ///
+    /// Digests for data pages at level 1 must be set up by calling [set_digest()] before
+    /// calling this function to generate intermediate and root digests.
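+    ///
+    /// Intermediate levels are then generated bottom-up: each pass hashes the
+    /// digest pages of level `n - 1` to fill the entries of level `n`, and the
+    /// root digest is computed last.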
+    pub fn generate_all_digests(&mut self) -> Result<RafsDigest> {
+        for level in 2..=self.mkl_tree.max_levels {
+            self.generate_level_digests(level)?;
+        }
+        self.generate_root_digest()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use vmm_sys_util::tempfile::TempFile;
+
+    #[test]
+    fn test_max_levels() {
+        let mkl = MerkleTree::new(4096, 1, Algorithm::Sha256);
+        assert_eq!(mkl.max_levels(), 0);
+        assert_eq!(mkl.level_pages(0), 1);
+        assert_eq!(mkl.level_pages(1), 0);
+        assert_eq!(mkl.level_base(0), 0);
+        assert_eq!(mkl.level_base(1), 0);
+        assert_eq!(mkl.level_entries(0), 1);
+        assert_eq!(mkl.level_entries(1), 0);
+        assert_eq!(mkl.total_pages(), 0);
+
+        let mkl = MerkleTree::new(4096, 2, Algorithm::Sha256);
+        assert_eq!(mkl.max_levels(), 1);
+        assert_eq!(mkl.level_pages(0), 2);
+        assert_eq!(mkl.level_pages(1), 1);
+        assert_eq!(mkl.level_pages(2), 0);
+        assert_eq!(mkl.level_entries(0), 2);
+        assert_eq!(mkl.level_entries(1), 2);
+        assert_eq!(mkl.level_entries(2), 0);
+        assert_eq!(mkl.level_base(0), 4096);
+        assert_eq!(mkl.level_base(1), 0);
+        assert_eq!(mkl.level_base(2), 0);
+        assert_eq!(mkl.total_pages(), 1);
+
+        let mkl = MerkleTree::new(4096, 128, Algorithm::Sha256);
+        assert_eq!(mkl.max_levels(), 1);
+        assert_eq!(mkl.level_pages(0), 128);
+        assert_eq!(mkl.level_pages(1), 1);
+        assert_eq!(mkl.level_pages(2), 0);
+        assert_eq!(mkl.level_entries(0), 128);
+        assert_eq!(mkl.level_entries(1), 128);
+        assert_eq!(mkl.level_entries(2), 0);
+        assert_eq!(mkl.level_base(0), 4096);
+        assert_eq!(mkl.level_base(1), 0);
+        assert_eq!(mkl.level_base(2), 0);
+        assert_eq!(mkl.total_pages(), 1);
+
+        let mkl = MerkleTree::new(4096, 129, Algorithm::Sha256);
+        assert_eq!(mkl.max_levels(), 2);
+        assert_eq!(mkl.level_pages(0), 129);
+        assert_eq!(mkl.level_pages(1), 2);
+        assert_eq!(mkl.level_pages(2), 1);
+        assert_eq!(mkl.level_pages(3), 0);
+        assert_eq!(mkl.level_entries(0), 129);
+        assert_eq!(mkl.level_entries(1), 129);
+        assert_eq!(mkl.level_entries(2), 2);
+        assert_eq!(mkl.level_entries(3), 0);
+        assert_eq!(mkl.level_base(0), 4096 * 3);
+        assert_eq!(mkl.level_base(1), 4096);
+        assert_eq!(mkl.level_base(2), 0);
+        assert_eq!(mkl.level_base(3), 0);
+        assert_eq!(mkl.total_pages(), 3);
+
+        let mkl = MerkleTree::new(4096, 128 * 128, Algorithm::Sha256);
+        assert_eq!(mkl.max_levels(), 2);
+        assert_eq!(mkl.level_pages(0), 128 * 128);
+        assert_eq!(mkl.level_pages(1), 128);
+        assert_eq!(mkl.level_pages(2), 1);
+        assert_eq!(mkl.level_pages(3), 0);
+        assert_eq!(mkl.level_base(0), 4096 * 129);
+        assert_eq!(mkl.level_base(1), 4096);
+        assert_eq!(mkl.level_base(2), 0);
+        assert_eq!(mkl.level_base(3), 0);
+        assert_eq!(mkl.total_pages(), 129);
+
+        let mkl = MerkleTree::new(4096, 128 * 128 + 1, Algorithm::Sha256);
+        assert_eq!(mkl.max_levels(), 3);
+        assert_eq!(mkl.level_pages(0), 128 * 128 + 1);
+        assert_eq!(mkl.level_pages(1), 129);
+        assert_eq!(mkl.level_pages(2), 2);
+        assert_eq!(mkl.level_pages(3), 1);
+        assert_eq!(mkl.level_pages(4), 0);
+        assert_eq!(mkl.level_entries(0), 128 * 128 + 1);
+        assert_eq!(mkl.level_entries(1), 128 * 128 + 1);
+        assert_eq!(mkl.level_entries(2), 129);
+        assert_eq!(mkl.level_entries(3), 2);
+        assert_eq!(mkl.level_entries(4), 0);
+        assert_eq!(mkl.level_base(0), 4096 * 132);
+        assert_eq!(mkl.level_base(1), 4096 * 3);
+        assert_eq!(mkl.level_base(2), 4096);
+        assert_eq!(mkl.level_base(3), 0);
+        assert_eq!(mkl.level_base(4), 0);
+        assert_eq!(mkl.total_pages(), 132);
+
+        let mkl = MerkleTree::new(4096, u32::MAX, Algorithm::Sha256);
+        assert_eq!(mkl.max_levels(), 5);
+    }
+
+    #[test]
+    fn test_generate_mkl_tree_zero_entry() {
+        let digest = RafsDigest::from_buf(&[0u8; 4096], Algorithm::Sha256);
+        assert_eq!(digest, NON_EXIST_ENTRY_DIGEST);
+
+        let file = TempFile::new().unwrap();
+        let mut generator = VerityGenerator::new(file.into_file(), 0, 0).unwrap();
+
+        assert!(generator
+            .set_digest(0, 0, &NON_EXIST_ENTRY_DIGEST.data)
+            .is_err());
+        assert!(generator
+            .set_digest(1, 0, &NON_EXIST_ENTRY_DIGEST.data)
+            .is_err());
+
+        let root_digest = generator.generate_all_digests().unwrap();
+        assert_eq!(root_digest, NON_EXIST_ENTRY_DIGEST);
+    }
+
+    #[test]
+    fn test_generate_mkl_tree_one_entry() {
+        let file = TempFile::new().unwrap();
+        let mut generator = VerityGenerator::new(file.into_file(), 0, 1).unwrap();
+
+        let digest = RafsDigest::from_buf(&[1u8; 4096], Algorithm::Sha256);
+        assert!(generator.set_digest(0, 0, &digest.data).is_err());
+        assert!(generator.set_digest(2, 0, &digest.data).is_err());
+        assert!(generator.set_digest(1, 1, &digest.data).is_err());
+        generator.set_digest(1, 0, &digest.data).unwrap();
+
+        let root_digest = generator.generate_all_digests().unwrap();
+        assert_eq!(root_digest, digest);
+    }
+
+    #[test]
+    fn test_generate_mkl_tree_two_entries() {
+        let file = TempFile::new().unwrap();
+        let mut generator = VerityGenerator::new(file.into_file(), 0, 2).unwrap();
+
+        let digest = RafsDigest::from_buf(&[1u8; 4096], Algorithm::Sha256);
+        assert!(generator.set_digest(0, 0, &digest.data).is_err());
+        assert!(generator.set_digest(2, 0, &digest.data).is_err());
+        assert!(generator.set_digest(1, 2, &digest.data).is_err());
+        generator.set_digest(1, 0, &digest.data).unwrap();
+        generator.set_digest(1, 1, &digest.data).unwrap();
+
+        let root_digest = generator.generate_all_digests().unwrap();
+        assert_ne!(root_digest, digest);
+    }
+
+    #[test]
+    fn test_generate_mkl_tree_4097_entries() {
+        let file = TempFile::new().unwrap();
+        let mut generator = VerityGenerator::new(file.into_file(), 0, 4097).unwrap();
+
+        let digest = RafsDigest::from_buf(&[1u8; 4096], Algorithm::Sha256);
+        assert!(generator.set_digest(0, 0, &digest.data).is_err());
+        generator.set_digest(2, 0, &digest.data).unwrap();
+        for idx in 0..4097 {
+            generator.set_digest(1, idx, &digest.data).unwrap();
+        }
+
+        let root_digest = generator.generate_all_digests().unwrap();
+        assert_ne!(root_digest, digest);
+        assert_eq!(generator.mkl_tree.max_levels, 2);
+    }
+}

From 01e59a61490a5d378f8a195041ef3bf83e61269d Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Mon, 27 Mar 2023 11:46:11 +0800
Subject: [PATCH 2/2] nydus-image: generate dm-verity data for block device

Add a `--verity` option to `nydus-image export --block` to generate
dm-verity data for block devices.
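When enabled, the Merkle tree is appended to the generated disk image at the
first 4096-byte aligned offset after the filesystem data, and the veritysetup
options needed to open the verified device are printed once the export
completes, as the session below shows: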
```
[root@iZ0jl3vazmhc81dur3xnm3Z image-service]# tar -cvf src.tar src
[root@iZ0jl3vazmhc81dur3xnm3Z image-service]# sha256sum src.tar
0e2dbe8b6e0f55f42c75034ed9dfc582ad0a94098cfc248c968522e7ef02e00a  src.tar
[root@iZ0jl3vazmhc81dur3xnm3Z image-service]# cp src.tar images/0e2dbe8b6e0f55f42c75034ed9dfc582ad0a94098cfc248c968522e7ef02e00a
[root@iZ0jl3vazmhc81dur3xnm3Z image-service]# target/debug/nydus-image create -t tar-tarfs -D images/ images/0e2dbe8b6e0f55f42c75034ed9dfc582ad0a94098cfc248c968522e7ef02e00a
[2023-03-27 16:32:00.068730 +08:00] INFO successfully built RAFS filesystem:
meta blob path: images/90f0e6e7e0ff822d4acddf30c36ac77fe06f549fe58f89a818fa824b19f70d47
data blob size: 0x3c000
data blobs: ["0e2dbe8b6e0f55f42c75034ed9dfc582ad0a94098cfc248c968522e7ef02e00a"]
[root@iZ0jl3vazmhc81dur3xnm3Z image-service]# target/debug/nydus-image export --block --verity -D images/ -B images/90f0e6e7e0ff822d4acddf30c36ac77fe06f549fe58f89a818fa824b19f70d47
[2023-03-27 23:49:14.450762 +08:00] INFO RAFS features: COMPRESSION_NONE | HASH_SHA256 | EXPLICIT_UID_GID | TARTFS_MODE
dm-verity options: --no-superblock --format=1 -s "" --hash=sha256 --data-block-size=4096 --hash-block-size=4096 --data-blocks 572 --hash-offset 2342912 ab7b417fc284c3b58a72044a996ec55e2c68a8b9dcf10bc469f4e640e5d98e6a
losetup -r /dev/loop1 images/90f0e6e7e0ff822d4acddf30c36ac77fe06f549fe58f89a818fa824b19f70d47.disk
[root@iZ0jl3vazmhc81dur3xnm3Z image-service]# veritysetup open -v --no-superblock --format=1 -s "" --hash=sha256 --data-block-size=4096 --hash-block-size=4096 --data-blocks 572 --hash-offset 2342912 /dev/loop1 verity /dev/loop1 ab7b417fc284c3b58a72044a996ec55e2c68a8b9dcf10bc469f4e640e5d98e6a
[root@iZ0jl3vazmhc81dur3xnm3Z image-service]# veritysetup status verity
/dev/mapper/verity is active.
  type:        VERITY
  status:      verified
  hash type:   1
  data block:  4096
  hash block:  4096
  hash name:   sha256
  salt:        -
  data device: /dev/loop1
  data loop:   /root/image-service/images/90f0e6e7e0ff822d4acddf30c36ac77fe06f549fe58f89a818fa824b19f70d47.disk
  size:        4576 sectors
  mode:        readonly
  hash device: /dev/loop1
  hash loop:   /root/image-service/images/90f0e6e7e0ff822d4acddf30c36ac77fe06f549fe58f89a818fa824b19f70d47.disk
  hash offset: 4576 sectors
  root hash:   ab7b417fc284c3b58a72044a996ec55e2c68a8b9dcf10bc469f4e640e5d98e6a
```

Signed-off-by: Jiang Liu
---
 service/src/block_device.rs | 75 +++++++++++++++++++++++++++++++++----
 src/bin/nydus-image/main.rs | 11 +++++-
 2 files changed, 78 insertions(+), 8 deletions(-)

diff --git a/service/src/block_device.rs b/service/src/block_device.rs
index d3f921a3753..5a8c7daad01 100644
--- a/service/src/block_device.rs
+++ b/service/src/block_device.rs
@@ -15,7 +15,7 @@ use std::cmp::{max, min};
 use std::fs::OpenOptions;
 use std::io::Result;
 use std::path::PathBuf;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
 use std::thread;
 use std::thread::JoinHandle;
 
@@ -25,6 +25,9 @@ use nydus_rafs::metadata::layout::v6::{
     EROFS_BLOCK_BITS_12, EROFS_BLOCK_BITS_9, EROFS_BLOCK_SIZE_4096, EROFS_BLOCK_SIZE_512,
 };
 use nydus_storage::utils::alloc_buf;
+use nydus_utils::digest::{self, RafsDigest};
+use nydus_utils::round_up;
+use nydus_utils::verity::VerityGenerator;
 use tokio_uring::buf::IoBufMut;
 
 use crate::blob_cache::{generate_blob_key, BlobCacheMgr, BlobConfig, DataBlob, MetaBlob};
@@ -287,6 +290,7 @@ impl BlockDevice {
         output: Option<String>,
         localfs_dir: Option<PathBuf>,
         threads: u32,
+        verity: bool,
     ) -> Result<()> {
         let cache_mgr = Arc::new(BlobCacheMgr::new());
         cache_mgr.add_blob_entry(&blob_entry).map_err(|e| {
@@ -303,6 +307,7 @@ impl BlockDevice {
             ))
         })?;
         let block_device = Arc::new(block_device);
+        let blocks = block_device.blocks();
 
         let path = match output {
             Some(v) => PathBuf::from(v),
@@ -353,7 +358,27 @@ impl BlockDevice {
         })?;
         let output_file = Arc::new(tokio_uring::fs::File::from_std(output_file));
 
-        let blocks = block_device.blocks();
+        let mut verity_offset = 0;
+        let generator = if verity {
+            let file = OpenOptions::new()
+                .read(true)
+                .write(true)
+                .open(&path)
+                .map_err(|e| {
+                    eother!(format!(
+                        "block_device: failed to open output file {}, {}",
+                        path.display(),
+                        e
+                    ))
+                })?;
+            verity_offset = round_up(block_device.blocks_to_size(blocks), 4096);
+            let mut generator = VerityGenerator::new(file, verity_offset, blocks)?;
+            generator.initialize()?;
+            Some(Arc::new(Mutex::new(generator)))
+        } else {
+            None
+        };
+
         let batch_size = BLOCK_DEVICE_EXPORT_BATCH_SIZE as u32 / block_device.block_size() as u32;
         assert_eq!(batch_size.count_ones(), 1);
         let threads = max(threads, 1);
@@ -363,8 +388,10 @@ impl BlockDevice {
         }
 
         if threads == 1 {
+            let generator = generator.clone();
+            let block_device = block_device.clone();
             tokio_uring::start(async move {
-                Self::do_export(block_device.clone(), output_file, 0, block_device.blocks()).await
+                Self::do_export(block_device, output_file, 0, blocks, generator).await
             })?;
         } else {
             let mut thread_handlers: Vec<JoinHandle<Result<()>>> =
@@ -377,6 +404,7 @@ impl BlockDevice {
                 let mgr = cache_mgr.clone();
                 let id = blob_id.clone();
                 let path = path.to_path_buf();
+                let generator = generator.clone();
 
                 let handler = thread::spawn(move || {
                     let output_file = OpenOptions::new()
@@ -399,9 +427,9 @@ impl BlockDevice {
                         })?;
                     let device = Arc::new(block_device);
 
-                    tokio_uring::start(
-                        async move { Self::do_export(device, file, pos, count).await },
-                    )?;
+                    tokio_uring::start(async move {
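+                        // Export this worker's block range; leaf digests are
+                        // recorded through the shared Merkle tree generator.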
+                        Self::do_export(device, file, pos, count, generator).await
+                    })?;
                     Ok(())
                 });
                 pos += count;
@@ -424,6 +452,21 @@ impl BlockDevice {
                 })?;
             }
         }
+
+        if let Some(generator) = generator.as_ref() {
+            let mut guard = generator.lock().unwrap();
+            let root_digest = guard.generate_all_digests()?;
+            let root_digest: String = root_digest
+                .data
+                .iter()
+                .map(|v| format!("{:02x}", v))
+                .collect();
+            println!(
+                "dm-verity options: --no-superblock --format=1 -s \"\" --hash=sha256 --data-block-size={} --hash-block-size=4096 --data-blocks {} --hash-offset {} {}",
+                block_device.block_size(), blocks, verity_offset, root_digest
+            );
+        }
+
         Ok(())
     }
 
@@ -432,8 +475,10 @@ impl BlockDevice {
     async fn do_export(
         block_device: Arc<BlockDevice>,
         output_file: Arc<tokio_uring::fs::File>,
         start: u32,
         mut blocks: u32,
+        generator: Option<Arc<Mutex<VerityGenerator>>>,
     ) -> Result<()> {
         let batch_size = BLOCK_DEVICE_EXPORT_BATCH_SIZE as u32 / block_device.block_size() as u32;
+        let block_size = block_device.block_size() as usize;
         let mut pos = start;
         let mut buf = alloc_buf(BLOCK_DEVICE_EXPORT_BATCH_SIZE);
 
@@ -441,7 +486,7 @@ impl BlockDevice {
             let count = min(batch_size, blocks);
             let (res, buf1) = block_device.async_read(pos, count, buf).await;
             let sz = res?;
-            if sz != count as usize * block_device.block_size() as usize {
+            if sz != count as usize * block_size {
                 return Err(eio!(
                     "block_device: failed to read data, got less data than requested"
                 ));
@@ -462,6 +507,22 @@ impl BlockDevice {
             }
             buf = buf2;
 
+            // Generate Merkle tree leaf nodes.
+            if let Some(generator) = generator.as_ref() {
+                let mut page_idx = (block_device.blocks_to_size(pos) / block_size as u64) as u32;
+                let mut offset = 0;
+                while offset < buf.len() {
+                    let digest = RafsDigest::from_buf(
+                        &buf[offset..offset + block_size],
+                        digest::Algorithm::Sha256,
+                    );
+                    let mut guard = generator.lock().unwrap();
+                    guard.set_digest(1, page_idx, &digest.data)?;
+                    offset += block_size;
+                    page_idx += 1;
+                }
+            }
+
             pos += count;
             blocks -= count;
         }
diff --git a/src/bin/nydus-image/main.rs b/src/bin/nydus-image/main.rs
index d19c1d86900..0e970008d51 100644
--- a/src/bin/nydus-image/main.rs
+++ b/src/bin/nydus-image/main.rs
@@ -479,6 +479,14 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
                     .help("File path for saving the exported content")
                     .required_unless_present("localfs-dir")
             )
+            .arg(
+                Arg::new("verity")
+                    .long("verity")
+                    .help("Generate dm-verity data for block device")
+                    .action(ArgAction::SetTrue)
+                    .required(false)
+                    .requires("block")
+            )
     );
 
     let app = app.subcommand(
@@ -1558,8 +1566,9 @@ impl Command {
             .map(|n| n.parse().unwrap_or(1))
             .unwrap_or(1);
         let output = subargs.value_of("output").map(|v| v.to_string());
+        let verity = subargs.is_present("verity");
 
-        BlockDevice::export(entry, output, localfs_dir, threads)
+        BlockDevice::export(entry, output, localfs_dir, threads, verity)
             .context("failed to export RAFS filesystem as raw block device image")
     }
 }
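
For reviewers, here is a minimal sketch of the `VerityGenerator` call flow introduced in patch 1, mirroring the unit tests; the file name, offset, and page count are illustrative only, not part of the patches:

```rust
use std::fs::OpenOptions;

use nydus_utils::digest::{Algorithm, RafsDigest};
use nydus_utils::verity::VerityGenerator;

fn main() -> std::io::Result<()> {
    // Hypothetical image with two 4096-byte data pages; the Merkle tree is
    // stored at offset 8192, right after the data.
    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open("verity-demo.img")?;
    let mut generator = VerityGenerator::new(file, 8192, 2)?;
    generator.initialize()?;

    // Level 1 holds the digests of the data pages themselves.
    for idx in 0..2 {
        let digest = RafsDigest::from_buf(&[idx as u8; 4096], Algorithm::Sha256);
        generator.set_digest(1, idx, &digest.data)?;
    }

    // Fold intermediate levels (none for two pages) and compute the root digest.
    let root = generator.generate_all_digests()?;
    let hex: String = root.data.iter().map(|v| format!("{:02x}", v)).collect();
    println!("root digest: {}", hex);
    Ok(())
}
```

The printed root digest is what would be passed to `veritysetup open` as shown in the patch 2 commit message.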