diff --git a/blobfs/src/lib.rs b/blobfs/src/lib.rs
index 9fe380ca17f..8b7d3db0fe5 100644
--- a/blobfs/src/lib.rs
+++ b/blobfs/src/lib.rs
@@ -27,8 +27,6 @@ use rafs::{
 use serde::Deserialize;
 use std::any::Any;
 #[cfg(feature = "virtiofs")]
-use std::convert::TryInto;
-#[cfg(feature = "virtiofs")]
 use std::ffi::CStr;
 use std::ffi::CString;
 #[cfg(feature = "virtiofs")]
@@ -165,8 +163,6 @@ impl BootstrapArgs {
 /// combination of mount namespaces and the pivot_root system call.
 pub struct BlobFs {
     pfs: PassthroughFs,
-    #[cfg(feature = "virtiofs")]
-    cfg: Config,
     #[allow(dead_code)]
     bootstrap_args: BootstrapArgs,
 }
@@ -176,14 +172,10 @@ impl BlobFs {
     pub fn new(cfg: Config) -> io::Result<BlobFs> {
         trace!("BlobFs config is: {:?}", cfg);
 
-        #[cfg(feature = "virtiofs")]
-        let cfg_bak = cfg.clone();
         let bootstrap_args = Self::load_bootstrap(&cfg)?;
         let pfs = PassthroughFs::new(cfg.ps_config)?;
         Ok(BlobFs {
             pfs,
-            #[cfg(feature = "virtiofs")]
-            cfg: cfg_bak,
             bootstrap_args,
         })
     }
diff --git a/blobfs/src/sync_io.rs b/blobfs/src/sync_io.rs
index 4956fcd8a7e..40146ee2a4e 100644
--- a/blobfs/src/sync_io.rs
+++ b/blobfs/src/sync_io.rs
@@ -24,7 +24,18 @@ use storage::device::BlobPrefetchRequest;
 
 impl BlobFs {
     #[cfg(feature = "virtiofs")]
-    fn get_blob_id_and_size(&self, inode: Inode) -> io::Result<(String, i64)> {
+    fn check_st_size(blob_id: &Path, size: i64) -> io::Result<()> {
+        if size < 0 {
+            return Err(einval!(format!(
+                "load_chunks_on_demand: blob_id {:?}, size: {:?} is less than 0",
+                blob_id, size
+            )));
+        }
+        Ok(())
+    }
+
+    #[cfg(feature = "virtiofs")]
+    fn get_blob_id_and_size(&self, inode: Inode) -> io::Result<(String, u64)> {
         // locate blob file that the inode refers to
         let blob_id_full_path = self.pfs.readlinkat_proc_file(inode)?;
         let parent = blob_id_full_path
@@ -37,12 +48,6 @@ impl BlobFs {
             blob_id_full_path
         );
 
-        debug_assert!(
-            parent
-                == Path::new(self.cfg.ps_config.root_dir.as_str())
-                    .join(self.bootstrap_args.blob_cache_dir.as_str())
-        );
-
         let blob_file = Self::open_file(
             libc::AT_FDCWD,
             &blob_id_full_path.as_path(),
@@ -60,31 +65,32 @@ impl BlobFs {
 
         trace!("load_chunks_on_demand: blob_id {:?}", blob_id);
 
-        Ok((blob_id.to_os_string().into_string().unwrap(), st.st_size))
+        Self::check_st_size(blob_id_full_path.as_path(), st.st_size)?;
+
+        Ok((
+            blob_id.to_os_string().into_string().unwrap(),
+            st.st_size as u64,
+        ))
     }
 
     #[cfg(feature = "virtiofs")]
-    fn load_chunks_on_demand(&self, inode: Inode, foffset: u64) -> io::Result<()> {
+    fn load_chunks_on_demand(&self, inode: Inode, offset: u64) -> io::Result<()> {
         // prepare BlobPrefetchRequest and call device.prefetch().
         // Make sure prefetch doesn't use delay_persist as we need the
         // data immediately.
         let (blob_id, size) = self.get_blob_id_and_size(inode)?;
-        let offset: u32 = foffset.try_into().map_err(|_| {
-            einval!(format!(
-                "blobfs: load_chunks_on_demand: foffset {} is larger than u32::MAX",
-                foffset
-            ))
-        })?;
-        let len = (size - offset as i64).try_into().map_err(|_| {
-            einval!(format!(
-                "blobfs: load_chunks_on_demand: len {} is larger than u32::MAX",
-                (size - offset as i64)
-            ))
-        })?;
+        if size <= offset {
+            return Err(einval!(format!(
+                "load_chunks_on_demand: blob_id {:?}, offset {:?} is larger than size {:?}",
+                blob_id, offset, size
+            )));
+        }
+
+        let len = size - offset;
         let req = BlobPrefetchRequest {
             blob_id,
             offset,
-            len: min(len, 0x0020_0000_u32), // 2M range
+            len: min(len, 0x0020_0000_u64), // 2M range
         };
 
         self.bootstrap_args.fetch_range_sync(&[req]).map_err(|e| {
diff --git a/rafs/src/fs.rs b/rafs/src/fs.rs
index cfeb27f9c46..a4242ecb23c 100644
--- a/rafs/src/fs.rs
+++ b/rafs/src/fs.rs
@@ -491,8 +491,8 @@ impl Rafs {
             .iter()
             .map(|b| BlobPrefetchRequest {
                 blob_id: b.blob_id().to_owned(),
-                offset: b.readahead_offset() as u32,
-                len: b.readahead_size() as u32,
+                offset: b.readahead_offset(),
+                len: b.readahead_size(),
             })
             .collect::<Vec<BlobPrefetchRequest>>();
         device.prefetch(&[], &prefetches).unwrap_or_else(|e| {
diff --git a/storage/src/backend/localfs.rs b/storage/src/backend/localfs.rs
index ff4b3aec371..1c688aa4698 100644
--- a/storage/src/backend/localfs.rs
+++ b/storage/src/backend/localfs.rs
@@ -124,7 +124,7 @@ impl BlobReader for LocalFsEntry {
             .map_err(|e| LocalFsError::ReadVecBlob(e).into())
     }
 
-    fn prefetch_blob_data_range(&self, ra_offset: u32, ra_size: u32) -> BackendResult<()> {
+    fn prefetch_blob_data_range(&self, ra_offset: u64, ra_size: u64) -> BackendResult<()> {
         if !self.readahead {
             return Ok(());
         }
@@ -153,13 +153,13 @@ impl BlobReader for LocalFsEntry {
         }
 
         // Prefetch data according to the hint if it's valid.
-        let end = ra_offset as u64 + ra_size as u64;
+        let end = ra_offset + ra_size;
         if ra_size != 0 && end <= blob_size {
             info!(
                 "kick off hinted blob readahead offset {} len {}",
                 ra_offset, ra_size
             );
-            readahead(self.file.as_raw_fd(), ra_offset as u64, end);
+            readahead(self.file.as_raw_fd(), ra_offset, end);
         }
 
         // start access logging
diff --git a/storage/src/backend/mod.rs b/storage/src/backend/mod.rs
index 8a42bde757c..60fe0cb07be 100644
--- a/storage/src/backend/mod.rs
+++ b/storage/src/backend/mod.rs
@@ -180,7 +180,7 @@ pub trait BlobReader: Send + Sync {
     /// This method only prefetch blob data from storage backends, it doesn't cache data in the
     /// blob cache subsystem. So it's useful for disk and file system based storage backends, but
     /// it may not help for Registry/OSS based storage backends.
-    fn prefetch_blob_data_range(&self, ra_offset: u32, ra_size: u32) -> BackendResult<()>;
+    fn prefetch_blob_data_range(&self, ra_offset: u64, ra_size: u64) -> BackendResult<()>;
 
     /// Stop the background data prefetching tasks.
     fn stop_data_prefetch(&self) -> BackendResult<()>;
diff --git a/storage/src/backend/oss.rs b/storage/src/backend/oss.rs
index ecb0befcaae..bf31245097d 100644
--- a/storage/src/backend/oss.rs
+++ b/storage/src/backend/oss.rs
@@ -202,7 +202,7 @@ impl BlobReader for OssReader {
             .map(|size| size as usize)?)
     }
 
-    fn prefetch_blob_data_range(&self, _ra_offset: u32, _ra_size: u32) -> BackendResult<()> {
+    fn prefetch_blob_data_range(&self, _ra_offset: u64, _ra_size: u64) -> BackendResult<()> {
         Err(BackendError::Unsupported(
             "Oss backend does not support prefetch as per on-disk blob entries".to_string(),
         ))
diff --git a/storage/src/backend/registry.rs b/storage/src/backend/registry.rs
index 3ffaa5d1957..dfe3b96f315 100644
--- a/storage/src/backend/registry.rs
+++ b/storage/src/backend/registry.rs
@@ -534,7 +534,7 @@ impl BlobReader for RegistryReader {
             .map_err(BackendError::Registry)
     }
 
-    fn prefetch_blob_data_range(&self, _ra_offset: u32, _ra_size: u32) -> BackendResult<()> {
+    fn prefetch_blob_data_range(&self, _ra_offset: u64, _ra_size: u64) -> BackendResult<()> {
         Err(BackendError::Unsupported(
             "Registry backend does not support prefetch as per on-disk blob entries".to_string(),
         ))
diff --git a/storage/src/cache/worker.rs b/storage/src/cache/worker.rs
index 6c10d1ec8a1..25931db3490 100644
--- a/storage/src/cache/worker.rs
+++ b/storage/src/cache/worker.rs
@@ -304,11 +304,8 @@ impl AsyncWorkerMgr {
                         e
                     );
                 }
-            } else if offset < u32::MAX as u64 && size < u32::MAX as u64 {
-                let _ = cache.reader().prefetch_blob_data_range(
-                    offset as u32,
-                    std::cmp::min(size as u32, u32::MAX - offset as u32),
-                );
+            } else {
+                let _ = cache.reader().prefetch_blob_data_range(offset, size);
             }
 
             Ok(())
diff --git a/storage/src/device.rs b/storage/src/device.rs
index 344d8c2ff17..788beb2aeee 100644
--- a/storage/src/device.rs
+++ b/storage/src/device.rs
@@ -766,9 +766,9 @@ pub struct BlobPrefetchRequest {
     /// The ID of the blob to prefetch data for.
     pub blob_id: String,
     /// Offset into the blob to prefetch data.
-    pub offset: u32,
+    pub offset: u64,
     /// Size of data to prefetch.
-    pub len: u32,
+    pub len: u64,
 }
 
 /// Trait to provide direct access to underlying uncompressed blob file.
diff --git a/storage/src/meta/mod.rs b/storage/src/meta/mod.rs
index e1e291df862..e9e1018c1d7 100644
--- a/storage/src/meta/mod.rs
+++ b/storage/src/meta/mod.rs
@@ -618,6 +618,8 @@ impl BlobMetaState {
         let mut size = self.chunk_count as usize;
         let mut left = 0;
         let mut right = size;
+        let mut start = 0;
+        let mut end = 0;
 
         while left < right {
             let mid = left + size / 2;
@@ -625,10 +627,12 @@ impl BlobMetaState {
             // - `mid >= 0`
             // - `mid < size`: `mid` is limited by `[left; right)` bound.
             let entry = unsafe { chunks.get_unchecked(mid) };
-            let (start, end) = if compressed {
-                (entry.compressed_offset(), entry.compressed_end())
+            if compressed {
+                start = entry.compressed_offset();
+                end = entry.compressed_end();
             } else {
-                (entry.uncompressed_offset(), entry.uncompressed_end())
+                start = entry.uncompressed_offset();
+                end = entry.uncompressed_end();
             };
 
             if start > addr {
@@ -642,7 +646,12 @@ impl BlobMetaState {
             size = right - left;
         }
 
-        Err(einval!())
+        // if addr == self.chunks[last].compressed_offset, return einval
+        // with error msg.
+        Err(einval!(format!(
+            "start: {}, end: {}, addr: {}",
+            start, end, addr
+        )))
     }
 }
 
@@ -885,8 +894,8 @@ mod tests {
 
         fn prefetch_blob_data_range(
             &self,
-            _blob_readahead_offset: u32,
-            _blob_readahead_size: u32,
+            _blob_readahead_offset: u64,
+            _blob_readahead_size: u64,
         ) -> BackendResult<()> {
             Ok(())
         }
diff --git a/storage/src/test.rs b/storage/src/test.rs
index 1837951a8bb..ed9bd49b951 100644
--- a/storage/src/test.rs
+++ b/storage/src/test.rs
@@ -34,8 +34,8 @@ impl BlobReader for MockBackend {
 
     fn prefetch_blob_data_range(
         &self,
-        _blob_readahead_offset: u32,
-        _blob_readahead_size: u32,
+        _blob_readahead_offset: u64,
+        _blob_readahead_size: u64,
     ) -> BackendResult<()> {
         Ok(())
     }
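
For reference, a minimal standalone sketch of what the widened request type permits: a prefetch offset beyond u32::MAX, which the old `u32` fields in `BlobPrefetchRequest` could not represent. The struct below mirrors the post-patch definition in storage/src/device.rs, and the 2 MiB clamp copies the one in blobfs/src/sync_io.rs; the blob ID, sizes, and offsets are illustrative values, not project code.

```rust
/// Mirrors the post-patch BlobPrefetchRequest in storage/src/device.rs.
struct BlobPrefetchRequest {
    blob_id: String,
    offset: u64, // was u32: offsets past 4 GiB were unrepresentable
    len: u64,    // was u32: same limit applied to the request length
}

fn main() {
    let blob_size: u64 = 5 << 30; // a hypothetical 5 GiB blob
    let offset: u64 = (4u64 << 30) + 4096; // past the old u32::MAX cutoff

    // Same 2 MiB clamp as load_chunks_on_demand in blobfs/src/sync_io.rs.
    let len = std::cmp::min(blob_size - offset, 0x0020_0000_u64);

    let req = BlobPrefetchRequest {
        blob_id: "example-blob".to_string(),
        offset,
        len,
    };
    println!(
        "prefetch blob {} at offset {} len {}",
        req.blob_id, req.offset, req.len
    );
}
```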