Skip to content

Commit

Permalink
storage: use filemap for blob meta
Browse the repository at this point in the history
Use FileMapState for blob meta to reduce duplicated code.

Signed-off-by: Jiang Liu <gerry@linux.alibaba.com>
  • Loading branch information
jiangliu committed Oct 17, 2022
1 parent 983423a commit cec5d13
Show file tree
Hide file tree
Showing 2 changed files with 43 additions and 45 deletions.
57 changes: 13 additions & 44 deletions storage/src/meta/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,11 @@ use std::fs::OpenOptions;
use std::io::Result;
use std::mem::{size_of, ManuallyDrop};
use std::ops::{Add, BitAnd, Not};
use std::os::unix::io::AsRawFd;
use std::sync::Arc;

use nydus_utils::compress;
use nydus_utils::digest::RafsDigest;
use nydus_utils::filemap::FileMapState;

use crate::backend::BlobReader;
use crate::device::{BlobChunkInfo, BlobInfo};
Expand Down Expand Up @@ -349,25 +349,9 @@ impl BlobMetaInfo {
file.set_len(expected_size as u64)?;
}

let fd = file.as_raw_fd();
let base = unsafe {
libc::mmap(
std::ptr::null_mut(),
expected_size as usize,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd,
0,
)
};
if base == libc::MAP_FAILED {
return Err(last_error!("failed to mmap blob chunk_map"));
} else if base.is_null() {
return Err(ebadf!("failed to mmap blob chunk_map"));
}

let header = unsafe { (base as *mut u8).add(aligned_info_size as usize) };
let header = unsafe { &mut *(header as *mut BlobMetaHeaderOndisk) };
let mut filemap = FileMapState::new(file, 0, expected_size)?;
let base = filemap.validate_range(0, expected_size)?;
let header = filemap.get_mut::<BlobMetaHeaderOndisk>(aligned_info_size as usize)?;
if u32::from_le(header.s_magic) != BLOB_METADATA_MAGIC
|| u32::from_le(header.s_magic2) != BLOB_METADATA_MAGIC
|| u32::from_le(header.s_features) != blob_info.meta_flags()
Expand All @@ -390,15 +374,17 @@ impl BlobMetaInfo {
header.s_ci_offset = u64::to_le(blob_info.meta_ci_offset());
header.s_ci_compressed_size = u64::to_le(blob_info.meta_ci_compressed_size());
header.s_ci_uncompressed_size = u64::to_le(blob_info.meta_ci_uncompressed_size());
filemap.sync_data()?;

file.sync_data()?;
let header = filemap.get_mut::<BlobMetaHeaderOndisk>(aligned_info_size as usize)?;
header.s_magic = u32::to_le(BLOB_METADATA_MAGIC);
header.s_magic2 = u32::to_le(BLOB_METADATA_MAGIC);
filemap.sync_data()?;
}

let chunk_infos = unsafe {
ManuallyDrop::new(Vec::from_raw_parts(
base as *mut BlobChunkInfoOndisk,
base as *mut u8 as *mut BlobChunkInfoOndisk,
chunk_count as usize,
chunk_count as usize,
))
Expand All @@ -410,8 +396,7 @@ impl BlobMetaInfo {
uncompressed_size: round_up_4k(blob_info.uncompressed_size()),
chunk_count,
chunks: chunk_infos,
base: base as *const u8,
unmap_len: expected_size,
_filemap: filemap,
is_stargz: blob_info.is_stargz(),
});

Expand Down Expand Up @@ -712,26 +697,11 @@ pub struct BlobMetaState {
uncompressed_size: u64,
chunk_count: u32,
chunks: ManuallyDrop<Vec<BlobChunkInfoOndisk>>,
base: *const u8,
unmap_len: usize,
_filemap: FileMapState,
/// The blob meta is for an stargz image.
is_stargz: bool,
}

// // Safe to Send/Sync because the underlying data structures are readonly
unsafe impl Send for BlobMetaState {}
unsafe impl Sync for BlobMetaState {}

impl Drop for BlobMetaState {
fn drop(&mut self) {
if !self.base.is_null() {
let size = self.unmap_len;
unsafe { libc::munmap(self.base as *mut u8 as *mut libc::c_void, size) };
self.base = std::ptr::null();
}
}
}

impl BlobMetaState {
fn get_chunk_index_nocheck(&self, addr: u64, compressed: bool) -> Result<usize> {
let chunks = &self.chunks;
Expand Down Expand Up @@ -848,6 +818,7 @@ mod tests {
use nydus_utils::metrics::BackendMetrics;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::os::unix::io::AsRawFd;
use vmm_sys_util::tempfile::TempFile;

#[test]
Expand All @@ -867,8 +838,7 @@ mod tests {
comp_info: 0x00ff_f000_0010_0000,
},
]),
base: std::ptr::null(),
unmap_len: 0,
_filemap: FileMapState::default(),
is_stargz: false,
};

Expand Down Expand Up @@ -952,8 +922,7 @@ mod tests {
comp_info: 0x00ff_f000_0000_5000,
},
]),
base: std::ptr::null(),
unmap_len: 0,
_filemap: FileMapState::default(),
is_stargz: false,
};
let info = BlobMetaInfo {
Expand Down
31 changes: 30 additions & 1 deletion utils/src/filemap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
use std::fs::File;
use std::io::Result;
use std::mem::size_of;
use std::os::unix::io::{AsRawFd, IntoRawFd, RawFd};
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};

/// Struct to manage memory range mapped from file objects.
///
Expand All @@ -18,6 +18,10 @@ pub struct FileMapState {
fd: RawFd,
}

// NOTE(review): `FileMapState` also hands out `&mut T` via `get_mut()`, so the
// mapped region is not strictly readonly. Send/Sync soundness here relies on
// callers serializing mutation (e.g. writing only during single-threaded
// initialization) — confirm this invariant holds at all call sites.
unsafe impl Send for FileMapState {}
unsafe impl Sync for FileMapState {}

impl Default for FileMapState {
fn default() -> Self {
FileMapState {
Expand Down Expand Up @@ -96,6 +100,23 @@ impl FileMapState {
Ok(unsafe { &*(start as *const T) })
}

/// Cast a subregion of the mapped area to a mutable object reference.
///
/// Validates that `[offset, offset + size_of::<T>())` lies entirely within
/// the mapping and that the start address is properly aligned for `T`
/// before forming the reference; returns `EINVAL` otherwise.
pub fn get_mut<T>(&mut self, offset: usize) -> Result<&mut T> {
    // `wrapping_add` avoids UB on pointer-arithmetic overflow; any wrapped
    // result is rejected by the explicit range checks below.
    let start = self.base.wrapping_add(offset);
    let end = start.wrapping_add(size_of::<T>());

    // Reject: wrap-around (`start > end`, or either pointer falling before
    // `base`), a range extending past the mapped end, or a start address
    // misaligned for `T`.
    if start > end
        || start < self.base
        || end < self.base
        || end > self.end
        || start as usize & (std::mem::align_of::<T>() - 1) != 0
    {
        return Err(einval!("invalid mmap offset"));
    }

    // SAFETY: the checks above guarantee `start..end` lies inside the live
    // mapping and `start` is suitably aligned for `T`.
    Ok(unsafe { &mut *(start as *const T as *mut T) })
}

/// Check whether the range [offset, offset + size) is valid and return the start address.
pub fn validate_range(&self, offset: usize, size: usize) -> Result<*const u8> {
let start = self.base.wrapping_add(offset);
Expand All @@ -115,6 +136,14 @@ impl FileMapState {
pub unsafe fn offset(&self, offset: usize) -> *const u8 {
self.base.wrapping_add(offset)
}

/// Sync mapped file data into disk.
///
/// Flushes data modifications made through the mapping to the backing file
/// via `File::sync_data()`; file metadata is not necessarily synced.
pub fn sync_data(&self) -> Result<()> {
    // Temporarily wrap the borrowed raw fd in a `File` purely to reuse its
    // `sync_data()` implementation. `mem::forget` below prevents the
    // temporary `File` from closing an fd this method does not own.
    // NOTE(review): on a `FileMapState::default()` this presumably operates
    // on an invalid fd and returns an error — confirm callers avoid that.
    let file = unsafe { File::from_raw_fd(self.fd) };
    let result = file.sync_data();
    // Release ownership of the fd without closing it.
    std::mem::forget(file);
    result
}
}

#[cfg(test)]
Expand Down

0 comments on commit cec5d13

Please sign in to comment.