Skip to content

Commit

Permalink
misc zettacache cleanup (openzfs#491)
Browse files Browse the repository at this point in the history
SlabTrait: mark_slab_info() instead of phys_type()
This allows the specific slab type (Bitmap, Evacuating, etc.) to directly
access the SpaceMap, potentially writing several entries to it.

Helper methods on `struct Slab` should return whatever the SlabTrait
method returns.

BlockAllocatorBuilder & Slabs::open(): separate claiming of the
BlockAllocator's slabs from creation of the Builder.

Tweak logging messages, add some details to assertion failures and
errors.

Slight refactoring of code in summarized.rs, merge.rs

SpaceMapEntry::SlabInfo can be a 2-tuple variant rather than a 1-tuple
wrapping a struct with 2 members.  In this case the member names add
nothing to comprehension; they merely echo the type names.  Since this is
serialized with bincode, the on-disk representation is unchanged.

Use lock_non_send where possible
  • Loading branch information
ahrens authored Jun 27, 2022
1 parent e260c6e commit 36e8682
Show file tree
Hide file tree
Showing 11 changed files with 122 additions and 119 deletions.
11 changes: 10 additions & 1 deletion cmd/zfs_object_agent/zettacache/src/block_access.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use std::any::type_name;
use std::collections::BTreeMap;
use std::fmt::Debug;
use std::fmt::Display;
Expand Down Expand Up @@ -872,7 +873,15 @@ impl BlockAccess {

let struct_obj: T = match header.encoding {
EncodeType::Json => from_json_slice(serde_slice)?,
EncodeType::Bincode => Self::bincode_options().deserialize(serde_slice)?,
EncodeType::Bincode => Self::bincode_options()
.deserialize(serde_slice)
.with_context(|| {
format!(
"{header:?} {}-byte payload for {}: {serde_slice:?}",
type_name::<T>(),
serde_slice.len()
)
})?,
EncodeType::BincodeFixint => Self::bincode_fixint_options().deserialize(serde_slice)?,
};
Ok((struct_obj, buf.len() - remainder_slice.len()))
Expand Down
74 changes: 40 additions & 34 deletions cmd/zfs_object_agent/zettacache/src/block_allocator/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ use util::RangeTree;
use self::slabs::Slabs;
use crate::base_types::*;
use crate::block_access::BlockAccess;
use crate::slab_allocator::SlabAccess;
use crate::slab_allocator::SlabAllocator;
use crate::slab_allocator::SlabAllocatorBuilder;
use crate::slab_allocator::SlabId;
Expand Down Expand Up @@ -111,11 +112,11 @@ trait SlabTrait {
fn free(&mut self, extent: Extent);
fn flush_to_spacemap(&mut self, spacemap: &mut SpaceMap);
fn condense_to_spacemap(&self, spacemap: &mut SpaceMap);
fn mark_slab_info(&self, id: SlabId, spacemap: &mut SpaceMap);
fn max_size(&self) -> u32;
fn capacity_bytes(&self) -> u64;
fn free_space(&self) -> u64;
fn allocated_space(&self) -> u64;
fn phys_type(&self) -> SlabPhysType;
fn num_segments(&self) -> u64;
fn allocated_extents(&self) -> Vec<Extent>;
fn dump_info(&self);
Expand Down Expand Up @@ -327,10 +328,13 @@ impl SlabTrait for BitmapSlab {
u64::from(self.total_slots - self.allocatable.len()) * u64::from(self.slot_size)
}

fn phys_type(&self) -> SlabPhysType {
SlabPhysType::BitmapBased {
block_size: self.slot_size,
}
fn mark_slab_info(&self, id: SlabId, spacemap: &mut SpaceMap) {
spacemap.mark_slab_info(
id,
SlabPhysType::BitmapBased {
block_size: self.slot_size,
},
);
}

fn dump_info(&self) {
Expand Down Expand Up @@ -574,10 +578,13 @@ impl SlabTrait for ExtentSlab {
self.total_space - self.free_space()
}

fn phys_type(&self) -> SlabPhysType {
SlabPhysType::ExtentBased {
max_size: self.max_allowed_alloc_size,
}
fn mark_slab_info(&self, id: SlabId, spacemap: &mut SpaceMap) {
spacemap.mark_slab_info(
id,
SlabPhysType::ExtentBased {
max_size: self.max_allowed_alloc_size,
},
);
}

fn dump_info(&self) {
Expand Down Expand Up @@ -693,8 +700,8 @@ impl SlabTrait for EvacuatingSlab {
0
}

fn phys_type(&self) -> SlabPhysType {
SlabPhysType::Evacuating
fn mark_slab_info(&self, id: SlabId, spacemap: &mut SpaceMap) {
spacemap.mark_slab_info(id, SlabPhysType::Evacuating);
}

fn dump_info(&self) {
Expand Down Expand Up @@ -757,11 +764,11 @@ impl Slab {
}

fn import_alloc(&mut self, extent: Extent) {
self.inner.as_mut_dyn().import_alloc(extent);
self.inner.as_mut_dyn().import_alloc(extent)
}

fn import_free(&mut self, extent: Extent) {
self.inner.as_mut_dyn().import_free(extent);
self.inner.as_mut_dyn().import_free(extent)
}

fn allocate(&mut self, size: u32) -> Option<Extent> {
Expand All @@ -770,24 +777,24 @@ impl Slab {
}

fn free(&mut self, extent: Extent) {
self.inner.as_mut_dyn().free(extent);
self.inner.as_mut_dyn().free(extent)
}

fn mark_slab_info(&self, spacemap: &mut SpaceMap) {
spacemap.mark_slab_info(self.id, self.inner.as_dyn().phys_type());
self.inner.as_dyn().mark_slab_info(self.id, spacemap);
}

fn flush_to_spacemap(&mut self, spacemap: &mut SpaceMap) {
self.inner.as_mut_dyn().flush_to_spacemap(spacemap);
self.is_dirty = false;
self.is_allocd = false;
self.inner.as_mut_dyn().flush_to_spacemap(spacemap)
}

fn condense_to_spacemap(&mut self, spacemap: &mut SpaceMap) {
// By leaving a new mark with the slab info when condensing we make the entries in the old
// spacemap obsolete.
self.mark_slab_info(spacemap);
self.inner.as_mut_dyn().condense_to_spacemap(spacemap);
self.inner.as_mut_dyn().condense_to_spacemap(spacemap)
}

fn max_size(&self) -> u32 {
Expand Down Expand Up @@ -823,7 +830,7 @@ impl Slab {

fn dump_info(&self) {
writeln_stdout!("{:?}", self.id);
self.inner.as_dyn().dump_info();
self.inner.as_dyn().dump_info()
}

fn location(&self) -> DiskLocation {
Expand Down Expand Up @@ -975,12 +982,12 @@ impl BlockAllocatorBuilder {
/// Claims the slabs used by the block allocator with the SlabAllocatorBuilder.
pub async fn new(
block_access: Arc<BlockAccess>,
slab_builder: &mut SlabAllocatorBuilder,
slab_access: &SlabAccess,
phys: BlockAllocatorPhys,
) -> Self {
let slabs = Slabs::open(
block_access.clone(),
slab_builder,
slab_access,
&phys.spacemap,
&phys.spacemap_next,
)
Expand All @@ -992,7 +999,13 @@ impl BlockAllocatorBuilder {
}
}

pub async fn build(self, slab_allocator: Arc<SlabAllocator>) -> BlockAllocator {
pub fn claim(&self, slab_builder: &mut SlabAllocatorBuilder) {
for slab in self.slabs.iter() {
slab_builder.claim(slab.id);
}
}

pub fn build(self, slab_allocator: Arc<SlabAllocator>) -> BlockAllocator {
let phys = self.phys;
let slabs = self.slabs;
let block_access = self.block_access;
Expand Down Expand Up @@ -1399,10 +1412,10 @@ impl BlockAllocator {
.slab_buckets
.get_bucket_size_for_allocation_size(slab.max_size());

let extents: Vec<Extent> = slab
let extents = slab
.allocated_extents()
.iter()
.flat_map(|&old| {
.into_iter()
.flat_map(|old| {
match slab.inner {
SlabEnum::BitmapBased(_) => {
let extent_size = u32::try_from(old.size).unwrap();
Expand All @@ -1429,19 +1442,15 @@ impl BlockAllocator {
SlabEnum::Evacuating(_) => panic!("invalid slab type"),
}
})
.collect();
.collect::<Vec<_>>();

let mut map = extents
.iter()
.map(
|&old| match self.allocate_impl(bucket, u32::try_from(old.size).unwrap()) {
Some(new) => (old, Some(new)),
None => {
trace!(
"cache rebalance allocation failed for old extent '{:?}' in bucket '{:?}'",
old,
bucket
);
trace!("cache rebalance allocation failed for old {old:?} in {bucket:?}");
(old, None)
}
},
Expand Down Expand Up @@ -1476,10 +1485,7 @@ impl BlockAllocator {
let merged = before - after;
if before != after {
trace!(
"rebalance of slab '{:?}' has {} entries after merging ({} entries merged)",
id,
after,
merged,
"rebalance of {id:?} has {after} entries after merging ({merged} entries merged)",
);
}

Expand Down
48 changes: 22 additions & 26 deletions cmd/zfs_object_agent/zettacache/src/block_allocator/slabs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ use crate::block_access::BlockAccess;
use crate::block_allocator::EvacuatingSlab;
use crate::block_allocator::ExtentSlab;
use crate::block_allocator::SlabPhysType;
use crate::slab_allocator::SlabAllocatorBuilder;
use crate::slab_allocator::SlabAccess;
use crate::space_map::SpaceMapEntry;
use crate::space_map::SpaceMapPhys;

Expand All @@ -33,7 +33,10 @@ impl Slabs {

/// Panics if not present
pub fn get_mut(&mut self, id: SlabId) -> &mut Slab {
let slab = self.0.get_mut(id).unwrap();
let slab = self
.0
.get_mut(id)
.unwrap_or_else(|| panic!("{id:?} not present"));
assert_eq!(slab.id, id);
slab
}
Expand Down Expand Up @@ -70,7 +73,7 @@ impl Slabs {

pub async fn open(
block_access: Arc<BlockAccess>,
slab_builder: &mut SlabAllocatorBuilder,
slab_access: &SlabAccess,
spacemap: &SpaceMapPhys,
spacemap_next: &SpaceMapPhys,
) -> Self {
Expand All @@ -84,52 +87,45 @@ impl Slabs {

let mut import_cb = |entry| match entry {
SpaceMapEntry::Alloc(extent) => {
let slab_id = slab_builder.extent_to_slab_id(extent);
slabs.get_mut(slab_id).import_alloc(extent)
let slab_id = slab_access.extent_to_slab_id(extent);
slabs.get_mut(slab_id).import_alloc(extent);
}
SpaceMapEntry::Free(extent) => {
let slab_id = slab_builder.extent_to_slab_id(extent);
slabs.get_mut(slab_id).import_free(extent)
let slab_id = slab_access.extent_to_slab_id(extent);
slabs.get_mut(slab_id).import_free(extent);
}
SpaceMapEntry::SlabInfo(info) => {
let slab_extent = slab_builder.slab_id_to_extent(info.slab_id);
match info.slab_type {
SpaceMapEntry::SlabInfo(slab_id, slab_type) => {
let slab_extent = slab_access.slab_id_to_extent(slab_id);
match slab_type {
SlabPhysType::BitmapBased { block_size } => {
slabs.insert(
info.slab_id,
BitmapSlab::new_slab(info.slab_id, slab_extent, block_size),
slab_id,
BitmapSlab::new_slab(slab_id, slab_extent, block_size),
);
}
SlabPhysType::ExtentBased { max_size } => {
slabs.insert(
info.slab_id,
ExtentSlab::new_slab(info.slab_id, slab_extent, max_size),
slab_id,
ExtentSlab::new_slab(slab_id, slab_extent, max_size),
);
}
SlabPhysType::Free => {
let old = slabs.remove(info.slab_id);
assert!(old.is_some());
let removed = slabs.remove(slab_id);
assert!(removed.is_some());
}
SlabPhysType::Evacuating => {
slabs.insert(
info.slab_id,
EvacuatingSlab::new_slab(info.slab_id, slab_extent),
);
slabs.insert(slab_id, EvacuatingSlab::new_slab(slab_id, slab_extent));
}
}
}
};
spacemap
.load(block_access.clone(), slab_builder.access(), &mut import_cb)
.load(block_access.clone(), slab_access, &mut import_cb)
.await;
spacemap_next
.load(block_access.clone(), slab_builder.access(), &mut import_cb)
.load(block_access.clone(), slab_access, &mut import_cb)
.await;

for slab in slabs.iter() {
slab_builder.claim(slab.id);
}

info!(
"read {} of spacemaps and processed {} entries in {}ms",
nice_p2size(spacemap.bytes() + spacemap_next.bytes()),
Expand Down
7 changes: 4 additions & 3 deletions cmd/zfs_object_agent/zettacache/src/block_allocator/zcdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ use super::SlabEnum;
use super::Slabs;
use crate::block_access::BlockAccess;
use crate::block_allocator::SlabId;
use crate::slab_allocator::SlabAccess;
use crate::slab_allocator::SlabAllocatorBuilder;
use crate::DumpSlabsOptions;

Expand Down Expand Up @@ -301,11 +302,11 @@ fn zcachedb_dump_slabs_print_legend() {

pub async fn zcachedb_dump_slabs(
block_access: Arc<BlockAccess>,
slab_builder: &mut SlabAllocatorBuilder,
slab_access: &SlabAccess,
phys: BlockAllocatorPhys,
opts: DumpSlabsOptions,
) {
let slab_size = slab_builder.slab_size();
let slab_size = slab_access.slab_size();
let buckets = phys.slab_buckets.buckets.clone();
let mut cache_slabs = vec![];
let mut slabs_per_device = HashMap::new();
Expand All @@ -314,7 +315,7 @@ pub async fn zcachedb_dump_slabs(
}
let slabs = Slabs::open(
block_access.clone(),
slab_builder,
slab_access,
&phys.spacemap,
&phys.spacemap_next,
)
Expand Down
4 changes: 2 additions & 2 deletions cmd/zfs_object_agent/zettacache/src/block_based_log/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -218,8 +218,8 @@ impl<T: BlockBasedLogEntry> BlockBasedLogPhys<T> {
let mut total_consumed = 0;
while total_consumed < extent_bytes.len() {
// XXX handle checksum error here
let (chunk, consumed): (BlockBasedLogChunk<T>, usize) = block_access
.chunk_from_raw(&extent_bytes[total_consumed..])
let (chunk, consumed) = block_access
.chunk_from_raw::<BlockBasedLogChunk<T>>(&extent_bytes[total_consumed..])
.unwrap();
assert_lt!(chunk.id, next_chunk);
if chunk_tx.send(chunk).await.is_err() {
Expand Down
Loading

0 comments on commit 36e8682

Please sign in to comment.