diff --git a/cmd/zfs_object_agent/zettacache/src/block_access.rs b/cmd/zfs_object_agent/zettacache/src/block_access.rs
index 99fdcd2db459..5a08defc062f 100644
--- a/cmd/zfs_object_agent/zettacache/src/block_access.rs
+++ b/cmd/zfs_object_agent/zettacache/src/block_access.rs
@@ -1,3 +1,4 @@
+use std::any::type_name;
 use std::collections::BTreeMap;
 use std::fmt::Debug;
 use std::fmt::Display;
@@ -872,7 +873,15 @@ impl BlockAccess {
         let struct_obj: T = match header.encoding {
             EncodeType::Json => from_json_slice(serde_slice)?,
-            EncodeType::Bincode => Self::bincode_options().deserialize(serde_slice)?,
+            EncodeType::Bincode => Self::bincode_options()
+                .deserialize(serde_slice)
+                .with_context(|| {
+                    format!(
+                        "{header:?} {}-byte payload for {}: {serde_slice:?}",
+                        serde_slice.len(),
+                        type_name::<T>()
+                    )
+                })?,
             EncodeType::BincodeFixint => Self::bincode_fixint_options().deserialize(serde_slice)?,
         };
         Ok((struct_obj, buf.len() - remainder_slice.len()))
 
diff --git a/cmd/zfs_object_agent/zettacache/src/block_allocator/mod.rs b/cmd/zfs_object_agent/zettacache/src/block_allocator/mod.rs
index 3880676237aa..b19f310fd904 100644
--- a/cmd/zfs_object_agent/zettacache/src/block_allocator/mod.rs
+++ b/cmd/zfs_object_agent/zettacache/src/block_allocator/mod.rs
@@ -31,6 +31,7 @@ use util::RangeTree;
 use self::slabs::Slabs;
 use crate::base_types::*;
 use crate::block_access::BlockAccess;
+use crate::slab_allocator::SlabAccess;
 use crate::slab_allocator::SlabAllocator;
 use crate::slab_allocator::SlabAllocatorBuilder;
 use crate::slab_allocator::SlabId;
@@ -111,11 +112,11 @@ trait SlabTrait {
     fn free(&mut self, extent: Extent);
     fn flush_to_spacemap(&mut self, spacemap: &mut SpaceMap);
     fn condense_to_spacemap(&self, spacemap: &mut SpaceMap);
+    fn mark_slab_info(&self, id: SlabId, spacemap: &mut SpaceMap);
     fn max_size(&self) -> u32;
     fn capacity_bytes(&self) -> u64;
     fn free_space(&self) -> u64;
     fn allocated_space(&self) -> u64;
-    fn phys_type(&self) -> SlabPhysType;
     fn num_segments(&self) -> u64;
     fn allocated_extents(&self) -> Vec<Extent>;
     fn dump_info(&self);
@@ -327,10 +328,13 @@ impl SlabTrait for BitmapSlab {
         u64::from(self.total_slots - self.allocatable.len()) * u64::from(self.slot_size)
     }
 
-    fn phys_type(&self) -> SlabPhysType {
-        SlabPhysType::BitmapBased {
-            block_size: self.slot_size,
-        }
+    fn mark_slab_info(&self, id: SlabId, spacemap: &mut SpaceMap) {
+        spacemap.mark_slab_info(
+            id,
+            SlabPhysType::BitmapBased {
+                block_size: self.slot_size,
+            },
+        );
     }
 
     fn dump_info(&self) {
@@ -574,10 +578,13 @@ impl SlabTrait for ExtentSlab {
         self.total_space - self.free_space()
     }
 
-    fn phys_type(&self) -> SlabPhysType {
-        SlabPhysType::ExtentBased {
-            max_size: self.max_allowed_alloc_size,
-        }
+    fn mark_slab_info(&self, id: SlabId, spacemap: &mut SpaceMap) {
+        spacemap.mark_slab_info(
+            id,
+            SlabPhysType::ExtentBased {
+                max_size: self.max_allowed_alloc_size,
+            },
+        );
     }
 
     fn dump_info(&self) {
@@ -693,8 +700,8 @@ impl SlabTrait for EvacuatingSlab {
         0
     }
 
-    fn phys_type(&self) -> SlabPhysType {
-        SlabPhysType::Evacuating
+    fn mark_slab_info(&self, id: SlabId, spacemap: &mut SpaceMap) {
+        spacemap.mark_slab_info(id, SlabPhysType::Evacuating);
     }
 
     fn dump_info(&self) {
@@ -757,11 +764,11 @@ impl Slab {
     }
 
     fn import_alloc(&mut self, extent: Extent) {
-        self.inner.as_mut_dyn().import_alloc(extent);
+        self.inner.as_mut_dyn().import_alloc(extent)
     }
 
     fn import_free(&mut self, extent: Extent) {
-        self.inner.as_mut_dyn().import_free(extent);
+        self.inner.as_mut_dyn().import_free(extent)
     }
 
     fn allocate(&mut self, size: u32) -> Option<Extent> {
@@ -770,24 +777,24 @@ impl Slab {
     }
 
     fn free(&mut self, extent: Extent) {
-        self.inner.as_mut_dyn().free(extent);
+        self.inner.as_mut_dyn().free(extent)
     }
 
     fn mark_slab_info(&self, spacemap: &mut SpaceMap) {
-        spacemap.mark_slab_info(self.id, self.inner.as_dyn().phys_type());
+        self.inner.as_dyn().mark_slab_info(self.id, spacemap);
     }
 
     fn flush_to_spacemap(&mut self, spacemap: &mut SpaceMap) {
-        self.inner.as_mut_dyn().flush_to_spacemap(spacemap);
         self.is_dirty = false;
         self.is_allocd = false;
+        self.inner.as_mut_dyn().flush_to_spacemap(spacemap)
     }
 
     fn condense_to_spacemap(&mut self, spacemap: &mut SpaceMap) {
         // By leaving a new mark with the slab info when condensing we make the entries in the old
         // spacemap obsolete.
         self.mark_slab_info(spacemap);
-        self.inner.as_mut_dyn().condense_to_spacemap(spacemap);
+        self.inner.as_mut_dyn().condense_to_spacemap(spacemap)
     }
 
     fn max_size(&self) -> u32 {
@@ -823,7 +830,7 @@ impl Slab {
 
     fn dump_info(&self) {
         writeln_stdout!("{:?}", self.id);
-        self.inner.as_dyn().dump_info();
+        self.inner.as_dyn().dump_info()
     }
 
     fn location(&self) -> DiskLocation {
@@ -975,12 +982,12 @@ impl BlockAllocatorBuilder {
     /// Claims the slabs used by the block allocator with the SlabAllocatorBuilder.
     pub async fn new(
         block_access: Arc<BlockAccess>,
-        slab_builder: &mut SlabAllocatorBuilder,
+        slab_access: &SlabAccess,
         phys: BlockAllocatorPhys,
     ) -> Self {
         let slabs = Slabs::open(
             block_access.clone(),
-            slab_builder,
+            slab_access,
             &phys.spacemap,
             &phys.spacemap_next,
         )
@@ -992,7 +999,13 @@ impl BlockAllocatorBuilder {
         }
     }
 
-    pub async fn build(self, slab_allocator: Arc<SlabAllocator>) -> BlockAllocator {
+    pub fn claim(&self, slab_builder: &mut SlabAllocatorBuilder) {
+        for slab in self.slabs.iter() {
+            slab_builder.claim(slab.id);
+        }
+    }
+
+    pub fn build(self, slab_allocator: Arc<SlabAllocator>) -> BlockAllocator {
         let phys = self.phys;
         let slabs = self.slabs;
         let block_access = self.block_access;
@@ -1399,10 +1412,10 @@ impl BlockAllocator {
             .slab_buckets
             .get_bucket_size_for_allocation_size(slab.max_size());
 
-        let extents: Vec<Extent> = slab
+        let extents = slab
             .allocated_extents()
-            .iter()
-            .flat_map(|&old| {
+            .into_iter()
+            .flat_map(|old| {
                 match slab.inner {
                     SlabEnum::BitmapBased(_) => {
                         let extent_size = u32::try_from(old.size).unwrap();
@@ -1429,7 +1442,7 @@
                     SlabEnum::Evacuating(_) => panic!("invalid slab type"),
                 }
             })
-            .collect();
+            .collect::<Vec<_>>();
 
         let mut map = extents
             .iter()
             .map(
                 |&old| match self.allocate_impl(bucket, u32::try_from(old.size).unwrap()) {
                     Some(new) => (old, Some(new)),
                     None => {
-                        trace!(
-                            "cache rebalance allocation failed for old extent '{:?}' in bucket '{:?}'",
-                            old,
-                            bucket
-                        );
+                        trace!("cache rebalance allocation failed for old {old:?} in {bucket:?}");
                         (old, None)
                     }
                 },
@@ -1476,10 +1485,7 @@
         let merged = before - after;
         if before != after {
             trace!(
-                "rebalance of slab '{:?}' has {} entries after merging ({} entries merged)",
-                id,
-                after,
-                merged,
+                "rebalance of {id:?} has {after} entries after merging ({merged} entries merged)",
             );
         }
 
diff --git a/cmd/zfs_object_agent/zettacache/src/block_allocator/slabs.rs b/cmd/zfs_object_agent/zettacache/src/block_allocator/slabs.rs
index 2bb35ec40913..32ad56dd34e6 100644
--- a/cmd/zfs_object_agent/zettacache/src/block_allocator/slabs.rs
+++ b/cmd/zfs_object_agent/zettacache/src/block_allocator/slabs.rs
@@ -13,7 +13,7 @@
 use crate::block_access::BlockAccess;
 use crate::block_allocator::EvacuatingSlab;
 use crate::block_allocator::ExtentSlab;
 use crate::block_allocator::SlabPhysType;
-use crate::slab_allocator::SlabAllocatorBuilder;
+use crate::slab_allocator::SlabAccess;
 use crate::space_map::SpaceMapEntry;
 use crate::space_map::SpaceMapPhys;
@@ -33,7 +33,10 @@ impl Slabs {
 
     /// Panics if not present
     pub fn get_mut(&mut self, id: SlabId) -> &mut Slab {
-        let slab = self.0.get_mut(id).unwrap();
+        let slab = self
+            .0
+            .get_mut(id)
+            .unwrap_or_else(|| panic!("{id:?} not present"));
         assert_eq!(slab.id, id);
         slab
     }
@@ -70,7 +73,7 @@ impl Slabs {
 
     pub async fn open(
         block_access: Arc<BlockAccess>,
-        slab_builder: &mut SlabAllocatorBuilder,
+        slab_access: &SlabAccess,
         spacemap: &SpaceMapPhys,
         spacemap_next: &SpaceMapPhys,
     ) -> Self {
@@ -84,52 +87,45 @@ impl Slabs {
 
         let mut import_cb = |entry| match entry {
             SpaceMapEntry::Alloc(extent) => {
-                let slab_id = slab_builder.extent_to_slab_id(extent);
-                slabs.get_mut(slab_id).import_alloc(extent)
+                let slab_id = slab_access.extent_to_slab_id(extent);
+                slabs.get_mut(slab_id).import_alloc(extent);
             }
             SpaceMapEntry::Free(extent) => {
-                let slab_id = slab_builder.extent_to_slab_id(extent);
-                slabs.get_mut(slab_id).import_free(extent)
+                let slab_id = slab_access.extent_to_slab_id(extent);
+                slabs.get_mut(slab_id).import_free(extent);
             }
-            SpaceMapEntry::SlabInfo(info) => {
-                let slab_extent = slab_builder.slab_id_to_extent(info.slab_id);
-                match info.slab_type {
+            SpaceMapEntry::SlabInfo(slab_id, slab_type) => {
+                let slab_extent = slab_access.slab_id_to_extent(slab_id);
+                match slab_type {
                     SlabPhysType::BitmapBased { block_size } => {
                         slabs.insert(
-                            info.slab_id,
-                            BitmapSlab::new_slab(info.slab_id, slab_extent, block_size),
+                            slab_id,
+                            BitmapSlab::new_slab(slab_id, slab_extent, block_size),
                         );
                     }
                     SlabPhysType::ExtentBased { max_size } => {
                         slabs.insert(
-                            info.slab_id,
-                            ExtentSlab::new_slab(info.slab_id, slab_extent, max_size),
+                            slab_id,
+                            ExtentSlab::new_slab(slab_id, slab_extent, max_size),
                         );
                     }
                     SlabPhysType::Free => {
-                        let old = slabs.remove(info.slab_id);
-                        assert!(old.is_some());
+                        let removed = slabs.remove(slab_id);
+                        assert!(removed.is_some());
                     }
                     SlabPhysType::Evacuating => {
-                        slabs.insert(
-                            info.slab_id,
-                            EvacuatingSlab::new_slab(info.slab_id, slab_extent),
-                        );
+                        slabs.insert(slab_id, EvacuatingSlab::new_slab(slab_id, slab_extent));
                     }
                 }
             }
         };
 
         spacemap
-            .load(block_access.clone(), slab_builder.access(), &mut import_cb)
+            .load(block_access.clone(), slab_access, &mut import_cb)
             .await;
         spacemap_next
-            .load(block_access.clone(), slab_builder.access(), &mut import_cb)
+            .load(block_access.clone(), slab_access, &mut import_cb)
             .await;
-        for slab in slabs.iter() {
-            slab_builder.claim(slab.id);
-        }
-
         info!(
             "read {} of spacemaps and processed {} entries in {}ms",
             nice_p2size(spacemap.bytes() + spacemap_next.bytes()),
diff --git a/cmd/zfs_object_agent/zettacache/src/block_allocator/zcdb.rs b/cmd/zfs_object_agent/zettacache/src/block_allocator/zcdb.rs
index 5c6fd2c24457..552ca7842df0 100644
--- a/cmd/zfs_object_agent/zettacache/src/block_allocator/zcdb.rs
+++ b/cmd/zfs_object_agent/zettacache/src/block_allocator/zcdb.rs
@@ -16,6 +16,7 @@
 use super::SlabEnum;
 use super::Slabs;
 use crate::block_access::BlockAccess;
 use crate::block_allocator::SlabId;
+use crate::slab_allocator::SlabAccess;
 use crate::slab_allocator::SlabAllocatorBuilder;
 use crate::DumpSlabsOptions;
@@ -301,11 +302,11 @@ fn zcachedb_dump_slabs_print_legend() {
 
 pub async fn zcachedb_dump_slabs(
     block_access: Arc<BlockAccess>,
-    slab_builder: &mut SlabAllocatorBuilder,
+    slab_access: &SlabAccess,
     phys: BlockAllocatorPhys,
     opts: DumpSlabsOptions,
 ) {
-    let slab_size = slab_builder.slab_size();
+    let slab_size = slab_access.slab_size();
     let buckets = phys.slab_buckets.buckets.clone();
     let mut cache_slabs = vec![];
     let mut slabs_per_device = HashMap::new();
@@ -314,7 +315,7 @@ pub async fn zcachedb_dump_slabs(
     }
     let slabs = Slabs::open(
         block_access.clone(),
-        slab_builder,
+        slab_access,
         &phys.spacemap,
         &phys.spacemap_next,
     )
diff --git a/cmd/zfs_object_agent/zettacache/src/block_based_log/mod.rs b/cmd/zfs_object_agent/zettacache/src/block_based_log/mod.rs
index 5151d9b62329..25dd774e666d 100644
--- a/cmd/zfs_object_agent/zettacache/src/block_based_log/mod.rs
+++ b/cmd/zfs_object_agent/zettacache/src/block_based_log/mod.rs
@@ -218,8 +218,8 @@ impl BlockBasedLogPhys {
             let mut total_consumed = 0;
             while total_consumed < extent_bytes.len() {
                 // XXX handle checksum error here
-                let (chunk, consumed): (BlockBasedLogChunk<T>, usize) = block_access
-                    .chunk_from_raw(&extent_bytes[total_consumed..])
+                let (chunk, consumed) = block_access
+                    .chunk_from_raw::<BlockBasedLogChunk<T>>(&extent_bytes[total_consumed..])
                     .unwrap();
                 assert_lt!(chunk.id, next_chunk);
                 if chunk_tx.send(chunk).await.is_err() {
diff --git a/cmd/zfs_object_agent/zettacache/src/block_based_log/summarized.rs b/cmd/zfs_object_agent/zettacache/src/block_based_log/summarized.rs
index ba12068e6591..3a2a26bfebdb 100644
--- a/cmd/zfs_object_agent/zettacache/src/block_based_log/summarized.rs
+++ b/cmd/zfs_object_agent/zettacache/src/block_based_log/summarized.rs
@@ -221,18 +221,18 @@ impl ReadOnlySummarizedBlockBasedLog {
     /// Returns the exact location/size of this chunk (not the whole contiguous extent)
     fn chunk_extent(&self, chunk_id: ChunkId) -> Extent {
         let chunk_id = usize::from64(chunk_id.0);
-        let chunk_summary = self.chunks[chunk_id];
+        let chunk_offset = self.chunks[chunk_id].offset;
         let chunk_size = if chunk_id == self.chunks.len() - 1 {
-            self.phys.this.next_chunk_offset - chunk_summary.offset
+            self.phys.this.next_chunk_offset - chunk_offset
         } else {
-            self.chunks[chunk_id + 1].offset - chunk_summary.offset
+            self.chunks[chunk_id + 1].offset - chunk_offset
         };
         Extent {
             location: self
                 .phys
                 .this
-                .offset_to_location(self.slab_allocator.access(), chunk_summary.offset),
+                .offset_to_location(self.slab_allocator.access(), chunk_offset),
             size: chunk_size,
         }
     }
@@ -395,14 +395,12 @@ impl SummarizedBlockBasedLog {
         let mut new_chunks = Vec::new();
         self.this
             .flush_impl(|_, offset, first_entry| {
+                let first_key = first_entry.key();
                 let entry = BlockBasedLogChunkSummaryEntry {
                     offset,
                     first_entry,
                 };
-                new_chunks.push(SummaryEntry {
-                    offset,
-                    first_key: first_entry.key(),
-                });
+                new_chunks.push(SummaryEntry { offset, first_key });
                 self.chunk_summary.push(entry);
             })
             .await;
@@ -443,8 +441,8 @@ impl SummarizedBlockBasedLog {
         );
         self.trim_key = Some(trim_key);
         if let Some(chunk_id) = self.readonly.lookup_chunk_by_key(&trim_key) {
-            let chunk_summary = self.readonly.chunks[usize::from64(chunk_id.0)];
-            self.this.trim(chunk_summary.offset);
+            let chunk_offset = self.readonly.chunks[usize::from64(chunk_id.0)].offset;
+            self.this.trim(chunk_offset);
         }
     }
diff --git a/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs b/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs
index aceb78a4f568..0fadd7663cdf 100644
--- a/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs
+++ b/cmd/zfs_object_agent/zettacache/src/slab_allocator.rs
@@ -18,7 +18,7 @@ use std::sync::RwLock;
 
 use bimap::BiBTreeMap;
 use bytesize::ByteSize;
-use log::trace;
+use log::*;
 use more_asserts::*;
 use rand::seq::SliceRandom;
 use rand::thread_rng;
@@ -191,7 +191,7 @@ impl SlabAllocatorBuilder {
 
     pub fn claim(&mut self, slab_id: SlabId) {
         let removed = self.allocatable.remove(&slab_id);
-        assert!(removed);
+        assert!(removed, "{slab_id:?} claimed twice");
     }
 
     pub fn build(self) -> SlabAllocator {
@@ -218,6 +218,7 @@
         self.access.extent_to_slab_id(extent)
     }
 
+    #[allow(dead_code)]
     pub fn slab_size(&self) -> u64 {
         self.access.slab_size()
     }
@@ -332,8 +333,8 @@ impl SlabAllocator {
         let inner = self.inner.lock().unwrap();
         let target_free_slabs =
             inner.reserved_slabs + TARGET_AVAILABLE_SLABS_PCT.apply(self.access.num_slabs());
-        let current_free_slabs = inner.allocatable.len() as u64;
-        target_free_slabs.saturating_sub(current_free_slabs)
+        let current_free_slabs = inner.allocatable.len() + inner.freeing.len();
+        target_free_slabs.saturating_sub(current_free_slabs as u64)
     }
 
     /// Release the space held by freed slabs, allowing them to be re-allocated. This is safe to
diff --git a/cmd/zfs_object_agent/zettacache/src/space_map.rs b/cmd/zfs_object_agent/zettacache/src/space_map.rs
index 15fe1e49e143..8f09a0037aa1 100644
--- a/cmd/zfs_object_agent/zettacache/src/space_map.rs
+++ b/cmd/zfs_object_agent/zettacache/src/space_map.rs
@@ -16,17 +16,11 @@ use crate::slab_allocator::SlabAllocator;
 use crate::slab_allocator::SlabAllocatorBuilder;
 use crate::slab_allocator::SlabId;
 
-#[derive(Debug, Serialize, Deserialize, Copy, Clone)]
-pub struct SlabInfoEntry {
-    pub slab_id: SlabId,
-    pub slab_type: SlabPhysType,
-}
-
 #[derive(Debug, Serialize, Deserialize, Copy, Clone)]
 pub enum SpaceMapEntry {
     Alloc(Extent),
     Free(Extent),
-    SlabInfo(SlabInfoEntry),
+    SlabInfo(SlabId, SlabPhysType),
 }
 
 impl BlockBasedLogEntry for SpaceMapEntry {}
@@ -107,10 +101,7 @@ impl SpaceMap {
     }
 
     pub fn mark_slab_info(&mut self, slab_id: SlabId, slab_type: SlabPhysType) {
-        self.log.push(SpaceMapEntry::SlabInfo(SlabInfoEntry {
-            slab_id,
-            slab_type,
-        }));
+        self.log.push(SpaceMapEntry::SlabInfo(slab_id, slab_type));
     }
 
     pub async fn flush(&mut self) -> SpaceMapPhys {
diff --git a/cmd/zfs_object_agent/zettacache/src/zettacache/merge.rs b/cmd/zfs_object_agent/zettacache/src/zettacache/merge.rs
index 50077d29f919..a7d47abbc4c4 100644
--- a/cmd/zfs_object_agent/zettacache/src/zettacache/merge.rs
+++ b/cmd/zfs_object_agent/zettacache/src/zettacache/merge.rs
@@ -373,10 +373,8 @@ impl MergeState {
         let begin = Instant::now();
         debug!("using {start_key:?} as start key for merge");
 
-        let mut index_stream;
-        let mut progress;
-        {
-            info!(
+
+        info!(
             "writing new index to merge {} pending changes into index of {} entries ({}), eviction cutoff {:?}, ghost cutoff {:?}",
             self.old_pending_changes.len(),
             old_index_phys.len(),
@@ -384,14 +382,13 @@
             self.eviction_cutoff,
             self.ghost_cutoff,
         );
-            index_stream = old_index_phys.iter_chunks(block_access, slab_access);
-            progress = Progress::new(
-                tx,
-                old_index_phys.atime_histogram().first_ghost(),
-                old_index_phys.atime_histogram().first_live(),
-                self.last_atime - old_index_phys.atime_histogram().first_ghost() + 1,
-            );
-        }
+        let mut index_stream = old_index_phys.iter_chunks(block_access, slab_access);
+        let mut progress = Progress::new(
+            tx,
+            old_index_phys.atime_histogram().first_ghost(),
+            old_index_phys.atime_histogram().first_live(),
+            self.last_atime - old_index_phys.atime_histogram().first_ghost() + 1,
+        );
         let mut pending_changes_iter = self
             .old_pending_changes
             .range((start_key.map_or(Unbounded, Excluded), Unbounded))
diff --git a/cmd/zfs_object_agent/zettacache/src/zettacache/mod.rs b/cmd/zfs_object_agent/zettacache/src/zettacache/mod.rs
index df83331a6035..76c42db1144e 100644
--- a/cmd/zfs_object_agent/zettacache/src/zettacache/mod.rs
+++ b/cmd/zfs_object_agent/zettacache/src/zettacache/mod.rs
@@ -120,7 +120,7 @@
 
     static ref QUANTILES_IN_SIZE_HISTOGRAM: usize = 100;
 
-    // Buffers for incomming data blocks: the "demand" buffer is for read-miss blocks. The
+    // Buffers for incoming data blocks: the "demand" buffer is for read-miss blocks. The
     // "speculative" buffer is for blocks being written. Note that ingesting a single block from
     // an object can result in "inflation" since the entire object must be held in memory. But
     // this is mitigated by the fact that we typically ingest the entire object on writes, and
@@ -624,13 +624,14 @@ impl Inner {
 
         let block_builder = BlockAllocatorBuilder::new(
             block_access.clone(),
-            &mut slab_builder,
+            slab_builder.access(),
             checkpoint.block_allocator,
         )
         .await;
+        block_builder.claim(&mut slab_builder);
 
         let slab_allocator = Arc::new(slab_builder.build());
-        let block_allocator = block_builder.build(slab_allocator.clone()).await;
+        let block_allocator = block_builder.build(slab_allocator.clone());
 
         let operation_log = BlockBasedLog::open(
             block_access.clone(),
@@ -784,11 +785,9 @@ impl Inner {
             checkpoint_wanted: std::sync::Mutex::new(checkpoint_wanted_tx),
         });
 
-        let my_cache = this.clone();
+        let inner = this.clone();
         measure!("checkpoint_task").spawn(async move {
-            my_cache
-                .checkpoint_task(checkpoint_wanted_rx, merging)
-                .await;
+            inner.checkpoint_task(checkpoint_wanted_rx, merging).await;
         });
 
         let inner = this.clone();
@@ -993,8 +992,8 @@ impl Inner {
                 // merge task complete, replace the current index with the new index
                 Some(MergeMessage::Complete(new_index)) => {
                     let mut indices = self.indices.write().await;
-                    let mut locked = lock_measured!(&self.locked).await;
-                    locked.rotate_index(&mut indices.old, new_index).await;
+                    let mut locked = lock_non_send_measured!(&self.locked).await;
+                    locked.rotate_index(&mut indices.old, new_index);
                     locked.block_allocator.rebalance_fini();
                     indices.new = None;
                     merging = None;
@@ -1482,20 +1481,22 @@ impl Inner {
     }
 
     async fn add_disk(&self, path: &Path) -> Result<()> {
-        lock_measured!(&self.locked).await.add_disk(path)?;
+        lock_non_send_measured!(&self.locked).await.add_disk(path)?;
         self.sync_checkpoint().await;
         Ok(())
     }
 
     // Returns the amount of additional space, in bytes
     async fn expand_disk(&self, path: &Path) -> Result<u64> {
-        let additional_bytes = lock_measured!(&self.locked).await.expand_disk(path)?;
+        let additional_bytes = lock_non_send_measured!(&self.locked)
+            .await
+            .expand_disk(path)?;
         self.sync_checkpoint().await;
         Ok(additional_bytes)
     }
 
     async fn initiate_merge(&self) {
-        lock_measured!(&self.locked).await.request_merge();
+        lock_non_send_measured!(&self.locked).await.request_merge();
         self.sync_checkpoint().await;
     }
 
@@ -1518,11 +1519,14 @@ impl Inner {
     }
 
     async fn hits_by_size_data(&self) -> SizeHistogramPhys {
-        lock_measured!(&self.locked).await.size_histogram.clone()
+        lock_non_send_measured!(&self.locked)
+            .await
+            .size_histogram
+            .clone()
     }
 
     async fn clear_hit_data(&self) {
-        lock_measured!(&self.locked).await.clear_hit_data();
+        lock_non_send_measured!(&self.locked).await.clear_hit_data();
     }
 
     fn devices(&self) -> DeviceList {
@@ -2066,7 +2070,7 @@ impl Locked {
 
     /// Switch to the new index returned from the merge task and clear the merging state.
     /// Called with the old index write-locked.
-    async fn rotate_index(&mut self, old_index: &mut IndexRun, new_index: IndexRun) {
+    fn rotate_index(&mut self, old_index: &mut IndexRun, new_index: IndexRun) {
         let mut merge = Arc::try_unwrap(self.merge.take().unwrap())
             .expect("unable to unwrap merge state during index rotation");
 
diff --git a/cmd/zfs_object_agent/zettacache/src/zettacache/zcdb.rs b/cmd/zfs_object_agent/zettacache/src/zettacache/zcdb.rs
index 435337998a74..b999ba1e5ace 100644
--- a/cmd/zfs_object_agent/zettacache/src/zettacache/zcdb.rs
+++ b/cmd/zfs_object_agent/zettacache/src/zettacache/zcdb.rs
@@ -218,7 +218,7 @@ impl ZCacheDBHandle {
     pub async fn dump_slabs(&mut self, opts: DumpSlabsOptions) {
         zcachedb_dump_slabs(
             self.block_access.clone(),
-            &mut self.slab_builder,
+            self.slab_builder.access(),
             self.checkpoint.block_allocator.clone(),
             opts,
         )
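
Note on the block_access.rs hunk: anyhow's with_context() takes a closure, so
the format!() that renders the header, the payload length, and
std::any::type_name::<T>() only runs on the failure path. A minimal
self-contained sketch of the same pattern, using plain bincode::deserialize
rather than the agent's configured bincode_options() (the function name
deserialize_logged is illustrative, not from the patch):

    use std::any::type_name;

    use anyhow::{Context, Result};
    use serde::de::DeserializeOwned;

    // Attach the target type and payload size to the error chain; the closure
    // passed to with_context() is only evaluated if deserialization fails.
    fn deserialize_logged<T: DeserializeOwned>(payload: &[u8]) -> Result<T> {
        bincode::deserialize(payload).with_context(|| {
            format!("{}-byte payload for {}", payload.len(), type_name::<T>())
        })
    }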
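Note on the builder split in block_allocator/mod.rs: BlockAllocatorBuilder::new()
now reads slab state through the read-only SlabAccess, a separate claim() step
registers ownership with the SlabAllocatorBuilder, and only then is build()
called, which is exactly the ordering zettacache/mod.rs follows. A toy model of
the three phases and of the hardened claim assertion, with hypothetical
stand-in types:

    use std::collections::BTreeSet;

    struct SlabAllocatorBuilder {
        allocatable: BTreeSet<u64>, // slab ids nobody has claimed yet
    }

    impl SlabAllocatorBuilder {
        fn claim(&mut self, slab_id: u64) {
            let removed = self.allocatable.remove(&slab_id);
            // Claiming a slab twice, or claiming an unknown slab, is a bug.
            assert!(removed, "slab {slab_id} claimed twice");
        }
    }

    fn main() {
        let mut slab_builder = SlabAllocatorBuilder {
            allocatable: (0..8).collect(),
        };
        let owned = [2u64, 5]; // phase 1: discovered via read-only access
        for id in owned {
            slab_builder.claim(id); // phase 2: claim before finalizing
        }
        // phase 3: build the allocator from the remaining slabs
        assert_eq!(slab_builder.allocatable.len(), 6);
    }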
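Note on the space_map.rs hunk: SpaceMapEntry::SlabInfo changes from a newtype
variant wrapping SlabInfoEntry to a two-field tuple variant. Since bincode
encodes a struct as its fields in declaration order, and the variant's position
in the enum is unchanged, the two shapes should serialize to identical bytes,
leaving existing on-disk spacemaps readable. That compatibility argument is my
inference, not something the patch states; the check below uses simplified
stand-in field types:

    use serde::{Deserialize, Serialize};

    #[derive(Serialize)]
    struct InfoEntry {
        slab_id: u64,  // stands in for SlabId
        slab_type: u8, // stands in for SlabPhysType
    }

    #[derive(Serialize)]
    enum OldEntry {
        SlabInfo(InfoEntry),
    }

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    enum NewEntry {
        SlabInfo(u64, u8),
    }

    fn main() {
        let old = bincode::serialize(&OldEntry::SlabInfo(InfoEntry {
            slab_id: 7,
            slab_type: 3,
        }))
        .unwrap();
        let new = bincode::serialize(&NewEntry::SlabInfo(7, 3)).unwrap();
        assert_eq!(old, new); // byte-identical encodings
    }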