feat(rebuild): add rebuild rangers
Adds a set of rebuild rangers which can be used by different rebuilds:
1. full rebuild - walk the entire device range and copy every segment
   (current nexus full rebuild behaviour).
2. partial rebuild - walk the allocated segments only and copy them.
3. partial seq rebuild - walk the entire device range and copy only allocated
   segments (current nexus partial rebuild behaviour).

Signed-off-by: Tiago Castro <tiagolobocastro@gmail.com>
tiagolobocastro committed Feb 29, 2024
1 parent 091e7da commit 5c72e5f
Showing 6 changed files with 277 additions and 14 deletions.
6 changes: 6 additions & 0 deletions io-engine/src/core/segment_map.rs
@@ -97,3 +97,9 @@ impl SegmentMap {
self.count_ones() * self.segment_size / self.block_len
}
}

impl From<SegmentMap> for BitVec {
fn from(value: SegmentMap) -> Self {
value.segments
}
}
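
For context, a minimal standalone sketch (not part of this commit) of why the conversion to BitVec is useful: once a segment map has been turned into a plain bit vector, the indices of the set ("dirty") bits can be enumerated directly, which is the pattern the PartialRebuild ranger added below relies on. The dirty_segment_indices helper is hypothetical; only the bit_vec crate is assumed.

use bit_vec::BitVec;

/// Hypothetical helper: collect the indices of all set bits, i.e. the
/// segments that still need to be copied.
fn dirty_segment_indices(bits: BitVec) -> Vec<usize> {
    bits.into_iter()
        .enumerate()
        .filter_map(|(idx, is_set)| is_set.then_some(idx))
        .collect()
}

fn main() {
    let mut bits = BitVec::from_elem(8, false);
    bits.set(2, true);
    bits.set(5, true);
    assert_eq!(dirty_segment_indices(bits), vec![2, 5]);
}
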
1 change: 1 addition & 0 deletions io-engine/src/rebuild/mod.rs
@@ -9,6 +9,7 @@ mod rebuild_map;
mod rebuild_state;
mod rebuild_stats;
mod rebuild_task;
mod rebuilders;

pub use bdev_rebuild::BdevRebuildJob;
pub use nexus_rebuild::NexusRebuildJob;
4 changes: 4 additions & 0 deletions io-engine/src/rebuild/nexus_rebuild.rs
@@ -215,6 +215,10 @@ impl NexusRebuildJobBackend {

#[async_trait::async_trait(?Send)]
impl RebuildTaskCopier for NexusRebuildDescriptor {
fn descriptor(&self) -> &RebuildDescriptor {
&self.common
}

/// Copies one segment worth of data from source into destination. During
/// this time the LBA range being copied is locked so that there cannot be
/// front end I/O to the same LBA range.
7 changes: 7 additions & 0 deletions io-engine/src/rebuild/rebuild_map.rs
@@ -1,3 +1,4 @@
use bit_vec::BitVec;
use std::fmt::{Debug, Formatter};

use crate::core::SegmentMap;
@@ -62,3 +63,9 @@ impl RebuildMap {
self.segments.count_dirty_blks()
}
}

impl From<RebuildMap> for BitVec {
fn from(value: RebuildMap) -> Self {
value.segments.into()
}
}
20 changes: 6 additions & 14 deletions io-engine/src/rebuild/rebuild_task.rs
@@ -195,6 +195,7 @@ impl RebuildTasks {
/// can be expanded for sub-segment copies.
#[async_trait::async_trait(?Send)]
pub(super) trait RebuildTaskCopier {
fn descriptor(&self) -> &RebuildDescriptor;
/// Copies an entire segment at the given block address, from source to
/// target using a `DmaBuf`.
async fn copy_segment(
@@ -206,25 +207,16 @@ pub(super) trait RebuildTaskCopier {

#[async_trait::async_trait(?Send)]
impl RebuildTaskCopier for RebuildDescriptor {
+ fn descriptor(&self) -> &RebuildDescriptor {
+ self
+ }
+
/// Copies one segment worth of data from source into destination.
async fn copy_segment(
&self,
blk: u64,
task: &mut RebuildTask,
) -> Result<bool, RebuildError> {
- // todo: move the map out of the descriptor, into the specific backends.
- if self.is_blk_sync(blk) {
- return Ok(false);
- }
-
- // Perform the copy.
- let result = task.copy_one(blk, self).await;
-
- // In the case of success, mark the segment as already transferred.
- if result.is_ok() {
- self.blk_synced(blk);
- }
-
- result
+ task.copy_one(blk, self).await
}
}
253 changes: 253 additions & 0 deletions io-engine/src/rebuild/rebuilders.rs
@@ -0,0 +1,253 @@
use crate::rebuild::{
rebuild_descriptor::RebuildDescriptor,
rebuild_task::{RebuildTask, RebuildTaskCopier},
RebuildError,
RebuildMap,
};
use bit_vec::BitVec;
use std::{ops::Range, rc::Rc};

/// A rebuild may rebuild a device by walking it differently, for example:
/// 1. full rebuild - walk the entire device range and copy every segment
/// (current nexus full rebuild behaviour).
/// 2. partial rebuild - walk the allocated segments only and copy them.
/// 3. partial seq rebuild - walk the entire device range and copy only allocated
/// segments (current nexus partial rebuild behaviour).
pub(super) trait RangeRebuilder<T: RebuildTaskCopier> {
/// Fetch the next block to rebuild.
fn next(&mut self) -> Option<u64>;
/// Peek the next block to rebuild.
fn peek_next(&self) -> Option<u64>;
/// Get the number of blocks that remain to be rebuilt.
fn blocks_remaining(&self) -> u64;
/// Check if this is a partial rebuild.
fn is_partial(&self) -> bool;
/// Get the rebuild descriptor reference.
fn desc(&self) -> &RebuildDescriptor;
/// Get the copier which can copy a segment.
fn copier(&self) -> Rc<T>;
}
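
For orientation, a minimal standalone sketch (not part of this commit) of how a backend could drive such a walker: pull block offsets until the walk is exhausted and issue one segment copy per offset. Walker, StepWalker and the copy step are simplified, hypothetical stand-ins for RangeRebuilder, FullRebuild and RebuildTaskCopier, not the io-engine API.

/// Hypothetical stand-in for RangeRebuilder: yields block offsets to rebuild.
trait Walker {
    fn next(&mut self) -> Option<u64>;
}

/// Hypothetical stand-in for FullRebuild: walks the whole range in
/// segment-sized steps.
struct StepWalker {
    blocks: std::iter::StepBy<std::ops::Range<u64>>,
}

impl Walker for StepWalker {
    fn next(&mut self) -> Option<u64> {
        self.blocks.next()
    }
}

fn main() {
    // 64 blocks, 16 blocks per segment.
    let mut walker = StepWalker {
        blocks: (0u64..64).step_by(16),
    };
    let mut copied = Vec::new();
    while let Some(blk) = walker.next() {
        copied.push(blk); // a real backend would dispatch a segment copy here
    }
    assert_eq!(copied, vec![0, 16, 32, 48]);
}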

/// The range is the full range of the request, in steps of segment size.
pub(super) struct FullRebuild<T: RebuildTaskCopier> {
range: PeekableIterator<std::iter::StepBy<Range<u64>>>,
copier: Rc<T>,
}
impl<T: RebuildTaskCopier> FullRebuild<T> {
/// Create a full rebuild with the given copier.
#[allow(dead_code)]
pub(super) fn new(copier: T) -> Self {
let desc = copier.descriptor();
let range = desc.range.clone();
Self {
range: PeekableIterator::new(
range.step_by(desc.segment_size_blks as usize),
),
copier: Rc::new(copier),
}
}
}
impl<T: RebuildTaskCopier> RangeRebuilder<T> for FullRebuild<T> {
fn next(&mut self) -> Option<u64> {
self.range.next()
}
fn peek_next(&self) -> Option<u64> {
self.range.peek().cloned()
}

fn blocks_remaining(&self) -> u64 {
self.peek_next()
.map(|r| self.desc().range.end.max(r) - r)
.unwrap_or_default()
}
fn is_partial(&self) -> bool {
false
}

fn desc(&self) -> &RebuildDescriptor {
self.copier.descriptor()
}
fn copier(&self) -> Rc<T> {
self.copier.clone()
}
}

/// A partial rebuild range which steps through each segment but triggers
/// the copy only if the segment dirty bit is set.
pub(super) struct PartialRebuild<T: RebuildTaskCopier> {
range: PeekableIterator<std::iter::Enumerate<bit_vec::IntoIter>>,
total: u64,
rebuilt: u64,
copier: Rc<T>,
}
impl<T: RebuildTaskCopier> PartialRebuild<T> {
/// Create a partial rebuild with the given copier and segment map.
#[allow(dead_code)]
pub(super) fn new(map: RebuildMap, copier: T) -> Self {
let total = map.count_dirty_blks();
let bit_vec: BitVec = map.into();
Self {
range: PeekableIterator::new(bit_vec.into_iter().enumerate()),
total,
rebuilt: 0,
copier: Rc::new(copier),
}
}
}
impl<T: RebuildTaskCopier> RangeRebuilder<T> for PartialRebuild<T> {
fn next(&mut self) -> Option<u64> {
for (blk, is_set) in self.range.by_ref() {
if is_set {
return Some(blk as u64);
}
}
None
}
fn peek_next(&self) -> Option<u64> {
// todo: should we add a wrapper to ensure we peek only set bits?
self.range.peek().map(|(blk, _)| *blk as u64)
}

fn blocks_remaining(&self) -> u64 {
self.total.max(self.rebuilt) - self.rebuilt
}
fn is_partial(&self) -> bool {
true
}

fn desc(&self) -> &RebuildDescriptor {
self.copier.descriptor()
}
fn copier(&self) -> Rc<T> {
self.copier.clone()
}
}

/// The range is the full range of the request, in steps of segment size
/// and a copy is triggered for each segment.
/// However, during the copy itself, clean segments are skipped.
pub(super) struct PartialSeqRebuild<T: RebuildTaskCopier> {
range: PeekableIterator<std::iter::StepBy<Range<u64>>>,
copier: Rc<PartialSeqCopier<T>>,
}
impl<T: RebuildTaskCopier> PartialSeqRebuild<T> {
/// Create a partial sequential rebuild with the given copier and segment
/// map.
#[allow(dead_code)]
pub(super) fn new(map: RebuildMap, copier: T) -> Self {
let desc = copier.descriptor();
let range = desc.range.clone();
Self {
range: PeekableIterator::new(
range.step_by(desc.segment_size_blks as usize),
),
copier: Rc::new(PartialSeqCopier::new(map, copier)),
}
}
}
impl<T: RebuildTaskCopier> RangeRebuilder<PartialSeqCopier<T>>
for PartialSeqRebuild<T>
{
fn next(&mut self) -> Option<u64> {
self.range.next()
}
fn peek_next(&self) -> Option<u64> {
self.range.peek().cloned()
}

fn blocks_remaining(&self) -> u64 {
self.copier.map.lock().count_dirty_blks()
}
fn is_partial(&self) -> bool {
true
}

fn desc(&self) -> &RebuildDescriptor {
self.copier.descriptor()
}
fn copier(&self) -> Rc<PartialSeqCopier<T>> {
self.copier.clone()
}
}
/// The partial sequential rebuild copier, which uses a bitmap to determine if a
/// particular block range must be copied.
pub(super) struct PartialSeqCopier<T: RebuildTaskCopier> {
map: parking_lot::Mutex<RebuildMap>,
copier: T,
}
impl<T: RebuildTaskCopier> PartialSeqCopier<T> {
fn new(map: RebuildMap, copier: T) -> Self {
Self {
map: parking_lot::Mutex::new(map),
copier,
}
}
/// Checks if the given block is already synced (clean) in the rebuild map.
#[inline(always)]
fn is_blk_sync(&self, blk: u64) -> bool {
self.map.lock().is_blk_clean(blk)
}

/// Marks the rebuild segment starting from the given logical block as
/// already transferred.
#[inline(always)]
fn blk_synced(&self, blk: u64) {
self.map.lock().blk_clean(blk);
}
}
#[async_trait::async_trait(?Send)]
impl<T: RebuildTaskCopier> RebuildTaskCopier for PartialSeqCopier<T> {
fn descriptor(&self) -> &RebuildDescriptor {
self.copier.descriptor()
}

/// Copies one segment worth of data from source into destination.
async fn copy_segment(
&self,
blk: u64,
task: &mut RebuildTask,
) -> Result<bool, RebuildError> {
if self.is_blk_sync(blk) {
return Ok(false);
}

let result = self.copier.copy_segment(blk, task).await;

// In the case of success, mark the segment as already transferred.
if result.is_ok() {
self.blk_synced(blk);
}

result
}
}
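
To make the wrapping explicit, here is a minimal standalone sketch (not part of this commit) of the same decorator idea: the "skip if already synced, mark synced on success" policy lives in a wrapper around a plain copier rather than in the copier itself, mirroring how PartialSeqCopier wraps any RebuildTaskCopier. Copier, PlainCopier and SkipCleanCopier are hypothetical stand-ins (synchronous for brevity), not the io-engine API.

use std::collections::HashSet;

/// Hypothetical stand-in for RebuildTaskCopier.
trait Copier {
    /// Ok(true) if data was copied, Ok(false) if the copy was skipped.
    fn copy_segment(&mut self, blk: u64) -> Result<bool, String>;
}

struct PlainCopier;
impl Copier for PlainCopier {
    fn copy_segment(&mut self, _blk: u64) -> Result<bool, String> {
        Ok(true) // pretend one segment worth of data was copied
    }
}

/// Wraps another copier and consults a dirty-set before copying.
struct SkipCleanCopier<C: Copier> {
    dirty: HashSet<u64>,
    inner: C,
}

impl<C: Copier> Copier for SkipCleanCopier<C> {
    fn copy_segment(&mut self, blk: u64) -> Result<bool, String> {
        if !self.dirty.contains(&blk) {
            return Ok(false); // segment already clean, skip the copy
        }
        let result = self.inner.copy_segment(blk);
        if result.is_ok() {
            self.dirty.remove(&blk); // mark as synced on success
        }
        result
    }
}

fn main() {
    let mut copier = SkipCleanCopier {
        dirty: HashSet::from([0u64, 32]),
        inner: PlainCopier,
    };
    assert_eq!(copier.copy_segment(0), Ok(true)); // dirty: copied
    assert_eq!(copier.copy_segment(16), Ok(false)); // clean: skipped
    assert_eq!(copier.copy_segment(0), Ok(false)); // now synced: skipped
}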

/// Adds peekable functionality to a generic iterator.
/// > Note: std's Peekable is not sufficient here because its peek requires a
/// > mutable reference. We work around this limitation by always pre-fetching
/// > the next value, at a small performance cost.
struct PeekableIterator<I: Iterator> {
iter: I,
peek: Option<I::Item>,
}
impl<I: Iterator> PeekableIterator<I> {
fn new(mut iter: I) -> Self {
Self {
peek: iter.next(),
iter,
}
}
/// Peek at the next value which `next` would yield, without consuming it.
fn peek(&self) -> Option<&I::Item> {
self.peek.as_ref()
}
}
impl<I: Iterator> Iterator for PeekableIterator<I> {
type Item = I::Item;

fn next(&mut self) -> Option<Self::Item> {
std::mem::replace(&mut self.peek, self.iter.next())
}
}
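
A short usage sketch (not part of this commit, assuming the PeekableIterator above is in scope): peek works through a shared reference, unlike std's Peekable::peek which takes &mut self, and next still yields every item in order, starting with the pre-fetched one.

#[test]
fn peek_demo() {
    let it = PeekableIterator::new((0u64..64).step_by(16));
    assert_eq!(it.peek(), Some(&0)); // no mutable borrow required
    let blocks: Vec<u64> = it.collect();
    assert_eq!(blocks, vec![0, 16, 32, 48]);
}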
