Commit
[791] Avoid Pearl Storage clone (#792)
* [791] Fix simple places

* [791] Build fix

* [791] Holder heavily refactored

* [791] Fix some build errors

* [791] Some fixes

* [791] Do not consume holder on close

* [791] Revert some changes in group.rs

* [791] Build fix

* [791] Build fix

* [791] Add initialization protection

* [791] Reduce Holder cloning overhead

* [791] Build fix

* [791] Build fix

* [791] Build fix

* [791] Warn fix

* [791] Update CHANGELOG.md

* [791] Updates according to code review

* Update CHANGELOG.md
ikopylov authored May 15, 2023
1 parent ff8b27a commit 00f439e
Showing 5 changed files with 260 additions and 259 deletions.
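At a glance, the commit replaces the pattern of cloning Pearl's `Storage` out of a holder with cloning the `Holder` handle itself. A handle clone is essentially a refcount bump, which is why the hot paths below can afford one clone per spawned future. A minimal sketch of that cost difference, using hypothetical stand-in types rather than Bob's actual definitions:

```rust
use std::sync::Arc;

// Hypothetical stand-ins, not Bob's actual types: a cheap, clonable handle
// over shared holder state. Cloning bumps a refcount; the Pearl storage
// behind the handle is never duplicated.
#[derive(Clone)]
struct Holder {
    inner: Arc<HolderInner>,
}

struct HolderInner {
    id: String,
    // the storage itself would live here, shared rather than cloned
}

impl Holder {
    fn get_id(&self) -> &str {
        &self.inner.id
    }
}

fn main() {
    let holder = Holder {
        inner: Arc::new(HolderInner { id: "pearl-1".into() }),
    };
    let for_task = holder.clone(); // O(1): refcount increment, no storage copied
    assert_eq!(holder.get_id(), for_task.get_id());
    println!("both handles point at holder {}", holder.get_id());
}
```

With a layout like this, only the handle crosses into `async move` blocks; the storage stays put behind the shared state.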
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -7,7 +7,7 @@ Bob versions changelog
 - Added mimalloc allocator for musl target (#688)
 
 #### Changed
-
+- Avoid Pearl Storage clone (#791)
 
 #### Fixed
 
12 changes: 3 additions & 9 deletions bob-backend/src/pearl/disk_controller.rs
@@ -595,16 +595,10 @@ impl DiskController
         let holders = group.holders();
         let holders = holders.read().await;
         for holder in holders.iter() {
-            let storage = holder.storage().read().await;
-            let storage = storage.storage().clone();
-            let id = holder.get_id();
+            let holder = holder.clone();
             futures.push(async move {
-                match storage.close().await {
-                    Ok(_) => debug!("holder {} closed", id),
-                    Err(e) => {
-                        error!("error closing holder{}: {} (disk: {:?})", id, e, self.disk)
-                    }
-                }
+                holder.close_storage().await;
+                debug!("holder {} closed", holder.get_id());
             });
         }
     }
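The close-and-log logic that this call site used to spell out now lives behind `Holder::close_storage`. A self-contained sketch of what such a method might encapsulate, with stub types standing in for Pearl's and an assumed `RwLock<Option<Storage>>` field layout (the real method lives in bob-backend's holder module):

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

// Stub standing in for Pearl's storage type.
struct Storage;

impl Storage {
    async fn close(self) -> Result<(), String> {
        Ok(())
    }
}

#[derive(Clone)]
struct Holder {
    id: Arc<String>,
    // Assumed layout, not Bob's actual fields: keeping the storage in an
    // Option lets close take it out and consume it without cloning.
    storage: Arc<RwLock<Option<Storage>>>,
}

impl Holder {
    fn get_id(&self) -> &str {
        &self.id
    }

    // Sketch of close_storage: close in place and swallow the error with a
    // log line, matching the new call site, which no longer propagates
    // close failures.
    async fn close_storage(&self) {
        if let Some(storage) = self.storage.write().await.take() {
            if let Err(e) = storage.close().await {
                eprintln!("error closing holder {}: {}", self.get_id(), e);
            }
        }
    }
}

#[tokio::main] // assumes tokio with the "macros" and "rt" features
async fn main() {
    let holder = Holder {
        id: Arc::new("pearl-1".into()),
        storage: Arc::new(RwLock::new(Some(Storage))),
    };
    holder.close_storage().await;
    println!("holder {} closed", holder.get_id());
}
```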
28 changes: 18 additions & 10 deletions bob-backend/src/pearl/group.rs
@@ -91,7 +91,9 @@ impl Group
 
     pub async fn remount(&self, pp: impl Hooks) -> AnyResult<()> {
         let _reinit_lock = self.reinit_lock.write().await;
-        self.holders.write().await.clear();
+        let cleared = self.holders.write().await.clear_and_get_values();
+        close_holders(cleared.iter()).await; // Close old holders
+        std::mem::drop(cleared); // This is to guarantee, that all resources will be released before `run_under_reinit_lock` is called
         self.run_under_reinit_lock(pp).await
     }
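`clear_and_get_values` is what lets `remount` drain the collection yet still close the old holders before dropping them. A sketch of the semantics under the assumption that the container is essentially a `Vec` wrapper (Bob's real holders collection carries more per-holder state):

```rust
// Hypothetical container; the real holders collection in bob-backend also
// tracks per-holder metadata.
struct HolderList<T> {
    items: Vec<T>,
}

impl<T> HolderList<T> {
    // Drain the collection, but hand the removed values back to the caller
    // so they can be closed before replacements are created.
    fn clear_and_get_values(&mut self) -> Vec<T> {
        std::mem::take(&mut self.items)
    }
}

fn main() {
    let mut holders = HolderList { items: vec!["h1", "h2"] };
    let cleared = holders.clear_and_get_values();
    assert!(holders.items.is_empty());
    assert_eq!(cleared, ["h1", "h2"]);
    // Dropping `cleared` last mirrors the explicit `std::mem::drop` in the
    // diff: resources are released before re-initialization proceeds.
    drop(cleared);
}
```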

@@ -223,9 +225,11 @@ impl Group
                 // holder but instead try to restart the whole disk
                 if !e.is_possible_disk_disconnection() && !e.is_duplicate() && !e.is_not_ready() {
                     error!("pearl holder will restart: {:?}", e);
-                    holder.try_reinit().await?;
-                    holder.prepare_storage().await?;
-                    debug!("backend pearl group put common storage prepared");
+                    if let Err(err) = holder.try_reinit().await {
+                        warn!("Pearl backend holder reinit ended with error: {:?}", err);
+                    } else {
+                        debug!("Pearl backend holder reinited");
+                    }
                 }
                 Err(e)
             } else {
@@ -288,9 +292,11 @@ impl Group
         let result = holder.read(key).await;
         if let Err(e) = &result {
             if !e.is_key_not_found() && !e.is_not_ready() {
-                holder.try_reinit().await?;
-                holder.prepare_storage().await?;
-                debug!("backend pearl group get common storage prepared");
+                if let Err(err) = holder.try_reinit().await {
+                    warn!("Pearl backend holder reinit ended with error: {:?}", err);
+                } else {
+                    debug!("Pearl backend holder reinited");
+                }
             }
         }
         result
@@ -400,9 +406,11 @@ impl Group
                 // holder but instead try to restart the whole disk
                 if !e.is_possible_disk_disconnection() && !e.is_duplicate() && !e.is_not_ready() {
                     error!("pearl holder will restart: {:?}", e);
-                    holder.try_reinit().await?;
-                    holder.prepare_storage().await?;
-                    debug!("backend::pearl::group::delete_common storage prepared");
+                    if let Err(err) = holder.try_reinit().await {
+                        warn!("Pearl backend holder reinit ended with error: {:?}", err);
+                    } else {
+                        debug!("Pearl backend holder reinited");
+                    }
                 }
                 Err(e)
             } else {
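The same substitution lands in the put, read, and delete paths: `try_reinit` is no longer `?`-propagated (and the follow-up `prepare_storage` call is gone), so a failed reinit is logged without masking the operation's original error. A hypothetical helper, not part of the commit, showing the shared shape of the three replacements:

```rust
use log::{debug, warn};

// Hypothetical helper capturing the block repeated at all three call sites:
// log the reinit outcome and let the caller return its own error afterwards.
fn log_reinit_outcome<E: std::fmt::Debug>(result: &Result<(), E>) {
    match result {
        Err(err) => warn!("Pearl backend holder reinit ended with error: {:?}", err),
        Ok(()) => debug!("Pearl backend holder reinited"),
    }
}

fn main() {
    env_logger::init(); // assumes the env_logger crate, just for demonstration
    log_reinit_outcome::<&str>(&Ok(()));
    log_reinit_outcome(&Err("disk unavailable"));
}
```

Keeping the original `Err(e)` return while only logging reinit failures means a transient reinit problem cannot shadow the user-visible error.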
