feat: use multiple threads when exporting snapshots #3331

Merged: 7 commits, Aug 7, 2023
Changes from all commits
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -37,6 +37,9 @@

### Changed

- [#3331](https://github.com/ChainSafe/forest/pull/3331): Use multiple cores
when exporting snapshots.

### Removed

### Fixed
19 changes: 16 additions & 3 deletions src/chain/mod.rs
@@ -6,21 +6,24 @@ use crate::blocks::Tipset;
use crate::db::car::forest;
use crate::ipld::{stream_chain, CidHashSet};
use crate::utils::io::{AsyncWriterWithChecksum, Checksum};
use crate::utils::stream::par_buffer;
use anyhow::{Context, Result};
use digest::Digest;
use fvm_ipld_blockstore::Blockstore;
use std::sync::Arc;
use tokio::io::{AsyncWrite, AsyncWriteExt, BufWriter};

pub use self::{store::*, weight::*};

pub async fn export<D: Digest>(
db: impl Blockstore,
db: impl Blockstore + Send + Sync + 'static,
tipset: &Tipset,
lookup_depth: ChainEpochDelta,
writer: impl AsyncWrite + Unpin,
seen: CidHashSet,
skip_checksum: bool,
) -> Result<Option<digest::Output<D>>, Error> {
let db = Arc::new(db);
let stateroot_lookup_limit = tipset.epoch() - lookup_depth;
let roots = tipset.key().cids().to_vec();

@@ -29,8 +32,18 @@ pub async fn export<D: Digest>(

// Stream stateroots in range stateroot_lookup_limit..=tipset.epoch(). Also
// stream all block headers until genesis.
let blocks =
stream_chain(&db, tipset.clone().chain(&db), stateroot_lookup_limit).with_seen(seen);
let blocks = par_buffer(
// Queue 1k blocks. This is enough to saturate the compressor, and blocks
// are small enough that keeping 1k in memory isn't a problem: the average
// block size is between 1kb and 2kb, so even a full buffer is only 1-2 MiB.
1024,
stream_chain(
Arc::clone(&db),
tipset.clone().chain(Arc::clone(&db)),
stateroot_lookup_limit,
)
.with_seen(seen),
);

// Encode Ipld key-value pairs in zstd frames
let frames = forest::Encoder::compress_stream(8000usize.next_power_of_two(), 3, blocks);
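For illustration, the standalone sketch below (not part of the diff) shows the producer/consumer split this change relies on: a stream of small byte blocks is decoupled from a CPU-bound consumer by `par_buffer`, so the two stages can run on separate cores with at most 1024 blocks in flight. The `busy_checksum` function is a made-up stand-in for the zstd compressor, and a multi-threaded Tokio runtime is assumed.

use crate::utils::stream::par_buffer;
use futures::StreamExt;

// Hypothetical stand-in for a CPU-bound stage such as zstd compression.
fn busy_checksum(block: &[u8]) -> u64 {
    block.iter().fold(0u64, |acc, &b| acc.rotate_left(5) ^ u64::from(b))
}

// Must run inside a multi-threaded Tokio runtime, since `par_buffer` spawns a task.
async fn pipeline_sketch() -> u64 {
    // Producer: 2 KiB blocks, mirroring the 1-2 kb average block size noted above.
    let blocks = futures::stream::iter(0u16..4096).map(|seed| vec![seed as u8; 2048]);
    // At most 1024 blocks (roughly 2 MiB) sit in the buffer between the stages.
    par_buffer(1024, blocks)
        .map(|block| busy_checksum(&block))
        .fold(0u64, |acc, c| async move { acc ^ c })
        .await
}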
19 changes: 6 additions & 13 deletions src/cli/subcommands/archive_cmd.rs
@@ -98,16 +98,8 @@ impl ArchiveCommands {
diff,
} => {
let store = ManyCar::try_from(snapshot_files)?;

do_export(
&store,
store.heaviest_tipset()?,
output_path,
epoch,
depth,
diff,
)
.await
let heaviest_tipset = store.heaviest_tipset()?;
do_export(store, heaviest_tipset, output_path, epoch, depth, diff).await
}
Self::Checkpoints {
snapshot_files: snapshot,
@@ -142,7 +134,7 @@ fn build_output_path(
}

async fn do_export(
store: impl Blockstore,
store: impl Blockstore + Send + Sync + 'static,
root: Tipset,
output_path: PathBuf,
epoch_option: Option<ChainEpoch>,
@@ -401,9 +393,10 @@ mod tests {
async fn export() {
let output_path = TempDir::new().unwrap();
let store = AnyCar::try_from(calibnet::DEFAULT_GENESIS).unwrap();
let heaviest_tipset = store.heaviest_tipset().unwrap();
do_export(
&store,
store.heaviest_tipset().unwrap(),
store,
heaviest_tipset,
output_path.path().into(),
Some(0),
1,
6 changes: 3 additions & 3 deletions src/rpc/chain_api.rs
@@ -53,7 +53,7 @@ pub(in crate::rpc) async fn chain_export<DB>(
}): Params<ChainExportParams>,
) -> Result<ChainExportResult, JsonRpcError>
where
DB: Blockstore,
DB: Blockstore + Send + Sync + 'static,
{
lazy_static::lazy_static! {
static ref LOCK: Mutex<()> = Mutex::new(());
@@ -82,7 +82,7 @@ where

match if dry_run {
crate::chain::export::<Sha256>(
&data.chain_store.db,
Arc::clone(&data.chain_store.db),
&start_ts,
recent_roots,
VoidAsyncWriter,
@@ -93,7 +93,7 @@ where
} else {
let file = tokio::fs::File::create(&output_path).await?;
crate::chain::export::<Sha256>(
&data.chain_store.db,
Arc::clone(&data.chain_store.db),
&start_ts,
recent_roots,
file,
11 changes: 6 additions & 5 deletions src/tool/subcommands/benchmark_cmd.rs
@@ -9,6 +9,7 @@ use crate::db::car::ManyCar;
use crate::ipld::{stream_chain, stream_graph};
use crate::shim::clock::ChainEpoch;
use crate::utils::db::car_stream::CarStream;
use crate::utils::stream::par_buffer;
use anyhow::{Context as _, Result};
use clap::Subcommand;
use futures::{StreamExt, TryStreamExt};
@@ -140,7 +141,7 @@ async fn benchmark_forest_encoding(
let frames = crate::db::car::forest::Encoder::compress_stream(
frame_size,
compression_level,
block_stream.map_err(anyhow::Error::from),
par_buffer(1024, block_stream.map_err(anyhow::Error::from)),
);
crate::db::car::forest::Encoder::write(&mut dest, roots, frames).await?;
dest.flush().await?;
@@ -157,7 +158,7 @@ async fn benchmark_exporting(
epoch: Option<ChainEpoch>,
depth: ChainEpochDelta,
) -> Result<()> {
let store = open_store(input)?;
let store = Arc::new(open_store(input)?);
let heaviest = store.heaviest_tipset()?;
let idx = ChainIndex::new(&store);
let ts = idx.tipset_by_height(
@@ -172,15 +173,15 @@
let mut dest = indicatif_sink("exported");

let blocks = stream_chain(
&store,
ts.deref().clone().chain(&store),
Arc::clone(&store),
ts.deref().clone().chain(Arc::clone(&store)),
stateroot_lookup_limit,
);

let frames = crate::db::car::forest::Encoder::compress_stream(
frame_size,
compression_level,
blocks.map_err(anyhow::Error::from),
par_buffer(1024, blocks.map_err(anyhow::Error::from)),
);
crate::db::car::forest::Encoder::write(&mut dest, ts.key().cids.clone(), frames).await?;
dest.flush().await?;
1 change: 1 addition & 0 deletions src/utils/mod.rs
@@ -10,6 +10,7 @@ pub mod misc;
pub mod monitoring;
pub mod net;
pub mod proofs_api;
pub mod stream;
pub mod version;

use futures::{
21 changes: 21 additions & 0 deletions src/utils/stream.rs
@@ -0,0 +1,21 @@
// Copyright 2019-2023 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use futures::{Stream, StreamExt};

/// Decouple stream generation and stream consumption into separate threads,
/// keeping not-yet-consumed elements in a bounded queue. This is similar to
/// [`stream::buffered`](https://docs.rs/futures/latest/futures/stream/trait.StreamExt.html#method.buffered)
/// and
/// [`sink::buffer`](https://docs.rs/futures/latest/futures/sink/trait.SinkExt.html#method.buffer).
/// The key difference is that [`par_buffer`] is parallel rather than concurrent
/// and will make use of multiple cores when both the stream and the stream
/// consumer are CPU-bound. Because a new thread is spawned, the stream has to
/// be [`Sync`], [`Send`] and `'static`.
pub fn par_buffer<V: Send + Sync + 'static>(
    cap: usize,
    stream: impl Stream<Item = V> + Send + Sync + 'static,
) -> impl Stream<Item = V> {
    let (send, recv) = flume::bounded(cap);
    tokio::task::spawn(stream.map(Ok).forward(send.into_sink()));
    recv.into_stream()
}