buffers data shreds to make larger erasure coded sets (bp #15849) (#16074)

* buffers data shreds to make larger erasure coded sets (#15849)

Broadcast stage batches up to 8 entries:
https://github.com/solana-labs/solana/blob/79280b304/core/src/broadcast_stage/broadcast_utils.rs#L26-L29
which will be serialized into some number of shreds and chunked into FEC
sets of at most 32 shreds each:
https://github.com/solana-labs/solana/blob/79280b304/ledger/src/shred.rs#L576-L597
So, depending on the size of the entries, FEC sets can be small, which may
aggravate the loss rate.
For example, 16 FEC sets of 2:2 data/coding shreds each have a higher loss
rate than a single 32:32 set.
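That comparison can be sanity-checked with a small calculation (not part of
this commit). Assuming each shred is lost independently with probability p, a
set of k data + k coding shreds is unrecoverable once more than k of its 2k
shreds are lost; the sketch below compares 16 independent 2:2 sets against one
32:32 set.

// Rough sanity check of the loss-rate claim above (illustration only).
fn failure_prob(total: u32, max_lost: u32, p: f64) -> f64 {
    // P(lost > max_lost) for Binomial(total, p), using the pmf recurrence
    // P(k + 1) = P(k) * (total - k) / (k + 1) * p / (1 - p).
    let mut pmf = (1.0 - p).powi(total as i32); // P(lost = 0)
    let mut tail = 0.0;
    for lost in 0..=total {
        if lost > max_lost {
            tail += pmf;
        }
        if lost < total {
            pmf *= (total - lost) as f64 / (lost + 1) as f64 * p / (1.0 - p);
        }
    }
    tail
}

fn main() {
    let p = 0.2; // assumed per-shred loss probability
    let small = failure_prob(4, 2, p); // one 2:2 set
    let any_small = 1.0 - (1.0 - small).powi(16); // at least one of 16 sets fails
    let large = failure_prob(64, 32, p); // one 32:32 set
    println!("P(some 2:2 set unrecoverable) ~ {:.4}", any_small);
    println!("P(32:32 set unrecoverable)    ~ {:.2e}", large);
}

With p = 0.2, at least one of the sixteen 2:2 sets is unrecoverable roughly a
third of the time, while the single 32:32 set essentially never is.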

This commit broadcasts data shreds immediately, but also buffers them
until it has a batch of 32 data shreds, at which point 32 coding shreds
are generated and broadcast.
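
A minimal sketch of that buffering idea, using hypothetical names and a
stand-in Shred type (the actual change keeps the buffer in
UnfinishedSlotInfo::data_shreds_buffer, visible in the diff below):

const MAX_DATA_SHREDS_PER_FEC_BLOCK: usize = 32;

#[derive(Clone, Debug)]
struct Shred; // stand-in for solana_ledger::shred::Shred

#[derive(Default)]
struct FecBatcher {
    data_shreds_buffer: Vec<Shred>,
}

impl FecBatcher {
    // The caller broadcasts `new_shreds` immediately; this only tracks them
    // and returns full batches that are ready for coding-shred generation.
    fn buffer(&mut self, new_shreds: &[Shred]) -> Vec<Vec<Shred>> {
        self.data_shreds_buffer.extend_from_slice(new_shreds);
        let mut ready = Vec::new();
        while self.data_shreds_buffer.len() >= MAX_DATA_SHREDS_PER_FEC_BLOCK {
            let rest = self.data_shreds_buffer.split_off(MAX_DATA_SHREDS_PER_FEC_BLOCK);
            ready.push(std::mem::replace(&mut self.data_shreds_buffer, rest));
        }
        ready
    }

    // At the end of the slot, whatever remains forms one final, smaller set.
    fn flush(&mut self) -> Vec<Shred> {
        std::mem::take(&mut self.data_shreds_buffer)
    }
}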

(cherry picked from commit 4f82b89)

# Conflicts:
#	ledger/src/shred.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
mergify[bot] and behzadnouri authored Mar 23, 2021
1 parent a04ca03 commit 9d37a33
Showing 7 changed files with 333 additions and 179 deletions.
12 changes: 7 additions & 5 deletions core/benches/shredder.rs
@@ -42,7 +42,13 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
     let shredder =
         Shredder::new(1, 0, RECOMMENDED_FEC_RATE, Arc::new(Keypair::new()), 0, 0).unwrap();
     let data_shreds = shredder
-        .entries_to_data_shreds(&entries, true, 0, &mut ProcessShredsStats::default())
+        .entries_to_data_shreds(
+            &entries,
+            true, // is_last_in_slot
+            0,    // next_shred_index
+            0,    // fec_set_offset
+            &mut ProcessShredsStats::default(),
+        )
         .0;
     assert!(data_shreds.len() >= num_shreds);
     data_shreds
@@ -127,10 +133,8 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
     let data_shreds = make_shreds(symbol_count);
     bencher.iter(|| {
         Shredder::generate_coding_shreds(
-            0,
             RECOMMENDED_FEC_RATE,
             &data_shreds[..symbol_count],
-            0,
             symbol_count,
         )
         .len();
@@ -142,10 +146,8 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
     let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
     let data_shreds = make_shreds(symbol_count);
     let coding_shreds = Shredder::generate_coding_shreds(
-        0,
         RECOMMENDED_FEC_RATE,
         &data_shreds[..symbol_count],
-        0,
         symbol_count,
     );
     bencher.iter(|| {
14 changes: 8 additions & 6 deletions core/src/broadcast_stage.rs
@@ -472,12 +472,14 @@ pub mod test {
     ) {
         let num_entries = max_ticks_per_n_shreds(num, None);
         let (data_shreds, _) = make_slot_entries(slot, 0, num_entries);
-        let keypair = Arc::new(Keypair::new());
-        let shredder = Shredder::new(slot, 0, RECOMMENDED_FEC_RATE, keypair, 0, 0)
-            .expect("Expected to create a new shredder");
-
-        let coding_shreds = shredder
-            .data_shreds_to_coding_shreds(&data_shreds[0..], &mut ProcessShredsStats::default());
+        let keypair = Keypair::new();
+        let coding_shreds = Shredder::data_shreds_to_coding_shreds(
+            &keypair,
+            &data_shreds[0..],
+            RECOMMENDED_FEC_RATE,
+            &mut ProcessShredsStats::default(),
+        )
+        .unwrap();
         (
             data_shreds.clone(),
             coding_shreds.clone(),
8 changes: 6 additions & 2 deletions core/src/broadcast_stage/broadcast_utils.rs
@@ -1,6 +1,6 @@
 use crate::poh_recorder::WorkingBankEntry;
 use crate::result::Result;
-use solana_ledger::entry::Entry;
+use solana_ledger::{entry::Entry, shred::Shred};
 use solana_runtime::bank::Bank;
 use solana_sdk::clock::Slot;
 use std::{
@@ -16,11 +16,15 @@ pub(super) struct ReceiveResults {
     pub last_tick_height: u64,
 }

-#[derive(Copy, Clone)]
+#[derive(Clone)]
 pub struct UnfinishedSlotInfo {
     pub next_shred_index: u32,
     pub slot: Slot,
     pub parent: Slot,
+    // Data shreds buffered to make a batch of size
+    // MAX_DATA_SHREDS_PER_FEC_BLOCK.
+    pub(crate) data_shreds_buffer: Vec<Shred>,
+    pub(crate) fec_set_offset: u32, // See Shredder::fec_set_index.
 }

 /// This parameter tunes how many entries are received in one iteration of recv loop
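
For context, the fec_set_offset field added above feeds the shredder's FEC-set
indexing. The presumed shape of the Shredder::fec_set_index helper referenced
in that comment (an illustration under assumptions, not code from this commit)
maps a data shred index to the index of the first data shred of its FEC set:

const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 32;

// Presumed helper: given a data shred's index and the index at which FEC sets
// start counting within this slot, return the first index of the same FEC set.
fn fec_set_index(data_shred_index: u32, fec_set_offset: u32) -> Option<u32> {
    let position_in_sets = data_shred_index.checked_sub(fec_set_offset)?;
    Some(data_shred_index - position_in_sets % MAX_DATA_SHREDS_PER_FEC_BLOCK)
}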
