     solana_entry::entry::{create_ticks, Entry},
     solana_ledger::shred::{
         max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, ShredFlags,
-        Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, SIZE_OF_DATA_SHRED_PAYLOAD,
+        Shredder, LEGACY_SHRED_DATA_CAPACITY, MAX_DATA_SHREDS_PER_FEC_BLOCK,
     },
     solana_perf::test_tx,
     solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
@@ -38,12 +38,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
 }
 
 fn make_shreds(num_shreds: usize) -> Vec<Shred> {
-    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
     let txs_per_entry = 128;
     let num_entries = max_entries_per_n_shred(
         &make_test_entry(txs_per_entry),
         2 * num_shreds as u64,
-        Some(shred_size),
+        Some(LEGACY_SHRED_DATA_CAPACITY),
     );
     let entries = make_large_unchained_entries(txs_per_entry, num_entries);
     let shredder = Shredder::new(1, 0, 0, 0).unwrap();
@@ -73,10 +72,10 @@ fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
 #[bench]
 fn bench_shredder_ticks(bencher: &mut Bencher) {
     let kp = Keypair::new();
-    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+    let shred_size = LEGACY_SHRED_DATA_CAPACITY;
     let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
     // ~1Mb
-    let num_ticks = max_ticks_per_n_shreds(1, Some(SIZE_OF_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
+    let num_ticks = max_ticks_per_n_shreds(1, Some(LEGACY_SHRED_DATA_CAPACITY)) * num_shreds as u64;
     let entries = create_ticks(num_ticks, 0, Hash::default());
     bencher.iter(|| {
         let shredder = Shredder::new(1, 0, 0, 0).unwrap();
@@ -87,7 +86,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
 #[bench]
 fn bench_shredder_large_entries(bencher: &mut Bencher) {
     let kp = Keypair::new();
-    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+    let shred_size = LEGACY_SHRED_DATA_CAPACITY;
     let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
     let txs_per_entry = 128;
     let num_entries = max_entries_per_n_shred(
@@ -106,7 +105,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
 #[bench]
 fn bench_deshredder(bencher: &mut Bencher) {
     let kp = Keypair::new();
-    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+    let shred_size = LEGACY_SHRED_DATA_CAPACITY;
     // ~10Mb
     let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
     let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
@@ -121,7 +120,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
 
 #[bench]
 fn bench_deserialize_hdr(bencher: &mut Bencher) {
-    let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
+    let data = vec![0; LEGACY_SHRED_DATA_CAPACITY];
 
     let shred = Shred::new_from_data(2, 1, 1, &data, ShredFlags::LAST_SHRED_IN_SLOT, 0, 0, 1);
 
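Note on the sizing arithmetic in these benches: each one picks how many shreds' worth of data to generate by ceiling-dividing a target byte count by the per-shred data capacity. A minimal standalone sketch of that calculation follows, using a made-up placeholder value rather than the actual LEGACY_SHRED_DATA_CAPACITY constant from solana_ledger::shred.

fn main() {
    // Placeholder only; the benches use solana_ledger::shred::LEGACY_SHRED_DATA_CAPACITY here.
    let shred_size: usize = 1_000;
    // ~1Mb of entry data, as in bench_shredder_ticks.
    let target_bytes: usize = 1000 * 1000;
    // Ceiling division: the smallest number of whole shreds that covers target_bytes.
    let num_shreds = (target_bytes + (shred_size - 1)) / shred_size;
    assert_eq!(num_shreds, 1000);
    println!("{num_shreds} shreds for {target_bytes} bytes");
}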