Skip to content
This repository has been archived by the owner on Jan 13, 2025. It is now read-only.

cleanup clippy tests #10172

Merged
merged 4 commits into from
May 29, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions core/src/cluster_info.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2425,8 +2425,7 @@ mod tests {

fn test_crds_values(pubkey: Pubkey) -> Vec<CrdsValue> {
let entrypoint = ContactInfo::new_localhost(&pubkey, timestamp());
let entrypoint_crdsvalue =
CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
let entrypoint_crdsvalue = CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint));
vec![entrypoint_crdsvalue]
}

Expand Down
4 changes: 2 additions & 2 deletions core/src/cluster_info_vote_listener.rs
Original file line number Diff line number Diff line change
Expand Up @@ -957,7 +957,7 @@ mod tests {
&exit,
Arc::new(RwLock::new(bank_forks)),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
blockstore,
))),
));

Expand Down Expand Up @@ -1072,7 +1072,7 @@ mod tests {
&exit,
Arc::new(RwLock::new(bank_forks)),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
blockstore,
))),
));

Expand Down
34 changes: 17 additions & 17 deletions core/src/commitment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -551,9 +551,9 @@ mod tests {
cache2.increase_confirmation_stake(2, 5);

let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
block_commitment.entry(1).or_insert_with(|| cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert_with(|| cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert_with(|| cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
Expand All @@ -568,9 +568,9 @@ mod tests {

// Build map with multiple slots at conf 1
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
block_commitment.entry(1).or_insert_with(|| cache1.clone()); // Slot 1, conf 1
block_commitment.entry(2).or_insert_with(|| cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert_with(|| cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
Expand All @@ -585,9 +585,9 @@ mod tests {

// Build map with slot gaps
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
block_commitment.entry(3).or_insert(cache1.clone()); // Slot 3, conf 1
block_commitment.entry(5).or_insert(cache2.clone()); // Slot 5, conf 0
block_commitment.entry(1).or_insert_with(|| cache1.clone()); // Slot 1, conf 1
block_commitment.entry(3).or_insert(cache1); // Slot 3, conf 1
block_commitment.entry(5).or_insert_with(|| cache2.clone()); // Slot 5, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
Expand All @@ -602,9 +602,9 @@ mod tests {

// Build map with no conf 1 slots, but one higher
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
block_commitment.entry(1).or_insert(cache0); // Slot 1, conf 2
block_commitment.entry(2).or_insert_with(|| cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert_with(|| cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
Expand All @@ -619,15 +619,15 @@ mod tests {

// Build map with no conf 1 or higher slots
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache2.clone()); // Slot 1, conf 0
block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
block_commitment.entry(1).or_insert_with(|| cache2.clone()); // Slot 1, conf 0
block_commitment.entry(2).or_insert_with(|| cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
bank_slot_5,
blockstore,
0,
0,
);
Expand Down
2 changes: 1 addition & 1 deletion core/src/replay_stage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2618,7 +2618,7 @@ pub(crate) mod tests {

let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
BlockCommitmentCache::default_with_blockstore(blockstore),
));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Expand Down
4 changes: 2 additions & 2 deletions core/src/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2321,7 +2321,7 @@ pub mod tests {
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}"]}}"#,
tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let res = io.handle_request_sync(&req, meta);
let expected = json!({
"jsonrpc": "2.0",
"result": {
Expand Down Expand Up @@ -2361,7 +2361,7 @@ pub mod tests {
);

// should panic because `bank` is not frozen
let _ = io.handle_request_sync(&req, meta.clone());
let _ = io.handle_request_sync(&req, meta);
}

#[test]
Expand Down
9 changes: 5 additions & 4 deletions core/src/rpc_pubsub.rs
Original file line number Diff line number Diff line change
Expand Up @@ -745,11 +745,11 @@ mod tests {
);
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
));

let subscriptions =
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache);
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
Expand Down Expand Up @@ -895,8 +895,7 @@ mod tests {
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("voteNotification");

// Setup Subscriptions
let subscriptions =
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
let subscriptions = RpcSubscriptions::new(&exit, bank_forks, block_commitment_cache);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@svenski123 Could you check the flakiness of this test test_vote_subscribe locally? It seems like this test became a lot more unstable after removing .clone() on CI:

My guess is that this or that .clone() takes some time, and removing it makes the test more susceptible to some kind of race condition. If you're brave enough, feel free to go down the rabbit hole, or just add #allow(...)...

FYI, this test has been a bit flaky for some time, iirc.

rpc.subscriptions = Arc::new(subscriptions);
rpc.vote_subscribe(session, subscriber);

Expand All @@ -922,6 +921,8 @@ mod tests {
});

// Process votes and check they were notified.
// FIX-ME-BETTER-LATER - clone below is required for testcase to pass
#[allow(clippy::redundant_clone)]
ClusterInfoVoteListener::get_and_process_votes_for_tests(
&votes_receiver,
&vote_tracker,
Expand Down
2 changes: 1 addition & 1 deletion ledger/src/blockstore.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4922,7 +4922,7 @@ pub mod tests {
// Trying to insert value into slot <= than last root should fail
{
let mut coding_shred =
Shred::new_empty_from_header(shred.clone(), DataShredHeader::default(), coding);
Shred::new_empty_from_header(shred, DataShredHeader::default(), coding);
let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
coding_shred.set_slot(*last_root.read().unwrap());
assert!(!Blockstore::should_insert_coding_shred(
Expand Down
17 changes: 10 additions & 7 deletions ledger/tests/shred.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ use std::{
sync::Arc,
};

type IndexShredsMap = BTreeMap<u32, Vec<Shred>>;

#[test]
fn test_multi_fec_block_coding() {
let keypair = Arc::new(Keypair::new());
Expand Down Expand Up @@ -123,9 +125,9 @@ fn test_multi_fec_block_different_size_coding() {
let num_data = fec_data_shreds.len();
let num_coding = fec_coding_shreds.len();
let all_shreds: Vec<Shred> = fec_data_shreds
.into_iter()
.iter()
.step_by(2)
.chain(fec_coding_shreds.into_iter().step_by(2))
.chain(fec_coding_shreds.iter().step_by(2))
.cloned()
.collect();

Expand Down Expand Up @@ -162,8 +164,8 @@ fn test_multi_fec_block_different_size_coding() {
fn sort_data_coding_into_fec_sets(
data_shreds: Vec<Shred>,
coding_shreds: Vec<Shred>,
fec_data: &mut BTreeMap<u32, Vec<Shred>>,
fec_coding: &mut BTreeMap<u32, Vec<Shred>>,
fec_data: &mut IndexShredsMap,
fec_coding: &mut IndexShredsMap,
data_slot_and_index: &mut HashSet<(Slot, u32)>,
coding_slot_and_index: &mut HashSet<(Slot, u32)>,
) {
Expand All @@ -175,7 +177,7 @@ fn sort_data_coding_into_fec_sets(
data_slot_and_index.insert(key);
let fec_entry = fec_data
.entry(shred.common_header.fec_set_index)
.or_insert(vec![]);
.or_insert_with(|| vec![]);
fec_entry.push(shred);
}
for shred in coding_shreds {
Expand All @@ -186,16 +188,17 @@ fn sort_data_coding_into_fec_sets(
coding_slot_and_index.insert(key);
let fec_entry = fec_coding
.entry(shred.common_header.fec_set_index)
.or_insert(vec![]);
.or_insert_with(|| vec![]);
fec_entry.push(shred);
}
}

#[allow(clippy::assertions_on_constants)]
fn setup_different_sized_fec_blocks(
slot: Slot,
parent_slot: Slot,
keypair: Arc<Keypair>,
) -> (BTreeMap<u32, Vec<Shred>>, BTreeMap<u32, Vec<Shred>>, usize) {
) -> (IndexShredsMap, IndexShredsMap, usize) {
let shredder =
Shredder::new(slot, parent_slot, 1.0, keypair, 0, 0).expect("Failed in creating shredder");
let keypair0 = Keypair::new();
Expand Down
2 changes: 1 addition & 1 deletion sdk/src/signature.rs
Original file line number Diff line number Diff line change
Expand Up @@ -561,7 +561,7 @@ mod tests {
}

fn pubkeys(signers: &[&dyn Signer]) -> Vec<Pubkey> {
signers.into_iter().map(|x| x.pubkey()).collect()
signers.iter().map(|x| x.pubkey()).collect()
}

#[test]
Expand Down
15 changes: 6 additions & 9 deletions sdk/src/slot_history.rs
Original file line number Diff line number Diff line change
Expand Up @@ -99,9 +99,9 @@ mod tests {
slot_history.add(MAX_ENTRIES);
assert_eq!(slot_history.check(0), Check::TooOld);
assert_eq!(slot_history.check(1), Check::NotFound);
assert_eq!(slot_history.check(2), Check::Found);
assert_eq!(slot_history.check(20), Check::Found);
assert_eq!(slot_history.check(MAX_ENTRIES), Check::Found);
for i in &[2, 20, MAX_ENTRIES] {
assert_eq!(slot_history.check(*i), Check::Found);
}
for i in 3..20 {
assert_eq!(slot_history.check(i), Check::NotFound, "i: {}", i);
}
Expand All @@ -113,12 +113,9 @@ mod tests {
info!("add max_entries + 3");
let slot = 3 * MAX_ENTRIES + 3;
slot_history.add(slot);
assert_eq!(slot_history.check(0), Check::TooOld);
assert_eq!(slot_history.check(1), Check::TooOld);
assert_eq!(slot_history.check(2), Check::TooOld);
assert_eq!(slot_history.check(20), Check::TooOld);
assert_eq!(slot_history.check(21), Check::TooOld);
assert_eq!(slot_history.check(MAX_ENTRIES), Check::TooOld);
for i in &[0, 1, 2, 20, 21, MAX_ENTRIES] {
assert_eq!(slot_history.check(*i), Check::TooOld);
}
let start = slot - MAX_ENTRIES + 1;
let end = slot;
for i in start..end {
Expand Down