diff --git a/zebra-network/src/meta_addr.rs b/zebra-network/src/meta_addr.rs
index 06fe2a0c784..9b323ee3ec6 100644
--- a/zebra-network/src/meta_addr.rs
+++ b/zebra-network/src/meta_addr.rs
@@ -238,6 +238,11 @@ impl MetaAddr {
         self.last_seen
     }
 
+    /// Set the last time we interacted with this peer.
+    pub(crate) fn set_last_seen(&mut self, last_seen: DateTime32) {
+        self.last_seen = last_seen;
+    }
+
     /// Is this address a directly connected client?
     pub fn is_direct_client(&self) -> bool {
         match self.last_connection_state {
diff --git a/zebra-network/src/peer_set/candidate_set.rs b/zebra-network/src/peer_set/candidate_set.rs
index be51be482da..ae0d0328ff4 100644
--- a/zebra-network/src/peer_set/candidate_set.rs
+++ b/zebra-network/src/peer_set/candidate_set.rs
@@ -1,12 +1,16 @@
 use std::{cmp::min, mem, sync::Arc, time::Duration};
 
-use chrono::{DateTime, Utc};
 use futures::stream::{FuturesUnordered, StreamExt};
 use tokio::time::{sleep, sleep_until, timeout, Sleep};
 use tower::{Service, ServiceExt};
 
+use zebra_chain::serialization::DateTime32;
+
 use crate::{constants, types::MetaAddr, AddressBook, BoxError, Request, Response};
 
+#[cfg(test)]
+mod tests;
+
 /// The `CandidateSet` manages the `PeerSet`'s peer reconnection attempts.
 ///
 /// It divides the set of all possible candidate peers into disjoint subsets,
@@ -229,7 +233,7 @@ where
                     ?addrs,
                     "got response to GetPeers"
                 );
-                let addrs = validate_addrs(addrs, Utc::now());
+                let addrs = validate_addrs(addrs, DateTime32::now());
                 self.send_addrs(addrs);
             }
             Err(e) => {
@@ -329,24 +333,66 @@ where
 /// If the data in an address is invalid, this function can:
 /// - modify the address data, or
 /// - delete the address.
-//
-// TODO: re-enable this lint when last_seen_limit is used
-#[allow(unused_variables)]
+///
+/// # Security
+///
+/// Adjusts untrusted last seen times so they are not in the future. This stops
+/// malicious peers from keeping all their addresses at the front of the
+/// connection queue. Honest peers with future clock skew also get adjusted.
+///
+/// Rejects all addresses if any calculated times overflow or underflow.
 fn validate_addrs(
     addrs: impl IntoIterator<Item = MetaAddr>,
-    last_seen_limit: DateTime<Utc>,
-) -> impl IntoIterator<Item = MetaAddr> {
+    last_seen_limit: DateTime32,
+) -> impl Iterator<Item = MetaAddr> {
     // Note: The address book handles duplicate addresses internally,
     // so we don't need to de-duplicate addresses here.
 
     // TODO:
     // We should eventually implement these checks in this function:
-    // - Zebra should stop believing far-future last_seen times from peers (#1871)
     // - Zebra should ignore peers that are older than 3 weeks (part of #1865)
     //   - Zebra should count back 3 weeks from the newest peer timestamp sent
     //     by the other peer, to compensate for clock skew
     // - Zebra should limit the number of addresses it uses from a single Addrs
    //   response (#1869)
 
-    addrs
+    let mut addrs: Vec<_> = addrs.into_iter().collect();
+
+    limit_last_seen_times(&mut addrs, last_seen_limit);
+
+    addrs.into_iter()
+}
+
+/// Ensure all reported `last_seen` times are less than or equal to `last_seen_limit`.
+///
+/// This will consider all addresses as invalid if trying to offset their
+/// `last_seen` times to be before the limit causes an underflow.
+fn limit_last_seen_times(addrs: &mut Vec<MetaAddr>, last_seen_limit: DateTime32) {
+    let (oldest_reported_seen_timestamp, newest_reported_seen_timestamp) =
+        addrs
+            .iter()
+            .fold((u32::MAX, u32::MIN), |(oldest, newest), addr| {
+                let last_seen = addr.get_last_seen().timestamp();
+                (oldest.min(last_seen), newest.max(last_seen))
+            });
+
+    // If any time is in the future, adjust all times, to compensate for clock skew on honest peers
+    if newest_reported_seen_timestamp > last_seen_limit.timestamp() {
+        let offset = newest_reported_seen_timestamp - last_seen_limit.timestamp();
+
+        // Apply offset to oldest timestamp to check for underflow
+        let oldest_resulting_timestamp = oldest_reported_seen_timestamp as i64 - offset as i64;
+        if oldest_resulting_timestamp >= 0 {
+            // No underflow is possible, so apply offset to all addresses
+            for addr in addrs {
+                let old_last_seen = addr.get_last_seen().timestamp();
+                let new_last_seen = old_last_seen - offset;
+
+                addr.set_last_seen(new_last_seen.into());
+            }
+        } else {
+            // An underflow would occur, so reject all gossiped peers
+            addrs.clear();
+        }
+    }
 }
diff --git a/zebra-network/src/peer_set/candidate_set/tests.rs b/zebra-network/src/peer_set/candidate_set/tests.rs
new file mode 100644
index 00000000000..af9a66f543c
--- /dev/null
+++ b/zebra-network/src/peer_set/candidate_set/tests.rs
@@ -0,0 +1,4 @@
+//! [`CandidateSet`] tests.
+
+mod prop;
+mod vectors;
diff --git a/zebra-network/src/peer_set/candidate_set/tests/prop.rs b/zebra-network/src/peer_set/candidate_set/tests/prop.rs
new file mode 100644
index 00000000000..e35139cc04c
--- /dev/null
+++ b/zebra-network/src/peer_set/candidate_set/tests/prop.rs
@@ -0,0 +1,23 @@
+use proptest::{collection::vec, prelude::*};
+
+use zebra_chain::serialization::DateTime32;
+
+use super::super::validate_addrs;
+use crate::types::MetaAddr;
+
+proptest! {
+    /// Test that validated gossiped peers never have a `last_seen` time that's in the future.
+    #[test]
+    fn no_last_seen_times_are_in_the_future(
+        gossiped_peers in vec(MetaAddr::gossiped_strategy(), 1..10),
+        last_seen_limit in any::<DateTime32>(),
+    ) {
+        zebra_test::init();
+
+        let validated_peers = validate_addrs(gossiped_peers, last_seen_limit);
+
+        for peer in validated_peers {
+            prop_assert![peer.get_last_seen() <= last_seen_limit];
+        }
+    }
+}
diff --git a/zebra-network/src/peer_set/candidate_set/tests/vectors.rs b/zebra-network/src/peer_set/candidate_set/tests/vectors.rs
new file mode 100644
index 00000000000..4da9a8ab93a
--- /dev/null
+++ b/zebra-network/src/peer_set/candidate_set/tests/vectors.rs
@@ -0,0 +1,137 @@
+use std::{
+    convert::TryInto,
+    net::{IpAddr, SocketAddr},
+};
+
+use chrono::{DateTime, Duration, Utc};
+
+use zebra_chain::serialization::DateTime32;
+
+use super::super::validate_addrs;
+use crate::types::{MetaAddr, PeerServices};
+
+/// Test that offset is applied when all addresses have `last_seen` times in the future.
+#[test]
+fn offsets_last_seen_times_in_the_future() {
+    let last_seen_limit = DateTime32::now();
+    let last_seen_limit_chrono = last_seen_limit.to_chrono();
+
+    let input_peers = mock_gossiped_peers(vec![
+        last_seen_limit_chrono + Duration::minutes(30),
+        last_seen_limit_chrono + Duration::minutes(15),
+        last_seen_limit_chrono + Duration::minutes(45),
+    ]);
+
+    let validated_peers: Vec<_> = validate_addrs(input_peers, last_seen_limit).collect();
+
+    let expected_offset = Duration::minutes(45);
+    let expected_peers = mock_gossiped_peers(vec![
+        last_seen_limit_chrono + Duration::minutes(30) - expected_offset,
+        last_seen_limit_chrono + Duration::minutes(15) - expected_offset,
+        last_seen_limit_chrono + Duration::minutes(45) - expected_offset,
+    ]);
+
+    assert_eq!(validated_peers, expected_peers);
+}
+
+/// Test that offset is not applied if all addresses have `last_seen` times that are in the past.
+#[test]
+fn doesnt_offset_last_seen_times_in_the_past() {
+    let last_seen_limit = DateTime32::now();
+    let last_seen_limit_chrono = last_seen_limit.to_chrono();
+
+    let input_peers = mock_gossiped_peers(vec![
+        last_seen_limit_chrono - Duration::minutes(30),
+        last_seen_limit_chrono - Duration::minutes(45),
+        last_seen_limit_chrono - Duration::days(1),
+    ]);
+
+    let validated_peers: Vec<_> = validate_addrs(input_peers.clone(), last_seen_limit).collect();
+
+    let expected_peers = input_peers;
+
+    assert_eq!(validated_peers, expected_peers);
+}
+
+/// Test that offset is applied to all the addresses if at least one has a `last_seen` time in the
+/// future.
+///
+/// Times that are in the past should be changed as well.
+#[test]
+fn offsets_all_last_seen_times_if_one_is_in_the_future() {
+    let last_seen_limit = DateTime32::now();
+    let last_seen_limit_chrono = last_seen_limit.to_chrono();
+
+    let input_peers = mock_gossiped_peers(vec![
+        last_seen_limit_chrono + Duration::minutes(55),
+        last_seen_limit_chrono - Duration::days(3),
+        last_seen_limit_chrono - Duration::hours(2),
+    ]);
+
+    let validated_peers: Vec<_> = validate_addrs(input_peers, last_seen_limit).collect();
+
+    let expected_offset = Duration::minutes(55);
+    let expected_peers = mock_gossiped_peers(vec![
+        last_seen_limit_chrono + Duration::minutes(55) - expected_offset,
+        last_seen_limit_chrono - Duration::days(3) - expected_offset,
+        last_seen_limit_chrono - Duration::hours(2) - expected_offset,
+    ]);
+
+    assert_eq!(validated_peers, expected_peers);
+}
+
+/// Test that offset is not applied if the most recent `last_seen` time is equal to the limit.
+#[test]
+fn doesnt_offsets_if_most_recent_last_seen_times_is_exactly_the_limit() {
+    let last_seen_limit = DateTime32::now();
+    let last_seen_limit_chrono = last_seen_limit.to_chrono();
+
+    let input_peers = mock_gossiped_peers(vec![
+        last_seen_limit_chrono,
+        last_seen_limit_chrono - Duration::minutes(3),
+        last_seen_limit_chrono - Duration::hours(1),
+    ]);
+
+    let validated_peers: Vec<_> = validate_addrs(input_peers.clone(), last_seen_limit).collect();
+
+    let expected_peers = input_peers;
+
+    assert_eq!(validated_peers, expected_peers);
+}
+
+/// Rejects all addresses if underflow occurs when applying the offset.
+#[test]
+fn rejects_all_addresses_if_applying_offset_causes_an_underflow() {
+    let last_seen_limit = DateTime32::now();
+
+    let input_peers = mock_gossiped_peers(vec![
+        DateTime32::from(u32::MIN).to_chrono(),
+        last_seen_limit.to_chrono(),
+        DateTime32::from(u32::MAX).to_chrono(),
+    ]);
+
+    let mut validated_peers = validate_addrs(input_peers, last_seen_limit);
+
+    assert!(validated_peers.next().is_none());
+}
+
+/// Create a mock list of gossiped [`MetaAddr`]s with the specified `last_seen_times`.
+///
+/// The IP address and port of the generated peers should not matter for the test.
+fn mock_gossiped_peers(last_seen_times: impl IntoIterator<Item = DateTime<Utc>>) -> Vec<MetaAddr> {
+    last_seen_times
+        .into_iter()
+        .enumerate()
+        .map(|(index, last_seen_chrono)| {
+            let last_seen = last_seen_chrono
+                .try_into()
+                .expect("`last_seen` time doesn't fit in a `DateTime32`");
+
+            MetaAddr::new_gossiped_meta_addr(
+                SocketAddr::new(IpAddr::from([192, 168, 1, index as u8]), 20_000),
+                PeerServices::NODE_NETWORK,
+                last_seen,
+            )
+        })
+        .collect()
+}
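
The clamping rule in `limit_last_seen_times` can be read in isolation as: if any reported time is newer than the limit, shift every time back by the same offset, and drop the whole batch if that shift would underflow. Below is a minimal standalone sketch of that rule, using plain `u32` Unix timestamps instead of `MetaAddr` and `DateTime32`; the `clamp_last_seen` helper and the sample values are illustrative only, not part of this change.

```rust
/// Illustrative sketch of the clamping rule: shift all times back by the
/// amount the newest time exceeds `limit`, or clear the batch if the shift
/// would underflow `u32`.
fn clamp_last_seen(times: &mut Vec<u32>, limit: u32) {
    // An empty batch needs no adjustment.
    let newest = match times.iter().copied().max() {
        Some(newest) => newest,
        None => return,
    };

    if newest > limit {
        let offset = newest - limit;

        // The oldest (smallest) time is the only one that can underflow;
        // checking every entry with checked_sub is equivalent and simpler here.
        if times.iter().all(|&t| t.checked_sub(offset).is_some()) {
            for t in times.iter_mut() {
                *t -= offset;
            }
        } else {
            // An underflow would occur, so reject the whole batch.
            times.clear();
        }
    }
}

fn main() {
    // One future time (limit + 45) drags every time back by 45 seconds,
    // so the newest entry lands exactly on the limit.
    let mut times = vec![1_000_030, 1_000_015, 1_000_045];
    clamp_last_seen(&mut times, 1_000_000);
    assert_eq!(times, vec![999_985, 999_970, 1_000_000]);

    // Mirrors the underflow vector test above: the offset needed for
    // u32::MAX would push the oldest time below zero, so all are rejected.
    let mut times = vec![u32::MIN, 1_000_000, u32::MAX];
    clamp_last_seen(&mut times, 1_000_000);
    assert!(times.is_empty());
}
```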