diff --git a/iroh-p2p/Cargo.toml b/iroh-p2p/Cargo.toml
index 3ea8459e41..c9e8592546 100644
--- a/iroh-p2p/Cargo.toml
+++ b/iroh-p2p/Cargo.toml
@@ -14,7 +14,6 @@ async-stream = "0.3.3"
 async-trait = "0.1.56"
 asynchronous-codec = "0.6.0"
 bytes = "1.1.0"
-caches = "0.2.2"
 cid = "0.8.0"
 clap = { version = "4.0.9", features = ["derive"] }
 config = "0.13.1"
@@ -27,6 +26,7 @@ iroh-rpc-client = { path = "../iroh-rpc-client", default-features = false }
 iroh-rpc-types = { path = "../iroh-rpc-types", default-features = false }
 iroh-util = { path = "../iroh-util" }
 lazy_static = "1.4"
+lru = "0.8"
 names = { version = "0.14.0", default-features = false }
 rand = "0.8.5"
 serde = { version = "1.0", features = ["derive"] }
@@ -71,9 +71,13 @@ default-features = false
 features = ["std", "multihash-impl", "identity", "sha2"]
 
 [dev-dependencies]
+criterion = "0.4"
 
 [features]
 default = ["rpc-grpc", "rpc-mem"]
 rpc-grpc = ["iroh-rpc-types/grpc", "iroh-rpc-client/grpc", "iroh-metrics/rpc-grpc"]
 rpc-mem = ["iroh-rpc-types/mem", "iroh-rpc-client/mem"]
 
+[[bench]]
+name = "lru_cache"
+harness = false
\ No newline at end of file
diff --git a/iroh-p2p/benches/lru_cache.rs b/iroh-p2p/benches/lru_cache.rs
new file mode 100644
index 0000000000..e403fcd0d2
--- /dev/null
+++ b/iroh-p2p/benches/lru_cache.rs
@@ -0,0 +1,187 @@
+//! Benchmarks for the LRU cache implementation.
+//!
+//! These are a few simple benchmarks of the operations we perform on empty and full
+//! caches. For the most part, how populated the cache is does not seem to affect the
+//! results much.
+//!
+//! # Running the benchmarks
+//!
+//! Install `cargo-criterion`:
+//!
+//! ```shell
+//! cargo install cargo-criterion
+//! ```
+//!
+//! Run the benchmarks:
+//!
+//! ```shell
+//! cargo criterion -p iroh-p2p
+//! ```
+
+use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
+use libp2p::PeerId;
+
+// The size of the cache to create. Taken from behaviour::peer_manager::DEFAULT_BAD_PEER_CAP.
+const CACHE_SIZE: usize = 10 * 4096;
+
+fn bench_contains_empty(c: &mut Criterion) {
+    let mut group = c.benchmark_group("Contains, almost empty cache");
+    group.bench_function("lru", |bencher| {
+        bencher.iter_batched(
+            // setup
+            || {
+                let mut cache = lru::LruCache::new(CACHE_SIZE.try_into().unwrap());
+                let peer = PeerId::random();
+                cache.put(peer, ());
+                for _ in 0..16 {
+                    cache.put(PeerId::random(), ());
+                }
+                let missing = PeerId::random();
+                assert!(cache.contains(&peer));
+                assert!(!cache.contains(&missing));
+                (cache, peer, missing)
+            },
+            // routine
+            |(cache, peer, missing)| {
+                cache.contains(&peer);
+                cache.contains(&missing);
+                cache // drop outside of routine
+            },
+            BatchSize::SmallInput,
+        )
+    });
+    group.finish();
+}
+
+fn bench_contains_full(c: &mut Criterion) {
+    let mut group = c.benchmark_group("Contains, full cache");
+    group.bench_function("lru", |bencher| {
+        bencher.iter_batched(
+            // setup
+            || {
+                let mut cache = lru::LruCache::new(CACHE_SIZE.try_into().unwrap());
+                for _ in 0..CACHE_SIZE {
+                    cache.put(PeerId::random(), ());
+                }
+                let peer = PeerId::random();
+                cache.put(peer, ());
+                let missing = PeerId::random();
+                assert!(cache.contains(&peer));
+                assert!(!cache.contains(&missing));
+                (cache, peer, missing)
+            },
+            // routine
+            |(cache, peer, missing)| {
+                cache.contains(&peer);
+                cache.contains(&missing);
+                cache // drop outside of routine
+            },
+            BatchSize::LargeInput,
+        )
+    });
+    group.finish();
+}
+
+fn bench_put_empty(c: &mut Criterion) {
+    let mut group = c.benchmark_group("put, almost empty cache");
+    group.bench_function("lru", |bencher| {
+        bencher.iter_batched(
+            // setup
+            || {
+                let cache = lru::LruCache::new(CACHE_SIZE.try_into().unwrap());
+                let peer_id = PeerId::random();
+                (cache, peer_id)
+            },
+            // routine
+            |(mut cache, peer_id)| {
+                cache.put(peer_id, ());
+                (cache, peer_id) // drop outside of routine
+            },
+            BatchSize::SmallInput,
+        )
+    });
+    group.finish();
+}
+
+fn bench_put_full(c: &mut Criterion) {
+    let mut group = c.benchmark_group("put, full cache");
+    group.bench_function("lru", |bencher| {
+        bencher.iter_batched(
+            // setup
+            || {
+                let mut cache = lru::LruCache::new(CACHE_SIZE.try_into().unwrap());
+                for _ in 0..CACHE_SIZE {
+                    cache.put(PeerId::random(), ());
+                }
+                let peer_id = PeerId::random();
+                (cache, peer_id)
+            },
+            // routine
+            |(mut cache, peer_id)| {
+                cache.put(peer_id, ());
+                (cache, peer_id) // drop outside of routine
+            },
+            BatchSize::LargeInput,
+        )
+    });
+    group.finish();
+}
+
+fn bench_pop_empty(c: &mut Criterion) {
+    let mut group = c.benchmark_group("pop, almost empty cache");
+    group.bench_function("lru", |bencher| {
+        bencher.iter_batched(
+            // setup
+            || {
+                let mut cache = lru::LruCache::new(CACHE_SIZE.try_into().unwrap());
+                for _ in 0..16 {
+                    cache.put(PeerId::random(), ());
+                }
+                let peer_id = PeerId::random();
+                cache.put(peer_id, ());
+                (cache, peer_id)
+            },
+            // routine
+            |(mut cache, peer_id)| {
+                cache.pop(&peer_id);
+                (cache, peer_id) // drop outside of routine
+            },
+            BatchSize::SmallInput,
+        )
+    });
+    group.finish();
+}
+
+fn bench_pop_full(c: &mut Criterion) {
+    let mut group = c.benchmark_group("pop, full cache");
+    group.bench_function("lru", |bencher| {
+        bencher.iter_batched(
+            // setup
+            || {
+                let mut cache = lru::LruCache::new(CACHE_SIZE.try_into().unwrap());
+                for _ in 0..CACHE_SIZE {
+                    cache.put(PeerId::random(), ());
+                }
+                let peer_id = PeerId::random();
+                cache.put(peer_id, ());
+                (cache, peer_id)
+            },
+            // routine
+            |(mut cache, peer_id)| {
+                cache.pop(&peer_id);
+                (cache, peer_id) // drop outside of routine
+            },
+            BatchSize::LargeInput,
+        )
+    });
+    group.finish();
+}
+
+criterion_group!(
+    benches,
+    bench_contains_empty,
+    bench_contains_full,
+    bench_put_empty,
+    bench_put_full,
+    bench_pop_empty,
+    bench_pop_full,
+);
+criterion_main!(benches);
diff --git a/iroh-p2p/src/behaviour/peer_manager.rs b/iroh-p2p/src/behaviour/peer_manager.rs
index 7bf3decb17..3137aff547 100644
--- a/iroh-p2p/src/behaviour/peer_manager.rs
+++ b/iroh-p2p/src/behaviour/peer_manager.rs
@@ -1,10 +1,10 @@
 use std::{
+    num::NonZeroUsize,
     task::{Context, Poll},
     time::Duration,
 };
 
 use ahash::AHashMap;
-use caches::{Cache, PutResult};
 use iroh_metrics::{core::MRecorder, inc, p2p::P2PMetrics};
 use libp2p::{
     core::{connection::ConnectionId, transport::ListenerId, ConnectedPoint},
@@ -16,10 +16,11 @@ use libp2p::{
     },
     Multiaddr, PeerId,
 };
+use lru::LruCache;
 
 pub struct PeerManager {
     info: AHashMap<PeerId, Info>,
-    bad_peers: caches::RawLRU<PeerId, ()>,
+    bad_peers: LruCache<PeerId, ()>,
 }
 
 #[derive(Default, Debug, Clone)]
@@ -35,13 +36,13 @@ impl Info {
     }
 }
 
-const DEFAULT_BAD_PEER_CAP: usize = 10 * 4096;
+const DEFAULT_BAD_PEER_CAP: Option<NonZeroUsize> = NonZeroUsize::new(10 * 4096);
 
 impl Default for PeerManager {
     fn default() -> Self {
        PeerManager {
            info: Default::default(),
-            bad_peers: caches::RawLRU::new(DEFAULT_BAD_PEER_CAP).unwrap(),
+            bad_peers: LruCache::new(DEFAULT_BAD_PEER_CAP.unwrap()),
        }
    }
 }
@@ -94,7 +95,7 @@ impl NetworkBehaviour for PeerManager {
         other_established: usize,
     ) {
         if other_established == 0 {
-            let p = self.bad_peers.remove(peer_id);
+            let p = self.bad_peers.pop(peer_id);
             if p.is_some() {
                 inc!(P2PMetrics::BadPeerRemoved);
             }
@@ -150,10 +151,9 @@ impl NetworkBehaviour for PeerManager {
         match error {
             DialError::ConnectionLimit(_) | DialError::DialPeerConditionFalse(_) => {}
             _ => {
-                if PutResult::Put == self.bad_peers.put(peer_id, ()) {
+                if self.bad_peers.put(peer_id, ()).is_none() {
                     inc!(P2PMetrics::BadPeer);
                 }
-                self.info.remove(&peer_id);
             }
         }
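The behavioural changes in `peer_manager.rs` above hinge on the different shapes of the two cache APIs: `caches::RawLRU::put` reports a `PutResult`, while `lru::LruCache::put` returns the previously stored value, so `None` means the key was newly inserted; `pop` takes over the role of `remove`, and the capacity is a `NonZeroUsize`. The sketch below illustrates that mapping against `lru` 0.8; it is a standalone example rather than code from this patch, and it uses a plain integer key instead of `PeerId` to avoid the libp2p dependency.

```rust
use std::num::NonZeroUsize;

use lru::LruCache;

fn main() {
    // Capacity must be non-zero in lru 0.8, mirroring how DEFAULT_BAD_PEER_CAP
    // is built from NonZeroUsize::new(10 * 4096) in the patch.
    let cap = NonZeroUsize::new(10 * 4096).unwrap();
    let mut bad_peers: LruCache<u64, ()> = LruCache::new(cap);

    // put() returns the previous value for the key. None means the entry is new,
    // which is the case where the patch bumps P2PMetrics::BadPeer.
    if bad_peers.put(42, ()).is_none() {
        println!("peer newly marked as bad");
    }

    // pop() removes the entry and returns it, replacing caches' remove(); Some(_)
    // is the case where the patch bumps P2PMetrics::BadPeerRemoved.
    if bad_peers.pop(&42).is_some() {
        println!("peer cleared from the bad list");
    }
}
```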