diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs
index eca8516f84..e786e1e348 100644
--- a/neqo-transport/src/connection/tests/migration.rs
+++ b/neqo-transport/src/connection/tests/migration.rs
@@ -9,7 +9,7 @@ use std::{
     mem,
     net::{IpAddr, Ipv6Addr, SocketAddr},
     rc::Rc,
-    time::{Duration, Instant},
+    time::Duration,
 };
 
 use neqo_common::{Datagram, Decoder};
@@ -65,14 +65,6 @@ fn change_source_port(d: &Datagram) -> Datagram {
     Datagram::new(new_port(d.source()), d.destination(), d.tos(), &d[..])
 }
 
-/// As these tests use a new path, that path often has a non-zero RTT.
-/// Pacing can be a problem when testing that path. This skips time forward.
-fn skip_pacing(c: &mut Connection, now: Instant) -> Instant {
-    let pacing = c.process_output(now).callback();
-    assert_ne!(pacing, Duration::new(0, 0));
-    now + pacing
-}
-
 #[test]
 fn rebinding_port() {
     let mut client = default_client();
@@ -100,7 +92,7 @@ fn path_forwarding_attack() {
     let mut client = default_client();
     let mut server = default_server();
     connect_force_idle(&mut client, &mut server);
-    let mut now = now();
+    let now = now();
 
     let dgram = send_something(&mut client, now);
     let dgram = change_path(&dgram, DEFAULT_ADDR_V4);
@@ -160,7 +152,6 @@ fn path_forwarding_attack() {
     assert_v6_path(&client_data2, false);
 
     // The server keeps sending on the new path.
-    now = skip_pacing(&mut server, now);
     let server_data2 = send_something(&mut server, now);
     assert_v4_path(&server_data2, false);
 
diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs
index d34d015ab1..642a656da2 100644
--- a/neqo-transport/src/pace.rs
+++ b/neqo-transport/src/pace.rs
@@ -14,6 +14,8 @@ use std::{
 
 use neqo_common::qtrace;
 
+use crate::rtt::GRANULARITY;
+
 /// This value determines how much faster the pacer operates than the
 /// congestion window.
 ///
@@ -74,19 +76,26 @@ impl Pacer {
     /// the current time is).
     pub fn next(&self, rtt: Duration, cwnd: usize) -> Instant {
         if self.c >= self.p {
-            qtrace!([self], "next {}/{:?} no wait = {:?}", cwnd, rtt, self.t);
-            self.t
-        } else {
-            // This is the inverse of the function in `spend`:
-            // self.t + rtt * (self.p - self.c) / (PACER_SPEEDUP * cwnd)
-            let r = rtt.as_nanos();
-            let d = r.saturating_mul(u128::try_from(self.p - self.c).unwrap());
-            let add = d / u128::try_from(cwnd * PACER_SPEEDUP).unwrap();
-            let w = u64::try_from(add).map(Duration::from_nanos).unwrap_or(rtt);
-            let nxt = self.t + w;
-            qtrace!([self], "next {}/{:?} wait {:?} = {:?}", cwnd, rtt, w, nxt);
-            nxt
+            qtrace!([self], "next {cwnd}/{rtt:?} no wait = {:?}", self.t);
+            return self.t;
+        }
+
+        // This is the inverse of the function in `spend`:
+        // self.t + rtt * (self.p - self.c) / (PACER_SPEEDUP * cwnd)
+        let r = rtt.as_nanos();
+        let d = r.saturating_mul(u128::try_from(self.p - self.c).unwrap());
+        let add = d / u128::try_from(cwnd * PACER_SPEEDUP).unwrap();
+        let w = u64::try_from(add).map(Duration::from_nanos).unwrap_or(rtt);
+
+        // If the increment is below the timer granularity, send immediately.
+        if w < GRANULARITY {
+            qtrace!([self], "next {cwnd}/{rtt:?} below granularity ({w:?})",);
+            return self.t;
         }
+
+        let nxt = self.t + w;
+        qtrace!([self], "next {cwnd}/{rtt:?} wait {w:?} = {nxt:?}");
+        nxt
     }
 
     /// Spend credit. This cannot fail; users of this API are expected to call
@@ -168,4 +177,18 @@ mod tests {
         p.spend(n, RTT, CWND, PACKET);
         assert_eq!(p.next(RTT, CWND), n);
     }
+
+    #[test]
+    fn send_immediately_below_granularity() {
+        const SHORT_RTT: Duration = Duration::from_millis(10);
+        let n = now();
+        let mut p = Pacer::new(true, n, PACKET, PACKET);
+        assert_eq!(p.next(SHORT_RTT, CWND), n);
+        p.spend(n, SHORT_RTT, CWND, PACKET);
+        assert_eq!(
+            p.next(SHORT_RTT, CWND),
+            n,
+            "Expect packet to be sent immediately, instead of being paced below timer granularity."
+        );
+    }
 }