From da1173213c1c53ee7ca1d2b7b6a33e005f55c8fb Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 8 Nov 2024 18:24:17 +0300 Subject: [PATCH] fix small bug in backoff update (#1586) * fix small bug in backoff update - use saturating_sub * additional important optimization for tx --- io/zenoh-transport/src/common/pipeline.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 62320e379d..054a5e0a16 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -434,7 +434,7 @@ enum Pull { // Inner structure to keep track and signal backoff operations #[derive(Clone)] struct Backoff { - threshold: Duration, + threshold: MicroSeconds, last_bytes: BatchSize, atomic: Arc<AtomicBackoff>, // active: bool, } impl Backoff { fn new(threshold: Duration, atomic: Arc<AtomicBackoff>) -> Self { Self { - threshold, + threshold: threshold.as_micros() as MicroSeconds, last_bytes: 0, atomic, // active: false, } } @@ -486,14 +486,13 @@ impl StageOutIn { // Verify that we have not been doing backoff for too long let mut backoff = 0; if !pull { - let diff = LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds - - self.backoff.atomic.first_write.load(Ordering::Relaxed); - let threshold = self.backoff.threshold.as_micros() as MicroSeconds; + let diff = (LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds) .saturating_sub(self.backoff.atomic.first_write.load(Ordering::Relaxed)); - if diff >= threshold { + if diff >= self.backoff.threshold { pull = true; } else { - backoff = threshold - diff; + backoff = self.backoff.threshold - diff; } }