Skip to content

Commit

Permalink
use accurate bounds instead of histogram
Browse files Browse the repository at this point in the history
  • Loading branch information
TheBlueMatt committed Dec 14, 2024
1 parent b91162d commit d912f6c
Showing 1 changed file with 30 additions and 6 deletions.
36 changes: 30 additions & 6 deletions lightning/src/routing/scoring.rs
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,8 @@ pub static NO_DATA_PENALTY: core::sync::atomic::AtomicU64 = core::sync::atomic::
/// Tuning exponent for the scorer (default 3).
/// NOTE(review): where this is read is not visible in this chunk — confirm its exact
/// semantics against the rest of `scoring.rs` before relying on this description.
pub static POW: core::sync::atomic::AtomicU8 = core::sync::atomic::AtomicU8::new(3);
/// Additive tuning constant for the scorer (default 10).
/// NOTE(review): this value's consumer is not visible in this chunk — TODO confirm
/// what it is added to (penalty? bucket count?) before documenting further.
pub static ADDL: core::sync::atomic::AtomicU8 = core::sync::atomic::AtomicU8::new(10);
/// Numerator of the per-datapoint decay factor (`HIST_DECAY / 2048`) applied to the
/// historical liquidity buckets each time a new datapoint is tracked, and reused as the
/// per-sample weight falloff when averaging over the raw liquidity history.
/// The default of 2047 (i.e. 2047/2048 per datapoint) forgets old observations slowly;
/// lowering it makes the tracker forget history faster.
pub static HIST_DECAY: core::sync::atomic::AtomicU64 = core::sync::atomic::AtomicU64::new(2047);

/// We define Score ever-so-slightly differently based on whether we are being built for C bindings
/// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is
Expand Down Expand Up @@ -1714,23 +1716,24 @@ mod bucketed_history {
buckets[i as usize] = new_val;
}
}
HistoricalBucketRangeTracker { buckets }
HistoricalBucketRangeTracker { buckets, history: Vec::new() }
}
}

/// Tracks the historical state of a distribution as a weighted average of how much time was spent
/// in each of 32 buckets.
#[derive(Clone, Copy)]
#[derive(Clone)]
pub(super) struct HistoricalBucketRangeTracker {
	// 32 leaky buckets tracking time-weighted liquidity observations, stored as 16-bit
	// fixed-point values with a 5-bit fractional part (see `BUCKET_FIXED_POINT_ONE`);
	// each new datapoint decays all buckets by `HIST_DECAY / 2048`.
	buckets: [u16; 32],
	// Raw per-datapoint history: each entry is the observed liquidity offset expressed as
	// a fraction of channel capacity, scaled to the full `u32` range (newest entries last).
	// NOTE(review): unbounded growth — this Vec is only ever pushed to in the visible
	// code; confirm it is pruned or capped elsewhere.
	history: Vec<u32>,
}

/// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
/// "one" is 32, or this constant.
pub const BUCKET_FIXED_POINT_ONE: u16 = 32;

impl HistoricalBucketRangeTracker {
pub(super) fn new() -> Self { Self { buckets: [0; 32] } }
pub(super) fn new() -> Self { Self { buckets: [0; 32], history: Vec::new() } }
fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
// We have 32 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
Expand All @@ -1755,19 +1758,21 @@ mod bucketed_history {

let pos: u16 = amount_to_pos(liquidity_offset_msat, capacity_msat);
if pos < POSITION_TICKS {
let decay = super::HIST_DECAY.load(core::sync::atomic::Ordering::Acquire) as u32;
for e in self.buckets.iter_mut() {
*e = ((*e as u32) * 2047 / 2048) as u16;
*e = ((*e as u32) * decay / 2048) as u16;
}
let bucket = pos_to_bucket(pos);
self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
}
self.history.push((((liquidity_offset_msat as f64) / (capacity_msat as f64)) * (u32::MAX as f64)) as u32);
}
}

impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required), (2, history, required) });
impl_writeable_tlv_based!(LegacyHistoricalBucketRangeTracker, { (0, buckets, required) });

#[derive(Clone, Copy)]
#[derive(Clone)]
#[repr(C)] // Force the fields in memory to be in the order we specify.
pub(super) struct HistoricalLiquidityTracker {
// This struct sits inside a `(u64, ChannelLiquidity)` in memory, and we first read the
Expand Down Expand Up @@ -1926,6 +1931,25 @@ mod bucketed_history {
return None;
}

let min_hist = if self.source_less_than_target { &self.tracker.min_liquidity_offset_history.history } else { &self.tracker.max_liquidity_offset_history.history };
let max_hist = if !self.source_less_than_target { &self.tracker.min_liquidity_offset_history.history } else { &self.tracker.max_liquidity_offset_history.history };
assert_eq!(min_hist.len(), max_hist.len());
assert!(min_hist.len() > 0);
let mut mul = 1.0;
let mut sum_prob = 1.0;
let mut sum_count = 0.0;
let dec = (super::HIST_DECAY.load(core::sync::atomic::Ordering::Acquire) as f64) / 2048.0;
for (min, max) in max_hist.iter().rev().zip(min_hist.iter().rev()) {
let min = (capacity_msat as f64 * (min as f64) / (u32::MAX as f64)) as u64;
let max = (capacity_msat as f64 * (max as f64) / (u32::MAX as f64)) as u64;

let (numerator, denominator) = success_probability(amount_msat, min, max, capacity_msat, params, true);
sum_prob += mul * (numerator as f64) / (denominator as f64);
sum_count += mul;
mul *= dec;
}
return Some((sum_prob / sum_count * 1024.0 * 1024.0 * 1024.0) as u64);

let mut cumulative_success_prob_times_billion = 0;
// Special-case the 0th min bucket - it generally means we failed a payment, so only
// consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
Expand Down

0 comments on commit d912f6c

Please sign in to comment.