Fail HTLC backwards before upstream claims on-chain #2457


80 changes: 77 additions & 3 deletions lightning/src/chain/channelmonitor.rs
@@ -38,7 +38,7 @@ use crate::ln::{PaymentHash, PaymentPreimage};
use crate::ln::msgs::DecodeError;
use crate::ln::chan_utils;
use crate::ln::chan_utils::{CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
use crate::ln::channelmanager::{HTLCSource, SentHTLCId};
use crate::ln::channelmanager::{HTLCSource, SentHTLCId, HTLCPreviousHopData};
use crate::chain;
use crate::chain::{BestBlock, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
@@ -171,19 +171,25 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
);

/// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
/// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the
/// preimage claim backward will lead to loss of funds.
chain, or when we're failing an HTLC awaiting downstream confirmation to prevent a
/// backwards channel from going on-chain. Used to update the corresponding HTLC in the backward
/// channel. Failing to pass the preimage claim backward will lead to loss of funds.
#[derive(Clone, PartialEq, Eq)]
pub struct HTLCUpdate {
pub(crate) payment_hash: PaymentHash,
pub(crate) payment_preimage: Option<PaymentPreimage>,
pub(crate) source: HTLCSource,
pub(crate) htlc_value_satoshis: Option<u64>,
/// If this is an update to fail back the upstream HTLC, this signals whether we're failing
/// back this HTLC because we saw a downstream claim on-chain, or if we're close to the
/// upstream timeout and want to prevent the channel from going on-chain.
pub(crate) awaiting_downstream_confirmation: bool,
}
impl_writeable_tlv_based!(HTLCUpdate, {
(0, payment_hash, required),
(1, htlc_value_satoshis, option),
(2, source, required),
(3, awaiting_downstream_confirmation, (default_value, false)),
(4, payment_preimage, option),
});

@@ -897,6 +903,12 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {
/// Ordering of tuple data: (their_per_commitment_point, feerate_per_kw, to_broadcaster_sats,
/// to_countersignatory_sats)
initial_counterparty_commitment_info: Option<(PublicKey, u32, u64, u64)>,

/// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to
/// a downstream channel force-close remaining unconfirmed by the time the upstream timeout
/// expires. This is used to tell us we already generated an event to fail this HTLC back
/// during a previous block scan.
failed_back_htlc_ids: HashSet<u64>,
}

/// Transaction outputs to watch for on-chain spends.
@@ -1239,6 +1251,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
best_block,
counterparty_node_id: Some(counterparty_node_id),
initial_counterparty_commitment_info: None,
failed_back_htlc_ids: HashSet::new(),
})
}

@@ -3560,6 +3573,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
payment_preimage: None,
source: source.clone(),
htlc_value_satoshis,
awaiting_downstream_confirmation: false,
}));
self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
commitment_tx_output_idx,
@@ -3591,6 +3605,63 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
}
}

// Fail back HTLCs on backwards channels if they expire within `LATENCY_GRACE_PERIOD_BLOCKS`
// blocks. If we haven't seen the preimage for an HTLC by the time the previous hop's
// timeout expires, we've lost that HTLC, so we might as well fail it back instead of having our
// counterparty force-close the channel.
let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter()
.map(|&(ref a, _, ref b)| (a, b.as_ref()));

let current_counterparty_htlcs = if let Some(txid) = self.current_counterparty_commitment_txid {
if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) {
Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))))
} else { None }
} else { None }.into_iter().flatten();

let prev_counterparty_htlcs = if let Some(txid) = self.prev_counterparty_commitment_txid {
if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) {
Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))))
} else { None }
} else { None }.into_iter().flatten();

let htlcs = current_holder_htlcs
.chain(current_counterparty_htlcs)
.chain(prev_counterparty_htlcs);

let height = self.best_block.height();
for (htlc, source_opt) in htlcs {
// Only check forwarded HTLCs' previous hops
let source = match source_opt {
Some(source) => source,
None => continue,
};
let (cltv_expiry, htlc_id) = match source {
HTLCSource::PreviousHopData(HTLCPreviousHopData {
htlc_id, cltv_expiry: Some(cltv_expiry), ..
}) if !self.failed_back_htlc_ids.contains(htlc_id) => (*cltv_expiry, *htlc_id),
_ => continue,
};
if cltv_expiry <= height + LATENCY_GRACE_PERIOD_BLOCKS {
let duplicate_event = self.pending_monitor_events.iter().any(
|update| if let &MonitorEvent::HTLCEvent(ref upd) = update {
upd.source == *source
} else { false });
if !duplicate_event {
log_debug!(logger, "Failing back HTLC {} upstream to preserve the \
channel as the forward HTLC hasn't resolved and our backward HTLC \
expires soon at {}", log_bytes!(htlc.payment_hash.0), cltv_expiry);
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
source: source.clone(),
payment_preimage: None,
payment_hash: htlc.payment_hash,
htlc_value_satoshis: Some(htlc.amount_msat / 1000),
awaiting_downstream_confirmation: true,
}));
self.failed_back_htlc_ids.insert(htlc_id);
}
}
}

self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height(), broadcaster, fee_estimator, logger);
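
For readers following the new fail-back scan above, here is a minimal standalone sketch (not LDK API) of the grace-period check it performs. It assumes the crate's LATENCY_GRACE_PERIOD_BLOCKS value of 6 at the time of this PR, and the block heights are made-up.

// Standalone sketch of the grace-period check above; the constant matches the
// crate's value at the time of this PR, the heights are arbitrary examples.
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6;

/// True if the upstream HTLC is close enough to its CLTV expiry that we fail it
/// back now rather than risk the upstream channel being force-closed.
fn should_fail_back(cltv_expiry: u32, best_block_height: u32) -> bool {
    cltv_expiry <= best_block_height + LATENCY_GRACE_PERIOD_BLOCKS
}

fn main() {
    // Expiry 5 blocks away: inside the grace window, fail back immediately.
    assert!(should_fail_back(800_005, 800_000));
    // Expiry 40 blocks away: keep waiting for the downstream claim to confirm.
    assert!(!should_fail_back(800_040, 800_000));
}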

@@ -3937,6 +4008,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
payment_preimage: Some(payment_preimage),
payment_hash,
htlc_value_satoshis: Some(amount_msat / 1000),
awaiting_downstream_confirmation: false,
}));
}
} else if offered_preimage_claim {
Expand All @@ -3960,6 +4032,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
payment_preimage: Some(payment_preimage),
payment_hash,
htlc_value_satoshis: Some(amount_msat / 1000),
awaiting_downstream_confirmation: false,
}));
}
} else {
@@ -4386,6 +4459,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
best_block,
counterparty_node_id,
initial_counterparty_commitment_info,
failed_back_htlc_ids: HashSet::new(),
})))
}
}
47 changes: 39 additions & 8 deletions lightning/src/ln/channel.rs
@@ -2100,6 +2100,45 @@ impl<SP: Deref> Channel<SP> where
Ok(())
}

/// The total fee paid to close the channel and claim an HTLC-success transaction, accounting
/// for an added input (spending a P2WPKH) and P2WPKH output for fee bumping.
fn cost_to_claim_onchain<L: Deref>(&self, feerate_per_kw: u32, logger: &L) -> u64
where L::Target: Logger
{
let commitment_weight = if self.context.is_outbound() {
let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
commitment_stats.total_fee_sat
Contributor: Should be returning the weight here, not the fee.

Contributor: We should be able to compute the weight without re-building the transaction, you may need a few more consts than we have defined, and it'll need to depend on the channel type.

} else { 0 };

let htlc_weight = htlc_success_tx_weight(self.context.get_channel_type());
let p2wpkh_weight = 31 * 4;
let input_weight = 272; // Including witness, assuming it spends from a p2wpkh
Contributor Author @alecchendev (Aug 28, 2023): By the way I got this number from this post. Edit: oh it's in the spec too.

Contributor: FYI some of these are already defined in bump_transaction.rs.

Comment on lines +2115 to +2116
Contributor: These only apply with anchors, otherwise the HTLC success transaction has the same feerate as the commitment transaction.


let total_weight = commitment_weight + htlc_weight + p2wpkh_weight + input_weight;
let total_fee = feerate_per_kw as u64 * total_weight / 1000;
total_fee
}
Comment on lines +2118 to +2121
Contributor Author: Oh also, for now I’ve only calculated the total fee if we were to claim just the one HTLC tx, but we may also want to consider all the fees we'd pay if we were to claim other pending HTLCs (though it's not clear which ones we'll end up having to claim ourselves vs our counterparty).

Contributor (quoting "though it's not clear which ones we'll end up having to claim ourselves vs our counterparty"): Ah because there could be HTLCs that expire much later than the HTLC we're attempting to fail back and the counterparty still has a chance to claim those onchain?

/// Check whether we should risk letting this channel force-close while waiting
/// for a downstream HTLC resolution on-chain. See [`ChannelConfig::early_fail_multiplier`]
/// for more info.
pub(super) fn check_worth_upstream_closing<F: Deref, L: Deref>(
&self, htlc_id: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
) -> Option<bool>
where F::Target: FeeEstimator, L::Target: Logger
{
let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority);
let htlc = self.context.pending_inbound_htlcs.iter().find(|htlc| htlc.htlc_id == htlc_id)?;
let total_fee = self.cost_to_claim_onchain(feerate_per_kw, logger);
let threshold = total_fee * self.context.config().early_fail_multiplier / 1_000_000;
if htlc.amount_msat / 1000 >= threshold {
Some(true)
} else {
Some(false)
}
}
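
A worked example of the threshold arithmetic in check_worth_upstream_closing, using made-up numbers: a 5,000 sat estimated claim cost and a hypothetical early_fail_multiplier of 1,500,000 (i.e. 150%, expressed in parts-per-million).

// Hypothetical numbers only, to illustrate the threshold check above.
fn main() {
    let total_fee: u64 = 5_000;                 // estimated on-chain cost to claim
    let early_fail_multiplier: u64 = 1_500_000; // assumed config value (150%)
    let threshold = total_fee * early_fail_multiplier / 1_000_000;
    assert_eq!(threshold, 7_500);

    // A 10_000 sat inbound HTLC clears the threshold, so it's worth risking an
    // upstream force-close while we wait for the downstream claim to confirm.
    assert!(10_000u64 >= threshold);
    // A 6_000 sat HTLC does not, so we'd rather fail it back early and keep the
    // upstream channel open.
    assert!(6_000u64 < threshold);
}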

#[inline]
fn get_closing_scriptpubkey(&self) -> Script {
// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
@@ -2238,10 +2277,6 @@ impl<SP: Deref> Channel<SP> where
}
}
if pending_idx == core::usize::MAX {
#[cfg(any(test, fuzzing))]
// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
// this is simply a duplicate claim, not previously failed and we lost funds.
debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
return UpdateFulfillFetch::DuplicateClaim {};
}

@@ -2410,10 +2445,6 @@ impl<SP: Deref> Channel<SP> where
}
}
if pending_idx == core::usize::MAX {
#[cfg(any(test, fuzzing))]
// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
// is simply a duplicate fail, not previously failed and we failed-back too early.
debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
return Ok(None);
}
