diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index da3970da8eb..9c0de49cbf4 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -38,7 +38,7 @@ use crate::ln::{PaymentHash, PaymentPreimage};
 use crate::ln::msgs::DecodeError;
 use crate::ln::chan_utils;
 use crate::ln::chan_utils::{CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
-use crate::ln::channelmanager::{HTLCSource, SentHTLCId};
+use crate::ln::channelmanager::{HTLCSource, SentHTLCId, HTLCPreviousHopData};
 use crate::chain;
 use crate::chain::{BestBlock, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
@@ -171,19 +171,25 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
 );
 
 /// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
-/// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the
-/// preimage claim backward will lead to loss of funds.
+/// chain, or when we fail an HTLC awaiting downstream confirmation to prevent the
+/// backwards channel from going on-chain. Used to update the corresponding HTLC in the backward
+/// channel. Failing to pass the preimage claim backward will lead to loss of funds.
 #[derive(Clone, PartialEq, Eq)]
 pub struct HTLCUpdate {
 	pub(crate) payment_hash: PaymentHash,
 	pub(crate) payment_preimage: Option<PaymentPreimage>,
 	pub(crate) source: HTLCSource,
 	pub(crate) htlc_value_satoshis: Option<u64>,
+	/// If this is an update to fail back the upstream HTLC, this signals whether we're failing
+	/// back this HTLC because we saw a downstream claim on-chain, or if we're close to the
+	/// upstream timeout and want to prevent the channel from going on-chain.
+	pub(crate) awaiting_downstream_confirmation: bool,
 }
 impl_writeable_tlv_based!(HTLCUpdate, {
 	(0, payment_hash, required),
 	(1, htlc_value_satoshis, option),
 	(2, source, required),
+	(3, awaiting_downstream_confirmation, (default_value, false)),
 	(4, payment_preimage, option),
 });
 
@@ -897,6 +903,12 @@ pub(crate) struct ChannelMonitorImpl {
 	/// Ordering of tuple data: (their_per_commitment_point, feerate_per_kw, to_broadcaster_sats,
 	/// to_countersignatory_sats)
 	initial_counterparty_commitment_info: Option<(PublicKey, u32, u64, u64)>,
+
+	/// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to
+	/// a downstream channel force-close remaining unconfirmed by the time the upstream timeout
+	/// expires. This is used to tell us we already generated an event to fail this HTLC back
+	/// during a previous block scan.
+	failed_back_htlc_ids: HashSet<u64>,
 }
 
 /// Transaction outputs to watch for on-chain spends.
@@ -1239,6 +1251,7 @@ impl ChannelMonitor {
 			best_block,
 			counterparty_node_id: Some(counterparty_node_id),
 			initial_counterparty_commitment_info: None,
+			failed_back_htlc_ids: HashSet::new(),
 		})
 	}
 
@@ -3560,6 +3573,7 @@ impl ChannelMonitorImpl {
 				payment_preimage: None,
 				source: source.clone(),
 				htlc_value_satoshis,
+				awaiting_downstream_confirmation: false,
 			}));
 			self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
 				commitment_tx_output_idx,
@@ -3591,6 +3605,63 @@ impl ChannelMonitorImpl {
 			}
 		}
 
+		// Fail back HTLCs on backwards channels if they expire within `LATENCY_GRACE_PERIOD_BLOCKS`
+		// blocks.
If we haven't seen the preimage for an HTLC by the time the previous hop's + // timeout expires, we've lost that HTLC, so we might as well fail it back instead of having our + // counterparty force-close the channel. + let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter() + .map(|&(ref a, _, ref b)| (a, b.as_ref())); + + let current_counterparty_htlcs = if let Some(txid) = self.current_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let prev_counterparty_htlcs = if let Some(txid) = self.prev_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let htlcs = current_holder_htlcs + .chain(current_counterparty_htlcs) + .chain(prev_counterparty_htlcs); + + let height = self.best_block.height(); + for (htlc, source_opt) in htlcs { + // Only check forwarded HTLCs' previous hops + let source = match source_opt { + Some(source) => source, + None => continue, + }; + let (cltv_expiry, htlc_id) = match source { + HTLCSource::PreviousHopData(HTLCPreviousHopData { + htlc_id, cltv_expiry: Some(cltv_expiry), .. + }) if !self.failed_back_htlc_ids.contains(htlc_id) => (*cltv_expiry, *htlc_id), + _ => continue, + }; + if cltv_expiry <= height + LATENCY_GRACE_PERIOD_BLOCKS { + let duplicate_event = self.pending_monitor_events.iter().any( + |update| if let &MonitorEvent::HTLCEvent(ref upd) = update { + upd.source == *source + } else { false }); + if !duplicate_event { + log_debug!(logger, "Failing back HTLC {} upstream to preserve the \ + channel as the forward HTLC hasn't resolved and our backward HTLC \ + expires soon at {}", log_bytes!(htlc.payment_hash.0), cltv_expiry); + self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate { + source: source.clone(), + payment_preimage: None, + payment_hash: htlc.payment_hash, + htlc_value_satoshis: Some(htlc.amount_msat / 1000), + awaiting_downstream_confirmation: true, + })); + self.failed_back_htlc_ids.insert(htlc_id); + } + } + } + self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger); self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height(), broadcaster, fee_estimator, logger); @@ -3937,6 +4008,7 @@ impl ChannelMonitorImpl { payment_preimage: Some(payment_preimage), payment_hash, htlc_value_satoshis: Some(amount_msat / 1000), + awaiting_downstream_confirmation: false, })); } } else if offered_preimage_claim { @@ -3960,6 +4032,7 @@ impl ChannelMonitorImpl { payment_preimage: Some(payment_preimage), payment_hash, htlc_value_satoshis: Some(amount_msat / 1000), + awaiting_downstream_confirmation: false, })); } } else { @@ -4386,6 +4459,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP best_block, counterparty_node_id, initial_counterparty_commitment_info, + failed_back_htlc_ids: HashSet::new(), }))) } } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f68d7433e4a..d4ff4769a19 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2100,6 +2100,45 @@ impl Channel 
where
 		Ok(())
 	}
 
+	/// The total fee paid to close the channel and claim an HTLC-success transaction, accounting
+	/// for an added input (spending a P2WPKH) and P2WPKH output for fee bumping.
+	fn cost_to_claim_onchain<L: Deref>(&self, feerate_per_kw: u32, logger: &L) -> u64
+		where L::Target: Logger
+	{
+		let commitment_fee = if self.context.is_outbound() {
+			let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+			let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
+			commitment_stats.total_fee_sat
+		} else { 0 };
+
+		let htlc_weight = htlc_success_tx_weight(self.context.get_channel_type());
+		let p2wpkh_weight = 31 * 4;
+		let input_weight = 272; // Including witness, assuming it spends from a p2wpkh
+
+		let claim_weight = htlc_weight + p2wpkh_weight + input_weight;
+		let claim_fee = feerate_per_kw as u64 * claim_weight / 1000;
+		commitment_fee + claim_fee
+	}
+
+	/// Check whether we should risk letting this channel force-close while waiting
+	/// for a downstream HTLC resolution on-chain. See [`ChannelConfig::early_fail_multiplier`]
+	/// for more info.
+	pub(super) fn check_worth_upstream_closing<F: Deref, L: Deref>(
+		&self, htlc_id: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+	) -> Option<bool>
+		where F::Target: FeeEstimator, L::Target: Logger
+	{
+		let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority);
+		let htlc = self.context.pending_inbound_htlcs.iter().find(|htlc| htlc.htlc_id == htlc_id)?;
+		let total_fee = self.cost_to_claim_onchain(feerate_per_kw, logger);
+		let threshold = total_fee * self.context.config().early_fail_multiplier / 1_000_000;
+		if htlc.amount_msat / 1000 >= threshold {
+			Some(true)
+		} else {
+			Some(false)
+		}
+	}
+
 	#[inline]
 	fn get_closing_scriptpubkey(&self) -> Script {
 		// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
@@ -2238,10 +2277,6 @@ impl Channel where
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			#[cfg(any(test, fuzzing))]
-			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
-			// this is simply a duplicate claim, not previously failed and we lost funds.
-			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 			return UpdateFulfillFetch::DuplicateClaim {};
 		}
 
@@ -2410,10 +2445,6 @@ impl Channel where
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			#[cfg(any(test, fuzzing))]
-			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
-			// is simply a duplicate fail, not previously failed and we failed-back too early.
-			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 			return Ok(None);
 		}
 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 55980dd7acf..ddd2c99d05c 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -104,6 +104,7 @@ pub(super) enum PendingHTLCRouting {
 		/// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
 		/// generated using `get_fake_scid` from the scid_utils::fake_scid module.
		short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
+		incoming_cltv_expiry: Option<u32>,
 	},
 	Receive {
 		payment_data: msgs::FinalOnionHopData,
@@ -124,6 +125,16 @@ pub(super) enum PendingHTLCRouting {
 	},
 }
 
+impl PendingHTLCRouting {
+	fn incoming_cltv_expiry(&self) -> Option<u32> {
+		match self {
+			Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
+			Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
+			Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
+		}
+	}
+}
+
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 pub(super) struct PendingHTLCInfo {
 	pub(super) routing: PendingHTLCRouting,
@@ -182,13 +193,16 @@ pub(crate) struct HTLCPreviousHopData {
 	// Note that this may be an outbound SCID alias for the associated channel.
 	short_channel_id: u64,
 	user_channel_id: Option<u128>,
-	htlc_id: u64,
+	pub(crate) htlc_id: u64,
 	incoming_packet_shared_secret: [u8; 32],
 	phantom_shared_secret: Option<[u8; 32]>,
 
 	// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
 	// channel with a preimage provided by the forward channel.
 	outpoint: OutPoint,
+	/// Used to preserve our backwards channel by failing back in case an HTLC claim in the forward
+	/// channel remains unconfirmed for too long.
+	pub(crate) cltv_expiry: Option<u32>,
 }
 
 enum OnionPayload {
@@ -2742,6 +2756,7 @@ where
 					routing: PendingHTLCRouting::Forward {
 						onion_packet: outgoing_packet,
 						short_channel_id,
+						incoming_cltv_expiry: Some(msg.cltv_expiry),
 					},
 					payment_hash: msg.payment_hash,
 					incoming_shared_secret: shared_secret,
@@ -3771,8 +3786,9 @@ where
 			})?;
 
 		let routing = match payment.forward_info.routing {
-			PendingHTLCRouting::Forward { onion_packet, .. } => {
-				PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid }
+			PendingHTLCRouting::Forward { onion_packet, incoming_cltv_expiry, .. } => {
+				PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid,
+					incoming_cltv_expiry }
 			},
 			_ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
 		};
@@ -3808,7 +3824,7 @@ where
 				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
 			})?;
 
-		if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
+		if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, .. } = payment.forward_info.routing {
 			let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 				short_channel_id: payment.prev_short_channel_id,
 				user_channel_id: Some(payment.prev_user_channel_id),
@@ -3816,6 +3832,7 @@ where
 				htlc_id: payment.prev_htlc_id,
 				incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
 				phantom_shared_secret: None,
+				cltv_expiry: incoming_cltv_expiry,
 			});
 
 			let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
@@ -3853,6 +3870,7 @@ where
 									outgoing_cltv_value, ..
 								}
 							}) => {
+								let cltv_expiry = routing.incoming_cltv_expiry();
 								macro_rules!
failure_handler { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); @@ -3864,6 +3882,7 @@ where htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret: $phantom_ss, + cltv_expiry, }); let reason = if $next_hop_unknown { @@ -3967,7 +3986,8 @@ where prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, - routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, .. + routing: PendingHTLCRouting::Forward { onion_packet, incoming_cltv_expiry, .. }, + skimmed_fee_msat, .. }, }) => { log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id); @@ -3979,6 +3999,7 @@ where incoming_packet_shared_secret: incoming_shared_secret, // Phantom payments are only PendingHTLCRouting::Receive. phantom_shared_secret: None, + cltv_expiry: incoming_cltv_expiry, }); if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), @@ -4060,6 +4081,7 @@ where htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret, + cltv_expiry: Some(cltv_expiry), }, // We differentiate the received value from the sender intended value // if possible so that we don't prematurely mark MPP payments complete @@ -4090,6 +4112,7 @@ where htlc_id: $htlc.prev_hop.htlc_id, incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, phantom_shared_secret, + cltv_expiry: Some(cltv_expiry), }), payment_hash, HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: $payment_hash }, @@ -4791,6 +4814,37 @@ where } } + /// Check whether we should risk letting an upstream channel force-close while waiting + /// for a downstream HTLC resolution on-chain. See [`ChannelConfig::early_fail_multiplier`] + /// for more info. + fn check_worth_upstream_closing(&self, source: &HTLCSource) -> bool { + let (short_channel_id, htlc_id) = match source { + HTLCSource::OutboundRoute { .. } => { + debug_assert!(false, "This should not be called on outbound HTLCs"); + return false; + }, + HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, .. }) => { + (short_channel_id, htlc_id) + }, + }; + let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(short_channel_id) { + Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), + None => return false, + }; + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut peer_state_lock = match per_peer_state.get(&counterparty_node_id) { + Some(peer_state_mutex) => peer_state_mutex.lock().unwrap(), + None => return false, + }; + let peer_state = &mut *peer_state_lock; + let chan = match peer_state.channel_by_id.get(&channel_id) { + Some(chan) => chan, + None => return false, + }; + chan.check_worth_upstream_closing( + *htlc_id, &self.fee_estimator, &self.logger).unwrap_or(false) + } + /// Fails an HTLC backwards to the sender of it to us. /// Note that we do not assume that channels corresponding to failed HTLCs are still available. 
fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { @@ -6068,6 +6122,7 @@ where htlc_id: prev_htlc_id, incoming_packet_shared_secret: forward_info.incoming_shared_secret, phantom_shared_secret: None, + cltv_expiry: forward_info.routing.incoming_cltv_expiry(), }); failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, @@ -6338,10 +6393,12 @@ where log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", &preimage); self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint); } else { - log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); - let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); - self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); + if !htlc_update.awaiting_downstream_confirmation || !self.check_worth_upstream_closing(&htlc_update.source) { + log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); + let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; + let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); + } } }, MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | @@ -7195,6 +7252,7 @@ where incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret, phantom_shared_secret: None, outpoint: htlc.prev_funding_outpoint, + cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(), }); let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing { @@ -7900,6 +7958,7 @@ impl_writeable_tlv_based!(PhantomRouteHints, { impl_writeable_tlv_based_enum!(PendingHTLCRouting, (0, Forward) => { (0, onion_packet, required), + (1, incoming_cltv_expiry, option), (2, short_channel_id, required), }, (1, Receive) => { @@ -8005,6 +8064,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (0, short_channel_id, required), (1, phantom_shared_secret, option), (2, outpoint, required), + (3, cltv_expiry, option), (4, htlc_id, required), (6, incoming_packet_shared_secret, required), (7, user_channel_id, option), diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2fbc36ce9a5..8e902da4580 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2214,6 +2214,138 @@ fn channel_reserve_in_flight_removes() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); } +enum PostFailBackAction { + TimeoutOnChain, + ClaimOnChain, + FailOffChain, + ClaimOffChain, +} + +#[test] +fn test_fail_back_before_backwards_timeout() { + do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain); +} + +fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) { + // Test that we fail an HTLC upstream if we are still waiting for confirmation downstream + // just 
before the upstream timeout expires + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + for node in nodes.iter() { + *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000; + } + + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + // Start every node on the same block height to make reasoning about timeouts easier + connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + + // Force close downstream with timeout + let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1; + connect_blocks(&nodes[1], timeout_blocks); + let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, + &[nodes[2].node.get_our_node_id(); 1], 100_000); + check_closed_broadcast!(nodes[1], true); + check_added_monitors!(nodes[1], 1); + + // Nothing is confirmed for a while + // We subtract `LATENCY_GRACE_PERIOD_BLOCKS` once because we already confirmed these blocks + // to force-close downstream, and once more because it's also used as the buffer when failing + // upstream. + let upstream_timeout_blocks = + MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - LATENCY_GRACE_PERIOD_BLOCKS; + connect_blocks(&nodes[1], upstream_timeout_blocks); + + // Connect blocks for nodes[0] to make sure they don't go on-chain + connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); + + // Check that nodes[1] fails the HTLC upstream + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + check_added_monitors!(nodes[1], 1); + let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} = htlc_updates; + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, + PaymentFailedConditions::new().blamed_chan_closed(true)); + + // Make sure we handle possible duplicate fails or extra messages after failing back + match post_fail_back_action { + PostFailBackAction::TimeoutOnChain => { + // Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again + mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + // Expect handling another fail back event, but the HTLC is already gone + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOnChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors!(nodes[2], 1); + get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + + connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); + let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS); + check_closed_broadcast!(nodes[2], true); + check_closed_event(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false, + &[nodes[1].node.get_our_node_id(); 1], 100_000); + check_added_monitors!(nodes[2], 1); + + mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::FailOffChain => { + nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], + vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + check_added_monitors!(nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let update_fail = commitment_update.update_fail_htlcs[0].clone(); + + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail); + let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOffChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors!(nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone(); + + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &update_fulfill); + let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + }; +} + #[test] fn channel_monitor_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -2310,7 +2442,7 @@ fn channel_monitor_network_test() { let 
node2_commitment_txid; { let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE); - connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1); + connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT); node2_commitment_txid = node_txn[0].txid(); @@ -3060,9 +3192,9 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false - , [nodes[2].node.get_our_node_id()], 100000); - connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, + &[nodes[2].node.get_our_node_id()], 100000); + connect_blocks(&nodes[1], 100 - nodes[2].best_block_info().1); let timeout_tx = { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); if nodes[1].connect_style.borrow().skips_blocks() { diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index 267774481b4..515a3912741 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -485,6 +485,62 @@ pub struct ChannelConfig { /// [`PaymentClaimable::counterparty_skimmed_fee_msat`]: crate::events::Event::PaymentClaimable::counterparty_skimmed_fee_msat // TODO: link to bLIP when it's merged pub accept_underpaying_htlcs: bool, + /// A multiplier on the on-chain fees (assuming the high priority feerate) setting the + /// threshold for an HTLC's value for which we will fail the HTLC early to prevent closing a + /// channel. This field is denoted in millionths. + /// + /// If we have forwarded an HTLC to a peer and they have not claimed it on or off-chain by the + /// time the previous hop's HTLC timeout expires, we can choose to fail back the HTLC to save + /// the upstream channel from closing on-chain, or we can risk the channel in hopes of getting + /// a last-minute preimage from downstream. The risk may or may not be worth it depending on + /// how large the HTLC is relative to how much we will lose in fees if the channel closes. + /// + /// This roughly translates to how much the HTLC value should be proportional to the amount + /// we'll pay on chain. So if the value of this flag is 1,500,000, it means we should only risk + /// going on-chain if the HTLC value is at least 1.5x the amount we'll pay on chain. + /// + /// Based on this multiplier, we will fail back HTLCs in the described situation if the HTLC's + /// value is below the following threshold: + /// `high_priority_feerate_per_kw * total_weight * early_fail_multiplier / 1,000,000,000` (we + /// divide by 1,000,000,000 because feerate is denoted by kiloweight units, and the multiplier + /// is denoted in millionths). + /// + /// Total weight is defined by the situation where we took the risk closing on-chain, and the + /// result was in our favor, i.e. we claimed with an HTLC-success transaction. So, we use the + /// sum of the weight of the commitment transaction (if we're the funder of the channel) and + /// the weight of an HTLC-success transaction, including one extra P2WPKH input and output + /// to account for fee-bumping. 
These weights are calculated as follows: + /// + /// [Commitment transaction weight](https://github.com/lightning/bolts/blob/master/03-transactions.md#expected-weight-of-the-commitment-transaction): + /// * (no option_anchors) 500 + 172 * num-htlc-outputs + 224 weight + /// * (option_anchors) 900 + 172 * num-htlc-outputs + 224 weight + /// + /// [HTLC-success weight](https://github.com/lightning/bolts/blob/master/03-transactions.md#expected-weight-of-htlc-timeout-and-htlc-success-transactions): + /// * HTLC-success weight: 703 (706 with anchors) weight + /// * 1 input + witness spending a P2WPKH output: 272 weight + /// * 1 P2WPKH output: 31 bytes * 4 = 124 weight + /// * Total: 1099 (1102 with anchors) weight + /// + /// Sample calculations: + /// * Total weight assuming we're the funder, on a non-anchor channel with 1 HTLC: + /// 500 + 172 * 1 + 224 + 1099 = 1995. + /// * Feerate: 1 sat/vbyte -> 253 sats/KW (250 sat/KW, although the minimum defaults to 253 + /// sats/KW for rounding, see [`FeeEstimator`]) + /// * Cost to claim-onchain: 1995 * 253 / 1000 = 504 sats. + /// * Minimum HTLC value required to risk going on-chain: + /// 1995 * 253 * 1,500,000 / 1,000 / 1,000,000 = 757 sats. + /// * Feerate: 30 sat/vbyte -> 7,500 sat/KW + /// * Cost to claim on-chain: 1995 * 7500 / 1000 = 14,962 sats. + /// * Minimum HTLC value required to risk going on-chain: + /// 1995 * 7500 * 1,500,000 / 1,000 / 1,000,000 = 22,443 sats. + /// + /// If you prefer to always let upstream HTLCs resolve on-chain in the event that your + /// downstream channel takes too long to confirm on-chain, just set this multiplier to 0. + /// + /// Default value: 1,500,000. + /// + /// [`FeeEstimator`]: crate::chain::chaininterface::FeeEstimator + pub early_fail_multiplier: u64, } impl ChannelConfig { @@ -518,6 +574,7 @@ impl Default for ChannelConfig { max_dust_htlc_exposure: MaxDustHTLCExposure::FeeRateMultiplier(5000), force_close_avoidance_max_fee_satoshis: 1000, accept_underpaying_htlcs: false, + early_fail_multiplier: 1_500_000, } } } @@ -534,6 +591,7 @@ impl crate::util::ser::Writeable for ChannelConfig { (2, self.forwarding_fee_base_msat, required), (3, self.max_dust_htlc_exposure, required), (4, self.cltv_expiry_delta, required), + (5, self.early_fail_multiplier, required), (6, max_dust_htlc_exposure_msat_fixed_limit, required), // ChannelConfig serialized this field with a required type of 8 prior to the introduction of // LegacyChannelConfig. 
To make sure that serialization is not compatible with this one, we use
@@ -553,12 +611,14 @@ impl crate::util::ser::Readable for ChannelConfig {
 		let mut max_dust_htlc_exposure_msat = None;
 		let mut max_dust_htlc_exposure_enum = None;
 		let mut force_close_avoidance_max_fee_satoshis = 1000;
+		let mut early_fail_multiplier = 1_500_000;
 		read_tlv_fields!(reader, {
 			(0, forwarding_fee_proportional_millionths, required),
 			(1, accept_underpaying_htlcs, (default_value, false)),
 			(2, forwarding_fee_base_msat, required),
 			(3, max_dust_htlc_exposure_enum, option),
 			(4, cltv_expiry_delta, required),
+			(5, early_fail_multiplier, (default_value, 1_500_000u64)),
 			// Has always been written, but became optionally read in 0.0.116
 			(6, max_dust_htlc_exposure_msat, option),
 			(10, force_close_avoidance_max_fee_satoshis, required),
@@ -573,6 +633,7 @@ impl crate::util::ser::Readable for ChannelConfig {
 			cltv_expiry_delta,
 			max_dust_htlc_exposure: max_dust_htlc_exposure_msat,
 			force_close_avoidance_max_fee_satoshis,
+			early_fail_multiplier,
 		})
 	}
 }
@@ -585,6 +646,7 @@ pub struct ChannelConfigUpdate {
 	pub cltv_expiry_delta: Option<u16>,
 	pub max_dust_htlc_exposure_msat: Option<MaxDustHTLCExposure>,
 	pub force_close_avoidance_max_fee_satoshis: Option<u64>,
+	pub early_fail_multiplier: Option<u64>,
 }
 
 impl Default for ChannelConfigUpdate {
@@ -595,6 +657,7 @@ impl Default for ChannelConfigUpdate {
 			cltv_expiry_delta: None,
 			max_dust_htlc_exposure_msat: None,
 			force_close_avoidance_max_fee_satoshis: None,
+			early_fail_multiplier: None,
 		}
 	}
 }
@@ -607,6 +670,7 @@ impl From<ChannelConfig> for ChannelConfigUpdate {
 			cltv_expiry_delta: Some(config.cltv_expiry_delta),
 			max_dust_htlc_exposure_msat: Some(config.max_dust_htlc_exposure),
 			force_close_avoidance_max_fee_satoshis: Some(config.force_close_avoidance_max_fee_satoshis),
+			early_fail_multiplier: Some(config.early_fail_multiplier),
 		}
 	}
 }
@@ -650,6 +714,7 @@ impl crate::util::ser::Writeable for LegacyChannelConfig {
 			(4, self.announced_channel, required),
 			(5, self.options.max_dust_htlc_exposure, required),
 			(6, self.commit_upfront_shutdown_pubkey, required),
+			(7, self.options.early_fail_multiplier, required),
 			(8, self.options.forwarding_fee_base_msat, required),
 		});
 		Ok(())
@@ -666,6 +731,7 @@ impl crate::util::ser::Readable for LegacyChannelConfig {
 		let mut commit_upfront_shutdown_pubkey = false;
 		let mut forwarding_fee_base_msat = 0;
 		let mut max_dust_htlc_exposure_enum = None;
+		let mut early_fail_multiplier = 1_500_000;
 		read_tlv_fields!(reader, {
 			(0, forwarding_fee_proportional_millionths, required),
 			// Has always been written, but became optionally read in 0.0.116
@@ -674,6 +740,7 @@ impl crate::util::ser::Readable for LegacyChannelConfig {
 			(3, force_close_avoidance_max_fee_satoshis, (default_value, 1000u64)),
 			(4, announced_channel, required),
 			(5, max_dust_htlc_exposure_enum, option),
 			(6, commit_upfront_shutdown_pubkey, required),
+			(7, early_fail_multiplier, (default_value, 1_500_000u64)),
 			(8, forwarding_fee_base_msat, required),
 		});
@@ -689,6 +756,7 @@ impl crate::util::ser::Readable for LegacyChannelConfig {
 				force_close_avoidance_max_fee_satoshis,
 				forwarding_fee_base_msat,
 				accept_underpaying_htlcs: false,
+				early_fail_multiplier,
 			},
 			announced_channel,
 			commit_upfront_shutdown_pubkey,
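
For reference, the threshold arithmetic described in the `early_fail_multiplier` documentation above can be checked with a small standalone sketch. This is plain Rust with no LDK types; the weights and feerates mirror the doc's non-anchor sample calculation, and the helper names here are illustrative only, not part of the patch:

```rust
/// Non-anchor commitment transaction weight per BOLT 3: 500 + 172 * num-htlc-outputs + 224.
fn commitment_weight(num_htlc_outputs: u64) -> u64 {
	500 + 172 * num_htlc_outputs + 224
}

/// HTLC-success transaction (703) plus a P2WPKH fee-bump input (272) and output (31 * 4).
const HTLC_CLAIM_WEIGHT: u64 = 703 + 272 + 31 * 4;

/// Returns (cost to claim on-chain, minimum HTLC value worth risking a force-close for),
/// both in satoshis, following the formula in the `early_fail_multiplier` docs.
fn fail_back_threshold_sat(
	feerate_per_kw: u64, num_htlc_outputs: u64, multiplier_millionths: u64,
) -> (u64, u64) {
	let total_weight = commitment_weight(num_htlc_outputs) + HTLC_CLAIM_WEIGHT;
	let cost = feerate_per_kw * total_weight / 1000;
	let threshold = feerate_per_kw * total_weight * multiplier_millionths / 1_000 / 1_000_000;
	(cost, threshold)
}

fn main() {
	// 1 sat/vbyte -> 253 sat/kW, one HTLC output, default multiplier of 1,500,000.
	assert_eq!(fail_back_threshold_sat(253, 1, 1_500_000), (504, 757));
	// 30 sat/vbyte -> 7,500 sat/kW.
	assert_eq!(fail_back_threshold_sat(7_500, 1, 1_500_000), (14_962, 22_443));
}
```

Setting the multiplier to 0 makes the threshold 0, so every pending HTLC counts as "worth the risk" and upstream HTLCs are always left to resolve on-chain rather than being failed back early, matching the opt-out described in the config documentation.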