28 changes: 20 additions & 8 deletions lightning/src/chain/channelmonitor.rs
@@ -5899,26 +5899,38 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
// we give ourselves a few blocks of headroom after expiration before going
// on-chain for an expired HTLC.
let htlc_outbound = $holder_tx == htlc.offered;
if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
(!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
log_info!(logger, "Force-closing channel due to {} HTLC timeout - HTLC with payment hash {} expires at {}", if htlc_outbound { "outbound" } else { "inbound"}, htlc.payment_hash, htlc.cltv_expiry);
return Some(htlc.payment_hash);
let htlc_outbound = $holder_tx == htlc.0.offered;
let has_incoming = if htlc_outbound {
if let Some(source) = htlc.1.as_deref() {
match *source {
HTLCSource::OutboundRoute { .. } => false,
HTLCSource::PreviousHopData(_) => true,
}
} else {
panic!("Every offered non-dust HTLC should have a corresponding source");
}
} else {
true
};
if (htlc_outbound && has_incoming && htlc.0.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
(!htlc_outbound && htlc.0.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.0.payment_hash)) {
log_info!(logger, "Force-closing channel due to {} HTLC timeout - HTLC with payment hash {} expires at {}", if htlc_outbound { "outbound" } else { "inbound"}, htlc.0.payment_hash, htlc.0.cltv_expiry);
return Some(htlc.0.payment_hash);
}
}
}
}

scan_commitment!(holder_commitment_htlcs!(self, CURRENT), true);
scan_commitment!(holder_commitment_htlcs!(self, CURRENT_WITH_SOURCES), true);

if let Some(ref txid) = self.funding.current_counterparty_commitment_txid {
if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) {
scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
scan_commitment!(htlc_outputs.iter(), false);
}
}
if let Some(ref txid) = self.funding.prev_counterparty_commitment_txid {
if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) {
scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
scan_commitment!(htlc_outputs.iter(), false);
}
}

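The hunk above replaces the simpler check (the shorter `htlc.offered` block) with one that also consults each HTLC's source: an expired outbound HTLC now only sends the channel on-chain when it was forwarded from an upstream hop, the idea being that an HTLC we originated ourselves has no upstream (incoming) HTLC whose claim we would otherwise forfeit. A rough standalone sketch of the resulting decision, using simplified stand-in types and assumed constant values (`Htlc`, `HtlcSource`, `should_force_close`, and the constants below are illustrative, not LDK's actual definitions):

const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // assumed value, for illustration only
const CLTV_CLAIM_BUFFER: u32 = 6; // assumed value, for illustration only

/// Simplified stand-in for `HTLCSource`.
enum HtlcSource {
    /// We originated the payment ourselves.
    OutboundRoute,
    /// We are forwarding an HTLC received on another channel.
    PreviousHopData,
}

/// Simplified stand-in for a commitment-transaction HTLC.
struct Htlc {
    offered: bool,
    cltv_expiry: u32,
    preimage_known: bool, // stands in for the `payment_preimages` lookup
}

/// Should an expired HTLC at `height` push us to broadcast our commitment?
fn should_force_close(htlc: &Htlc, source: Option<&HtlcSource>, holder_tx: bool, height: u32) -> bool {
    let outbound = holder_tx == htlc.offered;
    // Only a forwarded outbound HTLC puts funds at risk upstream; if we originated
    // the payment ourselves there is nothing to claim back on-chain.
    let has_incoming = match (outbound, source) {
        (true, Some(HtlcSource::OutboundRoute)) => false,
        (true, Some(HtlcSource::PreviousHopData)) => true,
        (true, None) => panic!("offered non-dust HTLCs must have a source"),
        (false, _) => true,
    };
    (outbound && has_incoming && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height)
        || (!outbound
            && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER
            && htlc.preimage_known)
}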
86 changes: 86 additions & 0 deletions lightning/src/ln/async_payments_tests.rs
@@ -3031,6 +3031,92 @@ fn held_htlc_timeout() {
);
}

#[test]
fn fail_held_htlc_on_reconnect() {
// Test that if an HTLC held by the sender's LSP fails while the async sender is offline, the HTLC
// is failed back on reconnect instead of force-closing the channel.
let chanmon_cfgs = create_chanmon_cfgs(4);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);

let (sender_cfg, recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg());
let mut sender_lsp_cfg = test_default_channel_config();
sender_lsp_cfg.enable_htlc_hold = true;
let mut invoice_server_cfg = test_default_channel_config();
invoice_server_cfg.accept_forwards_to_priv_channels = true;

let node_chanmgrs = create_node_chanmgrs(
4,
&node_cfgs,
&[Some(sender_cfg), Some(sender_lsp_cfg), Some(invoice_server_cfg), Some(recipient_cfg)],
);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
unify_blockheight_across_nodes(&nodes);
let sender = &nodes[0];
let sender_lsp = &nodes[1];
let invoice_server = &nodes[2];
let recipient = &nodes[3];

let amt_msat = 5000;
let (_, peer_node_id, static_invoice_om) = build_async_offer_and_init_payment(amt_msat, &nodes);
let payment_hash =
lock_in_htlc_for_static_invoice(&static_invoice_om, peer_node_id, sender, sender_lsp);

sender_lsp.node.process_pending_htlc_forwards();
let (peer_id, held_htlc_om) =
extract_held_htlc_available_oms(sender, &[sender_lsp, invoice_server, recipient])
.pop()
.unwrap();
recipient.onion_messenger.handle_onion_message(peer_id, &held_htlc_om);

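// Extract and drop the recipient's release_htlc onion messages so the HTLC stays held at the LSP.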
let _ = extract_release_htlc_oms(recipient, &[sender, sender_lsp, invoice_server]);

// Disconnect async sender <-> sender LSP
sender.node.peer_disconnected(sender_lsp.node.get_our_node_id());
sender_lsp.node.peer_disconnected(sender.node.get_our_node_id());

// Connect enough blocks to push the HTLC well past its CLTV expiry so that it times out
let chan_id = chan.0.channel_id;
let channel =
sender.node.list_channels().iter().find(|c| c.channel_id == chan_id).unwrap().clone();
let htlc_expiry = channel
.pending_outbound_htlcs
.iter()
.find(|htlc| htlc.payment_hash == payment_hash)
.unwrap()
.cltv_expiry;
let blocks_to_connect = htlc_expiry - sender.best_block_info().1 + 100;
connect_blocks(sender, blocks_to_connect);
connect_blocks(sender_lsp, blocks_to_connect);

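// The held HTLC is now past its forward-expiry buffer, so processing forwards makes the LSP
// fail it back rather than force-close; with the sender offline, the failure waits for reconnect.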
sender_lsp.node.process_pending_htlc_forwards();
let mut evs = sender_lsp.node.get_and_clear_pending_events();
assert_eq!(evs.len(), 1);
match evs.pop().unwrap() {
Event::HTLCHandlingFailed { failure_type, failure_reason, .. } => {
assert!(matches!(failure_type, HTLCHandlingFailureType::InvalidForward { .. }));
assert!(matches!(
failure_reason,
Some(HTLCHandlingFailureReason::Local {
reason: LocalHTLCFailureReason::ForwardExpiryBuffer
})
));
},
_ => panic!(),
}

// After reconnecting, check that the HTLC was failed and the channel remains open.
let mut reconnect_args = ReconnectArgs::new(&sender, &sender_lsp);
reconnect_args.pending_cell_htlc_fails.0 = 1;
reconnect_nodes(reconnect_args);

expect_payment_failed!(sender, payment_hash, false);
assert_eq!(sender.node.list_channels().len(), 1);
assert_eq!(sender_lsp.node.list_channels().len(), 2);
}

#[test]
fn intercepted_hold_htlc() {
// Test a payment `sender --> LSP --> recipient` such that the HTLC is both a hold htlc and an
12 changes: 11 additions & 1 deletion lightning/src/ln/async_signer_tests.rs
@@ -1014,6 +1014,12 @@ fn do_test_async_holder_signatures(keyed_anchors: bool, p2a_anchor: bool, remote
// We'll connect blocks until the sender has to go onchain to time out the HTLC.
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);

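// An expired locally-originated HTLC no longer triggers an automatic force-close (see the
// channelmonitor change above), so close the channel explicitly to exercise the async signer.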
let closure_reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) };
nodes[0]
.node
.force_close_broadcasting_latest_txn(&chan_id, &node_b_id, closure_reason.to_string())
.unwrap();

// No transaction should be broadcast since the signer is not available yet.
assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty());
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
@@ -1076,7 +1082,11 @@ fn do_test_async_holder_signatures(keyed_anchors: bool, p2a_anchor: bool, remote
let closure_reason = if remote_commitment {
ClosureReason::CommitmentTxConfirmed
} else {
ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }
let closure_reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) };
ClosureReason::HolderForceClosed {
broadcasted_latest_txn: Some(true),
message: closure_reason.to_string(),
}
};
check_closed_event(&nodes[0], 1, closure_reason, false, &[node_b_id], 100_000);

5 changes: 5 additions & 0 deletions lightning/src/ln/chanmon_update_fail_tests.rs
@@ -118,6 +118,11 @@ fn test_monitor_and_persister_update_fail() {
chain_mon
.chain_monitor
.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);
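// Likewise broadcast the holder commitment explicitly here: connecting a block past the HTLC's
// expiry no longer does it automatically for a locally-originated HTLC (per the change above).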
chain_mon.chain_monitor.get_monitor(chan.2).unwrap().broadcast_latest_holder_commitment_txn(
&&tx_broadcaster,
&nodes[0].fee_estimator,
&nodes[0].logger,
);

// Try to update ChannelMonitor
nodes[1].node.claim_funds(preimage);