diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 5f2bb248cd1..5b9d7435d12 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -5899,26 +5899,38 @@ impl ChannelMonitorImpl { // chain when our counterparty is waiting for expiration to off-chain fail an HTLC // we give ourselves a few blocks of headroom after expiration before going // on-chain for an expired HTLC. - let htlc_outbound = $holder_tx == htlc.offered; - if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) || - (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) { - log_info!(logger, "Force-closing channel due to {} HTLC timeout - HTLC with payment hash {} expires at {}", if htlc_outbound { "outbound" } else { "inbound"}, htlc.payment_hash, htlc.cltv_expiry); - return Some(htlc.payment_hash); + let htlc_outbound = $holder_tx == htlc.0.offered; + let has_incoming = if htlc_outbound { + if let Some(source) = htlc.1.as_deref() { + match *source { + HTLCSource::OutboundRoute { .. } => false, + HTLCSource::PreviousHopData(_) => true, + } + } else { + panic!("Every offered non-dust HTLC should have a corresponding source"); + } + } else { + true + }; + if (htlc_outbound && has_incoming && htlc.0.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) || + (!htlc_outbound && htlc.0.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.0.payment_hash)) { + log_info!(logger, "Force-closing channel due to {} HTLC timeout - HTLC with payment hash {} expires at {}", if htlc_outbound { "outbound" } else { "inbound"}, htlc.0.payment_hash, htlc.0.cltv_expiry); + return Some(htlc.0.payment_hash); } } } } - scan_commitment!(holder_commitment_htlcs!(self, CURRENT), true); + scan_commitment!(holder_commitment_htlcs!(self, CURRENT_WITH_SOURCES), true); if let Some(ref txid) = self.funding.current_counterparty_commitment_txid { if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) { - scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false); + scan_commitment!(htlc_outputs.iter(), false); } } if let Some(ref txid) = self.funding.prev_counterparty_commitment_txid { if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) { - scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false); + scan_commitment!(htlc_outputs.iter(), false); } } diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index d56670f4d67..0496febc761 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -3031,6 +3031,92 @@ fn held_htlc_timeout() { ); } +#[test] +fn fail_held_htlc_on_reconnect() { + // Test that if a held HTLC by the sender LSP fails but the async sender is offline, the HTLC + // is failed on reconnect instead of FC the channel. 
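+ // Topology: `sender (0) --> sender LSP (1) --> invoice server (2) --> recipient (3)`, matching the channels created below.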
+ let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + + let (sender_cfg, recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg()); + let mut sender_lsp_cfg = test_default_channel_config(); + sender_lsp_cfg.enable_htlc_hold = true; + let mut invoice_server_cfg = test_default_channel_config(); + invoice_server_cfg.accept_forwards_to_priv_channels = true; + + let node_chanmgrs = create_node_chanmgrs( + 4, + &node_cfgs, + &[Some(sender_cfg), Some(sender_lsp_cfg), Some(invoice_server_cfg), Some(recipient_cfg)], + ); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0); + unify_blockheight_across_nodes(&nodes); + let sender = &nodes[0]; + let sender_lsp = &nodes[1]; + let invoice_server = &nodes[2]; + let recipient = &nodes[3]; + + let amt_msat = 5000; + let (_, peer_node_id, static_invoice_om) = build_async_offer_and_init_payment(amt_msat, &nodes); + let payment_hash = + lock_in_htlc_for_static_invoice(&static_invoice_om, peer_node_id, sender, sender_lsp); + + sender_lsp.node.process_pending_htlc_forwards(); + let (peer_id, held_htlc_om) = + extract_held_htlc_available_oms(sender, &[sender_lsp, invoice_server, recipient]) + .pop() + .unwrap(); + recipient.onion_messenger.handle_onion_message(peer_id, &held_htlc_om); + + let _ = extract_release_htlc_oms(recipient, &[sender, sender_lsp, invoice_server]); + + // Disconnect async sender <-> sender LSP + sender.node.peer_disconnected(sender_lsp.node.get_our_node_id()); + sender_lsp.node.peer_disconnected(sender.node.get_our_node_id()); + + // Connect blocks such that they cause the HTLC to timeout + let chan_id = chan.0.channel_id; + let channel = + sender.node.list_channels().iter().find(|c| c.channel_id == chan_id).unwrap().clone(); + let htlc_expiry = channel + .pending_outbound_htlcs + .iter() + .find(|htlc| htlc.payment_hash == payment_hash) + .unwrap() + .cltv_expiry; + let blocks_to_connect = htlc_expiry - sender.best_block_info().1 + 100; + connect_blocks(sender, blocks_to_connect); + connect_blocks(sender_lsp, blocks_to_connect); + + sender_lsp.node.process_pending_htlc_forwards(); + let mut evs = sender_lsp.node.get_and_clear_pending_events(); + assert_eq!(evs.len(), 1); + match evs.pop().unwrap() { + Event::HTLCHandlingFailed { failure_type, failure_reason, .. } => { + assert!(matches!(failure_type, HTLCHandlingFailureType::InvalidForward { .. })); + assert!(matches!( + failure_reason, + Some(HTLCHandlingFailureReason::Local { + reason: LocalHTLCFailureReason::ForwardExpiryBuffer + }) + )); + }, + _ => panic!(), + } + + // After reconnecting, check that HTLC was failed and channel is open. 
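+ // The HTLC failure was queued while the sender was offline (see `pending_cell_htlc_fails` below); it should be delivered as part of the reconnect.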
+ let mut reconnect_args = ReconnectArgs::new(&sender, &sender_lsp); + reconnect_args.pending_cell_htlc_fails.0 = 1; + reconnect_nodes(reconnect_args); + + expect_payment_failed!(sender, payment_hash, false); + assert_eq!(sender.node.list_channels().len(), 1); + assert_eq!(sender_lsp.node.list_channels().len(), 2); +} + #[test] fn intercepted_hold_htlc() { // Test a payment `sender --> LSP --> recipient` such that the HTLC is both a hold htlc and an diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index de4ee805808..4b0194bab21 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -1014,6 +1014,12 @@ fn do_test_async_holder_signatures(keyed_anchors: bool, p2a_anchor: bool, remote // We'll connect blocks until the sender has to go onchain to time out the HTLC. connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); + let closure_reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; + nodes[0] + .node + .force_close_broadcasting_latest_txn(&chan_id, &node_b_id, closure_reason.to_string()) + .unwrap(); + // No transaction should be broadcast since the signer is not available yet. assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty()); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); @@ -1076,7 +1082,11 @@ fn do_test_async_holder_signatures(keyed_anchors: bool, p2a_anchor: bool, remote let closure_reason = if remote_commitment { ClosureReason::CommitmentTxConfirmed } else { - ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) } + let closure_reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; + ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(true), + message: closure_reason.to_string(), + } }; check_closed_event(&nodes[0], 1, closure_reason, false, &[node_b_id], 100_000); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1bc1bfbc2ff..60f77ea0c47 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -118,6 +118,11 @@ fn test_monitor_and_persister_update_fail() { chain_mon .chain_monitor .block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200); + chain_mon.chain_monitor.get_monitor(chan.2).unwrap().broadcast_latest_holder_commitment_txn( + &&tx_broadcaster, + &nodes[0].fee_estimator, + &nodes[0].logger, + ); // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index d1c0ac8f12b..8d5b9218fde 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -706,9 +706,12 @@ pub fn channel_monitor_network_test() { // confusing us in the following tests. let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&chan_3.2); + // Create another channel from 2 -> 3 to route HTLC to timeout + create_announced_chan_between_nodes(&nodes, 2, 3); + // One pending HTLC to time out: let (payment_preimage_2, payment_hash_2, ..) = - route_payment(&nodes[3], &[&nodes[4]], 3_000_000); + route_payment(&nodes[2], &[&nodes[3], &nodes[4]], 3_000_000); // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for // buffer space). 
@@ -747,8 +750,20 @@ pub fn channel_monitor_network_test() { // Claim the payment on nodes[4], giving it knowledge of the preimage claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2); + let chan_id = chan_4.2; + let channel = + nodes[4].node.list_channels().iter().find(|c| c.channel_id == chan_id).unwrap().clone(); + let htlc_expiry = channel + .pending_inbound_htlcs + .iter() + .find(|inbound_htlc| inbound_htlc.payment_hash == payment_hash_2) + .unwrap() + .cltv_expiry; - connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); + connect_blocks( + &nodes[4], + htlc_expiry - nodes[4].best_block_info().1 - CLTV_CLAIM_BUFFER + 2, + ); let events = nodes[4].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); let close_chan_update_2 = match events[1] { @@ -777,15 +792,16 @@ pub fn channel_monitor_network_test() { let node_id_3 = node_d_id; nodes[3].gossip_sync.handle_channel_update(Some(node_id_4), &close_chan_update_2).unwrap(); nodes[4].gossip_sync.handle_channel_update(Some(node_id_3), &close_chan_update_1).unwrap(); - assert_eq!(nodes[3].node.list_channels().len(), 0); + // Here nodes[3] should only have its channel with nodes[2] + assert_eq!(nodes[3].node.list_channels().len(), 1); assert_eq!(nodes[4].node.list_channels().len(), 0); assert_eq!( nodes[3].chain_monitor.chain_monitor.watch_channel(chan_3.2, chan_3_mon), Ok(ChannelMonitorUpdateStatus::Completed) ); - let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash_2) }; - check_closed_event!(nodes[3], 1, reason, [node_id_4], 100000); + let closure_reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash_2) }; + check_closed_event!(nodes[3], 1, closure_reason, [node_id_4], 100000); } #[xtest(feature = "_externalize_tests")] @@ -4918,42 +4934,52 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { } fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let _chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); let (route, payment_hash, _, payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 }); + get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 50000 } else { 3000000 }); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors(&nodes[0], 1); + check_added_monitors!(nodes[0], 1); + let update_0 = get_htlc_update_msgs!(nodes[0], node_b_id); + + nodes[1].node.handle_update_add_htlc(node_a_id, &update_0.update_add_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + nodes[1].node.process_pending_htlc_forwards(); + 
check_added_monitors!(nodes[1], 1); - let _as_update = get_htlc_update_msgs!(nodes[0], node_b_id); + let _bs_update = get_htlc_update_msgs!(nodes[1], node_c_id); - // As far as A is concerned, the HTLC is now present only in the latest remote commitment - // transaction, however it is not in A's latest local commitment, so we can just broadcast that + // As far as B is concerned, the HTLC is now present only in the latest remote commitment + // transaction, however it is not in B's latest local commitment, so we can just broadcast that // to "time out" the HTLC. - let starting_block = nodes[1].best_block_info(); + let starting_block = nodes[2].best_block_info(); let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 { - connect_block(&nodes[0], &block); + connect_block(&nodes[1], &block); block.header.prev_blockhash = block.block_hash(); } - test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); - check_closed_broadcast!(nodes[0], true); - check_added_monitors(&nodes[0], 1); + + test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE); + check_closed_broadcast!(nodes[1], true); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event!(nodes[1], 1, reason, [node_c_id], 100000); } fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { @@ -4964,54 +4990,62 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let _chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present - // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions. + // Fail the payment, but don't deliver B's final RAA, resulting in the HTLC only being present + // in C's previous (unrevoked) commitment transaction, but none of B's commitment transactions. // Also optionally test that we *don't* fail the channel in case the commitment transaction was // actually revoked. let htlc_value = if use_dust { 50000 } else { 3000000 }; - let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value); - nodes[1].node.fail_htlc_backwards(&our_payment_hash); + let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], htlc_value); + nodes[2].node.fail_htlc_backwards(&our_payment_hash); expect_and_process_pending_htlcs_and_htlc_handling_failed( - &nodes[1], + &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }], ); - check_added_monitors(&nodes[1], 1); + check_added_monitors(&nodes[2], 1); - let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_updates.commitment_signed); - check_added_monitors(&nodes[0], 1); - let as_updates = get_revoke_commit_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_revoke_and_ack(node_a_id, &as_updates.0); - check_added_monitors(&nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.1); + let c_updates_to_b = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fail_htlc(node_c_id, &c_updates_to_b.update_fail_htlcs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &c_updates_to_b.commitment_signed); check_added_monitors(&nodes[1], 1); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + let b_updates_to_c = get_revoke_commit_msgs!(nodes[1], node_c_id); + nodes[2].node.handle_revoke_and_ack(node_b_id, &b_updates_to_c.0); + check_added_monitors(&nodes[2], 1); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &b_updates_to_c.1); + check_added_monitors(&nodes[2], 1); + let cs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); if check_revoke_no_close { - nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - check_added_monitors(&nodes[0], 1); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_revoke_and_ack); + check_added_monitors(&nodes[1], 1); } - let starting_block = nodes[1].best_block_info(); + let starting_block = nodes[2].best_block_info(); let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 { - connect_block(&nodes[0], &block); + connect_block(&nodes[1], &block); block.header.prev_blockhash = block.block_hash(); } if !check_revoke_no_close { - test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); - check_closed_broadcast!(nodes[0], true); - check_added_monitors(&nodes[0], 1); + test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE); + check_closed_broadcast!(nodes[1], true); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(our_payment_hash) }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event!(nodes[1], 1, reason, [node_c_id], 100000); } else { - expect_payment_failed!(nodes[0], our_payment_hash, true); + // Check we get a forwarding failure when the commitment transaction was revoked. + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], + &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], + ); + check_added_monitors(&nodes[1], 1); + let _ = get_htlc_update_msgs!(nodes[1], node_a_id); } } @@ -7200,7 +7234,8 @@ pub fn test_update_err_monitor_lockdown() { // Route a HTLC from node 0 to node 1 (but don't settle) let (preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9_000_000); - // Copy ChainMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor timeout HTLC onchain + // Copy ChainMonitor to simulate a watchtower and trigger broadcast of local commitment + // transaction. let chain_source = test_utils::TestChainSource::new(Network::Testnet); let logger = test_utils::TestLogger::with_id(format!("node {}", 0)); let persister = test_utils::TestPersister::new(); @@ -7236,6 +7271,11 @@ pub fn test_update_err_monitor_lockdown() { // transaction lock time requirements here. chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200)); watchtower.chain_monitor.block_connected(&block, 200); + watchtower.chain_monitor.get_monitor(chan_1.2).unwrap().broadcast_latest_holder_commitment_txn( + &&chanmon_cfgs[0].tx_broadcaster, + &nodes[0].fee_estimator, + &nodes[0].logger, + ); // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); @@ -7286,22 +7326,25 @@ pub fn test_concurrent_monitor_claim() { // the latest state N+1, Alice rejects state N+1, but Bob has already broadcast it, // state N+1 confirms. Alice claims output from state N+1. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let _ = create_announced_chan_between_nodes(&nodes, 2, 0); // Rebalance the network to generate htlc in the two directions + send_payment(&nodes[2], &[&nodes[0]], 10_000_000); send_payment(&nodes[0], &[&nodes[1]], 10_000_000); - // Route a HTLC from node 0 to node 1 (but don't settle) - let (_, payment_hash_timeout, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000); + // Route a HTLC from 2 -> 0 -> 1 (but don't settle) + let (_, payment_hash_timeout, ..) = + route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 9_000_000); // Copy ChainMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain let chain_source = test_utils::TestChainSource::new(Network::Testnet); @@ -7340,11 +7383,18 @@ pub fn test_concurrent_monitor_claim() { let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time // requirements here. 
- const HTLC_TIMEOUT_BROADCAST: u32 = - CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; - let next_block = (block.clone(), HTLC_TIMEOUT_BROADCAST); - alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, next_block); - watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST); + let channel = + nodes[0].node.list_channels().iter().find(|c| c.channel_id == chan_1.2).unwrap().clone(); + let htlc_expiry = channel + .pending_outbound_htlcs + .iter() + .find(|outbound_htlc| outbound_htlc.payment_hash == payment_hash_timeout) + .unwrap() + .cltv_expiry; + let htlc_timeout: u32 = htlc_expiry + LATENCY_GRACE_PERIOD_BLOCKS; + let next_block = (block.clone(), htlc_timeout); + alice_broadcaster.blocks.lock().unwrap().resize((htlc_timeout) as usize, next_block); + watchtower_alice.chain_monitor.block_connected(&block, htlc_timeout); // Watchtower Alice should have broadcast a commitment/HTLC-timeout { @@ -7388,7 +7438,7 @@ pub fn test_concurrent_monitor_claim() { watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); - watchtower_bob.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST - 1); + watchtower_bob.chain_monitor.block_connected(&block, htlc_timeout - 1); // Route another payment to generate another update with still previous HTLC pending let (route, payment_hash, _, payment_secret) = @@ -7438,10 +7488,9 @@ pub fn test_concurrent_monitor_claim() { check_added_monitors(&nodes[0], 1); //// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout - watchtower_bob.chain_monitor.block_connected( - &create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), - HTLC_TIMEOUT_BROADCAST, - ); + watchtower_bob + .chain_monitor + .block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), htlc_timeout); // Watchtower Bob should have broadcast a commitment/HTLC-timeout let bob_state_y; @@ -7452,7 +7501,7 @@ pub fn test_concurrent_monitor_claim() { }; // We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout - let height = HTLC_TIMEOUT_BROADCAST + 1; + let height = htlc_timeout + 1; connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); check_closed_broadcast(&nodes[0], 1, true); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash_timeout) }; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index b316381398e..43584af5f4d 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -16,7 +16,7 @@ use crate::chain::Watch; use crate::chain::channelmonitor::{Balance, BalanceSource, ChannelMonitorUpdateStep, HolderCommitmentTransactionBalance, ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; -use crate::events::bump_transaction::{BumpTransactionEvent}; +use crate::events::bump_transaction::BumpTransactionEvent; use crate::events::{Event, ClosureReason, HTLCHandlingFailureType}; use crate::ln::channel; use crate::ln::types::ChannelId; @@ -1431,6 +1431,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc let _b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10); + 
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), "".to_string()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -1455,7 +1456,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc }); assert!(failed_payments.is_empty()); match &events[0] { - Event::ChannelClosed { reason: ClosureReason::HTLCsTimedOut { .. }, .. } => {}, + Event::ChannelClosed { reason: ClosureReason::HolderForceClosed { .. }, .. } => {}, _ => panic!(), } @@ -2520,28 +2521,36 @@ fn do_test_yield_anchors_events(have_htlcs: bool, p2a_anchor: bool) { // allowing the consumer to provide additional fees to the commitment transaction to be // broadcast. Once the commitment transaction confirms, events for the HTLC resolution should be // emitted by LDK, such that the consumer can attach fees to the zero fee HTLC transactions. - let mut chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); let mut anchors_config = test_default_channel_config(); anchors_config.channel_handshake_config.announce_for_forwarding = true; anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; anchors_config.manually_accept_inbound_channels = true; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone()), Some(anchors_config.clone()), Some(anchors_config)]); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value( &nodes, 0, 1, 1_000_000, 500_000_000 ); + let (_, _, _chan_2_id, _funding_tx) = create_announced_chan_between_nodes_with_value( + &nodes, 2, 1, 1_000_000, 500_000_000 + ); + let (_, _, _chan_3_id, _funding_tx) = create_announced_chan_between_nodes_with_value( + &nodes, 3, 0, 1_000_000, 500_000_000 + ); + + let mut payment_preimage_1 = None; let mut payment_hash_1 = None; let mut payment_preimage_2 = None; let mut payment_hash_2 = None; if have_htlcs { - let (preimage_1, hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let (preimage_2, hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 2_000_000); + let (preimage_1, hash_1, ..) = route_payment(&nodes[3], &[&nodes[0], &nodes[1]], 1_000_000); + let (preimage_2, hash_2, ..) 
= route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 2_000_000); payment_preimage_1 = Some(preimage_1); payment_hash_1 = Some(hash_1); payment_preimage_2 = Some(preimage_2); @@ -2691,11 +2700,15 @@ fn do_test_yield_anchors_events(have_htlcs: bool, p2a_anchor: bool) { connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - let conditions = PaymentFailedConditions::new().from_mon_update(); - expect_payment_failed_conditions(&nodes[0], payment_hash_1.unwrap(), false, conditions); - connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[0], + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[1].node.get_our_node_id()), channel_id: chan_id }], + ); + check_added_monitors(&nodes[0], 1); + let _ = get_htlc_update_msgs!(nodes[0], nodes[3].node.get_our_node_id()); + let holder_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(holder_events.len(), 3); for event in holder_events { diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 8c9e552fa04..0af7fc95287 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -919,6 +919,70 @@ fn test_partial_claim_before_restart() { do_test_partial_claim_before_restart(true, true); } +#[test] +fn test_no_fc_outgoing_payment() { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + + let mut intercept_forwards_config = test_default_channel_config(); + intercept_forwards_config.accept_intercept_htlcs = true; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); + let nodes_0_deserialized; + + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let _chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Send payment from 0 -> 1 -> 2 that will be failed back + let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 10_000); + + nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[2], + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); + check_added_monitors!(nodes[2], 1); + + // After committing the HTLCs, fail it back from 2 -> 1 + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + + nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[2], &updates.commitment_signed, true); + + let _updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + + // Disconnect the peers before failing from 1 -> 0 + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + + let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id_1).encode(); + reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); + + // While disconnected, connect blocks such that it goes past HTLC expiry. When nodes reconnect instead of FC the + // channel, check that the HTLC was failed and channel is still open. 
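+ // Read the HTLC's cltv_expiry from the channel details so we can connect blocks well past it on both nodes.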
+ let channel = nodes[0].node.list_channels().iter().find(|c| c.channel_id == chan_id_1).unwrap().clone(); + let htlc_expiry = channel.pending_outbound_htlcs.iter().find(|outbound_htlc| outbound_htlc.payment_hash == payment_hash).unwrap().cltv_expiry; + let blocks_to_connect = 200; + connect_blocks(&nodes[0], htlc_expiry - nodes[0].best_block_info().1 + blocks_to_connect); + connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + blocks_to_connect); + + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.pending_htlc_fails.0 = 1; + reconnect_nodes(reconnect_args); + + expect_payment_failed!(nodes[0], payment_hash, true); + + assert_eq!(nodes[0].node.list_channels().len(), 1); + assert_eq!(nodes[1].node.list_channels().len(), 2); + + assert_eq!(nodes[0].node.list_channels().iter().find(|chan| chan.channel_id == chan_id_1).unwrap().pending_outbound_htlcs.len(), 0); + assert_eq!(nodes[1].node.list_channels().iter().find(|chan| chan.channel_id == chan_id_1).unwrap().pending_inbound_htlcs.len(), 0); +} + fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool, use_intercept: bool) { if !use_cs_commitment { assert!(!claim_htlc); } // If we go to forward a payment, and the ChannelMonitor persistence completes, but the diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 90cd459938e..405dca7d7ab 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -497,6 +497,7 @@ fn test_set_outpoints_partial_claiming() { // Connect blocks on node B connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); + nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), "".to_string()).unwrap(); check_closed_broadcast!(nodes[1], true); check_closed_events(&nodes[1], &[ExpectedCloseEvent { channel_capacity_sats: Some(1_000_000), @@ -818,27 +819,28 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa fn do_test_retries_own_commitment_broadcast_after_reorg(keyed_anchors: bool, p2a_anchor: bool, revoked_counterparty_commitment: bool) { // Tests that a node will retry broadcasting its own commitment after seeing a confirmed // counterparty commitment be reorged out. 
- let mut chanmon_cfgs = create_chanmon_cfgs(2); + let mut chanmon_cfgs = create_chanmon_cfgs(3); if revoked_counterparty_commitment { chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; } - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut config = test_default_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config.clone())]); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); let nodes_1_deserialized; - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); + let _ = create_announced_chan_between_nodes(&nodes, 2, 0); // Route a payment so we have an HTLC to claim as well. - let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (_, payment_hash, ..) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1_000_000); if revoked_counterparty_commitment { // Trigger a new commitment by routing a dummy HTLC. We will have B broadcast the previous commitment. @@ -854,9 +856,9 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(keyed_anchors: bool, p2a // Connect blocks until the HTLC expiry is met, prompting a commitment broadcast by A. connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); + let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 100_000); if keyed_anchors || p2a_anchor { handle_bump_close_event(&nodes[0]); @@ -907,7 +909,6 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(keyed_anchors: bool, p2a // Confirm B's commitment, A should now broadcast an HTLC timeout for commitment B. mine_transactions(&nodes[0], &[&tx, &anchor_tx]); tx - } else { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1);