Commit a18b940

f - test fixups

* Explicitly assert no hanging messages after off-chain actions
* Get rid of manual user force-close for timeout
* Connect blocks on nodes[0] to make sure they don't go on-chain
* Add comment on syncing block height on nodes
* Simplify and wrap especially long lines
* Add more confirmations to onchain test cases
* Increase feerate to hit fail back code path now that we have a threshold for HTLC value
* Expect new fail events now that we don't prevent duplicate fail events
* Replace timeout buffer with grace period
1 parent 3591c49 commit a18b940
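The block-count bookkeeping behind the last few bullets is spelled out in the functional_tests.rs hunks below. As a minimal standalone sketch of that arithmetic, assuming illustrative constant values (the real TEST_FINAL_CLTV, LATENCY_GRACE_PERIOD_BLOCKS, and MIN_CLTV_EXPIRY_DELTA are defined in rust-lightning and may differ):

// Illustrative values only; the actual constants live in rust-lightning.
const TEST_FINAL_CLTV: u32 = 70;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
const MIN_CLTV_EXPIRY_DELTA: u32 = 72;

fn main() {
	// Blocks nodes[1] must connect before the downstream (nodes[1] -> nodes[2]) HTLC
	// times out and the commitment/HTLC-timeout transactions get broadcast.
	let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1;

	// Blocks remaining before nodes[1] has to fail the HTLC back upstream: one grace
	// period was already spent going on-chain downstream, and a second grace period
	// is kept as the buffer before the upstream deadline (the "replace timeout buffer
	// with grace period" bullet).
	let upstream_timeout_blocks =
		MIN_CLTV_EXPIRY_DELTA - LATENCY_GRACE_PERIOD_BLOCKS - LATENCY_GRACE_PERIOD_BLOCKS;

	// nodes[0] connects the same total so it stays height-synced without ever
	// reaching the point where it would go on-chain itself.
	println!("{} downstream blocks, then {} more before failing upstream",
		timeout_blocks, upstream_timeout_blocks);
}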

2 files changed: +43 -33 lines changed


lightning/src/ln/channel.rs

-8 lines changed

@@ -2277,10 +2277,6 @@ impl<SP: Deref> Channel<SP> where
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			#[cfg(any(test, fuzzing))]
-			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
-			// this is simply a duplicate claim, not previously failed and we lost funds.
-			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 			return UpdateFulfillFetch::DuplicateClaim {};
 		}
 
@@ -2449,10 +2445,6 @@ impl<SP: Deref> Channel<SP> where
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			#[cfg(any(test, fuzzing))]
-			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
-			// is simply a duplicate fail, not previously failed and we failed-back too early.
-			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 			return Ok(None);
 		}
 
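These two hunks drop the test/fuzzing-only assertion that an unmatched claim or fail must correspond to a historically fulfilled HTLC; an unmatched id now simply falls through to the duplicate-claim / no-op return, which is what lets the tests below expect duplicate fail events. A rough standalone sketch of that shape, with made-up types standing in for the channel internals (this is not the crate's API):

// Illustrative only: hypothetical types, not rust-lightning's internals.
#[derive(Debug, PartialEq)]
enum FulfillOutcome {
	Fulfilled(u64),
	DuplicateClaim,
}

fn fulfill_htlc(pending: &mut Vec<u64>, htlc_id: u64) -> FulfillOutcome {
	match pending.iter().position(|id| *id == htlc_id) {
		Some(idx) => {
			pending.remove(idx);
			FulfillOutcome::Fulfilled(htlc_id)
		},
		// Previously a debug_assert checked a "historically fulfilled" set here;
		// an unknown id is now simply treated as a duplicate claim.
		None => FulfillOutcome::DuplicateClaim,
	}
}

fn main() {
	let mut pending = vec![1, 2, 3];
	assert_eq!(fulfill_htlc(&mut pending, 2), FulfillOutcome::Fulfilled(2));
	assert_eq!(fulfill_htlc(&mut pending, 2), FulfillOutcome::DuplicateClaim);
}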

lightning/src/ln/functional_tests.rs

+43 -25 lines changed

@@ -14,7 +14,7 @@
 use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
-use crate::chain::channelmonitor::{self, TIMEOUT_FAIL_BACK_BUFFER};
+use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
@@ -2236,56 +2236,69 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	for node in nodes.iter() {
+		*node.fee_estimator.sat_per_kw.lock().unwrap() = 2000;
+	}
 
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
+	// Start every node on the same block height to make reasoning about timeouts easier
 	connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
 	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
 	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
 
 	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
 
 	// Force close downstream with timeout
-	nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
-	check_added_monitors!(nodes[1], 1);
-	check_closed_broadcast!(nodes[1], true);
-
-	connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+	let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1;
+	connect_blocks(&nodes[1], timeout_blocks);
 	let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
-	check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false,
+	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
 		&[nodes[2].node.get_our_node_id(); 1], 100_000);
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
 
 	// Nothing is confirmed for a while
-	connect_blocks(&nodes[1], MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - TIMEOUT_FAIL_BACK_BUFFER);
+	// We subtract `LATENCY_GRACE_PERIOD_BLOCKS` once because we already confirmed these blocks
+	// to force-close downstream, and once more because it's also used as the buffer when failing
+	// upstream.
+	let upstream_timeout_blocks =
+		MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - LATENCY_GRACE_PERIOD_BLOCKS;
+	connect_blocks(&nodes[1], upstream_timeout_blocks);
+
+	// Connect blocks for nodes[0] to make sure they don't go on-chain
+	connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks);
 
 	// Check that nodes[1] fails the HTLC upstream
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+		vec![HTLCDestination::NextHopChannel {
+			node_id: Some(nodes[2].node.get_our_node_id()),
+			channel_id: chan_2.2
+		}]);
 	check_added_monitors!(nodes[1], 1);
-	let events = nodes[1].node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), 1);
-	let (update_fail, commitment_signed) = match events[0] {
-		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
-			assert!(update_add_htlcs.is_empty());
-			assert!(update_fulfill_htlcs.is_empty());
-			assert_eq!(update_fail_htlcs.len(), 1);
-			assert!(update_fail_malformed_htlcs.is_empty());
-			assert!(update_fee.is_none());
-			(update_fail_htlcs[0].clone(), commitment_signed.clone())
-		},
-		_ => panic!("Unexpected event"),
-	};
+	let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates;
 
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail);
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
-	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().blamed_chan_closed(true));
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+		PaymentFailedConditions::new().blamed_chan_closed(true));
 
 	// Make sure we handle possible duplicate fails or extra messages after failing back
 	match post_fail_back_action {
 		PostFailBackAction::TimeoutOnChain => {
 			// Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again
 			mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment
 			mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			// Expect handling another fail back event, but the HTLC is already gone
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+				vec![HTLCDestination::NextHopChannel {
+					node_id: Some(nodes[2].node.get_our_node_id()),
+					channel_id: chan_2.2
+				}]);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 		},
 		PostFailBackAction::ClaimOnChain => {
 			nodes[2].node.claim_funds(payment_preimage);
@@ -2302,17 +2315,21 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 
 			mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment
 			mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 		},
 		PostFailBackAction::FailOffChain => {
 			nodes[2].node.fail_htlc_backwards(&payment_hash);
-			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2],
+				vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
 			check_added_monitors!(nodes[2], 1);
 			let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
 			let update_fail = commitment_update.update_fail_htlcs[0].clone();
 
 			nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail);
 			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
 			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 		},
 		PostFailBackAction::ClaimOffChain => {
 			nodes[2].node.claim_funds(payment_preimage);
@@ -2324,6 +2341,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 			nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &update_fulfill);
 			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
 			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 		},
 	};
 }
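The functional_tests.rs hunks above change the imports and the body of `do_test_fail_back_before_backwards_timeout`; its `#[test]` entry points are outside this diff. Following the usual pattern of thin wrappers around a `do_test_*` helper, they would presumably look roughly like this (the wrapper names are assumed; only the helper name and the `PostFailBackAction` variants appear in the diff above):

// Hypothetical wrappers; not part of this commit's diff.
#[test]
fn test_fail_back_before_backwards_timeout_timeout_onchain() {
	do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain);
}

#[test]
fn test_fail_back_before_backwards_timeout_claim_onchain() {
	do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain);
}

#[test]
fn test_fail_back_before_backwards_timeout_fail_offchain() {
	do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain);
}

#[test]
fn test_fail_back_before_backwards_timeout_claim_offchain() {
	do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain);
}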
