use crate::chain;
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
-use crate::chain::channelmonitor::{self, TIMEOUT_FAIL_BACK_BUFFER};
+use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
@@ -2236,56 +2236,69 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	for node in nodes.iter() {
+		*node.fee_estimator.sat_per_kw.lock().unwrap() = 2000;
+	}

	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

+	// Start every node on the same block height to make reasoning about timeouts easier
	connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);

	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);

	// Force close downstream with timeout
-	nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
-	check_added_monitors!(nodes[1], 1);
-	check_closed_broadcast!(nodes[1], true);
-
-	connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
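+	// Connect enough blocks for the downstream HTLC to expire; nodes[1]'s ChannelMonitor then
+	// force-closes chan_2 on its own and broadcasts its commitment and HTLC-timeout transactions.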
+	let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1;
+	connect_blocks(&nodes[1], timeout_blocks);
	let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
-	check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false,
+	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
		&[nodes[2].node.get_our_node_id(); 1], 100_000);
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);

	// Nothing is confirmed for a while
-	connect_blocks(&nodes[1], MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - TIMEOUT_FAIL_BACK_BUFFER);
+	// We subtract `LATENCY_GRACE_PERIOD_BLOCKS` once because we already confirmed these blocks
+	// to force-close downstream, and once more because it's also used as the buffer when failing
+	// upstream.
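+	// (E.g., if MIN_CLTV_EXPIRY_DELTA is 72 and LATENCY_GRACE_PERIOD_BLOCKS is 3, this works
+	// out to 72 - 3 - 3 = 66 blocks.)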
+	let upstream_timeout_blocks =
+		MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - LATENCY_GRACE_PERIOD_BLOCKS;
+	connect_blocks(&nodes[1], upstream_timeout_blocks);
+
+	// Connect blocks for nodes[0] to make sure they don't go on-chain
+	connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks);

	// Check that nodes[1] fails the HTLC upstream
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+		vec![HTLCDestination::NextHopChannel {
+			node_id: Some(nodes[2].node.get_our_node_id()),
+			channel_id: chan_2.2
+		}]);
	check_added_monitors!(nodes[1], 1);
-	let events = nodes[1].node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), 1);
-	let (update_fail, commitment_signed) = match events[0] {
-		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
-			assert!(update_add_htlcs.is_empty());
-			assert!(update_fulfill_htlcs.is_empty());
-			assert_eq!(update_fail_htlcs.len(), 1);
-			assert!(update_fail_malformed_htlcs.is_empty());
-			assert!(update_fee.is_none());
-			(update_fail_htlcs[0].clone(), commitment_signed.clone())
-		},
-		_ => panic!("Unexpected event"),
-	};
+	let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates;

-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail);
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
-	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().blamed_chan_closed(true));
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+		PaymentFailedConditions::new().blamed_chan_closed(true));

	// Make sure we handle possible duplicate fails or extra messages after failing back
	match post_fail_back_action {
		PostFailBackAction::TimeoutOnChain => {
			// Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again
			mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment
			mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout
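+			// The HTLC-timeout claim needs ANTI_REORG_DELAY confirmations before the monitor
+			// reports the failed HTLC back to the channel manager.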
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			// Expect handling another fail back event, but the HTLC is already gone
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+				vec![HTLCDestination::NextHopChannel {
+					node_id: Some(nodes[2].node.get_our_node_id()),
+					channel_id: chan_2.2
+				}]);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		},
		PostFailBackAction::ClaimOnChain => {
			nodes[2].node.claim_funds(payment_preimage);
@@ -2302,17 +2315,21 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
			mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment
			mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success
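+			// nodes[1] now learns the preimage on chain, but the HTLC was already failed back
+			// upstream, so no further update messages should be generated.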
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		},
		PostFailBackAction::FailOffChain => {
			nodes[2].node.fail_htlc_backwards(&payment_hash);
-			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2],
+				vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
			check_added_monitors!(nodes[2], 1);
			let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
			let update_fail = commitment_update.update_fail_htlcs[0].clone();

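+			// chan_2 is already closed from nodes[1]'s point of view, so the late fail from
+			// nodes[2] should only be answered with an error for that channel.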
			nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail);
			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		},
		PostFailBackAction::ClaimOffChain => {
			nodes[2].node.claim_funds(payment_preimage);
@@ -2324,6 +2341,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
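+			// As with the off-chain fail, the late fulfill arrives on a channel nodes[1] has
+			// already closed, so it should only produce an error message for chan_2.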
			nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &update_fulfill);
			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		},
	};
}