diff --git a/src/ln/channel.rs b/src/ln/channel.rs index 1d736d41d6f..188e1c7256d 100644 --- a/src/ln/channel.rs +++ b/src/ln/channel.rs @@ -98,22 +98,27 @@ impl ChannelKeys { } } -#[derive(PartialEq)] +enum InboundHTLCRemovalReason { + FailRelay(msgs::OnionErrorPacket), + FailMalformed(([u8; 32], u16)), + Fulfill([u8; 32]), +} + enum InboundHTLCState { /// Added by remote, to be included in next local commitment tx. - RemoteAnnounced, + RemoteAnnounced(PendingHTLCStatus), /// Included in a received commitment_signed message (implying we've revoke_and_ack'ed it), but /// the remote side hasn't yet revoked their previous state, which we need them to do before we /// accept this HTLC. Implies AwaitingRemoteRevoke. /// We also have not yet included this HTLC in a commitment_signed message, and are waiting on /// a remote revoke_and_ack on a previous state before we can do so. - AwaitingRemoteRevokeToAnnounce, + AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus), /// Included in a received commitment_signed message (implying we've revoke_and_ack'ed it), but /// the remote side hasn't yet revoked their previous state, which we need them to do before we /// accept this HTLC. Implies AwaitingRemoteRevoke. /// We have included this HTLC in our latest commitment_signed and are now just waiting on a /// revoke_and_ack. - AwaitingAnnouncedRemoteRevoke, + AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus), Committed, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -125,7 +130,7 @@ enum InboundHTLCState { /// ChannelMonitor::would_broadcast_at_height) so we actually remove the HTLC from our own /// local state before then, once we're sure that the next commitment_signed and /// ChannelMonitor::provide_latest_local_commitment_tx_info will not include this HTLC. - LocalRemoved, + LocalRemoved(InboundHTLCRemovalReason), } struct InboundHTLCOutput { @@ -134,13 +139,8 @@ struct InboundHTLCOutput { cltv_expiry: u32, payment_hash: [u8; 32], state: InboundHTLCState, - /// If we're in LocalRemoved, set to true if we fulfilled the HTLC, and can claim money - local_removed_fulfilled: bool, - /// state pre-Committed implies pending_forward_state, otherwise it must be None - pending_forward_state: Option<PendingHTLCStatus>, } -#[derive(PartialEq)] enum OutboundHTLCState { /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -152,7 +152,9 @@ enum OutboundHTLCState { /// allowed to remove it, the "can only be removed once committed on both sides" requirement /// doesn't matter to us and its up to them to enforce it, worst-case they jump ahead but /// we'll never get out of sync). - LocalAnnounced, + /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up + /// OutboundHTLCOutput's size just for a temporary bit + LocalAnnounced(Box<msgs::OnionPacket>), Committed, /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize /// the change (though they'll need to revoke before we fail the payment).
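The hunks above move the per-state data into the enum variants themselves and drop `#[derive(PartialEq)]`, so call sites now pattern-match instead of comparing states with `==`. A minimal, self-contained sketch of that pattern, using simplified stand-in types rather than the actual channel.rs definitions:

```rust
// Stand-ins for InboundHTLCState / InboundHTLCRemovalReason; the String plays
// the role of the pending forward decision and Vec<u8> the onion error packet.
#[allow(dead_code)]
enum RemovalReason {
    Fulfill([u8; 32]),
    FailRelay(Vec<u8>),
}

#[allow(dead_code)]
enum InboundState {
    RemoteAnnounced(String),
    Committed,
    LocalRemoved(RemovalReason),
}

// Without PartialEq, `state == InboundState::Committed` no longer compiles;
// matching replaces it and hands back the data attached to the state.
fn fulfilled_offset_msat(state: &InboundState, amount_msat: u64) -> i64 {
    match state {
        InboundState::LocalRemoved(RemovalReason::Fulfill(_)) => amount_msat as i64,
        _ => 0,
    }
}

fn main() {
    let removed = InboundState::LocalRemoved(RemovalReason::Fulfill([0u8; 32]));
    assert_eq!(fulfilled_offset_msat(&removed, 1_000), 1_000);
    assert_eq!(fulfilled_offset_msat(&InboundState::Committed, 1_000), 0);
}
```

This mirrors the value_to_self_msat_offset handling later in the diff: the fulfillment data only exists when the state says the HTLC was removed by fulfilling it, so the invalid combinations the old `local_removed_fulfilled` flag allowed are no longer representable.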
@@ -791,21 +793,23 @@ impl Channel { for ref htlc in self.pending_inbound_htlcs.iter() { let include = match htlc.state { - InboundHTLCState::RemoteAnnounced => !generated_by_local, - InboundHTLCState::AwaitingRemoteRevokeToAnnounce => !generated_by_local, - InboundHTLCState::AwaitingAnnouncedRemoteRevoke => true, + InboundHTLCState::RemoteAnnounced(_) => !generated_by_local, + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => !generated_by_local, + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => true, InboundHTLCState::Committed => true, - InboundHTLCState::LocalRemoved => !generated_by_local, + InboundHTLCState::LocalRemoved(_) => !generated_by_local, }; if include { add_htlc_output!(htlc, false); remote_htlc_total_msat += htlc.amount_msat; } else { - match htlc.state { - InboundHTLCState::LocalRemoved => { - if generated_by_local && htlc.local_removed_fulfilled { - value_to_self_msat_offset += htlc.amount_msat as i64; + match &htlc.state { + &InboundHTLCState::LocalRemoved(ref reason) => { + if generated_by_local { + if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + value_to_self_msat_offset += htlc.amount_msat as i64; + } } }, _ => {}, @@ -815,7 +819,7 @@ impl Channel { for ref htlc in self.pending_outbound_htlcs.iter() { let include = match htlc.state { - OutboundHTLCState::LocalAnnounced => generated_by_local, + OutboundHTLCState::LocalAnnounced(_) => generated_by_local, OutboundHTLCState::Committed => true, OutboundHTLCState::RemoteRemoved => generated_by_local, OutboundHTLCState::AwaitingRemoteRevokeToRemove => generated_by_local, @@ -1131,7 +1135,8 @@ impl Channel { for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { assert_eq!(htlc.payment_hash, payment_hash_calc); - if htlc.state != InboundHTLCState::Committed { + if let InboundHTLCState::Committed = htlc.state { + } else { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); // Don't return in release mode here so that we can update channel_monitor } @@ -1176,12 +1181,12 @@ impl Channel { { let htlc = &mut self.pending_inbound_htlcs[pending_idx]; - if htlc.state != InboundHTLCState::Committed { + if let InboundHTLCState::Committed = htlc.state { + } else { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); return Ok((None, Some(self.channel_monitor.clone()))); } - htlc.state = InboundHTLCState::LocalRemoved; - htlc.local_removed_fulfilled = true; + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); } Ok((Some(msgs::UpdateFulfillHTLC { @@ -1213,7 +1218,8 @@ impl Channel { let mut pending_idx = std::usize::MAX; for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { - if htlc.state != InboundHTLCState::Committed { + if let InboundHTLCState::Committed = htlc.state { + } else { debug_assert!(false, "Have an inbound HTLC we tried to fail before it was fully committed to"); return Err(HandleError{err: "Unable to find a pending HTLC which matched the given HTLC ID", action: Some(msgs::ErrorAction::IgnoreError)}); } @@ -1253,8 +1259,7 @@ impl Channel { { let htlc = &mut self.pending_inbound_htlcs[pending_idx]; - htlc.state = InboundHTLCState::LocalRemoved; - htlc.local_removed_fulfilled = false; + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); } Ok(Some(msgs::UpdateFailHTLC { @@ -1533,9 +1538,7 @@ impl Channel { amount_msat: 
msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced, - local_removed_fulfilled: false, - pending_forward_state: Some(pending_forward_state), + state: InboundHTLCState::RemoteAnnounced(pending_forward_state), }); Ok(()) @@ -1554,7 +1557,7 @@ impl Channel { } }; match htlc.state { - OutboundHTLCState::LocalAnnounced => + OutboundHTLCState::LocalAnnounced(_) => return Err(ChannelError::Close("Remote tried to fulfill HTLC before it had been committed")), OutboundHTLCState::Committed => { htlc.state = OutboundHTLCState::RemoteRemoved; @@ -1675,13 +1678,16 @@ impl Channel { self.channel_monitor.provide_latest_local_commitment_tx_info(local_commitment_tx.0, local_keys, self.feerate_per_kw, htlcs_and_sigs); for htlc in self.pending_inbound_htlcs.iter_mut() { - if htlc.state == InboundHTLCState::RemoteAnnounced { - htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce; + let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state { + Some(forward_info.clone()) + } else { None }; + if let Some(forward_info) = new_forward { + htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); need_our_commitment = true; } } for htlc in self.pending_outbound_htlcs.iter_mut() { - if htlc.state == OutboundHTLCState::RemoteRemoved { + if let OutboundHTLCState::RemoteRemoved = htlc.state { htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove; need_our_commitment = true; } @@ -1833,15 +1839,15 @@ impl Channel { let mut value_to_self_msat_diff: i64 = 0; // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug) self.pending_inbound_htlcs.retain(|htlc| { - if htlc.state == InboundHTLCState::LocalRemoved { - if htlc.local_removed_fulfilled { + if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { + if let &InboundHTLCRemovalReason::Fulfill(_) = reason { value_to_self_msat_diff += htlc.amount_msat as i64; } false } else { true } }); self.pending_outbound_htlcs.retain(|htlc| { - if htlc.state == OutboundHTLCState::AwaitingRemovedRemoteRevoke { + if let OutboundHTLCState::AwaitingRemovedRemoteRevoke = htlc.state { if let Some(reason) = htlc.fail_reason.clone() { // We really want take() here, but, again, non-mut ref :( revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); } else { @@ -1852,30 +1858,45 @@ impl Channel { } else { true } }); for htlc in self.pending_inbound_htlcs.iter_mut() { - if htlc.state == InboundHTLCState::AwaitingRemoteRevokeToAnnounce { - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke; - require_commitment = true; - } else if htlc.state == InboundHTLCState::AwaitingAnnouncedRemoteRevoke { - match htlc.pending_forward_state.take().unwrap() { - PendingHTLCStatus::Fail(fail_msg) => { - htlc.state = InboundHTLCState::LocalRemoved; - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => update_fail_htlcs.push(msg), - HTLCFailureMsg::Malformed(msg) => update_fail_malformed_htlcs.push(msg), + let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state { + true + } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state { + true + } else { false }; + if swap { + let mut state = InboundHTLCState::Committed; + mem::swap(&mut state, &mut htlc.state); + + if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state { + htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info); + require_commitment = 
true; + } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state { + match forward_info { + PendingHTLCStatus::Fail(fail_msg) => { + require_commitment = true; + match fail_msg { + HTLCFailureMsg::Relay(msg) => { + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone())); + update_fail_htlcs.push(msg) + }, + HTLCFailureMsg::Malformed(msg) => { + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code))); + update_fail_malformed_htlcs.push(msg) + }, + } + }, + PendingHTLCStatus::Forward(forward_info) => { + to_forward_infos.push((forward_info, htlc.htlc_id)); + htlc.state = InboundHTLCState::Committed; } - }, - PendingHTLCStatus::Forward(forward_info) => { - to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed; } } } } for htlc in self.pending_outbound_htlcs.iter_mut() { - if htlc.state == OutboundHTLCState::LocalAnnounced { + if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { htlc.state = OutboundHTLCState::Committed; - } else if htlc.state == OutboundHTLCState::AwaitingRemoteRevokeToRemove { + } else if let OutboundHTLCState::AwaitingRemoteRevokeToRemove = htlc.state { htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke; require_commitment = true; } @@ -1985,22 +2006,21 @@ impl Channel { let mut inbound_drop_count = 0; self.pending_inbound_htlcs.retain(|htlc| { match htlc.state { - InboundHTLCState::RemoteAnnounced => { + InboundHTLCState::RemoteAnnounced(_) => { // They sent us an update_add_htlc but we never got the commitment_signed. // We'll tell them what commitment_signed we're expecting next and they'll drop // this HTLC accordingly inbound_drop_count += 1; false }, - InboundHTLCState::AwaitingRemoteRevokeToAnnounce|InboundHTLCState::AwaitingAnnouncedRemoteRevoke => { - // Same goes for AwaitingRemoteRevokeToRemove and AwaitingRemovedRemoteRevoke + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => { // We received a commitment_signed updating this HTLC and (at least hopefully) // sent a revoke_and_ack (which we can re-transmit) and have heard nothing // in response to it yet, so don't touch it. true }, InboundHTLCState::Committed => true, - InboundHTLCState::LocalRemoved => { // Same goes for LocalAnnounced + InboundHTLCState::LocalRemoved(_) => { // We (hopefully) sent a commitment_signed updating this HTLC (which we can // re-transmit if needed) and they may have even sent a revoke_and_ack back // (that we missed). Keep this around for now and if they tell us they missed @@ -2011,7 +2031,7 @@ impl Channel { }); for htlc in self.pending_outbound_htlcs.iter_mut() { - if htlc.state == OutboundHTLCState::RemoteRemoved { + if let OutboundHTLCState::RemoteRemoved = htlc.state { // They sent us an update to remove this but haven't yet sent the corresponding // commitment_signed, we need to move it back to Committed and they can re-send // the update upon reconnection. 
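A note on the `mem::swap` dance in the revoke_and_ack hunk above: because `htlc.state` is only reachable through a mutable borrow, the owned `forward_info` inside it cannot be moved out directly, so the code parks a placeholder `Committed` value in the field first and then consumes the old state by value. A standalone sketch of that borrow-checker workaround, with simplified stand-in types and using `mem::replace` (equivalent to swapping with a local):

```rust
use std::mem;

// Simplified stand-in for InboundHTLCState; the String plays the role of forward_info.
enum State {
    AwaitingAnnounce(String),
    Committed,
}

// Take ownership of the data inside AwaitingAnnounce while holding only &mut State.
fn promote(state: &mut State) -> Option<String> {
    // We can't move forward_info out of *state directly, so park a placeholder
    // there first, then consume the old value that replace() handed back.
    match mem::replace(state, State::Committed) {
        State::AwaitingAnnounce(forward_info) => Some(forward_info), // state stays Committed
        other => {
            *state = other; // not the variant we wanted: restore the original
            None
        }
    }
}

fn main() {
    let mut s = State::AwaitingAnnounce("forward_info".to_string());
    assert_eq!(promote(&mut s).as_deref(), Some("forward_info"));
    assert!(matches!(s, State::Committed));
    assert!(promote(&mut s).is_none());
}
```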
@@ -2059,40 +2079,49 @@ impl Channel { if msg.next_local_commitment_number == 0 || msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number == 0 || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER { - return Err(ChannelError::Close("Peer send garbage channel_reestablish")); + return Err(ChannelError::Close("Peer sent a garbage channel_reestablish")); } // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all // remaining cases either succeed or ErrorMessage-fail). self.channel_state &= !(ChannelState::PeerDisconnected as u32); - let mut required_revoke = None; - if msg.next_remote_commitment_number == INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number { + let required_revoke = if msg.next_remote_commitment_number == INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number { // Remote isn't waiting on any RevokeAndACK from us! // Note that if we need to repeat our FundingLocked we'll do that in the next if block. + None } else if msg.next_remote_commitment_number == (INITIAL_COMMITMENT_NUMBER - 1) - self.cur_local_commitment_transaction_number { let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number)); let per_commitment_secret = chan_utils::build_commitment_secret(self.local_keys.commitment_seed, self.cur_local_commitment_transaction_number + 2); - required_revoke = Some(msgs::RevokeAndACK { + Some(msgs::RevokeAndACK { channel_id: self.channel_id, per_commitment_secret, next_per_commitment_point, - }); + }) } else { return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction")); - } + }; - if msg.next_local_commitment_number == INITIAL_COMMITMENT_NUMBER - self.cur_remote_commitment_transaction_number { - if msg.next_remote_commitment_number == INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number { - log_debug!(self, "Reconnected channel {} with no lost commitment txn", log_bytes!(self.channel_id())); - if msg.next_local_commitment_number == 1 && msg.next_remote_commitment_number == 1 { - let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number); - let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret); - return Ok((Some(msgs::FundingLocked { - channel_id: self.channel_id(), - next_per_commitment_point: next_per_commitment_point, - }), None, None, None)); - } + // We increment cur_remote_commitment_transaction_number only upon receipt of + // revoke_and_ack, not on sending commitment_signed, so we add one if we have + // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten + // the corresponding revoke_and_ack back yet.
+ let our_next_remote_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_remote_commitment_transaction_number + if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }; + + let resend_funding_locked = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number == 1 { + let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number); + let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret); + Some(msgs::FundingLocked { + channel_id: self.channel_id(), + next_per_commitment_point: next_per_commitment_point, + }) + } else { None }; + + if msg.next_local_commitment_number == our_next_remote_commitment_number { + if required_revoke.is_some() { + log_debug!(self, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id())); + } else { + log_debug!(self, "Reconnected channel {} with no loss", log_bytes!(self.channel_id())); } if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { @@ -2110,20 +2139,69 @@ impl Channel { panic!("Got non-channel-failing result from free_holding_cell_htlcs"); } }, - Ok(Some((commitment_update, channel_monitor))) => return Ok((None, required_revoke, Some(commitment_update), Some(channel_monitor))), - Ok(None) => return Ok((None, required_revoke, None, None)), + Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor))), + Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None)), } } else { - return Ok((None, required_revoke, None, None)); + return Ok((resend_funding_locked, required_revoke, None, None)); + } + } else if msg.next_local_commitment_number == our_next_remote_commitment_number - 1 { + if required_revoke.is_some() { + log_debug!(self, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id())); + } else { + log_debug!(self, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id())); + } + let mut update_add_htlcs = Vec::new(); + let mut update_fulfill_htlcs = Vec::new(); + let mut update_fail_htlcs = Vec::new(); + let mut update_fail_malformed_htlcs = Vec::new(); + + for htlc in self.pending_outbound_htlcs.iter() { + if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state { + update_add_htlcs.push(msgs::UpdateAddHTLC { + channel_id: self.channel_id(), + htlc_id: htlc.htlc_id, + amount_msat: htlc.amount_msat, + payment_hash: htlc.payment_hash, + cltv_expiry: htlc.cltv_expiry, + onion_routing_packet: (**onion_packet).clone(), + }); + } } - } else if msg.next_local_commitment_number == (INITIAL_COMMITMENT_NUMBER - 1) - self.cur_remote_commitment_transaction_number { - return Ok((None, required_revoke, + + for htlc in self.pending_inbound_htlcs.iter() { + if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { + match reason { + &InboundHTLCRemovalReason::FailRelay(ref err_packet) => { + update_fail_htlcs.push(msgs::UpdateFailHTLC { + channel_id: self.channel_id(), + htlc_id: htlc.htlc_id, + reason: err_packet.clone() + }); + }, + &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => { + update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC { + channel_id: self.channel_id(), + htlc_id: htlc.htlc_id, + sha256_of_onion: sha256_of_onion.clone(), + failure_code: 
failure_code.clone(), + }); + }, + &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => { + update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC { + channel_id: self.channel_id(), + htlc_id: htlc.htlc_id, + payment_preimage: payment_preimage.clone(), + }); + }, + } + } + } + + return Ok((resend_funding_locked, required_revoke, Some(msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, + update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, + update_fee: None, //TODO: We need to support re-generating any update_fees in the last commitment_signed! commitment_signed: self.send_commitment_no_state_update().expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0, }), None)); } else { @@ -2141,7 +2219,7 @@ impl Channel { return Ok((None, None, Vec::new())); } for htlc in self.pending_inbound_htlcs.iter() { - if htlc.state == InboundHTLCState::RemoteAnnounced { + if let InboundHTLCState::RemoteAnnounced(_) = htlc.state { return Err(HandleError{err: "Got shutdown with remote pending HTLCs", action: None}); } } @@ -2206,7 +2284,7 @@ impl Channel { } }); for htlc in self.pending_outbound_htlcs.iter() { - if htlc.state == OutboundHTLCState::LocalAnnounced { + if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { return Ok((None, None, dropped_outbound_htlcs)); } } @@ -2828,7 +2906,7 @@ impl Channel { amount_msat: amount_msat, payment_hash: payment_hash.clone(), cltv_expiry: cltv_expiry, - state: OutboundHTLCState::LocalAnnounced, + state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())), source, fail_reason: None, }); @@ -2862,7 +2940,7 @@ impl Channel { } let mut have_updates = self.pending_update_fee.is_some(); for htlc in self.pending_outbound_htlcs.iter() { - if htlc.state == OutboundHTLCState::LocalAnnounced { + if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { have_updates = true; } if have_updates { break; } @@ -2878,12 +2956,15 @@ impl Channel { // fail to generate this, we still are at least at a position where upgrading their status // is acceptable. for htlc in self.pending_inbound_htlcs.iter_mut() { - if htlc.state == InboundHTLCState::AwaitingRemoteRevokeToAnnounce { - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke; + let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state { + Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) + } else { None }; + if let Some(state) = new_state { + htlc.state = state; } } for htlc in self.pending_outbound_htlcs.iter_mut() { - if htlc.state == OutboundHTLCState::AwaitingRemoteRevokeToRemove { + if let OutboundHTLCState::AwaitingRemoteRevokeToRemove = htlc.state { htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke; } } @@ -2952,7 +3033,7 @@ impl Channel { /// holding cell HTLCs for payment failure. pub fn get_shutdown(&mut self) -> Result<(msgs::Shutdown, Vec<(HTLCSource, [u8; 32])>), APIError> { for htlc in self.pending_outbound_htlcs.iter() { - if htlc.state == OutboundHTLCState::LocalAnnounced { + if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. 
Process pending events first"}); } } @@ -3188,8 +3269,6 @@ mod tests { cltv_expiry: 500, payment_hash: [0; 32], state: InboundHTLCState::Committed, - local_removed_fulfilled: false, - pending_forward_state: None, }; let mut sha = Sha256::new(); sha.input(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()); @@ -3203,8 +3282,6 @@ mod tests { cltv_expiry: 501, payment_hash: [0; 32], state: InboundHTLCState::Committed, - local_removed_fulfilled: false, - pending_forward_state: None, }; let mut sha = Sha256::new(); sha.input(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()); @@ -3248,8 +3325,6 @@ mod tests { cltv_expiry: 504, payment_hash: [0; 32], state: InboundHTLCState::Committed, - local_removed_fulfilled: false, - pending_forward_state: None, }; let mut sha = Sha256::new(); sha.input(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()); diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs index 9bdbc804082..9cca16a4278 100644 --- a/src/ln/channelmanager.rs +++ b/src/ln/channelmanager.rs @@ -2569,6 +2569,12 @@ mod tests { } fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { + let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat); + let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked); + (announcement, as_update, bs_update, channel_id, tx) + } + + fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) { node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap(); let events_1 = node_a.node.get_and_clear_pending_events(); @@ -2641,47 +2647,53 @@ mod tests { _ => panic!("Unexpected event"), }; - confirm_transaction(&node_a.chain_monitor, &tx, chan_id); - let events_5 = node_a.node.get_and_clear_pending_events(); + confirm_transaction(&node_b.chain_monitor, &tx, chan_id); + let events_5 = node_b.node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => { - assert_eq!(*node_id, node_b.node.get_our_node_id()); + assert_eq!(*node_id, node_a.node.get_our_node_id()); assert!(announcement_sigs.is_none()); - node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), msg).unwrap() + node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap() }, _ => panic!("Unexpected event"), }; let channel_id; - confirm_transaction(&node_b.chain_monitor, &tx, chan_id); - let events_6 = node_b.node.get_and_clear_pending_events(); + confirm_transaction(&node_a.chain_monitor, &tx, chan_id); + let events_6 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_6.len(), 1); - let as_announcement_sigs = match events_6[0] { + (match events_6[0] { Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => { - assert_eq!(*node_id, node_a.node.get_our_node_id()); channel_id = msg.channel_id.clone(); - let as_announcement_sigs = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap().unwrap(); - node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), 
&(*announcement_sigs).clone().unwrap()).unwrap(); - as_announcement_sigs + assert_eq!(*node_id, node_b.node.get_our_node_id()); + (msg.clone(), announcement_sigs.clone().unwrap()) }, _ => panic!("Unexpected event"), + }, channel_id, tx) + } + + fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) { + let bs_announcement_sigs = { + let bs_announcement_sigs = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap().unwrap(); + node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap(); + bs_announcement_sigs }; - let events_7 = node_a.node.get_and_clear_pending_events(); + let events_7 = node_b.node.get_and_clear_pending_events(); assert_eq!(events_7.len(), 1); - let (announcement, as_update) = match events_7[0] { + let (announcement, bs_update) = match events_7[0] { Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { (msg, update_msg) }, _ => panic!("Unexpected event"), }; - node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_announcement_sigs).unwrap(); - let events_8 = node_b.node.get_and_clear_pending_events(); + node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap(); + let events_8 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_8.len(), 1); - let bs_update = match events_8[0] { + let as_update = match events_8[0] { Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { assert!(*announcement == *msg); update_msg @@ -2691,7 +2703,7 @@ mod tests { *node_a.network_chan_count.borrow_mut() += 1; - ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone(), channel_id, tx) + ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone()) } fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { @@ -4662,7 +4674,9 @@ mod tests { assert_eq!(channel_state.short_to_id.len(), 0); } - fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize)) { + /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas + /// for claims/fails they are separated out.
+ fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) { let reestablish_1 = node_a.node.peer_connected(&node_b.node.get_our_node_id()); let reestablish_2 = node_b.node.peer_connected(&node_a.node.get_our_node_id()); @@ -4670,7 +4684,7 @@ mod tests { for msg in reestablish_1 { resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap()); } - if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 { + if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { check_added_monitors!(node_b, 1); } else { check_added_monitors!(node_b, 0); @@ -4680,29 +4694,43 @@ mod tests { for msg in reestablish_2 { resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap()); } - if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 { + if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { check_added_monitors!(node_a, 1); } else { check_added_monitors!(node_a, 0); } // We dont yet support both needing updates, as that would require a different commitment dance: - assert!((pending_htlc_claims.0 == 0 && pending_htlc_fails.0 == 0) || (pending_htlc_claims.1 == 0 && pending_htlc_fails.1 == 0)); + assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) || + (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0)); for chan_msgs in resp_1.drain(..) { if pre_all_htlcs { - let _announcement_sigs_opt = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap(); + let a = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()); + let _announcement_sigs_opt = a.unwrap(); //TODO: Test announcement_sigs re-sending when we've implemented it } else { assert!(chan_msgs.0.is_none()); } - assert!(chan_msgs.1.is_none()); - if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 { + if pending_raa.0 { + assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none()); + check_added_monitors!(node_a, 1); + } else { + assert!(chan_msgs.1.is_none()); + } + if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { let commitment_update = chan_msgs.2.unwrap(); - assert!(commitment_update.update_add_htlcs.is_empty()); // We can't relay while disconnected - assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0); - assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0); + if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed + assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize); + } else { + assert!(commitment_update.update_add_htlcs.is_empty()); + } + assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0); + assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0); assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); + for update_add in commitment_update.update_add_htlcs { + node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap(); + } for update_fulfill in 
commitment_update.update_fulfill_htlcs { node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap(); } @@ -4710,7 +4738,15 @@ mod tests { node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap(); } - commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false); + if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed + commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false); + } else { + let (as_revoke_and_ack, as_commitment_signed) = node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + check_added_monitors!(node_a, 1); + assert!(as_commitment_signed.is_none()); + assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(node_b, 1); + } } else { assert!(chan_msgs.2.is_none()); } @@ -4723,13 +4759,23 @@ mod tests { } else { assert!(chan_msgs.0.is_none()); } - assert!(chan_msgs.1.is_none()); - if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 { + if pending_raa.1 { + assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none()); + check_added_monitors!(node_b, 1); + } else { + assert!(chan_msgs.1.is_none()); + } + if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { let commitment_update = chan_msgs.2.unwrap(); - assert!(commitment_update.update_add_htlcs.is_empty()); // We can't relay while disconnected - assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0); - assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0); + if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed + assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize); + } + assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0); + assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0); assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); + for update_add in commitment_update.update_add_htlcs { + node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap(); + } for update_fulfill in commitment_update.update_fulfill_htlcs { node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap(); } @@ -4737,7 +4783,15 @@ mod tests { node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap(); } - commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false); + if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed + commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false); + } else { + let (bs_revoke_and_ack, bs_commitment_signed) = node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + check_added_monitors!(node_b, 1); + assert!(bs_commitment_signed.is_none()); + assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(node_a, 1); + } } else { assert!(chan_msgs.2.is_none()); } @@ -4753,7 +4807,7 @@ mod tests { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); 
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0)); + reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; @@ -4762,7 +4816,7 @@ mod tests { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0)); + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; @@ -4775,7 +4829,7 @@ mod tests { claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3); fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5); - reconnect_nodes(&nodes[0], &nodes[1], false, (1, 0), (1, 0)); + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -4797,6 +4851,193 @@ mod tests { fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); } + fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { + // Test that we can reconnect when in-flight HTLC updates get dropped + let mut nodes = create_network(2); + if messages_delivered == 0 { + create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); + // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect) + } else { + create_announced_chan_between_nodes(&nodes, 0, 1); + } + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + let payment_event = { + nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id); + + if messages_delivered < 2 { + // Drop the payment_event messages, and let them get re-generated in reconnect_nodes! 
+ } else { + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + check_added_monitors!(nodes[1], 1); + + if messages_delivered >= 3 { + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + if messages_delivered >= 4 { + let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed.unwrap()).unwrap(); + assert!(as_commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + if messages_delivered >= 5 { + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + } + } + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + if messages_delivered < 2 { + // Even if the funding_locked messages get exchanged, as long as nothing further was + // received on either side, both sides will need to resend them. + reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false)); + } else if messages_delivered == 2 { + // nodes[0] still wants its RAA + commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false)); + } else if messages_delivered == 3 { + // nodes[0] still wants its commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } else if messages_delivered == 4 { + // nodes[1] still wants its final RAA + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true)); + } else if messages_delivered == 5 { + // Everything was delivered... + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + let events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + let events_2 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(payment_hash_1, *payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + } + + nodes[1].node.claim_funds(payment_preimage_1); + check_added_monitors!(nodes[1], 1); + + let events_3 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + let (update_fulfill_htlc, commitment_signed) = match events_3[0] { + Event::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone()) + }, + _ => panic!("Unexpected event"), + }; + + if messages_delivered >= 1 { + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap(); + + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + match events_4[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(payment_preimage_1, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + if messages_delivered >= 2 { + let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + + if messages_delivered >= 3 { + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + + if messages_delivered >= 4 { + let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); + assert!(bs_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + if messages_delivered >= 5 { + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + } + } + } + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + if messages_delivered < 2 { + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false)); + //TODO: Deduplicate PaymentSent events, then enable this if: + //if messages_delivered < 1 { + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + match events_4[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(payment_preimage_1, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + //} + } else if messages_delivered == 2 { + // nodes[0] still wants its RAA + commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true)); + } else if messages_delivered == 3 { + // nodes[0] still wants its commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false)); + } 
else if messages_delivered == 4 { + // nodes[1] still wants its final RAA + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false)); + } else if messages_delivered == 5 { + // Everything was delivered... + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + // Channel should still work fine... + let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_drop_messages_peer_disconnect_a() { + do_test_drop_messages_peer_disconnect(0); + do_test_drop_messages_peer_disconnect(1); + do_test_drop_messages_peer_disconnect(2); + } + + #[test] + fn test_drop_messages_peer_disconnect_b() { + do_test_drop_messages_peer_disconnect(3); + do_test_drop_messages_peer_disconnect(4); + do_test_drop_messages_peer_disconnect(5); + } + #[test] fn test_invalid_channel_announcement() { //Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customed channel_announcement msgs diff --git a/src/ln/msgs.rs b/src/ln/msgs.rs index 9ff62de38c6..5b3d57aae02 100644 --- a/src/ln/msgs.rs +++ b/src/ln/msgs.rs @@ -224,6 +224,7 @@ pub struct FundingSigned { } /// A funding_locked message to be sent or received from a peer +#[derive(Clone)] pub struct FundingLocked { pub(crate) channel_id: [u8; 32], pub(crate) next_per_commitment_point: PublicKey, diff --git a/src/util/events.rs b/src/util/events.rs index 22721729be1..a317f076658 100644 --- a/src/util/events.rs +++ b/src/util/events.rs @@ -60,6 +60,8 @@ pub enum Event { }, /// Indicates an outbound payment we made succeeded (ie it made it all the way to its target /// and we got back the payment preimage for it). + /// Note that duplicative PaymentSent Events may be generated - it is your responsibility to + /// deduplicate them by payment_preimage (which MUST be unique)! PaymentSent { /// The preimage to the hash given to ChannelManager::send_payment. /// Note that this serves as a payment receipt, if you wish to have such a thing, you must @@ -68,6 +70,8 @@ pub enum Event { }, /// Indicates an outbound payment we made failed. Probably some intermediary node dropped /// something. You may wish to retry with a different route. + /// Note that duplicative PaymentFailed Events may be generated - it is your responsibility to + /// deduplicate them by payment_hash (which MUST be unique)! PaymentFailed { /// The hash which was given to ChannelManager::send_payment. payment_hash: [u8; 32],
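Given the new caveat on PaymentSent/PaymentFailed above, event consumers are expected to deduplicate payment outcomes themselves. A minimal sketch of one way to do that; the `Event` enum here is a cut-down stand-in for illustration, not the crate's actual events type:

```rust
use std::collections::HashSet;

// Cut-down stand-in for the two payment-outcome events documented above.
enum Event {
    PaymentSent { payment_preimage: [u8; 32] },
    PaymentFailed { payment_hash: [u8; 32] },
}

#[derive(Default)]
struct PaymentTracker {
    seen_sent: HashSet<[u8; 32]>,
    seen_failed: HashSet<[u8; 32]>,
}

impl PaymentTracker {
    /// Returns true the first time a given payment outcome is seen and false
    /// for duplicates, e.g. events re-generated across a reconnect.
    fn handle(&mut self, event: &Event) -> bool {
        match event {
            Event::PaymentSent { payment_preimage } => self.seen_sent.insert(*payment_preimage),
            Event::PaymentFailed { payment_hash } => self.seen_failed.insert(*payment_hash),
        }
    }
}

fn main() {
    let mut tracker = PaymentTracker::default();
    let sent = Event::PaymentSent { payment_preimage: [1u8; 32] };
    assert!(tracker.handle(&sent));  // first delivery is acted upon
    assert!(!tracker.handle(&sent)); // duplicate after a reconnect is ignored
}
```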