diff --git a/src/ln/channel.rs b/src/ln/channel.rs
index 464bfc6e303..992807493e8 100644
--- a/src/ln/channel.rs
+++ b/src/ln/channel.rs
@@ -14,7 +14,7 @@ use crypto::digest::Digest;
 use crypto::hkdf::{hkdf_extract,hkdf_expand};
 
 use ln::msgs;
-use ln::msgs::{ErrorAction, HandleError};
+use ln::msgs::{ErrorAction, HandleError, RAACommitmentOrder};
 use ln::channelmonitor::ChannelMonitor;
 use ln::channelmanager::{PendingHTLCStatus, HTLCSource, PendingForwardHTLCInfo, HTLCFailReason, HTLCFailureMsg};
 use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment,HTLC_SUCCESS_TX_WEIGHT,HTLC_TIMEOUT_TX_WEIGHT};
@@ -296,6 +296,12 @@ pub(super) struct Channel {
 	cur_local_commitment_transaction_number: u64,
 	cur_remote_commitment_transaction_number: u64,
 	value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
+	/// Upon receipt of a channel_reestablish we have to figure out whether to send a
+	/// revoke_and_ack first or a commitment update first. Generally, we prefer to send
+	/// revoke_and_ack first, but if we had a pending commitment update of our own waiting on a
+	/// remote revoke when we received the latest commitment update from the remote we have to make
+	/// sure that commitment update gets resent first.
+	received_commitment_while_awaiting_raa: bool,
 	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
 	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
 	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
@@ -492,6 +498,8 @@ impl Channel {
 			cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 			cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 			value_to_self_msat: channel_value_satoshis * 1000 - push_msat,
+			received_commitment_while_awaiting_raa: false,
+
 			pending_inbound_htlcs: Vec::new(),
 			pending_outbound_htlcs: Vec::new(),
 			holding_cell_htlc_updates: Vec::new(),
@@ -647,6 +655,8 @@ impl Channel {
 			cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 			cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 			value_to_self_msat: msg.push_msat,
+			received_commitment_while_awaiting_raa: false,
+
 			pending_inbound_htlcs: Vec::new(),
 			pending_outbound_htlcs: Vec::new(),
 			holding_cell_htlc_updates: Vec::new(),
@@ -1696,6 +1706,7 @@ impl Channel {
 		self.cur_local_commitment_transaction_number -= 1;
 		self.last_local_commitment_txn = new_local_commitment_txn;
+		self.received_commitment_while_awaiting_raa = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) != 0;
 
 		let (our_commitment_signed, monitor_update) = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
 			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
@@ -1831,6 +1842,7 @@ impl Channel {
 		self.their_prev_commitment_point = self.their_cur_commitment_point;
 		self.their_cur_commitment_point = Some(msg.next_per_commitment_point);
 		self.cur_remote_commitment_transaction_number -= 1;
+		self.received_commitment_while_awaiting_raa = false;
 
 		let mut to_forward_infos = Vec::new();
 		let mut revoked_htlcs = Vec::new();
@@ -2072,7 +2084,7 @@ impl Channel {
 	/// May panic if some calls other than message-handling calls (which will all Err immediately)
 	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
-	pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitor>), ChannelError> {
+	pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitor>, RAACommitmentOrder), ChannelError> {
 		if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
 			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
 			// almost certainly indicates we are going to end up out-of-sync in some way, so we
@@ -2120,6 +2132,12 @@ impl Channel {
 			})
 		} else { None };
 
+		let order = if self.received_commitment_while_awaiting_raa {
+			RAACommitmentOrder::CommitmentFirst
+		} else {
+			RAACommitmentOrder::RevokeAndACKFirst
+		};
+
 		if msg.next_local_commitment_number == our_next_remote_commitment_number {
 			if required_revoke.is_some() {
 				log_debug!(self, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id()));
@@ -2142,11 +2160,11 @@ impl Channel {
 							panic!("Got non-channel-failing result from free_holding_cell_htlcs");
 						}
 					},
-					Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor))),
-					Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None)),
+					Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), order)),
+					Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, order)),
 				}
 			} else {
-				return Ok((resend_funding_locked, required_revoke, None, None));
+				return Ok((resend_funding_locked, required_revoke, None, None, order));
 			}
 		} else if msg.next_local_commitment_number == our_next_remote_commitment_number - 1 {
 			if required_revoke.is_some() {
@@ -2206,7 +2224,7 @@ impl Channel {
 				update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs,
 				update_fee: None, //TODO: We need to support re-generating any update_fees in the last commitment_signed!
 				commitment_signed: self.send_commitment_no_state_update().expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
-			}), None));
+			}), None, order));
 		} else {
 			return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction"));
 		}
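Aside: the decision encoded above is small enough to model in isolation. The sketch below mirrors the flag's lifecycle exactly as the hunks show it (set by an incoming commitment_signed while we are AwaitingRemoteRevoke, cleared by revoke_and_ack, read during channel_reestablish); `OrderTracker` and its method names are illustrative stand-ins, not rust-lightning API:

    // Standalone model of received_commitment_while_awaiting_raa (illustrative names only).
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

    #[derive(Default)]
    struct OrderTracker {
        awaiting_remote_revoke: bool,              // we sent a commitment update, awaiting their RAA
        received_commitment_while_awaiting_raa: bool,
    }

    impl OrderTracker {
        // Mirrors commitment_signed handling: remember whether their update raced our pending one.
        fn on_commitment_signed(&mut self) {
            self.received_commitment_while_awaiting_raa = self.awaiting_remote_revoke;
        }
        // Mirrors revoke_and_ack handling: once they revoke, normal ordering applies again.
        fn on_revoke_and_ack(&mut self) {
            self.received_commitment_while_awaiting_raa = false;
        }
        // Mirrors channel_reestablish: replay our commitment update first iff it predates their update.
        fn reestablish_order(&self) -> RAACommitmentOrder {
            if self.received_commitment_while_awaiting_raa {
                RAACommitmentOrder::CommitmentFirst
            } else {
                RAACommitmentOrder::RevokeAndACKFirst
            }
        }
    }

    fn main() {
        let mut t = OrderTracker::default();
        t.awaiting_remote_revoke = true; // our commitment update is pending on their RAA
        t.on_commitment_signed();        // their update arrives while we wait
        assert_eq!(t.reestablish_order(), RAACommitmentOrder::CommitmentFirst);
        t.on_revoke_and_ack();
        assert_eq!(t.reestablish_order(), RAACommitmentOrder::RevokeAndACKFirst);
    }

Because the flag lives in Channel state, it survives a disconnect, which is precisely what lets channel_reestablish recover the order in which the messages were originally sent.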
diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 1e396612cf8..774fdf6aa6a 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -26,7 +26,7 @@ use ln::channel::{Channel, ChannelError, ChannelKeys};
 use ln::channelmonitor::{ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
 use ln::router::{Route,RouteHop};
 use ln::msgs;
-use ln::msgs::{HandleError,ChannelMessageHandler};
+use ln::msgs::{ChannelMessageHandler, HandleError, RAACommitmentOrder};
 use util::{byte_utils, events, internal_traits, rng};
 use util::sha2::Sha256;
 use util::ser::{Readable, Writeable};
@@ -2168,7 +2168,7 @@ impl ChannelManager {
 		Ok(())
 	}
 
-	fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), MsgHandleErrInternal> {
+	fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder), MsgHandleErrInternal> {
 		let (res, chan_monitor) = {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			match channel_state.by_id.get_mut(&msg.channel_id) {
@@ -2176,9 +2176,9 @@ impl ChannelManager {
 					if chan.get_their_node_id() != *their_node_id {
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 					}
-					let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg)
+					let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg)
 						.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
-					(Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor)
+					(Ok((funding_locked, revoke_and_ack, commitment_update, order)), channel_monitor)
 				},
 				None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 			}
@@ -2448,7 +2448,7 @@ impl ChannelMessageHandler for ChannelManager {
 		handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
 	}
 
-	fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), HandleError> {
+	fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder), HandleError> {
 		handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
 	}
 
@@ -4938,6 +4938,7 @@ mod tests {
 			assert!(chan_msgs.0.is_none());
 		}
 		if pending_raa.0 {
+			assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
 			assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
 			check_added_monitors!(node_a, 1);
 		} else {
@@ -4985,6 +4986,7 @@ mod tests {
 			assert!(chan_msgs.0.is_none());
 		}
 		if pending_raa.1 {
+			assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
 			assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
 			check_added_monitors!(node_b, 1);
 		} else {
@@ -5316,6 +5318,141 @@ mod tests {
 		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
 	}
 
+	#[test]
+	fn test_drop_messages_peer_disconnect_dual_htlc() {
+		// Test that we can handle reconnecting when both sides of a channel have pending
+		// commitment_updates when we disconnect.
+		let mut nodes = create_network(2);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+		// Now try to send a second payment which will fail to send
+		let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+
+		nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let events_1 = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events_1.len(), 1);
+		match events_1[0] {
+			Event::UpdateHTLCs { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+
+		assert!(nodes[1].node.claim_funds(payment_preimage_1));
+		check_added_monitors!(nodes[1], 1);
+
+		let events_2 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_2.len(), 1);
+		match events_2[0] {
+			Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				assert!(update_add_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert!(update_fee.is_none());
+
+				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+				let events_3 = nodes[0].node.get_and_clear_pending_events();
+				assert_eq!(events_3.len(), 1);
+				match events_3[0] {
+					Event::PaymentSent { ref payment_preimage } => {
+						assert_eq!(*payment_preimage, payment_preimage_1);
+					},
+					_ => panic!("Unexpected event"),
+				}
+
+				let (_, commitment_update) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
+				assert!(commitment_update.is_none());
+				check_added_monitors!(nodes[0], 1);
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+		assert_eq!(reestablish_1.len(), 1);
+		let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+		assert_eq!(reestablish_2.len(), 1);
+
+		let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+		let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+
+		assert!(as_resp.0.is_none());
+		assert!(bs_resp.0.is_none());
+
+		assert!(bs_resp.1.is_none());
+		assert!(bs_resp.2.is_none());
+
+		assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst);
+
+		assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
+		assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
+		assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
+		assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+		assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
+
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap();
+		let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap();
+		assert!(bs_commitment_signed.is_none());
+		check_added_monitors!(nodes[1], 1);
+
+		let bs_second_commitment_signed = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap().unwrap();
+		assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
+		assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
+		assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
+		assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
+		assert!(bs_second_commitment_signed.update_fee.is_none());
+		check_added_monitors!(nodes[1], 1);
+
+		let as_commitment_signed = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap();
+		assert!(as_commitment_signed.update_add_htlcs.is_empty());
+		assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
+		assert!(as_commitment_signed.update_fail_htlcs.is_empty());
+		assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
+		assert!(as_commitment_signed.update_fee.is_none());
+		check_added_monitors!(nodes[0], 1);
+
+		let (as_revoke_and_ack, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap();
+		assert!(as_second_commitment_signed.is_none());
+		check_added_monitors!(nodes[0], 1);
+
+		let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap();
+		assert!(bs_third_commitment_signed.is_none());
+		check_added_monitors!(nodes[1], 1);
+
+		assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+		check_added_monitors!(nodes[1], 1);
+
+		let events_4 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_4.len(), 1);
+		match events_4[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+
+		nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+		nodes[1].node.process_pending_htlc_forwards();
+
+		let events_5 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_5.len(), 1);
+		match events_5[0] {
+			Event::PaymentReceived { ref payment_hash, amt: _ } => {
+				assert_eq!(payment_hash_2, *payment_hash);
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none());
+		check_added_monitors!(nodes[0], 1);
+
+		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	}
+
 	#[test]
 	fn test_invalid_channel_announcement() {
 		//Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customed channel_announcement msgs
diff --git a/src/ln/msgs.rs b/src/ln/msgs.rs
index 7b0254bddfd..bab2674e6fa 100644
--- a/src/ln/msgs.rs
+++ b/src/ln/msgs.rs
@@ -500,6 +500,18 @@ pub enum HTLCFailChannelUpdate {
 	}
 }
 
+/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
+/// be sent in the order they appear in the return value, however sometimes the order needs to be
+/// variable at runtime (eg handle_channel_reestablish needs to re-send messages in the order they
+/// were originally sent). In those cases, this enum is also returned.
+#[derive(Clone, PartialEq)]
+pub enum RAACommitmentOrder {
+	/// Send the CommitmentUpdate messages first
+	CommitmentFirst,
+	/// Send the RevokeAndACK message first
+	RevokeAndACKFirst,
+}
+
 /// A trait to describe an object which can receive channel messages.
 ///
 /// Messages MAY be called in parallel when they originate from different their_node_ids, however
@@ -554,7 +566,7 @@ pub trait ChannelMessageHandler : events::EventsProvider + Send + Sync {
 	/// Handle a peer reconnecting, possibly generating channel_reestablish message(s).
 	fn peer_connected(&self, their_node_id: &PublicKey) -> Vec<ChannelReestablish>;
 	/// Handle an incoming channel_reestablish message from the given peer.
-	fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish) -> Result<(Option<FundingLocked>, Option<RevokeAndACK>, Option<CommitmentUpdate>), HandleError>;
+	fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish) -> Result<(Option<FundingLocked>, Option<RevokeAndACK>, Option<CommitmentUpdate>, RAACommitmentOrder), HandleError>;
 
 	// Error:
 	/// Handle an incoming error message from the given peer.
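Aside: the contract of RAACommitmentOrder is that whoever receives the tuple must flush the revoke_and_ack and the commitment update in the indicated order. A minimal consumer could look like the sketch below, where `Msg` and `flush_reestablish` are hypothetical stand-ins for the real wire types and the encode_and_send_msg! plumbing (the peer_handler.rs hunk that follows is the crate's actual implementation):

    // Illustrative dispatch honoring RAACommitmentOrder; all names here are hypothetical.
    #[derive(Debug, PartialEq)]
    enum Msg { RevokeAndACK, CommitmentUpdate }

    #[derive(Clone, Copy)]
    enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

    // Queue the optional RAA and commitment update in the order the handler dictated.
    fn flush_reestablish(raa: Option<Msg>, cu: Option<Msg>, order: RAACommitmentOrder, out: &mut Vec<Msg>) {
        fn push_if(slot: Option<Msg>, out: &mut Vec<Msg>) {
            if let Some(m) = slot { out.push(m); }
        }
        match order {
            RAACommitmentOrder::RevokeAndACKFirst => { push_if(raa, out); push_if(cu, out); }
            RAACommitmentOrder::CommitmentFirst => { push_if(cu, out); push_if(raa, out); }
        }
    }

    fn main() {
        let mut wire = Vec::new();
        flush_reestablish(Some(Msg::RevokeAndACK), Some(Msg::CommitmentUpdate),
            RAACommitmentOrder::CommitmentFirst, &mut wire);
        assert_eq!(wire, vec![Msg::CommitmentUpdate, Msg::RevokeAndACK]);
    }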
diff --git a/src/ln/peer_handler.rs b/src/ln/peer_handler.rs
index 81ef41cf789..b629e7fa6b0 100644
--- a/src/ln/peer_handler.rs
+++ b/src/ln/peer_handler.rs
@@ -658,30 +658,44 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 								},
 								136 => {
 									let msg = try_potential_decodeerror!(msgs::ChannelReestablish::read(&mut reader));
-									let (funding_locked, revoke_and_ack, commitment_update) = try_potential_handleerror!(self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg));
+									let (funding_locked, revoke_and_ack, commitment_update, order) = try_potential_handleerror!(self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg));
 									if let Some(lock_msg) = funding_locked {
 										encode_and_send_msg!(lock_msg, 36);
 									}
-									if let Some(revoke_msg) = revoke_and_ack {
-										encode_and_send_msg!(revoke_msg, 133);
-									}
-									match commitment_update {
-										Some(resps) => {
-											for resp in resps.update_add_htlcs {
-												encode_and_send_msg!(resp, 128);
-											}
-											for resp in resps.update_fulfill_htlcs {
-												encode_and_send_msg!(resp, 130);
-											}
-											for resp in resps.update_fail_htlcs {
-												encode_and_send_msg!(resp, 131);
-											}
-											if let Some(resp) = resps.update_fee {
-												encode_and_send_msg!(resp, 134);
-											}
-											encode_and_send_msg!(resps.commitment_signed, 132);
+									macro_rules! handle_raa { () => {
+										if let Some(revoke_msg) = revoke_and_ack {
+											encode_and_send_msg!(revoke_msg, 133);
+										}
+									} }
+									macro_rules! handle_cu { () => {
+										match commitment_update {
+											Some(resps) => {
+												for resp in resps.update_add_htlcs {
+													encode_and_send_msg!(resp, 128);
+												}
+												for resp in resps.update_fulfill_htlcs {
+													encode_and_send_msg!(resp, 130);
+												}
+												for resp in resps.update_fail_htlcs {
+													encode_and_send_msg!(resp, 131);
+												}
+												if let Some(resp) = resps.update_fee {
+													encode_and_send_msg!(resp, 134);
+												}
+												encode_and_send_msg!(resps.commitment_signed, 132);
+											},
+											None => {},
+										}
+									} }
+									match order {
+										msgs::RAACommitmentOrder::RevokeAndACKFirst => {
+											handle_raa!();
+											handle_cu!();
+										},
+										msgs::RAACommitmentOrder::CommitmentFirst => {
+											handle_cu!();
+											handle_raa!();
 										},
-										None => {},
 									}
 								},
diff --git a/src/util/test_utils.rs b/src/util/test_utils.rs
index f8341e88587..8ab02b2674a 100644
--- a/src/util/test_utils.rs
+++ b/src/util/test_utils.rs
@@ -128,7 +128,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 	fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
 		Err(HandleError { err: "", action: None })
 	}
-	fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), HandleError> {
+	fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, msgs::RAACommitmentOrder), HandleError> {
 		Err(HandleError { err: "", action: None })
 	}
 	fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
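Closing aside: all of the ordering machinery keys off the commitment-number comparison inside channel_reestablish. A compact, runnable model of that three-way dispatch, using the same cases the channel.rs hunks show (names are illustrative, not rust-lightning API):

    // Which commitment-related action reestablish requires of us (illustrative model).
    enum CommitmentResend { None, LastUpdate }

    fn commitment_resend(next_local_commitment_number: u64, our_next_remote_commitment_number: u64)
            -> Result<CommitmentResend, &'static str> {
        if next_local_commitment_number == our_next_remote_commitment_number {
            // Peer already saw our latest commitment_signed; nothing to re-send.
            Ok(CommitmentResend::None)
        } else if next_local_commitment_number == our_next_remote_commitment_number - 1 {
            // Peer missed our last update; re-send the whole CommitmentUpdate, in the
            // RAACommitmentOrder the channel recorded.
            Ok(CommitmentResend::LastUpdate)
        } else {
            Err("Peer attempted to reestablish channel with a very old remote commitment transaction")
        }
    }

    fn main() {
        assert!(matches!(commitment_resend(5, 5), Ok(CommitmentResend::None)));
        assert!(matches!(commitment_resend(4, 5), Ok(CommitmentResend::LastUpdate)));
        assert!(commitment_resend(2, 5).is_err());
    }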