@@ -206,7 +206,7 @@ pub struct PaymentPreimage(pub [u8;32]);
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 pub struct PaymentSecret(pub [u8; 32]);
 
-type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
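
The reworked alias makes the invariant explicit: a shutdown either carries both the funding outpoint and the monitor update that closes it, or neither. A minimal sketch with stand-in types (not the real rust-lightning structs) showing what the pairing buys at the use site:

```rust
// Stand-in types purely for illustration.
struct OutPoint;
struct ChannelMonitorUpdate;
struct HTLCSource;
struct PaymentHash;

// Before: an update was always present, even with no outpoint to apply it to.
type OldShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
// After: the outpoint and its update travel together or not at all.
type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);

fn consume(res: ShutdownResult) {
    let (monitor_update_option, _failed_htlcs) = res;
    if let Some((_funding_txo, _update)) = monitor_update_option {
        // Both pieces are guaranteed present here; no mismatched state to handle.
    }
}
```
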
@@ -333,6 +333,15 @@ pub(super) struct ChannelHolder<Signer: Sign> {
 	pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }
 
+/// Events which we process internally but cannot be processed immediately at the generation site
+/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+/// quite some time lag.
+enum BackgroundEvent {
+	/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
+	/// commitment transaction.
+	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+}
+
 /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
 /// the latest Init features we heard from the peer.
 struct PeerState {
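
The new enum is the payload of a simple deferred-work queue: an event that cannot be handled where it is generated gets pushed onto a Mutex-guarded Vec and drained later. A self-contained sketch of that shape, using a stand-in event type rather than the real (OutPoint, ChannelMonitorUpdate) payload:

```rust
use std::sync::Mutex;

// Stand-in for BackgroundEvent.
enum DeferredEvent {
    CloseChannel(u64),
}

struct Node {
    pending_deferred: Mutex<Vec<DeferredEvent>>,
}

impl Node {
    // Generation site: too early in initialization to act, so queue the event.
    fn defer(&self, ev: DeferredEvent) {
        self.pending_deferred.lock().unwrap().push(ev);
    }
}

fn main() {
    let node = Node { pending_deferred: Mutex::new(Vec::new()) };
    node.defer(DeferredEvent::CloseChannel(42));
    assert_eq!(node.pending_deferred.lock().unwrap().len(), 1);
}
```
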
@@ -436,6 +445,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
 	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
 
 	pending_events: Mutex<Vec<events::Event>>,
+	pending_background_events: Mutex<Vec<BackgroundEvent>>,
 	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
 	/// Essentially just when we're serializing ourselves out.
 	/// Taken first everywhere where we are making changes before any other locks.
@@ -794,6 +804,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 			per_peer_state: RwLock::new(HashMap::new()),
 
 			pending_events: Mutex::new(Vec::new()),
+			pending_background_events: Mutex::new(Vec::new()),
 			total_consistency_lock: RwLock::new(()),
 			persistence_notifier: PersistenceNotifier::new(),
 
@@ -942,12 +953,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
 	#[inline]
 	fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
-		let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+		let (monitor_update_option, mut failed_htlcs) = shutdown_res;
 		log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
 		for htlc_source in failed_htlcs.drain(..) {
 			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 		}
-		if let Some(funding_txo) = funding_txo_option {
+		if let Some((funding_txo, monitor_update)) = monitor_update_option {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -1854,13 +1865,42 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		events.append(&mut new_events);
 	}
 
+	/// Free the background events, generally called from timer_chan_freshness_every_min.
+	///
+	/// Exposed for testing to allow us to process events quickly without generating accidental
+	/// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+	///
+	/// Expects the caller to have a total_consistency_lock read lock.
+	fn process_background_events(&self) {
+		let mut background_events = Vec::new();
+		mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
+		for event in background_events.drain(..) {
+			match event {
+				BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+					// The channel has already been closed, so no use bothering to care about the
+					// monitor update completing.
+					let _ = self.chain_monitor.update_channel(funding_txo, update);
+				},
+			}
+		}
+	}
+
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) fn test_process_background_events(&self) {
+		self.process_background_events();
+	}
+
 	/// If a peer is disconnected we mark any channels with that peer as 'disabled'.
 	/// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
 	/// to inform the network about the uselessness of these channels.
 	///
 	/// This method handles all the details, and must be called roughly once per minute.
+	///
+	/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
 	pub fn timer_chan_freshness_every_min(&self) {
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+		self.process_background_events();
+
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
 		for (_, chan) in channel_state.by_id.iter_mut() {
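
process_background_events swaps the whole queue out from under the Mutex rather than iterating while holding it, so the handlers are free to take other locks (here, the calls into chain_monitor) without lock-order concerns. A sketch of just that drain pattern, with a stand-in event type:

```rust
use std::mem;
use std::sync::Mutex;

struct Node {
    pending: Mutex<Vec<u64>>, // stand-in for Vec<BackgroundEvent>
}

impl Node {
    fn process_background_events(&self) {
        // Hold the lock only for the swap; handle events with no lock held.
        let mut events = Vec::new();
        mem::swap(&mut *self.pending.lock().unwrap(), &mut events);
        for ev in events.drain(..) {
            println!("handling deferred event {}", ev);
        }
    }
}

fn main() {
    let node = Node { pending: Mutex::new(vec![1, 2]) };
    node.process_background_events();
    assert!(node.pending.lock().unwrap().is_empty());
}
```

The caller is expected to drive timer_chan_freshness_every_min roughly once per minute, which now also flushes this queue.
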
@@ -1953,6 +1993,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		//identify whether we sent it or not based on the (I presume) very different runtime
 		//between the branches here. We should make this async and move it into the forward HTLCs
 		//timer handling.
+
+		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+		// from block_connected which may run during initialization prior to the chain_monitor
+		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
 		match source {
 			HTLCSource::OutboundRoute { ref path, .. } => {
 				log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -2418,7 +2462,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				// We do not do a force-close here as that would generate a monitor update for
 				// a monitor that we didn't manage to store (and that we don't care about - we
 				// don't respond with the funding_signed so the channel can never go on chain).
-				let (_funding_txo_option, _monitor_update, failed_htlcs) = chan.force_shutdown(true);
+				let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
 				assert!(failed_htlcs.is_empty());
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
 			},
@@ -3100,6 +3144,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 			self.finish_force_close_channel(failure);
 		}
 	}
+
+	/// Handle a list of channel failures during a block_connected or block_disconnected call,
+	/// pushing the channel monitor update (if any) to the background events queue and removing the
+	/// Channel object.
+	fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
+		for mut failure in failed_channels.drain(..) {
+			// Either a commitment transaction has been confirmed on-chain or
+			// Channel::block_disconnected detected that the funding transaction has been
+			// reorganized out of the main chain.
+			// We cannot broadcast our latest local state via monitor update (as
+			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
+			// so we track the update internally and handle it when the user next calls
+			// timer_chan_freshness_every_min, guaranteeing we're running normally.
+			if let Some((funding_txo, update)) = failure.0.take() {
+				assert_eq!(update.updates.len(), 1);
+				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
+					assert!(should_broadcast);
+				} else { unreachable!(); }
+				self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+			}
+			self.finish_force_close_channel(failure);
+		}
+	}
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
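
Note the `failure.0.take()` above: it moves the deferred (OutPoint, ChannelMonitorUpdate) out of the ShutdownResult and leaves None behind, so the subsequent finish_force_close_channel call on the same value cannot apply the update a second time. A tiny sketch of the Option::take hand-off:

```rust
fn main() {
    // Stand-in for a ShutdownResult: (deferred monitor work, failed HTLCs).
    let mut failure: (Option<&str>, Vec<u8>) = (Some("monitor update"), vec![1, 2, 3]);

    // Move the deferred work out, leaving None in its place.
    if let Some(update) = failure.0.take() {
        println!("queued for later: {}", update);
    }

    // Downstream consumers of `failure` now see None and skip the update path.
    assert!(failure.0.is_none());
    assert_eq!(failure.1.len(), 3);
}
```
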
@@ -3167,6 +3234,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 {
 	/// Updates channel state based on transactions seen in a connected block.
 	pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+		// during initialization prior to the chain_monitor being fully configured in some cases.
+		// See the docs for `ChannelManagerReadArgs` for more.
 		let header_hash = header.block_hash();
 		log_trace!(self.logger, "Block {} at height {} connected", header_hash, height);
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
@@ -3218,9 +3288,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					if let Some(short_id) = channel.get_short_channel_id() {
 						short_to_id.remove(&short_id);
 					}
-					// It looks like our counterparty went on-chain. We go ahead and
-					// broadcast our latest local state as well here, just in case its
-					// some kind of SPV attack, though we expect these to be dropped.
+					// It looks like our counterparty went on-chain. Close the channel.
 					failed_channels.push(channel.force_shutdown(true));
 					if let Ok(update) = self.get_channel_update(&channel) {
 						pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -3254,9 +3322,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
 			});
 		}
-		for failure in failed_channels.drain(..) {
-			self.finish_force_close_channel(failure);
-		}
+
+		self.handle_init_event_channel_failures(failed_channels);
 
 		for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
 			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
@@ -3282,6 +3349,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	/// If necessary, the channel may be force-closed without letting the counterparty participate
 	/// in the shutdown.
 	pub fn block_disconnected(&self, header: &BlockHeader) {
+		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+		// during initialization prior to the chain_monitor being fully configured in some cases.
+		// See the docs for `ChannelManagerReadArgs` for more.
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 		let mut failed_channels = Vec::new();
 		{
@@ -3306,9 +3376,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				}
 			});
 		}
-		for failure in failed_channels.drain(..) {
-			self.finish_force_close_channel(failure);
-		}
+		self.handle_init_event_channel_failures(failed_channels);
 		self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
 		*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.block_hash();
 	}
@@ -3914,6 +3982,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 			event.write(writer)?;
 		}
 
+		let background_events = self.pending_background_events.lock().unwrap();
+		(background_events.len() as u64).write(writer)?;
+		for event in background_events.iter() {
+			match event {
+				BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
+					0u8.write(writer)?;
+					funding_txo.write(writer)?;
+					monitor_update.write(writer)?;
+				},
+			}
+		}
+
 		(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
 
 		Ok(())
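
The on-disk encoding here is a u64 count followed by a one-byte variant tag and the variant payload per event, with unknown tags rejected on read. A self-contained sketch of that length-prefixed, tagged layout over plain readers and writers (big-endian by assumption; the real code goes through rust-lightning's Writeable/Readable traits):

```rust
use std::io::{self, Read, Write};

#[derive(Debug, PartialEq)]
enum Event { Close(u32) } // stand-in for BackgroundEvent

fn write_events<W: Write>(w: &mut W, events: &[Event]) -> io::Result<()> {
    w.write_all(&(events.len() as u64).to_be_bytes())?; // count prefix
    for ev in events {
        match ev {
            Event::Close(id) => {
                w.write_all(&[0u8])?;            // variant tag
                w.write_all(&id.to_be_bytes())?; // payload
            }
        }
    }
    Ok(())
}

fn read_events<R: Read>(r: &mut R) -> io::Result<Vec<Event>> {
    let mut count = [0u8; 8];
    r.read_exact(&mut count)?;
    let mut out = Vec::new();
    for _ in 0..u64::from_be_bytes(count) {
        let mut tag = [0u8; 1];
        r.read_exact(&mut tag)?;
        match tag[0] {
            0 => {
                let mut id = [0u8; 4];
                r.read_exact(&mut id)?;
                out.push(Event::Close(u32::from_be_bytes(id)));
            }
            _ => return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown tag")),
        }
    }
    Ok(out)
}

fn main() -> io::Result<()> {
    let mut buf = Vec::new();
    write_events(&mut buf, &[Event::Close(7)])?;
    assert_eq!(read_events(&mut buf.as_slice())?, vec![Event::Close(7)]);
    Ok(())
}
```
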
@@ -3929,11 +4009,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 /// ChannelManager)>::read(reader, args).
 /// This may result in closing some Channels if the ChannelMonitor is newer than the stored
 /// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
-///    ChannelMonitor::get_outputs_to_watch() and ChannelMonitor::get_funding_txo().
+/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
+///    way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
+///    ChannelMonitor::get_funding_txo().
 /// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Move the ChannelMonitors into your local chain::Watch.
-/// 6) Disconnect/connect blocks on the ChannelManager.
+/// 5) Disconnect/connect blocks on the ChannelManager.
+/// 6) Move the ChannelMonitors into your local chain::Watch.
+///
+/// Note that the ordering of steps #4-6 is not important; however, all three must occur before you
+/// call any other methods on the newly-deserialized ChannelManager.
+///
+/// Note that because some channels may be closed during deserialization, it is critical that you
+/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
+/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
+/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
+/// not force-close the same channels but consider them live), you may end up revoking a state for
+/// which you've already broadcast the transaction.
 pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	where M::Target: chain::Watch<Signer>,
 	      T::Target: BroadcasterInterface,
@@ -4064,7 +4155,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
 				channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
 					// But if the channel is behind of the monitor, close the channel:
-					let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+					let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
 					failed_htlcs.append(&mut new_failed_htlcs);
 					monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
 			} else {
@@ -4128,6 +4219,15 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			}
 		}
 
+		let background_event_count: u64 = Readable::read(reader)?;
+		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
+		for _ in 0..background_event_count {
+			match <u8 as Readable>::read(reader)? {
+				0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+				_ => return Err(DecodeError::InvalidValue),
+			}
+		}
+
 		let last_node_announcement_serial: u32 = Readable::read(reader)?;
 
 		let mut secp_ctx = Secp256k1::new();
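
On the read side the count is untrusted, so the Vec pre-allocation is clamped: at most MAX_ALLOC_SIZE bytes worth of elements are reserved up front, and push() grows the vector if the count is genuinely larger, while a bogus count fails fast when element deserialization hits end-of-stream. A sketch of the clamp with an illustrative bound:

```rust
use std::cmp;
use std::mem;

const MAX_ALLOC_SIZE: usize = 1024; // illustrative; the real constant differs

fn read_items(claimed_count: u64) -> Vec<u64> {
    // Never reserve more than MAX_ALLOC_SIZE bytes up front, even if a corrupt
    // or hostile serialization claims billions of entries.
    let cap = cmp::min(claimed_count as usize, MAX_ALLOC_SIZE / mem::size_of::<u64>());
    let mut items: Vec<u64> = Vec::with_capacity(cap);
    for i in 0..claimed_count {
        // In the real code each element read can fail here, aborting long
        // before memory is exhausted.
        items.push(i);
    }
    items
}

fn main() {
    assert_eq!(read_items(3), vec![0, 1, 2]);
}
```
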
@@ -4157,6 +4257,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			per_peer_state: RwLock::new(per_peer_state),
 
 			pending_events: Mutex::new(pending_events_read),
+			pending_background_events: Mutex::new(pending_background_events_read),
 			total_consistency_lock: RwLock::new(()),
 			persistence_notifier: PersistenceNotifier::new(),
 