Commit 2f21d56

Don't serialize nodes which we have no channels to
1 parent: 1325bf6

File tree: 2 files changed, +41 -15 lines
  lightning/src/ln/channelmanager.rs
  lightning/src/ln/reorg_tests.rs
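In outline, the commit adds a `PeerState::ok_to_remove(require_disconnected)` helper and calls it wherever peers are pruned from memory (passing `true`, so a still-connected peer is never dropped) and when serializing the `ChannelManager` (passing `false`, so peers we have no channels to are simply not written). Below is a minimal sketch of that predicate using a simplified stand-in type rather than the real `PeerState<Signer>`; `SimplePeerState`, the `[u8; 32]` key type, and the `main` harness are illustrative, not part of the commit.

```rust
use std::collections::HashMap;

// Simplified stand-in for PeerState<Signer>: only the two fields the
// predicate looks at are modelled here.
struct SimplePeerState {
    channel_by_id: HashMap<[u8; 32], ()>,
    is_connected: bool,
}

impl SimplePeerState {
    // Mirrors the new PeerState::ok_to_remove: a peer may be removed once it
    // has no channels and, if `require_disconnected` is set, once it has also
    // disconnected.
    fn ok_to_remove(&self, require_disconnected: bool) -> bool {
        if require_disconnected && self.is_connected {
            return false;
        }
        self.channel_by_id.is_empty()
    }
}

fn main() {
    let peer = SimplePeerState { channel_by_id: HashMap::new(), is_connected: true };
    // Pruning paths pass `true`: a connected peer is kept even with no channels.
    assert!(!peer.ok_to_remove(true));
    // The serialization path passes `false`: connectivity is ignored, so a
    // peer with no channels is simply not written out.
    assert!(peer.ok_to_remove(false));
}
```

Passing the requirement in as a flag lets the pruning paths and the serialization path share one definition of "removable" while differing only on whether connectivity matters.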

Diff for: lightning/src/ln/channelmanager.rs (+34 -14)

```diff
@@ -493,6 +493,18 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
     is_connected: bool,
 }
 
+impl <Signer: ChannelSigner> PeerState<Signer> {
+    /// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
+    /// If true is passed for `require_disconnected`, the function will return false if we haven't
+    /// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`.
+    fn ok_to_remove(&self, require_disconnected: bool) -> bool {
+        if require_disconnected && self.is_connected {
+            return false
+        }
+        self.channel_by_id.len() == 0
+    }
+}
+
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
 /// actually ours and not some duplicate HTLC sent to us by a node along the route.
 ///
@@ -3521,8 +3533,7 @@ where
 
                 true
             });
-            let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
-            if peer_should_be_removed {
+            if peer_state.ok_to_remove(true) {
                 pending_peers_awaiting_removal.push(counterparty_node_id);
             }
         }
@@ -3544,7 +3555,7 @@ where
                     // have no channels to the peer.
                     let remove_entry = {
                         let peer_state = entry.get().lock().unwrap();
-                        !peer_state.is_connected && peer_state.channel_by_id.len() == 0
+                        peer_state.ok_to_remove(true)
                     };
                     if remove_entry {
                         entry.remove_entry();
@@ -6254,9 +6265,8 @@ where
     fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
        let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
        let mut failed_channels = Vec::new();
-       let mut no_channels_remain = true;
        let mut per_peer_state = self.per_peer_state.write().unwrap();
-       {
+       let remove_peer = {
            log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
                log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
            if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
@@ -6269,8 +6279,6 @@ where
                        update_maps_on_chan_removal!(self, chan);
                        self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
                        return false;
-                   } else {
-                       no_channels_remain = false;
                    }
                    true
                });
@@ -6300,9 +6308,10 @@ where
                });
                debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
                peer_state.is_connected = false;
-           }
-       }
-       if no_channels_remain {
+               peer_state.ok_to_remove(true)
+           } else { true }
+       };
+       if remove_peer {
            per_peer_state.remove(counterparty_node_id);
        }
        mem::drop(per_peer_state);
@@ -6896,13 +6905,17 @@ where
            best_block.block_hash().write(writer)?;
        }
 
+       let mut serializable_peer_count: u64 = 0;
        {
            let per_peer_state = self.per_peer_state.read().unwrap();
            let mut unfunded_channels = 0;
            let mut number_of_channels = 0;
            for (_, peer_state_mutex) in per_peer_state.iter() {
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
+               if !peer_state.ok_to_remove(false) {
+                   serializable_peer_count += 1;
+               }
                number_of_channels += peer_state.channel_by_id.len();
                for (_, channel) in peer_state.channel_by_id.iter() {
                    if !channel.is_funding_initiated() {
@@ -6953,11 +6966,18 @@ where
            htlc_purposes.push(purpose);
        }
 
-       (per_peer_state.len() as u64).write(writer)?;
+       (serializable_peer_count).write(writer)?;
        for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
-           peer_pubkey.write(writer)?;
-           let peer_state = peer_state_mutex.lock().unwrap();
-           peer_state.latest_features.write(writer)?;
+           let peer_state_lock = peer_state_mutex.lock().unwrap();
+           let peer_state = &*peer_state_lock;
+           // Peers which we have no channels to should be dropped once disconnected. As we
+           // disconnect all peers when shutting down and serializing the ChannelManager, we
+           // consider all peers as disconnected here. There's therefore no need write peers with
+           // no channels.
+           if !peer_state.ok_to_remove(false) {
+               peer_pubkey.write(writer)?;
+               peer_state.latest_features.write(writer)?;
+           }
        }
 
        let events = self.pending_events.lock().unwrap();
```
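The serialization change above follows a count-then-write pattern: one pass over `per_peer_state` counts the peers that will actually be written (those for which `ok_to_remove(false)` is false), that count is written where `per_peer_state.len()` used to go, and a second pass skips the filtered peers so the count and the entries stay in agreement. Below is a rough sketch of the same shape, assuming a plain `std::io::Write` sink rather than LDK's `Writeable` machinery; the `Peer` struct, `write_peers` function, and byte encoding are placeholders, not the actual serialization format.

```rust
use std::collections::HashMap;
use std::io::{self, Write};

// Placeholder peer record: `channels` stands in for channel_by_id.len().
struct Peer {
    channels: usize,
    features: Vec<u8>,
}

// Write only the peers we still have channels to, prefixed by how many there
// are. This mirrors the count-then-write shape of the change; the encoding
// itself (big-endian u64 count, raw key and feature bytes) is illustrative.
fn write_peers<W: Write>(peers: &HashMap<[u8; 33], Peer>, w: &mut W) -> io::Result<()> {
    // First pass: count the peers that will actually be serialized.
    let serializable_peer_count = peers.values().filter(|p| p.channels > 0).count() as u64;
    w.write_all(&serializable_peer_count.to_be_bytes())?;

    // Second pass: skip peers with no channels. They would have been dropped
    // on disconnect anyway, so a reader never needs to learn about them.
    for (pubkey, peer) in peers.iter() {
        if peer.channels > 0 {
            w.write_all(pubkey)?;
            w.write_all(&peer.features)?;
        }
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let mut peers = HashMap::new();
    peers.insert([2u8; 33], Peer { channels: 1, features: vec![0x02] });
    peers.insert([3u8; 33], Peer { channels: 0, features: vec![0x02] });

    let mut buf = Vec::new();
    write_peers(&peers, &mut buf)?;

    // Only the peer with an open channel contributes to the output.
    assert_eq!(&buf[..8], &1u64.to_be_bytes());
    assert_eq!(buf.len(), 8 + 33 + 1);
    Ok(())
}
```

The important invariant is that the same predicate drives both passes; if they disagreed, a reader would expect a different number of peer entries than were actually written.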

Diff for: lightning/src/ln/reorg_tests.rs (+7 -1)

```diff
@@ -13,7 +13,7 @@ use crate::chain::channelmonitor::ANTI_REORG_DELAY;
 use crate::chain::transaction::OutPoint;
 use crate::chain::Confirm;
 use crate::ln::channelmanager::ChannelManager;
-use crate::ln::msgs::ChannelMessageHandler;
+use crate::ln::msgs::{ChannelMessageHandler, Init};
 use crate::util::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use crate::util::test_utils;
 use crate::util::ser::Writeable;
@@ -374,6 +374,12 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
     nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 
     // Now check that we can create a new channel
+    if reload_node && nodes[0].node.per_peer_state.read().unwrap().len() == 0 {
+        // If we dropped the channel before reloading the node, nodes[1] was also dropped from
+        // nodes[0] storage, and hence not connected again on startup. We therefore need to
+        // reconnect to the node before attempting to create a new channel.
+        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+    }
     create_announced_chan_between_nodes(&nodes, 0, 1);
     send_payment(&nodes[0], &[&nodes[1]], 8000000);
 }
```
