f basically rewrite the whole thing, this time with tests
TheBlueMatt committed Jan 27, 2023
1 parent 6f0354a commit 7f9ee5a
Showing 1 changed file with 161 additions and 30 deletions.
lightning/src/ln/channelmanager.rs
@@ -4251,6 +4251,37 @@ where
		Ok(())
	}

+	/// Gets the number of peers which match the given filter and all of whose channels are
+	/// unfunded, inbound, non-0-conf channels.
+	///
+	/// The filter is called for each peer and provided with the number of such unfunded
+	/// channels the peer has; peers with any funded, outbound, or 0-conf channel are never
+	/// counted.
+	fn peers_with_unfunded_channels<Filter>(&self, mut filter: Filter) -> usize
+	where Filter: FnMut(&PublicKey, &PeerState<<SP::Target as SignerProvider>::Signer>, usize) -> bool {
+		let mut peers_with_unfunded_channels = 0;
+		let best_block_height = self.best_block.read().unwrap().height();
+		{
+			let peer_state_lock = self.per_peer_state.read().unwrap();
+			for (node_id, peer_mtx) in peer_state_lock.iter() {
+				let peer = peer_mtx.lock().unwrap();
+				let mut num_unfunded_channels = 0;
+				for (_, chan) in peer.channel_by_id.iter() {
+					if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
+						chan.get_funding_tx_confirmations(best_block_height) == 0
+					{
+						num_unfunded_channels += 1;
+					}
+				}
+				if !filter(node_id, &*peer, num_unfunded_channels) { continue; }
+				if num_unfunded_channels == peer.channel_by_id.len() {
+					peers_with_unfunded_channels += 1;
+				}
+			}
+		}
+		return peers_with_unfunded_channels;
+	}
+
	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
		if msg.chain_hash != self.genesis_hash {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
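The helper's contract is easier to see outside the diff: the filter closure receives each peer's node id, its `PeerState`, and that peer's unfunded-channel count, and a peer contributes to the returned total only when the closure returns `true` and every one of its channels is unfunded. Below is a minimal standalone sketch of that counting contract, assuming a simplified stand-in for `PeerState`; `MockPeer` and `count_peers_with_unfunded_channels` are hypothetical names, not items from the crate.

```rust
use std::collections::HashMap;

// Hypothetical, simplified stand-in for the crate's PeerState.
struct MockPeer {
    // One bool per channel: true means "funded". The real code derives this
    // from confirmations, direction, and `minimum_depth()`.
    channels: Vec<bool>,
    is_connected: bool,
}

/// Mirrors the shape of `peers_with_unfunded_channels`: the filter sees each
/// peer plus its unfunded-channel count, and a peer is only counted when the
/// filter returns true *and* all of its channels are unfunded.
fn count_peers_with_unfunded_channels<F>(peers: &HashMap<u8, MockPeer>, mut filter: F) -> usize
where F: FnMut(&u8, &MockPeer, usize) -> bool {
    let mut count = 0;
    for (id, peer) in peers.iter() {
        let unfunded = peer.channels.iter().filter(|funded| !**funded).count();
        if !filter(id, peer, unfunded) { continue; }
        if unfunded == peer.channels.len() {
            count += 1;
        }
    }
    count
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert(1, MockPeer { channels: vec![false, false], is_connected: true }); // all unfunded
    peers.insert(2, MockPeer { channels: vec![true, false], is_connected: true });  // one funded
    peers.insert(3, MockPeer { channels: vec![], is_connected: false });            // no channels

    // An open_channel-style filter: only consider peers with at least one channel.
    let n = count_peers_with_unfunded_channels(&peers, |_, peer, _| !peer.channels.is_empty());
    assert_eq!(n, 1); // only peer 1: peer 2 has a funded channel, peer 3 has none

    // A peer_connected-style filter: only consider currently-connected peers.
    let n = count_peers_with_unfunded_channels(&peers, |_, peer, _| peer.is_connected);
    assert_eq!(n, 1); // peer 3 is excluded by is_connected this time
}
```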
@@ -4270,25 +4301,37 @@ where
		if let None = peer_state_mutex_opt {
			return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()))
		}
-		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-		let peer_state = &mut *peer_state_lock;
-
-		let mut unfunded_channels = 0;
-		let best_block_height = self.best_block.read().unwrap().height();
-		for (_, chan) in peer_state.channel_by_id.iter() {
-			if chan.get_funding_tx_confirmations(best_block_height) == 0 && chan.minimum_depth() != 0 {
-				unfunded_channels += 1;
-			}
-		}
-		if unfunded_channels >= MAX_UNFUNDED_CHANS_PER_PEER {
+		// Get the number of peers with channels, but without funded ones. We don't care too much
+		// about peers that never open a channel, so we filter by peers that have at least one
+		// channel, and then limit the number of those with unfunded channels.
+		let mut this_node_unfunded_channels = 0;
+		let peers_with_unfunded_channels = self.peers_with_unfunded_channels(
+			|node_id, node, unfunded_channels| {
+				if node_id == counterparty_node_id {
+					this_node_unfunded_channels = unfunded_channels;
+				}
+				!node.channel_by_id.is_empty()
+			});
+		if this_node_unfunded_channels >= MAX_UNFUNDED_CHANS_PER_PEER {
			return Err(MsgHandleErrInternal::send_err_msg_no_close(
				format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
				msg.temporary_channel_id.clone()));
		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		// If this peer already has some channels, a new channel won't increase our number of peers
+		// with unfunded channels, so as long as we aren't over the maximum number of unfunded
+		// channels per-peer we can accept channels from a peer with existing ones.
+		if peer_state.channel_by_id.is_empty() && peers_with_unfunded_channels >= MAX_NO_CHANNEL_PEERS {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close(
+				"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
+				msg.temporary_channel_id.clone()));
+		}

		let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
			counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
-			&self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
+			&self.default_configuration, self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
		{
			Err(e) => {
				self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
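With the helper in place, `internal_open_channel` enforces two limits: a per-peer cap (`MAX_UNFUNDED_CHANS_PER_PEER`) on unfunded inbound channels from one peer, and a global cap (`MAX_NO_CHANNEL_PEERS`) that only peers with no existing channels can trip, since a peer that already has channels cannot increase the number of peers holding only unfunded channels. A minimal sketch of just that decision logic, with illustrative constant values (the crate defines its own):

```rust
// Illustrative values only; the real constants live in channelmanager.rs.
const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
const MAX_NO_CHANNEL_PEERS: usize = 250;

// Hypothetical standalone model of the two admission checks above.
fn accept_open_channel(
    this_peer_unfunded_chans: usize, // unfunded channels already open with this peer
    this_peer_total_chans: usize,    // all channels with this peer, funded or not
    peers_with_only_unfunded: usize, // peers whose every channel is unfunded
) -> Result<(), &'static str> {
    // Per-peer cap: refuse once a single peer has too many unfunded channels.
    if this_peer_unfunded_chans >= MAX_UNFUNDED_CHANS_PER_PEER {
        return Err("Refusing more than MAX_UNFUNDED_CHANS_PER_PEER unfunded channels.");
    }
    // Global cap: a peer that already has channels can't increase the number of
    // peers holding only unfunded channels, so only channel-less peers are
    // subject to the global limit.
    if this_peer_total_chans == 0 && peers_with_only_unfunded >= MAX_NO_CHANNEL_PEERS {
        return Err("Have too many peers with unfunded channels, not accepting new ones");
    }
    Ok(())
}

fn main() {
    assert!(accept_open_channel(3, 3, 10).is_ok());
    assert!(accept_open_channel(4, 4, 10).is_err());   // per-peer cap hit
    assert!(accept_open_channel(0, 0, 250).is_err());  // brand-new peer, global cap hit
    assert!(accept_open_channel(0, 2, 250).is_ok());   // existing peer bypasses global cap
}
```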
@@ -6241,26 +6284,19 @@ where

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

-		let mut peers_with_unfunded_channels = 0;
-		let mut peer_has_channels = false;
-		let best_block_height = self.best_block.read().unwrap().height();
-		{
-			let peer_state_lock = self.per_peer_state.read().unwrap();
-			for (node_id, peer) in peer_state_lock.iter() {
-				if node_id == counterparty_node_id {
-					peer_has_channels = true;
-					break;
-				}
-				let mut has_funded_channels = false;
-				for (_, chan) in peer.lock().unwrap().channel_by_id.iter() {
-					if chan.get_funding_tx_confirmations(best_block_height) != 0 || chan.minimum_depth() == 0 {
-						has_funded_channels = true;
-					}
-				}
-				if !has_funded_channels { peers_with_unfunded_channels += 1 }
-			}
-		}
-		if !peer_has_channels && inbound && peers_with_unfunded_channels >= MAX_NO_CHANNEL_PEERS {
+		// If we have too many peers connected which don't have funded channels, disconnect the
+		// peer immediately. If we have a bunch of unfunded channels taking up space in memory for
+		// disconnected peers, we still let new peers connect, but we'll reject new channels from
+		// them.
+		let mut this_peer_has_funded_channels = false;
+		let connected_peers_with_unfunded_channels = self.peers_with_unfunded_channels(
+			|node_id, node, num_unfunded_channels| {
+				if node_id == counterparty_node_id && num_unfunded_channels != node.channel_by_id.len() {
+					this_peer_has_funded_channels = true;
+				}
+				node.is_connected
+			});
+		if inbound && !this_peer_has_funded_channels && connected_peers_with_unfunded_channels >= MAX_NO_CHANNEL_PEERS {
			return Err(());
		}
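The connection-time gate differs from the open-channel gate in two ways: it filters on `node.is_connected` rather than on peers with channels, and it only ever refuses inbound connections; a peer with any funded channel is always allowed back. A small sketch of the gate as a hypothetical free function (in the commit it is inline in `peer_connected`), again with an illustrative constant value:

```rust
const MAX_NO_CHANNEL_PEERS: usize = 250; // illustrative value, not necessarily the crate's

/// Returns false when an inbound connection should be refused: the peer has no
/// funded channels with us and we already track too many connected peers whose
/// channels are all unfunded.
fn allow_connection(inbound: bool, peer_has_funded_channels: bool,
    connected_peers_with_unfunded_channels: usize) -> bool {
    !(inbound && !peer_has_funded_channels
        && connected_peers_with_unfunded_channels >= MAX_NO_CHANNEL_PEERS)
}

fn main() {
    assert!(allow_connection(false, false, 9999)); // outbound is never gated
    assert!(allow_connection(true, true, 9999));   // funded channels protect the peer
    assert!(!allow_connection(true, false, 250));  // inbound, unprotected, at the cap
    assert!(allow_connection(true, false, 249));
}
```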

@@ -8449,6 +8485,101 @@ mod tests {
		nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg);
	}

+	#[test]
+	fn test_connection_limiting() {
+		// Test that we limit un-channel'd peers and un-funded channels properly.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		// Note that create_network connects the nodes together for us
+
+		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+		let mut funding_tx = None;
+		for (idx, _) in (0..super::MAX_UNFUNDED_CHANS_PER_PEER).enumerate() {
+			nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+			let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+			if idx == 0 {
+				nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
+				let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+				funding_tx = Some(tx.clone());
+				nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx).unwrap();
+				let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+				nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+				check_added_monitors!(nodes[1], 1);
+				let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+				nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+				check_added_monitors!(nodes[0], 1);
+			}
+			open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+		}
+
+		// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
+		open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+		assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+			open_channel_msg.temporary_channel_id);
+
+		// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
+		// it doesn't count as a "protected" peer, and it counts towards the MAX_NO_CHANNEL_PEERS limit.
+		let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
+		for _ in 1..super::MAX_NO_CHANNEL_PEERS {
+			let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+			peer_pks.push(random_pk);
+			nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+				features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+		}
+		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+		nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+		// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
+		// them if we have too many un-channel'd peers.
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
+		for ev in chan_closed_events {
+			if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
+		}
+		nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+		// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
+		// Even though we accept one more connection from new peers, we won't actually let them
+		// open channels.
+		assert_eq!(peer_pks.len(), super::MAX_NO_CHANNEL_PEERS - 1);
+		for peer_pk in peer_pks {
+			nodes[1].node.handle_open_channel(&peer_pk, &open_channel_msg);
+			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pk);
+			open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+		}
+		nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+		assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+			open_channel_msg.temporary_channel_id);
+
+		// If we fund the first channel, nodes[0] has a live on-chain channel with us; it is now
+		// "protected" and can connect again.
+		mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+			features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+		get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+		// Further, because the first channel was funded, we can open another channel with
+		// last_random_pk.
+		nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+	}
+
	#[cfg(anchors)]
	#[test]
	fn test_anchors_zero_fee_htlc_tx_fallback() {
