From d205d8d515bb43c8557c8aae95b138892ca63422 Mon Sep 17 00:00:00 2001
From: Wilmer Paulino
Date: Thu, 16 Jan 2025 14:21:59 -0800
Subject: [PATCH 1/3] Use channel ID over funding outpoint to track monitors in
 ChainMonitor

Once dual funding/splicing is supported, channels will no longer maintain their
original funding outpoint. Ideally, we identify `ChannelMonitor`s with a stable
identifier, such as the channel ID, which is fixed throughout the channel's
lifetime.

This commit replaces the use of funding outpoints as the key to the monitor
index with the channel ID. Note that this change is strictly limited to the
in-memory state; it does not change how monitors are tracked in their persisted
state, where they are still keyed by their funding outpoint. Addressing that is
left for follow-up work.
---
 fuzz/src/chanmon_consistency.rs               |  91 +++++-----
 lightning-block-sync/src/init.rs              |   2 +-
 lightning/src/chain/chainmonitor.rs           | 162 +++++++++--------
 lightning/src/chain/mod.rs                    |  10 +-
 lightning/src/ln/async_signer_tests.rs        |  12 +-
 lightning/src/ln/chanmon_update_fail_tests.rs | 128 +++++++-------
 lightning/src/ln/channelmanager.rs            |  24 +--
 lightning/src/ln/functional_test_utils.rs     |  46 ++---
 lightning/src/ln/functional_tests.rs          |  67 +++----
 lightning/src/ln/monitor_tests.rs             | 164 +++++++++---------
 lightning/src/ln/priv_short_conf_tests.rs     |   4 +-
 lightning/src/ln/reload_tests.rs              |  31 ++--
 lightning/src/ln/shutdown_tests.rs            |   4 +-
 lightning/src/util/test_utils.rs              |  34 ++--
 14 files changed, 389 insertions(+), 390 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 2bbcac2fd60..1f07f868439 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -173,6 +173,7 @@ struct LatestMonitorState {
 	/// A set of (monitor id, serialized `ChannelMonitor`)s which we're currently "persisting",
 	/// from LDK's perspective.
pending_monitors: Vec<(u64, Vec)>, + funding_txo: OutPoint, } struct TestChainMonitor { @@ -189,7 +190,7 @@ struct TestChainMonitor { Arc, >, >, - pub latest_monitors: Mutex>, + pub latest_monitors: Mutex>, } impl TestChainMonitor { pub fn new( @@ -213,17 +214,19 @@ impl TestChainMonitor { } impl chain::Watch for TestChainMonitor { fn watch_channel( - &self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor, + &self, channel_id: ChannelId, monitor: channelmonitor::ChannelMonitor, ) -> Result { let mut ser = VecWriter(Vec::new()); monitor.write(&mut ser).unwrap(); let monitor_id = monitor.get_latest_update_id(); - let res = self.chain_monitor.watch_channel(funding_txo, monitor); + let funding_txo = monitor.get_funding_txo().0; + let res = self.chain_monitor.watch_channel(channel_id, monitor); let state = match res { Ok(chain::ChannelMonitorUpdateStatus::Completed) => LatestMonitorState { persisted_monitor_id: monitor_id, persisted_monitor: ser.0, pending_monitors: Vec::new(), + funding_txo, }, Ok(chain::ChannelMonitorUpdateStatus::InProgress) => { panic!("The test currently doesn't test initial-persistence via the async pipeline") @@ -231,17 +234,17 @@ impl chain::Watch for TestChainMonitor { Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(), Err(()) => panic!(), }; - if self.latest_monitors.lock().unwrap().insert(funding_txo, state).is_some() { + if self.latest_monitors.lock().unwrap().insert(channel_id, state).is_some() { panic!("Already had monitor pre-watch_channel"); } res } fn update_channel( - &self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate, + &self, channel_id: ChannelId, update: &channelmonitor::ChannelMonitorUpdate, ) -> chain::ChannelMonitorUpdateStatus { let mut map_lock = self.latest_monitors.lock().unwrap(); - let map_entry = map_lock.get_mut(&funding_txo).expect("Didn't have monitor on update call"); + let map_entry = map_lock.get_mut(&channel_id).expect("Didn't have monitor on update call"); let latest_monitor_data = map_entry .pending_monitors .last() @@ -265,7 +268,7 @@ impl chain::Watch for TestChainMonitor { .unwrap(); let mut ser = VecWriter(Vec::new()); deserialized_monitor.write(&mut ser).unwrap(); - let res = self.chain_monitor.update_channel(funding_txo, update); + let res = self.chain_monitor.update_channel(channel_id, update); match res { chain::ChannelMonitorUpdateStatus::Completed => { map_entry.persisted_monitor_id = update.update_id; @@ -711,9 +714,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut monitors = new_hash_map(); let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap(); - for (outpoint, mut prev_state) in old_monitors.drain() { + for (channel_id, mut prev_state) in old_monitors.drain() { monitors.insert( - outpoint, + prev_state.funding_txo, <(BlockHash, ChannelMonitor)>::read( &mut Cursor::new(&prev_state.persisted_monitor), (&*$keys_manager, &*$keys_manager), @@ -725,7 +728,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // considering them discarded. LDK should replay these for us as they're stored in // the `ChannelManager`. 
prev_state.pending_monitors.clear(); - chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, prev_state); + chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); } let mut monitor_refs = new_hash_map(); for (outpoint, monitor) in monitors.iter() { @@ -752,9 +755,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .1, chain_monitor.clone(), ); - for (funding_txo, mon) in monitors.drain() { + for (_, mon) in monitors.drain() { assert_eq!( - chain_monitor.chain_monitor.watch_channel(funding_txo, mon), + chain_monitor.chain_monitor.watch_channel(mon.channel_id(), mon), Ok(ChannelMonitorUpdateStatus::Completed) ); } @@ -825,7 +828,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }; $source.handle_accept_channel($dest.get_our_node_id(), &accept_channel); - let funding_output; { let mut events = $source.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -845,7 +847,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { script_pubkey: output_script, }], }; - funding_output = OutPoint { txid: tx.compute_txid(), index: 0 }; $source .funding_transaction_generated( temporary_channel_id, @@ -890,13 +891,19 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { $source.handle_funding_signed($dest.get_our_node_id(), &funding_signed); let events = $source.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { + let channel_id = if let events::Event::ChannelPending { + ref counterparty_node_id, + ref channel_id, + .. + } = events[0] + { assert_eq!(counterparty_node_id, &$dest.get_our_node_id()); + channel_id.clone() } else { panic!("Wrong event type"); - } + }; - funding_output + channel_id }}; } @@ -963,8 +970,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut nodes = [node_a, node_b, node_c]; - let chan_1_funding = make_channel!(nodes[0], nodes[1], keys_manager_b, 0); - let chan_2_funding = make_channel!(nodes[1], nodes[2], keys_manager_c, 1); + let chan_1_id = make_channel!(nodes[0], nodes[1], keys_manager_b, 0); + let chan_2_id = make_channel!(nodes[1], nodes[2], keys_manager_c, 1); for node in nodes.iter() { confirm_txn!(node); @@ -1363,14 +1370,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; - let complete_all_monitor_updates = |monitor: &Arc, chan_funding| { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) { + let complete_all_monitor_updates = |monitor: &Arc, chan_id| { + if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { assert!( state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), "updates should be sorted by id" ); for (id, data) in state.pending_monitors.drain(..) 
{ - monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap(); + monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); if id > state.persisted_monitor_id { state.persisted_monitor_id = id; state.persisted_monitor = data; @@ -1410,10 +1417,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { ChannelMonitorUpdateStatus::Completed }, - 0x08 => complete_all_monitor_updates(&monitor_a, &chan_1_funding), - 0x09 => complete_all_monitor_updates(&monitor_b, &chan_1_funding), - 0x0a => complete_all_monitor_updates(&monitor_b, &chan_2_funding), - 0x0b => complete_all_monitor_updates(&monitor_c, &chan_2_funding), + 0x08 => complete_all_monitor_updates(&monitor_a, &chan_1_id), + 0x09 => complete_all_monitor_updates(&monitor_b, &chan_1_id), + 0x0a => complete_all_monitor_updates(&monitor_b, &chan_2_id), + 0x0b => complete_all_monitor_updates(&monitor_c, &chan_2_id), 0x0c => { if !chan_a_disconnected { @@ -1683,21 +1690,21 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { nodes[2].maybe_update_chan_fees(); }, - 0xf0 => complete_monitor_update(&monitor_a, &chan_1_funding, &complete_first), - 0xf1 => complete_monitor_update(&monitor_a, &chan_1_funding, &complete_second), - 0xf2 => complete_monitor_update(&monitor_a, &chan_1_funding, &Vec::pop), + 0xf0 => complete_monitor_update(&monitor_a, &chan_1_id, &complete_first), + 0xf1 => complete_monitor_update(&monitor_a, &chan_1_id, &complete_second), + 0xf2 => complete_monitor_update(&monitor_a, &chan_1_id, &Vec::pop), - 0xf4 => complete_monitor_update(&monitor_b, &chan_1_funding, &complete_first), - 0xf5 => complete_monitor_update(&monitor_b, &chan_1_funding, &complete_second), - 0xf6 => complete_monitor_update(&monitor_b, &chan_1_funding, &Vec::pop), + 0xf4 => complete_monitor_update(&monitor_b, &chan_1_id, &complete_first), + 0xf5 => complete_monitor_update(&monitor_b, &chan_1_id, &complete_second), + 0xf6 => complete_monitor_update(&monitor_b, &chan_1_id, &Vec::pop), - 0xf8 => complete_monitor_update(&monitor_b, &chan_2_funding, &complete_first), - 0xf9 => complete_monitor_update(&monitor_b, &chan_2_funding, &complete_second), - 0xfa => complete_monitor_update(&monitor_b, &chan_2_funding, &Vec::pop), + 0xf8 => complete_monitor_update(&monitor_b, &chan_2_id, &complete_first), + 0xf9 => complete_monitor_update(&monitor_b, &chan_2_id, &complete_second), + 0xfa => complete_monitor_update(&monitor_b, &chan_2_id, &Vec::pop), - 0xfc => complete_monitor_update(&monitor_c, &chan_2_funding, &complete_first), - 0xfd => complete_monitor_update(&monitor_c, &chan_2_funding, &complete_second), - 0xfe => complete_monitor_update(&monitor_c, &chan_2_funding, &Vec::pop), + 0xfc => complete_monitor_update(&monitor_c, &chan_2_id, &complete_first), + 0xfd => complete_monitor_update(&monitor_c, &chan_2_id, &complete_second), + 0xfe => complete_monitor_update(&monitor_c, &chan_2_id, &Vec::pop), 0xff => { // Test that no channel is in a stuck state where neither party can send funds even @@ -1711,10 +1718,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed; - complete_all_monitor_updates(&monitor_a, &chan_1_funding); - complete_all_monitor_updates(&monitor_b, &chan_1_funding); - complete_all_monitor_updates(&monitor_b, &chan_2_funding); - complete_all_monitor_updates(&monitor_c, &chan_2_funding); + complete_all_monitor_updates(&monitor_a, &chan_1_id); + complete_all_monitor_updates(&monitor_b, &chan_1_id); + 
complete_all_monitor_updates(&monitor_b, &chan_2_id); + complete_all_monitor_updates(&monitor_c, &chan_2_id); // Next, make sure peers are all connected to each other if chan_a_disconnected { diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index 430cda0928c..7697adacf2a 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -125,7 +125,7 @@ where /// /// // Allow the chain monitor to watch any channels. /// let monitor = monitor_listener.0; -/// chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); +/// chain_monitor.watch_channel(monitor.channel_id(), monitor); /// /// // Create an SPV client to notify the chain monitor and channel manager of block events. /// let chain_poller = poll::ChainPoller::new(block_source, Network::Bitcoin); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index f1b9f729cb3..32571f57c25 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -43,7 +43,6 @@ use crate::prelude::*; use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard}; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; -use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; /// `Persist` defines behavior for persisting channel monitors: this could mean @@ -202,14 +201,14 @@ impl MonitorHolder { /// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is /// released. pub struct LockedChannelMonitor<'a, ChannelSigner: EcdsaChannelSigner> { - lock: RwLockReadGuard<'a, HashMap>>, - funding_txo: OutPoint, + lock: RwLockReadGuard<'a, HashMap>>, + channel_id: ChannelId, } impl Deref for LockedChannelMonitor<'_, ChannelSigner> { type Target = ChannelMonitor; fn deref(&self) -> &ChannelMonitor { - &self.lock.get(&self.funding_txo).expect("Checked at construction").monitor + &self.lock.get(&self.channel_id).expect("Checked at construction").monitor } } @@ -236,7 +235,7 @@ pub struct ChainMonitor, { - monitors: RwLock>>, + monitors: RwLock>>, chain_source: Option, broadcaster: T, logger: L, @@ -276,12 +275,12 @@ where C::Target: chain::Filter, FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; - let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned()); - let channel_count = funding_outpoints.len(); - for funding_outpoint in funding_outpoints.iter() { + let channel_ids = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned()); + let channel_count = channel_ids.len(); + for channel_id in channel_ids.iter() { let monitor_lock = self.monitors.read().unwrap(); - if let Some(monitor_state) = monitor_lock.get(funding_outpoint) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { + if let Some(monitor_state) = monitor_lock.get(channel_id) { + if self.update_monitor_with_chain_data(header, best_height, txdata, &process, channel_id, &monitor_state, channel_count).is_err() { // Take the monitors lock for writing so that we poison it and any future // operations going forward fail immediately. 
core::mem::drop(monitor_lock); @@ -292,11 +291,11 @@ where C::Target: chain::Filter, } } - // do some followup cleanup if any funding outpoints were added in between iterations + // Do another pass to handle any monitors added in between iterations. let monitor_states = self.monitors.write().unwrap(); - for (funding_outpoint, monitor_state) in monitor_states.iter() { - if !funding_outpoints.contains(funding_outpoint) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { + for (channel_id, monitor_state) in monitor_states.iter() { + if !channel_ids.contains(channel_id) { + if self.update_monitor_with_chain_data(header, best_height, txdata, &process, channel_id, &monitor_state, channel_count).is_err() { log_error!(self.logger, "{}", err_str); panic!("{}", err_str); } @@ -315,7 +314,7 @@ where C::Target: chain::Filter, } fn update_monitor_with_chain_data( - &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, + &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, channel_id: &ChannelId, monitor_state: &MonitorHolder, channel_count: usize, ) -> Result<(), ()> where FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let monitor = &monitor_state.monitor; @@ -323,11 +322,10 @@ where C::Target: chain::Filter, let mut txn_outputs = process(monitor, txdata); - let get_partition_key = |funding_outpoint: &OutPoint| { - let funding_txid_hash = funding_outpoint.txid.to_raw_hash(); - let funding_txid_hash_bytes = funding_txid_hash.as_byte_array(); - let funding_txid_u32 = u32::from_be_bytes([funding_txid_hash_bytes[0], funding_txid_hash_bytes[1], funding_txid_hash_bytes[2], funding_txid_hash_bytes[3]]); - funding_txid_u32.wrapping_add(best_height.unwrap_or_default()) + let get_partition_key = |channel_id: &ChannelId| { + let channel_id_bytes = channel_id.0; + let channel_id_u32 = u32::from_be_bytes([channel_id_bytes[0], channel_id_bytes[1], channel_id_bytes[2], channel_id_bytes[3]]); + channel_id_u32.wrapping_add(best_height.unwrap_or_default()) }; let partition_factor = if channel_count < 15 { @@ -337,14 +335,15 @@ where C::Target: chain::Filter, }; let has_pending_claims = monitor_state.monitor.has_pending_claims(); - if has_pending_claims || get_partition_key(funding_outpoint) % partition_factor == 0 { + if has_pending_claims || get_partition_key(channel_id) % partition_factor == 0 { log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor)); // Even though we don't track monitor updates from chain-sync as pending, we still want // updates per-channel to be well-ordered so that users don't see a // `ChannelMonitorUpdate` after a channel persist for a channel with the same // `latest_update_id`. 
let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); - match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) { + let funding_txo = monitor.get_funding_txo().0; + match self.persister.update_persisted_channel(funding_txo, None, monitor) { ChannelMonitorUpdateStatus::Completed => log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data", log_funding_info!(monitor) @@ -410,9 +409,9 @@ where C::Target: chain::Filter, pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec { let mut ret = Vec::new(); let monitor_states = self.monitors.read().unwrap(); - for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| { + for (_, monitor_state) in monitor_states.iter().filter(|(channel_id, _)| { for chan in ignored_channels { - if chan.funding_txo.as_ref() == Some(funding_outpoint) { + if chan.channel_id == **channel_id { return false; } } @@ -428,10 +427,10 @@ where C::Target: chain::Filter, /// /// Note that the result holds a mutex over our monitor set, and should not be held /// indefinitely. - pub fn get_monitor(&self, funding_txo: OutPoint) -> Result, ()> { + pub fn get_monitor(&self, channel_id: ChannelId) -> Result, ()> { let lock = self.monitors.read().unwrap(); - if lock.get(&funding_txo).is_some() { - Ok(LockedChannelMonitor { lock, funding_txo }) + if lock.get(&channel_id).is_some() { + Ok(LockedChannelMonitor { lock, channel_id }) } else { Err(()) } @@ -441,39 +440,36 @@ where C::Target: chain::Filter, /// /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always /// monitoring for on-chain state resolutions. - pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> { - self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| { - let channel_id = monitor_holder.monitor.channel_id(); - (*outpoint, channel_id) - }).collect() + pub fn list_monitors(&self) -> Vec { + self.monitors.read().unwrap().keys().copied().collect() } #[cfg(not(c_bindings))] - /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored). + /// Lists the pending updates for each [`ChannelMonitor`] (by `ChannelId` being monitored). /// Each `Vec` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`]. - pub fn list_pending_monitor_updates(&self) -> HashMap> { - hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| { - (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone()) + pub fn list_pending_monitor_updates(&self) -> HashMap> { + hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(channel_id, holder)| { + (*channel_id, holder.pending_monitor_updates.lock().unwrap().clone()) })) } #[cfg(c_bindings)] - /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored). + /// Lists the pending updates for each [`ChannelMonitor`] (by `ChannelId` being monitored). /// Each `Vec` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates /// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`]. 
- pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec)> { - self.monitors.read().unwrap().iter().map(|(outpoint, holder)| { - (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone()) + pub fn list_pending_monitor_updates(&self) -> Vec<(ChannelId, Vec)> { + self.monitors.read().unwrap().iter().map(|(channel_id, holder)| { + (*channel_id, holder.pending_monitor_updates.lock().unwrap().clone()) }).collect() } #[cfg(test)] - pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor { - self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor + pub fn remove_monitor(&self, channel_id: &ChannelId) -> ChannelMonitor { + self.monitors.write().unwrap().remove(channel_id).unwrap().monitor } /// Indicates the persistence of a [`ChannelMonitor`] has completed after @@ -496,10 +492,10 @@ where C::Target: chain::Filter, /// /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently /// registered [`ChannelMonitor`]s. - pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> { + pub fn channel_monitor_updated(&self, channel_id: ChannelId, completed_update_id: u64) -> Result<(), APIError> { let monitors = self.monitors.read().unwrap(); - let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else { - return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) }); + let monitor_data = if let Some(mon) = monitors.get(&channel_id) { mon } else { + return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching channel ID {} found", channel_id) }); }; let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap(); pending_monitor_updates.retain(|update_id| *update_id != completed_update_id); @@ -507,9 +503,9 @@ where C::Target: chain::Filter, // Note that we only check for pending non-chainsync monitor updates and we don't track monitor // updates resulting from chainsync in `pending_monitor_updates`. let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates); - log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}", + log_debug!(self.logger, "Completed off-chain monitor update {} for channel with channel ID {}, {}", completed_update_id, - funding_txo, + channel_id, if monitor_is_pending_updates { "still have pending off-chain updates" } else { @@ -520,9 +516,10 @@ where C::Target: chain::Filter, // Completed event. return Ok(()); } - let channel_id = monitor_data.monitor.channel_id(); + let funding_txo = monitor_data.monitor.get_funding_txo().0; self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { - funding_txo, channel_id, + funding_txo, + channel_id, monitor_update_id: monitor_data.monitor.get_latest_update_id(), }], monitor_data.monitor.get_counterparty_node_id())); @@ -534,13 +531,11 @@ where C::Target: chain::Filter, /// chain::Watch API wherein we mark a monitor fully-updated by just calling /// channel_monitor_updated once with the highest ID. 
#[cfg(any(test, fuzzing))] - pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) { + pub fn force_channel_monitor_updated(&self, channel_id: ChannelId, monitor_update_id: u64) { let monitors = self.monitors.read().unwrap(); - let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) { - (m.monitor.get_counterparty_node_id(), m.monitor.channel_id()) - } else { - (None, ChannelId::v1_from_funding_outpoint(funding_txo)) - }; + let monitor = &monitors.get(&channel_id).unwrap().monitor; + let counterparty_node_id = monitor.get_counterparty_node_id(); + let funding_txo = monitor.get_funding_txo().0; self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { funding_txo, channel_id, @@ -570,10 +565,10 @@ where C::Target: chain::Filter, // Sadly we can't hold the monitors read lock through an async call. Thus we have to do a // crazy dance to process a monitor's events then only remove them once we've done so. let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::>(); - for funding_txo in mons_to_process { + for channel_id in mons_to_process { let mut ev; match super::channelmonitor::process_events_body!( - self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), self.logger, ev, handler(ev).await) { + self.monitors.read().unwrap().get(&channel_id).map(|m| &m.monitor), self.logger, ev, handler(ev).await) { Ok(()) => {}, Err(ReplayEvent ()) => { self.event_notifier.notify(); @@ -612,10 +607,10 @@ where C::Target: chain::Filter, /// signature generation failure. /// /// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor. - pub fn signer_unblocked(&self, monitor_opt: Option) { + pub fn signer_unblocked(&self, monitor_opt: Option) { let monitors = self.monitors.read().unwrap(); - if let Some(funding_txo) = monitor_opt { - if let Some(monitor_holder) = monitors.get(&funding_txo) { + if let Some(channel_id) = monitor_opt { + if let Some(monitor_holder) = monitors.get(&channel_id) { monitor_holder.monitor.signer_unblocked( &*self.broadcaster, &*self.fee_estimator, &self.logger ) @@ -640,27 +635,27 @@ where C::Target: chain::Filter, /// data could be moved to an archive location or removed entirely. 
pub fn archive_fully_resolved_channel_monitors(&self) { let mut have_monitors_to_prune = false; - for (funding_txo, monitor_holder) in self.monitors.read().unwrap().iter() { + for monitor_holder in self.monitors.read().unwrap().values() { let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); let (is_fully_resolved, needs_persistence) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger); if is_fully_resolved { have_monitors_to_prune = true; } if needs_persistence { - self.persister.update_persisted_channel(*funding_txo, None, &monitor_holder.monitor); + self.persister.update_persisted_channel(monitor_holder.monitor.get_funding_txo().0, None, &monitor_holder.monitor); } } if have_monitors_to_prune { let mut monitors = self.monitors.write().unwrap(); - monitors.retain(|funding_txo, monitor_holder| { + monitors.retain(|channel_id, monitor_holder| { let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); let (is_fully_resolved, _) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger); if is_fully_resolved { log_info!(logger, - "Archiving fully resolved ChannelMonitor for funding txo {}", - funding_txo + "Archiving fully resolved ChannelMonitor for channel ID {}", + channel_id ); - self.persister.archive_persisted_channel(*funding_txo); + self.persister.archive_persisted_channel(monitor_holder.monitor.get_funding_txo().0); false } else { true @@ -761,12 +756,12 @@ where C::Target: chain::Filter, L::Target: Logger, P::Target: Persist, { - fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor) -> Result { + fn watch_channel(&self, channel_id: ChannelId, monitor: ChannelMonitor) -> Result { let logger = WithChannelMonitor::from(&self.logger, &monitor, None); let mut monitors = self.monitors.write().unwrap(); - let entry = match monitors.entry(funding_outpoint) { + let entry = match monitors.entry(channel_id) { hash_map::Entry::Occupied(_) => { - log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present"); + log_error!(logger, "Failed to add new channel data: channel monitor for given channel ID is already present"); return Err(()); }, hash_map::Entry::Vacant(e) => e, @@ -774,7 +769,7 @@ where C::Target: chain::Filter, log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor)); let update_id = monitor.get_latest_update_id(); let mut pending_monitor_updates = Vec::new(); - let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor); + let persist_res = self.persister.persist_new_channel(monitor.get_funding_txo().0, &monitor); match persist_res { ChannelMonitorUpdateStatus::InProgress => { log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor)); @@ -799,13 +794,13 @@ where C::Target: chain::Filter, Ok(persist_res) } - fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { + fn update_channel(&self, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those // versions are V1-established. For 0.0.121+ the `channel_id` fields is always `Some`. 
- let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); + debug_assert_eq!(update.channel_id.unwrap(), channel_id); // Update the monitor that watches the channel referred to by the given outpoint. let monitors = self.monitors.read().unwrap(); - match monitors.get(&funding_txo) { + match monitors.get(&channel_id) { None => { let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id), None); log_error!(logger, "Failed to update channel monitor: no such monitor registered"); @@ -830,6 +825,7 @@ where C::Target: chain::Filter, let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger); let update_id = update.update_id; + let funding_txo = monitor.get_funding_txo().0; let persist_res = if update_res.is_err() { // Even if updating the monitor returns an error, the monitor's state will // still be changed. Therefore, we should persist the updated monitor despite the error. @@ -882,10 +878,10 @@ where C::Target: chain::Filter, for monitor_state in self.monitors.read().unwrap().values() { let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events(); if monitor_events.len() > 0 { - let monitor_outpoint = monitor_state.monitor.get_funding_txo().0; + let monitor_funding_txo = monitor_state.monitor.get_funding_txo().0; let monitor_channel_id = monitor_state.monitor.channel_id(); let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id(); - pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id)); + pending_monitor_events.push((monitor_funding_txo, monitor_channel_id, monitor_events, counterparty_node_id)); } } pending_monitor_events @@ -946,7 +942,7 @@ mod tests { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Route two payments to be claimed at the same time. let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); @@ -963,7 +959,7 @@ mod tests { let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); assert_eq!(persistences.len(), 1); - let (funding_txo, updates) = persistences.iter().next().unwrap(); + let (_, updates) = persistences.iter().next().unwrap(); assert_eq!(updates.len(), 2); // Note that updates is a HashMap so the ordering here is actually random. This shouldn't @@ -972,23 +968,23 @@ mod tests { let next_update = update_iter.next().unwrap().clone(); // Should contain next_update when pending updates listed. 
#[cfg(not(c_bindings))] - assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo) + assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(&channel_id) .unwrap().contains(&next_update)); #[cfg(c_bindings)] assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter() - .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update)); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap(); + .find(|(chan_id, _)| *chan_id == channel_id).unwrap().1.contains(&next_update)); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(channel_id, next_update.clone()).unwrap(); // Should not contain the previously pending next_update when pending updates listed. #[cfg(not(c_bindings))] - assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo) + assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(&channel_id) .unwrap().contains(&next_update)); #[cfg(c_bindings)] assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter() - .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update)); + .find(|(chan_id, _)| *chan_id == channel_id).unwrap().1.contains(&next_update)); assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(channel_id, update_iter.next().unwrap().clone()).unwrap(); let claim_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(claim_events.len(), 2); diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index 36b1ce57309..a9466660ddf 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -261,7 +261,7 @@ pub enum ChannelMonitorUpdateStatus { /// /// See method documentation and [`ChannelMonitorUpdateStatus`] for specific requirements. pub trait Watch { - /// Watches a channel identified by `funding_txo` using `monitor`. + /// Watches a channel identified by `channel_id` using `monitor`. /// /// Implementations are responsible for watching the chain for the funding transaction along /// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means @@ -270,15 +270,15 @@ pub trait Watch { /// A return of `Err(())` indicates that the channel should immediately be force-closed without /// broadcasting the funding transaction. /// - /// If the given `funding_txo` has previously been registered via `watch_channel`, `Err(())` + /// If the given `channel_id` has previously been registered via `watch_channel`, `Err(())` /// must be returned. /// /// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch /// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected /// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected - fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result; + fn watch_channel(&self, channel_id: ChannelId, monitor: ChannelMonitor) -> Result; - /// Updates a channel identified by `funding_txo` by applying `update` to its monitor. + /// Updates a channel identified by `channel_id` by applying `update` to its monitor. 
/// /// Implementations must call [`ChannelMonitor::update_monitor`] with the given update. This /// may fail (returning an `Err(())`), in which case this should return @@ -293,7 +293,7 @@ pub trait Watch { /// [`ChannelMonitorUpdateStatus::UnrecoverableError`], see its documentation for more info. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager - fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus; + fn update_channel(&self, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus; /// Returns any monitor events since the last call. Subsequent calls must only return new /// events. diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 6b8df9af0cf..4a857d191a9 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -489,8 +489,8 @@ fn do_test_async_raa_peer_disconnect(test_case: UnblockSignerAcrossDisconnectCas if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - dst.chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); check_added_monitors!(dst, 0); } @@ -614,8 +614,8 @@ fn do_test_async_commitment_signature_peer_disconnect(test_case: UnblockSignerAc if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - dst.chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); check_added_monitors!(dst, 0); } @@ -737,8 +737,8 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); check_added_monitors!(nodes[0], 0); } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index c081937650e..48e02ffcfe5 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -16,7 +16,6 @@ use bitcoin::constants::genesis_block; use 
bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; -use crate::chain::transaction::OutPoint; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; @@ -49,7 +48,6 @@ fn test_monitor_and_persister_update_fail() { // Create some initial channel let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let outpoint = OutPoint { txid: chan.3.compute_txid(), index: 0 }; // Rebalance the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -73,14 +71,14 @@ fn test_monitor_and_persister_update_fail() { }; let chain_mon = { let new_monitor = { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); let new_monitor = <(BlockHash, ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; assert!(new_monitor == *monitor); new_monitor }; let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(chain_mon.watch_channel(chan.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); chain_mon }; chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200); @@ -101,12 +99,12 @@ fn test_monitor_and_persister_update_fail() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Check that the persister returns InProgress (and will never actually complete) // as the monitor update errors. - if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor paused"); } + if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(chan.2, &update) {} else { panic!("Expected monitor paused"); } logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.").unwrap(), 1); // Apply the monitor update to the original ChainMonitor, ensuring the // ChannelManager and ChannelMonitor aren't out of sync. 
- assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), + assert_eq!(nodes[0].chain_monitor.update_channel(chan.2, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { @@ -151,8 +149,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -312,8 +310,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now fix monitor updating... chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); macro_rules! disconnect_reconnect_peers { () => { { @@ -621,8 +619,8 @@ fn test_monitor_update_fail_cs() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -654,8 +652,8 @@ fn test_monitor_update_fail_cs() { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -716,8 +714,8 @@ fn test_monitor_update_fail_no_rebroadcast() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - 
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -778,8 +776,8 @@ fn test_monitor_update_raa_while_paused() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -904,8 +902,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); @@ -1159,8 +1157,8 @@ fn test_monitor_update_fail_reestablish() { .contents.channel_flags & 2, 0); // The "disabled" bit should be unset as we just reconnected chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1237,8 +1235,8 @@ fn raa_no_response_awaiting_raa_state() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + 
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1359,8 +1357,8 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1474,8 +1472,8 @@ fn monitor_failed_no_reestablish_response() { let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1567,8 +1565,8 @@ fn first_message_on_recv_ordering() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1658,8 +1656,8 @@ fn test_monitor_update_fail_claim() { // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
let channel_id = chan_1.2; - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); @@ -1758,8 +1756,8 @@ fn test_monitor_update_on_pending_forwards() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1825,8 +1823,8 @@ fn monitor_update_claim_fail_no_response() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1865,7 +1863,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); + let channel_id = ChannelId::v1_from_funding_txid(funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index); nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); @@ -1875,8 +1873,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = 
nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); @@ -1921,8 +1919,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { @@ -2017,8 +2015,8 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2_id, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None); @@ -2394,8 +2392,8 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // If we finish updating the monitor, we should free the holding cell right away (this did // not occur prior to #756). 
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id); + let (mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); // New outbound messages should be generated immediately upon a call to @@ -2613,15 +2611,15 @@ fn test_temporary_error_during_shutdown() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id())); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); @@ -2656,7 +2654,7 @@ fn double_temp_error() { // `claim_funds` results in a ChannelMonitorUpdate. 
nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); - let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, @@ -2665,11 +2663,11 @@ fn double_temp_error() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1); + let (latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_2); // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions // and get both PaymentClaimed events at once. @@ -3112,8 +3110,8 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating // process. - let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap(); + let (_, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap(); // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C @@ -3140,8 +3138,8 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // ChannelMonitorUpdate hasn't yet completed. 
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap(); + let (_, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap(); // The ChannelMonitorUpdate which was completed prior to the reconnect only contained the // preimage (as it was a replay of the original ChannelMonitorUpdate from before we @@ -3309,8 +3307,8 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, // Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending // `PaymentForwarded` event will finally be released. - let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id); + let (ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_ab, ab_update_id); // If the A<->B channel was closed before we reload, we'll replay the claim against it on // reload, causing the `PaymentForwarded` event to get replayed. @@ -3387,8 +3385,8 @@ fn test_sync_async_persist_doesnt_hang() { // Immediately complete the monitor update, but before the ChannelManager has a chance to see // the MonitorEvent::Completed, create a channel update by receiving a claim on the second // payment. - let (outpoint, _, ab_update_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap(); + let (_, ab_update_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap(); nodes[1].node.claim_funds(payment_preimage_2); check_added_monitors(&nodes[1], 1); @@ -3500,7 +3498,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); } - let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2; + let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1; let mut events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if close_during_reload { 2 } else { 1 }); expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), @@ -3516,7 +3514,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Once we run event processing the monitor should free, check that it was indeed the B<->C // channel which was updated. 
check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 }); - let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2; + let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1; assert!(bc_update_id != post_ev_bc_update_id); // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates @@ -3601,8 +3599,8 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // ...but once we complete the A<->B channel preimage persistence, the B<->C channel // unlocks and we send both peers commitment updates. - let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - assert!(nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).is_ok()); + let (ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + assert!(nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).is_ok()); let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 2); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index eac2a1474d7..24d9dad1067 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1725,7 +1725,7 @@ where /// /// // Move the monitors to the ChannelManager's chain::Watch parameter /// for monitor in channel_monitors { -/// chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); +/// chain_monitor.watch_channel(monitor.channel_id(), monitor); /// } /// # Ok(()) /// # } @@ -3320,7 +3320,7 @@ macro_rules! handle_new_monitor_update { $in_flight_updates.len() - 1 }); if $self.background_events_processed_since_startup.load(Ordering::Acquire) { - let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]); + let update_res = $self.chain_monitor.update_channel($chan_id, &$in_flight_updates[$update_idx]); handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed) } else { // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we @@ -6327,10 +6327,10 @@ where for event in background_events.drain(..) { match event { - BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => { + BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((_funding_txo, channel_id, update)) => { // The channel has already been closed, so no use bothering to care about the // monitor updating completing. - let _ = self.chain_monitor.update_channel(funding_txo, &update); + let _ = self.chain_monitor.update_channel(channel_id, &update); }, BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => { self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update); @@ -8135,7 +8135,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible"); }, hash_map::Entry::Vacant(i_e) => { - let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); + let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { i_e.insert(chan.context.get_counterparty_node_id()); mem::drop(outpoint_to_peer_lock); @@ -8160,8 +8160,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(()) } else { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated"); - fail_chan!("Duplicate funding outpoint"); + log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); + fail_chan!("Duplicate channel ID"); } } } @@ -8187,10 +8187,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ .funding_signed(&msg, best_block, &self.signer_provider, &self.logger) .and_then(|(funded_chan, monitor)| { self.chain_monitor - .watch_channel(funded_chan.context.get_funding_txo().unwrap(), monitor) + .watch_channel(funded_chan.context.channel_id(), monitor) .map(|persist_status| (funded_chan, persist_status)) .map_err(|()| { - ChannelError::close("Channel funding outpoint was a duplicate".to_owned()) + ChannelError::close("Channel ID was a duplicate".to_owned()) }) }) { @@ -8796,16 +8796,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let monitor = try_channel_entry!( self, peer_state, chan.commitment_signed_initial_v2(msg, best_block, &self.signer_provider, &&logger), chan_entry); - let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); + let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); } else { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated"); + log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); try_channel_entry!(self, peer_state, Err(ChannelError::Close( ( - "Channel funding outpoint was a duplicate".to_owned(), + "Channel ID was a duplicate".to_owned(), ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, ) )), chan_entry) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index f1361337b35..fdb44646d59 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -255,8 +255,8 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) { // Ensure `get_claimable_balances`' self-tests never panic - for (funding_outpoint, _channel_id) in node.chain_monitor.chain_monitor.list_monitors() { - node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(); + for channel_id in node.chain_monitor.chain_monitor.list_monitors() { + 
node.chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().get_claimable_balances(); } } @@ -554,9 +554,7 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> { channel_keys_id = Some(context.channel_keys_id); } - let monitor = self.chain_monitor.chain_monitor.list_monitors().into_iter() - .find(|(_, channel_id)| *channel_id == *chan_id) - .and_then(|(funding_txo, _)| self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok()); + let monitor = self.chain_monitor.chain_monitor.get_monitor(*chan_id).ok(); if let Some(monitor) = monitor { monitor.do_mut_signer_call(|signer| { channel_keys_id = channel_keys_id.or(Some(signer.inner.channel_keys_id())); @@ -695,9 +693,9 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let feeest = test_utils::TestFeeEstimator::new(253); let mut deserialized_monitors = Vec::new(); { - for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() { + for channel_id in self.chain_monitor.chain_monitor.list_monitors() { let mut w = test_utils::TestVecWriter(Vec::new()); - self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap(); + self.chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().write(&mut w).unwrap(); let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap(); deserialized_monitors.push(deserialized_monitor); @@ -739,8 +737,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let chain_source = test_utils::TestChainSource::new(Network::Testnet); let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager); for deserialized_monitor in deserialized_monitors.drain(..) { - let funding_outpoint = deserialized_monitor.get_funding_txo().0; - if chain_monitor.watch_channel(funding_outpoint, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) { + let channel_id = deserialized_monitor.channel_id(); + if chain_monitor.watch_channel(channel_id, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) { panic!(); } } @@ -1038,20 +1036,7 @@ macro_rules! get_channel_type_features { macro_rules! get_monitor { ($node: expr, $channel_id: expr) => { { - use bitcoin::hashes::Hash; - let mut monitor = None; - // Assume funding vout is either 0 or 1 blindly - for index in 0..2 { - if let Ok(mon) = $node.chain_monitor.chain_monitor.get_monitor( - $crate::chain::transaction::OutPoint { - txid: bitcoin::Txid::from_slice(&$channel_id.0[..]).unwrap(), index - }) - { - monitor = Some(mon); - break; - } - } - monitor.unwrap() + $node.chain_monitor.chain_monitor.get_monitor($channel_id).unwrap() } } } @@ -1172,8 +1157,8 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User assert!(node_read.is_empty()); for monitor in monitors_read.drain(..) 
{ - let funding_outpoint = monitor.get_funding_txo().0; - assert_eq!(node.chain_monitor.watch_channel(funding_outpoint, monitor), + let channel_id = monitor.channel_id(); + assert_eq!(node.chain_monitor.watch_channel(channel_id, monitor), Ok(ChannelMonitorUpdateStatus::Completed)); check_added_monitors!(node, 1); } @@ -1277,7 +1262,7 @@ pub fn create_dual_funding_utxos_with_prev_txs( } pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: ChannelId) -> Transaction { - let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42); + let (temporary_channel_id, tx, _) = create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42); assert_eq!(temporary_channel_id, expected_temporary_channel_id); assert!(node_a.node.funding_transaction_generated(temporary_channel_id, node_b.node.get_our_node_id(), tx.clone()).is_ok()); @@ -1285,11 +1270,16 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: & let funding_created_msg = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()); assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id); + + let channel_id = ChannelId::v1_from_funding_txid( + funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index + ); + node_b.node.handle_funding_created(node_a.node.get_our_node_id(), &funding_created_msg); { let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id()); @@ -1298,7 +1288,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: & { let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id()); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 48512d17026..bb464389a61 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -562,19 +562,23 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { if steps & 0x0f == 2 { return; } nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); - let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); if steps & 0x0f == 3 { return; } nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let channel_id = ChannelId::v1_from_funding_txid( + funding_created.funding_txid.as_byte_array(), funding_created.funding_output_index + ); + if steps & 0x0f == 4 { return; } nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); { let mut 
added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -586,7 +590,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } @@ -2517,7 +2521,7 @@ fn channel_monitor_network_test() { // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and // confusing us in the following tests. - let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.compute_txid(), index: 0 }); + let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&chan_3.2); // One pending HTLC to time out: let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000); @@ -2587,7 +2591,7 @@ fn channel_monitor_network_test() { assert_eq!(nodes[3].node.list_channels().len(), 0); assert_eq!(nodes[4].node.list_channels().len(), 0); - assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.compute_txid(), index: 0 }, chan_3_mon), + assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(chan_3.2, chan_3_mon), Ok(ChannelMonitorUpdateStatus::Completed)); check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [node_id_4], 100000); } @@ -3221,7 +3225,7 @@ fn test_htlc_on_chain_success() { { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0.txid, chan_2.3.compute_txid()); + assert_eq!(added_monitors[0].0, chan_2.2); added_monitors.clear(); } let forwarded_events = nodes[1].node.get_and_clear_pending_events(); @@ -3259,8 +3263,8 @@ fn test_htlc_on_chain_success() { { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 2); - assert_eq!(added_monitors[0].0.txid, chan_1.3.compute_txid()); - assert_eq!(added_monitors[1].0.txid, chan_1.3.compute_txid()); + assert_eq!(added_monitors[0].0, chan_1.2); + assert_eq!(added_monitors[1].0, chan_1.2); added_monitors.clear(); } assert_eq!(events.len(), 3); @@ -8257,7 +8261,7 @@ fn test_bump_txn_sanitize_tracking_maps() { connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn)); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.compute_txid(), index: 0 }).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty()); assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty()); } @@ -8872,7 +8876,6 @@ fn test_update_err_monitor_lockdown() { // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let outpoint = OutPoint { txid: chan_1.3.compute_txid(), index: 0 }; // Rebalance the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -8886,14 +8889,14 @@ fn test_update_err_monitor_lockdown() { let persister = 
test_utils::TestPersister::new(); let watchtower = { let new_monitor = { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; assert!(new_monitor == *monitor); new_monitor }; let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); @@ -8915,8 +8918,8 @@ fn test_update_err_monitor_lockdown() { let mut node_0_peer_state_lock; if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { - assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(watchtower.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); + assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { assert!(false); @@ -8942,7 +8945,6 @@ fn test_concurrent_monitor_claim() { // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let outpoint = OutPoint { txid: chan_1.3.compute_txid(), index: 0 }; // Rebalance the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -8959,14 +8961,14 @@ fn test_concurrent_monitor_claim() { ); let watchtower_alice = { let new_monitor = { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; assert!(new_monitor == *monitor); new_monitor }; let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); @@ -8991,14 +8993,14 @@ fn test_concurrent_monitor_claim() { let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); let watchtower_bob = { let new_monitor = { - let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( 
&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; assert!(new_monitor == *monitor); new_monitor }; let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); watchtower }; watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1); @@ -9018,9 +9020,9 @@ fn test_concurrent_monitor_claim() { if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update - assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(watchtower_alice.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); + assert_eq!(watchtower_bob.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { assert!(false); @@ -9441,9 +9443,9 @@ fn test_peer_funding_sidechannel() { #[test] fn test_duplicate_conflicting_funding_from_second_peer() { - // Test that if a user tries to fund a channel with a funding outpoint they'd previously used + // Test that if a user tries to fund a channel with a channel ID they'd previously used // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we - // don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks + // don't regress in the fuzzer, as such funding getting passed our channel_id-matches checks // implies the user (and our counterparty) has reused cryptographic keys across channels, which // we require the user not do. let chanmon_cfgs = create_chanmon_cfgs(4); @@ -9453,14 +9455,15 @@ fn test_duplicate_conflicting_funding_from_second_peer() { let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); - let (_, tx, funding_output) = + let (_, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let real_chan_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails. 
let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3; let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone(); - nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap(); + nodes[0].chain_monitor.chain_monitor.watch_channel(real_chan_id, dummy_monitor).unwrap(); nodes[0].node.funding_transaction_generated(temp_chan_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); @@ -9475,7 +9478,7 @@ fn test_duplicate_conflicting_funding_from_second_peer() { // watch_channel call which failed), but zero monitor updates. check_added_monitors!(nodes[0], 1); get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); - let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() }; + let err_reason = ClosureReason::ProcessingError { err: "Channel ID was a duplicate".to_owned() }; check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_chan_id, true, err_reason)]); } @@ -9567,17 +9570,21 @@ fn test_duplicate_chan_id() { } // Move the first channel through the funding flow... - let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let channel_id = ChannelId::v1_from_funding_txid( + funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index + ); + nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -9664,7 +9671,7 @@ fn test_duplicate_chan_id() { { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index e2c76643348..8cf7074e376 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -336,7 +336,7 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); assert_eq!(vec![Balance::ClaimableOnChannelClose { amount_satoshis: 1_000, transaction_fee_satoshis: 0, outbound_payment_htlc_rounded_msat: 0, @@ -344,7 +344,7 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + 
nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); @@ -379,13 +379,13 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CoopClose, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); assert_eq!(vec![Balance::ClaimableAwaitingConfirmations { amount_satoshis: 1000, confirmation_height: nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CoopClose, }], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); @@ -399,9 +399,9 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { connect_blocks(&nodes[1], 1); assert_eq!(Vec::::new(), - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); assert_eq!(Vec::::new(), - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); let spendable_outputs_a = test_spendable_output(&nodes[0], &shutdown_tx[0], false); assert_eq!( @@ -549,7 +549,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }, sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { amount_satoshis: 1_000, transaction_fee_satoshis: 0, @@ -558,7 +558,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 3300, }, received_htlc_balance.clone(), received_htlc_timeout_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); @@ -615,7 +615,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { a_expected_balances.push(sent_htlc_balance.clone()); } assert_eq!(sorted_vec(a_expected_balances), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(vec![Balance::ClaimableOnChannelClose { amount_satoshis: 1_000 + 3_000 + 4_000, transaction_fee_satoshis: 0, @@ -625,7 +625,7 @@ fn do_test_claim_value_force_close(anchors: 
bool, prev_commitment_tx: bool) { 200 /* 1 HTLC */ } else { 300 /* 2 HTLCs */ }, inbound_htlc_rounded_msat: 0, }], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // Broadcast the closing transaction (which has both pending HTLCs in it) and get B's // broadcasted HTLC claim transaction with preimage. @@ -698,7 +698,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CounterpartyForceClosed, }, sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // The main non-HTLC balance is just awaiting confirmations, but the claimable height is the // CSV delay, not ANTI_REORG_DELAY. assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { @@ -709,7 +709,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { // Both HTLC balances are "contentious" as our counterparty could claim them if we wait too // long. received_htlc_claiming_balance.clone(), received_htlc_timeout_claiming_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], dust_payment_hash, false); @@ -718,13 +718,13 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { // After ANTI_REORG_DELAY, A will consider its balance fully spendable and generate a // `SpendableOutputs` event. However, B still has to wait for the CSV delay. 
assert_eq!(sorted_vec(vec![sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { amount_satoshis: 1_000, confirmation_height: node_b_commitment_claimable, source: BalanceSource::HolderForceClosed, }, received_htlc_claiming_balance.clone(), received_htlc_timeout_claiming_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); test_spendable_output(&nodes[0], &remote_txn[0], false); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); @@ -738,10 +738,10 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { expect_payment_sent(&nodes[0], payment_preimage, None, true, false); } assert_eq!(sorted_vec(vec![sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert_eq!(vec![sent_htlc_timeout_balance.clone()], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // When the HTLC timeout output is spendable in the next block, A should broadcast it connect_blocks(&nodes[0], htlc_cltv_timeout - nodes[0].best_block_info().1); @@ -769,12 +769,12 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::Htlc, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // After ANTI_REORG_DELAY, A will generate a SpendableOutputs event and drop the claimable // balance entry. connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); assert_eq!(Vec::::new(), - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); expect_payment_failed!(nodes[0], timeout_payment_hash, false); test_spendable_output(&nodes[0], &a_htlc_timeout_tx, false); @@ -794,7 +794,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { confirmation_height: node_b_htlc_claimable, source: BalanceSource::Htlc, }, received_htlc_timeout_claiming_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // After reaching the commitment output CSV, we'll get a SpendableOutputs event for it and have // only the HTLCs claimable on node B. 
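// Illustrative sketch, not part of the patch: with the monitor index keyed by channel ID, the
// balance checks above reduce to a direct `ChannelId` lookup; no funding `OutPoint` needs to be
// reconstructed. The harness names (`nodes`, `chan_id`) are assumed to be in scope exactly as in
// the surrounding test.
let _balances = nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances();
// And since `list_monitors()` now yields channel IDs directly, walking every monitor is simply:
for channel_id in nodes[1].chain_monitor.chain_monitor.list_monitors() {
	let _ = nodes[1].chain_monitor.chain_monitor.get_monitor(channel_id).unwrap();
}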
@@ -806,7 +806,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { confirmation_height: node_b_htlc_claimable, source: BalanceSource::Htlc, }, received_htlc_timeout_claiming_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // After reaching the claimed HTLC output CSV, we'll get a SpendableOutptus event for it and // have only one HTLC output left spendable. @@ -814,17 +814,17 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { test_spendable_output(&nodes[1], &b_broadcast_txn[0], anchors); assert_eq!(vec![received_htlc_timeout_claiming_balance.clone()], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // Finally, mine the HTLC timeout transaction that A broadcasted (even though B should be able // to claim this HTLC with the preimage it knows!). It will remain listed as a claimable HTLC // until ANTI_REORG_DELAY confirmations on the spend. mine_transaction(&nodes[1], &a_htlc_timeout_tx); assert_eq!(vec![received_htlc_timeout_claiming_balance.clone()], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); assert_eq!(Vec::::new(), - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -833,7 +833,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { connect_blocks(node, 6); connect_blocks(node, 6); assert!(node.chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(node.chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } } @@ -888,7 +888,6 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { // Create a single channel with two pending HTLCs from nodes[0] to nodes[1], one which nodes[1] // knows the preimage for, one which it does not. 
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); - let funding_outpoint = OutPoint { txid: funding_tx.compute_txid(), index: 0 }; let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000_000); let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety @@ -965,7 +964,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_commitment_claimable, source: BalanceSource::HolderForceClosed, }, htlc_balance_known_preimage.clone(), htlc_balance_unknown_preimage.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Get nodes[1]'s HTLC claim tx for the second HTLC mine_transaction(&nodes[1], &commitment_tx); @@ -984,7 +983,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_commitment_claimable, source: BalanceSource::HolderForceClosed, }, htlc_balance_known_preimage.clone(), htlc_balance_unknown_preimage.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); if anchors { handle_bump_htlc_event(&nodes[0], 1); } @@ -1014,7 +1013,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_commitment_claimable, source: BalanceSource::HolderForceClosed, }, htlc_balance_known_preimage.clone(), htlc_balance_unknown_preimage.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); if anchors { // The HTLC timeout claim corresponding to the counterparty preimage claim is removed from the @@ -1044,7 +1043,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_htlc_claimable, source: BalanceSource::Htlc, }, htlc_balance_unknown_preimage.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Finally make the HTLC transactions have ANTI_REORG_DELAY blocks. This call previously // panicked as described in the test introduction. This will remove the "maybe claimable" @@ -1061,7 +1060,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_htlc_claimable, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Connect blocks until the commitment transaction's CSV expires, providing us the relevant // `SpendableOutputs` event and removing the claimable balance entry. 
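// Illustrative sketch, not part of the patch: for v1 channels the `ChannelId` is derived
// deterministically from the funding outpoint, which is why the `OutPoint { txid, index: 0 }`
// keys previously built in these tests can be swapped for `chan_id` one-for-one. Assumes
// `funding_tx` and `chan_id` from `create_announced_chan_between_nodes_with_value` above are in
// scope and that the `bitcoin::hashes::Hash` trait is imported for `as_byte_array()`.
let funding_outpoint = OutPoint { txid: funding_tx.compute_txid(), index: 0 };
let derived_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
// Equivalent form used elsewhere in this patch, starting from the raw txid bytes:
assert_eq!(derived_id, ChannelId::v1_from_funding_txid(funding_outpoint.txid.as_byte_array(), funding_outpoint.index));
assert_eq!(derived_id, chan_id);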
@@ -1074,7 +1073,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { confirmation_height: node_a_htlc_claimable, source: BalanceSource::Htlc, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); let to_self_spendable_output = test_spendable_output(&nodes[0], &commitment_tx, false); assert_eq!( get_monitor!(nodes[0], chan_id).get_spendable_outputs(&commitment_tx, commitment_tx_conf_height_a), @@ -1084,7 +1083,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { // Connect blocks until the HTLC-Timeout's CSV expires, providing us the relevant // `SpendableOutputs` event and removing the claimable balance entry. connect_blocks(&nodes[0], node_a_htlc_claimable - nodes[0].best_block_info().1); - assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); test_spendable_output(&nodes[0], &timeout_htlc_txn[0], false); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're @@ -1093,7 +1092,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { connect_blocks(&nodes[0], 6); connect_blocks(&nodes[0], 6); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } #[test] @@ -1112,7 +1111,6 @@ fn test_no_preimage_inbound_htlc_balances() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); - let funding_outpoint = OutPoint { txid: funding_tx.compute_txid(), index: 0 }; // Send two HTLCs, one from A to B, and one from B to A. 
let to_b_failed_payment_hash = route_payment(&nodes[0], &[&nodes[1]], 10_000_000).1; @@ -1158,7 +1156,7 @@ fn test_no_preimage_inbound_htlc_balances() { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }, a_received_htlc_balance.clone(), a_sent_htlc_balance.clone()]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { amount_satoshis: 500_000 - 20_000, @@ -1168,7 +1166,7 @@ fn test_no_preimage_inbound_htlc_balances() { inbound_claiming_htlc_rounded_msat: 0, inbound_htlc_rounded_msat: 0, }, b_received_htlc_balance.clone(), b_sent_htlc_balance.clone()]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Get nodes[0]'s commitment transaction and HTLC-Timeout transaction let as_txn = get_local_commitment_txn!(nodes[0], chan_id); @@ -1193,7 +1191,7 @@ fn test_no_preimage_inbound_htlc_balances() { check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); assert_eq!(as_pre_spend_claims, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &as_txn[0]); check_added_monitors!(nodes[1], 1); @@ -1207,7 +1205,7 @@ fn test_no_preimage_inbound_htlc_balances() { source: BalanceSource::CounterpartyForceClosed, }, b_received_htlc_balance.clone(), b_sent_htlc_balance.clone()]); assert_eq!(bs_pre_spend_claims, - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // We'll broadcast the HTLC-Timeout transaction one block prior to the htlc's expiration (as it // is confirmable in the next block), but will still include the same claimable balances as no @@ -1222,11 +1220,11 @@ fn test_no_preimage_inbound_htlc_balances() { [HTLCDestination::FailedPayment { payment_hash: to_a_failed_payment_hash }]); assert_eq!(as_pre_spend_claims, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], 1); assert_eq!(as_pre_spend_claims, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // For node B, we'll get the non-HTLC funds claimable after ANTI_REORG_DELAY confirmations connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -1243,11 +1241,11 @@ fn test_no_preimage_inbound_htlc_balances() { check_spends!(bs_htlc_timeout_claim[0], as_txn[0]); assert_eq!(bs_pre_spend_claims, - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[1], 
1); assert_eq!(bs_pre_spend_claims, - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Now confirm the two HTLC timeout transactions for A, checking that the inbound HTLC resolves // after ANTI_REORG_DELAY confirmations and the other takes BREAKDOWN_TIMEOUT confirmations. @@ -1263,7 +1261,7 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: as_timeout_claimable_height, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[0], &bs_htlc_timeout_claim[0]); assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { @@ -1276,7 +1274,7 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: as_timeout_claimable_height, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Once as_htlc_timeout_claim[0] reaches ANTI_REORG_DELAY confirmations, we should get a // payment failure event. @@ -1294,7 +1292,7 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: core::cmp::max(as_timeout_claimable_height, htlc_cltv_timeout), source: BalanceSource::Htlc, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], node_a_commitment_claimable - nodes[0].best_block_info().1); assert_eq!(vec![Balance::ClaimableAwaitingConfirmations { @@ -1302,11 +1300,11 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: core::cmp::max(as_timeout_claimable_height, htlc_cltv_timeout), source: BalanceSource::Htlc, }], - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); test_spendable_output(&nodes[0], &as_txn[0], false); connect_blocks(&nodes[0], as_timeout_claimable_height - nodes[0].best_block_info().1); - assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); test_spendable_output(&nodes[0], &as_htlc_timeout_claim[0], false); // The process for B should be completely identical as well, noting that the non-HTLC-balance @@ -1318,7 +1316,7 @@ fn test_no_preimage_inbound_htlc_balances() { confirmation_height: bs_timeout_claimable_height, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &as_htlc_timeout_claim[0]); assert_eq!(sorted_vec(vec![b_received_htlc_balance.clone(), Balance::ClaimableAwaitingConfirmations { @@ -1326,17 +1324,17 @@ fn test_no_preimage_inbound_htlc_balances() { 
confirmation_height: bs_timeout_claimable_height, source: BalanceSource::Htlc, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); expect_payment_failed!(nodes[1], to_a_failed_payment_hash, false); assert_eq!(vec![b_received_htlc_balance.clone()], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); test_spendable_output(&nodes[1], &bs_htlc_timeout_claim[0], false); connect_blocks(&nodes[1], 1); - assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -1344,7 +1342,7 @@ fn test_no_preimage_inbound_htlc_balances() { connect_blocks(&nodes[1], 6); connect_blocks(&nodes[1], 6); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_spend_first: bool) { @@ -1471,7 +1469,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ outbound_payment: true, }, ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); mine_transaction(&nodes[1], &as_revoked_txn[0]); @@ -1526,7 +1524,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ htlc_unclaimed_balance(4_000), htlc_unclaimed_balance(5_000), ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); if confirm_htlc_spend_first { @@ -1558,7 +1556,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ pinnable_claimed_balance.clone(), to_self_unclaimed_balance.clone(), ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); } else { assert_eq!( @@ -1569,7 +1567,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ htlc_unclaimed_balance(4_000), htlc_unclaimed_balance(5_000), ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); } @@ -1584,7 +1582,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ pinnable_claimed_balance.clone(), 
unpinnable_claimed_balance.clone(), ]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()), ); connect_blocks(&nodes[1], 3); @@ -1618,7 +1616,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ expect_payment_failed_conditions_event(payment_failed_events[2..].to_vec(), timeout_payment_hash, false, PaymentFailedConditions::new()); } - assert_eq!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new()); + assert_eq!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances(), Vec::new()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -1626,7 +1624,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ connect_blocks(&nodes[1], 6); connect_blocks(&nodes[1], 6); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } #[test] @@ -1776,7 +1774,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { amount_satoshis: 1_000, }]); assert_eq!(as_balances, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[0], &revoked_htlc_success); let as_htlc_claim_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -1789,7 +1787,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { check_spends!(as_htlc_claim_tx[1], revoked_local_txn[0]); assert_eq!(as_balances, - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); assert_eq!(as_htlc_claim_tx[0].output.len(), 1); let as_revoked_htlc_success_claim_fee = chan_feerate * as_htlc_claim_tx[0].weight().to_wu() / 1000; @@ -1819,7 +1817,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 3); test_spendable_output(&nodes[0], &revoked_local_txn[0], false); @@ -1833,7 +1831,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + 2, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], 2); 
test_spendable_output(&nodes[0], &as_htlc_claim_tx[0], false); @@ -1843,7 +1841,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2 amount_satoshis: 1_000, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], revoked_htlc_timeout.lock_time.to_consensus_u32() - nodes[0].best_block_info().1); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], @@ -1881,7 +1879,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2 amount_satoshis: 1_000, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[0], &revoked_htlc_timeout_claim); assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable { @@ -1892,7 +1890,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[0], &revoked_to_self_claim); assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { @@ -1905,14 +1903,14 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 2, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2); test_spendable_output(&nodes[0], &revoked_htlc_timeout_claim, false); connect_blocks(&nodes[0], 1); test_spendable_output(&nodes[0], &revoked_to_self_claim, false); - assert_eq!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new()); + assert_eq!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances(), Vec::new()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -1920,7 +1918,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { connect_blocks(&nodes[0], 6); connect_blocks(&nodes[0], 6); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } #[test] @@ -2025,7 +2023,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { payment_hash: claimed_payment_hash, outbound_payment: true, }]), - 
sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &as_revoked_txn[0]); check_closed_broadcast!(nodes[1], true); @@ -2063,7 +2061,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2 amount_satoshis: 3_000, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); // Confirm A's HTLC-Success transaction which presumably raced B's claim, causing B to create a // new claim. @@ -2119,7 +2117,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { // anyway, so its not a big change. amount_satoshis: 3_000, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[1], 5); test_spendable_output(&nodes[1], &as_revoked_txn[0], false); @@ -2135,7 +2133,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { // anyway, so its not a big change. amount_satoshis: 3_000, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &claim_txn_2[0]); let htlc_2_claim_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -2150,7 +2148,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { confirmation_height: htlc_2_claim_maturity, source: BalanceSource::CounterpartyForceClosed, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); connect_blocks(&nodes[1], 5); test_spendable_output(&nodes[1], &claim_txn_2[0], false); @@ -2161,7 +2159,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1 amount_satoshis: 4_000, }]), - sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transactions(&nodes[1], &[&claim_txn[0], &claim_txn_2[1]]); let rest_claim_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -2179,7 +2177,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { source: BalanceSource::CounterpartyForceClosed, }, ], - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()); + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // We shouldn't fail the payment until we spend the output @@ -2198,7 +2196,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { panic!("unexpected event"); } } - 
assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); // Ensure that even if we connect more blocks, potentially replaying the entire chain if we're // using `ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks`, we don't get new @@ -2206,7 +2204,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { connect_blocks(&nodes[1], 6); connect_blocks(&nodes[1], 6); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances().is_empty()); } #[test] @@ -2287,16 +2285,16 @@ fn do_test_claimable_balance_correct_while_payment_pending(outbound_payment: boo if outbound_payment { assert_eq!( 1_000_000 - commitment_tx_fee - anchor_outputs_value - 4_001 /* Note HTLC timeout amount of 4001 sats is excluded for outbound payment */, - nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint_ab).unwrap().get_claimable_balances().iter().map( + nodes[0].chain_monitor.chain_monitor.get_monitor(chan_ab_id).unwrap().get_claimable_balances().iter().map( |x| x.claimable_amount_satoshis()).sum()); } else { assert_eq!( 0u64, - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint_ab).unwrap().get_claimable_balances().iter().map( + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_ab_id).unwrap().get_claimable_balances().iter().map( |x| x.claimable_amount_satoshis()).sum()); assert_eq!( 1_000_000 - commitment_tx_fee - anchor_outputs_value /* Note HTLC timeout amount of 4000 sats is included */, - nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint_bc).unwrap().get_claimable_balances().iter().map( + nodes[1].chain_monitor.chain_monitor.get_monitor(chan_bc_id).unwrap().get_claimable_balances().iter().map( |x| x.claimable_amount_satoshis()).sum()); } } @@ -2411,7 +2409,7 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; - let commitment_txn = get_local_commitment_txn!(&nodes[0], &chan_id); + let commitment_txn = get_local_commitment_txn!(&nodes[0], chan_id); assert_eq!(commitment_txn.len(), if anchors { 1 /* commitment tx only */} else { 2 /* commitment and htlc timeout tx */ }); check_spends!(&commitment_txn[0], &funding_tx); mine_transaction(&nodes[0], &commitment_txn[0]); @@ -2769,7 +2767,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() { bob_persister, bob_chain_monitor, bob_deserialized ); for chan_id in [chan_a.2, chan_b.2].iter() { - let monitor = get_monitor!(nodes[1], chan_id); + let monitor = get_monitor!(nodes[1], *chan_id); for payment in [payment_a, payment_b, payment_c, payment_d].iter() { monitor.provide_payment_preimage_unsafe_legacy( &payment.1, &payment.0, &node_cfgs[1].tx_broadcaster, diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 5c45498753b..f2525e63d4c 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -716,8 +716,8 @@ fn test_0conf_channel_with_async_monitor() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); 
- let (outpoint, _, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap(); + let (_, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(bs_raa.channel_id, latest_update).unwrap(); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 039fe85197f..04c549712cc 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -25,6 +25,7 @@ use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; use crate::util::config::UserConfig; +use bitcoin::hashes::Hash; use bitcoin::hash_types::BlockHash; use crate::prelude::*; @@ -256,11 +257,16 @@ fn test_manager_serialize_deserialize_events() { node_a.node.funding_transaction_generated(temporary_channel_id, node_b.node.get_our_node_id(), tx.clone()).unwrap(); check_added_monitors!(node_a, 0); - node_b.node.handle_funding_created(node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())); + let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()); + let channel_id = ChannelId::v1_from_funding_txid( + funding_created.funding_txid.as_byte_array(), funding_created.funding_output_index + ); + + node_b.node.handle_funding_created(node_a.node.get_our_node_id(), &funding_created); { let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } @@ -269,7 +275,7 @@ fn test_manager_serialize_deserialize_events() { { let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); + assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead @@ -368,7 +374,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut node_0_stale_monitors_serialized = Vec::new(); for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] { let mut writer = test_utils::TestVecWriter(Vec::new()); - get_monitor!(nodes[0], chan_id_iter).write(&mut writer).unwrap(); + get_monitor!(nodes[0], *chan_id_iter).write(&mut writer).unwrap(); node_0_stale_monitors_serialized.push(writer.0); } @@ -386,7 +392,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { // nodes[3]) let mut node_0_monitors_serialized = Vec::new(); for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] { - node_0_monitors_serialized.push(get_monitor!(nodes[0], chan_id_iter).encode()); + node_0_monitors_serialized.push(get_monitor!(nodes[0], *chan_id_iter).encode()); } logger = test_utils::TestLogger::new(); @@ -450,8 +456,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { assert!(nodes_0_read.is_empty()); for monitor in node_0_monitors.drain(..) 
{ - let funding_outpoint = monitor.get_funding_txo().0; - assert_eq!(nodes[0].chain_monitor.watch_channel(funding_outpoint, monitor), + assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.channel_id(), monitor), Ok(ChannelMonitorUpdateStatus::Completed)); check_added_monitors!(nodes[0], 1); } @@ -835,10 +840,10 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // monitors and ChannelManager, for use later, if we don't want to persist both monitors. let mut original_monitor = test_utils::TestVecWriter(Vec::new()); if !persist_both_monitors { - for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() { + for channel_id in nodes[3].chain_monitor.chain_monitor.list_monitors() { if channel_id == chan_id_not_persisted { assert!(original_monitor.0.is_empty()); - nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap(); + nodes[3].chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().write(&mut original_monitor).unwrap(); } } } @@ -855,18 +860,18 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // crashed in between the two persistence calls - using one old ChannelMonitor and one new one, // with the old ChannelManager. let mut updated_monitor = test_utils::TestVecWriter(Vec::new()); - for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() { + for channel_id in nodes[3].chain_monitor.chain_monitor.list_monitors() { if channel_id == chan_id_persisted { assert!(updated_monitor.0.is_empty()); - nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap(); + nodes[3].chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().write(&mut updated_monitor).unwrap(); } } // If `persist_both_monitors` is set, get the second monitor here as well if persist_both_monitors { - for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() { + for channel_id in nodes[3].chain_monitor.chain_monitor.list_monitors() { if channel_id == chan_id_not_persisted { assert!(original_monitor.0.is_empty()); - nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap(); + nodes[3].chain_monitor.chain_monitor.get_monitor(channel_id).unwrap().write(&mut original_monitor).unwrap(); } } } diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index f6e1cd74f02..c068d7f12d6 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -1368,8 +1368,8 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { expect_channel_shutdown_state!(nodes[0], chan_id, ChannelShutdownState::ResolvingHTLCs); assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); - let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); let as_raa_closing_signed = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_raa_closing_signed.len(), 2); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 48bdfe2324a..7ebbf1c5734 100644 --- a/lightning/src/util/test_utils.rs +++ 
b/lightning/src/util/test_utils.rs @@ -387,9 +387,9 @@ impl SignerProvider for OnlyReadsKeysInterface { } pub struct TestChainMonitor<'a> { - pub added_monitors: Mutex)>>, + pub added_monitors: Mutex)>>, pub monitor_updates: Mutex>>, - pub latest_monitor_update_id: Mutex>, + pub latest_monitor_update_id: Mutex>, pub chain_monitor: ChainMonitor< TestChannelSigner, &'a TestChainSource, @@ -432,14 +432,14 @@ impl<'a> TestChainMonitor<'a> { } pub fn complete_sole_pending_chan_update(&self, channel_id: &ChannelId) { - let (outpoint, _, latest_update) = + let (_, latest_update) = self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone(); - self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap(); + self.chain_monitor.channel_monitor_updated(*channel_id, latest_update).unwrap(); } } impl<'a> chain::Watch for TestChainMonitor<'a> { fn watch_channel( - &self, funding_txo: OutPoint, monitor: ChannelMonitor, + &self, channel_id: ChannelId, monitor: ChannelMonitor, ) -> Result { // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... @@ -452,23 +452,21 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { .unwrap() .1; assert!(new_monitor == monitor); - self.latest_monitor_update_id.lock().unwrap().insert( - monitor.channel_id(), - (funding_txo, monitor.get_latest_update_id(), monitor.get_latest_update_id()), - ); - self.added_monitors.lock().unwrap().push((funding_txo, monitor)); - self.chain_monitor.watch_channel(funding_txo, new_monitor) + self.latest_monitor_update_id + .lock() + .unwrap() + .insert(channel_id, (monitor.get_latest_update_id(), monitor.get_latest_update_id())); + self.added_monitors.lock().unwrap().push((channel_id, monitor)); + self.chain_monitor.watch_channel(channel_id, new_monitor) } fn update_channel( - &self, funding_txo: OutPoint, update: &ChannelMonitorUpdate, + &self, channel_id: ChannelId, update: &ChannelMonitorUpdate, ) -> chain::ChannelMonitorUpdateStatus { // Every monitor update should survive roundtrip let mut w = TestVecWriter(Vec::new()); update.write(&mut w).unwrap(); assert_eq!(ChannelMonitorUpdate::read(&mut &w.0[..]).unwrap(), *update); - let channel_id = - update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); self.monitor_updates .lock() @@ -491,11 +489,11 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { self.latest_monitor_update_id .lock() .unwrap() - .insert(channel_id, (funding_txo, update.update_id, update.update_id)); - let update_res = self.chain_monitor.update_channel(funding_txo, update); + .insert(channel_id, (update.update_id, update.update_id)); + let update_res = self.chain_monitor.update_channel(channel_id, update); // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... 
- let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap(); + let monitor = self.chain_monitor.get_monitor(channel_id).unwrap(); w.0.clear(); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, ChannelMonitor)>::read( @@ -510,7 +508,7 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { } else { assert!(new_monitor == *monitor); } - self.added_monitors.lock().unwrap().push((funding_txo, new_monitor)); + self.added_monitors.lock().unwrap().push((channel_id, new_monitor)); update_res } From e8854f9a0713c63967b5ec1aae3fbb7e4dcc8eaf Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 16 Jan 2025 15:23:16 -0800 Subject: [PATCH 2/3] Use channel ID over funding outpoint to track monitors in ChannelManager As motivated by the previous commit, we do some of the same work here at the `ChannelManager` level instead. Unfortunately, we still need to track the funding outpoint to support downgrades by writing the in flight monitor updates as two separate TLVs, one using the channel IDs, and the other using the funding outpoints. Once we are willing to stop supporting downgrades past this version, we can fully drop it. --- fuzz/src/chanmon_consistency.rs | 13 +-- lightning/src/ln/channelmanager.rs | 133 +++++++++++++--------- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/reload_tests.rs | 4 +- 4 files changed, 88 insertions(+), 66 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 1f07f868439..59918139af5 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -173,7 +173,6 @@ struct LatestMonitorState { /// A set of (monitor id, serialized `ChannelMonitor`)s which we're currently "persisting", /// from LDK's perspective. pending_monitors: Vec<(u64, Vec)>, - funding_txo: OutPoint, } struct TestChainMonitor { @@ -219,14 +218,12 @@ impl chain::Watch for TestChainMonitor { let mut ser = VecWriter(Vec::new()); monitor.write(&mut ser).unwrap(); let monitor_id = monitor.get_latest_update_id(); - let funding_txo = monitor.get_funding_txo().0; let res = self.chain_monitor.watch_channel(channel_id, monitor); let state = match res { Ok(chain::ChannelMonitorUpdateStatus::Completed) => LatestMonitorState { persisted_monitor_id: monitor_id, persisted_monitor: ser.0, pending_monitors: Vec::new(), - funding_txo, }, Ok(chain::ChannelMonitorUpdateStatus::InProgress) => { panic!("The test currently doesn't test initial-persistence via the async pipeline") @@ -716,7 +713,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap(); for (channel_id, mut prev_state) in old_monitors.drain() { monitors.insert( - prev_state.funding_txo, + channel_id, <(BlockHash, ChannelMonitor)>::read( &mut Cursor::new(&prev_state.persisted_monitor), (&*$keys_manager, &*$keys_manager), @@ -731,8 +728,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); } let mut monitor_refs = new_hash_map(); - for (outpoint, monitor) in monitors.iter() { - monitor_refs.insert(*outpoint, monitor); + for (channel_id, monitor) in monitors.iter() { + monitor_refs.insert(*channel_id, monitor); } let read_args = ChannelManagerReadArgs { @@ -755,9 +752,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .1, chain_monitor.clone(), ); - for (_, mon) in monitors.drain() { + for (channel_id, mon) in monitors.drain() { assert_eq!( - 
chain_monitor.chain_monitor.watch_channel(mon.channel_id(), mon), + chain_monitor.chain_monitor.watch_channel(channel_id, mon), Ok(ChannelMonitorUpdateStatus::Completed) ); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 24d9dad1067..29a0c3df677 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1334,7 +1334,8 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// for broadcast messages, where ordering isn't as strict). pub(super) pending_msg_events: Vec, /// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the - /// user but which have not yet completed. + /// user but which have not yet completed. We still keep the funding outpoint around to backfill + /// the legacy TLV field to support downgrading. /// /// Note that the channel may no longer exist. For example if the channel was closed but we /// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update @@ -1346,7 +1347,7 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// where we complete one [`ChannelMonitorUpdate`] (but there are more pending as background /// events) but we conclude all pending [`ChannelMonitorUpdate`]s have completed and its safe /// to run post-completion actions. - in_flight_monitor_updates: BTreeMap>, + in_flight_monitor_updates: BTreeMap)>, /// Map from a specific channel to some action(s) that should be taken when all pending /// [`ChannelMonitorUpdate`]s for the channel complete updating. /// @@ -1389,7 +1390,7 @@ impl PeerState where SP::Target: SignerProvider { if require_disconnected && self.is_connected { return false } - for (_, updates) in self.in_flight_monitor_updates.iter() { + for (_, updates) in self.in_flight_monitor_updates.values() { if !updates.is_empty() { return false; } @@ -3309,8 +3310,8 @@ macro_rules! handle_new_monitor_update { $chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident, _internal_outer, $completed: expr ) => { { - $in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo) - .or_insert_with(Vec::new); + $in_flight_updates = &mut $peer_state.in_flight_monitor_updates.entry($chan_id) + .or_insert_with(|| ($funding_txo, Vec::new())).1; // During startup, we push monitor updates as background events through to here in // order to replay updates that were in-flight when we shut down. Thus, we have to // filter for uniqueness here. @@ -4031,11 +4032,11 @@ where // `MonitorUpdateCompletionAction`s. // TODO: If we do the `in_flight_monitor_updates.is_empty()` check in // `locked_close_channel` we can skip the locks here. - if let Some(funding_txo) = shutdown_res.channel_funding_txo { + if shutdown_res.channel_funding_txo.is_some() { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) { let mut peer_state = peer_state_mtx.lock().unwrap(); - if peer_state.in_flight_monitor_updates.get(&funding_txo).map(|l| l.is_empty()).unwrap_or(true) { + if peer_state.in_flight_monitor_updates.get(&shutdown_res.channel_id).map(|(_, updates)| updates.is_empty()).unwrap_or(true) { let update_actions = peer_state.monitor_update_blocked_actions .remove(&shutdown_res.channel_id).unwrap_or(Vec::new()); @@ -7593,7 +7594,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state = &mut *peer_state_lock; let remaining_in_flight = - if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) { + if let Some((_, pending)) = peer_state.in_flight_monitor_updates.get_mut(channel_id) { pending.retain(|upd| upd.update_id > highest_applied_update_id); pending.len() } else { 0 }; @@ -12931,12 +12932,15 @@ where pending_claiming_payments = None; } - let mut in_flight_monitor_updates: Option>> = None; + let mut legacy_in_flight_monitor_updates: Option>> = None; + let mut in_flight_monitor_updates: Option>> = None; for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) { - for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() { + for (channel_id, (funding_txo, updates)) in peer_state.in_flight_monitor_updates.iter() { if !updates.is_empty() { - if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); } - in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates); + legacy_in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()) + .insert((counterparty_id, funding_txo), updates); + in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()) + .insert((counterparty_id, channel_id), updates); } } } @@ -12951,11 +12955,12 @@ where (7, self.fake_scid_rand_bytes, required), (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option), (9, htlc_purposes, required_vec), - (10, in_flight_monitor_updates, option), + (10, legacy_in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), (14, decode_update_add_htlcs_opt, option), (15, self.inbound_payment_id_secret, required), + (17, in_flight_monitor_updates, required), }); Ok(()) @@ -13091,8 +13096,7 @@ where /// runtime settings which were stored when the ChannelManager was serialized. pub default_config: UserConfig, - /// A map from channel funding outpoints to ChannelMonitors for those channels (ie - /// value.context.get_funding_txo() should be the key). + /// A map from channel IDs to ChannelMonitors for those channels. /// /// If a monitor is inconsistent with the channel state during deserialization the channel will /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This @@ -13103,7 +13107,7 @@ where /// this struct. 
/// /// This is not exported to bindings users because we have no HashMap bindings - pub channel_monitors: HashMap::EcdsaSigner>>, + pub channel_monitors: HashMap::EcdsaSigner>>, } impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> @@ -13132,7 +13136,7 @@ where entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, message_router, logger, default_config, channel_monitors: hash_map_from_iter( - channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }) + channel_monitors.drain(..).map(|monitor| { (monitor.channel_id(), monitor) }) ), } } @@ -13195,22 +13199,21 @@ where let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; - let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); + let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize); for _ in 0..channel_count { let mut channel: FundedChannel = FundedChannel::read(reader, ( &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config) ))?; let logger = WithChannelContext::from(&args.logger, &channel.context, None); + let channel_id = channel.context.channel_id(); + channel_id_set.insert(channel_id); let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?; - funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id()); - funding_txo_set.insert(funding_txo.clone()); - if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { + if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) { if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || @@ -13293,9 +13296,7 @@ where if let Some(short_channel_id) = channel.context.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); } - if let Some(funding_txo) = channel.context.get_funding_txo() { - outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id()); - } + outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id()); per_peer_state.entry(channel.context.get_counterparty_node_id()) .or_insert_with(|| Mutex::new(empty_peer_state())) .get_mut().unwrap() @@ -13325,8 +13326,8 @@ where } } - for (funding_txo, monitor) in args.channel_monitors.iter() { - if !funding_txo_set.contains(funding_txo) { + for (channel_id, monitor) in args.channel_monitors.iter() { + if !channel_id_set.contains(channel_id) { let mut should_queue_fc_update = false; if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() { // If the ChannelMonitor had any updates, we may need to update it 
further and @@ -13364,10 +13365,11 @@ where updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], channel_id: Some(monitor.channel_id()), }; + let funding_txo = monitor.get_funding_txo().0; if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() { let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, - funding_txo: *funding_txo, + funding_txo, channel_id, update: monitor_update, }; @@ -13380,7 +13382,7 @@ where // generate a `ChannelMonitorUpdate` for it aside from this // `ChannelForceClosed` one. monitor_update.update_id = u64::MAX; - close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update))); + close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update))); } } } @@ -13480,7 +13482,10 @@ where let mut pending_claiming_payments = Some(new_hash_map()); let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); let mut events_override = None; - let mut in_flight_monitor_updates: Option>> = None; + let mut legacy_in_flight_monitor_updates: Option>> = None; + // We use this one over the legacy since they represent the same data, just with a different + // key. We still need to read the legacy one as it's an even TLV. + let mut in_flight_monitor_updates: Option>> = None; let mut decode_update_add_htlcs: Option>> = None; let mut inbound_payment_id_secret = None; read_tlv_fields!(reader, { @@ -13493,11 +13498,12 @@ where (7, fake_scid_rand_bytes, option), (8, events_override, option), (9, claimable_htlc_purposes, optional_vec), - (10, in_flight_monitor_updates, option), + (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), (14, decode_update_add_htlcs, option), (15, inbound_payment_id_secret, option), + (17, in_flight_monitor_updates, required), }); let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); if fake_scid_rand_bytes.is_none() { @@ -13531,6 +13537,27 @@ where } let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap()); + // Handle transitioning from the legacy TLV to the new one on upgrades. + if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates { + // We should never serialize an empty map. + if legacy_in_flight_upds.is_empty() { + return Err(DecodeError::InvalidValue); + } + if in_flight_monitor_updates.is_none() { + let in_flight_upds = in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()); + for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds { + // All channels with legacy in flight monitor updates are v1 channels. + let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); + in_flight_upds.insert((counterparty_node_id, channel_id), updates); + } + } else { + // We should never serialize an empty map. + if in_flight_monitor_updates.as_ref().unwrap().is_empty() { + return Err(DecodeError::InvalidValue); + } + } + } + // We have to replay (or skip, if they were completed after we wrote the `ChannelManager`) // each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. 
After doing so, we have to // check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we @@ -13544,11 +13571,12 @@ where // Because the actual handling of the in-flight updates is the same, it's macro'ized here: let mut pending_background_events = Vec::new(); macro_rules! handle_in_flight_updates { - ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr, - $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr + ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $monitor: expr, + $peer_state: expr, $logger: expr, $channel_info_log: expr ) => { { let mut max_in_flight_update_id = 0; $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); + let funding_txo = $monitor.get_funding_txo().0; for update in $chan_in_flight_upds.iter() { log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", update.update_id, $channel_info_log, &$monitor.channel_id()); @@ -13556,7 +13584,7 @@ where pending_background_events.push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id: $counterparty_node_id, - funding_txo: $funding_txo, + funding_txo: funding_txo, channel_id: $monitor.channel_id(), update: update.clone(), }); @@ -13575,7 +13603,7 @@ where .and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v)) .or_insert(max_in_flight_update_id); } - if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() { + if $peer_state.in_flight_monitor_updates.insert($monitor.channel_id(), (funding_txo, $chan_in_flight_upds)).is_some() { log_error!($logger, "Duplicate in-flight monitor update set for the same channel!"); return Err(DecodeError::InvalidValue); } @@ -13586,28 +13614,27 @@ where for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() { let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for chan in peer_state.channel_by_id.values() { + for (chan_id, chan) in peer_state.channel_by_id.iter() { if let Some(funded_chan) = chan.as_funded() { let logger = WithChannelContext::from(&args.logger, &funded_chan.context, None); // Channels that were persisted have to be funded, otherwise they should have been // discarded. - let funding_txo = funded_chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?; - let monitor = args.channel_monitors.get(&funding_txo) + let monitor = args.channel_monitors.get(chan_id) .expect("We already checked for monitor presence when loading channels"); let mut max_in_flight_update_id = monitor.get_latest_update_id(); if let Some(in_flight_upds) = &mut in_flight_monitor_updates { - if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) { + if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, *chan_id)) { max_in_flight_update_id = cmp::max(max_in_flight_update_id, handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds, - funding_txo, monitor, peer_state, logger, "")); + monitor, peer_state, logger, "")); } } if funded_chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id { // If the channel is ahead of the monitor, return DangerousValue: log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! 
This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight", - funded_chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id); + chan_id, monitor.get_latest_update_id(), max_in_flight_update_id); log_error!(logger, " but the ChannelManager is at update_id {}.", funded_chan.get_latest_unblocked_monitor_update_id()); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); @@ -13625,10 +13652,9 @@ where } if let Some(in_flight_upds) = in_flight_monitor_updates { - for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds { - let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied(); - let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None); - if let Some(monitor) = args.channel_monitors.get(&funding_txo) { + for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds { + let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); + if let Some(monitor) = args.channel_monitors.get(&channel_id) { // Now that we've removed all the in-flight monitor updates for channels that are // still open, we need to replay any monitor updates that are for closed channels, // creating the neccessary peer_state entries as we go. @@ -13636,12 +13662,11 @@ where Mutex::new(empty_peer_state()) }); let mut peer_state = peer_state_mutex.lock().unwrap(); - handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, - funding_txo, monitor, peer_state, logger, "closed "); + handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, monitor, + peer_state, logger, "closed "); } else { log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! 
This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) = - channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } ); + log_error!(logger, " The ChannelMonitor for channel {} is missing.", channel_id); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); @@ -13692,9 +13717,9 @@ where .and_modify(|v| *v = cmp::max(update.update_id, *v)) .or_insert(update.update_id); } - let in_flight_updates = per_peer_state.in_flight_monitor_updates - .entry(*funding_txo) - .or_insert_with(Vec::new); + let in_flight_updates = &mut per_peer_state.in_flight_monitor_updates + .entry(*channel_id) + .or_insert_with(|| (*funding_txo, Vec::new())).1; debug_assert!(!in_flight_updates.iter().any(|upd| upd == update)); in_flight_updates.push(update.clone()); } @@ -13818,7 +13843,7 @@ where .filter_map(|(htlc_source, (htlc, preimage_opt))| { if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source { if let Some(payment_preimage) = preimage_opt { - let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.outpoint); + let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.channel_id); // Note that for channels which have gone to chain, // `get_all_current_outbound_htlcs` is never pruned and always returns // a constant set until the monitor is removed/archived. Thus, we @@ -14306,7 +14331,7 @@ where ); } } - if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) { + if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) { // Note that this is unsafe as we no longer require the // `ChannelMonitor`s to be re-persisted prior to this // `ChannelManager` being persisted after we get started running. 
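To make the downgrade path above easier to follow: when only the legacy TLV (keyed by counterparty and funding outpoint) is present on read, the new channel-ID-keyed map is rebuilt by deriving each v1 channel ID from its funding outpoint. The following is a minimal, self-contained sketch of that key migration; the `OutPoint`, `ChannelId`, and `Update` types here are simplified stand-ins for LDK's real types, and the derivation only approximates `ChannelId::v1_from_funding_outpoint` for illustration.

    use std::collections::HashMap;

    // Simplified stand-ins for LDK's `OutPoint`, `ChannelId` and `ChannelMonitorUpdate`
    // (illustrative only; not the real definitions).
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct OutPoint { txid: [u8; 32], index: u16 }

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct ChannelId([u8; 32]);

    impl ChannelId {
        // Approximates the BOLT2 v1 derivation used by `ChannelId::v1_from_funding_outpoint`:
        // the funding txid with the output index XORed into its last two bytes.
        fn v1_from_funding_outpoint(outpoint: OutPoint) -> Self {
            let mut id = outpoint.txid;
            id[30] ^= (outpoint.index >> 8) as u8;
            id[31] ^= (outpoint.index & 0xff) as u8;
            ChannelId(id)
        }
    }

    type Update = u64; // stand-in for a `ChannelMonitorUpdate`

    // Rebuild the channel-ID-keyed in-flight-update map from the legacy outpoint-keyed
    // TLV, as done when reading state written by an older version. All channels with
    // legacy in-flight updates are v1 channels, so the channel ID is derivable.
    fn migrate_in_flight_updates(
        legacy: HashMap<OutPoint, Vec<Update>>,
    ) -> HashMap<ChannelId, Vec<Update>> {
        legacy
            .into_iter()
            .map(|(funding_txo, updates)| (ChannelId::v1_from_funding_outpoint(funding_txo), updates))
            .collect()
    }

    fn main() {
        let mut legacy = HashMap::new();
        let funding_txo = OutPoint { txid: [0x11; 32], index: 1 };
        legacy.insert(funding_txo, vec![7, 8]);
        let migrated = migrate_in_flight_updates(legacy);
        assert!(migrated.contains_key(&ChannelId::v1_from_funding_outpoint(funding_txo)));
    }

Writing both TLVs on serialization, as this patch does, means an older ChannelManager that only understands the outpoint-keyed map can still deserialize state produced by this version, while newer readers prefer the channel-ID-keyed map.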
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index fdb44646d59..4ec2fa78a66 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -712,7 +712,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { { let mut channel_monitors = new_hash_map(); for monitor in deserialized_monitors.iter() { - channel_monitors.insert(monitor.get_funding_txo().0, monitor); + channel_monitors.insert(monitor.channel_id(), monitor); } let scorer = RwLock::new(test_utils::TestScorer::new()); @@ -1138,7 +1138,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User let (_, node_deserialized) = { let mut channel_monitors = new_hash_map(); for monitor in monitors_read.iter() { - assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none()); + assert!(channel_monitors.insert(monitor.channel_id(), monitor).is_none()); } <(BlockHash, TestChannelManager<'b, 'c>)>::read(&mut node_read, ChannelManagerReadArgs { default_config, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 04c549712cc..09e9c8278c2 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -432,7 +432,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { chain_monitor: nodes[0].chain_monitor, tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, - channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), + channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -450,7 +450,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { chain_monitor: nodes[0].chain_monitor, tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, - channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), + channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), }).unwrap(); nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); From 717db82540f3737f2ae187a6bfc4c307c09e07c7 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 31 Jan 2025 13:13:53 -0800 Subject: [PATCH 3/3] Remove funding script return value from ChannelMonitor::get_funding_txo It's not needed except for one place where we can just access the field directly, so we can avoid the allocation on each call. For API consumers, they may still access the funding script via `ChannelMonitor::get_funding_script`. --- lightning/src/chain/chainmonitor.rs | 16 ++++++++-------- lightning/src/chain/channelmonitor.rs | 25 +++++++++++++++++-------- lightning/src/ln/channelmanager.rs | 12 ++++++------ lightning/src/util/persist.rs | 14 +++++++------- 4 files changed, 38 insertions(+), 29 deletions(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 32571f57c25..120420894e9 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -342,7 +342,7 @@ where C::Target: chain::Filter, // `ChannelMonitorUpdate` after a channel persist for a channel with the same // `latest_update_id`. 
let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); - let funding_txo = monitor.get_funding_txo().0; + let funding_txo = monitor.get_funding_txo(); match self.persister.update_persisted_channel(funding_txo, None, monitor) { ChannelMonitorUpdateStatus::Completed => log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data", @@ -516,7 +516,7 @@ where C::Target: chain::Filter, // Completed event. return Ok(()); } - let funding_txo = monitor_data.monitor.get_funding_txo().0; + let funding_txo = monitor_data.monitor.get_funding_txo(); self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { funding_txo, channel_id, @@ -535,7 +535,7 @@ where C::Target: chain::Filter, let monitors = self.monitors.read().unwrap(); let monitor = &monitors.get(&channel_id).unwrap().monitor; let counterparty_node_id = monitor.get_counterparty_node_id(); - let funding_txo = monitor.get_funding_txo().0; + let funding_txo = monitor.get_funding_txo(); self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { funding_txo, channel_id, @@ -642,7 +642,7 @@ where C::Target: chain::Filter, have_monitors_to_prune = true; } if needs_persistence { - self.persister.update_persisted_channel(monitor_holder.monitor.get_funding_txo().0, None, &monitor_holder.monitor); + self.persister.update_persisted_channel(monitor_holder.monitor.get_funding_txo(), None, &monitor_holder.monitor); } } if have_monitors_to_prune { @@ -655,7 +655,7 @@ where C::Target: chain::Filter, "Archiving fully resolved ChannelMonitor for channel ID {}", channel_id ); - self.persister.archive_persisted_channel(monitor_holder.monitor.get_funding_txo().0); + self.persister.archive_persisted_channel(monitor_holder.monitor.get_funding_txo()); false } else { true @@ -769,7 +769,7 @@ where C::Target: chain::Filter, log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor)); let update_id = monitor.get_latest_update_id(); let mut pending_monitor_updates = Vec::new(); - let persist_res = self.persister.persist_new_channel(monitor.get_funding_txo().0, &monitor); + let persist_res = self.persister.persist_new_channel(monitor.get_funding_txo(), &monitor); match persist_res { ChannelMonitorUpdateStatus::InProgress => { log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor)); @@ -825,7 +825,7 @@ where C::Target: chain::Filter, let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger); let update_id = update.update_id; - let funding_txo = monitor.get_funding_txo().0; + let funding_txo = monitor.get_funding_txo(); let persist_res = if update_res.is_err() { // Even if updating the monitor returns an error, the monitor's state will // still be changed. Therefore, we should persist the updated monitor despite the error. 
@@ -878,7 +878,7 @@ where C::Target: chain::Filter,
 		for monitor_state in self.monitors.read().unwrap().values() {
 			let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
 			if monitor_events.len() > 0 {
-				let monitor_funding_txo = monitor_state.monitor.get_funding_txo().0;
+				let monitor_funding_txo = monitor_state.monitor.get_funding_txo();
 				let monitor_channel_id = monitor_state.monitor.channel_id();
 				let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
 				pending_monitor_events.push((monitor_funding_txo, monitor_channel_id, monitor_events, counterparty_node_id));
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 8788fbb894d..6da544655f3 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1574,8 +1574,13 @@ impl ChannelMonitor {
 	}
 
 	/// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
-	pub fn get_funding_txo(&self) -> (OutPoint, ScriptBuf) {
-		self.inner.lock().unwrap().get_funding_txo().clone()
+	pub fn get_funding_txo(&self) -> OutPoint {
+		self.inner.lock().unwrap().get_funding_txo()
+	}
+
+	/// Gets the funding script of the channel this ChannelMonitor is monitoring for.
+	pub fn get_funding_script(&self) -> ScriptBuf {
+		self.inner.lock().unwrap().get_funding_script()
 	}
 
 	/// Gets the channel_id of the channel this ChannelMonitor is monitoring for.
@@ -1599,8 +1604,8 @@ impl ChannelMonitor {
 	{
 		let lock = self.inner.lock().unwrap();
 		let logger = WithChannelMonitor::from_impl(logger, &*lock, None);
-		log_trace!(&logger, "Registering funding outpoint {}", &lock.get_funding_txo().0);
-		filter.register_tx(&lock.get_funding_txo().0.txid, &lock.get_funding_txo().1);
+		log_trace!(&logger, "Registering funding outpoint {}", &lock.get_funding_txo());
+		filter.register_tx(&lock.funding_info.0.txid, &lock.funding_info.1);
 		for (txid, outputs) in lock.get_outputs_to_watch().iter() {
 			for (index, script_pubkey) in outputs.iter() {
 				assert!(*index <= u16::MAX as u32);
@@ -2061,7 +2066,7 @@ impl ChannelMonitor {
 					"Thought we were done claiming funds, but claimable_balances now has entries");
 				log_error!(logger, "WARNING: LDK thought it was done claiming all the available funds in the ChannelMonitor for channel {}, but later decided it had more to claim. This is potentially an important bug in LDK, please report it at https://github.com/lightningdevkit/rust-lightning/issues/new",
-					inner.get_funding_txo().0);
+					inner.get_funding_txo());
 				inner.balances_empty_height = None;
 				(false, true)
 			},
@@ -2070,7 +2075,7 @@ impl ChannelMonitor {
 				// None. It is set to the current block height.
 				log_debug!(logger, "ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks",
-					inner.get_funding_txo().0, ARCHIVAL_DELAY_BLOCKS);
+					inner.get_funding_txo(), ARCHIVAL_DELAY_BLOCKS);
 				inner.balances_empty_height = Some(current_height);
 				(false, true)
 			},
@@ -3296,8 +3301,12 @@ impl ChannelMonitorImpl {
 		self.latest_update_id
 	}
 
-	fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
-		&self.funding_info
+	fn get_funding_txo(&self) -> OutPoint {
+		self.funding_info.0
+	}
+
+	fn get_funding_script(&self) -> ScriptBuf {
+		self.funding_info.1.clone()
 	}
 
 	pub fn channel_id(&self) -> ChannelId {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 29a0c3df677..65c41bc531a 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -8131,7 +8131,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			},
 			hash_map::Entry::Vacant(e) => {
 				let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
-				match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
+				match outpoint_to_peer_lock.entry(monitor.get_funding_txo()) {
 					hash_map::Entry::Occupied(_) => {
 						fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
 					},
@@ -13365,7 +13365,7 @@ where
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 					channel_id: Some(monitor.channel_id()),
 				};
-				let funding_txo = monitor.get_funding_txo().0;
+				let funding_txo = monitor.get_funding_txo();
 				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
 					let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 						counterparty_node_id,
@@ -13576,7 +13576,7 @@ where
 			) => { {
 				let mut max_in_flight_update_id = 0;
 				$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
-				let funding_txo = $monitor.get_funding_txo().0;
+				let funding_txo = $monitor.get_funding_txo();
 				for update in $chan_in_flight_upds.iter() {
 					log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
 						update.update_id, $channel_info_log, &$monitor.channel_id());
@@ -13741,7 +13741,7 @@ where
 		// We only rebuild the pending payments map if we were most recently serialized by
 		// 0.0.102+
 		for (_, monitor) in args.channel_monitors.iter() {
-			let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
+			let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo());
 			if counterparty_opt.is_none() {
 				for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
 					let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
@@ -13821,7 +13821,7 @@ where
 							// `ChannelMonitor` is removed.
 							let compl_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-								channel_funding_outpoint: monitor.get_funding_txo().0,
+								channel_funding_outpoint: monitor.get_funding_txo(),
 								channel_id: monitor.channel_id(),
 								counterparty_node_id: path.hops[0].pubkey,
 							};
@@ -13934,7 +13934,7 @@ where
 							// channel_id -> peer map entry).
 							counterparty_opt.is_none(),
 							counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
-							monitor.get_funding_txo().0, monitor.channel_id()))
+							monitor.get_funding_txo(), monitor.channel_id()))
 						} else { None }
 					} else {
 						// If it was an outbound payment, we've handled it above - if a preimage
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index d0e9e01a482..52ba75a8c73 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -356,8 +356,8 @@ where
 			(&*entropy_source, &*signer_provider),
 		) {
 			Ok((block_hash, channel_monitor)) => {
-				if channel_monitor.get_funding_txo().0.txid != txid
-					|| channel_monitor.get_funding_txo().0.index != index
+				if channel_monitor.get_funding_txo().txid != txid
+					|| channel_monitor.get_funding_txo().index != index
 				{
 					return Err(io::Error::new(
 						io::ErrorKind::InvalidData,
@@ -617,8 +617,8 @@ where
 			(&*self.entropy_source, &*self.signer_provider),
 		) {
 			Ok((blockhash, channel_monitor)) => {
-				if channel_monitor.get_funding_txo().0.txid != outpoint.txid
-					|| channel_monitor.get_funding_txo().0.index != outpoint.index
+				if channel_monitor.get_funding_txo().txid != outpoint.txid
+					|| channel_monitor.get_funding_txo().index != outpoint.index
 				{
 					log_error!(
 						self.logger,
@@ -1219,7 +1219,7 @@ mod tests {
 			// check that when we read it, we got the right update id
 			assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-			let monitor_name = MonitorName::from(mon.get_funding_txo().0);
+			let monitor_name = MonitorName::from(mon.get_funding_txo());
 			assert_eq!(
 				persister_0
 					.kv_store
@@ -1238,7 +1238,7 @@ mod tests {
 		assert_eq!(persisted_chan_data_1.len(), 1);
 		for (_, mon) in persisted_chan_data_1.iter() {
 			assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-			let monitor_name = MonitorName::from(mon.get_funding_txo().0);
+			let monitor_name = MonitorName::from(mon.get_funding_txo());
 			assert_eq!(
 				persister_1
 					.kv_store
@@ -1433,7 +1433,7 @@ mod tests {
 		// Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
 		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
 		let (_, monitor) = &persisted_chan_data[0];
-		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
+		let monitor_name = MonitorName::from(monitor.get_funding_txo());
 		persister_0
 			.kv_store
 			.write(