diff --git a/Cargo.lock b/Cargo.lock index 956e9de623..049c16b688 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1706,6 +1706,7 @@ checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" name = "db_common" version = "0.1.0" dependencies = [ + "hex 0.4.2", "log 0.4.14", "rusqlite", "sql-builder", @@ -3751,21 +3752,22 @@ dependencies = [ [[package]] name = "lightning" -version = "0.0.104" +version = "0.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0113e6b5a55b7ead30fb0a992b787e69a0551fa15b7eed93c99490eb018ab793" +checksum = "a10e7439623b293d000fc875627704210d8d0ff5b7badbb689f2a3d51afc618f" dependencies = [ "bitcoin", - "hex 0.3.2", + "hex 0.4.2", "regex 0.1.80", "secp256k1", ] [[package]] name = "lightning-background-processor" -version = "0.0.104" +version = "0.0.105" dependencies = [ "bitcoin", + "db_common", "lightning", "lightning-invoice", "lightning-persister", @@ -3773,9 +3775,9 @@ dependencies = [ [[package]] name = "lightning-invoice" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2531e38818b3872b9acbcc3ff499f93962d2ff23ee0d05fc386ef70b993a38a0" +checksum = "ce661ad7182c2b258d1506e264095d2b4c71436a91b72834d7fb87b7e92e06c0" dependencies = [ "bech32", "bitcoin_hashes", @@ -3786,9 +3788,9 @@ dependencies = [ [[package]] name = "lightning-net-tokio" -version = "0.0.104" +version = "0.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1851ab90c2739929526c56e06d6b07def4508bfce70f190757fde2de648d1d9c" +checksum = "e6eb1bcdf0a29baccb13d514e4a8761c9d8038ffd107f27e45182f0177f5e007" dependencies = [ "bitcoin", "lightning", @@ -3797,15 +3799,20 @@ dependencies = [ [[package]] name = "lightning-persister" -version = "0.0.104" +version = "0.0.105" dependencies = [ "async-trait", "bitcoin", "common", + "db_common", + "derive_more", + "hex 0.4.2", "libc", "lightning", "parking_lot 0.12.0", + "rand 0.7.3", "secp256k1", + "serde", "serde_json", "winapi 0.3.9", ] diff --git a/mm2src/coins/Cargo.toml b/mm2src/coins/Cargo.toml index 769b69e442..d7ed4df1dc 100644 --- a/mm2src/coins/Cargo.toml +++ b/mm2src/coins/Cargo.toml @@ -50,9 +50,9 @@ jsonrpc-core = "8.0.1" keys = { path = "../mm2_bitcoin/keys" } lazy_static = "1.4" libc = "0.2" -lightning = "0.0.104" +lightning = "0.0.105" lightning-background-processor = { path = "lightning_background_processor" } -lightning-invoice = "0.12.0" +lightning-invoice = "0.13.0" metrics = "0.12" mocktopus = "0.7.0" num-traits = "0.2" @@ -93,7 +93,7 @@ web-sys = { version = "0.3.55", features = ["console", "Headers", "Request", "Re [target.'cfg(not(target_arch = "wasm32"))'.dependencies] dirs = { version = "1" } lightning-persister = { path = "lightning_persister" } -lightning-net-tokio = "0.0.104" +lightning-net-tokio = "0.0.105" rust-ini = { version = "0.13" } rustls = { version = "0.19", features = ["dangerous_configuration"] } tokio = { version = "1.7" } diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index 54134acda2..72cee7bfa3 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -48,7 +48,7 @@ use std::collections::HashMap; use std::ops::Deref; use std::path::PathBuf; use std::str::FromStr; -use std::sync::atomic::{AtomicU64, Ordering as AtomicOrderding}; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use std::sync::{Arc, Mutex}; use web3::types::{Action as TraceAction, BlockId, BlockNumber, Bytes, CallRequest, FilterBuilder, Log, Trace, TraceFilterBuilder, Transaction 
as Web3Transaction, TransactionId}; @@ -3100,13 +3100,13 @@ impl MmCoin for EthCoin { }) } - fn required_confirmations(&self) -> u64 { self.required_confirmations.load(AtomicOrderding::Relaxed) } + fn required_confirmations(&self) -> u64 { self.required_confirmations.load(AtomicOrdering::Relaxed) } fn requires_notarization(&self) -> bool { false } fn set_required_confirmations(&self, confirmations: u64) { self.required_confirmations - .store(confirmations, AtomicOrderding::Relaxed); + .store(confirmations, AtomicOrdering::Relaxed); } fn set_requires_notarization(&self, _requires_nota: bool) { diff --git a/mm2src/coins/lightning.rs b/mm2src/coins/lightning.rs index a8e9c7b298..13a7cf54ae 100644 --- a/mm2src/coins/lightning.rs +++ b/mm2src/coins/lightning.rs @@ -1,4 +1,12 @@ -use super::{lp_coinfind_or_err, MmCoinEnum}; +pub mod ln_conf; +pub mod ln_errors; +mod ln_events; +mod ln_p2p; +mod ln_platform; +mod ln_serialization; +mod ln_utils; + +use super::{lp_coinfind_or_err, DerivationMethod, MmCoinEnum}; use crate::utxo::rpc_clients::UtxoRpcClientEnum; use crate::utxo::utxo_common::{big_decimal_from_sat_unsigned, UtxoTxBuilder}; use crate::utxo::{sat_from_big_decimal, BlockchainNetwork, FeePolicy, UtxoCommonOps, UtxoTxGenerationOps}; @@ -8,34 +16,35 @@ use crate::{BalanceFut, CoinBalance, FeeApproxStage, FoundSwapTxSpend, HistorySy UtxoStandardCoin, ValidateAddressResult, ValidatePaymentInput, WithdrawError, WithdrawFut, WithdrawRequest}; use async_trait::async_trait; use bigdecimal::BigDecimal; -use bitcoin::blockdata::script::Script; -use bitcoin::hash_types::Txid; use bitcoin::hashes::Hash; use bitcoin_hashes::sha256::Hash as Sha256; use chain::TransactionOutput; +use common::executor::spawn; use common::ip_addr::myipaddr; -use common::log::LogOnError; +use common::log::{LogOnError, LogState}; use common::mm_ctx::MmArc; use common::mm_error::prelude::*; use common::mm_number::MmNumber; -use common::{async_blocking, log}; +use common::{async_blocking, calc_total_pages, log, now_ms, ten, PagingOptionsEnum}; use futures::{FutureExt, TryFutureExt}; use futures01::Future; use keys::{AddressHashEnum, KeyPair}; use lightning::chain::channelmonitor::Balance; -use lightning::chain::keysinterface::KeysInterface; -use lightning::chain::keysinterface::KeysManager; -use lightning::chain::WatchedOutput; +use lightning::chain::keysinterface::{KeysInterface, KeysManager, Recipient}; +use lightning::chain::Access; use lightning::ln::channelmanager::{ChannelDetails, MIN_FINAL_CLTV_EXPIRY}; -use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::ln::{PaymentHash, PaymentPreimage}; +use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph}; use lightning::util::config::UserConfig; use lightning_background_processor::BackgroundProcessor; -use lightning_invoice::utils::create_invoice_from_channelmanager; -use lightning_invoice::Invoice; -use lightning_persister::storage::{NodesAddressesMapShared, Storage}; -use lightning_persister::FilesystemPersister; -use ln_conf::{ChannelOptions, LightningCoinConf, PlatformCoinConfirmations}; -use ln_connections::{connect_to_node, ConnectToNodeRes}; +use lightning_invoice::payment; +use lightning_invoice::utils::{create_invoice_from_channelmanager, DefaultRouter}; +use lightning_invoice::{Invoice, InvoiceDescription}; +use lightning_persister::storage::{ClosedChannelsFilter, DbStorage, FileSystemStorage, HTLCStatus, + NodesAddressesMapShared, PaymentInfo, PaymentType, PaymentsFilter, Scorer, + SqlChannelDetails}; +use 
lightning_persister::LightningPersister; +use ln_conf::{ChannelOptions, LightningCoinConf, LightningProtocolConf, PlatformCoinConfirmations}; use ln_errors::{ClaimableBalancesError, ClaimableBalancesResult, CloseChannelError, CloseChannelResult, ConnectToNodeError, ConnectToNodeResult, EnableLightningError, EnableLightningResult, GenerateInvoiceError, GenerateInvoiceResult, GetChannelDetailsError, GetChannelDetailsResult, @@ -43,8 +52,10 @@ use ln_errors::{ClaimableBalancesError, ClaimableBalancesResult, CloseChannelErr ListPaymentsError, ListPaymentsResult, OpenChannelError, OpenChannelResult, SendPaymentError, SendPaymentResult}; use ln_events::LightningEventHandler; +use ln_p2p::{connect_to_node, ConnectToNodeRes, PeerManager}; +use ln_platform::{h256_json_from_txid, Platform}; use ln_serialization::{InvoiceForRPC, NodeAddress, PublicKeyForRPC}; -use ln_utils::{ChainMonitor, ChannelManager, InvoicePayer, PeerManager}; +use ln_utils::{ChainMonitor, ChannelManager}; use parking_lot::Mutex as PaMutex; use rpc::v1::types::{Bytes as BytesJson, H256 as H256Json}; use script::{Builder, TransactionInputSigner}; @@ -56,75 +67,14 @@ use std::collections::{HashMap, HashSet}; use std::fmt; use std::net::SocketAddr; use std::str::FromStr; -use std::sync::Arc; - -pub mod ln_conf; -mod ln_connections; -pub mod ln_errors; -mod ln_events; -mod ln_rpc; -mod ln_serialization; -pub mod ln_utils; - -type PaymentsMap = HashMap; -type PaymentsMapShared = Arc>; - -pub struct PlatformFields { - pub platform_coin: UtxoStandardCoin, - /// Main/testnet/signet/regtest Needed for lightning node to know which network to connect to - pub network: BlockchainNetwork, - // Default fees to and confirmation targets to be used for FeeEstimator. Default fees are used when the call for - // estimate_fee_sat fails. - pub default_fees_and_confirmations: PlatformCoinConfirmations, - // This cache stores the transactions that the LN node has interest in. - pub registered_txs: PaMutex>>, - // This cache stores the outputs that the LN node has interest in. - pub registered_outputs: PaMutex>, - // This cache stores transactions to be broadcasted once the other node accepts the channel - pub unsigned_funding_txs: PaMutex>, -} - -impl PlatformFields { - pub fn add_tx(&self, txid: &Txid, script_pubkey: &Script) { - let mut registered_txs = self.registered_txs.lock(); - match registered_txs.get_mut(txid) { - Some(h) => { - h.insert(script_pubkey.clone()); - }, - None => { - let mut script_pubkeys = HashSet::new(); - script_pubkeys.insert(script_pubkey.clone()); - registered_txs.insert(*txid, script_pubkeys); - }, - } - } - - pub fn add_output(&self, output: WatchedOutput) { - let mut registered_outputs = self.registered_outputs.lock(); - registered_outputs.push(output); - } -} - -#[derive(Clone, Serialize)] -#[serde(rename_all = "lowercase")] -pub enum HTLCStatus { - Pending, - Succeeded, - Failed, -} +use std::sync::{Arc, Mutex}; -#[derive(Clone)] -pub struct PaymentInfo { - pub preimage: Option, - pub secret: Option, - pub status: HTLCStatus, - pub amt_msat: Option, - pub fee_paid_msat: Option, -} +type Router = DefaultRouter, Arc>; +type InvoicePayer = payment::InvoicePayer, Router, Arc>, Arc, E>; #[derive(Clone)] pub struct LightningCoin { - pub platform_fields: Arc, + pub platform: Arc, pub conf: LightningCoinConf, /// The lightning node peer manager that takes care of connecting to peers, etc.. pub peer_manager: Arc, @@ -140,13 +90,10 @@ pub struct LightningCoin { /// The lightning node invoice payer. 
pub invoice_payer: Arc>>, /// The lightning node persister that takes care of writing/reading data from storage. - pub persister: Arc, - /// The mutex storing the inbound payments info. - pub inbound_payments: PaymentsMapShared, - /// The mutex storing the outbound payments info. - pub outbound_payments: PaymentsMapShared, - /// The mutex storing the addresses of the nodes that are used for reconnecting. - pub nodes_addresses: NodesAddressesMapShared, + pub persister: Arc, + /// The mutex storing the addresses of the nodes that the lightning node has open channels with, + /// these addresses are used for reconnecting. + pub open_channels_nodes: NodesAddressesMapShared, } impl fmt::Debug for LightningCoin { @@ -154,8 +101,9 @@ impl fmt::Debug for LightningCoin { } impl LightningCoin { - fn platform_coin(&self) -> &UtxoStandardCoin { &self.platform_fields.platform_coin } + fn platform_coin(&self) -> &UtxoStandardCoin { &self.platform.coin } + #[inline] fn my_node_id(&self) -> String { self.channel_manager.get_our_node_id().to_string() } fn get_balance_msat(&self) -> (u64, u64) { @@ -174,19 +122,31 @@ impl LightningCoin { }) } - fn pay_invoice(&self, invoice: Invoice) -> SendPaymentResult<(PaymentHash, PaymentInfo)> { + fn pay_invoice(&self, invoice: Invoice) -> SendPaymentResult { self.invoice_payer .pay_invoice(&invoice) .map_to_mm(|e| SendPaymentError::PaymentError(format!("{:?}", e)))?; let payment_hash = PaymentHash((*invoice.payment_hash()).into_inner()); + let payment_type = PaymentType::OutboundPayment { + destination: *invoice.payee_pub_key().unwrap_or(&invoice.recover_payee_pub_key()), + }; + let description = match invoice.description() { + InvoiceDescription::Direct(d) => d.to_string(), + InvoiceDescription::Hash(h) => hex::encode(h.0.into_inner()), + }; let payment_secret = Some(*invoice.payment_secret()); - Ok((payment_hash, PaymentInfo { + Ok(PaymentInfo { + payment_hash, + payment_type, + description, preimage: None, secret: payment_secret, - status: HTLCStatus::Pending, amt_msat: invoice.amount_milli_satoshis(), fee_paid_msat: None, - })) + status: HTLCStatus::Pending, + created_at: now_ms() / 1000, + last_updated: now_ms() / 1000, + }) } fn keysend( @@ -194,7 +154,7 @@ impl LightningCoin { destination: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32, - ) -> SendPaymentResult<(PaymentHash, PaymentInfo)> { + ) -> SendPaymentResult { if final_cltv_expiry_delta < MIN_FINAL_CLTV_EXPIRY { return MmError::err(SendPaymentError::CLTVExpiryError( final_cltv_expiry_delta, @@ -206,14 +166,68 @@ impl LightningCoin { .pay_pubkey(destination, payment_preimage, amount_msat, final_cltv_expiry_delta) .map_to_mm(|e| SendPaymentError::PaymentError(format!("{:?}", e)))?; let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + let payment_type = PaymentType::OutboundPayment { destination }; - Ok((payment_hash, PaymentInfo { + Ok(PaymentInfo { + payment_hash, + payment_type, + description: "".into(), preimage: Some(payment_preimage), secret: None, - status: HTLCStatus::Pending, amt_msat: Some(amount_msat), fee_paid_msat: None, - })) + status: HTLCStatus::Pending, + created_at: now_ms() / 1000, + last_updated: now_ms() / 1000, + }) + } + + async fn get_open_channels_by_filter( + &self, + filter: Option, + paging: PagingOptionsEnum, + limit: usize, + ) -> ListChannelsResult { + let mut total_open_channels: Vec = self + .channel_manager + .list_channels() + .into_iter() + .map(From::from) + .collect(); + + total_open_channels.sort_by(|a, b| 
a.rpc_channel_id.cmp(&b.rpc_channel_id)); + + let open_channels_filtered = if let Some(ref f) = filter { + total_open_channels + .into_iter() + .filter(|chan| apply_open_channel_filter(chan, f)) + .collect() + } else { + total_open_channels + }; + + let offset = match paging { + PagingOptionsEnum::PageNumber(page) => (page.get() - 1) * limit, + PagingOptionsEnum::FromId(rpc_id) => open_channels_filtered + .iter() + .position(|x| x.rpc_channel_id == rpc_id) + .map(|pos| pos + 1) + .unwrap_or_default(), + }; + + let total = open_channels_filtered.len(); + + let channels = if offset + limit <= total { + open_channels_filtered[offset..offset + limit].to_vec() + } else { + open_channels_filtered[offset..].to_vec() + }; + + Ok(GetOpenChannelsResult { + channels, + skipped: offset, + total, + }) } } @@ -387,7 +401,7 @@ impl MarketCoinOps for LightningCoin { Box::new(self.platform_coin().my_balance().map(|res| res.spendable)) } - fn platform_ticker(&self) -> &str { self.platform_fields.platform_coin.ticker() } + fn platform_ticker(&self) -> &str { self.platform_coin().ticker() } fn send_raw_tx(&self, _tx: &str) -> Box<dyn Future<Item = String, Error = String> + Send> { Box::new(futures01::future::err( @@ -426,7 +440,13 @@ impl MarketCoinOps for LightningCoin { fn current_block(&self) -> Box<dyn Future<Item = u64, Error = String> + Send> { Box::new(futures01::future::ok(0)) } - fn display_priv_key(&self) -> Result<String, String> { Ok(self.keys_manager.get_node_secret().to_string()) } + fn display_priv_key(&self) -> Result<String, String> { + Ok(self + .keys_manager + .get_node_secret(Recipient::Node) + .map_err(|_| "Unsupported recipient".to_string())? + .to_string()) + } // Todo: Implement this when implementing swaps for lightning as it is used only for swaps fn min_tx_amount(&self) -> BigDecimal { unimplemented!() } @@ -522,6 +542,161 @@ impl MmCoin for LightningCoin { fn is_coin_protocol_supported(&self, _info: &Option<Vec<u8>>) -> bool { unimplemented!() } } +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct LightningParams { + // The listening port for the p2p LN node + pub listening_port: u16, + // Printable human-readable string to describe this node to other users. + pub node_name: [u8; 32], + // Node's RGB color. This is used for showing the node in a network graph with the desired color. + pub node_color: [u8; 3], + // Invoice Payer is initialized while starting the lightning node, and it requires the number of payment retries that + // it should do before considering a payment failed or partially failed. If not provided, the number of retries will be 5, + // as this is a good default value. + pub payment_retries: Option<usize>, + // Node's backup path for channels and other data that requires backup. 
+ pub backup_path: Option, +} + +pub async fn start_lightning( + ctx: &MmArc, + platform_coin: UtxoStandardCoin, + protocol_conf: LightningProtocolConf, + conf: LightningCoinConf, + params: LightningParams, +) -> EnableLightningResult { + // Todo: add support for Hardware wallets for funding transactions and spending spendable outputs (channel closing transactions) + if let DerivationMethod::HDWallet(_) = platform_coin.as_ref().derivation_method { + return MmError::err(EnableLightningError::UnsupportedMode( + "'start_lightning'".into(), + "iguana".into(), + )); + } + + let platform = Arc::new(Platform::new( + platform_coin.clone(), + protocol_conf.network.clone(), + protocol_conf.confirmations, + )); + + // Initialize the Logger + let logger = ctx.log.0.clone(); + + // Initialize Persister + let persister = ln_utils::init_persister(ctx, platform.clone(), conf.ticker.clone(), params.backup_path).await?; + + // Initialize the KeysManager + let keys_manager = ln_utils::init_keys_manager(ctx)?; + + // Initialize the NetGraphMsgHandler. This is used for providing routes to send payments over + let network_graph = Arc::new(persister.get_network_graph(protocol_conf.network.into()).await?); + spawn(ln_utils::persist_network_graph_loop( + persister.clone(), + network_graph.clone(), + )); + let network_gossip = Arc::new(NetGraphMsgHandler::new( + network_graph.clone(), + None::>, + logger.clone(), + )); + + // Initialize the ChannelManager + let (chain_monitor, channel_manager) = ln_utils::init_channel_manager( + platform.clone(), + logger.clone(), + persister.clone(), + keys_manager.clone(), + conf.clone().into(), + ) + .await?; + + // Initialize the PeerManager + let peer_manager = ln_p2p::init_peer_manager( + ctx.clone(), + params.listening_port, + channel_manager.clone(), + network_gossip.clone(), + keys_manager + .get_node_secret(Recipient::Node) + .map_to_mm(|_| EnableLightningError::UnsupportedMode("'start_lightning'".into(), "local node".into()))?, + logger.clone(), + ) + .await?; + + // Initialize the event handler + let event_handler = Arc::new(ln_events::LightningEventHandler::new( + // It's safe to use unwrap here for now until implementing Native Client for Lightning + platform.clone(), + channel_manager.clone(), + keys_manager.clone(), + persister.clone(), + )); + + // Initialize routing Scorer + let scorer = Arc::new(Mutex::new(persister.get_scorer(network_graph.clone()).await?)); + spawn(ln_utils::persist_scorer_loop(persister.clone(), scorer.clone())); + + // Create InvoicePayer + let router = DefaultRouter::new(network_graph, logger.clone()); + let invoice_payer = Arc::new(InvoicePayer::new( + channel_manager.clone(), + router, + scorer, + logger.clone(), + event_handler, + payment::RetryAttempts(params.payment_retries.unwrap_or(5)), + )); + + // Persist ChannelManager + // Note: if the ChannelManager is not persisted properly to disk, there is risk of channels force closing the next time LN starts up + let channel_manager_persister = persister.clone(); + let persist_channel_manager_callback = + move |node: &ChannelManager| channel_manager_persister.persist_manager(&*node); + + // Start Background Processing. Runs tasks periodically in the background to keep LN node operational. + // InvoicePayer will act as our event handler as it handles some of the payments related events before + // delegating it to LightningEventHandler. 
+ let background_processor = Arc::new(BackgroundProcessor::start( + persist_channel_manager_callback, + invoice_payer.clone(), + chain_monitor.clone(), + channel_manager.clone(), + Some(network_gossip), + peer_manager.clone(), + logger, + )); + + // If channel_nodes_data file exists, read channel nodes data from disk and reconnect to channel nodes/peers if possible. + let open_channels_nodes = Arc::new(PaMutex::new( + ln_utils::get_open_channels_nodes_addresses(persister.clone(), channel_manager.clone()).await?, + )); + spawn(ln_p2p::connect_to_nodes_loop( + open_channels_nodes.clone(), + peer_manager.clone(), + )); + + // Broadcast Node Announcement + spawn(ln_p2p::ln_node_announcement_loop( + channel_manager.clone(), + params.node_name, + params.node_color, + params.listening_port, + )); + + Ok(LightningCoin { + platform, + conf, + peer_manager, + background_processor, + channel_manager, + chain_monitor, + keys_manager, + invoice_payer, + persister, + open_channels_nodes, + }) +} + #[derive(Deserialize)] pub struct ConnectToNodeRequest { pub coin: String, @@ -542,11 +717,14 @@ pub async fn connect_to_lightning_node(ctx: MmArc, req: ConnectToNodeRequest) -> // If a node that we have an open channel with changed its address, "connect_to_lightning_node" // can be used to reconnect to the new address while saving this new address for reconnections. - if let ConnectToNodeRes::ConnectedSuccessfully(_, _) = res { - if let Entry::Occupied(mut entry) = ln_coin.nodes_addresses.lock().entry(node_pubkey) { + if let ConnectToNodeRes::ConnectedSuccessfully { .. } = res { + if let Entry::Occupied(mut entry) = ln_coin.open_channels_nodes.lock().entry(node_pubkey) { entry.insert(node_addr); } - ln_coin.persister.save_nodes_addresses(ln_coin.nodes_addresses).await?; + ln_coin + .persister + .save_nodes_addresses(ln_coin.open_channels_nodes) + .await?; } Ok(res.to_string()) @@ -576,7 +754,7 @@ pub struct OpenChannelRequest { #[derive(Serialize)] pub struct OpenChannelResponse { - temporary_channel_id: H256Json, + rpc_channel_id: u64, node_address: NodeAddress, } @@ -647,38 +825,134 @@ pub async fn open_channel(ctx: MmArc, req: OpenChannelRequest) -> OpenChannelRes user_config.own_channel_config.our_htlc_minimum_msat = min; } + let rpc_channel_id = ln_coin.persister.get_last_channel_rpc_id().await? 
as u64 + 1; + let temp_channel_id = async_blocking(move || { channel_manager - .create_channel(node_pubkey, amount_in_sat, push_msat, 1, Some(user_config)) + .create_channel(node_pubkey, amount_in_sat, push_msat, rpc_channel_id, Some(user_config)) .map_to_mm(|e| OpenChannelError::FailureToOpenChannel(node_pubkey.to_string(), format!("{:?}", e))) }) .await?; { - let mut unsigned_funding_txs = ln_coin.platform_fields.unsigned_funding_txs.lock(); - unsigned_funding_txs.insert(temp_channel_id, unsigned); + let mut unsigned_funding_txs = ln_coin.platform.unsigned_funding_txs.lock(); + unsigned_funding_txs.insert(rpc_channel_id, unsigned); } + let pending_channel_details = SqlChannelDetails::new( + rpc_channel_id, + temp_channel_id, + node_pubkey, + true, + user_config.channel_options.announced_channel, + ); + // Saving node data to reconnect to it on restart - ln_coin.nodes_addresses.lock().insert(node_pubkey, node_addr); - ln_coin.persister.save_nodes_addresses(ln_coin.nodes_addresses).await?; + ln_coin.open_channels_nodes.lock().insert(node_pubkey, node_addr); + ln_coin + .persister + .save_nodes_addresses(ln_coin.open_channels_nodes) + .await?; + + ln_coin.persister.add_channel_to_db(pending_channel_details).await?; Ok(OpenChannelResponse { - temporary_channel_id: temp_channel_id.into(), + rpc_channel_id, node_address: req.node_address, }) } #[derive(Deserialize)] -pub struct ListChannelsRequest { +pub struct OpenChannelsFilter { + pub channel_id: Option, + pub counterparty_node_id: Option, + pub funding_tx: Option, + pub from_funding_value_sats: Option, + pub to_funding_value_sats: Option, + pub is_outbound: Option, + pub from_balance_msat: Option, + pub to_balance_msat: Option, + pub from_outbound_capacity_msat: Option, + pub to_outbound_capacity_msat: Option, + pub from_inbound_capacity_msat: Option, + pub to_inbound_capacity_msat: Option, + pub confirmed: Option, + pub is_usable: Option, + pub is_public: Option, +} + +fn apply_open_channel_filter(channel_details: &ChannelDetailsForRPC, filter: &OpenChannelsFilter) -> bool { + let is_channel_id = filter.channel_id.is_none() || Some(&channel_details.channel_id) == filter.channel_id.as_ref(); + + let is_counterparty_node_id = filter.counterparty_node_id.is_none() + || Some(&channel_details.counterparty_node_id) == filter.counterparty_node_id.as_ref(); + + let is_funding_tx = filter.funding_tx.is_none() || channel_details.funding_tx == filter.funding_tx; + + let is_from_funding_value_sats = + Some(&channel_details.funding_tx_value_sats) >= filter.from_funding_value_sats.as_ref(); + + let is_to_funding_value_sats = filter.to_funding_value_sats.is_none() + || Some(&channel_details.funding_tx_value_sats) <= filter.to_funding_value_sats.as_ref(); + + let is_outbound = filter.is_outbound.is_none() || Some(&channel_details.is_outbound) == filter.is_outbound.as_ref(); + + let is_from_balance_msat = Some(&channel_details.balance_msat) >= filter.from_balance_msat.as_ref(); + + let is_to_balance_msat = + filter.to_balance_msat.is_none() || Some(&channel_details.balance_msat) <= filter.to_balance_msat.as_ref(); + + let is_from_outbound_capacity_msat = + Some(&channel_details.outbound_capacity_msat) >= filter.from_outbound_capacity_msat.as_ref(); + + let is_to_outbound_capacity_msat = filter.to_outbound_capacity_msat.is_none() + || Some(&channel_details.outbound_capacity_msat) <= filter.to_outbound_capacity_msat.as_ref(); + + let is_from_inbound_capacity_msat = + Some(&channel_details.inbound_capacity_msat) >= filter.from_inbound_capacity_msat.as_ref(); 
+ + let is_to_inbound_capacity_msat = filter.to_inbound_capacity_msat.is_none() + || Some(&channel_details.inbound_capacity_msat) <= filter.to_inbound_capacity_msat.as_ref(); + + let is_confirmed = filter.confirmed.is_none() || Some(&channel_details.confirmed) == filter.confirmed.as_ref(); + + let is_usable = filter.is_usable.is_none() || Some(&channel_details.is_usable) == filter.is_usable.as_ref(); + + let is_public = filter.is_public.is_none() || Some(&channel_details.is_public) == filter.is_public.as_ref(); + + is_channel_id + && is_counterparty_node_id + && is_funding_tx + && is_from_funding_value_sats + && is_to_funding_value_sats + && is_outbound + && is_from_balance_msat + && is_to_balance_msat + && is_from_outbound_capacity_msat + && is_to_outbound_capacity_msat + && is_from_inbound_capacity_msat + && is_to_inbound_capacity_msat + && is_confirmed + && is_usable + && is_public +} + +#[derive(Deserialize)] +pub struct ListOpenChannelsRequest { pub coin: String, + pub filter: Option, + #[serde(default = "ten")] + limit: usize, + #[serde(default)] + paging_options: PagingOptionsEnum, } -#[derive(Serialize)] +#[derive(Clone, Serialize)] pub struct ChannelDetailsForRPC { - pub channel_id: String, - pub counterparty_node_id: String, - pub funding_tx: Option, + pub rpc_channel_id: u64, + pub channel_id: H256Json, + pub counterparty_node_id: PublicKeyForRPC, + pub funding_tx: Option, pub funding_tx_output_index: Option, pub funding_tx_value_sats: u64, /// True if the channel was initiated (and thus funded) by us. @@ -699,9 +973,10 @@ pub struct ChannelDetailsForRPC { impl From for ChannelDetailsForRPC { fn from(details: ChannelDetails) -> ChannelDetailsForRPC { ChannelDetailsForRPC { - channel_id: hex::encode(details.channel_id), - counterparty_node_id: details.counterparty.node_id.to_string(), - funding_tx: details.funding_txo.map(|tx| tx.txid.to_string()), + rpc_channel_id: details.user_channel_id, + channel_id: details.channel_id.into(), + counterparty_node_id: PublicKeyForRPC(details.counterparty.node_id), + funding_tx: details.funding_txo.map(|tx| h256_json_from_txid(tx.txid)), funding_tx_output_index: details.funding_txo.map(|tx| tx.index), funding_tx_value_sats: details.channel_value_satoshis, is_outbound: details.is_outbound, @@ -715,36 +990,101 @@ impl From for ChannelDetailsForRPC { } } +struct GetOpenChannelsResult { + pub channels: Vec, + pub skipped: usize, + pub total: usize, +} + #[derive(Serialize)] -pub struct ListChannelsResponse { - channels: Vec, +pub struct ListOpenChannelsResponse { + open_channels: Vec, + limit: usize, + skipped: usize, + total: usize, + total_pages: usize, + paging_options: PagingOptionsEnum, } -pub async fn list_channels(ctx: MmArc, req: ListChannelsRequest) -> ListChannelsResult { +pub async fn list_open_channels_by_filter( + ctx: MmArc, + req: ListOpenChannelsRequest, +) -> ListChannelsResult { let coin = lp_coinfind_or_err(&ctx, &req.coin).await?; let ln_coin = match coin { MmCoinEnum::LightningCoin(c) => c, _ => return MmError::err(ListChannelsError::UnsupportedCoin(coin.ticker().to_string())), }; - let channels = ln_coin - .channel_manager - .list_channels() - .into_iter() - .map(From::from) - .collect(); - Ok(ListChannelsResponse { channels }) + let result = ln_coin + .get_open_channels_by_filter(req.filter, req.paging_options.clone(), req.limit) + .await?; + + Ok(ListOpenChannelsResponse { + open_channels: result.channels, + limit: req.limit, + skipped: result.skipped, + total: result.total, + total_pages: calc_total_pages(result.total, 
req.limit), + paging_options: req.paging_options, + }) +} + +#[derive(Deserialize)] +pub struct ListClosedChannelsRequest { + pub coin: String, + pub filter: Option, + #[serde(default = "ten")] + limit: usize, + #[serde(default)] + paging_options: PagingOptionsEnum, +} + +#[derive(Serialize)] +pub struct ListClosedChannelsResponse { + closed_channels: Vec, + limit: usize, + skipped: usize, + total: usize, + total_pages: usize, + paging_options: PagingOptionsEnum, +} + +pub async fn list_closed_channels_by_filter( + ctx: MmArc, + req: ListClosedChannelsRequest, +) -> ListChannelsResult { + let coin = lp_coinfind_or_err(&ctx, &req.coin).await?; + let ln_coin = match coin { + MmCoinEnum::LightningCoin(c) => c, + _ => return MmError::err(ListChannelsError::UnsupportedCoin(coin.ticker().to_string())), + }; + let closed_channels_res = ln_coin + .persister + .get_closed_channels_by_filter(req.filter, req.paging_options.clone(), req.limit) + .await?; + + Ok(ListClosedChannelsResponse { + closed_channels: closed_channels_res.channels, + limit: req.limit, + skipped: closed_channels_res.skipped, + total: closed_channels_res.total, + total_pages: calc_total_pages(closed_channels_res.total, req.limit), + paging_options: req.paging_options, + }) } #[derive(Deserialize)] pub struct GetChannelDetailsRequest { pub coin: String, - pub channel_id: H256Json, + pub rpc_channel_id: u64, } #[derive(Serialize)] -pub struct GetChannelDetailsResponse { - channel_details: ChannelDetailsForRPC, +#[serde(tag = "status", content = "details")] +pub enum GetChannelDetailsResponse { + Open(ChannelDetailsForRPC), + Closed(SqlChannelDetails), } pub async fn get_channel_details( @@ -756,15 +1096,23 @@ pub async fn get_channel_details( MmCoinEnum::LightningCoin(c) => c, _ => return MmError::err(GetChannelDetailsError::UnsupportedCoin(coin.ticker().to_string())), }; - let channel_details = ln_coin + let channel_details = match ln_coin .channel_manager .list_channels() .into_iter() - .find(|chan| chan.channel_id == req.channel_id.0) - .ok_or(GetChannelDetailsError::NoSuchChannel(req.channel_id))? - .into(); + .find(|chan| chan.user_channel_id == req.rpc_channel_id) + { + Some(details) => GetChannelDetailsResponse::Open(details.into()), + None => GetChannelDetailsResponse::Closed( + ln_coin + .persister + .get_channel_from_db(req.rpc_channel_id) + .await? 
+ .ok_or(GetChannelDetailsError::NoSuchChannel(req.rpc_channel_id))?, + ), + }; - Ok(GetChannelDetailsResponse { channel_details }) + Ok(channel_details) } #[derive(Deserialize)] @@ -790,8 +1138,8 @@ pub async fn generate_invoice( MmCoinEnum::LightningCoin(c) => c, _ => return MmError::err(GenerateInvoiceError::UnsupportedCoin(coin.ticker().to_string())), }; - let nodes_addresses = ln_coin.nodes_addresses.lock().clone(); - for (node_pubkey, node_addr) in nodes_addresses { + let open_channels_nodes = ln_coin.open_channels_nodes.lock().clone(); + for (node_pubkey, node_addr) in open_channels_nodes { connect_to_node(node_pubkey, node_addr, ln_coin.peer_manager.clone()) .await .error_log_with_msg(&format!( @@ -799,16 +1147,30 @@ pub async fn generate_invoice( node_pubkey )); } - let network = ln_coin.platform_fields.network.clone().into(); + let network = ln_coin.platform.network.clone().into(); let invoice = create_invoice_from_channelmanager( &ln_coin.channel_manager, ln_coin.keys_manager, network, req.amount_in_msat, - req.description, + req.description.clone(), )?; + let payment_hash = invoice.payment_hash().into_inner(); + let payment_info = PaymentInfo { + payment_hash: PaymentHash(payment_hash), + payment_type: PaymentType::InboundPayment, + description: req.description, + preimage: None, + secret: Some(*invoice.payment_secret()), + amt_msat: req.amount_in_msat, + fee_paid_msat: None, + status: HTLCStatus::Pending, + created_at: now_ms() / 1000, + last_updated: now_ms() / 1000, + }; + ln_coin.persister.add_or_update_payment_in_db(payment_info).await?; Ok(GenerateInvoiceResponse { - payment_hash: invoice.payment_hash().into_inner().into(), + payment_hash: payment_hash.into(), invoice: invoice.into(), }) } @@ -848,8 +1210,8 @@ pub async fn send_payment(ctx: MmArc, req: SendPaymentReq) -> SendPaymentResult< MmCoinEnum::LightningCoin(c) => c, _ => return MmError::err(SendPaymentError::UnsupportedCoin(coin.ticker().to_string())), }; - let nodes_addresses = ln_coin.nodes_addresses.lock().clone(); - for (node_pubkey, node_addr) in nodes_addresses { + let open_channels_nodes = ln_coin.open_channels_nodes.lock().clone(); + for (node_pubkey, node_addr) in open_channels_nodes { connect_to_node(node_pubkey, node_addr, ln_coin.peer_manager.clone()) .await .error_log_with_msg(&format!( @@ -857,7 +1219,7 @@ pub async fn send_payment(ctx: MmArc, req: SendPaymentReq) -> SendPaymentResult< node_pubkey )); } - let (payment_hash, payment_info) = match req.payment { + let payment_info = match req.payment { Payment::Invoice { invoice } => ln_coin.pay_invoice(invoice.into())?, Payment::Keysend { destination, @@ -865,65 +1227,146 @@ pub async fn send_payment(ctx: MmArc, req: SendPaymentReq) -> SendPaymentResult< expiry, } => ln_coin.keysend(destination.into(), amount_in_msat, expiry)?, }; - let mut outbound_payments = ln_coin.outbound_payments.lock(); - outbound_payments.insert(payment_hash, payment_info); + ln_coin + .persister + .add_or_update_payment_in_db(payment_info.clone()) + .await?; Ok(SendPaymentResponse { - payment_hash: payment_hash.0.into(), + payment_hash: payment_info.payment_hash.0.into(), }) } +#[derive(Deserialize)] +pub struct PaymentsFilterForRPC { + pub payment_type: Option, + pub description: Option, + pub status: Option, + pub from_amount_msat: Option, + pub to_amount_msat: Option, + pub from_fee_paid_msat: Option, + pub to_fee_paid_msat: Option, + pub from_timestamp: Option, + pub to_timestamp: Option, +} + +impl From for PaymentsFilter { + fn from(filter: PaymentsFilterForRPC) -> Self 
{ + PaymentsFilter { + payment_type: filter.payment_type.map(From::from), + description: filter.description, + status: filter.status, + from_amount_msat: filter.from_amount_msat, + to_amount_msat: filter.to_amount_msat, + from_fee_paid_msat: filter.from_fee_paid_msat, + to_fee_paid_msat: filter.to_fee_paid_msat, + from_timestamp: filter.from_timestamp, + to_timestamp: filter.to_timestamp, + } + } +} + #[derive(Deserialize)] pub struct ListPaymentsReq { pub coin: String, + pub filter: Option, + #[serde(default = "ten")] + limit: usize, + #[serde(default)] + paging_options: PagingOptionsEnum, +} + +#[derive(Deserialize, Serialize)] +#[serde(tag = "type")] +pub enum PaymentTypeForRPC { + #[serde(rename = "Outbound Payment")] + OutboundPayment { destination: PublicKeyForRPC }, + #[serde(rename = "Inbound Payment")] + InboundPayment, +} + +impl From for PaymentTypeForRPC { + fn from(payment_type: PaymentType) -> Self { + match payment_type { + PaymentType::OutboundPayment { destination } => PaymentTypeForRPC::OutboundPayment { + destination: PublicKeyForRPC(destination), + }, + PaymentType::InboundPayment => PaymentTypeForRPC::InboundPayment, + } + } +} + +impl From for PaymentType { + fn from(payment_type: PaymentTypeForRPC) -> Self { + match payment_type { + PaymentTypeForRPC::OutboundPayment { destination } => PaymentType::OutboundPayment { + destination: destination.into(), + }, + PaymentTypeForRPC::InboundPayment => PaymentType::InboundPayment, + } + } } #[derive(Serialize)] pub struct PaymentInfoForRPC { - status: HTLCStatus, + payment_hash: H256Json, + payment_type: PaymentTypeForRPC, + description: String, + #[serde(skip_serializing_if = "Option::is_none")] amount_in_msat: Option, + #[serde(skip_serializing_if = "Option::is_none")] fee_paid_msat: Option, + status: HTLCStatus, + created_at: u64, + last_updated: u64, } impl From for PaymentInfoForRPC { fn from(info: PaymentInfo) -> Self { PaymentInfoForRPC { - status: info.status, + payment_hash: info.payment_hash.0.into(), + payment_type: info.payment_type.into(), + description: info.description, amount_in_msat: info.amt_msat, fee_paid_msat: info.fee_paid_msat, + status: info.status, + created_at: info.created_at, + last_updated: info.last_updated, } } } #[derive(Serialize)] pub struct ListPaymentsResponse { - pub inbound_payments: HashMap, - pub outbound_payments: HashMap, + payments: Vec, + limit: usize, + skipped: usize, + total: usize, + total_pages: usize, + paging_options: PagingOptionsEnum, } -pub async fn list_payments(ctx: MmArc, req: ListPaymentsReq) -> ListPaymentsResult { +pub async fn list_payments_by_filter(ctx: MmArc, req: ListPaymentsReq) -> ListPaymentsResult { let coin = lp_coinfind_or_err(&ctx, &req.coin).await?; let ln_coin = match coin { MmCoinEnum::LightningCoin(c) => c, _ => return MmError::err(ListPaymentsError::UnsupportedCoin(coin.ticker().to_string())), }; - let inbound_payments = ln_coin - .inbound_payments - .lock() - .clone() - .into_iter() - .map(|(hash, info)| (hash.0.into(), info.into())) - .collect(); - let outbound_payments = ln_coin - .outbound_payments - .lock() - .clone() - .into_iter() - .map(|(hash, info)| (hash.0.into(), info.into())) - .collect(); + let get_payments_res = ln_coin + .persister + .get_payments_by_filter( + req.filter.map(From::from), + req.paging_options.clone().map(|h| PaymentHash(h.0)), + req.limit, + ) + .await?; Ok(ListPaymentsResponse { - inbound_payments, - outbound_payments, + payments: get_payments_res.payments.into_iter().map(From::from).collect(), + limit: req.limit, + 
skipped: get_payments_res.skipped, + total: get_payments_res.total, + total_pages: calc_total_pages(get_payments_res.total, req.limit), + paging_options: req.paging_options, }) } @@ -933,17 +1376,8 @@ pub struct GetPaymentDetailsRequest { pub payment_hash: H256Json, } -#[derive(Serialize)] -enum PaymentType { - #[serde(rename = "Outbound Payment")] - OutboundPayment, - #[serde(rename = "Inbound Payment")] - InboundPayment, -} - #[derive(Serialize)] pub struct GetPaymentDetailsResponse { - payment_type: PaymentType, payment_details: PaymentInfoForRPC, } @@ -957,17 +1391,13 @@ pub async fn get_payment_details( _ => return MmError::err(GetPaymentDetailsError::UnsupportedCoin(coin.ticker().to_string())), }; - if let Some(payment_info) = ln_coin.outbound_payments.lock().get(&PaymentHash(req.payment_hash.0)) { - return Ok(GetPaymentDetailsResponse { - payment_type: PaymentType::OutboundPayment, - payment_details: payment_info.clone().into(), - }); - } - - if let Some(payment_info) = ln_coin.inbound_payments.lock().get(&PaymentHash(req.payment_hash.0)) { + if let Some(payment_info) = ln_coin + .persister + .get_payment_from_db(PaymentHash(req.payment_hash.0)) + .await? + { return Ok(GetPaymentDetailsResponse { - payment_type: PaymentType::InboundPayment, - payment_details: payment_info.clone().into(), + payment_details: payment_info.into(), }); } diff --git a/mm2src/coins/lightning/ln_conf.rs b/mm2src/coins/lightning/ln_conf.rs index 582dfa3855..3773631e9c 100644 --- a/mm2src/coins/lightning/ln_conf.rs +++ b/mm2src/coins/lightning/ln_conf.rs @@ -3,7 +3,7 @@ use lightning::util::config::{ChannelConfig, ChannelHandshakeConfig, ChannelHand #[derive(Clone, Debug, Deserialize, Serialize)] pub struct DefaultFeesAndConfirmations { - pub default_feerate: u64, + pub default_fee_per_kb: u64, pub n_blocks: u32, } @@ -241,6 +241,8 @@ impl From for UserConfig { if let Some(accept_inbound) = conf.accept_inbound_channels { user_config.accept_inbound_channels = accept_inbound; } + // This allows OpenChannelRequest event to be fired + user_config.manually_accept_inbound_channels = true; user_config } diff --git a/mm2src/coins/lightning/ln_connections.rs b/mm2src/coins/lightning/ln_connections.rs deleted file mode 100644 index 1fb7d46447..0000000000 --- a/mm2src/coins/lightning/ln_connections.rs +++ /dev/null @@ -1,103 +0,0 @@ -use super::*; -use common::executor::{spawn, Timer}; -use derive_more::Display; -use lightning_persister::storage::NodesAddressesMapShared; -use tokio::net::TcpListener; - -const TRY_RECONNECTING_TO_NODE_INTERVAL: f64 = 60.; - -pub async fn ln_p2p_loop(peer_manager: Arc, listener: TcpListener) { - loop { - let peer_mgr = peer_manager.clone(); - let tcp_stream = match listener.accept().await { - Ok((stream, addr)) => { - log::debug!("New incoming lightning connection from node address: {}", addr); - stream - }, - Err(e) => { - log::error!("Error on accepting lightning connection: {}", e); - continue; - }, - }; - if let Ok(stream) = tcp_stream.into_std() { - spawn(async move { - lightning_net_tokio::setup_inbound(peer_mgr.clone(), stream).await; - }); - }; - } -} - -#[derive(Display)] -pub enum ConnectToNodeRes { - #[display(fmt = "Already connected to node: {}@{}", _0, _1)] - AlreadyConnected(String, String), - #[display(fmt = "Connected successfully to node : {}@{}", _0, _1)] - ConnectedSuccessfully(String, String), -} - -pub async fn connect_to_node( - pubkey: PublicKey, - node_addr: SocketAddr, - peer_manager: Arc, -) -> ConnectToNodeResult { - if 
peer_manager.get_peer_node_ids().contains(&pubkey) { - return Ok(ConnectToNodeRes::AlreadyConnected( - pubkey.to_string(), - node_addr.to_string(), - )); - } - - match lightning_net_tokio::connect_outbound(Arc::clone(&peer_manager), pubkey, node_addr).await { - Some(connection_closed_future) => { - let mut connection_closed_future = Box::pin(connection_closed_future); - loop { - // Make sure the connection is still established. - match futures::poll!(&mut connection_closed_future) { - std::task::Poll::Ready(_) => { - return MmError::err(ConnectToNodeError::ConnectionError(format!( - "Node {} disconnected before finishing the handshake", - pubkey - ))); - }, - std::task::Poll::Pending => {}, - } - - match peer_manager.get_peer_node_ids().contains(&pubkey) { - true => break, - // Wait for the handshake to complete if false. - false => Timer::sleep_ms(10).await, - } - } - }, - None => { - return MmError::err(ConnectToNodeError::ConnectionError(format!( - "Failed to connect to node: {}", - pubkey - ))) - }, - } - - Ok(ConnectToNodeRes::ConnectedSuccessfully( - pubkey.to_string(), - node_addr.to_string(), - )) -} - -pub async fn connect_to_nodes_loop(nodes_addresses: NodesAddressesMapShared, peer_manager: Arc) { - loop { - let nodes_addresses = nodes_addresses.lock().clone(); - for (pubkey, node_addr) in nodes_addresses { - let peer_manager = peer_manager.clone(); - match connect_to_node(pubkey, node_addr, peer_manager.clone()).await { - Ok(res) => { - if let ConnectToNodeRes::ConnectedSuccessfully(_, _) = res { - log::info!("{}", res.to_string()); - } - }, - Err(e) => log::error!("{}", e.to_string()), - } - } - - Timer::sleep(TRY_RECONNECTING_TO_NODE_INTERVAL).await; - } -} diff --git a/mm2src/coins/lightning/ln_errors.rs b/mm2src/coins/lightning/ln_errors.rs index 34e0df82ae..ff333f729c 100644 --- a/mm2src/coins/lightning/ln_errors.rs +++ b/mm2src/coins/lightning/ln_errors.rs @@ -1,8 +1,11 @@ use crate::utxo::rpc_clients::UtxoRpcError; use crate::utxo::GenerateTxError; use crate::{BalanceError, CoinFindError, NumConversError, PrivKeyNotAllowed, UnexpectedDerivationMethod}; +use bitcoin::consensus::encode; +use common::jsonrpc_client::JsonRpcError; use common::mm_error::prelude::*; use common::HttpStatusCode; +use db_common::sqlite::rusqlite::Error as SqlError; use derive_more::Display; use http::StatusCode; use lightning_invoice::SignOrCreationError; @@ -20,6 +23,7 @@ pub type ListPaymentsResult = Result>; pub type GetPaymentDetailsResult = Result>; pub type CloseChannelResult = Result>; pub type ClaimableBalancesResult = Result>; +pub type SaveChannelClosingResult = Result>; #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] @@ -42,6 +46,8 @@ pub enum EnableLightningError { HashError(String), #[display(fmt = "RPC error {}", _0)] RpcError(String), + #[display(fmt = "DB error {}", _0)] + DbError(String), ConnectToNodeError(String), } @@ -56,7 +62,8 @@ impl HttpStatusCode for EnableLightningError { | EnableLightningError::IOError(_) | EnableLightningError::HashError(_) | EnableLightningError::ConnectToNodeError(_) - | EnableLightningError::InvalidConfiguration(_) => StatusCode::INTERNAL_SERVER_ERROR, + | EnableLightningError::InvalidConfiguration(_) + | EnableLightningError::DbError(_) => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -65,6 +72,10 @@ impl From for EnableLightningError { fn from(err: std::io::Error) -> EnableLightningError { EnableLightningError::IOError(err.to_string()) } } +impl From for EnableLightningError 
{ + fn from(err: SqlError) -> EnableLightningError { EnableLightningError::DbError(err.to_string()) } +} + #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum ConnectToNodeError { @@ -127,6 +138,8 @@ pub enum OpenChannelError { InternalError(String), #[display(fmt = "I/O error {}", _0)] IOError(String), + #[display(fmt = "DB error {}", _0)] + DbError(String), ConnectToNodeError(String), #[display(fmt = "No such coin {}", _0)] NoSuchCoin(String), @@ -148,6 +161,7 @@ impl HttpStatusCode for OpenChannelError { | OpenChannelError::InternalError(_) | OpenChannelError::GenerateTxErr(_) | OpenChannelError::IOError(_) + | OpenChannelError::DbError(_) | OpenChannelError::InvalidPath(_) | OpenChannelError::ConvertTxErr(_) => StatusCode::INTERNAL_SERVER_ERROR, OpenChannelError::NoSuchCoin(_) | OpenChannelError::BalanceError(_) => StatusCode::PRECONDITION_REQUIRED, @@ -199,6 +213,10 @@ impl From for OpenChannelError { fn from(err: std::io::Error) -> OpenChannelError { OpenChannelError::IOError(err.to_string()) } } +impl From for OpenChannelError { + fn from(err: SqlError) -> OpenChannelError { OpenChannelError::DbError(err.to_string()) } +} + #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum ListChannelsError { @@ -206,6 +224,8 @@ pub enum ListChannelsError { UnsupportedCoin(String), #[display(fmt = "No such coin {}", _0)] NoSuchCoin(String), + #[display(fmt = "DB error {}", _0)] + DbError(String), } impl HttpStatusCode for ListChannelsError { @@ -213,6 +233,7 @@ impl HttpStatusCode for ListChannelsError { match self { ListChannelsError::UnsupportedCoin(_) => StatusCode::BAD_REQUEST, ListChannelsError::NoSuchCoin(_) => StatusCode::PRECONDITION_REQUIRED, + ListChannelsError::DbError(_) => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -225,6 +246,10 @@ impl From for ListChannelsError { } } +impl From for ListChannelsError { + fn from(err: SqlError) -> ListChannelsError { ListChannelsError::DbError(err.to_string()) } +} + #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum GetChannelDetailsError { @@ -232,8 +257,10 @@ pub enum GetChannelDetailsError { UnsupportedCoin(String), #[display(fmt = "No such coin {}", _0)] NoSuchCoin(String), - #[display(fmt = "Channel with id: {:?} is not found", _0)] - NoSuchChannel(H256Json), + #[display(fmt = "Channel with rpc id: {} is not found", _0)] + NoSuchChannel(u64), + #[display(fmt = "DB error {}", _0)] + DbError(String), } impl HttpStatusCode for GetChannelDetailsError { @@ -242,6 +269,7 @@ impl HttpStatusCode for GetChannelDetailsError { GetChannelDetailsError::UnsupportedCoin(_) => StatusCode::BAD_REQUEST, GetChannelDetailsError::NoSuchCoin(_) => StatusCode::PRECONDITION_REQUIRED, GetChannelDetailsError::NoSuchChannel(_) => StatusCode::NOT_FOUND, + GetChannelDetailsError::DbError(_) => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -254,6 +282,10 @@ impl From for GetChannelDetailsError { } } +impl From for GetChannelDetailsError { + fn from(err: SqlError) -> GetChannelDetailsError { GetChannelDetailsError::DbError(err.to_string()) } +} + #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum GenerateInvoiceError { @@ -263,6 +295,8 @@ pub enum GenerateInvoiceError { NoSuchCoin(String), #[display(fmt = "Invoice signing or creation error: {}", _0)] 
SignOrCreationError(String), + #[display(fmt = "DB error {}", _0)] + DbError(String), } impl HttpStatusCode for GenerateInvoiceError { @@ -270,7 +304,9 @@ impl HttpStatusCode for GenerateInvoiceError { match self { GenerateInvoiceError::UnsupportedCoin(_) => StatusCode::BAD_REQUEST, GenerateInvoiceError::NoSuchCoin(_) => StatusCode::PRECONDITION_REQUIRED, - GenerateInvoiceError::SignOrCreationError(_) => StatusCode::INTERNAL_SERVER_ERROR, + GenerateInvoiceError::SignOrCreationError(_) | GenerateInvoiceError::DbError(_) => { + StatusCode::INTERNAL_SERVER_ERROR + }, } } } @@ -287,6 +323,10 @@ impl From for GenerateInvoiceError { fn from(e: SignOrCreationError) -> Self { GenerateInvoiceError::SignOrCreationError(e.to_string()) } } +impl From for GenerateInvoiceError { + fn from(err: SqlError) -> GenerateInvoiceError { GenerateInvoiceError::DbError(err.to_string()) } +} + #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum SendPaymentError { @@ -300,6 +340,8 @@ pub enum SendPaymentError { PaymentError(String), #[display(fmt = "Final cltv expiry delta {} is below the required minimum of {}", _0, _1)] CLTVExpiryError(u32, u32), + #[display(fmt = "DB error {}", _0)] + DbError(String), } impl HttpStatusCode for SendPaymentError { @@ -309,7 +351,8 @@ impl HttpStatusCode for SendPaymentError { SendPaymentError::NoSuchCoin(_) => StatusCode::PRECONDITION_REQUIRED, SendPaymentError::PaymentError(_) | SendPaymentError::NoRouteFound(_) - | SendPaymentError::CLTVExpiryError(_, _) => StatusCode::INTERNAL_SERVER_ERROR, + | SendPaymentError::CLTVExpiryError(_, _) + | SendPaymentError::DbError(_) => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -322,6 +365,10 @@ impl From for SendPaymentError { } } +impl From for SendPaymentError { + fn from(err: SqlError) -> SendPaymentError { SendPaymentError::DbError(err.to_string()) } +} + #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum ListPaymentsError { @@ -329,6 +376,8 @@ pub enum ListPaymentsError { UnsupportedCoin(String), #[display(fmt = "No such coin {}", _0)] NoSuchCoin(String), + #[display(fmt = "DB error {}", _0)] + DbError(String), } impl HttpStatusCode for ListPaymentsError { @@ -336,6 +385,7 @@ impl HttpStatusCode for ListPaymentsError { match self { ListPaymentsError::UnsupportedCoin(_) => StatusCode::BAD_REQUEST, ListPaymentsError::NoSuchCoin(_) => StatusCode::PRECONDITION_REQUIRED, + ListPaymentsError::DbError(_) => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -348,6 +398,10 @@ impl From for ListPaymentsError { } } +impl From for ListPaymentsError { + fn from(err: SqlError) -> ListPaymentsError { ListPaymentsError::DbError(err.to_string()) } +} + #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum GetPaymentDetailsError { @@ -357,6 +411,8 @@ pub enum GetPaymentDetailsError { NoSuchCoin(String), #[display(fmt = "Payment with hash: {:?} is not found", _0)] NoSuchPayment(H256Json), + #[display(fmt = "DB error {}", _0)] + DbError(String), } impl HttpStatusCode for GetPaymentDetailsError { @@ -365,6 +421,7 @@ impl HttpStatusCode for GetPaymentDetailsError { GetPaymentDetailsError::UnsupportedCoin(_) => StatusCode::BAD_REQUEST, GetPaymentDetailsError::NoSuchCoin(_) => StatusCode::PRECONDITION_REQUIRED, GetPaymentDetailsError::NoSuchPayment(_) => StatusCode::NOT_FOUND, + GetPaymentDetailsError::DbError(_) => 
StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -377,6 +434,10 @@ impl From for GetPaymentDetailsError { } } +impl From for GetPaymentDetailsError { + fn from(err: SqlError) -> GetPaymentDetailsError { GetPaymentDetailsError::DbError(err.to_string()) } +} + #[derive(Debug, Deserialize, Display, Serialize, SerializeErrorType)] #[serde(tag = "error_type", content = "error_data")] pub enum CloseChannelError { @@ -431,3 +492,70 @@ impl From for ClaimableBalancesError { } } } + +#[derive(Display)] +pub enum SaveChannelClosingError { + #[display(fmt = "DB error: {}", _0)] + DbError(String), + #[display(fmt = "Channel with rpc id {} not found in DB", _0)] + ChannelNotFound(u64), + #[display(fmt = "funding_generated_in_block is Null in DB")] + BlockHeightNull, + #[display(fmt = "Funding transaction hash is Null in DB")] + FundingTxNull, + #[display(fmt = "Error parsing funding transaction hash: {}", _0)] + FundingTxParseError(String), + #[display(fmt = "Error while waiting for the funding transaction to be spent: {}", _0)] + WaitForFundingTxSpendError(String), +} + +impl From for SaveChannelClosingError { + fn from(err: SqlError) -> SaveChannelClosingError { SaveChannelClosingError::DbError(err.to_string()) } +} + +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] +pub enum GetTxError { + Rpc(UtxoRpcError), + TxDeserialization(encode::Error), +} + +impl From for GetTxError { + fn from(err: UtxoRpcError) -> GetTxError { GetTxError::Rpc(err) } +} + +impl From for GetTxError { + fn from(err: encode::Error) -> GetTxError { GetTxError::TxDeserialization(err) } +} + +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] +pub enum GetHeaderError { + Rpc(JsonRpcError), + HeaderDeserialization(encode::Error), +} + +impl From for GetHeaderError { + fn from(err: JsonRpcError) -> GetHeaderError { GetHeaderError::Rpc(err) } +} + +impl From for GetHeaderError { + fn from(err: encode::Error) -> GetHeaderError { GetHeaderError::HeaderDeserialization(err) } +} + +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] +pub enum FindWatchedOutputSpendError { + HashNotHeight, + DeserializationErr(encode::Error), + RpcError(String), + GetHeaderError(GetHeaderError), +} + +impl From for FindWatchedOutputSpendError { + fn from(err: JsonRpcError) -> Self { FindWatchedOutputSpendError::RpcError(err.to_string()) } +} + +impl From for FindWatchedOutputSpendError { + fn from(err: encode::Error) -> Self { FindWatchedOutputSpendError::DeserializationErr(err) } +} diff --git a/mm2src/coins/lightning/ln_events.rs b/mm2src/coins/lightning/ln_events.rs index d71b2f7fc6..2fc8c5966c 100644 --- a/mm2src/coins/lightning/ln_events.rs +++ b/mm2src/coins/lightning/ln_events.rs @@ -1,8 +1,10 @@ use super::*; +use crate::lightning::ln_errors::{SaveChannelClosingError, SaveChannelClosingResult}; use bitcoin::blockdata::script::Script; use bitcoin::blockdata::transaction::Transaction; use common::executor::{spawn, Timer}; -use common::log; +use common::log::{error, info}; +use common::now_ms; use core::time::Duration; use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use lightning::chain::keysinterface::SpendableOutputDescriptor; @@ -10,17 +12,17 @@ use lightning::util::events::{Event, EventHandler, PaymentPurpose}; use rand::Rng; use script::{Builder, SignatureVersion}; use secp256k1::Secp256k1; -use std::collections::hash_map::Entry; use std::convert::TryFrom; use std::sync::Arc; use utxo_signer::with_key_pair::sign_tx; +const TRY_LOOP_INTERVAL: f64 = 60.; + pub struct LightningEventHandler { 
- filter: Arc, + platform: Arc, channel_manager: Arc, keys_manager: Arc, - inbound_payments: PaymentsMapShared, - outbound_payments: PaymentsMapShared, + persister: Arc, } impl EventHandler for LightningEventHandler { @@ -28,81 +30,120 @@ impl EventHandler for LightningEventHandler { match event { Event::FundingGenerationReady { temporary_channel_id, + channel_value_satoshis, output_script, - .. - } => self.handle_funding_generation_ready(*temporary_channel_id, output_script), + user_channel_id, + } => self.handle_funding_generation_ready( + *temporary_channel_id, + *channel_value_satoshis, + output_script, + *user_channel_id, + ), + Event::PaymentReceived { payment_hash, amt, purpose, } => self.handle_payment_received(*payment_hash, *amt, purpose), + Event::PaymentSent { payment_preimage, payment_hash, fee_paid_msat, .. } => self.handle_payment_sent(*payment_preimage, *payment_hash, *fee_paid_msat), - Event::PaymentFailed { payment_hash, .. } => self.handle_payment_failed(payment_hash), + + Event::PaymentFailed { payment_hash, .. } => self.handle_payment_failed(*payment_hash), + Event::PendingHTLCsForwardable { time_forwardable } => self.handle_pending_htlcs_forwards(*time_forwardable), + Event::SpendableOutputs { outputs } => self.handle_spendable_outputs(outputs), + // Todo: an RPC for total amount earned - Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => log::info!( - "Recieved a fee of {} milli-satoshis for a successfully forwarded payment through our {} lightning node. Was the forwarded HTLC claimed by our counterparty via an on-chain transaction?: {}", - fee_earned_msat.unwrap_or_default(), - self.filter.platform_coin.ticker(), - claim_from_onchain_tx, - ), - // Todo: Use storage to store channels history - Event::ChannelClosed { channel_id, reason, .. } => log::info!( - "Channel: {} closed for the following reason: {}", - hex::encode(channel_id), - reason - ), + Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => info!( + "Received a fee of {} milli-satoshis for a successfully forwarded payment through our {} lightning node. Was the forwarded HTLC claimed by our counterparty via an on-chain transaction?: {}", + fee_earned_msat.unwrap_or_default(), + self.platform.coin.ticker(), + claim_from_onchain_tx, + ), + + Event::ChannelClosed { + channel_id, + user_channel_id, + reason, + } => self.handle_channel_closed(*channel_id, *user_channel_id, reason.to_string()), + // Todo: Add spent UTXOs to RecentlySpentOutPoints if it's not discarded - Event::DiscardFunding { channel_id, transaction } => log::info!( - "Discarding funding tx: {} for channel {}", - transaction.txid().to_string(), - hex::encode(channel_id), - ), + Event::DiscardFunding { channel_id, transaction } => info!( + "Discarding funding tx: {} for channel {}", + transaction.txid().to_string(), + hex::encode(channel_id), + ), + // Handling updating channel penalties after successfully routing a payment along a path is done by the InvoicePayer. Event::PaymentPathSuccessful { payment_id, payment_hash, path, - } => log::info!( + } => info!( "Payment path: {:?}, successful for payment hash: {}, payment id: {}", path.iter().map(|hop| hop.pubkey.to_string()).collect::>(), payment_hash.map(|h| hex::encode(h.0)).unwrap_or_default(), hex::encode(payment_id.0) ), + // Handling updating channel penalties after a payment fails to route through a channel is done by the InvoicePayer. // Also abandoning or retrying a payment is handled by the InvoicePayer. 
- Event::PaymentPathFailed { payment_hash, rejected_by_dest, all_paths_failed, path, .. } => log::info!( + Event::PaymentPathFailed { + payment_hash, + rejected_by_dest, + all_paths_failed, + path, + .. + } => info!( "Payment path: {:?}, failed for payment hash: {}, Was rejected by destination?: {}, All paths failed?: {}", path.iter().map(|hop| hop.pubkey.to_string()).collect::>(), hex::encode(payment_hash.0), rejected_by_dest, all_paths_failed, ), + + Event::OpenChannelRequest { + temporary_channel_id, + counterparty_node_id, + funding_satoshis, + push_msat, + } => { + info!( + "Handling OpenChannelRequest from node: {} with funding value: {} and starting balance: {}", + counterparty_node_id, + funding_satoshis, + push_msat, + ); + if self.channel_manager.accept_inbound_channel(temporary_channel_id).is_ok() { + // Todo: once the rust-lightning PR for user_channel_id in accept_inbound_channel is released + // use user_channel_id to get the funding tx here once the funding tx is available. + } + }, } } } // Generates the raw funding transaction with one output equal to the channel value. fn sign_funding_transaction( - temp_channel_id: [u8; 32], + user_channel_id: u64, output_script: &Script, - filter: Arc, + platform: Arc, ) -> OpenChannelResult { - let coin = &filter.platform_coin; + let coin = &platform.coin; let mut unsigned = { - let unsigned_funding_txs = filter.unsigned_funding_txs.lock(); + let unsigned_funding_txs = platform.unsigned_funding_txs.lock(); unsigned_funding_txs - .get(&temp_channel_id) + .get(&user_channel_id) .ok_or_else(|| { OpenChannelError::InternalError(format!( - "Unsigned funding tx not found for temporary channel id: {}", - hex::encode(temp_channel_id) + "Unsigned funding tx not found for internal channel id: {}", + user_channel_id )) })? .clone() @@ -124,50 +165,90 @@ fn sign_funding_transaction( Transaction::try_from(signed).map_to_mm(|e| OpenChannelError::ConvertTxErr(e.to_string())) } +async fn save_channel_closing_details( + persister: Arc, + platform: Arc, + user_channel_id: u64, + reason: String, +) -> SaveChannelClosingResult<()> { + persister.update_channel_to_closed(user_channel_id, reason).await?; + + let channel_details = persister + .get_channel_from_db(user_channel_id) + .await? 
+ .ok_or_else(|| MmError::new(SaveChannelClosingError::ChannelNotFound(user_channel_id)))?; + + let closing_tx_hash = platform.get_channel_closing_tx(channel_details).await?; + + persister.add_closing_tx_to_db(user_channel_id, closing_tx_hash).await?; + + Ok(()) +} + impl LightningEventHandler { pub fn new( - filter: Arc, + platform: Arc, channel_manager: Arc, keys_manager: Arc, - inbound_payments: PaymentsMapShared, - outbound_payments: PaymentsMapShared, + persister: Arc, ) -> Self { LightningEventHandler { - filter, + platform, channel_manager, keys_manager, - inbound_payments, - outbound_payments, + persister, } } - fn handle_funding_generation_ready(&self, temporary_channel_id: [u8; 32], output_script: &Script) { - log::info!( - "Handling FundingGenerationReady event for temporary_channel_id: {}", - hex::encode(temporary_channel_id) + fn handle_funding_generation_ready( + &self, + temporary_channel_id: [u8; 32], + channel_value_satoshis: u64, + output_script: &Script, + user_channel_id: u64, + ) { + info!( + "Handling FundingGenerationReady event for internal channel id: {}", + user_channel_id ); - let funding_tx = match sign_funding_transaction(temporary_channel_id, output_script, self.filter.clone()) { + let funding_tx = match sign_funding_transaction(user_channel_id, output_script, self.platform.clone()) { Ok(tx) => tx, Err(e) => { - log::error!( - "Error generating funding transaction for temporary channel id {:?}: {}", - temporary_channel_id, + error!( + "Error generating funding transaction for internal channel id {}: {}", + user_channel_id, e.to_string() ); return; }, }; + let funding_txid = funding_tx.txid(); // Give the funding transaction back to LDK for opening the channel. if let Err(e) = self .channel_manager .funding_transaction_generated(&temporary_channel_id, funding_tx) { - log::error!("{:?}", e); + error!("{:?}", e); + return; } + let platform = self.platform.clone(); + let persister = self.persister.clone(); + spawn(async move { + let best_block_height = platform.best_block_height(); + persister + .add_funding_tx_to_db( + user_channel_id, + funding_txid.to_string(), + channel_value_satoshis, + best_block_height, + ) + .await + .error_log(); + }); } fn handle_payment_received(&self, payment_hash: PaymentHash, amt: u64, purpose: &PaymentPurpose) { - log::info!( + info!( "Handling PaymentReceived event for payment_hash: {}", hex::encode(payment_hash.0) ); @@ -183,7 +264,7 @@ impl LightningEventHandler { }; let status = match self.channel_manager.claim_funds(payment_preimage) { true => { - log::info!( + info!( "Received an amount of {} millisatoshis for payment hash {}", amt, hex::encode(payment_hash.0) @@ -192,21 +273,40 @@ impl LightningEventHandler { }, false => HTLCStatus::Failed, }; - let mut payments = self.inbound_payments.lock(); - match payments.entry(payment_hash) { - Entry::Occupied(mut e) => { - let payment = e.get_mut(); - payment.status = status; - payment.preimage = Some(payment_preimage); - payment.secret = payment_secret; - }, - Entry::Vacant(e) => { - e.insert(PaymentInfo { + let persister = self.persister.clone(); + match purpose { + PaymentPurpose::InvoicePayment { .. 
} => spawn(async move { + if let Ok(Some(mut payment_info)) = persister + .get_payment_from_db(payment_hash) + .await + .error_log_passthrough() + { + payment_info.preimage = Some(payment_preimage); + payment_info.status = HTLCStatus::Succeeded; + payment_info.amt_msat = Some(amt); + payment_info.last_updated = now_ms() / 1000; + if let Err(e) = persister.add_or_update_payment_in_db(payment_info).await { + error!("Unable to update payment information in DB: {}", e); + } + } + }), + PaymentPurpose::SpontaneousPayment(_) => { + let payment_info = PaymentInfo { + payment_hash, + payment_type: PaymentType::InboundPayment, + description: "".into(), preimage: Some(payment_preimage), secret: payment_secret, - status, amt_msat: Some(amt), fee_paid_msat: None, + status, + created_at: now_ms() / 1000, + last_updated: now_ms() / 1000, + }; + spawn(async move { + if let Err(e) = persister.add_or_update_payment_in_db(payment_info).await { + error!("Unable to update payment information in DB: {}", e); + } }); }, } @@ -218,36 +318,80 @@ impl LightningEventHandler { payment_hash: PaymentHash, fee_paid_msat: Option, ) { - log::info!( + info!( "Handling PaymentSent event for payment_hash: {}", hex::encode(payment_hash.0) ); - if let Some(payment) = self.outbound_payments.lock().get_mut(&payment_hash) { - payment.preimage = Some(payment_preimage); - payment.status = HTLCStatus::Succeeded; - payment.fee_paid_msat = fee_paid_msat; - log::info!( - "Successfully sent payment of {} millisatoshis with payment hash {}", - payment.amt_msat.unwrap_or_default(), - hex::encode(payment_hash.0) - ); + let persister = self.persister.clone(); + spawn(async move { + if let Ok(Some(mut payment_info)) = persister + .get_payment_from_db(payment_hash) + .await + .error_log_passthrough() + { + payment_info.preimage = Some(payment_preimage); + payment_info.status = HTLCStatus::Succeeded; + payment_info.fee_paid_msat = fee_paid_msat; + payment_info.last_updated = now_ms() / 1000; + let amt_msat = payment_info.amt_msat; + if let Err(e) = persister.add_or_update_payment_in_db(payment_info).await { + error!("Unable to update payment information in DB: {}", e); + } + info!( + "Successfully sent payment of {} millisatoshis with payment hash {}", + amt_msat.unwrap_or_default(), + hex::encode(payment_hash.0) + ); + } + }); + } + + fn handle_channel_closed(&self, channel_id: [u8; 32], user_channel_id: u64, reason: String) { + info!( + "Channel: {} closed for the following reason: {}", + hex::encode(channel_id), + reason + ); + let persister = self.persister.clone(); + let platform = self.platform.clone(); + // Todo: Handle inbound channels closure case after updating to latest version of rust-lightning + // as it has a new OpenChannelRequest event where we can give an inbound channel a user_channel_id + // other than 0 in sql + if user_channel_id != 0 { + spawn(async move { + if let Err(e) = save_channel_closing_details(persister, platform, user_channel_id, reason).await { + error!( + "Unable to update channel {} closing details in DB: {}", + user_channel_id, e + ); + } + }); } } - fn handle_payment_failed(&self, payment_hash: &PaymentHash) { - log::info!( + fn handle_payment_failed(&self, payment_hash: PaymentHash) { + info!( "Handling PaymentFailed event for payment_hash: {}", hex::encode(payment_hash.0) ); - let mut outbound_payments = self.outbound_payments.lock(); - let outbound_payment = outbound_payments.get_mut(payment_hash); - if let Some(payment) = outbound_payment { - payment.status = HTLCStatus::Failed; - } + let persister = 
self.persister.clone(); + spawn(async move { + if let Ok(Some(mut payment_info)) = persister + .get_payment_from_db(payment_hash) + .await + .error_log_passthrough() + { + payment_info.status = HTLCStatus::Failed; + payment_info.last_updated = now_ms() / 1000; + if let Err(e) = persister.add_or_update_payment_in_db(payment_info).await { + error!("Unable to update payment information in DB: {}", e); + } + } + }); } fn handle_pending_htlcs_forwards(&self, time_forwardable: Duration) { - log::info!("Handling PendingHTLCsForwardable event!"); + info!("Handling PendingHTLCsForwardable event!"); let min_wait_time = time_forwardable.as_millis() as u32; let channel_manager = self.channel_manager.clone(); spawn(async move { @@ -258,18 +402,18 @@ impl LightningEventHandler { } fn handle_spendable_outputs(&self, outputs: &[SpendableOutputDescriptor]) { - log::info!("Handling SpendableOutputs event!"); - let platform_coin = &self.filter.platform_coin; + info!("Handling SpendableOutputs event!"); + let platform_coin = &self.platform.coin; // Todo: add support for Hardware wallets for funding transactions and spending spendable outputs (channel closing transactions) let my_address = match platform_coin.as_ref().derivation_method.iguana_or_err() { Ok(addr) => addr, Err(e) => { - log::error!("{}", e); + error!("{}", e); return; }, }; let change_destination_script = Builder::build_witness_script(&my_address.hash).to_bytes().take().into(); - let feerate_sat_per_1000_weight = self.filter.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + let feerate_sat_per_1000_weight = self.platform.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); let output_descriptors = &outputs.iter().collect::>(); let spending_tx = match self.keys_manager.spend_spendable_outputs( output_descriptors, @@ -280,10 +424,55 @@ impl LightningEventHandler { ) { Ok(tx) => tx, Err(_) => { - log::error!("Error spending spendable outputs"); + error!("Error spending spendable outputs"); return; }, }; - platform_coin.broadcast_transaction(&spending_tx); + + let claiming_tx_inputs_value = outputs.iter().fold(0, |sum, output| match output { + SpendableOutputDescriptor::StaticOutput { output, .. 
} => sum + output.value,
+            SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => sum + descriptor.output.value,
+            SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => sum + descriptor.output.value,
+        });
+        let claiming_tx_outputs_value = spending_tx.output.iter().fold(0, |sum, txout| sum + txout.value);
+        if claiming_tx_inputs_value < claiming_tx_outputs_value {
+            error!(
+                "Claiming transaction input value {} can't be less than outputs value {}!",
+                claiming_tx_inputs_value, claiming_tx_outputs_value
+            );
+            return;
+        }
+        let claiming_tx_fee = claiming_tx_inputs_value - claiming_tx_outputs_value;
+        let claiming_tx_fee_per_channel = (claiming_tx_fee as f64) / (outputs.len() as f64);
+
+        for output in outputs {
+            let (closing_txid, claimed_balance) = match output {
+                SpendableOutputDescriptor::StaticOutput { outpoint, output } => {
+                    (outpoint.txid.to_string(), output.value)
+                },
+                SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
+                    (descriptor.outpoint.txid.to_string(), descriptor.output.value)
+                },
+                SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
+                    (descriptor.outpoint.txid.to_string(), descriptor.output.value)
+                },
+            };
+            let claiming_txid = spending_tx.txid().to_string();
+            let persister = self.persister.clone();
+            spawn(async move {
+                ok_or_retry_after_sleep!(
+                    persister
+                        .add_claiming_tx_to_db(
+                            closing_txid.clone(),
+                            claiming_txid.clone(),
+                            (claimed_balance as f64) - claiming_tx_fee_per_channel,
+                        )
+                        .await,
+                    TRY_LOOP_INTERVAL
+                );
+            });
+
+            self.platform.broadcast_transaction(&spending_tx);
+        }
+    }
+}
diff --git a/mm2src/coins/lightning/ln_p2p.rs b/mm2src/coins/lightning/ln_p2p.rs
new file mode 100644
index 0000000000..35977fb40d
--- /dev/null
+++ b/mm2src/coins/lightning/ln_p2p.rs
@@ -0,0 +1,200 @@
+use super::*;
+use common::executor::{spawn, Timer};
+use common::ip_addr::fetch_external_ip;
+use common::log::LogState;
+use derive_more::Display;
+use lightning::chain::Access;
+use lightning::ln::msgs::NetAddress;
+use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager};
+use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
+use lightning_net_tokio::SocketDescriptor;
+use lightning_persister::storage::NodesAddressesMapShared;
+use rand::RngCore;
+use secp256k1::SecretKey;
+use std::net::{IpAddr, Ipv4Addr};
+use tokio::net::TcpListener;
+
+const TRY_RECONNECTING_TO_NODE_INTERVAL: f64 = 60.;
+const BROADCAST_NODE_ANNOUNCEMENT_INTERVAL: u64 = 600;
+
+type NetworkGossip = NetGraphMsgHandler<Arc<NetworkGraph>, Arc<dyn Access + Send + Sync>, Arc<LogState>>;
+
+pub type PeerManager =
+    SimpleArcPeerManager<SocketDescriptor, ChainMonitor, Platform, Platform, dyn Access + Send + Sync, LogState>;
+
+#[derive(Display)]
+pub enum ConnectToNodeRes {
+    #[display(fmt = "Already connected to node: {}@{}", pubkey, node_addr)]
+    AlreadyConnected { pubkey: PublicKey, node_addr: SocketAddr },
+    #[display(fmt = "Connected successfully to node: {}@{}", pubkey, node_addr)]
+    ConnectedSuccessfully { pubkey: PublicKey, node_addr: SocketAddr },
+}
+
+pub async fn connect_to_node(
+    pubkey: PublicKey,
+    node_addr: SocketAddr,
+    peer_manager: Arc<PeerManager>,
+) -> ConnectToNodeResult<ConnectToNodeRes> {
+    if peer_manager.get_peer_node_ids().contains(&pubkey) {
+        return Ok(ConnectToNodeRes::AlreadyConnected { pubkey, node_addr });
+    }
+
+    let mut connection_closed_future =
+        match lightning_net_tokio::connect_outbound(Arc::clone(&peer_manager), pubkey, node_addr).await {
+            Some(fut) => Box::pin(fut),
+            None => {
+                return MmError::err(ConnectToNodeError::ConnectionError(format!(
+                    "Failed to connect to node: {}",
+                    pubkey
+                )))
+            },
+        };
+
+    loop {
+        // Make sure
the connection is still established. + match futures::poll!(&mut connection_closed_future) { + std::task::Poll::Ready(_) => { + return MmError::err(ConnectToNodeError::ConnectionError(format!( + "Node {} disconnected before finishing the handshake", + pubkey + ))); + }, + std::task::Poll::Pending => {}, + } + + if peer_manager.get_peer_node_ids().contains(&pubkey) { + break; + } + + // Wait for the handshake to complete + Timer::sleep_ms(10).await; + } + + Ok(ConnectToNodeRes::ConnectedSuccessfully { pubkey, node_addr }) +} + +pub async fn connect_to_nodes_loop(open_channels_nodes: NodesAddressesMapShared, peer_manager: Arc) { + loop { + let open_channels_nodes = open_channels_nodes.lock().clone(); + for (pubkey, node_addr) in open_channels_nodes { + let peer_manager = peer_manager.clone(); + match connect_to_node(pubkey, node_addr, peer_manager.clone()).await { + Ok(res) => { + if let ConnectToNodeRes::ConnectedSuccessfully { .. } = res { + log::info!("{}", res.to_string()); + } + }, + Err(e) => log::error!("{}", e.to_string()), + } + } + + Timer::sleep(TRY_RECONNECTING_TO_NODE_INTERVAL).await; + } +} + +// TODO: add TOR address option +fn netaddress_from_ipaddr(addr: IpAddr, port: u16) -> Vec { + if addr == Ipv4Addr::new(0, 0, 0, 0) || addr == Ipv4Addr::new(127, 0, 0, 1) { + return Vec::new(); + } + let mut addresses = Vec::new(); + let address = match addr { + IpAddr::V4(addr) => NetAddress::IPv4 { + addr: u32::from(addr).to_be_bytes(), + port, + }, + IpAddr::V6(addr) => NetAddress::IPv6 { + addr: u128::from(addr).to_be_bytes(), + port, + }, + }; + addresses.push(address); + addresses +} + +pub async fn ln_node_announcement_loop( + channel_manager: Arc, + node_name: [u8; 32], + node_color: [u8; 3], + port: u16, +) { + loop { + // Right now if the node is behind NAT the external ip is fetched on every loop + // If the node does not announce a public IP, it will not be displayed on the network graph, + // and other nodes will not be able to open a channel with it. But it can open channels with other nodes. + let addresses = match fetch_external_ip().await { + Ok(ip) => { + log::debug!("Fetch real IP successfully: {}:{}", ip, port); + netaddress_from_ipaddr(ip, port) + }, + Err(e) => { + log::error!("Error while fetching external ip for node announcement: {}", e); + Timer::sleep(BROADCAST_NODE_ANNOUNCEMENT_INTERVAL as f64).await; + continue; + }, + }; + channel_manager.broadcast_node_announcement(node_color, node_name, addresses); + + Timer::sleep(BROADCAST_NODE_ANNOUNCEMENT_INTERVAL as f64).await; + } +} + +async fn ln_p2p_loop(peer_manager: Arc, listener: TcpListener) { + loop { + let peer_mgr = peer_manager.clone(); + let tcp_stream = match listener.accept().await { + Ok((stream, addr)) => { + log::debug!("New incoming lightning connection from node address: {}", addr); + stream + }, + Err(e) => { + log::error!("Error on accepting lightning connection: {}", e); + continue; + }, + }; + if let Ok(stream) = tcp_stream.into_std() { + spawn(async move { + lightning_net_tokio::setup_inbound(peer_mgr.clone(), stream).await; + }); + }; + } +} + +pub async fn init_peer_manager( + ctx: MmArc, + listening_port: u16, + channel_manager: Arc, + network_gossip: Arc, + node_secret: SecretKey, + logger: Arc, +) -> EnableLightningResult> { + // The set (possibly empty) of socket addresses on which this node accepts incoming connections. + // If the user wishes to preserve privacy, addresses should likely contain only Tor Onion addresses. 
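// netaddress_from_ipaddr above advertises the node's IP as raw big-endian bytes, which is
// what NetAddress::IPv4/IPv6 `addr` fields hold. A std-only check of that byte-order
// conversion (the address is illustrative):
//
//     use std::net::Ipv4Addr;
//
//     fn main() {
//         let ip = Ipv4Addr::new(203, 132, 94, 196);
//         // u32::from(..) gives the integer form; to_be_bytes() restores network byte order.
//         assert_eq!(u32::from(ip).to_be_bytes(), [203, 132, 94, 196]);
//     }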
+    let listening_addr = myipaddr(ctx).await.map_to_mm(EnableLightningError::InvalidAddress)?;
+    // If the listening port is already in use, start_lightning should return an error early
+    let listener = TcpListener::bind(format!("{}:{}", listening_addr, listening_port))
+        .await
+        .map_to_mm(|e| EnableLightningError::IOError(e.to_string()))?;
+
+    // ephemeral_random_data is used to derive per-connection ephemeral keys
+    let mut ephemeral_bytes = [0; 32];
+    rand::thread_rng().fill_bytes(&mut ephemeral_bytes);
+    let lightning_msg_handler = MessageHandler {
+        chan_handler: channel_manager,
+        route_handler: network_gossip,
+    };
+
+    // IgnoringMessageHandler is used since custom message types (experimental and application-specific messages) are not needed
+    let peer_manager: Arc<PeerManager> = Arc::new(PeerManager::new(
+        lightning_msg_handler,
+        node_secret,
+        &ephemeral_bytes,
+        logger,
+        Arc::new(IgnoringMessageHandler {}),
+    ));
+
+    // Initialize p2p networking
+    spawn(ln_p2p_loop(peer_manager.clone(), listener));
+
+    Ok(peer_manager)
+}
diff --git a/mm2src/coins/lightning/ln_platform.rs b/mm2src/coins/lightning/ln_platform.rs
new file mode 100644
index 0000000000..3214c033c5
--- /dev/null
+++ b/mm2src/coins/lightning/ln_platform.rs
@@ -0,0 +1,574 @@
+use super::*;
+use crate::lightning::ln_errors::{FindWatchedOutputSpendError, GetHeaderError, GetTxError, SaveChannelClosingError,
+                                  SaveChannelClosingResult};
+use crate::utxo::rpc_clients::{electrum_script_hash, BestBlock as RpcBestBlock, BlockHashOrHeight,
+                               ElectrumBlockHeader, ElectrumClient, ElectrumNonce, EstimateFeeMethod,
+                               UtxoRpcClientEnum, UtxoRpcError};
+use crate::utxo::utxo_standard::UtxoStandardCoin;
+use crate::{MarketCoinOps, MmCoin};
+use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::script::Script;
+use bitcoin::blockdata::transaction::Transaction;
+use bitcoin::consensus::encode::{deserialize, serialize_hex};
+use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid};
+use bitcoin_hashes::{sha256d, Hash};
+use common::executor::{spawn, Timer};
+use common::jsonrpc_client::JsonRpcErrorType;
+use common::log::{debug, error, info};
+use futures::compat::Future01CompatExt;
+use keys::hash::H256;
+use lightning::chain::{chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator},
+                       Confirm, Filter, WatchedOutput};
+use rpc::v1::types::H256 as H256Json;
+use std::cmp;
+use std::convert::TryFrom;
+use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering};
+
+const CHECK_FOR_NEW_BEST_BLOCK_INTERVAL: f64 = 60.;
+const MIN_ALLOWED_FEE_PER_1000_WEIGHT: u32 = 253;
+const TRY_LOOP_INTERVAL: f64 = 60.;
+
+#[inline]
+pub fn h256_json_from_txid(txid: Txid) -> H256Json { H256Json::from(txid.as_hash().into_inner()).reversed() }
+
+struct TxWithBlockInfo {
+    tx: Transaction,
+    block_header: BlockHeader,
+    block_height: u64,
+}
+
+async fn get_block_header(electrum_client: &ElectrumClient, height: u64) -> Result<BlockHeader, GetHeaderError> {
+    Ok(deserialize(
+        &electrum_client.blockchain_block_header(height).compat().await?,
+    )?)
+} + +async fn find_watched_output_spend_with_header( + electrum_client: &ElectrumClient, + output: &WatchedOutput, +) -> Result, FindWatchedOutputSpendError> { + // from_block parameter is not used in find_output_spend for electrum clients + let utxo_client: UtxoRpcClientEnum = electrum_client.clone().into(); + let tx_hash = H256::from(output.outpoint.txid.as_hash().into_inner()); + let output_spend = match utxo_client + .find_output_spend( + tx_hash, + output.script_pubkey.as_ref(), + output.outpoint.index.into(), + BlockHashOrHeight::Hash(Default::default()), + ) + .compat() + .await + .map_err(FindWatchedOutputSpendError::RpcError)? + { + Some(output) => output, + None => return Ok(None), + }; + + let height = match output_spend.spent_in_block { + BlockHashOrHeight::Height(h) => h, + _ => return Err(FindWatchedOutputSpendError::HashNotHeight), + }; + let block_header = get_block_header(electrum_client, height as u64) + .await + .map_err(FindWatchedOutputSpendError::GetHeaderError)?; + let spending_tx = Transaction::try_from(output_spend.spending_tx)?; + + Ok(Some(TxWithBlockInfo { + tx: spending_tx, + block_header, + block_height: height as u64, + })) +} + +pub async fn get_best_header(best_header_listener: &ElectrumClient) -> EnableLightningResult { + best_header_listener + .blockchain_headers_subscribe() + .compat() + .await + .map_to_mm(|e| EnableLightningError::RpcError(e.to_string())) +} + +pub async fn update_best_block( + chain_monitor: &ChainMonitor, + channel_manager: &ChannelManager, + best_header: ElectrumBlockHeader, +) { + { + let (new_best_header, new_best_height) = match best_header { + ElectrumBlockHeader::V12(h) => { + let nonce = match h.nonce { + ElectrumNonce::Number(n) => n as u32, + ElectrumNonce::Hash(_) => { + return; + }, + }; + let prev_blockhash = match sha256d::Hash::from_slice(&h.prev_block_hash.0) { + Ok(h) => h, + Err(e) => { + error!("Error while parsing previous block hash for lightning node: {}", e); + return; + }, + }; + let merkle_root = match sha256d::Hash::from_slice(&h.merkle_root.0) { + Ok(h) => h, + Err(e) => { + error!("Error while parsing merkle root for lightning node: {}", e); + return; + }, + }; + ( + BlockHeader { + version: h.version as i32, + prev_blockhash: BlockHash::from_hash(prev_blockhash), + merkle_root: TxMerkleNode::from_hash(merkle_root), + time: h.timestamp as u32, + bits: h.bits as u32, + nonce, + }, + h.block_height as u32, + ) + }, + ElectrumBlockHeader::V14(h) => { + let block_header = match deserialize(&h.hex.into_vec()) { + Ok(header) => header, + Err(e) => { + error!("Block header deserialization error: {}", e.to_string()); + return; + }, + }; + (block_header, h.height as u32) + }, + }; + channel_manager.best_block_updated(&new_best_header, new_best_height); + chain_monitor.best_block_updated(&new_best_header, new_best_height); + } +} + +pub async fn ln_best_block_update_loop( + platform: Arc, + persister: Arc, + chain_monitor: Arc, + channel_manager: Arc, + best_header_listener: ElectrumClient, + best_block: RpcBestBlock, +) { + let mut current_best_block = best_block; + loop { + let best_header = ok_or_continue_after_sleep!(get_best_header(&best_header_listener).await, TRY_LOOP_INTERVAL); + if current_best_block != best_header.clone().into() { + platform.update_best_block_height(best_header.block_height()); + platform + .process_txs_unconfirmations(&chain_monitor, &channel_manager) + .await; + platform + .process_txs_confirmations(&best_header_listener, &persister, &chain_monitor, &channel_manager) + .await; + 
current_best_block = best_header.clone().into();
+            update_best_block(&chain_monitor, &channel_manager, best_header).await;
+        }
+        Timer::sleep(CHECK_FOR_NEW_BEST_BLOCK_INTERVAL).await;
+    }
+}
+
+struct ConfirmedTransactionInfo {
+    txid: Txid,
+    header: BlockHeader,
+    index: usize,
+    transaction: Transaction,
+    height: u32,
+}
+
+impl ConfirmedTransactionInfo {
+    fn new(txid: Txid, header: BlockHeader, index: usize, transaction: Transaction, height: u32) -> Self {
+        ConfirmedTransactionInfo {
+            txid,
+            header,
+            index,
+            transaction,
+            height,
+        }
+    }
+}
+
+pub struct Platform {
+    pub coin: UtxoStandardCoin,
+    /// Main/testnet/signet/regtest. Needed for the lightning node to know which network to connect to.
+    pub network: BlockchainNetwork,
+    /// The best block height.
+    pub best_block_height: AtomicU64,
+    /// Default fees and confirmation targets to be used for the FeeEstimator. Default fees are used when the call to
+    /// estimate_fee_sat fails.
+    pub default_fees_and_confirmations: PlatformCoinConfirmations,
+    /// This cache stores the transactions that the LN node has interest in.
+    pub registered_txs: PaMutex<HashMap<Txid, HashSet<Script>>>,
+    /// This cache stores the outputs that the LN node has interest in.
+    pub registered_outputs: PaMutex<Vec<WatchedOutput>>,
+    /// This cache stores transactions to be broadcasted once the other node accepts the channel
+    pub unsigned_funding_txs: PaMutex<HashMap<u64, TransactionInputSigner>>,
+}
+
+impl Platform {
+    #[inline]
+    pub fn new(
+        coin: UtxoStandardCoin,
+        network: BlockchainNetwork,
+        default_fees_and_confirmations: PlatformCoinConfirmations,
+    ) -> Self {
+        Platform {
+            coin,
+            network,
+            best_block_height: AtomicU64::new(0),
+            default_fees_and_confirmations,
+            registered_txs: PaMutex::new(HashMap::new()),
+            registered_outputs: PaMutex::new(Vec::new()),
+            unsigned_funding_txs: PaMutex::new(HashMap::new()),
+        }
+    }
+
+    #[inline]
+    fn rpc_client(&self) -> &UtxoRpcClientEnum { &self.coin.as_ref().rpc_client }
+
+    #[inline]
+    pub fn update_best_block_height(&self, new_height: u64) {
+        self.best_block_height.store(new_height, AtomicOrdering::Relaxed);
+    }
+
+    #[inline]
+    pub fn best_block_height(&self) -> u64 { self.best_block_height.load(AtomicOrdering::Relaxed) }
+
+    pub fn add_tx(&self, txid: Txid, script_pubkey: Script) {
+        let mut registered_txs = self.registered_txs.lock();
+        registered_txs
+            .entry(txid)
+            .or_insert_with(HashSet::new)
+            .insert(script_pubkey);
+    }
+
+    pub fn add_output(&self, output: WatchedOutput) {
+        let mut registered_outputs = self.registered_outputs.lock();
+        registered_outputs.push(output);
+    }
+
+    async fn get_tx_if_onchain(&self, txid: Txid) -> Result<Option<Transaction>, GetTxError> {
+        let txid = h256_json_from_txid(txid);
+        match self
+            .rpc_client()
+            .get_transaction_bytes(&txid)
+            .compat()
+            .await
+            .map_err(|e| e.into_inner())
+        {
+            Ok(bytes) => Ok(Some(deserialize(&bytes.into_vec())?)),
+            Err(err) => {
+                if let UtxoRpcError::ResponseParseError(ref json_err) = err {
+                    if let JsonRpcErrorType::Response(_, json) = &json_err.error {
+                        if let Some(message) = json["message"].as_str() {
+                            if message.contains("'code': -5") {
+                                return Ok(None);
+                            }
+                        }
+                    }
+                }
+                Err(err.into())
+            },
+        }
+    }
+
+    async fn process_tx_for_unconfirmation<T>(&self, txid: Txid, monitor: &T)
+    where
+        T: Confirm,
+    {
+        match self.get_tx_if_onchain(txid).await {
+            Ok(Some(_)) => {},
+            Ok(None) => {
+                info!(
+                    "Transaction {} is not found on chain. The transaction will be re-broadcasted.",
+                    txid,
+                );
+                monitor.transaction_unconfirmed(&txid);
+            },
+            Err(e) => error!(
+                "Error while trying to check if the transaction {} is discarded or not: {:?}",
+                txid, e
+            ),
+        }
+    }
+
+    pub async fn process_txs_unconfirmations(&self, chain_monitor: &ChainMonitor, channel_manager: &ChannelManager) {
+        // Retrieve channel manager transaction IDs to check the chain for un-confirmations
+        let channel_manager_relevant_txids = channel_manager.get_relevant_txids();
+        for txid in channel_manager_relevant_txids {
+            self.process_tx_for_unconfirmation(txid, channel_manager).await;
+        }
+
+        // Retrieve chain monitor transaction IDs to check the chain for un-confirmations
+        let chain_monitor_relevant_txids = chain_monitor.get_relevant_txids();
+        for txid in chain_monitor_relevant_txids {
+            self.process_tx_for_unconfirmation(txid, chain_monitor).await;
+        }
+    }
+
+    async fn get_confirmed_registered_txs(&self, client: &ElectrumClient) -> Vec<ConfirmedTransactionInfo> {
+        let registered_txs = self.registered_txs.lock().clone();
+        let mut confirmed_registered_txs = Vec::new();
+        for (txid, scripts) in registered_txs {
+            if let Some(transaction) =
+                ok_or_continue_after_sleep!(self.get_tx_if_onchain(txid).await, TRY_LOOP_INTERVAL)
+            {
+                for vout in transaction.output.iter() {
+                    if scripts.contains(&vout.script_pubkey) {
+                        let script_hash = hex::encode(electrum_script_hash(vout.script_pubkey.as_ref()));
+                        let history = ok_or_retry_after_sleep!(
+                            client.scripthash_get_history(&script_hash).compat().await,
+                            TRY_LOOP_INTERVAL
+                        );
+                        for item in history {
+                            let rpc_txid = h256_json_from_txid(txid);
+                            if item.tx_hash == rpc_txid && item.height > 0 {
+                                let height = item.height as u64;
+                                let header =
+                                    ok_or_retry_after_sleep!(get_block_header(client, height).await, TRY_LOOP_INTERVAL);
+                                let index = ok_or_retry_after_sleep!(
+                                    client
+                                        .blockchain_transaction_get_merkle(rpc_txid, height)
+                                        .compat()
+                                        .await,
+                                    TRY_LOOP_INTERVAL
+                                )
+                                .pos;
+                                let confirmed_transaction_info = ConfirmedTransactionInfo::new(
+                                    txid,
+                                    header,
+                                    index,
+                                    transaction.clone(),
+                                    height as u32,
+                                );
+                                confirmed_registered_txs.push(confirmed_transaction_info);
+                                self.registered_txs.lock().remove(&txid);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        confirmed_registered_txs
+    }
+
+    async fn append_spent_registered_output_txs(
+        &self,
+        transactions_to_confirm: &mut Vec<ConfirmedTransactionInfo>,
+        client: &ElectrumClient,
+    ) {
+        let mut outputs_to_remove = Vec::new();
+        let registered_outputs = self.registered_outputs.lock().clone();
+        for output in registered_outputs {
+            if let Some(tx_info) = ok_or_continue_after_sleep!(
+                find_watched_output_spend_with_header(client, &output).await,
+                TRY_LOOP_INTERVAL
+            ) {
+                if !transactions_to_confirm
+                    .iter()
+                    .any(|info| info.txid == tx_info.tx.txid())
+                {
+                    let rpc_txid = h256_json_from_txid(tx_info.tx.txid());
+                    let index = ok_or_retry_after_sleep!(
+                        client
+                            .blockchain_transaction_get_merkle(rpc_txid, tx_info.block_height)
+                            .compat()
+                            .await,
+                        TRY_LOOP_INTERVAL
+                    )
+                    .pos;
+                    let confirmed_transaction_info = ConfirmedTransactionInfo::new(
+                        tx_info.tx.txid(),
+                        tx_info.block_header,
+                        index,
+                        tx_info.tx,
+                        tx_info.block_height as u32,
+                    );
+                    transactions_to_confirm.push(confirmed_transaction_info);
+                }
+                outputs_to_remove.push(output);
+            }
+        }
+        self.registered_outputs
+            .lock()
+            .retain(|output| !outputs_to_remove.contains(output));
+    }
+
+    pub async fn process_txs_confirmations(
+        &self,
+        client: &ElectrumClient,
+        persister: &LightningPersister,
+        chain_monitor: &ChainMonitor,
channel_manager: &ChannelManager, + ) { + let mut transactions_to_confirm = self.get_confirmed_registered_txs(client).await; + self.append_spent_registered_output_txs(&mut transactions_to_confirm, client) + .await; + + transactions_to_confirm.sort_by(|a, b| (a.height, a.index).cmp(&(b.height, b.index))); + + for confirmed_transaction_info in transactions_to_confirm { + let best_block_height = self.best_block_height(); + if let Err(e) = persister + .update_funding_tx_block_height( + confirmed_transaction_info.transaction.txid().to_string(), + best_block_height, + ) + .await + { + error!("Unable to update the funding tx block height in DB: {}", e); + } + channel_manager.transactions_confirmed( + &confirmed_transaction_info.header, + &[( + confirmed_transaction_info.index, + &confirmed_transaction_info.transaction, + )], + confirmed_transaction_info.height, + ); + chain_monitor.transactions_confirmed( + &confirmed_transaction_info.header, + &[( + confirmed_transaction_info.index, + &confirmed_transaction_info.transaction, + )], + confirmed_transaction_info.height, + ); + } + } + + pub async fn get_channel_closing_tx(&self, channel_details: SqlChannelDetails) -> SaveChannelClosingResult { + let from_block = channel_details + .funding_generated_in_block + .ok_or_else(|| MmError::new(SaveChannelClosingError::BlockHeightNull))?; + + let tx_id = channel_details + .funding_tx + .ok_or_else(|| MmError::new(SaveChannelClosingError::FundingTxNull))?; + + let tx_hash = + H256Json::from_str(&tx_id).map_to_mm(|e| SaveChannelClosingError::FundingTxParseError(e.to_string()))?; + + let funding_tx_bytes = ok_or_retry_after_sleep!( + self.rpc_client().get_transaction_bytes(&tx_hash).compat().await, + TRY_LOOP_INTERVAL + ); + + let closing_tx = self + .coin + .wait_for_tx_spend( + &funding_tx_bytes.into_vec(), + (now_ms() / 1000) + 3600, + from_block, + &None, + ) + .compat() + .await + .map_to_mm(SaveChannelClosingError::WaitForFundingTxSpendError)?; + + let closing_tx_hash = format!("{:02x}", closing_tx.tx_hash()); + + Ok(closing_tx_hash) + } +} + +impl FeeEstimator for Platform { + // Gets estimated satoshis of fee required per 1000 Weight-Units. + fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 { + let platform_coin = &self.coin; + + let default_fee = match confirmation_target { + ConfirmationTarget::Background => self.default_fees_and_confirmations.background.default_fee_per_kb, + ConfirmationTarget::Normal => self.default_fees_and_confirmations.normal.default_fee_per_kb, + ConfirmationTarget::HighPriority => self.default_fees_and_confirmations.high_priority.default_fee_per_kb, + }; + + let conf = &platform_coin.as_ref().conf; + let n_blocks = match confirmation_target { + ConfirmationTarget::Background => self.default_fees_and_confirmations.background.n_blocks, + ConfirmationTarget::Normal => self.default_fees_and_confirmations.normal.n_blocks, + ConfirmationTarget::HighPriority => self.default_fees_and_confirmations.high_priority.n_blocks, + }; + let fee_per_kb = tokio::task::block_in_place(move || { + self.rpc_client() + .estimate_fee_sat( + platform_coin.decimals(), + // Todo: when implementing Native client detect_fee_method should be used for Native and + // EstimateFeeMethod::Standard for Electrum + &EstimateFeeMethod::Standard, + &conf.estimate_fee_mode, + n_blocks, + ) + .wait() + .unwrap_or(default_fee) + }); + // Must be no smaller than 253 (ie 1 satoshi-per-byte rounded up to ensure later round-downs don’t put us below 1 satoshi-per-byte). 
+        // https://docs.rs/lightning/0.0.101/lightning/chain/chaininterface/trait.FeeEstimator.html#tymethod.get_est_sat_per_1000_weight
+        cmp::max((fee_per_kb as f64 / 4.0).ceil() as u32, MIN_ALLOWED_FEE_PER_1000_WEIGHT)
+    }
+}
+
+impl BroadcasterInterface for Platform {
+    fn broadcast_transaction(&self, tx: &Transaction) {
+        let txid = tx.txid();
+        let tx_hex = serialize_hex(tx);
+        debug!("Trying to broadcast transaction: {}", tx_hex);
+        let fut = self.coin.send_raw_tx(&tx_hex);
+        spawn(async move {
+            match fut.compat().await {
+                Ok(id) => info!("Transaction broadcasted successfully: {:?} ", id),
+                Err(e) => error!("Broadcast transaction {} failed: {}", txid, e),
+            }
+        });
+    }
+}
+
+impl Filter for Platform {
+    // Watches for this transaction on-chain
+    #[inline]
+    fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { self.add_tx(*txid, script_pubkey.clone()); }
+
+    // Watches for any transactions that spend this output on-chain
+    fn register_output(&self, output: WatchedOutput) -> Option<(usize, Transaction)> {
+        self.add_output(output.clone());
+
+        let block_hash = match output.block_hash {
+            Some(h) => H256Json::from(h.as_hash().into_inner()),
+            None => return None,
+        };
+
+        // Although this works for both native and electrum clients since the block hash is available,
+        // the Filter interface (register_output and register_tx) should be used for electrum clients only.
+        // This is why the filter is initialized as an Option in the start_lightning function: it will be None
+        // when lightning is implemented for native clients.
+        let output_spend_info = tokio::task::block_in_place(move || {
+            let delay = TRY_LOOP_INTERVAL as u64;
+            ok_or_retry_after_sleep_sync!(
+                self.rpc_client()
+                    .find_output_spend(
+                        H256::from(output.outpoint.txid.as_hash().into_inner()),
+                        output.script_pubkey.as_ref(),
+                        output.outpoint.index.into(),
+                        BlockHashOrHeight::Hash(block_hash),
+                    )
+                    .wait(),
+                delay
+            )
+        });
+
+        if let Some(info) = output_spend_info {
+            return match Transaction::try_from(info.spending_tx) {
+                Ok(tx) => Some((info.input_index, tx)),
+                Err(e) => {
+                    error!("Can't convert transaction: {}", e.to_string());
+                    None
+                },
+            };
+        }
+
+        None
+    }
+}
diff --git a/mm2src/coins/lightning/ln_rpc.rs b/mm2src/coins/lightning/ln_rpc.rs
deleted file mode 100644
index 7fa6b22f61..0000000000
--- a/mm2src/coins/lightning/ln_rpc.rs
+++ /dev/null
@@ -1,169 +0,0 @@
-use super::*;
-use crate::utxo::rpc_clients::{BlockHashOrHeight, ElectrumClient, EstimateFeeMethod, UtxoRpcClientEnum};
-use crate::utxo::utxo_standard::UtxoStandardCoin;
-use crate::{MarketCoinOps, MmCoin};
-use bitcoin::blockdata::block::BlockHeader;
-use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::consensus::encode;
-use bitcoin::hash_types::Txid;
-use bitcoin_hashes::Hash;
-use common::executor::spawn;
-use common::log;
-use derive_more::Display;
-use futures::compat::Future01CompatExt;
-use keys::hash::H256;
-use lightning::chain::{chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator},
-                       Filter, WatchedOutput};
-use rpc::v1::types::H256 as H256Json;
-use std::cmp;
-use std::convert::TryFrom;
-
-const MIN_ALLOWED_FEE_PER_1000_WEIGHT: u32 = 253;
-
-impl FeeEstimator for PlatformFields {
-    // Gets estimated satoshis of fee required per 1000 Weight-Units.
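// get_est_sat_per_1000_weight above converts an estimate in sat/kilobyte into sat per
// 1000 weight units: one virtual byte is four weight units, hence the division by 4,
// and 253 is the floor LDK documents. A std-only check of that arithmetic:
//
//     const MIN_ALLOWED_FEE_PER_1000_WEIGHT: u32 = 253;
//
//     fn sat_per_1000_weight(fee_per_kb: u64) -> u32 {
//         std::cmp::max((fee_per_kb as f64 / 4.0).ceil() as u32, MIN_ALLOWED_FEE_PER_1000_WEIGHT)
//     }
//
//     fn main() {
//         assert_eq!(sat_per_1000_weight(1000), 253); // 1 sat/vbyte is clamped up to the floor
//         assert_eq!(sat_per_1000_weight(2000), 500); // 2 sat/vbyte -> 500 sat per 1000 weight
//     }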
- fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 { - let platform_coin = &self.platform_coin; - - let default_fee = match confirmation_target { - ConfirmationTarget::Background => self.default_fees_and_confirmations.background.default_feerate, - ConfirmationTarget::Normal => self.default_fees_and_confirmations.normal.default_feerate, - ConfirmationTarget::HighPriority => self.default_fees_and_confirmations.high_priority.default_feerate, - } * 4; - - let conf = &platform_coin.as_ref().conf; - let n_blocks = match confirmation_target { - ConfirmationTarget::Background => self.default_fees_and_confirmations.background.n_blocks, - ConfirmationTarget::Normal => self.default_fees_and_confirmations.normal.n_blocks, - ConfirmationTarget::HighPriority => self.default_fees_and_confirmations.high_priority.n_blocks, - }; - let fee_per_kb = tokio::task::block_in_place(move || { - platform_coin - .as_ref() - .rpc_client - .estimate_fee_sat( - platform_coin.decimals(), - // Todo: when implementing Native client detect_fee_method should be used for Native and - // EstimateFeeMethod::Standard for Electrum - &EstimateFeeMethod::Standard, - &conf.estimate_fee_mode, - n_blocks, - ) - .wait() - .unwrap_or(default_fee) - }); - // Must be no smaller than 253 (ie 1 satoshi-per-byte rounded up to ensure later round-downs don’t put us below 1 satoshi-per-byte). - // https://docs.rs/lightning/0.0.101/lightning/chain/chaininterface/trait.FeeEstimator.html#tymethod.get_est_sat_per_1000_weight - cmp::max((fee_per_kb as f64 / 4.0).ceil() as u32, MIN_ALLOWED_FEE_PER_1000_WEIGHT) - } -} - -impl BroadcasterInterface for UtxoStandardCoin { - fn broadcast_transaction(&self, tx: &Transaction) { - let tx_hex = encode::serialize_hex(tx); - log::debug!("Trying to broadcast transaction: {}", tx_hex); - let tx_id = tx.txid(); - let fut = self.send_raw_tx(&tx_hex); - spawn(async move { - match fut.compat().await { - Ok(id) => log::info!("Transaction broadcasted successfully: {:?} ", id), - Err(e) => log::error!("Broadcast transaction {} failed: {}", tx_id, e), - } - }); - } -} - -#[derive(Debug, Display)] -pub enum FindWatchedOutputSpendError { - #[display(fmt = "Can't convert transaction: {}", _0)] - TransactionConvertionErr(String), - #[display(fmt = "Can't deserialize block header: {}", _0)] - BlockHeaderDeserializeErr(String), -} - -pub async fn find_watched_output_spend_with_header( - electrum_client: &ElectrumClient, - output: &WatchedOutput, -) -> Result, FindWatchedOutputSpendError> { - // from_block parameter is not used in find_output_spend for electrum clients - let utxo_client: UtxoRpcClientEnum = electrum_client.clone().into(); - let output_spend = match utxo_client - .find_output_spend( - H256::from(output.outpoint.txid.as_hash().into_inner()), - output.script_pubkey.as_ref(), - output.outpoint.index.into(), - BlockHashOrHeight::Hash(Default::default()), - ) - .compat() - .await - { - Ok(Some(output)) => output, - _ => return Ok(None), - }; - - if let BlockHashOrHeight::Height(height) = output_spend.spent_in_block { - if let Ok(header) = electrum_client.blockchain_block_header(height as u64).compat().await { - match encode::deserialize(&header) { - Ok(h) => { - let spending_tx = match Transaction::try_from(output_spend.spending_tx) { - Ok(tx) => tx, - Err(e) => return Err(FindWatchedOutputSpendError::TransactionConvertionErr(e.to_string())), - }; - return Ok(Some((h, output_spend.input_index, spending_tx, height as u64))); - }, - Err(e) => return 
Err(FindWatchedOutputSpendError::BlockHeaderDeserializeErr(e.to_string())), - } - } - } - Ok(None) -} - -impl Filter for PlatformFields { - // Watches for this transaction on-chain - fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { self.add_tx(txid, script_pubkey); } - - // Watches for any transactions that spend this output on-chain - fn register_output(&self, output: WatchedOutput) -> Option<(usize, Transaction)> { - self.add_output(output.clone()); - - let block_hash = match output.block_hash { - Some(h) => H256Json::from(h.as_hash().into_inner()), - None => return None, - }; - - let client = &self.platform_coin.as_ref().rpc_client; - // Although this works for both native and electrum clients as the block hash is available, - // the filter interface which includes register_output and register_tx should be used for electrum clients only, - // this is the reason for initializing the filter as an option in the start_lightning function as it will be None - // when implementing lightning for native clients - let output_spend_fut = tokio::task::block_in_place(move || { - client - .find_output_spend( - H256::from(output.outpoint.txid.as_hash().into_inner()), - output.script_pubkey.as_ref(), - output.outpoint.index.into(), - BlockHashOrHeight::Hash(block_hash), - ) - .wait() - }); - - match output_spend_fut { - Ok(Some(spent_output_info)) => { - let spending_tx = match Transaction::try_from(spent_output_info.spending_tx) { - Ok(tx) => tx, - Err(e) => { - log::error!("Can't convert transaction error: {}", e.to_string()); - return None; - }, - }; - Some((spent_output_info.input_index, spending_tx)) - }, - Ok(None) => None, - Err(e) => { - log::error!("Error when calling register_output: {}", e); - None - }, - } - } -} diff --git a/mm2src/coins/lightning/ln_serialization.rs b/mm2src/coins/lightning/ln_serialization.rs index 12d43b4299..82f0a700c7 100644 --- a/mm2src/coins/lightning/ln_serialization.rs +++ b/mm2src/coins/lightning/ln_serialization.rs @@ -47,6 +47,7 @@ impl<'de> de::Deserialize<'de> for InvoiceForRPC { } // TODO: support connection to onion addresses +#[derive(Debug, PartialEq)] pub struct NodeAddress { pub pubkey: PublicKey, pub addr: SocketAddr, @@ -101,7 +102,7 @@ impl<'de> de::Deserialize<'de> for NodeAddress { } #[derive(Clone, Debug, PartialEq)] -pub struct PublicKeyForRPC(PublicKey); +pub struct PublicKeyForRPC(pub PublicKey); impl From for PublicKey { fn from(p: PublicKeyForRPC) -> Self { p.0 } @@ -109,16 +110,93 @@ impl From for PublicKey { impl Serialize for PublicKeyForRPC { fn serialize(&self, serializer: S) -> Result { - serializer.serialize_bytes(&self.0.serialize()) + serializer.serialize_str(&self.0.to_string()) } } impl<'de> de::Deserialize<'de> for PublicKeyForRPC { fn deserialize>(deserializer: D) -> Result { - let slice: &[u8] = de::Deserialize::deserialize(deserializer)?; - let pubkey = - PublicKey::from_slice(slice).map_err(|e| de::Error::custom(format!("Error {} parsing pubkey", e)))?; + struct PublicKeyForRPCVisitor; + + impl<'de> de::Visitor<'de> for PublicKeyForRPCVisitor { + type Value = PublicKeyForRPC; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!(formatter, "a public key") } + + fn visit_str(self, v: &str) -> Result { + let pubkey = PublicKey::from_str(v).map_err(|e| { + let err = format!("Could not parse public key from str {}, err {}", v, e); + de::Error::custom(err) + })?; + Ok(PublicKeyForRPC(pubkey)) + } + } + + deserializer.deserialize_str(PublicKeyForRPCVisitor) + } +} + +#[cfg(test)] +mod tests { + 
use super::*; + use serde_json as json; + + #[test] + fn test_invoice_for_rpc_serialize() { + let invoice_for_rpc = InvoiceForRPC(str::parse::("lntb20u1p3zqmvrpp52hej7trefx6y633aujj6nltjs8cf7lzyp78tfn5y5wpa3udk5tvqdp8xys9xcmpd3sjqsmgd9czq3njv9c8qatrvd5kumcxqrrsscqp79qy9qsqsp5ccy2qgmptg8dthxsjvw2c43uyvqkg6cqey3jpks4xf0tv7xfrqrq3xfnuffau2h2k8defphv2xsktzn2qj5n2l8d9l9zx64fg6jcmdg9kmpevneyyhfnzrpspqdrky8u7l4c6qdnquh8lnevswwrtcd9ypcq89ga09").unwrap()); + let expected = r#""lntb20u1p3zqmvrpp52hej7trefx6y633aujj6nltjs8cf7lzyp78tfn5y5wpa3udk5tvqdp8xys9xcmpd3sjqsmgd9czq3njv9c8qatrvd5kumcxqrrsscqp79qy9qsqsp5ccy2qgmptg8dthxsjvw2c43uyvqkg6cqey3jpks4xf0tv7xfrqrq3xfnuffau2h2k8defphv2xsktzn2qj5n2l8d9l9zx64fg6jcmdg9kmpevneyyhfnzrpspqdrky8u7l4c6qdnquh8lnevswwrtcd9ypcq89ga09""#; + let actual = json::to_string(&invoice_for_rpc).unwrap(); + assert_eq!(expected, actual); + } + + #[test] + fn test_invoice_for_rpc_deserialize() { + let invoice_for_rpc = r#""lntb20u1p3zqmvrpp52hej7trefx6y633aujj6nltjs8cf7lzyp78tfn5y5wpa3udk5tvqdp8xys9xcmpd3sjqsmgd9czq3njv9c8qatrvd5kumcxqrrsscqp79qy9qsqsp5ccy2qgmptg8dthxsjvw2c43uyvqkg6cqey3jpks4xf0tv7xfrqrq3xfnuffau2h2k8defphv2xsktzn2qj5n2l8d9l9zx64fg6jcmdg9kmpevneyyhfnzrpspqdrky8u7l4c6qdnquh8lnevswwrtcd9ypcq89ga09""#; + let expected = InvoiceForRPC(str::parse::("lntb20u1p3zqmvrpp52hej7trefx6y633aujj6nltjs8cf7lzyp78tfn5y5wpa3udk5tvqdp8xys9xcmpd3sjqsmgd9czq3njv9c8qatrvd5kumcxqrrsscqp79qy9qsqsp5ccy2qgmptg8dthxsjvw2c43uyvqkg6cqey3jpks4xf0tv7xfrqrq3xfnuffau2h2k8defphv2xsktzn2qj5n2l8d9l9zx64fg6jcmdg9kmpevneyyhfnzrpspqdrky8u7l4c6qdnquh8lnevswwrtcd9ypcq89ga09").unwrap()); + let actual = json::from_str(invoice_for_rpc).unwrap(); + assert_eq!(expected, actual); + } + + #[test] + fn test_node_address_serialize() { + let node_address = NodeAddress { + pubkey: PublicKey::from_str("038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9").unwrap(), + addr: SocketAddr::new("203.132.94.196".parse().unwrap(), 9735), + }; + let expected = r#""038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9@203.132.94.196:9735""#; + let actual = json::to_string(&node_address).unwrap(); + assert_eq!(expected, actual); + } + + #[test] + fn test_node_address_deserialize() { + let node_address = + r#""038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9@203.132.94.196:9735""#; + let expected = NodeAddress { + pubkey: PublicKey::from_str("038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9").unwrap(), + addr: SocketAddr::new("203.132.94.196".parse().unwrap(), 9735), + }; + let actual: NodeAddress = json::from_str(node_address).unwrap(); + assert_eq!(expected, actual); + } + + #[test] + fn test_public_key_for_rpc_serialize() { + let public_key_for_rpc = PublicKeyForRPC( + PublicKey::from_str("038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9").unwrap(), + ); + let expected = r#""038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9""#; + let actual = json::to_string(&public_key_for_rpc).unwrap(); + assert_eq!(expected, actual); + } - Ok(PublicKeyForRPC(pubkey)) + #[test] + fn test_public_key_for_rpc_deserialize() { + let public_key_for_rpc = r#""038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9""#; + let expected = PublicKeyForRPC( + PublicKey::from_str("038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9").unwrap(), + ); + let actual = json::from_str(public_key_for_rpc).unwrap(); + assert_eq!(expected, actual); } } diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs 
index 8d30d55466..9467396590 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -1,119 +1,45 @@ use super::*; -use crate::lightning::ln_conf::{LightningCoinConf, LightningProtocolConf}; -use crate::lightning::ln_connections::{connect_to_nodes_loop, ln_p2p_loop}; -use crate::utxo::rpc_clients::{electrum_script_hash, BestBlock as RpcBestBlock, ElectrumBlockHeader, ElectrumClient, - ElectrumNonce, UtxoRpcError}; -use crate::utxo::utxo_standard::UtxoStandardCoin; -use crate::DerivationMethod; -use bitcoin::blockdata::block::BlockHeader; -use bitcoin::blockdata::constants::genesis_block; -use bitcoin::blockdata::transaction::Transaction; -use bitcoin::consensus::encode::deserialize; -use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid}; +use crate::lightning::ln_platform::{get_best_header, ln_best_block_update_loop, update_best_block}; +use crate::utxo::rpc_clients::BestBlock as RpcBestBlock; +use bitcoin::hash_types::BlockHash; use bitcoin_hashes::{sha256d, Hash}; use common::executor::{spawn, Timer}; -use common::ip_addr::fetch_external_ip; -use common::jsonrpc_client::JsonRpcErrorType; use common::log; use common::log::LogState; use common::mm_ctx::MmArc; -use futures::compat::Future01CompatExt; -use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager}; -use lightning::chain::{chainmonitor, Access, BestBlock, Confirm, Watch}; +use lightning::chain::keysinterface::{InMemorySigner, KeysManager}; +use lightning::chain::{chainmonitor, BestBlock, Watch}; use lightning::ln::channelmanager; use lightning::ln::channelmanager::{ChainParameters, ChannelManagerReadArgs, SimpleArcChannelManager}; -use lightning::ln::msgs::NetAddress; -use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager}; -use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph}; -use lightning::routing::scoring::Scorer; +use lightning::routing::network_graph::NetworkGraph; +use lightning::util::config::UserConfig; use lightning::util::ser::ReadableArgs; -use lightning_background_processor::BackgroundProcessor; -use lightning_invoice::payment; -use lightning_invoice::utils::DefaultRouter; -use lightning_net_tokio::SocketDescriptor; -use lightning_persister::storage::Storage; -use lightning_persister::FilesystemPersister; -use parking_lot::Mutex as PaMutex; -use rand::RngCore; -use rpc::v1::types::H256; -use std::cmp::Ordering; -use std::collections::HashMap; -use std::convert::TryInto; +use lightning_persister::storage::{DbStorage, FileSystemStorage, NodesAddressesMap, Scorer}; +use lightning_persister::LightningPersister; use std::fs::File; -use std::net::{IpAddr, Ipv4Addr}; use std::path::PathBuf; use std::sync::{Arc, Mutex}; use std::time::SystemTime; -use tokio::net::TcpListener; -const CHECK_FOR_NEW_BEST_BLOCK_INTERVAL: u64 = 60; -const BROADCAST_NODE_ANNOUNCEMENT_INTERVAL: u64 = 600; const NETWORK_GRAPH_PERSIST_INTERVAL: u64 = 600; const SCORER_PERSIST_INTERVAL: u64 = 600; pub type ChainMonitor = chainmonitor::ChainMonitor< InMemorySigner, - Arc, - Arc, - Arc, + Arc, + Arc, + Arc, Arc, - Arc, + Arc, >; -pub type ChannelManager = SimpleArcChannelManager; +pub type ChannelManager = SimpleArcChannelManager; -pub type PeerManager = SimpleArcPeerManager< - SocketDescriptor, - ChainMonitor, - UtxoStandardCoin, - PlatformFields, - dyn Access + Send + Sync, - LogState, ->; - -pub type InvoicePayer = payment::InvoicePayer, Router, Arc>, Arc, E>; - -type Router = DefaultRouter, Arc>; - -// TODO: add TOR address 
option -fn netaddress_from_ipaddr(addr: IpAddr, port: u16) -> Vec { - if addr == Ipv4Addr::new(0, 0, 0, 0) || addr == Ipv4Addr::new(127, 0, 0, 1) { - return Vec::new(); - } - let mut addresses = Vec::new(); - let address = match addr { - IpAddr::V4(addr) => NetAddress::IPv4 { - addr: u32::from(addr).to_be_bytes(), - port, - }, - IpAddr::V6(addr) => NetAddress::IPv6 { - addr: u128::from(addr).to_be_bytes(), - port, - }, - }; - addresses.push(address); - addresses -} +#[inline] +fn ln_data_dir(ctx: &MmArc, ticker: &str) -> PathBuf { ctx.dbdir().join("LIGHTNING").join(ticker) } -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct LightningParams { - // The listening port for the p2p LN node - pub listening_port: u16, - // Printable human-readable string to describe this node to other users. - pub node_name: [u8; 32], - // Node's RGB color. This is used for showing the node in a network graph with the desired color. - pub node_color: [u8; 3], - // Invoice Payer is initialized while starting the lightning node, and it requires the number of payment retries that - // it should do before considering a payment failed or partially failed. If not provided the number of retries will be 5 - // as this is a good default value. - pub payment_retries: Option, - // Node's backup path for channels and other data that requires backup. - pub backup_path: Option, -} - -pub fn ln_data_dir(ctx: &MmArc, ticker: &str) -> PathBuf { ctx.dbdir().join("LIGHTNING").join(ticker) } - -pub fn ln_data_backup_dir(ctx: &MmArc, path: Option, ticker: &str) -> Option { +#[inline] +fn ln_data_backup_dir(ctx: &MmArc, path: Option, ticker: &str) -> Option { path.map(|p| { PathBuf::from(&p) .join(&hex::encode(&**ctx.rmd160())) @@ -122,99 +48,102 @@ pub fn ln_data_backup_dir(ctx: &MmArc, path: Option, ticker: &str) -> Op }) } -pub async fn start_lightning( +pub async fn init_persister( ctx: &MmArc, - platform_coin: UtxoStandardCoin, - protocol_conf: LightningProtocolConf, - conf: LightningCoinConf, - params: LightningParams, -) -> EnableLightningResult { - // Todo: add support for Hardware wallets for funding transactions and spending spendable outputs (channel closing transactions) - if let DerivationMethod::HDWallet(_) = platform_coin.as_ref().derivation_method { - return MmError::err(EnableLightningError::UnsupportedMode( - "'start_lightning'".into(), - "iguana".into(), - )); + platform: Arc, + ticker: String, + backup_path: Option, +) -> EnableLightningResult> { + let ln_data_dir = ln_data_dir(ctx, &ticker); + let ln_data_backup_dir = ln_data_backup_dir(ctx, backup_path, &ticker); + let persister = Arc::new(LightningPersister::new( + ticker.replace('-', "_"), + ln_data_dir, + ln_data_backup_dir, + ctx.sqlite_connection + .ok_or(MmError::new(EnableLightningError::DbError( + "sqlite_connection is not initialized".into(), + )))? + .clone(), + )); + let is_initialized = persister.is_fs_initialized().await?; + if !is_initialized { + persister.init_fs().await?; + } + let is_db_initialized = persister.is_db_initialized().await?; + if !is_db_initialized { + persister.init_db().await?; } - // The set (possibly empty) of socket addresses on which this node accepts incoming connections. - // If the user wishes to preserve privacy, addresses should likely contain only Tor Onion addresses. 
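// init_persister above passes ticker.replace('-', "_") to LightningPersister, presumably
// because the ticker is used to derive SQL table names and '-' is not valid in an
// unquoted SQL identifier. A std-only illustration (the ticker value is hypothetical):
//
//     fn main() {
//         let ticker = "tBTC-TEST";
//         assert_eq!(ticker.replace('-', "_"), "tBTC_TEST");
//     }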
- let listening_addr = myipaddr(ctx.clone()) - .await - .map_to_mm(EnableLightningError::InvalidAddress)?; - // If the listening port is used start_lightning should return an error early - let listener = TcpListener::bind(format!("{}:{}", listening_addr, params.listening_port)) - .await - .map_to_mm(|e| EnableLightningError::IOError(e.to_string()))?; + let closed_channels_without_closing_tx = persister.get_closed_channels_with_no_closing_tx().await?; + for channel_details in closed_channels_without_closing_tx { + let platform = platform.clone(); + let persister = persister.clone(); + let user_channel_id = channel_details.rpc_id; + spawn(async move { + if let Ok(closing_tx_hash) = platform + .get_channel_closing_tx(channel_details) + .await + .error_log_passthrough() + { + if let Err(e) = persister.add_closing_tx_to_db(user_channel_id, closing_tx_hash).await { + log::error!( + "Unable to update channel {} closing details in DB: {}", + user_channel_id, + e + ); + } + } + }); + } - let network = protocol_conf.network.clone().into(); - let platform_fields = Arc::new(PlatformFields { - platform_coin: platform_coin.clone(), - network: protocol_conf.network, - default_fees_and_confirmations: protocol_conf.confirmations, - registered_txs: PaMutex::new(HashMap::new()), - registered_outputs: PaMutex::new(Vec::new()), - unsigned_funding_txs: PaMutex::new(HashMap::new()), - }); + Ok(persister) +} - // Initialize the FeeEstimator. UtxoStandardCoin implements the FeeEstimator trait, so it'll act as our fee estimator. - let fee_estimator = platform_fields.clone(); +pub fn init_keys_manager(ctx: &MmArc) -> EnableLightningResult> { + // The current time is used to derive random numbers from the seed where required, to ensure all random generation is unique across restarts. + let seed: [u8; 32] = ctx.secp256k1_key_pair().private().secret.into(); + let cur = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .map_to_mm(|e| EnableLightningError::SystemTimeError(e.to_string()))?; - // Initialize the Logger - let logger = ctx.log.0.clone(); + Ok(Arc::new(KeysManager::new(&seed, cur.as_secs(), cur.subsec_nanos()))) +} + +pub async fn init_channel_manager( + platform: Arc, + logger: Arc, + persister: Arc, + keys_manager: Arc, + user_config: UserConfig, +) -> EnableLightningResult<(Arc, Arc)> { + // Initialize the FeeEstimator. UtxoStandardCoin implements the FeeEstimator trait, so it'll act as our fee estimator. + let fee_estimator = platform.clone(); // Initialize the BroadcasterInterface. UtxoStandardCoin implements the BroadcasterInterface trait, so it'll act as our transaction // broadcaster. - let broadcaster = Arc::new(platform_coin); - - // Initialize Persist - let ticker = conf.ticker.clone(); - let ln_data_dir = ln_data_dir(ctx, &ticker); - let ln_data_backup_dir = ln_data_backup_dir(ctx, params.backup_path, &ticker); - let persister = Arc::new(FilesystemPersister::new(ln_data_dir, ln_data_backup_dir)); - let is_initialized = persister.is_initialized().await?; - if !is_initialized { - persister.init().await?; - } - - // Initialize the Filter. PlatformFields implements the Filter trait, we can use it to construct the filter. 
-    let filter = Some(platform_fields.clone());
+    let broadcaster = platform.clone();

     // Initialize the ChainMonitor
     let chain_monitor: Arc<ChainMonitor> = Arc::new(chainmonitor::ChainMonitor::new(
-        filter.clone(),
+        Some(platform.clone()),
         broadcaster.clone(),
         logger.clone(),
         fee_estimator.clone(),
         persister.clone(),
     ));

-    let seed: [u8; 32] = ctx.secp256k1_key_pair().private().secret.into();
-
-    // The current time is used to derive random numbers from the seed where required, to ensure all random generation is unique across restarts.
-    let cur = SystemTime::now()
-        .duration_since(SystemTime::UNIX_EPOCH)
-        .map_to_mm(|e| EnableLightningError::SystemTimeError(e.to_string()))?;
-
-    // Initialize the KeysManager
-    let keys_manager = Arc::new(KeysManager::new(&seed, cur.as_secs(), cur.subsec_nanos()));
-
     // Read ChannelMonitor state from disk, important when the lightning node is restarting and has at least 1 channel
     let mut channelmonitors = persister
         .read_channelmonitors(keys_manager.clone())
         .map_to_mm(|e| EnableLightningError::IOError(e.to_string()))?;

     // This is used for Electrum only to prepare for chain synchronization
-    if let Some(ref filter) = filter {
-        for (_, chan_mon) in channelmonitors.iter() {
-            chan_mon.load_outputs_to_watch(filter);
-        }
+    for (_, chan_mon) in channelmonitors.iter() {
+        chan_mon.load_outputs_to_watch(&platform);
     }

-    let mut restarting_node = true;
-    // TODO: Right now it's safe to unwrap here, when implementing Native client for lightning whenever filter is used
-    // the code it's used in will be a part of the electrum client implementation only
-    let rpc_client = match &filter.clone().unwrap().platform_coin.as_ref().rpc_client {
+    let rpc_client = match &platform.coin.as_ref().rpc_client {
         UtxoRpcClientEnum::Electrum(c) => c.clone(),
         UtxoRpcClientEnum::Native(_) => {
             return MmError::err(EnableLightningError::UnsupportedMode(
@@ -224,12 +153,12 @@ pub async fn start_lightning(
         },
     };
     let best_header = get_best_header(&rpc_client).await?;
+    platform.update_best_block_height(best_header.block_height());
     let best_block = RpcBestBlock::from(best_header.clone());
     let best_block_hash = BlockHash::from_hash(
         sha256d::Hash::from_slice(&best_block.hash.0).map_to_mm(|e| EnableLightningError::HashError(e.to_string()))?,
     );
     let (channel_manager_blockhash, channel_manager) = {
-        let user_config = conf.clone().into();
         if let Ok(mut f) = File::open(persister.manager_path()) {
             let mut channel_monitor_mut_references = Vec::new();
             for (_, channel_monitor) in channelmonitors.iter_mut() {
@@ -249,9 +178,8 @@ pub async fn start_lightning(
                 .map_to_mm(|e| EnableLightningError::IOError(e.to_string()))?
} else { // Initialize the ChannelManager to starting a new node without history - restarting_node = false; let chain_params = ChainParameters { - network, + network: platform.network.clone().into(), best_block: BestBlock::new(best_block_hash, best_block.height as u32), }; let new_channel_manager = channelmanager::ChannelManager::new( @@ -270,23 +198,14 @@ pub async fn start_lightning( let channel_manager: Arc = Arc::new(channel_manager); // Sync ChannelMonitors and ChannelManager to chain tip if the node is restarting and has open channels - if restarting_node && channel_manager_blockhash != best_block_hash { - process_txs_unconfirmations( - filter.clone().unwrap().clone(), - chain_monitor.clone(), - channel_manager.clone(), - ) - .await; - process_txs_confirmations( - // It's safe to use unwrap here for now until implementing Native Client for Lightning - filter.clone().unwrap().clone(), - rpc_client.clone(), - chain_monitor.clone(), - channel_manager.clone(), - best_header.block_height(), - ) - .await; - update_best_block(chain_monitor.clone(), channel_manager.clone(), best_header).await; + if channel_manager_blockhash != best_block_hash { + platform + .process_txs_unconfirmations(&chain_monitor, &channel_manager) + .await; + platform + .process_txs_confirmations(&rpc_client, &persister, &chain_monitor, &channel_manager) + .await; + update_best_block(&chain_monitor, &channel_manager, best_header).await; } // Give ChannelMonitors to ChainMonitor @@ -297,543 +216,55 @@ pub async fn start_lightning( .map_to_mm(|e| EnableLightningError::IOError(format!("{:?}", e)))?; } - // Initialize the NetGraphMsgHandler. This is used for providing routes to send payments over - let default_network_graph = NetworkGraph::new(genesis_block(network).header.block_hash()); - let network_graph = Arc::new(persister.get_network_graph().await.unwrap_or(default_network_graph)); - let network_gossip = Arc::new(NetGraphMsgHandler::new( - network_graph.clone(), - None::>, - logger.clone(), - )); - let network_graph_persister = persister.clone(); - let network_graph_persist = network_graph.clone(); - spawn(async move { - loop { - if let Err(e) = network_graph_persister - .save_network_graph(network_graph_persist.clone()) - .await - { - log::warn!( - "Failed to persist network graph error: {}, please check disk space and permissions", - e - ); - } - Timer::sleep(NETWORK_GRAPH_PERSIST_INTERVAL as f64).await; - } - }); - - // Initialize the PeerManager - // ephemeral_random_data is used to derive per-connection ephemeral keys - let mut ephemeral_bytes = [0; 32]; - rand::thread_rng().fill_bytes(&mut ephemeral_bytes); - let lightning_msg_handler = MessageHandler { - chan_handler: channel_manager.clone(), - route_handler: network_gossip.clone(), - }; - // IgnoringMessageHandler is used as custom message types (experimental and application-specific messages) is not needed - let peer_manager: Arc = Arc::new(PeerManager::new( - lightning_msg_handler, - keys_manager.get_node_secret(), - &ephemeral_bytes, - logger.clone(), - Arc::new(IgnoringMessageHandler {}), - )); - - // Initialize p2p networking - spawn(ln_p2p_loop(peer_manager.clone(), listener)); - // Update best block whenever there's a new chain tip or a block has been newly disconnected spawn(ln_best_block_update_loop( // It's safe to use unwrap here for now until implementing Native Client for Lightning - filter.clone().unwrap(), + platform, + persister.clone(), chain_monitor.clone(), channel_manager.clone(), rpc_client.clone(), best_block, )); - let inbound_payments = 
Arc::new(PaMutex::new(HashMap::new())); - let outbound_payments = Arc::new(PaMutex::new(HashMap::new())); - - // Initialize the event handler - let event_handler = Arc::new(ln_events::LightningEventHandler::new( - // It's safe to use unwrap here for now until implementing Native Client for Lightning - filter.clone().unwrap(), - channel_manager.clone(), - keys_manager.clone(), - inbound_payments.clone(), - outbound_payments.clone(), - )); - - // Initialize routing Scorer - let scorer = Arc::new(Mutex::new(persister.get_scorer().await.unwrap_or_default())); - let scorer_persister = persister.clone(); - let scorer_persist = scorer.clone(); - spawn(async move { - loop { - if let Err(e) = scorer_persister.save_scorer(scorer_persist.clone()).await { - log::warn!( - "Failed to persist scorer error: {}, please check disk space and permissions", - e - ); - } - Timer::sleep(SCORER_PERSIST_INTERVAL as f64).await; - } - }); - - // Create InvoicePayer - let router = DefaultRouter::new(network_graph, logger.clone()); - let invoice_payer = Arc::new(InvoicePayer::new( - channel_manager.clone(), - router, - scorer, - logger.clone(), - event_handler, - payment::RetryAttempts(params.payment_retries.unwrap_or(5)), - )); - - // Persist ChannelManager - // Note: if the ChannelManager is not persisted properly to disk, there is risk of channels force closing the next time LN starts up - let channel_manager_persister = persister.clone(); - let persist_channel_manager_callback = - move |node: &ChannelManager| channel_manager_persister.persist_manager(&*node); - - // Start Background Processing. Runs tasks periodically in the background to keep LN node operational. - // InvoicePayer will act as our event handler as it handles some of the payments related events before - // delegating it to LightningEventHandler. - let background_processor = BackgroundProcessor::start( - persist_channel_manager_callback, - invoice_payer.clone(), - chain_monitor.clone(), - channel_manager.clone(), - Some(network_gossip), - peer_manager.clone(), - logger, - ); - - // If node is restarting read other nodes data from disk and reconnect to channel nodes/peers if possible. 
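The spawned persistence loops deleted here are consolidated later in this file as `persist_network_graph_loop`/`persist_scorer_loop`. A thread-based, std-only sketch of the shared pattern (the real code is async and uses `Timer::sleep` with the crate's interval constants; the interval below is a made-up stand-in):

use std::thread;
use std::time::Duration;

const PERSIST_INTERVAL_SECS: u64 = 600; // hypothetical stand-in

fn persist_loop(mut save: impl FnMut() -> Result<(), String>) {
    loop {
        // Failures are logged and retried on the next tick instead of aborting the node.
        if let Err(e) = save() {
            eprintln!("Failed to persist: {}, please check disk space and permissions", e);
        }
        thread::sleep(Duration::from_secs(PERSIST_INTERVAL_SECS));
    }
}

fn main() {
    // A saver that always succeeds; loops forever like the real tasks.
    persist_loop(|| Ok(()));
}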
- let mut nodes_addresses_map = HashMap::new(); - if restarting_node { - let mut nodes_addresses = persister.get_nodes_addresses().await?; - for (pubkey, node_addr) in nodes_addresses.drain() { - if channel_manager - .list_channels() - .iter() - .map(|chan| chan.counterparty.node_id) - .any(|node_id| node_id == pubkey) - { - nodes_addresses_map.insert(pubkey, node_addr); - } - } - } - let nodes_addresses = Arc::new(PaMutex::new(nodes_addresses_map)); - - if restarting_node { - spawn(connect_to_nodes_loop(nodes_addresses.clone(), peer_manager.clone())); - } - - // Broadcast Node Announcement - spawn(ln_node_announcement_loop( - channel_manager.clone(), - params.node_name, - params.node_color, - listening_addr, - params.listening_port, - )); - - Ok(LightningCoin { - platform_fields, - conf, - peer_manager, - background_processor: Arc::new(background_processor), - channel_manager, - chain_monitor, - keys_manager, - invoice_payer, - persister, - inbound_payments, - outbound_payments, - nodes_addresses, - }) -} - -struct ConfirmedTransactionInfo { - txid: Txid, - header: BlockHeader, - index: usize, - transaction: Transaction, - height: u32, -} - -impl ConfirmedTransactionInfo { - fn new(txid: Txid, header: BlockHeader, index: usize, transaction: Transaction, height: u32) -> Self { - ConfirmedTransactionInfo { - txid, - header, - index, - transaction, - height, - } - } -} - -async fn process_tx_for_unconfirmation(txid: Txid, filter: Arc, monitor: Arc) -where - T: Confirm, -{ - if let Err(err) = filter - .platform_coin - .as_ref() - .rpc_client - .get_transaction_bytes(&H256::from(txid.as_hash().into_inner()).reversed()) - .compat() - .await - .map_err(|e| e.into_inner()) - { - if let UtxoRpcError::ResponseParseError(ref json_err) = err { - if let JsonRpcErrorType::Response(_, json) = &json_err.error { - if let Some(message) = json["message"].as_str() { - if message.contains("'code': -5") { - log::info!( - "Transaction {} is not found on chain :{}. 
The transaction will be re-broadcasted.", - txid, - err - ); - monitor.transaction_unconfirmed(&txid); - } - } - } - } - log::error!( - "Error while trying to check if the transaction {} is discarded or not :{}", - txid, - err - ); - } + Ok((chain_monitor, channel_manager)) } -async fn process_txs_unconfirmations( - filter: Arc, - chain_monitor: Arc, - channel_manager: Arc, -) { - // Retrieve channel manager transaction IDs to check the chain for un-confirmations - let channel_manager_relevant_txids = channel_manager.get_relevant_txids(); - for txid in channel_manager_relevant_txids { - process_tx_for_unconfirmation(txid, filter.clone(), channel_manager.clone()).await; - } - - // Retrieve chain monitor transaction IDs to check the chain for un-confirmations - let chain_monitor_relevant_txids = chain_monitor.get_relevant_txids(); - for txid in chain_monitor_relevant_txids { - process_tx_for_unconfirmation(txid, filter.clone(), chain_monitor.clone()).await; - } -} - -async fn get_confirmed_registered_txs( - filter: Arc, - client: &ElectrumClient, - current_height: u64, -) -> Vec { - let registered_txs = filter.registered_txs.lock().clone(); - let mut confirmed_registered_txs = Vec::new(); - for (txid, scripts) in registered_txs { - let rpc_txid = H256::from(txid.as_hash().into_inner()).reversed(); - match filter - .platform_coin - .as_ref() - .rpc_client - .get_transaction_bytes(&rpc_txid) - .compat() - .await - { - Ok(bytes) => { - let transaction: Transaction = match deserialize(&bytes.into_vec()) { - Ok(tx) => tx, - Err(e) => { - log::error!("Transaction deserialization error: {}", e.to_string()); - continue; - }, - }; - for (_, vout) in transaction.output.iter().enumerate() { - if scripts.contains(&vout.script_pubkey) { - let script_hash = hex::encode(electrum_script_hash(vout.script_pubkey.as_ref())); - let history = client - .scripthash_get_history(&script_hash) - .compat() - .await - .unwrap_or_default(); - for item in history { - if item.tx_hash == rpc_txid { - // If a new block mined the transaction while running process_txs_confirmations it will be confirmed later in ln_best_block_update_loop - if item.height > 0 && item.height <= current_height as i64 { - let height: u64 = match item.height.try_into() { - Ok(h) => h, - Err(e) => { - log::error!("Block height convertion to u64 error: {}", e.to_string()); - continue; - }, - }; - let header = match client.blockchain_block_header(height).compat().await { - Ok(block_header) => match deserialize(&block_header) { - Ok(h) => h, - Err(e) => { - log::error!("Block header deserialization error: {}", e.to_string()); - continue; - }, - }, - Err(_) => continue, - }; - let index = match client - .blockchain_transaction_get_merkle(rpc_txid, height) - .compat() - .await - { - Ok(merkle_branch) => merkle_branch.pos, - Err(e) => { - log::error!( - "Error getting transaction position in the block: {}", - e.to_string() - ); - continue; - }, - }; - let confirmed_transaction_info = ConfirmedTransactionInfo::new( - txid, - header, - index, - transaction.clone(), - height as u32, - ); - confirmed_registered_txs.push(confirmed_transaction_info); - filter.registered_txs.lock().remove(&txid); - } - } - } - } - } - }, - Err(e) => { - log::error!("Error getting transaction {} from chain: {}", txid, e); - continue; - }, - }; - } - confirmed_registered_txs -} - -async fn append_spent_registered_output_txs( - transactions_to_confirm: &mut Vec, - filter: Arc, - client: &ElectrumClient, -) { - let mut outputs_to_remove = Vec::new(); - let registered_outputs = 
filter.registered_outputs.lock().clone(); - for output in registered_outputs { - let result = match ln_rpc::find_watched_output_spend_with_header(client, &output).await { - Ok(res) => res, - Err(e) => { - log::error!( - "Error while trying to find if the registered output {:?} is spent: {}", - output.outpoint, - e - ); - continue; - }, - }; - if let Some((header, _, tx, height)) = result { - if !transactions_to_confirm.iter().any(|info| info.txid == tx.txid()) { - let rpc_txid = H256::from(tx.txid().as_hash().into_inner()).reversed(); - let index = match client - .blockchain_transaction_get_merkle(rpc_txid, height) - .compat() - .await - { - Ok(merkle_branch) => merkle_branch.pos, - Err(e) => { - log::error!("Error getting transaction position in the block: {}", e.to_string()); - continue; - }, - }; - let confirmed_transaction_info = - ConfirmedTransactionInfo::new(tx.txid(), header, index, tx, height as u32); - transactions_to_confirm.push(confirmed_transaction_info); - } - outputs_to_remove.push(output); - } - } - filter - .registered_outputs - .lock() - .retain(|output| !outputs_to_remove.contains(output)); -} - -async fn process_txs_confirmations( - filter: Arc, - client: ElectrumClient, - chain_monitor: Arc, - channel_manager: Arc, - current_height: u64, -) { - let mut transactions_to_confirm = get_confirmed_registered_txs(filter.clone(), &client, current_height).await; - append_spent_registered_output_txs(&mut transactions_to_confirm, filter.clone(), &client).await; - - transactions_to_confirm.sort_by(|a, b| { - let block_order = a.height.cmp(&b.height); - match block_order { - Ordering::Equal => a.index.cmp(&b.index), - _ => block_order, +pub async fn persist_network_graph_loop(persister: Arc, network_graph: Arc) { + loop { + if let Err(e) = persister.save_network_graph(network_graph.clone()).await { + log::warn!( + "Failed to persist network graph error: {}, please check disk space and permissions", + e + ); } - }); - - for confirmed_transaction_info in transactions_to_confirm { - channel_manager.transactions_confirmed( - &confirmed_transaction_info.header, - &[( - confirmed_transaction_info.index, - &confirmed_transaction_info.transaction, - )], - confirmed_transaction_info.height, - ); - chain_monitor.transactions_confirmed( - &confirmed_transaction_info.header, - &[( - confirmed_transaction_info.index, - &confirmed_transaction_info.transaction, - )], - confirmed_transaction_info.height, - ); + Timer::sleep(NETWORK_GRAPH_PERSIST_INTERVAL as f64).await; } } -async fn get_best_header(best_header_listener: &ElectrumClient) -> EnableLightningResult { - best_header_listener - .blockchain_headers_subscribe() - .compat() - .await - .map_to_mm(|e| EnableLightningError::RpcError(e.to_string())) -} - -async fn update_best_block( - chain_monitor: Arc, - channel_manager: Arc, - best_header: ElectrumBlockHeader, -) { - { - let (new_best_header, new_best_height) = match best_header { - ElectrumBlockHeader::V12(h) => { - let nonce = match h.nonce { - ElectrumNonce::Number(n) => n as u32, - ElectrumNonce::Hash(_) => { - return; - }, - }; - let prev_blockhash = match sha256d::Hash::from_slice(&h.prev_block_hash.0) { - Ok(h) => h, - Err(e) => { - log::error!("Error while parsing previous block hash for lightning node: {}", e); - return; - }, - }; - let merkle_root = match sha256d::Hash::from_slice(&h.merkle_root.0) { - Ok(h) => h, - Err(e) => { - log::error!("Error while parsing merkle root for lightning node: {}", e); - return; - }, - }; - ( - BlockHeader { - version: h.version as i32, - 
prev_blockhash: BlockHash::from_hash(prev_blockhash), - merkle_root: TxMerkleNode::from_hash(merkle_root), - time: h.timestamp as u32, - bits: h.bits as u32, - nonce, - }, - h.block_height as u32, - ) - }, - ElectrumBlockHeader::V14(h) => { - let block_header = match deserialize(&h.hex.into_vec()) { - Ok(header) => header, - Err(e) => { - log::error!("Block header deserialization error: {}", e.to_string()); - return; - }, - }; - (block_header, h.height as u32) - }, - }; - channel_manager.best_block_updated(&new_best_header, new_best_height); - chain_monitor.best_block_updated(&new_best_header, new_best_height); - } -} - -async fn ln_best_block_update_loop( - filter: Arc, - chain_monitor: Arc, - channel_manager: Arc, - best_header_listener: ElectrumClient, - best_block: RpcBestBlock, -) { - let mut current_best_block = best_block; +pub async fn persist_scorer_loop(persister: Arc, scorer: Arc>) { loop { - let best_header = match get_best_header(&best_header_listener).await { - Ok(h) => h, - Err(e) => { - log::error!("Error while requesting best header for lightning node: {}", e); - Timer::sleep(CHECK_FOR_NEW_BEST_BLOCK_INTERVAL as f64).await; - continue; - }, - }; - if current_best_block != best_header.clone().into() { - process_txs_unconfirmations(filter.clone(), chain_monitor.clone(), channel_manager.clone()).await; - process_txs_confirmations( - filter.clone(), - best_header_listener.clone(), - chain_monitor.clone(), - channel_manager.clone(), - best_header.block_height(), - ) - .await; - current_best_block = best_header.clone().into(); - update_best_block(chain_monitor.clone(), channel_manager.clone(), best_header).await; + if let Err(e) = persister.save_scorer(scorer.clone()).await { + log::warn!( + "Failed to persist scorer error: {}, please check disk space and permissions", + e + ); } - Timer::sleep(CHECK_FOR_NEW_BEST_BLOCK_INTERVAL as f64).await; + Timer::sleep(SCORER_PERSIST_INTERVAL as f64).await; } } -async fn ln_node_announcement_loop( +pub async fn get_open_channels_nodes_addresses( + persister: Arc, channel_manager: Arc, - node_name: [u8; 32], - node_color: [u8; 3], - addr: IpAddr, - port: u16, -) { - let addresses = netaddress_from_ipaddr(addr, port); - loop { - let addresses_to_announce = if addresses.is_empty() { - // Right now if the node is behind NAT the external ip is fetched on every loop - // If the node does not announce a public IP, it will not be displayed on the network graph, - // and other nodes will not be able to open a channel with it. But it can open channels with other nodes. 
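`get_open_channels_nodes_addresses`, added at the end of this hunk, narrows the persisted address book to counterparties that still back an open channel. A std-only sketch of that `retain` filtering, with strings standing in for `PublicKey`/`SocketAddr`:

use std::collections::HashMap;

fn main() {
    // Pubkeys of counterparties with open channels (fake values).
    let open_channel_counterparties = vec!["alice_pubkey", "bob_pubkey"];
    let mut nodes_addresses: HashMap<&str, &str> =
        [("alice_pubkey", "1.2.3.4:9735"), ("carol_pubkey", "5.6.7.8:9735")]
            .into_iter()
            .collect();

    // Keep only entries whose pubkey still has an open channel.
    nodes_addresses.retain(|pubkey, _node_addr| {
        open_channel_counterparties.iter().any(|node_id| node_id == pubkey)
    });

    assert_eq!(nodes_addresses.len(), 1);
    assert!(nodes_addresses.contains_key("alice_pubkey"));
}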
- match fetch_external_ip().await { - Ok(ip) => { - log::debug!("Fetch real IP successfully: {}:{}", ip, port); - netaddress_from_ipaddr(ip, port) - }, - Err(e) => { - log::error!("Error while fetching external ip for node announcement: {}", e); - Timer::sleep(BROADCAST_NODE_ANNOUNCEMENT_INTERVAL as f64).await; - continue; - }, - } - } else { - addresses.clone() - }; - - channel_manager.broadcast_node_announcement(node_color, node_name, addresses_to_announce); - - Timer::sleep(BROADCAST_NODE_ANNOUNCEMENT_INTERVAL as f64).await; - } +) -> EnableLightningResult { + let channels = channel_manager.list_channels(); + let mut nodes_addresses = persister.get_nodes_addresses().await?; + nodes_addresses.retain(|pubkey, _node_addr| { + channels + .iter() + .map(|chan| chan.counterparty.node_id) + .any(|node_id| node_id == *pubkey) + }); + Ok(nodes_addresses) } diff --git a/mm2src/coins/lightning_background_processor/Cargo.toml b/mm2src/coins/lightning_background_processor/Cargo.toml index e9e85b5f77..8c950bfc4c 100644 --- a/mm2src/coins/lightning_background_processor/Cargo.toml +++ b/mm2src/coins/lightning_background_processor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-background-processor" -version = "0.0.104" +version = "0.0.105" authors = ["Valentine Wallace "] license = "MIT OR Apache-2.0" repository = "http://github.com/lightningdevkit/rust-lightning" @@ -11,9 +11,10 @@ edition = "2018" [dependencies] bitcoin = "0.27.1" -lightning = { version = "0.0.104", features = ["std"] } +lightning = { version = "0.0.105", features = ["std"] } [dev-dependencies] -lightning = { version = "0.0.104", features = ["_test_utils"] } -lightning-invoice = "0.12.0" -lightning-persister = { version = "0.0.104", path = "../lightning_persister" } +db_common = { path = "../../db_common" } +lightning = { version = "0.0.105", features = ["_test_utils"] } +lightning-invoice = "0.13.0" +lightning-persister = { version = "0.0.105", path = "../lightning_persister" } diff --git a/mm2src/coins/lightning_background_processor/src/lib.rs b/mm2src/coins/lightning_background_processor/src/lib.rs index e91956e260..c563f1a75f 100644 --- a/mm2src/coins/lightning_background_processor/src/lib.rs +++ b/mm2src/coins/lightning_background_processor/src/lib.rs @@ -57,7 +57,7 @@ const FRESHNESS_TIMER: u64 = 60; const FRESHNESS_TIMER: u64 = 1; #[cfg(all(not(test), not(debug_assertions)))] -const PING_TIMER: u64 = 5; +const PING_TIMER: u64 = 10; /// Signature operations take a lot longer without compiler optimisations. /// Increasing the ping timer allows for this but slower devices will be disconnected if the /// timeout is reached. @@ -148,7 +148,7 @@ impl BackgroundProcessor { /// /// `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a - /// [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's + /// [`ChannelManager`]. See [`LightningPersister::persist_manager`] for Rust-Lightning's /// provided implementation. 
/// /// Typically, users should either implement [`ChannelManagerPersister`] to never return an @@ -167,7 +167,7 @@ impl BackgroundProcessor { /// [`stop`]: Self::stop /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable - /// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager + /// [`LightningPersister::persist_manager`]: lightning_persister::LightningPersister::persist_manager /// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph pub fn start< Signer: 'static + Sign, @@ -229,10 +229,16 @@ impl BackgroundProcessor { let mut have_pruned = false; loop { - peer_manager.process_events(); + peer_manager.process_events(); // Note that this may block on ChannelManager's locking channel_manager.process_pending_events(&event_handler); chain_monitor.process_pending_events(&event_handler); + + // We wait up to 100ms, but track how long it takes to detect being put to sleep, + // see `await_start`'s use below. + let await_start = Instant::now(); let updates_available = channel_manager.await_persistable_update_timeout(Duration::from_millis(100)); + let await_time = await_start.elapsed(); + if updates_available { log_trace!(logger, "Persisting ChannelManager..."); persister.persist_manager(&*channel_manager)?; @@ -241,25 +247,27 @@ impl BackgroundProcessor { // Exit the loop if the background processor was requested to stop. if stop_thread.load(Ordering::Acquire) { log_trace!(logger, "Terminating background processor."); - return Ok(()); + break; } if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER { log_trace!(logger, "Calling ChannelManager's timer_tick_occurred"); channel_manager.timer_tick_occurred(); last_freshness_call = Instant::now(); } - if last_ping_call.elapsed().as_secs() > PING_TIMER * 2 { + if await_time > Duration::from_secs(1) { // On various platforms, we may be starved of CPU cycles for several reasons. // E.g. on iOS, if we've been in the background, we will be entirely paused. // Similarly, if we're on a desktop platform and the device has been asleep, we // may not get any cycles. - // In any case, if we've been entirely paused for more than double our ping - // timer, we should have disconnected all sockets by now (and they're probably - // dead anyway), so disconnect them by calling `timer_tick_occurred()` twice. - log_trace!( - logger, - "Awoke after more than double our ping timer, disconnecting peers." - ); + // We detect this by checking if our max-100ms-sleep, above, ran longer than a + // full second, at which point we assume sockets may have been killed (they + // appear to be at least on some platforms, even if it has only been a second). + // Note that we have to take care to not get here just because user event + // processing was slow at the top of the loop. For example, the sample client + // may call Bitcoin Core RPCs during event handling, which very often takes + // more than a handful of seconds to complete, and shouldn't disconnect all our + // peers. 
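A std-only illustration of the suspend-detection heuristic described in the comments above: time a nominally 100ms wait and treat a much longer elapsed time as evidence the process was paused (`thread::sleep` stands in for `await_persistable_update_timeout`):

use std::thread;
use std::time::{Duration, Instant};

fn main() {
    let await_start = Instant::now();
    thread::sleep(Duration::from_millis(100)); // stand-in for the bounded wait
    let await_time = await_start.elapsed();
    if await_time > Duration::from_secs(1) {
        // A 100ms wait that took over a second suggests the process was suspended;
        // peer sockets are probably dead, so they should be disconnected.
        println!("slept {:?}, disconnecting peers", await_time);
    }
}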
+ log_trace!(logger, "100ms sleep took more than a second, disconnecting peers."); peer_manager.disconnect_all_peers(); last_ping_call = Instant::now(); } else if last_ping_call.elapsed().as_secs() > PING_TIMER { @@ -281,6 +289,10 @@ impl BackgroundProcessor { } } } + // After we exit, ensure we persist the ChannelManager one final time - this avoids + // some races where users quit while channel updates were in-flight, with + // ChannelMonitor update(s) persisted without a corresponding ChannelManager update. + persister.persist_manager(&*channel_manager) }); Self { stop_thread: stop_thread_clone, @@ -340,8 +352,9 @@ mod tests { use bitcoin::blockdata::constants::genesis_block; use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::network::constants::Network; + use db_common::sqlite::rusqlite::Connection; use lightning::chain::channelmonitor::ANTI_REORG_DELAY; - use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager}; + use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient}; use lightning::chain::transaction::OutPoint; use lightning::chain::{chainmonitor, BestBlock, Confirm}; use lightning::get_event_msg; @@ -356,7 +369,7 @@ mod tests { use lightning::util::test_utils; use lightning_invoice::payment::{InvoicePayer, RetryAttempts}; use lightning_invoice::utils::DefaultRouter; - use lightning_persister::FilesystemPersister; + use lightning_persister::LightningPersister; use std::fs; use std::path::PathBuf; use std::sync::{Arc, Mutex}; @@ -378,7 +391,7 @@ mod tests { Arc, Arc, Arc, - Arc, + Arc, >; struct Node { @@ -403,7 +416,7 @@ mod tests { >, >, chain_monitor: Arc, - persister: Arc, + persister: Arc, tx_broadcaster: Arc, network_graph: Arc, logger: Arc, @@ -442,9 +455,11 @@ mod tests { }); let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i))); - let persister = Arc::new(FilesystemPersister::new( + let persister = Arc::new(LightningPersister::new( + format!("node_{}_ticker", i), PathBuf::from(format!("{}_persister_{}", persist_dir, i)), None, + Arc::new(Mutex::new(Connection::open_in_memory().unwrap())), )); let seed = [i as u8; 32]; let network = Network::Testnet; @@ -481,7 +496,7 @@ mod tests { }; let peer_manager = Arc::new(PeerManager::new( msg_handler, - keys_manager.get_node_secret(), + keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler {}, @@ -903,7 +918,7 @@ mod tests { Arc, >| node_0_persister.persist_manager(node); let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger)); - let scorer = Arc::new(Mutex::new(test_utils::TestScorer::default())); + let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0))); let invoice_payer = Arc::new(InvoicePayer::new( Arc::clone(&nodes[0].node), router, diff --git a/mm2src/coins/lightning_persister/Cargo.toml b/mm2src/coins/lightning_persister/Cargo.toml index 4d246bc9bb..b474276f26 100644 --- a/mm2src/coins/lightning_persister/Cargo.toml +++ b/mm2src/coins/lightning_persister/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-persister" -version = "0.0.104" +version = "0.0.105" edition = "2018" authors = ["Valentine Wallace", "Matt Corallo"] license = "MIT OR Apache-2.0" @@ -13,14 +13,19 @@ Utilities to manage Rust-Lightning channel data persistence and retrieval. 
async-trait = "0.1" bitcoin = "0.27.1" common = { path = "../../common" } -lightning = "0.0.104" +db_common = { path = "../../db_common" } +derive_more = "0.99" +hex = "0.4.2" +lightning = "0.0.105" libc = "0.2" parking_lot = { version = "0.12.0", features = ["nightly"] } secp256k1 = { version = "0.20" } +serde = "1.0" serde_json = "1.0" [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.0.104", features = ["_test_utils"] } \ No newline at end of file +lightning = { version = "0.0.105", features = ["_test_utils"] } +rand = { version = "0.7", features = ["std", "small_rng"] } \ No newline at end of file diff --git a/mm2src/coins/lightning_persister/src/lib.rs b/mm2src/coins/lightning_persister/src/lib.rs index e8076f0668..7ca83b577c 100644 --- a/mm2src/coins/lightning_persister/src/lib.rs +++ b/mm2src/coins/lightning_persister/src/lib.rs @@ -13,13 +13,22 @@ extern crate lightning; extern crate secp256k1; extern crate serde_json; -use crate::storage::{NodesAddressesMap, NodesAddressesMapShared, Storage}; +use crate::storage::{ChannelType, ChannelVisibility, ClosedChannelsFilter, DbStorage, FileSystemStorage, + GetClosedChannelsResult, GetPaymentsResult, HTLCStatus, NodesAddressesMap, + NodesAddressesMapShared, PaymentInfo, PaymentType, PaymentsFilter, Scorer, SqlChannelDetails}; use crate::util::DiskWriteable; use async_trait::async_trait; +use bitcoin::blockdata::constants::genesis_block; use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::hashes::hex::{FromHex, ToHex}; -use common::async_blocking; +use bitcoin::Network; use common::fs::check_dir_operations; +use common::{async_blocking, now_ms, PagingOptionsEnum}; +use db_common::sqlite::rusqlite::{Error as SqlError, Row, ToSql, NO_PARAMS}; +use db_common::sqlite::sql_builder::SqlBuilder; +use db_common::sqlite::{h256_option_slice_from_row, h256_slice_from_row, offset_by_id, query_single_row, + sql_text_conversion_err, string_from_row, validate_table_name, SqliteConnShared, + CHECK_TABLE_EXISTS_SQL}; use lightning::chain; use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use lightning::chain::chainmonitor; @@ -27,12 +36,14 @@ use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate}; use lightning::chain::keysinterface::{KeysInterface, Sign}; use lightning::chain::transaction::OutPoint; use lightning::ln::channelmanager::ChannelManager; +use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::routing::network_graph::NetworkGraph; -use lightning::routing::scoring::Scorer; +use lightning::routing::scoring::ProbabilisticScoringParameters; use lightning::util::logger::Logger; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; use secp256k1::PublicKey; use std::collections::HashMap; +use std::convert::TryInto; use std::fs; use std::io::{BufReader, BufWriter, Cursor, Error}; use std::net::SocketAddr; @@ -41,8 +52,9 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::{Arc, Mutex}; -/// FilesystemPersister persists channel data on disk, where each channel's +/// LightningPersister persists channel data on disk, where each channel's /// data is stored in a file named after its funding outpoint. +/// It is also used to persist payments and channels history to sqlite database. /// /// Warning: this module does the best it can with calls to persist data, but it /// can only guarantee that the data is passed to the drive. 
It is up to the @@ -52,10 +64,13 @@ use std::sync::{Arc, Mutex}; /// persistent. /// Corollary: especially when dealing with larger amounts of money, it is best /// practice to have multiple channel data backups and not rely only on one -/// FilesystemPersister. -pub struct FilesystemPersister { +/// LightningPersister. + +pub struct LightningPersister { + storage_ticker: String, main_path: PathBuf, backup_path: Option, + sqlite_connection: SqliteConnShared, } impl DiskWriteable for ChannelMonitor { @@ -74,10 +89,493 @@ where fn write_to_file(&self, writer: &mut fs::File) -> Result<(), std::io::Error> { self.write(writer) } } -impl FilesystemPersister { - /// Initialize a new FilesystemPersister and set the path to the individual channels' +fn channels_history_table(ticker: &str) -> String { ticker.to_owned() + "_channels_history" } + +fn payments_history_table(ticker: &str) -> String { ticker.to_owned() + "_payments_history" } + +fn create_channels_history_table_sql(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "CREATE TABLE IF NOT EXISTS {} ( + id INTEGER NOT NULL PRIMARY KEY, + rpc_id INTEGER NOT NULL UNIQUE, + channel_id VARCHAR(255) NOT NULL, + counterparty_node_id VARCHAR(255) NOT NULL, + funding_tx VARCHAR(255), + funding_value INTEGER, + funding_generated_in_block Integer, + closing_tx VARCHAR(255), + closure_reason TEXT, + claiming_tx VARCHAR(255), + claimed_balance REAL, + is_outbound INTEGER NOT NULL, + is_public INTEGER NOT NULL, + is_closed INTEGER NOT NULL, + created_at INTEGER NOT NULL, + last_updated INTEGER NOT NULL + );", + table_name + ); + + Ok(sql) +} + +fn create_payments_history_table_sql(for_coin: &str) -> Result { + let table_name = payments_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "CREATE TABLE IF NOT EXISTS {} ( + id INTEGER NOT NULL PRIMARY KEY, + payment_hash VARCHAR(255) NOT NULL UNIQUE, + destination VARCHAR(255), + description VARCHAR(641) NOT NULL, + preimage VARCHAR(255), + secret VARCHAR(255), + amount_msat INTEGER, + fee_paid_msat INTEGER, + is_outbound INTEGER NOT NULL, + status VARCHAR(255) NOT NULL, + created_at INTEGER NOT NULL, + last_updated INTEGER NOT NULL + );", + table_name + ); + + Ok(sql) +} + +fn insert_channel_sql(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "INSERT INTO {} ( + rpc_id, + channel_id, + counterparty_node_id, + is_outbound, + is_public, + is_closed, + created_at, + last_updated + ) VALUES ( + ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8 + );", + table_name + ); + + Ok(sql) +} + +fn upsert_payment_sql(for_coin: &str) -> Result { + let table_name = payments_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "INSERT OR REPLACE INTO {} ( + payment_hash, + destination, + description, + preimage, + secret, + amount_msat, + fee_paid_msat, + is_outbound, + status, + created_at, + last_updated + ) VALUES ( + ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11 + );", + table_name + ); + + Ok(sql) +} + +fn select_channel_by_rpc_id_sql(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "SELECT + rpc_id, + channel_id, + counterparty_node_id, + funding_tx, + funding_value, + funding_generated_in_block, + closing_tx, + closure_reason, + claiming_tx, + claimed_balance, + is_outbound, + is_public, + 
is_closed, + created_at, + last_updated + FROM + {} + WHERE + rpc_id=?1", + table_name + ); + + Ok(sql) +} + +fn select_payment_by_hash_sql(for_coin: &str) -> Result { + let table_name = payments_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "SELECT + payment_hash, + destination, + description, + preimage, + secret, + amount_msat, + fee_paid_msat, + status, + is_outbound, + created_at, + last_updated + FROM + {} + WHERE + payment_hash=?1;", + table_name + ); + + Ok(sql) +} + +fn channel_details_from_row(row: &Row<'_>) -> Result { + let channel_details = SqlChannelDetails { + rpc_id: row.get::<_, u32>(0)? as u64, + channel_id: row.get(1)?, + counterparty_node_id: row.get(2)?, + funding_tx: row.get(3)?, + funding_value: row.get::<_, Option>(4)?.map(|v| v as u64), + funding_generated_in_block: row.get::<_, Option>(5)?.map(|v| v as u64), + closing_tx: row.get(6)?, + closure_reason: row.get(7)?, + claiming_tx: row.get(8)?, + claimed_balance: row.get::<_, Option>(9)?, + is_outbound: row.get(10)?, + is_public: row.get(11)?, + is_closed: row.get(12)?, + created_at: row.get::<_, u32>(13)? as u64, + last_updated: row.get::<_, u32>(14)? as u64, + }; + Ok(channel_details) +} + +fn payment_info_from_row(row: &Row<'_>) -> Result { + let is_outbound = row.get::<_, bool>(8)?; + let payment_type = if is_outbound { + PaymentType::OutboundPayment { + destination: PublicKey::from_str(&row.get::<_, String>(1)?).map_err(|e| sql_text_conversion_err(1, e))?, + } + } else { + PaymentType::InboundPayment + }; + + let payment_info = PaymentInfo { + payment_hash: PaymentHash(h256_slice_from_row::(row, 0)?), + payment_type, + description: row.get(2)?, + preimage: h256_option_slice_from_row::(row, 3)?.map(PaymentPreimage), + secret: h256_option_slice_from_row::(row, 4)?.map(PaymentSecret), + amt_msat: row.get::<_, Option>(5)?.map(|v| v as u64), + fee_paid_msat: row.get::<_, Option>(6)?.map(|v| v as u64), + status: HTLCStatus::from_str(&row.get::<_, String>(7)?)?, + created_at: row.get::<_, u32>(9)? as u64, + last_updated: row.get::<_, u32>(10)? 
as u64, + }; + Ok(payment_info) +} + +fn get_last_channel_rpc_id_sql(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!("SELECT IFNULL(MAX(rpc_id), 0) FROM {};", table_name); + + Ok(sql) +} + +fn update_funding_tx_sql(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "UPDATE {} SET + funding_tx = ?1, + funding_value = ?2, + funding_generated_in_block = ?3, + last_updated = ?4 + WHERE + rpc_id = ?5;", + table_name + ); + + Ok(sql) +} + +fn update_funding_tx_block_height_sql(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "UPDATE {} SET funding_generated_in_block = ?1 WHERE funding_tx = ?2;", + table_name + ); + + Ok(sql) +} + +fn update_channel_to_closed_sql(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "UPDATE {} SET closure_reason = ?1, is_closed = ?2, last_updated = ?3 WHERE rpc_id = ?4;", + table_name + ); + + Ok(sql) +} + +fn update_closing_tx_sql(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let sql = format!( + "UPDATE {} SET closing_tx = ?1, last_updated = ?2 WHERE rpc_id = ?3;", + table_name + ); + + Ok(sql) +} + +fn get_channels_builder_preimage(for_coin: &str) -> Result { + let table_name = channels_history_table(for_coin); + validate_table_name(&table_name)?; + + let mut sql_builder = SqlBuilder::select_from(table_name); + sql_builder.and_where("is_closed = 1"); + Ok(sql_builder) +} + +fn add_fields_to_get_channels_sql_builder(sql_builder: &mut SqlBuilder) { + sql_builder + .field("rpc_id") + .field("channel_id") + .field("counterparty_node_id") + .field("funding_tx") + .field("funding_value") + .field("funding_generated_in_block") + .field("closing_tx") + .field("closure_reason") + .field("claiming_tx") + .field("claimed_balance") + .field("is_outbound") + .field("is_public") + .field("is_closed") + .field("created_at") + .field("last_updated"); +} + +fn finalize_get_channels_sql_builder(sql_builder: &mut SqlBuilder, offset: usize, limit: usize) { + sql_builder.offset(offset); + sql_builder.limit(limit); + sql_builder.order_desc("last_updated"); +} + +fn apply_get_channels_filter(builder: &mut SqlBuilder, params: &mut Vec<(&str, String)>, filter: ClosedChannelsFilter) { + if let Some(channel_id) = filter.channel_id { + builder.and_where("channel_id = :channel_id"); + params.push((":channel_id", channel_id)); + } + + if let Some(counterparty_node_id) = filter.counterparty_node_id { + builder.and_where("counterparty_node_id = :counterparty_node_id"); + params.push((":counterparty_node_id", counterparty_node_id)); + } + + if let Some(funding_tx) = filter.funding_tx { + builder.and_where("funding_tx = :funding_tx"); + params.push((":funding_tx", funding_tx)); + } + + if let Some(from_funding_value) = filter.from_funding_value { + builder.and_where("funding_value >= :from_funding_value"); + params.push((":from_funding_value", from_funding_value.to_string())); + } + + if let Some(to_funding_value) = filter.to_funding_value { + builder.and_where("funding_value <= :to_funding_value"); + params.push((":to_funding_value", to_funding_value.to_string())); + } + + if let Some(closing_tx) = filter.closing_tx { + builder.and_where("closing_tx = 
:closing_tx"); + params.push((":closing_tx", closing_tx)); + } + + if let Some(closure_reason) = filter.closure_reason { + builder.and_where(format!("closure_reason LIKE '%{}%'", closure_reason)); + } + + if let Some(claiming_tx) = filter.claiming_tx { + builder.and_where("claiming_tx = :claiming_tx"); + params.push((":claiming_tx", claiming_tx)); + } + + if let Some(from_claimed_balance) = filter.from_claimed_balance { + builder.and_where("claimed_balance >= :from_claimed_balance"); + params.push((":from_claimed_balance", from_claimed_balance.to_string())); + } + + if let Some(to_claimed_balance) = filter.to_claimed_balance { + builder.and_where("claimed_balance <= :to_claimed_balance"); + params.push((":to_claimed_balance", to_claimed_balance.to_string())); + } + + if let Some(channel_type) = filter.channel_type { + let is_outbound = match channel_type { + ChannelType::Outbound => true as i32, + ChannelType::Inbound => false as i32, + }; + + builder.and_where("is_outbound = :is_outbound"); + params.push((":is_outbound", is_outbound.to_string())); + } + + if let Some(channel_visibility) = filter.channel_visibility { + let is_public = match channel_visibility { + ChannelVisibility::Public => true as i32, + ChannelVisibility::Private => false as i32, + }; + + builder.and_where("is_public = :is_public"); + params.push((":is_public", is_public.to_string())); + } +} + +fn get_payments_builder_preimage(for_coin: &str) -> Result { + let table_name = payments_history_table(for_coin); + validate_table_name(&table_name)?; + + Ok(SqlBuilder::select_from(table_name)) +} + +fn finalize_get_payments_sql_builder(sql_builder: &mut SqlBuilder, offset: usize, limit: usize) { + sql_builder + .field("payment_hash") + .field("destination") + .field("description") + .field("preimage") + .field("secret") + .field("amount_msat") + .field("fee_paid_msat") + .field("status") + .field("is_outbound") + .field("created_at") + .field("last_updated"); + sql_builder.offset(offset); + sql_builder.limit(limit); + sql_builder.order_desc("last_updated"); +} + +fn apply_get_payments_filter(builder: &mut SqlBuilder, params: &mut Vec<(&str, String)>, filter: PaymentsFilter) { + if let Some(payment_type) = filter.payment_type { + let (is_outbound, destination) = match payment_type { + PaymentType::OutboundPayment { destination } => (true as i32, Some(destination.to_string())), + PaymentType::InboundPayment => (false as i32, None), + }; + if let Some(dest) = destination { + builder.and_where("destination = :dest"); + params.push((":dest", dest)); + } + + builder.and_where("is_outbound = :is_outbound"); + params.push((":is_outbound", is_outbound.to_string())); + } + + if let Some(description) = filter.description { + builder.and_where(format!("description LIKE '%{}%'", description)); + } + + if let Some(status) = filter.status { + builder.and_where("status = :status"); + params.push((":status", status.to_string())); + } + + if let Some(from_amount) = filter.from_amount_msat { + builder.and_where("amount_msat >= :from_amount"); + params.push((":from_amount", from_amount.to_string())); + } + + if let Some(to_amount) = filter.to_amount_msat { + builder.and_where("amount_msat <= :to_amount"); + params.push((":to_amount", to_amount.to_string())); + } + + if let Some(from_fee) = filter.from_fee_paid_msat { + builder.and_where("fee_paid_msat >= :from_fee"); + params.push((":from_fee", from_fee.to_string())); + } + + if let Some(to_fee) = filter.to_fee_paid_msat { + builder.and_where("fee_paid_msat <= :to_fee"); + params.push((":to_fee", 
to_fee.to_string()));
+    }
+
+    if let Some(from_time) = filter.from_timestamp {
+        builder.and_where("created_at >= :from_time");
+        params.push((":from_time", from_time.to_string()));
+    }
+
+    if let Some(to_time) = filter.to_timestamp {
+        builder.and_where("created_at <= :to_time");
+        params.push((":to_time", to_time.to_string()));
+    }
+}
+
+fn update_claiming_tx_sql(for_coin: &str) -> Result<String, SqlError> {
+    let table_name = channels_history_table(for_coin);
+    validate_table_name(&table_name)?;
+
+    let sql = format!(
+        "UPDATE {} SET claiming_tx = ?1, claimed_balance = ?2, last_updated = ?3 WHERE closing_tx = ?4;",
+        table_name
+    );
+
+    Ok(sql)
+}
+
+impl LightningPersister {
+    /// Initialize a new LightningPersister and set the path to the individual channels'
     /// files.
-    pub fn new(main_path: PathBuf, backup_path: Option<PathBuf>) -> Self { Self { main_path, backup_path } }
+    pub fn new(
+        storage_ticker: String,
+        main_path: PathBuf,
+        backup_path: Option<PathBuf>,
+        sqlite_connection: SqliteConnShared,
+    ) -> Self {
+        Self {
+            storage_ticker,
+            main_path,
+            backup_path,
+            sqlite_connection,
+        }
+    }

     /// Get the directory which was provided when this persister was initialized.
     pub fn main_path(&self) -> PathBuf { self.main_path.clone() }
@@ -131,7 +629,7 @@ impl FilesystemPersister {
-    /// Writes the provided `ChannelManager` to the path provided at `FilesystemPersister`
+    /// Writes the provided `ChannelManager` to the path provided at `LightningPersister`
     /// initialization, within a file called "manager".
     pub fn persist_manager(
         &self,
@@ -175,6 +673,12 @@ impl FilesystemPersister {
                 "Invalid ChannelMonitor file name",
             ));
         }
+        if filename.unwrap().ends_with(".tmp") {
+            // If we were in the middle of committing a new update and crashed, it should be
+            // safe to ignore the update - we should never have returned to the caller and
+            // irrevocably committed to the new state in any way.
+            continue;
+        }

         let txid = Txid::from_hex(filename.unwrap().split_at(64).0);
         if txid.is_err() {
@@ -218,7 +722,7 @@ impl FilesystemPersister {
     }
 }

-impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for FilesystemPersister {
+impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for LightningPersister {
     // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
     // down once these start returning failure.
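For reference, the new four-argument constructor can be exercised the same way the tests later in this diff do (in-memory SQLite, no backup dir; the ticker and path below are arbitrary). A sketch, assuming the workspace crates `lightning_persister` and `db_common` shown in this diff:

use std::path::PathBuf;
use std::sync::{Arc, Mutex};

use db_common::sqlite::rusqlite::Connection; // re-export used by the tests in this diff
use lightning_persister::LightningPersister;

fn example_persister() -> LightningPersister {
    LightningPersister::new(
        "example_ticker".to_string(), // storage_ticker: prefixes the SQL table names
        PathBuf::from("/tmp/ln_example"), // main_path: per-channel files live here
        None, // backup_path: no secondary copy
        Arc::new(Mutex::new(Connection::open_in_memory().unwrap())), // SqliteConnShared
    )
}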
// A PermanentFailure implies we need to shut down since we're force-closing channels without @@ -259,10 +763,10 @@ impl chainmonitor::Persist for FilesystemPer } #[async_trait] -impl Storage for FilesystemPersister { +impl FileSystemStorage for LightningPersister { type Error = std::io::Error; - async fn init(&self) -> Result<(), Self::Error> { + async fn init_fs(&self) -> Result<(), Self::Error> { let path = self.main_path(); let backup_path = self.backup_path(); async_blocking(move || { @@ -276,7 +780,7 @@ impl Storage for FilesystemPersister { .await } - async fn is_initialized(&self) -> Result { + async fn is_fs_initialized(&self) -> Result { let dir_path = self.main_path(); let backup_dir_path = self.backup_path(); async_blocking(move || { @@ -359,8 +863,11 @@ impl Storage for FilesystemPersister { .await } - async fn get_network_graph(&self) -> Result { + async fn get_network_graph(&self, network: Network) -> Result { let path = self.network_graph_path(); + if !path.exists() { + return Ok(NetworkGraph::new(genesis_block(network).header.block_hash())); + } async_blocking(move || { let file = fs::File::open(path)?; common::log::info!("Reading the saved lightning network graph from file, this can take some time!"); @@ -373,18 +880,28 @@ impl Storage for FilesystemPersister { async fn save_network_graph(&self, network_graph: Arc) -> Result<(), Self::Error> { let path = self.network_graph_path(); async_blocking(move || { - let file = fs::OpenOptions::new().create(true).write(true).open(path)?; + let file = fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path)?; network_graph.write(&mut BufWriter::new(file)) }) .await } - async fn get_scorer(&self) -> Result { + async fn get_scorer(&self, network_graph: Arc) -> Result { let path = self.scorer_path(); + if !path.exists() { + return Ok(Scorer::new(ProbabilisticScoringParameters::default(), network_graph)); + } async_blocking(move || { let file = fs::File::open(path)?; - Scorer::read(&mut BufReader::new(file)) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string())) + Scorer::read( + &mut BufReader::new(file), + (ProbabilisticScoringParameters::default(), network_graph), + ) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string())) }) .await } @@ -393,21 +910,431 @@ impl Storage for FilesystemPersister { let path = self.scorer_path(); async_blocking(move || { let scorer = scorer.lock().unwrap(); - let file = fs::OpenOptions::new().create(true).write(true).open(path)?; + let file = fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path)?; scorer.write(&mut BufWriter::new(file)) }) .await } } +#[async_trait] +impl DbStorage for LightningPersister { + type Error = SqlError; + + async fn init_db(&self) -> Result<(), Self::Error> { + let sqlite_connection = self.sqlite_connection.clone(); + let sql_channels_history = create_channels_history_table_sql(self.storage_ticker.as_str())?; + let sql_payments_history = create_payments_history_table_sql(self.storage_ticker.as_str())?; + async_blocking(move || { + let conn = sqlite_connection.lock().unwrap(); + conn.execute(&sql_channels_history, NO_PARAMS).map(|_| ())?; + conn.execute(&sql_payments_history, NO_PARAMS).map(|_| ())?; + Ok(()) + }) + .await + } + + async fn is_db_initialized(&self) -> Result { + let channels_history_table = channels_history_table(self.storage_ticker.as_str()); + validate_table_name(&channels_history_table)?; + let payments_history_table = 
payments_history_table(self.storage_ticker.as_str()); + validate_table_name(&payments_history_table)?; + + let sqlite_connection = self.sqlite_connection.clone(); + async_blocking(move || { + let conn = sqlite_connection.lock().unwrap(); + let channels_history_initialized = + query_single_row(&conn, CHECK_TABLE_EXISTS_SQL, [channels_history_table], string_from_row)?; + let payments_history_initialized = + query_single_row(&conn, CHECK_TABLE_EXISTS_SQL, [payments_history_table], string_from_row)?; + Ok(channels_history_initialized.is_some() && payments_history_initialized.is_some()) + }) + .await + } + + async fn get_last_channel_rpc_id(&self) -> Result { + let sql = get_last_channel_rpc_id_sql(self.storage_ticker.as_str())?; + let sqlite_connection = self.sqlite_connection.clone(); + + async_blocking(move || { + let conn = sqlite_connection.lock().unwrap(); + let count: u32 = conn.query_row(&sql, NO_PARAMS, |r| r.get(0))?; + Ok(count) + }) + .await + } + + async fn add_channel_to_db(&self, details: SqlChannelDetails) -> Result<(), Self::Error> { + let for_coin = self.storage_ticker.clone(); + let rpc_id = details.rpc_id.to_string(); + let channel_id = details.channel_id; + let counterparty_node_id = details.counterparty_node_id; + let is_outbound = (details.is_outbound as i32).to_string(); + let is_public = (details.is_public as i32).to_string(); + let is_closed = (details.is_closed as i32).to_string(); + let created_at = (details.created_at as u32).to_string(); + let last_updated = (details.last_updated as u32).to_string(); + + let params = [ + rpc_id, + channel_id, + counterparty_node_id, + is_outbound, + is_public, + is_closed, + created_at, + last_updated, + ]; + + let sqlite_connection = self.sqlite_connection.clone(); + async_blocking(move || { + let mut conn = sqlite_connection.lock().unwrap(); + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&insert_channel_sql(&for_coin)?, ¶ms)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + } + + async fn add_funding_tx_to_db( + &self, + rpc_id: u64, + funding_tx: String, + funding_value: u64, + funding_generated_in_block: u64, + ) -> Result<(), Self::Error> { + let for_coin = self.storage_ticker.clone(); + let funding_value = funding_value.to_string(); + let funding_generated_in_block = funding_generated_in_block.to_string(); + let last_updated = (now_ms() / 1000).to_string(); + let rpc_id = rpc_id.to_string(); + + let params = [ + funding_tx, + funding_value, + funding_generated_in_block, + last_updated, + rpc_id, + ]; + + let sqlite_connection = self.sqlite_connection.clone(); + async_blocking(move || { + let mut conn = sqlite_connection.lock().unwrap(); + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&update_funding_tx_sql(&for_coin)?, ¶ms)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + } + + async fn update_funding_tx_block_height(&self, funding_tx: String, block_height: u64) -> Result<(), Self::Error> { + let for_coin = self.storage_ticker.clone(); + let generated_in_block = block_height as u32; + + let sqlite_connection = self.sqlite_connection.clone(); + async_blocking(move || { + let mut conn = sqlite_connection.lock().unwrap(); + let sql_transaction = conn.transaction()?; + let params = [&generated_in_block as &dyn ToSql, &funding_tx as &dyn ToSql]; + sql_transaction.execute(&update_funding_tx_block_height_sql(&for_coin)?, ¶ms)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + } + + async fn update_channel_to_closed(&self, rpc_id: u64, closure_reason: String) -> 
Result<(), Self::Error> { + let for_coin = self.storage_ticker.clone(); + let is_closed = "1".to_string(); + let last_updated = (now_ms() / 1000).to_string(); + let rpc_id = rpc_id.to_string(); + + let params = [closure_reason, is_closed, last_updated, rpc_id]; + + let sqlite_connection = self.sqlite_connection.clone(); + async_blocking(move || { + let mut conn = sqlite_connection.lock().unwrap(); + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&update_channel_to_closed_sql(&for_coin)?, ¶ms)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + } + + async fn get_closed_channels_with_no_closing_tx(&self) -> Result, Self::Error> { + let mut builder = get_channels_builder_preimage(self.storage_ticker.as_str())?; + builder.and_where("closing_tx IS NULL"); + add_fields_to_get_channels_sql_builder(&mut builder); + let sql = builder.sql().expect("valid sql"); + let sqlite_connection = self.sqlite_connection.clone(); + + async_blocking(move || { + let conn = sqlite_connection.lock().unwrap(); + + let mut stmt = conn.prepare(&sql)?; + let result = stmt + .query_map_named(&[], channel_details_from_row)? + .collect::>()?; + Ok(result) + }) + .await + } + + async fn add_closing_tx_to_db(&self, rpc_id: u64, closing_tx: String) -> Result<(), Self::Error> { + let for_coin = self.storage_ticker.clone(); + let last_updated = (now_ms() / 1000).to_string(); + let rpc_id = rpc_id.to_string(); + + let params = [closing_tx, last_updated, rpc_id]; + + let sqlite_connection = self.sqlite_connection.clone(); + async_blocking(move || { + let mut conn = sqlite_connection.lock().unwrap(); + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&update_closing_tx_sql(&for_coin)?, ¶ms)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + } + + async fn add_claiming_tx_to_db( + &self, + closing_tx: String, + claiming_tx: String, + claimed_balance: f64, + ) -> Result<(), Self::Error> { + let for_coin = self.storage_ticker.clone(); + let claimed_balance = claimed_balance.to_string(); + let last_updated = (now_ms() / 1000).to_string(); + + let params = [claiming_tx, claimed_balance, last_updated, closing_tx]; + + let sqlite_connection = self.sqlite_connection.clone(); + async_blocking(move || { + let mut conn = sqlite_connection.lock().unwrap(); + let sql_transaction = conn.transaction()?; + sql_transaction.execute(&update_claiming_tx_sql(&for_coin)?, ¶ms)?; + sql_transaction.commit()?; + Ok(()) + }) + .await + } + + async fn get_channel_from_db(&self, rpc_id: u64) -> Result, Self::Error> { + let params = [rpc_id.to_string()]; + let sql = select_channel_by_rpc_id_sql(self.storage_ticker.as_str())?; + let sqlite_connection = self.sqlite_connection.clone(); + + async_blocking(move || { + let conn = sqlite_connection.lock().unwrap(); + query_single_row(&conn, &sql, params, channel_details_from_row) + }) + .await + } + + async fn get_closed_channels_by_filter( + &self, + filter: Option, + paging: PagingOptionsEnum, + limit: usize, + ) -> Result { + let mut sql_builder = get_channels_builder_preimage(self.storage_ticker.as_str())?; + let sqlite_connection = self.sqlite_connection.clone(); + + async_blocking(move || { + let conn = sqlite_connection.lock().unwrap(); + + let mut total_builder = sql_builder.clone(); + total_builder.count("id"); + let total_sql = total_builder.sql().expect("valid sql"); + let total: isize = conn.query_row(&total_sql, NO_PARAMS, |row| row.get(0))?; + let total = total.try_into().expect("count should be always above zero"); + + let offset = match paging { + 
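            // A worked example of the paging arithmetic below: with limit = 10,
            // PageNumber(3) yields offset (3 - 1) * 10 = 20, i.e. rows 21..=30 of the
            // ORDER BY last_updated DESC result set, while FromId(rpc_id) asks
            // offset_by_id() for that row's position and pages from there.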
+                PagingOptionsEnum::PageNumber(page) => (page.get() - 1) * limit,
+                PagingOptionsEnum::FromId(rpc_id) => {
+                    let params = [rpc_id as u32];
+                    let maybe_offset = offset_by_id(
+                        &conn,
+                        &sql_builder,
+                        params,
+                        "rpc_id",
+                        "last_updated DESC",
+                        "rpc_id = ?1",
+                    )?;
+                    match maybe_offset {
+                        Some(offset) => offset,
+                        None => {
+                            return Ok(GetClosedChannelsResult {
+                                channels: vec![],
+                                skipped: 0,
+                                total,
+                            })
+                        },
+                    }
+                },
+            };
+
+            let mut params = vec![];
+            if let Some(f) = filter {
+                apply_get_channels_filter(&mut sql_builder, &mut params, f);
+            }
+            let params_as_trait: Vec<_> = params.iter().map(|(key, value)| (*key, value as &dyn ToSql)).collect();
+            add_fields_to_get_channels_sql_builder(&mut sql_builder);
+            finalize_get_channels_sql_builder(&mut sql_builder, offset, limit);
+
+            let sql = sql_builder.sql().expect("valid sql");
+            let mut stmt = conn.prepare(&sql)?;
+            let channels = stmt
+                .query_map_named(params_as_trait.as_slice(), channel_details_from_row)?
+                .collect::<Result<Vec<_>, _>>()?;
+            let result = GetClosedChannelsResult {
+                channels,
+                skipped: offset,
+                total,
+            };
+            Ok(result)
+        })
+        .await
+    }
+
+    async fn add_or_update_payment_in_db(&self, info: PaymentInfo) -> Result<(), Self::Error> {
+        let for_coin = self.storage_ticker.clone();
+        let payment_hash = hex::encode(info.payment_hash.0);
+        let (is_outbound, destination) = match info.payment_type {
+            PaymentType::OutboundPayment { destination } => (true as i32, Some(destination.to_string())),
+            PaymentType::InboundPayment => (false as i32, None),
+        };
+        let description = info.description;
+        let preimage = info.preimage.map(|p| hex::encode(p.0));
+        let secret = info.secret.map(|s| hex::encode(s.0));
+        let amount_msat = info.amt_msat.map(|a| a as u32);
+        let fee_paid_msat = info.fee_paid_msat.map(|f| f as u32);
+        let status = info.status.to_string();
+        let created_at = info.created_at as u32;
+        let last_updated = info.last_updated as u32;
+
+        let sqlite_connection = self.sqlite_connection.clone();
+        async_blocking(move || {
+            let params = [
+                &payment_hash as &dyn ToSql,
+                &destination as &dyn ToSql,
+                &description as &dyn ToSql,
+                &preimage as &dyn ToSql,
+                &secret as &dyn ToSql,
+                &amount_msat as &dyn ToSql,
+                &fee_paid_msat as &dyn ToSql,
+                &is_outbound as &dyn ToSql,
+                &status as &dyn ToSql,
+                &created_at as &dyn ToSql,
+                &last_updated as &dyn ToSql,
+            ];
+            let mut conn = sqlite_connection.lock().unwrap();
+            let sql_transaction = conn.transaction()?;
+            sql_transaction.execute(&upsert_payment_sql(&for_coin)?, &params)?;
+            sql_transaction.commit()?;
+            Ok(())
+        })
+        .await
+    }
+
+    async fn get_payment_from_db(&self, hash: PaymentHash) -> Result<Option<PaymentInfo>, Self::Error> {
+        let params = [hex::encode(hash.0)];
+        let sql = select_payment_by_hash_sql(self.storage_ticker.as_str())?;
+        let sqlite_connection = self.sqlite_connection.clone();
+
+        async_blocking(move || {
+            let conn = sqlite_connection.lock().unwrap();
+            query_single_row(&conn, &sql, params, payment_info_from_row)
+        })
+        .await
+    }
+
+    async fn get_payments_by_filter(
+        &self,
+        filter: Option<PaymentsFilter>,
+        paging: PagingOptionsEnum<PaymentHash>,
+        limit: usize,
+    ) -> Result<GetPaymentsResult, Self::Error> {
+        let mut sql_builder = get_payments_builder_preimage(self.storage_ticker.as_str())?;
+        let sqlite_connection = self.sqlite_connection.clone();
+
+        async_blocking(move || {
+            let conn = sqlite_connection.lock().unwrap();
+
+            let mut total_builder = sql_builder.clone();
+            total_builder.count("id");
+            let total_sql = total_builder.sql().expect("valid sql");
+            let total: isize = conn.query_row(&total_sql, NO_PARAMS, |row| row.get(0))?;
+            let total = total.try_into().expect("count should be always above zero");
+
+            let offset = match paging {
+                PagingOptionsEnum::PageNumber(page) => (page.get() - 1) * limit,
+                PagingOptionsEnum::FromId(hash) => {
+                    let hash_str = hex::encode(hash.0);
+                    let params = [&hash_str];
+                    let maybe_offset = offset_by_id(
+                        &conn,
+                        &sql_builder,
+                        params,
+                        "payment_hash",
+                        "last_updated DESC",
+                        "payment_hash = ?1",
+                    )?;
+                    match maybe_offset {
+                        Some(offset) => offset,
+                        None => {
+                            return Ok(GetPaymentsResult {
+                                payments: vec![],
+                                skipped: 0,
+                                total,
+                            })
+                        },
+                    }
+                },
+            };
+
+            let mut params = vec![];
+            if let Some(f) = filter {
+                apply_get_payments_filter(&mut sql_builder, &mut params, f);
+            }
+            let params_as_trait: Vec<_> = params.iter().map(|(key, value)| (*key, value as &dyn ToSql)).collect();
+            finalize_get_payments_sql_builder(&mut sql_builder, offset, limit);
+
+            let sql = sql_builder.sql().expect("valid sql");
+            let mut stmt = conn.prepare(&sql)?;
+            let payments = stmt
+                .query_map_named(params_as_trait.as_slice(), payment_info_from_row)?
+                .collect::<Result<Vec<_>, _>>()?;
+            let result = GetPaymentsResult {
+                payments,
+                skipped: offset,
+                total,
+            };
+            Ok(result)
+        })
+        .await
+    }
+}
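Every write in the DbStorage impl above follows the same shape: clone the Arc<Mutex<Connection>>, hand the clone to a blocking task, and run the statement inside a SQL transaction so SQLite I/O never stalls the async executor. A minimal self-contained sketch of that pattern, with tokio::task::spawn_blocking standing in for mm2's common::async_blocking helper and a recent rusqlite assumed (`sql` and `params` are hypothetical placeholders):

use rusqlite::Connection;
use std::sync::{Arc, Mutex};

// Stand-in for the write methods above: `sql` is a prepared UPDATE/INSERT
// statement and `params` its positional parameters.
async fn execute_in_transaction(
    sqlite_connection: Arc<Mutex<Connection>>,
    sql: String,
    params: Vec<String>,
) -> Result<(), rusqlite::Error> {
    tokio::task::spawn_blocking(move || {
        // the lock is taken on the blocking thread, not on the async executor
        let mut conn = sqlite_connection.lock().unwrap();
        let sql_transaction = conn.transaction()?;
        sql_transaction.execute(&sql, rusqlite::params_from_iter(params))?;
        sql_transaction.commit()
    })
    .await
    .expect("blocking task panicked")
}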
 #[cfg(test)]
 mod tests {
+    use super::*;
     extern crate bitcoin;
     extern crate lightning;
-    use crate::FilesystemPersister;
     use bitcoin::blockdata::block::{Block, BlockHeader};
     use bitcoin::hashes::hex::FromHex;
     use bitcoin::Txid;
+    use common::{block_on, now_ms};
+    use db_common::sqlite::rusqlite::Connection;
     use lightning::chain::chainmonitor::Persist;
     use lightning::chain::transaction::OutPoint;
     use lightning::chain::ChannelMonitorUpdateErr;
@@ -416,10 +1343,15 @@ mod tests {
     use lightning::util::events::{ClosureReason, MessageSendEventsProvider};
     use lightning::util::test_utils;
     use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event};
+    use rand::distributions::Alphanumeric;
+    use rand::{Rng, RngCore};
+    use secp256k1::{Secp256k1, SecretKey};
     use std::fs;
+    use std::num::NonZeroUsize;
     use std::path::PathBuf;
+    use std::sync::{Arc, Mutex};

-    impl Drop for FilesystemPersister {
+    impl Drop for LightningPersister {
         fn drop(&mut self) {
             // We test for invalid directory names, so it's OK if directory removal
             // fails.
@@ -430,14 +1362,126 @@ mod tests {
         }
     }

-    // Integration-test the FilesystemPersister. Test relaying a few payments
+    fn generate_random_channels(num: u64) -> Vec<SqlChannelDetails> {
+        let mut rng = rand::thread_rng();
+        let mut channels = vec![];
+        let s = Secp256k1::new();
+        let mut bytes = [0; 32];
+        for i in 0..num {
+            let details = SqlChannelDetails {
+                rpc_id: i + 1,
+                channel_id: {
+                    rng.fill_bytes(&mut bytes);
+                    hex::encode(bytes)
+                },
+                counterparty_node_id: {
+                    rng.fill_bytes(&mut bytes);
+                    let secret = SecretKey::from_slice(&bytes).unwrap();
+                    let pubkey = PublicKey::from_secret_key(&s, &secret);
+                    pubkey.to_string()
+                },
+                funding_tx: {
+                    rng.fill_bytes(&mut bytes);
+                    Some(hex::encode(bytes))
+                },
+                funding_value: Some(rng.gen::<u32>() as u64),
+                closing_tx: {
+                    rng.fill_bytes(&mut bytes);
+                    Some(hex::encode(bytes))
+                },
+                closure_reason: {
+                    Some(
+                        rng.sample_iter(&Alphanumeric)
+                            .take(30)
+                            .map(char::from)
+                            .collect::<String>(),
+                    )
+                },
+                claiming_tx: {
+                    rng.fill_bytes(&mut bytes);
+                    Some(hex::encode(bytes))
+                },
+                claimed_balance: Some(rng.gen::<f64>()),
+                funding_generated_in_block: Some(rng.gen::<u32>() as u64),
+                is_outbound: rand::random(),
+                is_public: rand::random(),
+                is_closed: rand::random(),
+                created_at: rng.gen::<u32>() as u64,
+                last_updated: rng.gen::<u32>() as u64,
+            };
+            channels.push(details);
+        }
+        channels
+    }
+
+    fn generate_random_payments(num: u64) -> Vec<PaymentInfo> {
+        let mut rng = rand::thread_rng();
+        let mut payments = vec![];
+        let s = Secp256k1::new();
+        let mut bytes = [0; 32];
+        for _ in 0..num {
+            let payment_type = if let 0 = rng.gen::<u8>() % 2 {
+                PaymentType::InboundPayment
+            } else {
+                rng.fill_bytes(&mut bytes);
+                let secret = SecretKey::from_slice(&bytes).unwrap();
+                PaymentType::OutboundPayment {
+                    destination: PublicKey::from_secret_key(&s, &secret),
+                }
+            };
+            let status_rng: u8 = rng.gen();
+            let status = if status_rng % 3 == 0 {
+                HTLCStatus::Succeeded
+            } else if status_rng % 3 == 1 {
+                HTLCStatus::Pending
+            } else {
+                HTLCStatus::Failed
+            };
+            let description: String = rng.sample_iter(&Alphanumeric).take(30).map(char::from).collect();
+            let info = PaymentInfo {
+                payment_hash: {
+                    rng.fill_bytes(&mut bytes);
+                    PaymentHash(bytes)
+                },
+                payment_type,
+                description,
+                preimage: {
+                    rng.fill_bytes(&mut bytes);
+                    Some(PaymentPreimage(bytes))
+                },
+                secret: {
+                    rng.fill_bytes(&mut bytes);
+                    Some(PaymentSecret(bytes))
+                },
+                amt_msat: Some(rng.gen::<u32>() as u64),
+                fee_paid_msat: Some(rng.gen::<u32>() as u64),
+                status,
+                created_at: rng.gen::<u32>() as u64,
+                last_updated: rng.gen::<u32>() as u64,
+            };
+            payments.push(info);
+        }
+        payments
+    }
+
+    // Integration-test the LightningPersister. Test relaying a few payments
     // and check that the persisted data is updated the appropriate number of
     // times.
     #[test]
    fn test_filesystem_persister() {
-        // Create the nodes, giving them FilesystemPersisters for data persisters.
-        let persister_0 = FilesystemPersister::new(PathBuf::from("test_filesystem_persister_0"), None);
-        let persister_1 = FilesystemPersister::new(PathBuf::from("test_filesystem_persister_1"), None);
+        // Create the nodes, giving them LightningPersisters for data persisters.
+        let persister_0 = LightningPersister::new(
+            "test_filesystem_persister_0".into(),
+            PathBuf::from("test_filesystem_persister_0"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );
+        let persister_1 = LightningPersister::new(
+            "test_filesystem_persister_1".into(),
+            PathBuf::from("test_filesystem_persister_1"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );
         let chanmon_cfgs = create_chanmon_cfgs(2);
         let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
         let chain_mon_0 = test_utils::TestChainMonitor::new(
@@ -533,7 +1577,12 @@ mod tests {
     #[cfg(not(target_os = "windows"))]
     #[test]
     fn test_readonly_dir_perm_failure() {
-        let persister = FilesystemPersister::new(PathBuf::from("test_readonly_dir_perm_failure"), None);
+        let persister = LightningPersister::new(
+            "test_readonly_dir_perm_failure".into(),
+            PathBuf::from("test_readonly_dir_perm_failure"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );
         fs::create_dir_all(&persister.main_path).unwrap();

         // Set up a dummy channel and force close. This will produce a monitor
@@ -592,7 +1641,12 @@ mod tests {
        // channel fails to open because the directories fail to be created. There
        // don't seem to be invalid filename characters on Unix that Rust doesn't
        // handle, hence why the test is Windows-only.
-        let persister = FilesystemPersister::new(PathBuf::from(":<>/"), None);
+        let persister = LightningPersister::new(
+            "test_fail_on_open".into(),
+            PathBuf::from(":<>/"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );

         let test_txo = OutPoint {
             txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
@@ -606,4 +1660,454 @@ mod tests {
         nodes[1].node.get_and_clear_pending_msg_events();
         added_monitors.clear();
     }
+
+    #[test]
+    fn test_init_sql_collection() {
+        let persister = LightningPersister::new(
+            "init_sql_collection".into(),
+            PathBuf::from("test_filesystem_persister"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );
+        let initialized = block_on(persister.is_db_initialized()).unwrap();
+        assert!(!initialized);
+
+        block_on(persister.init_db()).unwrap();
+        // repetitive init must not fail
+        block_on(persister.init_db()).unwrap();
+
+        let initialized = block_on(persister.is_db_initialized()).unwrap();
+        assert!(initialized);
+    }
+
+    #[test]
+    fn test_add_get_channel_sql() {
+        let persister = LightningPersister::new(
+            "add_get_channel".into(),
+            PathBuf::from("test_filesystem_persister"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );
+
+        block_on(persister.init_db()).unwrap();
+
+        let last_channel_rpc_id = block_on(persister.get_last_channel_rpc_id()).unwrap();
+        assert_eq!(last_channel_rpc_id, 0);
+
+        let channel = block_on(persister.get_channel_from_db(1)).unwrap();
+        assert!(channel.is_none());
+
+        let mut expected_channel_details = SqlChannelDetails::new(
+            1,
+            [0; 32],
+            PublicKey::from_str("038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9").unwrap(),
+            true,
+            true,
+        );
+        block_on(persister.add_channel_to_db(expected_channel_details.clone())).unwrap();
+        let last_channel_rpc_id = block_on(persister.get_last_channel_rpc_id()).unwrap();
+        assert_eq!(last_channel_rpc_id, 1);
+
+        let actual_channel_details = block_on(persister.get_channel_from_db(1)).unwrap().unwrap();
+        assert_eq!(expected_channel_details, actual_channel_details);
+
+        // must fail because we are adding a channel with the same rpc_id
+        block_on(persister.add_channel_to_db(expected_channel_details.clone())).unwrap_err();
+        assert_eq!(last_channel_rpc_id, 1);
+
+        expected_channel_details.rpc_id = 2;
+        block_on(persister.add_channel_to_db(expected_channel_details.clone())).unwrap();
+        let last_channel_rpc_id = block_on(persister.get_last_channel_rpc_id()).unwrap();
+        assert_eq!(last_channel_rpc_id, 2);
+
+        block_on(persister.add_funding_tx_to_db(
+            2,
+            "9cdafd6d42dcbdc06b0b5bce1866deb82630581285bbfb56870577300c0a8c6e".into(),
+            3000,
+            50000,
+        ))
+        .unwrap();
+        expected_channel_details.funding_tx =
+            Some("9cdafd6d42dcbdc06b0b5bce1866deb82630581285bbfb56870577300c0a8c6e".into());
+        expected_channel_details.funding_value = Some(3000);
+        expected_channel_details.funding_generated_in_block = Some(50000);
+
+        let actual_channel_details = block_on(persister.get_channel_from_db(2)).unwrap().unwrap();
+        assert_eq!(expected_channel_details, actual_channel_details);
+
+        block_on(persister.update_funding_tx_block_height(
+            "9cdafd6d42dcbdc06b0b5bce1866deb82630581285bbfb56870577300c0a8c6e".into(),
+            50001,
+        ))
+        .unwrap();
+        expected_channel_details.funding_generated_in_block = Some(50001);
+
+        let actual_channel_details = block_on(persister.get_channel_from_db(2)).unwrap().unwrap();
+        assert_eq!(expected_channel_details, actual_channel_details);
+
+        block_on(persister.update_channel_to_closed(2, "the channel was cooperatively closed".into())).unwrap();
+        expected_channel_details.closure_reason = Some("the channel was cooperatively closed".into());
+        expected_channel_details.is_closed = true;
+
+        let actual_channel_details = block_on(persister.get_channel_from_db(2)).unwrap().unwrap();
+        assert_eq!(expected_channel_details, actual_channel_details);
+
+        let actual_channels = block_on(persister.get_closed_channels_with_no_closing_tx()).unwrap();
+        assert_eq!(actual_channels.len(), 1);
+
+        let closed_channels =
+            block_on(persister.get_closed_channels_by_filter(None, PagingOptionsEnum::default(), 10)).unwrap();
+        assert_eq!(closed_channels.channels.len(), 1);
+        assert_eq!(expected_channel_details, closed_channels.channels[0]);
+
+        block_on(persister.update_channel_to_closed(1, "the channel was cooperatively closed".into())).unwrap();
+        let closed_channels =
+            block_on(persister.get_closed_channels_by_filter(None, PagingOptionsEnum::default(), 10)).unwrap();
+        assert_eq!(closed_channels.channels.len(), 2);
+
+        let actual_channels = block_on(persister.get_closed_channels_with_no_closing_tx()).unwrap();
+        assert_eq!(actual_channels.len(), 2);
+
+        block_on(persister.add_closing_tx_to_db(
+            2,
+            "5557df9ad2c9b3c57a4df8b4a7da0b7a6f4e923b4a01daa98bf9e5a3b33e9c8f".into(),
+        ))
+        .unwrap();
+        expected_channel_details.closing_tx =
+            Some("5557df9ad2c9b3c57a4df8b4a7da0b7a6f4e923b4a01daa98bf9e5a3b33e9c8f".into());
+
+        let actual_channels = block_on(persister.get_closed_channels_with_no_closing_tx()).unwrap();
+        assert_eq!(actual_channels.len(), 1);
+
+        let actual_channel_details = block_on(persister.get_channel_from_db(2)).unwrap().unwrap();
+        assert_eq!(expected_channel_details, actual_channel_details);
+
+        block_on(persister.add_claiming_tx_to_db(
+            "5557df9ad2c9b3c57a4df8b4a7da0b7a6f4e923b4a01daa98bf9e5a3b33e9c8f".into(),
+            "97f061634a4a7b0b0c2b95648f86b1c39b95e0cf5073f07725b7143c095b612a".into(),
+            2000.333333,
+        ))
+        .unwrap();
+        expected_channel_details.claiming_tx =
+            Some("97f061634a4a7b0b0c2b95648f86b1c39b95e0cf5073f07725b7143c095b612a".into());
+        expected_channel_details.claimed_balance = Some(2000.333333);
+        let actual_channel_details = block_on(persister.get_channel_from_db(2)).unwrap().unwrap();
+        assert_eq!(expected_channel_details, actual_channel_details);
+    }
+
+    #[test]
+    fn test_add_get_payment_sql() {
+        let persister = LightningPersister::new(
+            "add_get_payment".into(),
+            PathBuf::from("test_filesystem_persister"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );
+
+        block_on(persister.init_db()).unwrap();
+
+        let payment = block_on(persister.get_payment_from_db(PaymentHash([0; 32]))).unwrap();
+        assert!(payment.is_none());
+
+        let mut expected_payment_info = PaymentInfo {
+            payment_hash: PaymentHash([0; 32]),
+            payment_type: PaymentType::InboundPayment,
+            description: "test payment".into(),
+            preimage: Some(PaymentPreimage([2; 32])),
+            secret: Some(PaymentSecret([3; 32])),
+            amt_msat: Some(2000),
+            fee_paid_msat: Some(100),
+            status: HTLCStatus::Failed,
+            created_at: now_ms() / 1000,
+            last_updated: now_ms() / 1000,
+        };
+        block_on(persister.add_or_update_payment_in_db(expected_payment_info.clone())).unwrap();
+
+        let actual_payment_info = block_on(persister.get_payment_from_db(PaymentHash([0; 32])))
+            .unwrap()
+            .unwrap();
+        assert_eq!(expected_payment_info, actual_payment_info);
+
+        expected_payment_info.payment_hash = PaymentHash([1; 32]);
+        expected_payment_info.payment_type = PaymentType::OutboundPayment {
+            destination: PublicKey::from_str("038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9")
+                .unwrap(),
+        };
+        expected_payment_info.secret = None;
+        expected_payment_info.amt_msat = None;
+        expected_payment_info.status = HTLCStatus::Succeeded;
+        expected_payment_info.last_updated = now_ms() / 1000;
+        block_on(persister.add_or_update_payment_in_db(expected_payment_info.clone())).unwrap();
+
+        let actual_payment_info = block_on(persister.get_payment_from_db(PaymentHash([1; 32])))
+            .unwrap()
+            .unwrap();
+        assert_eq!(expected_payment_info, actual_payment_info);
+    }
+
+    #[test]
+    fn test_get_payments_by_filter() {
+        let persister = LightningPersister::new(
+            "test_get_payments_by_filter".into(),
+            PathBuf::from("test_filesystem_persister"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );
+
+        block_on(persister.init_db()).unwrap();
+
+        let mut payments = generate_random_payments(100);
+
+        for payment in payments.clone() {
+            block_on(persister.add_or_update_payment_in_db(payment)).unwrap();
+        }
+
+        let paging = PagingOptionsEnum::PageNumber(NonZeroUsize::new(1).unwrap());
+        let limit = 4;
+
+        let result = block_on(persister.get_payments_by_filter(None, paging, limit)).unwrap();
+
+        payments.sort_by(|a, b| b.last_updated.cmp(&a.last_updated));
+        let expected_payments = &payments[..4].to_vec();
+        let actual_payments = &result.payments;
+
+        assert_eq!(0, result.skipped);
+        assert_eq!(100, result.total);
+        assert_eq!(expected_payments, actual_payments);
+
+        let paging = PagingOptionsEnum::PageNumber(NonZeroUsize::new(2).unwrap());
+        let limit = 5;
+
+        let result = block_on(persister.get_payments_by_filter(None, paging, limit)).unwrap();
+
+        let expected_payments = &payments[5..10].to_vec();
+        let actual_payments = &result.payments;
+
+        assert_eq!(5, result.skipped);
+        assert_eq!(100, result.total);
+        assert_eq!(expected_payments, actual_payments);
+
+        let from_payment_hash = payments[20].payment_hash;
+        let paging = PagingOptionsEnum::FromId(from_payment_hash);
+        let limit = 3;
+
+        let result = block_on(persister.get_payments_by_filter(None, paging, limit)).unwrap();
+        let expected_payments = &payments[21..24].to_vec();
+        let actual_payments = &result.payments;
+
+        assert_eq!(expected_payments, actual_payments);
+
+        let mut filter = PaymentsFilter {
+            payment_type: Some(PaymentType::InboundPayment),
+            description: None,
+            status: None,
+            from_amount_msat: None,
+            to_amount_msat: None,
+            from_fee_paid_msat: None,
+            to_fee_paid_msat: None,
+            from_timestamp: None,
+            to_timestamp: None,
+        };
+        let paging = PagingOptionsEnum::PageNumber(NonZeroUsize::new(1).unwrap());
+        let limit = 10;
+
+        let result = block_on(persister.get_payments_by_filter(Some(filter.clone()), paging.clone(), limit)).unwrap();
+        let expected_payments_vec: Vec<PaymentInfo> = payments
+            .iter()
+            .map(|p| p.clone())
+            .filter(|p| p.payment_type == PaymentType::InboundPayment)
+            .collect();
+        let expected_payments = if expected_payments_vec.len() > 10 {
+            expected_payments_vec[..10].to_vec()
+        } else {
+            expected_payments_vec.clone()
+        };
+        let actual_payments = result.payments;
+
+        assert_eq!(expected_payments, actual_payments);
+
+        filter.status = Some(HTLCStatus::Succeeded);
+        let result = block_on(persister.get_payments_by_filter(Some(filter.clone()), paging.clone(), limit)).unwrap();
+        let expected_payments_vec: Vec<PaymentInfo> = expected_payments_vec
+            .iter()
+            .map(|p| p.clone())
+            .filter(|p| p.status == HTLCStatus::Succeeded)
+            .collect();
+        let expected_payments = if expected_payments_vec.len() > 10 {
+            expected_payments_vec[..10].to_vec()
+        } else {
+            expected_payments_vec
+        };
+        let actual_payments = result.payments;
+
+        assert_eq!(expected_payments, actual_payments);
+
+        let description = &payments[42].description;
+        let substr = &description[5..10];
+        filter.payment_type = None;
+        filter.status = None;
+        filter.description = Some(substr.to_string());
+        let result = block_on(persister.get_payments_by_filter(Some(filter), paging, limit)).unwrap();
+        let expected_payments_vec: Vec<PaymentInfo> = payments
+            .iter()
+            .map(|p| p.clone())
+            .filter(|p| p.description.contains(&substr))
+            .collect();
+        let expected_payments = if expected_payments_vec.len() > 10 {
+            expected_payments_vec[..10].to_vec()
+        } else {
+            expected_payments_vec.clone()
+        };
+        let actual_payments = result.payments;
+
+        assert_eq!(expected_payments, actual_payments);
+    }
+
+    #[test]
+    fn test_get_channels_by_filter() {
+        let persister = LightningPersister::new(
+            "test_get_channels_by_filter".into(),
+            PathBuf::from("test_filesystem_persister"),
+            None,
+            Arc::new(Mutex::new(Connection::open_in_memory().unwrap())),
+        );
+
+        block_on(persister.init_db()).unwrap();
+
+        let mut channels = generate_random_channels(100);
+
+        for channel in channels.clone() {
+            block_on(persister.add_channel_to_db(channel.clone())).unwrap();
+            block_on(persister.add_funding_tx_to_db(
+                channel.rpc_id,
+                channel.funding_tx.unwrap(),
+                channel.funding_value.unwrap(),
+                channel.funding_generated_in_block.unwrap(),
+            ))
+            .unwrap();
+            block_on(persister.update_channel_to_closed(channel.rpc_id, channel.closure_reason.unwrap())).unwrap();
+            block_on(persister.add_closing_tx_to_db(channel.rpc_id, channel.closing_tx.clone().unwrap())).unwrap();
+            block_on(persister.add_claiming_tx_to_db(
+                channel.closing_tx.unwrap(),
+                channel.claiming_tx.unwrap(),
+                channel.claimed_balance.unwrap(),
+            ))
+            .unwrap();
+        }
+
+        // fetch all channels back from SQL, since last_updated has changed from the values generate_random_channels produced
+        channels = block_on(persister.get_closed_channels_by_filter(None, PagingOptionsEnum::default(), 100))
+            .unwrap()
+            .channels;
+        assert_eq!(100, channels.len());
+        let paging = PagingOptionsEnum::PageNumber(NonZeroUsize::new(1).unwrap());
+        let limit = 4;
+
+        let result = block_on(persister.get_closed_channels_by_filter(None, paging, limit)).unwrap();
+
+        let expected_channels = &channels[..4].to_vec();
+        let actual_channels = &result.channels;
+
+        assert_eq!(0, result.skipped);
+        assert_eq!(100, result.total);
+        assert_eq!(expected_channels, actual_channels);
+
+        let paging = PagingOptionsEnum::PageNumber(NonZeroUsize::new(2).unwrap());
+        let limit = 5;
+
+        let result = block_on(persister.get_closed_channels_by_filter(None, paging, limit)).unwrap();
+
+        let expected_channels = &channels[5..10].to_vec();
+        let actual_channels = &result.channels;
+
+        assert_eq!(5, result.skipped);
+        assert_eq!(100, result.total);
+        assert_eq!(expected_channels, actual_channels);
+
+        let from_rpc_id = 20;
+        let paging = PagingOptionsEnum::FromId(from_rpc_id);
+        let limit = 3;
+
+        let result = block_on(persister.get_closed_channels_by_filter(None, paging, limit)).unwrap();
+
+        channels.sort_by(|a, b| a.rpc_id.cmp(&b.rpc_id));
+
+        let expected_channels = &channels[20..23].to_vec();
+        let actual_channels = &result.channels;
+
+        assert_eq!(expected_channels, actual_channels);
+
+        channels.sort_by(|a, b| b.last_updated.cmp(&a.last_updated));
+        let mut filter = ClosedChannelsFilter {
+            channel_id: None,
+            counterparty_node_id: None,
+            funding_tx: None,
+            from_funding_value: None,
+            to_funding_value: None,
+            closing_tx: None,
+            closure_reason: None,
+            claiming_tx: None,
+            from_claimed_balance: None,
+            to_claimed_balance: None,
+            channel_type: Some(ChannelType::Outbound),
+            channel_visibility: None,
+        };
+        let paging = PagingOptionsEnum::PageNumber(NonZeroUsize::new(1).unwrap());
+        let limit = 10;
+
+        let result =
+            block_on(persister.get_closed_channels_by_filter(Some(filter.clone()), paging.clone(), limit)).unwrap();
+        let expected_channels_vec: Vec<SqlChannelDetails> = channels
+            .iter()
+            .map(|chan| chan.clone())
+            .filter(|chan| chan.is_outbound)
+            .collect();
+        let expected_channels = if expected_channels_vec.len() > 10 {
+            expected_channels_vec[..10].to_vec()
+        } else {
+            expected_channels_vec.clone()
+        };
+        let actual_channels = result.channels;
+
+        assert_eq!(expected_channels, actual_channels);
+
+        filter.channel_visibility = Some(ChannelVisibility::Public);
+        let result =
+            block_on(persister.get_closed_channels_by_filter(Some(filter.clone()), paging.clone(), limit)).unwrap();
+        let expected_channels_vec: Vec<SqlChannelDetails> = expected_channels_vec
+            .iter()
+            .map(|chan| chan.clone())
+            .filter(|chan| chan.is_public)
+            .collect();
+        let expected_channels = if expected_channels_vec.len() > 10 {
+            expected_channels_vec[..10].to_vec()
+        } else {
+            expected_channels_vec
+        };
+        let actual_channels = result.channels;
+
+        assert_eq!(expected_channels, actual_channels);
+
+        let channel_id = channels[42].channel_id.clone();
+        filter.channel_type = None;
+        filter.channel_visibility = None;
+        filter.channel_id = Some(channel_id.clone());
+        let result = block_on(persister.get_closed_channels_by_filter(Some(filter), paging, limit)).unwrap();
+        let expected_channels_vec: Vec<SqlChannelDetails> = channels
+            .iter()
+            .map(|chan| chan.clone())
+            .filter(|chan| chan.channel_id == channel_id)
+            .collect();
+        let expected_channels = if expected_channels_vec.len() > 10 {
+            expected_channels_vec[..10].to_vec()
+        } else {
+            expected_channels_vec.clone()
+        };
+        let actual_channels = result.channels;
+
+        assert_eq!(expected_channels, actual_channels);
+    }
+}
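The paging assertions in the tests above pin down the offset semantics of the two PagingOptionsEnum variants: PageNumber(n) skips (n - 1) * limit records, while FromId starts at the record immediately after the anchor (located via the offset_by_id SQL helper in the real code). A hedged, in-memory illustration of the same arithmetic (Paging, page and id_of are made up for this sketch):

use std::num::NonZeroUsize;

enum Paging<Id> {
    PageNumber(NonZeroUsize),
    FromId(Id),
}

fn page<'a, T, Id: PartialEq>(
    items: &'a [T],
    paging: &Paging<Id>,
    limit: usize,
    id_of: impl Fn(&T) -> Id,
) -> (usize, &'a [T]) {
    let offset = match paging {
        Paging::PageNumber(page) => (page.get() - 1) * limit,
        // the record *after* the anchor id is the first one returned,
        // mirroring get_payments_by_filter / get_closed_channels_by_filter
        Paging::FromId(id) => match items.iter().position(|it| id_of(it) == *id) {
            Some(pos) => pos + 1,
            None => return (0, &[]),
        },
    };
    let end = (offset + limit).min(items.len());
    (offset, &items[offset.min(items.len())..end])
}

fn main() {
    let items = vec![10u64, 11, 12, 13, 14];
    // page 2 with limit 2 and "everything after id 11" land on the same slice
    assert_eq!(page(&items, &Paging::PageNumber(NonZeroUsize::new(2).unwrap()), 2, |x| *x), (2, &items[2..4]));
    assert_eq!(page(&items, &Paging::FromId(11u64), 2, |x| *x), (2, &items[2..4]));
}

This also explains why the tests re-fetch and re-sort by last_updated DESC before slicing: the expected pages are computed against the same ordering the SQL builder applies.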
diff --git a/mm2src/coins/lightning_persister/src/storage.rs b/mm2src/coins/lightning_persister/src/storage.rs
index b0d0760857..abf1bf4edb 100644
--- a/mm2src/coins/lightning_persister/src/storage.rs
+++ b/mm2src/coins/lightning_persister/src/storage.rs
@@ -1,33 +1,268 @@
 use async_trait::async_trait;
+use bitcoin::Network;
+use common::{now_ms, PagingOptionsEnum};
+use db_common::sqlite::rusqlite::types::FromSqlError;
+use derive_more::Display;
+use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::routing::network_graph::NetworkGraph;
-use lightning::routing::scoring::Scorer;
+use lightning::routing::scoring::ProbabilisticScorer;
 use parking_lot::Mutex as PaMutex;
 use secp256k1::PublicKey;
+use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 use std::net::SocketAddr;
+use std::str::FromStr;
 use std::sync::{Arc, Mutex};

 pub type NodesAddressesMap = HashMap<PublicKey, SocketAddr>;
 pub type NodesAddressesMapShared = Arc<PaMutex<NodesAddressesMap>>;
-
+pub type Scorer = ProbabilisticScorer<Arc<NetworkGraph>>;

 #[async_trait]
-pub trait Storage: Send + Sync + 'static {
+pub trait FileSystemStorage {
     type Error;

     /// Initializes dirs/collection/tables in storage for a specified coin
-    async fn init(&self) -> Result<(), Self::Error>;
+    async fn init_fs(&self) -> Result<(), Self::Error>;

-    async fn is_initialized(&self) -> Result<bool, Self::Error>;
+    async fn is_fs_initialized(&self) -> Result<bool, Self::Error>;

     async fn get_nodes_addresses(&self) -> Result<NodesAddressesMap, Self::Error>;

     async fn save_nodes_addresses(&self, nodes_addresses: NodesAddressesMapShared) -> Result<(), Self::Error>;

-    async fn get_network_graph(&self) -> Result<NetworkGraph, Self::Error>;
+    async fn get_network_graph(&self, network: Network) -> Result<NetworkGraph, Self::Error>;

     async fn save_network_graph(&self, network_graph: Arc<NetworkGraph>) -> Result<(), Self::Error>;

-    async fn get_scorer(&self) -> Result<Scorer, Self::Error>;
+    async fn get_scorer(&self, network_graph: Arc<NetworkGraph>) -> Result<Scorer, Self::Error>;

     async fn save_scorer(&self, scorer: Arc<Mutex<Scorer>>) -> Result<(), Self::Error>;
 }
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct SqlChannelDetails {
+    pub rpc_id: u64,
+    pub channel_id: String,
+    pub counterparty_node_id: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub funding_tx: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub funding_value: Option<u64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub closing_tx: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub closure_reason: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub claiming_tx: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub claimed_balance: Option<f64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub funding_generated_in_block: Option<u64>,
+    pub is_outbound: bool,
+    pub is_public: bool,
+    pub is_closed: bool,
+    pub created_at: u64,
+    pub last_updated: u64,
+}
+
+impl SqlChannelDetails {
+    #[inline]
+    pub fn new(
+        rpc_id: u64,
+        channel_id: [u8; 32],
+        counterparty_node_id: PublicKey,
+        is_outbound: bool,
+        is_public: bool,
+    ) -> Self {
+        SqlChannelDetails {
+            rpc_id,
+            channel_id: hex::encode(channel_id),
+            counterparty_node_id: counterparty_node_id.to_string(),
+            funding_tx: None,
+            funding_value: None,
+            funding_generated_in_block: None,
+            closing_tx: None,
+            closure_reason: None,
+            claiming_tx: None,
+            claimed_balance: None,
+            is_outbound,
+            is_public,
+            is_closed: false,
+            created_at: now_ms() / 1000,
+            last_updated: now_ms() / 1000,
+        }
+    }
+}
+
+#[derive(Clone, Deserialize)]
+pub enum ChannelType {
+    Outbound,
+    Inbound,
+}
+
+#[derive(Clone, Deserialize)]
+pub enum ChannelVisibility {
+    Public,
+    Private,
+}
+
+#[derive(Clone, Deserialize)]
+pub struct ClosedChannelsFilter {
+    pub channel_id: Option<String>,
+    pub counterparty_node_id: Option<String>,
+    pub funding_tx: Option<String>,
+    pub from_funding_value: Option<u64>,
+    pub to_funding_value: Option<u64>,
+    pub closing_tx: Option<String>,
+    pub closure_reason: Option<String>,
+    pub claiming_tx: Option<String>,
+    pub from_claimed_balance: Option<f64>,
+    pub to_claimed_balance: Option<f64>,
+    pub channel_type: Option<ChannelType>,
+    pub channel_visibility: Option<ChannelVisibility>,
+}
+
+pub struct GetClosedChannelsResult {
+    pub channels: Vec<SqlChannelDetails>,
+    pub skipped: usize,
+    pub total: usize,
+}
+
+#[derive(Clone, Debug, Deserialize, Display, PartialEq, Serialize)]
+#[serde(rename_all = "lowercase")]
+pub enum HTLCStatus {
+    Pending,
+    Succeeded,
+    Failed,
+}
+
+impl FromStr for HTLCStatus {
+    type Err = FromSqlError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "Pending" => Ok(HTLCStatus::Pending),
+            "Succeeded" => Ok(HTLCStatus::Succeeded),
+            "Failed" => Ok(HTLCStatus::Failed),
+            _ => Err(FromSqlError::InvalidType),
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum PaymentType {
+    OutboundPayment { destination: PublicKey },
+    InboundPayment,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct PaymentInfo {
+    pub payment_hash: PaymentHash,
+    pub payment_type: PaymentType,
+    pub description: String,
+    pub preimage: Option<PaymentPreimage>,
+    pub secret: Option<PaymentSecret>,
+    pub amt_msat: Option<u64>,
+    pub fee_paid_msat: Option<u64>,
+    pub status: HTLCStatus,
+    pub created_at: u64,
+    pub last_updated: u64,
+}
+
+#[derive(Clone)]
+pub struct PaymentsFilter {
+    pub payment_type: Option<PaymentType>,
+    pub description: Option<String>,
+    pub status: Option<HTLCStatus>,
+    pub from_amount_msat: Option<u64>,
+    pub to_amount_msat: Option<u64>,
+    pub from_fee_paid_msat: Option<u64>,
+    pub to_fee_paid_msat: Option<u64>,
+    pub from_timestamp: Option<u64>,
+    pub to_timestamp: Option<u64>,
+}
+
+pub struct GetPaymentsResult {
+    pub payments: Vec<PaymentInfo>,
+    pub skipped: usize,
+    pub total: usize,
+}
+
+#[async_trait]
+pub trait DbStorage {
+    type Error;
+
+    /// Initializes tables in DB.
+    async fn init_db(&self) -> Result<(), Self::Error>;
+
+    /// Checks whether the tables have already been initialized in the DB.
+    async fn is_db_initialized(&self) -> Result<bool, Self::Error>;
+
+    /// Gets the last added channel rpc_id. Can be used to deduce the rpc_id for a new channel to be added to the DB.
+    async fn get_last_channel_rpc_id(&self) -> Result<u32, Self::Error>;
+
+    /// Inserts a new channel record in the DB. The record's data is completed using add_funding_tx_to_db,
+    /// add_closing_tx_to_db and add_claiming_tx_to_db as this information becomes available.
+    async fn add_channel_to_db(&self, details: SqlChannelDetails) -> Result<(), Self::Error>;
+
+    /// Updates a channel's DB record with the channel's funding transaction information.
+    async fn add_funding_tx_to_db(
+        &self,
+        rpc_id: u64,
+        funding_tx: String,
+        funding_value: u64,
+        funding_generated_in_block: u64,
+    ) -> Result<(), Self::Error>;
+
+    /// Updates the funding_tx_block_height value for a channel in the DB. Should be used to update the block height of
+    /// the funding tx when the transaction is confirmed on-chain.
+    async fn update_funding_tx_block_height(&self, funding_tx: String, block_height: u64) -> Result<(), Self::Error>;
+
+    /// Updates the is_closed value for a channel in the DB to 1.
+    async fn update_channel_to_closed(&self, rpc_id: u64, closure_reason: String) -> Result<(), Self::Error>;
+
+    /// Gets the list of closed channel records in the DB with no closing tx hashes saved yet. Can be used to check if
+    /// the closing tx hash needs to be fetched from the chain and saved to the DB when initializing the persister.
+    async fn get_closed_channels_with_no_closing_tx(&self) -> Result<Vec<SqlChannelDetails>, Self::Error>;
+
+    /// Updates a channel's DB record with the channel's closing transaction hash.
+    async fn add_closing_tx_to_db(&self, rpc_id: u64, closing_tx: String) -> Result<(), Self::Error>;
+
+    /// Updates a channel's DB record with information about the transaction responsible for claiming the channel's
+    /// closing balance back to the user's address.
+    async fn add_claiming_tx_to_db(
+        &self,
+        closing_tx: String,
+        claiming_tx: String,
+        claimed_balance: f64,
+    ) -> Result<(), Self::Error>;
+
+    /// Gets a channel record from the DB by the channel's rpc_id.
+    async fn get_channel_from_db(&self, rpc_id: u64) -> Result<Option<SqlChannelDetails>, Self::Error>;
+
+    /// Gets the list of closed channels that match the provided filter criteria. The number of requested records is
+    /// specified by the limit parameter, and the starting record to list from is specified by the paging parameter.
+    /// The total number of matched records along with the number of skipped records are also returned in the result.
+    async fn get_closed_channels_by_filter(
+        &self,
+        filter: Option<ClosedChannelsFilter>,
+        paging: PagingOptionsEnum<u64>,
+        limit: usize,
+    ) -> Result<GetClosedChannelsResult, Self::Error>;
+
+    /// Inserts a new payment record in the DB, or updates an existing one.
+    async fn add_or_update_payment_in_db(&self, info: PaymentInfo) -> Result<(), Self::Error>;
+
+    /// Gets a payment's record from the DB by the payment's hash.
+    async fn get_payment_from_db(&self, hash: PaymentHash) -> Result<Option<PaymentInfo>, Self::Error>;
+
+    /// Gets the list of payments that match the provided filter criteria. The number of requested records is specified
+    /// by the limit parameter, and the starting record to list from is specified by the paging parameter. The total
+    /// number of matched records along with the number of skipped records are also returned in the result.
+    async fn get_payments_by_filter(
+        &self,
+        filter: Option<PaymentsFilter>,
+        paging: PagingOptionsEnum<PaymentHash>,
+        limit: usize,
+    ) -> Result<GetPaymentsResult, Self::Error>;
+}
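A hedged sketch of how a caller might drive the DbStorage API defined above, assuming the trait and the types from this file are in scope: insert an outbound payment as Pending, then upsert the final state through the same add_or_update_payment_in_db entry point (all concrete values are placeholders):

// `persister` can be anything implementing DbStorage, e.g. LightningPersister.
async fn record_outbound_payment<S: DbStorage>(
    persister: &S,
    hash: PaymentHash,
    destination: PublicKey,
) -> Result<(), S::Error> {
    let mut info = PaymentInfo {
        payment_hash: hash,
        payment_type: PaymentType::OutboundPayment { destination },
        description: "example invoice".into(),
        preimage: None,
        secret: None,
        amt_msat: Some(10_000),
        fee_paid_msat: None,
        status: HTLCStatus::Pending,
        created_at: now_ms() / 1000,
        last_updated: now_ms() / 1000,
    };
    persister.add_or_update_payment_in_db(info.clone()).await?;

    // later, once the HTLC resolves, upsert the final state
    info.status = HTLCStatus::Succeeded;
    info.last_updated = now_ms() / 1000;
    persister.add_or_update_payment_in_db(info).await
}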
diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs
index 0b2909f8df..949c1eb72a 100644
--- a/mm2src/coins/lp_coins.rs
+++ b/mm2src/coins/lp_coins.rs
@@ -96,6 +96,51 @@ macro_rules! try_f {
     };
 }

+macro_rules! ok_or_continue_after_sleep {
+    ($e:expr, $delay: ident) => {
+        match $e {
+            Ok(res) => res,
+            Err(e) => {
+                error!("error {:?}", e);
+                Timer::sleep($delay).await;
+                continue;
+            },
+        }
+    };
+}
+
+#[cfg(not(target_arch = "wasm32"))]
+macro_rules! ok_or_retry_after_sleep {
+    ($e:expr, $delay: ident) => {
+        loop {
+            match $e {
+                Ok(res) => break res,
+                Err(e) => {
+                    error!("error {:?}", e);
+                    Timer::sleep($delay).await;
+                    continue;
+                },
+            }
+        }
+    };
+}
+
+#[cfg(not(target_arch = "wasm32"))]
+macro_rules! ok_or_retry_after_sleep_sync {
+    ($e:expr, $delay: ident) => {
+        loop {
+            match $e {
+                Ok(res) => break res,
+                Err(e) => {
+                    error!("error {:?}", e);
+                    std::thread::sleep(core::time::Duration::from_secs($delay));
+                    continue;
+                },
+            }
+        }
+    };
+}
+
 pub mod coin_balance;
 #[doc(hidden)]
 #[cfg(test)]
@@ -1494,6 +1539,7 @@ pub trait MmCoin: SwapOps + MarketCoinOps + fmt::Debug + Send + Sync + 'static {
 }

 #[derive(Clone, Debug)]
+#[allow(clippy::large_enum_variant)]
 pub enum MmCoinEnum {
     UtxoCoin(UtxoStandardCoin),
     QtumCoin(QtumCoin),
@@ -1508,7 +1554,7 @@ pub enum MmCoinEnum {
     #[cfg(not(target_arch = "wasm32"))]
     SplToken(SplToken),
     #[cfg(not(target_arch = "wasm32"))]
-    LightningCoin(Box<LightningCoin>),
+    LightningCoin(LightningCoin),
     Test(TestCoin),
 }

@@ -1552,7 +1598,7 @@
 #[cfg(not(target_arch = "wasm32"))]
 impl From<LightningCoin> for MmCoinEnum {
-    fn from(c: LightningCoin) -> MmCoinEnum { MmCoinEnum::LightningCoin(Box::new(c)) }
+    fn from(c: LightningCoin) -> MmCoinEnum { MmCoinEnum::LightningCoin(c) }
 }

 #[cfg(not(target_arch = "wasm32"))]
@@ -1572,7 +1618,7 @@ impl Deref for MmCoinEnum {
             MmCoinEnum::Bch(ref c) => c,
             MmCoinEnum::SlpToken(ref c) => c,
             #[cfg(not(target_arch = "wasm32"))]
-            MmCoinEnum::LightningCoin(ref c) => &**c,
+            MmCoinEnum::LightningCoin(ref c) => c,
             #[cfg(not(target_arch = "wasm32"))]
             MmCoinEnum::ZCoin(ref c) => c,
             MmCoinEnum::Test(ref c) => c,
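The three macros above differ only in whether a failure continues the caller's loop, retries in a local loop with an async sleep, or retries with a thread sleep. For reference, ok_or_retry_after_sleep could also be written as a plain async function; mm2 keeps the macro form mainly so ok_or_continue_after_sleep can `continue` the caller's own loop. A sketch under that reading, with tokio::time::sleep and eprintln! standing in for Timer::sleep and the error! macro:

use std::time::Duration;

// Retries `op` until it succeeds, sleeping `delay` between attempts.
async fn retry_after_sleep<T, E, Fut>(mut op: impl FnMut() -> Fut, delay: Duration) -> T
where
    Fut: std::future::Future<Output = Result<T, E>>,
    E: std::fmt::Debug,
{
    loop {
        match op().await {
            Ok(res) => break res,
            Err(e) => {
                eprintln!("error {:?}", e); // stands in for the error! log macro
                tokio::time::sleep(delay).await;
            },
        }
    }
}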
&table_name + " (tx_hash, tx_hex) VALUES (?1, ?2);"; + let sql = format!( + "INSERT OR IGNORE INTO {} (tx_hash, tx_hex) VALUES (?1, ?2);", + table_name + ); Ok(sql) } -fn remove_tx_from_table_by_internal_id_sql(for_coin: &str) -> Result> { +fn remove_tx_by_internal_id_sql(for_coin: &str) -> Result> { let table_name = tx_history_table(for_coin); validate_table_name(&table_name)?; - let sql = "DELETE FROM ".to_owned() + &table_name + " WHERE internal_id=?1;"; + let sql = format!("DELETE FROM {} WHERE internal_id=?1;", table_name); Ok(sql) } -fn select_tx_from_table_by_internal_id_sql(for_coin: &str) -> Result> { +fn select_tx_by_internal_id_sql(for_coin: &str) -> Result> { let table_name = tx_history_table(for_coin); validate_table_name(&table_name)?; - let sql = "SELECT details_json FROM ".to_owned() + &table_name + " WHERE internal_id=?1;"; + let sql = format!("SELECT details_json FROM {} WHERE internal_id=?1;", table_name); Ok(sql) } @@ -93,9 +108,15 @@ fn update_tx_in_table_by_internal_id_sql(for_coin: &str) -> Result Result Result Result Result> let table_name = tx_history_table(for_coin); validate_table_name(&table_name)?; - let sql = "SELECT COUNT(DISTINCT tx_hash) FROM ".to_owned() + &table_name + ";"; + let sql = format!("SELECT COUNT(DISTINCT tx_hash) FROM {};", table_name); Ok(sql) } @@ -140,7 +161,7 @@ fn get_tx_hex_from_cache_sql(for_coin: &str) -> Result let table_name = tx_cache_table(for_coin); validate_table_name(&table_name)?; - let sql = "SELECT tx_hex FROM ".to_owned() + &table_name + " WHERE tx_hash = ?1 LIMIT 1;"; + let sql = format!("SELECT tx_hex FROM {} WHERE tx_hash = ?1 LIMIT 1;", table_name); Ok(sql) } @@ -172,7 +193,7 @@ impl SqliteTxHistoryStorage { fn is_table_empty(&self, table_name: &str) -> bool { validate_table_name(table_name).unwrap(); - let sql = "SELECT COUNT(id) FROM ".to_owned() + table_name + ";"; + let sql = format!("SELECT COUNT(id) FROM {};", table_name); let conn = self.0.lock().unwrap(); let rows_count: u32 = conn.query_row(&sql, NO_PARAMS, |row| row.get(0)).unwrap(); rows_count == 0 @@ -181,20 +202,6 @@ impl SqliteTxHistoryStorage { impl TxHistoryStorageError for SqlError {} -fn query_single_row( - conn: &Connection, - query: &str, - params: P, - map_fn: F, -) -> Result, MmError> -where - P: IntoIterator, - P::Item: ToSql, - F: FnOnce(&Row<'_>) -> Result, -{ - db_common::sqlite::query_single_row(conn, query, params, map_fn).map_err(MmError::new) -} - fn tx_details_from_row(row: &Row<'_>) -> Result { let json_string: String = row.get(0)?; json::from_str(&json_string).map_err(|e| SqlError::FromSqlConversionFailure(0, Type::Text, Box::new(e))) @@ -285,7 +292,7 @@ impl TxHistoryStorage for SqliteTxHistoryStorage { for_coin: &str, internal_id: &BytesJson, ) -> Result> { - let sql = remove_tx_from_table_by_internal_id_sql(for_coin)?; + let sql = remove_tx_by_internal_id_sql(for_coin)?; let params = [format!("{:02x}", internal_id)]; let selfi = self.clone(); @@ -310,12 +317,12 @@ impl TxHistoryStorage for SqliteTxHistoryStorage { internal_id: &BytesJson, ) -> Result, MmError> { let params = [format!("{:02x}", internal_id)]; - let sql = select_tx_from_table_by_internal_id_sql(for_coin)?; + let sql = select_tx_by_internal_id_sql(for_coin)?; let selfi = self.clone(); async_blocking(move || { let conn = selfi.0.lock().unwrap(); - query_single_row(&conn, &sql, params, tx_details_from_row) + query_single_row(&conn, &sql, params, tx_details_from_row).map_to_mm(SqlError::from) }) .await } diff --git a/mm2src/coins/utxo/utxo_common.rs 
diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs
index 12ad3784fc..22499e2724 100644
--- a/mm2src/coins/utxo/utxo_common.rs
+++ b/mm2src/coins/utxo/utxo_common.rs
@@ -3348,19 +3348,6 @@ where
     }
 }

-macro_rules! try_loop_with_sleep {
-    ($e:expr, $delay: ident) => {
-        match $e {
-            Ok(res) => res,
-            Err(e) => {
-                error!("error {:?}", e);
-                Timer::sleep($delay).await;
-                continue;
-            },
-        }
-    };
-}
-
 pub async fn block_header_utxo_loop<T: UtxoCommonOps>(weak: UtxoWeak, constructor: impl Fn(UtxoArc) -> T) {
     {
         let coin = match weak.upgrade() {
@@ -3400,25 +3387,26 @@ pub async fn block_header_utxo_loop<T: UtxoCommonOps>(weak: UtxoWeak, constructor: impl Fn(UtxoArc) -> T) {
             params.difficulty_check,
             params.constant_difficulty,
         );
-        let height = try_loop_with_sleep!(coin.as_ref().rpc_client.get_block_count().compat().await, check_every);
+        let height =
+            ok_or_continue_after_sleep!(coin.as_ref().rpc_client.get_block_count().compat().await, check_every);
         let client = match &coin.as_ref().rpc_client {
             UtxoRpcClientEnum::Native(_) => break,
             UtxoRpcClientEnum::Electrum(client) => client,
         };
-        let (block_registry, block_headers) = try_loop_with_sleep!(
+        let (block_registry, block_headers) = ok_or_continue_after_sleep!(
             client
                 .retrieve_last_headers(blocks_limit_to_check, height)
                 .compat()
                 .await,
             check_every
         );
-        try_loop_with_sleep!(
+        ok_or_continue_after_sleep!(
             validate_headers(block_headers, difficulty_check, constant_difficulty),
             check_every
         );
         let ticker = coin.as_ref().conf.ticker.as_str();
-        try_loop_with_sleep!(
+        ok_or_continue_after_sleep!(
             storage.add_block_headers_to_storage(ticker, block_registry).await,
             check_every
         );
diff --git a/mm2src/coins/utxo/utxo_sql_block_header_storage.rs b/mm2src/coins/utxo/utxo_sql_block_header_storage.rs
index 75dfe4f4eb..2e7546407f 100644
--- a/mm2src/coins/utxo/utxo_sql_block_header_storage.rs
+++ b/mm2src/coins/utxo/utxo_sql_block_header_storage.rs
@@ -26,12 +26,13 @@ fn get_table_name_and_validate(for_coin: &str) -> Result<String, MmError<SqlError>>
 fn create_block_header_cache_table_sql(for_coin: &str) -> Result<String, MmError<SqlError>> {
     let table_name = get_table_name_and_validate(for_coin)?;
-    let sql = "CREATE TABLE IF NOT EXISTS ".to_owned()
-        + &table_name
-        + " (
-        block_height INTEGER NOT NULL UNIQUE,
-        hex TEXT NOT NULL
-    );";
+    let sql = format!(
+        "CREATE TABLE IF NOT EXISTS {} (
+            block_height INTEGER NOT NULL UNIQUE,
+            hex TEXT NOT NULL
+        );",
+        table_name
+    );
     Ok(sql)
 }

@@ -39,13 +40,16 @@ fn create_block_header_cache_table_sql(for_coin: &str) -> Result<String, MmError<SqlError>>
 fn insert_block_header_in_cache_sql(for_coin: &str) -> Result<String, MmError<SqlError>> {
     let table_name = get_table_name_and_validate(for_coin)?;
     // We can simply ignore the repetitive attempt to insert the same block_height
-    let sql = "INSERT OR IGNORE INTO ".to_owned() + &table_name + " (block_height, hex) VALUES (?1, ?2);";
+    let sql = format!(
+        "INSERT OR IGNORE INTO {} (block_height, hex) VALUES (?1, ?2);",
+        table_name
+    );
     Ok(sql)
 }

 fn get_block_header_by_height(for_coin: &str) -> Result<String, MmError<SqlError>> {
     let table_name = get_table_name_and_validate(for_coin)?;
-    let sql = "SELECT hex FROM ".to_owned() + &table_name + " WHERE block_height=?1;";
+    let sql = format!("SELECT hex FROM {} WHERE block_height=?1;", table_name);
     Ok(sql)
 }
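The INSERT OR IGNORE + UNIQUE combination is what makes the header cache idempotent: re-running block_header_utxo_loop over heights that are already stored is a no-op rather than a constraint error. A small standalone demonstration against an in-memory database (assumes a recent rusqlite with the params! macro; the table name is a placeholder):

use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE IF NOT EXISTS btc_block_headers (
             block_height INTEGER NOT NULL UNIQUE,
             hex TEXT NOT NULL
         );",
    )?;
    let sql = "INSERT OR IGNORE INTO btc_block_headers (block_height, hex) VALUES (?1, ?2);";
    conn.execute(sql, params![700_000i64, "deadbeef"])?;
    // a second attempt with the same height is silently ignored: 0 rows changed
    let inserted = conn.execute(sql, params![700_000i64, "deadbeef"])?;
    assert_eq!(inserted, 0);
    Ok(())
}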
diff --git a/mm2src/coins_activation/src/lightning_activation.rs b/mm2src/coins_activation/src/lightning_activation.rs
index 100c7b93cf..3273b76377 100644
--- a/mm2src/coins_activation/src/lightning_activation.rs
+++ b/mm2src/coins_activation/src/lightning_activation.rs
@@ -3,8 +3,7 @@ use crate::prelude::*;
 use async_trait::async_trait;
 use coins::lightning::ln_conf::{LightningCoinConf, LightningProtocolConf};
 use coins::lightning::ln_errors::EnableLightningError;
-use coins::lightning::ln_utils::{start_lightning, LightningParams};
-use coins::lightning::LightningCoin;
+use coins::lightning::{start_lightning, LightningCoin, LightningParams};
 use coins::utxo::utxo_standard::UtxoStandardCoin;
 use coins::utxo::UtxoCommonOps;
 use coins::{BalanceError, CoinBalance, CoinProtocol, MarketCoinOps, MmCoinEnum};
diff --git a/mm2src/common/Cargo.toml b/mm2src/common/Cargo.toml
index d07aa57c21..f92d0ba453 100644
--- a/mm2src/common/Cargo.toml
+++ b/mm2src/common/Cargo.toml
@@ -37,7 +37,7 @@ http-body = "0.1"
 itertools = "0.8"
 keys = { path = "../mm2_bitcoin/keys" }
 lazy_static = "1.4"
-lightning = "0.0.104"
+lightning = "0.0.105"
 log = "0.4.8"
 num-bigint = { version = "0.2", features = ["serde", "std"] }
 num-rational = { version = "0.2", features = ["serde", "bigint", "bigint-std"] }
diff --git a/mm2src/common/common.rs b/mm2src/common/common.rs
index d66d734b51..db2d48c826 100644
--- a/mm2src/common/common.rs
+++ b/mm2src/common/common.rs
@@ -1695,6 +1695,18 @@ pub enum PagingOptionsEnum<Id> {
     PageNumber(NonZeroUsize),
 }

+impl<Id> PagingOptionsEnum<Id> {
+    pub fn map<U, F>(self, f: F) -> PagingOptionsEnum<U>
+    where
+        F: FnOnce(Id) -> U,
+    {
+        match self {
+            PagingOptionsEnum::FromId(id) => PagingOptionsEnum::FromId(f(id)),
+            PagingOptionsEnum::PageNumber(s) => PagingOptionsEnum::PageNumber(s),
+        }
+    }
+}
+
 impl<Id> Default for PagingOptionsEnum<Id> {
     fn default() -> Self { PagingOptionsEnum::PageNumber(NonZeroUsize::new(1).expect("1 > 0")) }
 }
diff --git a/mm2src/db_common/Cargo.toml b/mm2src/db_common/Cargo.toml
index bd5e18f8c1..f1c0e32921 100644
--- a/mm2src/db_common/Cargo.toml
+++ b/mm2src/db_common/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
+hex = "0.4.2"
 log = "0.4.8"
 uuid = { version = "0.7", features = ["serde", "v4"] }
diff --git a/mm2src/db_common/src/sqlite.rs b/mm2src/db_common/src/sqlite.rs
index 4e11afb1ab..ae383fad3d 100644
--- a/mm2src/db_common/src/sqlite.rs
+++ b/mm2src/db_common/src/sqlite.rs
@@ -2,6 +2,7 @@ pub use rusqlite;
 pub use sql_builder;

 use log::debug;
+use rusqlite::types::{FromSql, Type as SqlType};
 use rusqlite::{Connection, Error as SqlError, Result as SqlResult, Row, ToSql};
 use sql_builder::SqlBuilder;
 use std::sync::{Arc, Mutex, Weak};
@@ -123,3 +124,36 @@ where
     let offset = maybe_offset?;
     Ok(Some(offset.try_into().expect("row index should be always above zero")))
 }
+
+pub fn sql_text_conversion_err<E>(field_id: usize, e: E) -> SqlError
+where
+    E: std::error::Error + Send + Sync + 'static,
+{
+    SqlError::FromSqlConversionFailure(field_id, SqlType::Text, Box::new(e))
+}
+
+pub fn h256_slice_from_row<T>(row: &Row<'_>, column_id: usize) -> Result<[u8; 32], SqlError>
+where
+    T: AsRef<[u8]> + FromSql,
+{
+    let mut h256_slice = [0u8; 32];
+    hex::decode_to_slice(row.get::<_, T>(column_id)?, &mut h256_slice as &mut [u8])
+        .map_err(|e| sql_text_conversion_err(column_id, e))?;
+    Ok(h256_slice)
+}
+
+pub fn h256_option_slice_from_row<T>(row: &Row<'_>, column_id: usize) -> Result<Option<[u8; 32]>, SqlError>
+where
+    T: AsRef<[u8]> + FromSql,
+{
+    let maybe_h256_slice = row.get::<_, Option<T>>(column_id)?;
+    let res = match maybe_h256_slice {
+        Some(s) => {
+            let mut h256_slice = [0u8; 32];
+            hex::decode_to_slice(s, &mut h256_slice as &mut [u8]).map_err(|e| sql_text_conversion_err(column_id, e))?;
+            Some(h256_slice)
+        },
+        None => None,
+    };
+    Ok(res)
+}
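The two h256 helpers above exist to turn hex TEXT columns back into [u8; 32] arrays for PaymentHash, PaymentPreimage and friends. The decode step in isolation, using the hex crate this patch adds to db_common (h256_from_hex is a hypothetical name for the sketch):

fn h256_from_hex(s: &str) -> Result<[u8; 32], hex::FromHexError> {
    let mut out = [0u8; 32];
    // fails unless the string is exactly 64 hex chars, which is what
    // h256_slice_from_row maps into a FromSqlConversionFailure
    hex::decode_to_slice(s, &mut out as &mut [u8])?;
    Ok(out)
}

fn main() {
    let hash = h256_from_hex("9cdafd6d42dcbdc06b0b5bce1866deb82630581285bbfb56870577300c0a8c6e").unwrap();
    assert_eq!(hash[0], 0x9c);
    assert!(h256_from_hex("not hex").is_err());
}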
diff --git a/mm2src/mm2_bitcoin/rpc/src/v1/types/hash.rs b/mm2src/mm2_bitcoin/rpc/src/v1/types/hash.rs
index 18ebb29081..21ec02a853 100644
--- a/mm2src/mm2_bitcoin/rpc/src/v1/types/hash.rs
+++ b/mm2src/mm2_bitcoin/rpc/src/v1/types/hash.rs
@@ -157,6 +157,7 @@ impl_hash!(H256, GlobalH256, 32);
 impl_hash!(H160, GlobalH160, 20);

 impl H256 {
+    #[inline]
     pub fn reversed(&self) -> Self {
         let mut result = *self;
         result.0.reverse();
diff --git a/mm2src/mm2_tests/lightning_tests.rs b/mm2src/mm2_tests/lightning_tests.rs
index 35e34fa845..929e6c9934 100644
--- a/mm2src/mm2_tests/lightning_tests.rs
+++ b/mm2src/mm2_tests/lightning_tests.rs
@@ -49,15 +49,15 @@ fn start_lightning_nodes() -> (MarketMakerIt, MarketMakerIt, String, String) {
             "network": "testnet",
             "confirmations": {
                 "background": {
-                    "default_feerate": 253,
+                    "default_fee_per_kb": 1012,
                     "n_blocks": 12
                 },
                 "normal": {
-                    "default_feerate": 2000,
+                    "default_fee_per_kb": 8000,
                     "n_blocks": 6
                 },
                 "high_priority": {
-                    "default_feerate": 5000,
+                    "default_fee_per_kb": 20000,
                     "n_blocks": 1
                 }
             }
@@ -150,15 +150,15 @@ fn test_enable_lightning() {
             "network": "testnet",
             "confirmations": {
                 "background": {
-                    "default_feerate": 253,
+                    "default_fee_per_kb": 1012,
                     "n_blocks": 12
                 },
                 "normal": {
-                    "default_feerate": 2000,
+                    "default_fee_per_kb": 8000,
                     "n_blocks": 6
                 },
                 "high_priority": {
-                    "default_feerate": 5000,
+                    "default_fee_per_kb": 20000,
                     "n_blocks": 1
                 }
             }
@@ -266,11 +266,17 @@ fn test_open_channel() {
     let list_channels_node_1_res: Json = json::from_str(&list_channels_node_1.1).unwrap();
     log!("list_channels_node_1_res "[list_channels_node_1_res]);
     assert_eq!(
-        list_channels_node_1_res["result"]["channels"][0]["counterparty_node_id"],
+        list_channels_node_1_res["result"]["open_channels"][0]["counterparty_node_id"],
         node_2_id
     );
-    assert_eq!(list_channels_node_1_res["result"]["channels"][0]["is_outbound"], false);
-    assert_eq!(list_channels_node_1_res["result"]["channels"][0]["balance_msat"], 0);
+    assert_eq!(
+        list_channels_node_1_res["result"]["open_channels"][0]["is_outbound"],
+        false
+    );
+    assert_eq!(
+        list_channels_node_1_res["result"]["open_channels"][0]["balance_msat"],
+        0
+    );

     let list_channels_node_2 = block_on(mm_node_2.rpc(&json! ({
         "userpass": mm_node_2.userpass,
@@ -288,12 +294,15 @@ fn test_open_channel() {
     );
     let list_channels_node_2_res: Json = json::from_str(&list_channels_node_2.1).unwrap();
     assert_eq!(
-        list_channels_node_2_res["result"]["channels"][0]["counterparty_node_id"],
+        list_channels_node_2_res["result"]["open_channels"][0]["counterparty_node_id"],
         node_1_id
     );
-    assert_eq!(list_channels_node_2_res["result"]["channels"][0]["is_outbound"], true);
     assert_eq!(
-        list_channels_node_2_res["result"]["channels"][0]["balance_msat"],
+        list_channels_node_2_res["result"]["open_channels"][0]["is_outbound"],
+        true
+    );
+    assert_eq!(
+        list_channels_node_2_res["result"]["open_channels"][0]["balance_msat"],
         2000000
     );
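The renamed config keys also appear to change units: LDK's default_feerate values read as sat per 1000 weight units, while default_fee_per_kb reads as sat per 1000 (virtual) bytes. With 4 weight units per vbyte the new numbers are the old ones times four, which is presumably where 1012, 8000 and 20000 come from:

// sat/kWU -> sat/kvB conversion implied by the renamed fields above
const WU_PER_VBYTE: u64 = 4;

fn fee_per_kb(feerate_sat_per_kwu: u64) -> u64 {
    feerate_sat_per_kwu * WU_PER_VBYTE
}

fn main() {
    assert_eq!(fee_per_kb(253), 1012);   // background
    assert_eq!(fee_per_kb(2000), 8000);  // normal
    assert_eq!(fee_per_kb(5000), 20000); // high_priority
}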
diff --git a/mm2src/rpc/dispatcher/dispatcher.rs b/mm2src/rpc/dispatcher/dispatcher.rs
index 369b7339c9..699f5d73c9 100644
--- a/mm2src/rpc/dispatcher/dispatcher.rs
+++ b/mm2src/rpc/dispatcher/dispatcher.rs
@@ -33,7 +33,7 @@ use std::net::SocketAddr;
 cfg_native! {
     use coins::lightning::{close_channel, connect_to_lightning_node, generate_invoice, get_channel_details,
-                           get_claimable_balances, get_payment_details, list_channels, list_payments, open_channel,
+                           get_claimable_balances, get_payment_details, list_closed_channels_by_filter, list_open_channels_by_filter, list_payments_by_filter, open_channel,
                            send_payment, LightningCoin};
     use coins::{SolanaCoin, SplToken};
     use coins::z_coin::ZCoin;
@@ -170,8 +170,9 @@ async fn dispatcher_v2(request: MmRpcRequest, ctx: MmArc) -> DispatcherResult<Response<Vec<u8>>> {
         "init_z_coin" => handle_mmrpc(ctx, request, init_standalone_coin::<ZCoin>).await,
         "init_z_coin_status" => handle_mmrpc(ctx, request, init_standalone_coin_status::<ZCoin>).await,
         "init_z_coin_user_action" => handle_mmrpc(ctx, request, init_standalone_coin_user_action::<ZCoin>).await,
-        "list_channels" => handle_mmrpc(ctx, request, list_channels).await,
-        "list_payments" => handle_mmrpc(ctx, request, list_payments).await,
+        "list_closed_channels_by_filter" => handle_mmrpc(ctx, request, list_closed_channels_by_filter).await,
+        "list_open_channels_by_filter" => handle_mmrpc(ctx, request, list_open_channels_by_filter).await,
+        "list_payments_by_filter" => handle_mmrpc(ctx, request, list_payments_by_filter).await,
         "open_channel" => handle_mmrpc(ctx, request, open_channel).await,
         "send_payment" => handle_mmrpc(ctx, request, send_payment).await,
         "enable_solana_with_tokens" => {