From 03810e296485c3dd38e83d0d0dc8d3fcbefbaa3e Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 3 Sep 2024 22:40:21 +0200 Subject: [PATCH 01/25] wallet: add export_transactions cli command --- xelis_wallet/src/main.rs | 17 ++++++++++++++++ xelis_wallet/src/wallet.rs | 41 +++++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 8690ee7a..76839a77 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -1,4 +1,5 @@ use std::{ + fs::File, ops::ControlFlow, path::Path, sync::Arc, @@ -414,6 +415,7 @@ async fn setup_wallet_command_manager(wallet: Arc, command_manager: &Com command_manager.add_command(Command::new("set_nonce", "Set new nonce", CommandHandler::Async(async_handler!(set_nonce))))?; command_manager.add_command(Command::new("logout", "Logout from existing wallet", CommandHandler::Async(async_handler!(logout))))?; command_manager.add_command(Command::new("clear_tx_cache", "Clear the current TX cache", CommandHandler::Async(async_handler!(clear_tx_cache))))?; + command_manager.add_command(Command::with_required_arguments("export_transactions", "Export all your transactions in a CSV file", vec![Arg::new("filename", ArgType::String)], CommandHandler::Async(async_handler!(export_transactions_csv))))?; #[cfg(feature = "network_handler")] { @@ -902,6 +904,21 @@ async fn history(manager: &CommandManager, mut arguments: ArgumentManager) -> Re Ok(()) } +async fn export_transactions_csv(manager: &CommandManager, mut arguments: ArgumentManager) -> Result<(), CommandError> { + let filename = arguments.get_value("filename")?.to_string_value()?; + let context = manager.get_context().lock()?; + let wallet: &Arc = context.get()?; + let storage = wallet.get_storage().read().await; + let transactions = storage.get_transactions()?; + let mut file = File::create(&filename).context("Error while creating CSV file")?; + + wallet.export_transactions_in_csv(transactions, 
&mut file).context("Error while exporting transactions to CSV")?; + + // writer.flush().context("Error while flushing CSV file")?; + manager.message(format!("Transactions have been exported to {}", filename)); + Ok(()) +} + async fn clear_tx_cache(manager: &CommandManager, _: ArgumentManager) -> Result<(), CommandError> { let context = manager.get_context().lock()?; let wallet: &Arc = context.get()?; diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 6041c482..f7ab10fb 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -1,4 +1,8 @@ -use std::sync::{atomic::{AtomicBool, Ordering}, Arc}; +use std::{ + io::Write, + sync::{atomic::{AtomicBool, Ordering}, + Arc} +}; use anyhow::{Error, Context}; use serde::Serialize; use xelis_common::{ @@ -49,6 +53,7 @@ use crate::{ PASSWORD_HASH_SIZE, SALT_SIZE }, + entry::{EntryData, TransactionEntry as InnerTransactionEntry}, error::WalletError, mnemonics, precomputed_tables::PrecomputedTablesShared, @@ -801,6 +806,40 @@ impl Wallet { Ok(estimated_fees) } + // Export all transactions in CSV format to the given writer + // This will sort the transactions by topoheight before exporting + pub fn export_transactions_in_csv(&self, mut transactions: Vec, w: &mut W) -> Result<(), WalletError> { + trace!("export transactions in csv"); + + // Sort transactions by topoheight + transactions.sort_by(|a, b| a.get_topoheight().cmp(&b.get_topoheight())); + + writeln!(w, "TopoHeight,Hash,Type,From/To,Asset,Amount,Fee,Nonce").context("Error while writing headers")?; + for tx in transactions { + match tx.get_entry() { + EntryData::Burn { asset, amount, fee, nonce } => { + writeln!(w, "{},{},{},{},-,{},{},{}", tx.get_topoheight(), tx.get_hash(), "Burn", asset, amount, fee, nonce).context("Error while writing csv line")?; + }, + EntryData::Coinbase { reward } => { + writeln!(w, "{},{},{},{},-,{},-,-", tx.get_topoheight(), tx.get_hash(), "Coinbase", "XELIS", reward).context("Error while writing csv 
line")?; + }, + EntryData::Incoming { from, transfers } => { + for transfer in transfers { + writeln!(w, "{},{},{},{},{},{},-,-", tx.get_topoheight(), tx.get_hash(), "Incoming", from.as_address(self.get_network().is_mainnet()), transfer.get_asset(), transfer.get_amount()).context("Error while writing csv line")?; + } + }, + EntryData::Outgoing { transfers, fee, nonce } => { + for transfer in transfers { + writeln!(w, "{},{},{},{},{},{},{},{}", tx.get_topoheight(), tx.get_hash(), "Outgoing", transfer.get_destination().as_address(self.get_network().is_mainnet()), transfer.get_asset(), transfer.get_amount(), fee, nonce).context("Error while writing csv line")?; + } + } + } + } + + w.flush().context("Error while flushing CSV file")?; + Ok(()) + } + // set wallet in online mode: start a communication task which will keep the wallet synced #[cfg(feature = "network_handler")] pub async fn set_online_mode(self: &Arc, daemon_address: &String, auto_reconnect: bool) -> Result<(), WalletError> { From 6a78ffac79205f1215ccc211afd065611507f700 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 4 Sep 2024 11:07:28 +0200 Subject: [PATCH 02/25] common: non zero optional serializer --- xelis_common/src/serializer/reader.rs | 18 ++++++++++++++++++ xelis_common/src/serializer/writer.rs | 17 +++++++++-------- 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/xelis_common/src/serializer/reader.rs b/xelis_common/src/serializer/reader.rs index 2a191c21..57f78b74 100644 --- a/xelis_common/src/serializer/reader.rs +++ b/xelis_common/src/serializer/reader.rs @@ -142,6 +142,24 @@ impl<'a> Reader<'a> { Ok(Some(byte)) } + pub fn read_optional_non_zero_u16(&mut self) -> Result, ReaderError> { + let value = self.read_u16()?; + if value == 0 { + return Ok(None) + } + + Ok(Some(value)) + } + + pub fn read_optional_non_zero_u64(&mut self) -> Result, ReaderError> { + let value = self.read_u64()?; + if value == 0 { + return Ok(None) + } + + Ok(Some(value)) + } + pub fn total_size(&self) -> 
usize { self.bytes.len() } diff --git a/xelis_common/src/serializer/writer.rs b/xelis_common/src/serializer/writer.rs index fec9d539..5fb267b1 100644 --- a/xelis_common/src/serializer/writer.rs +++ b/xelis_common/src/serializer/writer.rs @@ -59,14 +59,15 @@ impl Writer { } pub fn write_optional_non_zero_u8(&mut self, opt: Option) { - match opt { - Some(v) if v != 0 => { - self.bytes.push(v); - }, - _ => { - self.bytes.push(0); - } - }; + self.bytes.push(opt.unwrap_or(0)); + } + + pub fn write_optional_non_zero_u16(&mut self, opt: Option) { + self.write_u16(opt.unwrap_or(0)); + } + + pub fn write_optional_non_zero_u64(&mut self, opt: Option) { + self.write_u64(&opt.unwrap_or(0)); } pub fn total_write(&self) -> usize { From 986fa35624f6238ee362121d9e197b7a327a3a5a Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 4 Sep 2024 11:08:02 +0200 Subject: [PATCH 03/25] daemon: prevent zero local port --- xelis_daemon/src/p2p/error.rs | 2 ++ xelis_daemon/src/p2p/mod.rs | 4 ++++ xelis_daemon/src/p2p/packet/handshake.rs | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/xelis_daemon/src/p2p/error.rs b/xelis_daemon/src/p2p/error.rs index c7579db4..a3400154 100644 --- a/xelis_daemon/src/p2p/error.rs +++ b/xelis_daemon/src/p2p/error.rs @@ -36,6 +36,8 @@ use super::{ #[derive(Error, Debug)] pub enum P2pError { + #[error("Invalid local port, it must be greater than 0")] + InvalidLocalPort, #[error("disk error: {0}")] DiskError(#[from] DiskError), #[error("Invalid P2P version: {}", _0)] diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 52b9448d..f1edcfe6 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -584,6 +584,10 @@ impl P2pServer { return Err(P2pError::InvalidNetworkID); } + if handshake.get_local_port() == 0 { + return Err(P2pError::InvalidLocalPort); + } + if self.has_peer_id_used(&handshake.get_peer_id()).await { return Err(P2pError::PeerIdAlreadyUsed(handshake.get_peer_id())); } diff --git 
a/xelis_daemon/src/p2p/packet/handshake.rs b/xelis_daemon/src/p2p/packet/handshake.rs index c4f8adea..3f02d565 100644 --- a/xelis_daemon/src/p2p/packet/handshake.rs +++ b/xelis_daemon/src/p2p/packet/handshake.rs @@ -90,6 +90,10 @@ impl<'a> Handshake<'a> { Peer::new(connection, self.get_peer_id(), self.node_tag.into_owned(), self.local_port, self.version.into_owned(), self.top_hash.into_owned(), self.topoheight, self.height, self.pruned_topoheight, priority, self.cumulative_difficulty.into_owned(), peer_list, peers, self.can_be_shared) } + pub fn get_local_port(&self) -> u16 { + self.local_port + } + pub fn get_version(&self) -> &String { &self.version } From 8743072f8755b3cff5df864485838e7b23ee3932 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 4 Sep 2024 11:44:37 +0200 Subject: [PATCH 04/25] daemon: use optional for peerlist entry, fix visual bugs --- xelis_daemon/src/p2p/peer_list.rs | 124 ++++++++++++++++++------------ 1 file changed, 73 insertions(+), 51 deletions(-) diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index a3a74898..bb7a2db1 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -56,11 +56,11 @@ enum PeerListEntryState { #[derive(Serialize, Deserialize, PartialEq, Eq)] pub struct PeerListEntry { - first_seen: TimestampSeconds, - last_seen: TimestampSeconds, - last_connection_try: TimestampSeconds, + first_seen: Option, + last_seen: Option, + last_connection_try: Option, fail_count: u8, - local_port: u16, + local_port: Option, // Until when the peer is banned temp_ban_until: Option, state: PeerListEntryState @@ -128,6 +128,10 @@ impl PeerList { } info!("Peer disconnected: {}", peer); + + // Update the peerlist entry + self.update_peer(&peer).await?; + if let Some(peer_disconnect_channel) = &self.peer_disconnect_channel { debug!("Notifying server that {} disconnected", peer); if let Err(e) = peer_disconnect_channel.send(peer).await { @@ -169,13 +173,21 @@ impl PeerList { 
debug!("Updating {} in stored peerlist", peer); // reset the fail count and update the last seen time entry.set_fail_count(0); + if entry.get_first_seen().is_none() { + entry.set_first_seen(peer.get_connection().connected_on()); + } + entry.set_last_seen(get_current_time_in_seconds()); entry.set_local_port(peer.get_local_port()); self.cache.set_peerlist_entry(&ip, entry)?; } else { debug!("Saving {} in stored peerlist", peer); - self.cache.set_peerlist_entry(&ip, PeerListEntry::new(peer.get_local_port(), PeerListEntryState::Graylist))?; + let mut entry = PeerListEntry::new(Some(peer.get_local_port()), PeerListEntryState::Graylist); + entry.set_first_seen(peer.get_connection().connected_on()); + entry.set_last_seen(get_current_time_in_seconds()); + + self.cache.set_peerlist_entry(&ip, entry)?; } Ok(()) @@ -224,6 +236,10 @@ impl PeerList { if let Err(e) = peer.signal_exit().await { error!("Error while trying to signal exit to {}: {}", peer, e); } + + if let Err(e) = self.update_peer(&peer).await { + error!("Error while updating peer {}: {}", peer, e); + } } if let Err(e) = self.cache.flush().await { @@ -332,7 +348,7 @@ impl PeerList { entry.set_state(state); self.cache.set_peerlist_entry(addr, entry)?; } else { - self.cache.set_peerlist_entry(addr, PeerListEntry::new(0, state))?; + self.cache.set_peerlist_entry(addr, PeerListEntry::new(None, state))?; } Ok(()) @@ -341,17 +357,14 @@ impl PeerList { // Set a peer to graylist, if its local port is 0, delete it from the stored peerlist // Because it was added manually and never connected to before pub async fn set_graylist_for_peer(&self, ip: &IpAddr) -> Result<(), P2pError> { - let delete = if self.cache.has_peerlist_entry(ip)? { + if self.cache.has_peerlist_entry(ip)? 
{ let mut entry = self.cache.get_peerlist_entry(ip)?; - entry.set_state(PeerListEntryState::Graylist); - entry.get_local_port() == 0 - } else { - false - }; - - if delete { - info!("Deleting {} from stored peerlist", ip); - self.cache.remove_peerlist_entry(ip)?; + if entry.get_local_port().is_none() { + info!("Deleting {} from stored peerlist", ip); + self.cache.remove_peerlist_entry(ip)?; + } else { + entry.set_state(PeerListEntryState::Graylist); + } } Ok(()) @@ -405,7 +418,7 @@ impl PeerList { entry.set_temp_ban_until(Some(get_current_time_in_seconds() + seconds)); self.cache.set_peerlist_entry(ip, entry)?; } else { - self.cache.set_peerlist_entry(ip, PeerListEntry::new(0, PeerListEntryState::Graylist))?; + self.cache.set_peerlist_entry(ip, PeerListEntry::new(None, PeerListEntryState::Graylist))?; } Ok(()) @@ -432,16 +445,18 @@ impl PeerList { let mut potential_gray_peer = None; for res in peerlist_entries { let (ip, mut entry) = res?; - let addr = SocketAddr::new(ip, entry.get_local_port()); - if *entry.get_state() != PeerListEntryState::Blacklist && entry.get_last_connection_try() + (entry.get_fail_count() as u64 * P2P_EXTEND_PEERLIST_DELAY) <= current_time && Self::internal_get_peer_by_addr(&peers, &addr).is_none() { - // Store it if we don't have any whitelisted peer to connect to - if potential_gray_peer.is_none() && *entry.get_state() == PeerListEntryState::Graylist { - potential_gray_peer = Some((ip, addr)); - } else if *entry.get_state() == PeerListEntryState::Whitelist { - debug!("Found peer to connect: {}, updating last connection try", addr); - entry.set_last_connection_try(current_time); - self.cache.set_peerlist_entry(&ip, entry)?; - return Ok(Some(addr)); + if let Some(local_port) = entry.get_local_port() { + let addr = SocketAddr::new(ip, local_port); + if *entry.get_state() != PeerListEntryState::Blacklist && entry.get_last_connection_try().unwrap_or(0) + (entry.get_fail_count() as u64 * P2P_EXTEND_PEERLIST_DELAY) <= current_time && 
Self::internal_get_peer_by_addr(&peers, &addr).is_none() { + // Store it if we don't have any whitelisted peer to connect to + if potential_gray_peer.is_none() && *entry.get_state() == PeerListEntryState::Graylist { + potential_gray_peer = Some((ip, addr)); + } else if *entry.get_state() == PeerListEntryState::Whitelist { + debug!("Found peer to connect: {}, updating last connection try", addr); + entry.set_last_connection_try(current_time); + self.cache.set_peerlist_entry(&ip, entry)?; + return Ok(Some(addr)); + } } } } @@ -466,7 +481,7 @@ impl PeerList { let mut entry = if self.cache.has_peerlist_entry(ip)? { self.cache.get_peerlist_entry(ip)? } else { - PeerListEntry::new(0, PeerListEntryState::Graylist) + PeerListEntry::new(None, PeerListEntryState::Graylist) }; let fail_count = entry.get_fail_count(); @@ -494,19 +509,18 @@ impl PeerList { return Ok(false); } - self.cache.set_peerlist_entry(&ip, PeerListEntry::new(addr.port(), PeerListEntryState::Graylist))?; + self.cache.set_peerlist_entry(&ip, PeerListEntry::new(Some(addr.port()), PeerListEntryState::Graylist))?; Ok(true) } } impl PeerListEntry { - fn new(local_port: u16, state: PeerListEntryState) -> Self { - let current_time = get_current_time_in_seconds(); + fn new(local_port: Option, state: PeerListEntryState) -> Self { Self { - first_seen: current_time, - last_seen: current_time, - last_connection_try: 0, + first_seen: None, + last_seen: None, + last_connection_try: None, fail_count: 0, local_port, temp_ban_until: None, @@ -514,7 +528,7 @@ impl PeerListEntry { } } - fn get_last_connection_try(&self) -> TimestampSeconds { + fn get_last_connection_try(&self) -> Option { self.last_connection_try } @@ -522,12 +536,20 @@ impl PeerListEntry { &self.state } + pub fn set_first_seen(&mut self, first_seen: TimestampSeconds) { + self.first_seen = Some(first_seen); + } + + pub fn get_first_seen(&self) -> Option { + self.first_seen + } + fn set_last_seen(&mut self, last_seen: TimestampSeconds) { - self.last_seen = 
last_seen; + self.last_seen = Some(last_seen); } fn set_last_connection_try(&mut self, last_connection_try: TimestampSeconds) { - self.last_connection_try = last_connection_try; + self.last_connection_try = Some(last_connection_try); } fn set_state(&mut self, state: PeerListEntryState) { @@ -551,10 +573,10 @@ impl PeerListEntry { } fn set_local_port(&mut self, local_port: u16) { - self.local_port = local_port; + self.local_port = Some(local_port); } - fn get_local_port(&self) -> u16 { + fn get_local_port(&self) -> Option { self.local_port } } @@ -564,11 +586,11 @@ impl Display for PeerListEntry { let current_time = get_current_time_in_seconds(); write!( f, - "PeerListEntry[state: {:?}, first seen: {} ago, last seen: {} ago, last try: {} ago]", + "PeerListEntry[state: {:?}, first seen: {}, last seen: {}, last try: {}]", self.state, - format_duration(Duration::from_secs(current_time - self.first_seen)), - format_duration(Duration::from_secs(current_time - self.last_seen)), - format_duration(Duration::from_secs(current_time - self.last_connection_try)) + self.first_seen.map(|v| format!("{} ago", format_duration(Duration::from_secs(current_time - v)))).unwrap_or_else(|| "never".to_string()), + self.last_seen.map(|v| format!("{} ago", format_duration(Duration::from_secs(current_time - v)))).unwrap_or_else(|| "never".to_string()), + self.last_connection_try.map(|v| format!("{} ago", format_duration(Duration::from_secs(current_time - v)))).unwrap_or_else(|| "never".to_string()) ) } } @@ -594,21 +616,21 @@ impl Serializer for PeerListEntryState { impl Serializer for PeerListEntry { fn write(&self, writer: &mut Writer) { - self.first_seen.write(writer); - self.last_seen.write(writer); - self.last_connection_try.write(writer); + writer.write_optional_non_zero_u64(self.first_seen); + writer.write_optional_non_zero_u64(self.last_seen); + writer.write_optional_non_zero_u64(self.last_connection_try); self.fail_count.write(writer); - self.local_port.write(writer); + 
writer.write_optional_non_zero_u16(self.local_port); self.temp_ban_until.write(writer); self.state.write(writer); } fn read(reader: &mut Reader) -> Result { - let first_seen = reader.read_u64()?; - let last_seen = reader.read_u64()?; - let last_connection_try = reader.read_u64()?; + let first_seen = reader.read_optional_non_zero_u64()?; + let last_seen = reader.read_optional_non_zero_u64()?; + let last_connection_try = reader.read_optional_non_zero_u64()?; let fail_count = reader.read_u8()?; - let local_port = reader.read_u16()?; + let local_port = reader.read_optional_non_zero_u16()?; let temp_ban_until = Option::read(reader)?; let state = PeerListEntryState::read(reader)?; From 84b48d64fc8dbee7d22a9f38812be9b8e1371b09 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 4 Sep 2024 15:08:55 +0200 Subject: [PATCH 05/25] daemon: don't connect to tempbanned peers, delete peerlist entry if fail count is set to max --- xelis_daemon/src/p2p/peer_list.rs | 32 ++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index bb7a2db1..aa30781b 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -59,6 +59,10 @@ pub struct PeerListEntry { first_seen: Option, last_seen: Option, last_connection_try: Option, + // Fail count is the count of failed connection attempts + // Every `PEER_FAIL_TO_CONNECT_LIMIT`, we will temp ban the peer + // If fail count is at maximum (`u8::MAX`) we will remove the peer from the stored peerlist + // If the peer is whitelisted, we don't increase the fail count fail_count: u8, local_port: Option, // Until when the peer is banned @@ -445,9 +449,16 @@ impl PeerList { let mut potential_gray_peer = None; for res in peerlist_entries { let (ip, mut entry) = res?; + + // If the peer is blacklisted or temp banned, skip it + if *entry.get_state() == PeerListEntryState::Blacklist || 
entry.get_temp_ban_until().map(|temp_ban_until| temp_ban_until > current_time).unwrap_or(false) { + debug!("Skipping {} because it's blacklisted or temp banned ({})", ip, format_duration(Duration::from_secs(entry.get_temp_ban_until().unwrap_or(0)))); + continue; + } + if let Some(local_port) = entry.get_local_port() { let addr = SocketAddr::new(ip, local_port); - if *entry.get_state() != PeerListEntryState::Blacklist && entry.get_last_connection_try().unwrap_or(0) + (entry.get_fail_count() as u64 * P2P_EXTEND_PEERLIST_DELAY) <= current_time && Self::internal_get_peer_by_addr(&peers, &addr).is_none() { + if entry.get_last_connection_try().unwrap_or(0) + (entry.get_fail_count() as u64 * P2P_EXTEND_PEERLIST_DELAY) <= current_time && Self::internal_get_peer_by_addr(&peers, &addr).is_none() { // Store it if we don't have any whitelisted peer to connect to if potential_gray_peer.is_none() && *entry.get_state() == PeerListEntryState::Graylist { potential_gray_peer = Some((ip, addr)); @@ -486,15 +497,18 @@ impl PeerList { let fail_count = entry.get_fail_count(); if *entry.get_state() != PeerListEntryState::Whitelist { - if temp_ban && fail_count != 0 && fail_count % PEER_FAIL_TO_CONNECT_LIMIT == 0 { - debug!("Temp banning {} for failing too many times (count = {})", ip, fail_count); - entry.set_temp_ban_until(Some(get_current_time_in_seconds() + PEER_TEMP_BAN_TIME_ON_CONNECT)); - } - debug!("Increasing fail count for {}", ip); - entry.set_fail_count(fail_count.wrapping_add(1)); - - self.cache.set_peerlist_entry(ip, entry)?; + if fail_count == u8::MAX { + debug!("Removing {} from stored peerlist because fail count is at max", ip); + self.cache.remove_peerlist_entry(ip)?; + } else { + if temp_ban && fail_count != 0 && fail_count % PEER_FAIL_TO_CONNECT_LIMIT == 0 { + debug!("Temp banning {} for failing too many times (count = {})", ip, fail_count); + entry.set_temp_ban_until(Some(get_current_time_in_seconds() + PEER_TEMP_BAN_TIME_ON_CONNECT)); + } + 
entry.set_fail_count(fail_count.wrapping_add(1)); + self.cache.set_peerlist_entry(ip, entry)?; + } } else { debug!("{} is whitelisted, not increasing fail count", ip); } From 81ecbcdc8cb2e48c8db7f64cf55ce595cb6af2ae Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 5 Sep 2024 10:35:58 +0200 Subject: [PATCH 06/25] daemon: add temp_ban_address cli command, don't retry too fast the same peer from list --- xelis_daemon/src/config.rs | 11 ++++++++--- xelis_daemon/src/main.rs | 20 ++++++++++++++++++++ xelis_daemon/src/p2p/peer_list.rs | 13 +++++++++---- 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/xelis_daemon/src/config.rs b/xelis_daemon/src/config.rs index 6737ce53..7f96d838 100644 --- a/xelis_daemon/src/config.rs +++ b/xelis_daemon/src/config.rs @@ -127,6 +127,10 @@ pub const P2P_PING_PEER_LIST_LIMIT: usize = 16; pub const P2P_DEFAULT_MAX_PEERS: usize = 32; // time in seconds between each time we try to connect to a new peer pub const P2P_EXTEND_PEERLIST_DELAY: u64 = 60; +// time in seconds between each time we try to connect to a outgoing peer +// At least 5 minutes of countdown to retry to connect to the same peer +// This will be multiplied by the number of fails +pub const P2P_PEERLIST_RETRY_AFTER: u64 = 60 * 15; // Peer wait on error accept new p2p connections in seconds pub const P2P_PEER_WAIT_ON_ERROR: u64 = 15; // Delay in second to connect to priority nodes @@ -147,12 +151,13 @@ pub const PEER_FAIL_TIME_RESET: u64 = 30 * 60; pub const PEER_FAIL_LIMIT: u8 = 50; // number of fail during handshake before temp ban pub const PEER_FAIL_TO_CONNECT_LIMIT: u8 = 3; -// number of seconds to temp ban the peer in case of fail reached -// Set to 15 minutes -pub const PEER_TEMP_BAN_TIME: u64 = 15 * 60; // number of seconds to temp ban the peer in case of fail reached during handshake +// It is only used for incoming connections // Set to 1 minute pub const PEER_TEMP_BAN_TIME_ON_CONNECT: u64 = 60; +// number of seconds to temp ban the peer in case of fail 
count limit (`PEER_FAIL_LIMIT`) reached +// Set to 15 minutes +pub const PEER_TEMP_BAN_TIME: u64 = 15 * 60; // millis until we timeout pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 15_000; // millis until we timeout during a bootstrap request diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 3d7a4e5a..a9bff9b8 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -206,6 +206,7 @@ async fn run_prompt(prompt: ShareablePrompt, blockchain: Arc))))?; command_manager.add_command(Command::with_optional_arguments("verify_chain", "Check chain supply", vec![Arg::new("topoheight", ArgType::Number)], CommandHandler::Async(async_handler!(verify_chain::))))?; command_manager.add_command(Command::with_required_arguments("kick_peer", "Kick a peer using its ip:port", vec![Arg::new("address", ArgType::String)], CommandHandler::Async(async_handler!(kick_peer::))))?; + command_manager.add_command(Command::with_required_arguments("temp_ban_address", "Temporarily ban an address", vec![Arg::new("address", ArgType::String), Arg::new("seconds", ArgType::Number)], CommandHandler::Async(async_handler!(temp_ban_address::))))?; command_manager.add_command(Command::new("clear_caches", "Clear storage caches", CommandHandler::Async(async_handler!(clear_caches::))))?; command_manager.add_command(Command::new("clear_rpc_connections", "Clear all WS connections from RPC", CommandHandler::Async(async_handler!(clear_rpc_connections::))))?; command_manager.add_command(Command::new("clear_p2p_connections", "Clear all P2P connections", CommandHandler::Async(async_handler!(clear_p2p_connections::))))?; @@ -473,6 +474,25 @@ async fn kick_peer(manager: &CommandManager, mut args: ArgumentManag Ok(()) } +async fn temp_ban_address(manager: &CommandManager, mut args: ArgumentManager) -> Result<(), CommandError> { + let context = manager.get_context().lock()?; + let blockchain: &Arc> = context.get()?; + match blockchain.get_p2p().read().await.as_ref() { + Some(p2p) => { + let 
addr: IpAddr = args.get_value("address")?.to_string_value()?.parse().context("Error while parsing socket address")?; + let seconds = args.get_value("seconds")?.to_number()? as u64; + let peer_list = p2p.get_peer_list(); + + peer_list.temp_ban_address(&addr, seconds).await.context("Error while banning address")?; + }, + None => { + manager.error("P2P is not enabled"); + } + }; + + Ok(()) +} + const ELEMENTS_PER_PAGE: usize = 10; async fn list_miners(manager: &CommandManager, mut arguments: ArgumentManager) -> Result<(), CommandError> { diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index aa30781b..3bdd413b 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -1,8 +1,8 @@ use crate::{ config::{ - P2P_EXTEND_PEERLIST_DELAY, PEER_FAIL_TO_CONNECT_LIMIT, PEER_TEMP_BAN_TIME_ON_CONNECT, + P2P_PEERLIST_RETRY_AFTER }, p2p::packet::peer_disconnected::PacketPeerDisconnected }; @@ -458,7 +458,7 @@ impl PeerList { if let Some(local_port) = entry.get_local_port() { let addr = SocketAddr::new(ip, local_port); - if entry.get_last_connection_try().unwrap_or(0) + (entry.get_fail_count() as u64 * P2P_EXTEND_PEERLIST_DELAY) <= current_time && Self::internal_get_peer_by_addr(&peers, &addr).is_none() { + if entry.get_last_connection_try().unwrap_or(0) + (entry.get_fail_count() as u64 * P2P_PEERLIST_RETRY_AFTER) <= current_time && Self::internal_get_peer_by_addr(&peers, &addr).is_none() { // Store it if we don't have any whitelisted peer to connect to if potential_gray_peer.is_none() && *entry.get_state() == PeerListEntryState::Graylist { potential_gray_peer = Some((ip, addr)); @@ -487,6 +487,7 @@ impl PeerList { // increase the fail count of a peer + // If tempban is allowed, and the fail count is at the limit, temp ban the peer pub async fn increase_fail_count_for_peerlist_entry(&self, ip: &IpAddr, temp_ban: bool) -> Result<(), P2pError> { trace!("increasing fail count for {}, allow temp ban: {}", ip, temp_ban); 
let mut entry = if self.cache.has_peerlist_entry(ip)? { @@ -495,18 +496,22 @@ impl PeerList { PeerListEntry::new(None, PeerListEntryState::Graylist) }; - let fail_count = entry.get_fail_count(); if *entry.get_state() != PeerListEntryState::Whitelist { debug!("Increasing fail count for {}", ip); + let mut fail_count = entry.get_fail_count(); if fail_count == u8::MAX { debug!("Removing {} from stored peerlist because fail count is at max", ip); self.cache.remove_peerlist_entry(ip)?; } else { + // If we allow to temp ban, and the fail count is at the limit, temp ban the peer if temp_ban && fail_count != 0 && fail_count % PEER_FAIL_TO_CONNECT_LIMIT == 0 { debug!("Temp banning {} for failing too many times (count = {})", ip, fail_count); entry.set_temp_ban_until(Some(get_current_time_in_seconds() + PEER_TEMP_BAN_TIME_ON_CONNECT)); } - entry.set_fail_count(fail_count.wrapping_add(1)); + + fail_count += 1; + debug!("Fail count is now {} for {}", fail_count, ip); + entry.set_fail_count(fail_count); self.cache.set_peerlist_entry(ip, entry)?; } } else { From ea5780c8b488e8b86e4654b5fb08a59b97e42390 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 8 Sep 2024 00:18:26 +0200 Subject: [PATCH 07/25] common: improve error messages --- xelis_common/src/rpc_server/websocket/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/xelis_common/src/rpc_server/websocket/mod.rs b/xelis_common/src/rpc_server/websocket/mod.rs index 5540cfb1..e24cf4da 100644 --- a/xelis_common/src/rpc_server/websocket/mod.rs +++ b/xelis_common/src/rpc_server/websocket/mod.rs @@ -66,8 +66,10 @@ pub enum WebSocketError { SessionClosed(#[from] actix_ws::Closed), #[error("this session was already closed")] SessionAlreadyClosed, - #[error("error while sending message, channel is closed")] - ChannelClosed, + #[error("error while sending message '{}', channel is closed", _0)] + ChannelClosed(String), + #[error("error while closing, channel is already closed")] + ChannelAlreadyClosed, 
#[error(transparent)] Elapsed(#[from] Elapsed), } @@ -93,7 +95,7 @@ where // Send a text message to the session pub async fn send_text>(self: &Arc, value: S) -> Result<(), WebSocketError> { self.channel.send(InnerMessage::Text(value.into())) - .map_err(|_| WebSocketError::ChannelClosed)?; + .map_err(|e| WebSocketError::ChannelClosed(e.to_string()))?; Ok(()) } @@ -127,7 +129,7 @@ where // Close the session pub async fn close(&self, reason: Option) -> Result<(), WebSocketError> { self.channel.send(InnerMessage::Close(reason)) - .map_err(|_| WebSocketError::ChannelClosed)?; + .map_err(|_| WebSocketError::ChannelAlreadyClosed)?; Ok(()) } From 49770bd0c1c0292342157d9bfaf9c940e125a7d5 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 8 Sep 2024 17:53:23 +0200 Subject: [PATCH 08/25] common: improve send error msg --- xelis_common/src/json_rpc/mod.rs | 4 ++-- xelis_common/src/json_rpc/websocket.rs | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/xelis_common/src/json_rpc/mod.rs b/xelis_common/src/json_rpc/mod.rs index 000b7bdc..0a96551a 100644 --- a/xelis_common/src/json_rpc/mod.rs +++ b/xelis_common/src/json_rpc/mod.rs @@ -74,6 +74,6 @@ pub enum JsonRPCError { SocketError(#[from] TungsteniteError), #[error(transparent)] Any(#[from] anyhow::Error), - #[error("Error while sending message: {}", _0)] - SendError(String) + #[error("Error while sending message '{}': {}", _0, _1)] + SendError(String, String) } \ No newline at end of file diff --git a/xelis_common/src/json_rpc/websocket.rs b/xelis_common/src/json_rpc/websocket.rs index f219d7a0..d0ced0f8 100644 --- a/xelis_common/src/json_rpc/websocket.rs +++ b/xelis_common/src/json_rpc/websocket.rs @@ -528,12 +528,14 @@ impl // Send a request to the sender channel that will be sent to the server async fn send_message_internal(&self, id: Option, method: &str, params: &P) -> JsonRPCResult<()> { let sender = self.sender.lock().await; - sender.send(InternalMessage::Send(serde_json::to_string(&json!({ + let 
value = json!({ "jsonrpc": JSON_RPC_VERSION, "method": method, "id": id, "params": params - }))?)).await.map_err(|e| JsonRPCError::SendError(e.to_string()))?; + }); + sender.send(InternalMessage::Send(serde_json::to_string(&value)?)).await + .map_err(|e| JsonRPCError::SendError(value.to_string(), e.to_string()))?; Ok(()) } From 491d92b7e3e4b74b19b22c1c251e5bdaffa74821 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 16 Sep 2024 10:32:55 +0200 Subject: [PATCH 09/25] common: BYTES_PER_KB --- xelis_common/src/config.rs | 7 +++++-- xelis_common/src/utils.rs | 8 +++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index 97ca7e73..6ab51806 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -26,12 +26,15 @@ pub const PREFIX_ADDRESS: &str = "xel"; // testnet prefix address pub const TESTNET_PREFIX_ADDRESS: &str = "xet"; +// 1 KB = 1024 bytes +pub const BYTES_PER_KB: usize = 1024; + // Max transaction size in bytes -pub const MAX_TRANSACTION_SIZE: usize = 1024 * 1024; // 1 MB +pub const MAX_TRANSACTION_SIZE: usize = BYTES_PER_KB * BYTES_PER_KB; // 1 MB // Max block size in bytes // 1024 * 1024 + (256 * 1024) bytes = 1.25 MB maximum size per block with txs -pub const MAX_BLOCK_SIZE: usize = (1024 * 1024) + (256 * 1024); +pub const MAX_BLOCK_SIZE: usize = (BYTES_PER_KB * BYTES_PER_KB) + (256 * BYTES_PER_KB); // BlockDAG rules pub const TIPS_LIMIT: usize = 3; // maximum 3 TIPS per block \ No newline at end of file diff --git a/xelis_common/src/utils.rs b/xelis_common/src/utils.rs index 69f94ce2..bf09e566 100644 --- a/xelis_common/src/utils.rs +++ b/xelis_common/src/utils.rs @@ -4,7 +4,8 @@ use crate::{ COIN_DECIMALS, FEE_PER_ACCOUNT_CREATION, FEE_PER_KB, - FEE_PER_TRANSFER + FEE_PER_TRANSFER, + BYTES_PER_KB }, difficulty::Difficulty, varuint::VarUint @@ -50,9 +51,10 @@ pub fn from_coin(value: impl Into, coin_decimals: u8) -> Option { // Sending to a newly created address will 
increase the fee // Each transfers output will also increase the fee pub fn calculate_tx_fee(tx_size: usize, output_count: usize, new_addresses: usize) -> u64 { - let mut size_in_kb = tx_size as u64 / 1024; + let mut size_in_kb = tx_size as u64 / BYTES_PER_KB as u64; - if tx_size % 1024 != 0 { // we consume a full kb for fee + // we consume a full kb for fee + if tx_size % BYTES_PER_KB != 0 { size_in_kb += 1; } From 715aee1d2117893c6f111927043c55b5927e1f6b Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 16 Sep 2024 10:42:23 +0200 Subject: [PATCH 10/25] daemon: simplify estimate_required_tx_fees --- xelis_daemon/src/core/blockchain.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index ec6e9391..24135bcb 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -2600,23 +2600,19 @@ impl Blockchain { // Estimate the required fees for a transaction // For V1, new keys are only counted one time for creation fee instead of N transfers to it -pub async fn estimate_required_tx_fees(provider: &P, current_topoheight: u64, tx: &Transaction, version: BlockVersion) -> Result { +pub async fn estimate_required_tx_fees(provider: &P, current_topoheight: u64, tx: &Transaction, _: BlockVersion) -> Result { let mut output_count = 0; - let mut new_addresses = 0; let mut processed_keys = HashSet::new(); if let TransactionType::Transfers(transfers) = tx.get_data() { output_count = transfers.len(); for transfer in transfers { if !provider.is_account_registered_at_topoheight(transfer.get_destination(), current_topoheight).await? 
{ - if version == BlockVersion::V0 || !processed_keys.contains(&transfer.get_destination()) { - new_addresses += 1; - processed_keys.insert(transfer.get_destination()); - } + processed_keys.insert(transfer.get_destination()); } } } - Ok(calculate_tx_fee(tx.size(), output_count, new_addresses)) + Ok(calculate_tx_fee(tx.size(), output_count, processed_keys.len())) } // Get the block reward for a side block based on how many side blocks exists at same height From a995e77912fec2442958db2ec88d36358ba85415 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 16 Sep 2024 10:54:39 +0200 Subject: [PATCH 11/25] common: FeeRatesEstimated struct --- xelis_common/src/api/daemon.rs | 14 ++++++++++++++ xelis_common/src/config.rs | 1 + 2 files changed, 15 insertions(+) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index e19ca91e..c9b740bd 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -514,6 +514,20 @@ pub struct GetMempoolCacheResult { balances: HashMap } +// This struct is used to store the fee rate estimation for the following priority levels: +// 1. Low +// 2. Medium +// 3. High +// Each priority is in fee per KB. It cannot be below `FEE_PER_KB` which is required by the network. 
+#[derive(Serialize, Deserialize)] +pub struct FeeRatesEstimated { + pub low: u64, + pub medium: u64, + pub high: u64, + // The minimum fee rate possible on the network + pub default: u64 +} + #[derive(Serialize, Deserialize)] pub struct GetDifficultyResult { pub difficulty: Difficulty, diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index 6ab51806..d7f073a1 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -3,6 +3,7 @@ use crate::crypto::Hash; pub const VERSION: &str = env!("BUILD_VERSION"); pub const XELIS_ASSET: Hash = Hash::zero(); +// Lowest fee per KB possible on the network // 0.00010000 XEL per KB pub const FEE_PER_KB: u64 = 10000; // 0.00100000 XEL per account creation From 76dfbf8c2edce4eca2ebde0f387b4692b3514652 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 16 Sep 2024 11:49:28 +0200 Subject: [PATCH 12/25] daemon: get_estimated_fee_rates rpc method --- xelis_daemon/src/core/mempool.rs | 118 +++++++++++++++++++++++++++++-- xelis_daemon/src/rpc/rpc.rs | 12 ++++ 2 files changed, 125 insertions(+), 5 deletions(-) diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index fde7d18a..7ba300cc 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -12,16 +12,18 @@ use serde::{Serialize, Deserialize}; use indexmap::IndexSet; use log::{debug, info, trace, warn}; use xelis_common::{ - time::{TimestampSeconds, get_current_time_in_seconds}, - network::Network, - serializer::Serializer, + config::{BYTES_PER_KB, FEE_PER_KB}, + api::daemon::FeeRatesEstimated, + block::BlockVersion, crypto::{ elgamal::Ciphertext, Hash, PublicKey }, - transaction::Transaction, - block::BlockVersion + network::Network, + serializer::Serializer, + time::{get_current_time_in_seconds, TimestampSeconds}, + transaction::Transaction }; // Wrap a TX with its hash and size in bytes for faster access @@ -51,6 +53,10 @@ pub struct AccountCache { balances: HashMap } +// Mempool is used to 
store all TXs waiting to be included in a block +// All TXs must be verified before adding them to the mempool +// Caches are used to store the nonce/order cache for each sender and their encrypted balances +// This is necessary to be fast enough to verify the TXs at each chain state change. pub struct Mempool { // Used for log purpose mainnet: bool, @@ -70,6 +76,55 @@ impl Mempool { } } + fn internal_estimate_fee_rates(mut fee_rates: Vec) -> FeeRatesEstimated { + let len = fee_rates.len(); + // Top 30% + let high_priority_count = len * 30 / 100; + // Next 40% + let normal_priority_count = len * 40 / 100; + + if len == 0 || high_priority_count == 0 || normal_priority_count == 0 { + return FeeRatesEstimated { + high: FEE_PER_KB, + medium: FEE_PER_KB, + low: FEE_PER_KB, + default: FEE_PER_KB + }; + } + + // Sort descending by fee rate + fee_rates.sort_by(|a, b| b.cmp(a)); + + let high: u64 = fee_rates[..high_priority_count] + .iter() + .sum::() / high_priority_count as u64; + + let medium: u64 = fee_rates[high_priority_count..(high_priority_count + normal_priority_count)] + .iter() + .sum::() / normal_priority_count as u64; + + let low: u64 = fee_rates[(high_priority_count + normal_priority_count)..] 
+ .iter() + .sum::() / (len - high_priority_count - normal_priority_count) as u64; + + FeeRatesEstimated { + high, + medium, + low, + default: FEE_PER_KB + } + } + + // Find the fee per kB rate estimation for the priority levels + // For this, we compute the average fee rate for each priority level + pub fn estimate_fee_rates(&self) -> Result { + let fee_rates: Vec<_> = self.txs.values() + .map(SortedTx::get_fee_rate_per_kb) + .collect(); + + Ok(Self::internal_estimate_fee_rates(fee_rates)) + } + // All checks are made in Blockchain before calling this function pub async fn add_tx(&mut self, storage: &S, stable_topoheight: u64, topoheight: u64, hash: Hash, tx: Arc, size: usize, block_version: BlockVersion) -> Result<(), BlockchainError> { let mut state = MempoolState::new(&self, storage, stable_topoheight, topoheight, block_version); @@ -468,22 +523,32 @@ impl Mempool { } impl SortedTx { + // Get the inner TX pub fn get_tx(&self) -> &Arc { &self.tx } + // Get the fee for this TX pub fn get_fee(&self) -> u64 { self.tx.get_fee() } + // Get the fee rate per kB for this TX + pub fn get_fee_rate_per_kb(&self) -> u64 { + self.get_fee() / (self.size as u64 / BYTES_PER_KB as u64) + } + + // Get the stored size of this TX pub fn get_size(&self) -> usize { self.size } + // Get the timestamp when this TX was added to mempool pub fn get_first_seen(&self) -> TimestampSeconds { self.first_seen } + // Consume the TX and return it pub fn consume(self) -> Arc { self.tx } @@ -550,4 +615,47 @@ impl AccountCache { let index = ((nonce - self.min) % (self.max + 1 - self.min)) as usize; self.txs.get_index(index) } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_estimated_fee_rates() { + // Let's say we have the following TXs: + // 0.0001 XEL per KB, 0.0002 XEL per KB, 0.0003 XEL per KB, 0.0004 XEL per KB, 0.0005 XEL per KB + let fee_rates = vec![10000, 20000, 30000, 40000, 50000]; + let estimated = super::Mempool::internal_estimate_fee_rates(fee_rates); + 
assert_eq!(estimated.high, 50000); + assert_eq!(estimated.medium, 35000); + assert_eq!(estimated.low, 15000); + assert_eq!(estimated.default, FEE_PER_KB); + } + + #[test] + fn test_estimated_fee_rates_no_tx() { + let estimated = super::Mempool::internal_estimate_fee_rates(Vec::new()); + assert_eq!(estimated.high, FEE_PER_KB); + assert_eq!(estimated.medium, FEE_PER_KB); + assert_eq!(estimated.low, FEE_PER_KB); + assert_eq!(estimated.default, FEE_PER_KB); + } + + #[test] + fn test_estimated_fee_rates_expensive_tx() { + let fee_rates = vec![FEE_PER_KB * 1000]; + let estimated = super::Mempool::internal_estimate_fee_rates(fee_rates); + assert_eq!(estimated.high, FEE_PER_KB); + assert_eq!(estimated.medium, FEE_PER_KB); + assert_eq!(estimated.low, FEE_PER_KB); + assert_eq!(estimated.default, FEE_PER_KB); + + let fee_rates = vec![FEE_PER_KB * 2, FEE_PER_KB * 2, FEE_PER_KB * 3, FEE_PER_KB * 2, FEE_PER_KB * 1000]; + let estimated = super::Mempool::internal_estimate_fee_rates(fee_rates); + assert_eq!(estimated.high, FEE_PER_KB * 1000); + assert_eq!(estimated.medium, (FEE_PER_KB as f64 * 2.5) as u64); + assert_eq!(estimated.low, FEE_PER_KB * 2); + assert_eq!(estimated.default, FEE_PER_KB); + } } \ No newline at end of file diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index f2d8d7df..0293da21 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -303,6 +303,7 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("p2p_status", async_handler!(p2p_status::)); handler.register_method("get_peers", async_handler!(get_peers::)); handler.register_method("get_mempool", async_handler!(get_mempool::)); + handler.register_method("get_estimated_fee_rates", async_handler!(get_estimated_fee_rates::)); handler.register_method("get_tips", async_handler!(get_tips::)); handler.register_method("get_dag_order", async_handler!(get_dag_order::)); handler.register_method("get_blocks_range_by_topoheight", 
async_handler!(get_blocks_range_by_topoheight::)); @@ -831,6 +832,17 @@ async fn get_mempool(context: &Context, body: Value) -> Result(context: &Context, body: Value) -> Result { + if body != Value::Null { + return Err(InternalRpcError::UnexpectedParams) + } + + let blockchain: &Arc> = context.get()?; + let mempool = blockchain.get_mempool().read().await; + let estimated = mempool.estimate_fee_rates()?; + Ok(json!(estimated)) +} + async fn get_blocks_at_height(context: &Context, body: Value) -> Result { let params: GetBlocksAtHeightParams = parse_params(body)?; let blockchain: &Arc> = context.get()?; From e836871fdfa438acafad96d9ed1bbddcfba76f02 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 16 Sep 2024 18:51:49 +0200 Subject: [PATCH 13/25] wallet: add tx hex debug --- xelis_wallet/src/api/rpc.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xelis_wallet/src/api/rpc.rs b/xelis_wallet/src/api/rpc.rs index 80638746..108a0c05 100644 --- a/xelis_wallet/src/api/rpc.rs +++ b/xelis_wallet/src/api/rpc.rs @@ -51,7 +51,7 @@ use crate::{ wallet::Wallet }; use super::xswd::XSWDWebSocketHandler; -use log::{info, warn}; +use log::{debug, info, warn}; // Register all RPC methods pub fn register_methods(handler: &mut RPCHandler>) { @@ -293,6 +293,7 @@ async fn build_transaction(context: &Context, body: Value) -> Result Date: Mon, 16 Sep 2024 19:00:32 +0200 Subject: [PATCH 14/25] daemon: add 'print_balance' debug command --- xelis_daemon/src/main.rs | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index a9bff9b8..9bed1501 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -217,6 +217,7 @@ async fn run_prompt(prompt: ShareablePrompt, blockchain: Arc))))?; command_manager.add_command(Command::new("list_unexecuted_transactions", "List all unexecuted transactions", CommandHandler::Async(async_handler!(list_unexecuted_transactions::))))?; 
command_manager.add_command(Command::new("swap_blocks_executions_positions", "Swap the position of two blocks executions", CommandHandler::Async(async_handler!(swap_blocks_executions_positions::))))?; + command_manager.add_command(Command::new("print_balance", "Print the encrypted balance at a specific topoheight", CommandHandler::Async(async_handler!(print_balance::))))?; // Don't keep the lock for ever let (p2p, getwork) = { @@ -448,6 +449,31 @@ async fn swap_blocks_executions_positions(manager: &CommandManager, Ok(()) } +async fn print_balance(manager: &CommandManager, _: ArgumentManager) -> Result<(), CommandError> { + let context = manager.get_context().lock()?; + let blockchain: &Arc> = context.get()?; + let prompt = manager.get_prompt(); + let storage = blockchain.get_storage().read().await; + + let address = prompt.read_input("Address: ", false).await + .context("Error while reading address")?; + let address = Address::from_string(&address) + .context("Invalid address")?; + + let topoheight: u64 = prompt.read("Topoheight: ").await + .context("Error while reading topoheight")?; + + let asset = prompt.read_hash("Asset (default XELIS): ").await.ok(); + let asset = asset.unwrap_or(XELIS_ASSET); + + let balance = storage.get_balance_at_exact_topoheight(&address.to_public_key(), &asset, topoheight).await + .context("Error while retrieving balance")?; + + manager.message(format!("{}", balance)); + + Ok(()) +} + async fn kick_peer(manager: &CommandManager, mut args: ArgumentManager) -> Result<(), CommandError> { let context = manager.get_context().lock()?; let blockchain: &Arc> = context.get()?; From 5723b8e895f826b41612290148efe6ede93578e0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Sep 2024 10:55:02 +0200 Subject: [PATCH 15/25] common: improve jsonrpc errors --- xelis_common/src/json_rpc/mod.rs | 10 ++++++---- xelis_common/src/json_rpc/websocket.rs | 6 +++--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/xelis_common/src/json_rpc/mod.rs 
b/xelis_common/src/json_rpc/mod.rs index 0a96551a..ebbcb924 100644 --- a/xelis_common/src/json_rpc/mod.rs +++ b/xelis_common/src/json_rpc/mod.rs @@ -56,10 +56,12 @@ pub enum JsonRPCError { message: String, data: Option, }, - #[error("Server did not respond to the request")] - NoResponse, - #[error("No response in the given time")] - TimedOut, + #[error("Server did not respond to the request '{}': {}", _0, _1)] + NoResponse(String, String), + #[error("Server returned no result")] + NoResult, + #[error("No response in the given time for '{}'", _0)] + TimedOut(String), #[error("Server returned a response without result")] MissingResult, #[error("Error while (de)serializing JSON data: {}", _0)] diff --git a/xelis_common/src/json_rpc/websocket.rs b/xelis_common/src/json_rpc/websocket.rs index d0ced0f8..22d2b71b 100644 --- a/xelis_common/src/json_rpc/websocket.rs +++ b/xelis_common/src/json_rpc/websocket.rs @@ -552,8 +552,8 @@ impl self.send_message_internal(Some(id), method, params).await?; let response = timeout(self.timeout_after, receiver).await - .or(Err(JsonRPCError::TimedOut))? - .or(Err(JsonRPCError::NoResponse))?; + .map_err(|_| JsonRPCError::TimedOut(json!(params).to_string()))? + .map_err(|e| JsonRPCError::NoResponse(json!(params).to_string(), e.to_string()))?; if let Some(error) = response.error { return Err(JsonRPCError::ServerError { @@ -563,7 +563,7 @@ impl }); } - let result = response.result.ok_or(JsonRPCError::NoResponse)?; + let result = response.result.ok_or(JsonRPCError::NoResult)?; Ok(serde_json::from_value(result)?) 
} From a1a45ba820326f931e11e728ea13054b77cabb19 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Sep 2024 11:29:44 +0200 Subject: [PATCH 16/25] common: delete failed requests --- xelis_common/src/json_rpc/websocket.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/xelis_common/src/json_rpc/websocket.rs b/xelis_common/src/json_rpc/websocket.rs index 22d2b71b..615443e1 100644 --- a/xelis_common/src/json_rpc/websocket.rs +++ b/xelis_common/src/json_rpc/websocket.rs @@ -549,6 +549,21 @@ impl requests.insert(id, sender); } + // Send the request to the server + // If the request fails, we remove it from the pending requests + match self.send_internal(method, id, params, receiver).await { + Ok(res) => Ok(res), + Err(e) => { + let mut requests = self.requests.lock().await; + debug!("Removing request with id {} from the pending requests due to its fail", id); + requests.remove(&id); + Err(e) + } + } + } + + // Send a request to the server and wait for the response + async fn send_internal(&self, method: &str, id: usize, params: &P, receiver: oneshot::Receiver) -> JsonRPCResult { self.send_message_internal(Some(id), method, params).await?; let response = timeout(self.timeout_after, receiver).await From 33212744e483381208ba5585221f7838ca00bc9d Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Sep 2024 11:38:03 +0200 Subject: [PATCH 17/25] common: add debug log --- xelis_common/src/json_rpc/websocket.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/xelis_common/src/json_rpc/websocket.rs b/xelis_common/src/json_rpc/websocket.rs index 615443e1..bf754480 100644 --- a/xelis_common/src/json_rpc/websocket.rs +++ b/xelis_common/src/json_rpc/websocket.rs @@ -426,7 +426,7 @@ impl continue; } } - + // Check if this ID corresponds to a event subscribed { let mut handlers = self.handler_by_id.lock().await; @@ -442,7 +442,9 @@ impl Message::Close(_) => { break; }, - _ => {} + m => { + debug!("Received unhandled message: {:?}", m); + } } } } 
From 49f63e72db4ac6d2fe4786fa22df0022c6148675 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 28 Sep 2024 14:21:42 +0200 Subject: [PATCH 18/25] common: allow nullable responses in rpc --- xelis_common/src/json_rpc/mod.rs | 2 -- xelis_common/src/json_rpc/websocket.rs | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/xelis_common/src/json_rpc/mod.rs b/xelis_common/src/json_rpc/mod.rs index ebbcb924..7042c418 100644 --- a/xelis_common/src/json_rpc/mod.rs +++ b/xelis_common/src/json_rpc/mod.rs @@ -58,8 +58,6 @@ pub enum JsonRPCError { }, #[error("Server did not respond to the request '{}': {}", _0, _1)] NoResponse(String, String), - #[error("Server returned no result")] - NoResult, #[error("No response in the given time for '{}'", _0)] TimedOut(String), #[error("Server returned a response without result")] diff --git a/xelis_common/src/json_rpc/websocket.rs b/xelis_common/src/json_rpc/websocket.rs index bf754480..28057c26 100644 --- a/xelis_common/src/json_rpc/websocket.rs +++ b/xelis_common/src/json_rpc/websocket.rs @@ -580,7 +580,7 @@ impl }); } - let result = response.result.ok_or(JsonRPCError::NoResult)?; + let result = response.result.unwrap_or(Value::Null); Ok(serde_json::from_value(result)?) 
} From ab34c7510e1bb578b34c811c34bf9cf5e2dfcd04 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 28 Sep 2024 14:22:42 +0200 Subject: [PATCH 19/25] wallet: remove context usage for rescan --- xelis_wallet/src/main.rs | 7 +++++-- xelis_wallet/src/wallet.rs | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 76839a77..e5aaeee9 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -973,8 +973,11 @@ async fn rescan(manager: &CommandManager, mut arguments: ArgumentManager) -> Res 0 }; - wallet.rescan(topoheight, true).await.context("error while restarting network handler")?; - manager.message("Network handler has been restarted!"); + if let Err(e) = wallet.rescan(topoheight, true).await { + manager.error(format!("Error while rescanning: {}", e)); + } else { + manager.message("Network handler has been restarted!"); + } Ok(()) } diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index f7ab10fb..84899d6b 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -941,7 +941,7 @@ impl Wallet { } } debug!("Starting again network handler"); - network_handler.start(auto_reconnect).await.context("Error while restarting network handler")?; + network_handler.start(auto_reconnect).await?; } else { return Err(WalletError::NotOnlineMode) } From 6ed863a91ac323e4107088732b830596a368e718 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 30 Sep 2024 20:42:49 +0200 Subject: [PATCH 20/25] daemon: prevent selecting TXs from orphaned block tips --- xelis_daemon/src/core/blockchain.rs | 51 +++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 24135bcb..e6b410db 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1507,6 +1507,44 @@ impl Blockchain { Ok(block) } + // Retrieve every orphaned blocks until a given height from the 
tips + async fn get_orphaned_blocks_for_tips_until_height(&self, storage: &S, tips: impl Iterator, height: u64) -> Result)>, BlockchainError> { + // Current queue of blocks to process + let mut queue: IndexSet = IndexSet::new(); + queue.extend(tips); + + let mut processed = HashSet::new(); + let mut orphaned_blocks = Vec::new(); + + while let Some(hash) = queue.pop() { + // if we already processed this block, skip it + if !processed.insert(hash.clone()) { + continue; + } + + // if the block is not orphaned, we add its tips to the queue + let block = storage.get_block_header_by_hash(&hash).await?; + if block.get_height() <= height { + continue; + } + + // if the block is orphaned, we add it to the list + if self.is_block_orphaned_for_storage(storage, &hash).await { + orphaned_blocks.push((hash.clone(), block.clone())); + } + + for tip in block.get_tips() { + if self.is_block_orphaned_for_storage(storage, &tip).await { + let block = storage.get_block_header_by_hash(&tip).await?; + orphaned_blocks.push((tip.clone(), block)); + queue.insert(tip.clone()); + } + } + } + + Ok(orphaned_blocks) + } + // Get the mining block template for miners // This function is called when a miner request a new block template // We create a block candidate with selected TXs from mempool @@ -1540,7 +1578,12 @@ impl Blockchain { // data used to verify txs let stable_topoheight = self.get_stable_topoheight(); + let stable_height = self.get_stable_height(); let topoheight = self.get_topo_height(); + + // Find all orphaned blocks that will be linked in this block + let orphaned_blocks = self.get_orphaned_blocks_for_tips_until_height(storage, block.get_tips().iter().cloned(), stable_height).await?; + trace!("build chain state for block template"); let mut chain_state = ChainState::new(storage, stable_topoheight, topoheight, block.get_version()); @@ -1550,6 +1593,14 @@ impl Blockchain { break; } + // We don't want to re-include a TX that is already in a TIP block, even if its not executed yet + 
for (block_hash, block) in orphaned_blocks.iter() { + if block.get_transactions().contains(hash.as_ref()) { + warn!("Skipping TX {} because it is included in tips {}", hash, block_hash); + continue; + } + } + if !self.skip_block_template_txs_verification { // Check if the TX is valid for this potential block trace!("Checking TX {} with nonce {}, {}", hash, tx.get_nonce(), tx.get_source().as_address(self.network.is_mainnet())); From ba42f2f516f4509471d8d0bbc3eef4f611db84bd Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 30 Sep 2024 20:44:16 +0200 Subject: [PATCH 21/25] daemon: search orphaned blocks only when needed --- xelis_daemon/src/core/blockchain.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index e6b410db..b60ec4db 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1582,7 +1582,7 @@ impl Blockchain { let topoheight = self.get_topo_height(); // Find all orphaned blocks that will be linked in this block - let orphaned_blocks = self.get_orphaned_blocks_for_tips_until_height(storage, block.get_tips().iter().cloned(), stable_height).await?; + let mut orphaned_blocks = None; trace!("build chain state for block template"); let mut chain_state = ChainState::new(storage, stable_topoheight, topoheight, block.get_version()); @@ -1593,11 +1593,19 @@ impl Blockchain { break; } - // We don't want to re-include a TX that is already in a TIP block, even if its not executed yet - for (block_hash, block) in orphaned_blocks.iter() { - if block.get_transactions().contains(hash.as_ref()) { - warn!("Skipping TX {} because it is included in tips {}", hash, block_hash); - continue; + if orphaned_blocks.is_none() { + let blocks = self.get_orphaned_blocks_for_tips_until_height(storage, block.get_tips().iter().cloned(), stable_height).await?; + warn!("Found {} orphaned blocks linked for block template", blocks.len()); + 
orphaned_blocks = Some(blocks); + } + + if let Some(orphaned_blocks) = orphaned_blocks.as_ref() { + // We don't want to re-include a TX that is already in a TIP block, even if its not executed yet + for (block_hash, block) in orphaned_blocks.iter() { + if block.get_transactions().contains(hash.as_ref()) { + warn!("Skipping TX {} because it is included in tips {}", hash, block_hash); + continue; + } } } From c729e8a35eae72c948204cb0f8c5501031b2315a Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 30 Sep 2024 21:16:09 +0200 Subject: [PATCH 22/25] daemon: bounds to provider needed only --- xelis_daemon/src/core/blockchain.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index b60ec4db..268478d4 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1508,7 +1508,7 @@ impl Blockchain { } // Retrieve every orphaned blocks until a given height from the tips - async fn get_orphaned_blocks_for_tips_until_height(&self, storage: &S, tips: impl Iterator, height: u64) -> Result)>, BlockchainError> { + async fn get_orphaned_blocks_for_tips_until_height(&self, provider: &P, tips: impl Iterator, height: u64) -> Result)>, BlockchainError> { // Current queue of blocks to process let mut queue: IndexSet = IndexSet::new(); queue.extend(tips); @@ -1523,19 +1523,19 @@ impl Blockchain { } // if the block is not orphaned, we add its tips to the queue - let block = storage.get_block_header_by_hash(&hash).await?; + let block = provider.get_block_header_by_hash(&hash).await?; if block.get_height() <= height { continue; } // if the block is orphaned, we add it to the list - if self.is_block_orphaned_for_storage(storage, &hash).await { + if self.is_block_orphaned_for_storage(provider, &hash).await { orphaned_blocks.push((hash.clone(), block.clone())); } for tip in block.get_tips() { - if self.is_block_orphaned_for_storage(storage, 
&tip).await { - let block = storage.get_block_header_by_hash(&tip).await?; + if self.is_block_orphaned_for_storage(provider, &tip).await { + let block = provider.get_block_header_by_hash(&tip).await?; orphaned_blocks.push((tip.clone(), block)); queue.insert(tip.clone()); } @@ -2405,15 +2405,15 @@ impl Blockchain { // Get the block reward for a block // This will search all blocks at same height and verify which one are side blocks - pub async fn get_block_reward(&self, storage: &S, hash: &Hash, past_supply: u64, current_topoheight: u64) -> Result { - let is_side_block = self.is_side_block(storage, hash).await?; + pub async fn get_block_reward(&self, provider: &P, hash: &Hash, past_supply: u64, current_topoheight: u64) -> Result { + let is_side_block = self.is_side_block(provider, hash).await?; let mut side_blocks_count = 0; if is_side_block { // get the block height for this hash - let height = storage.get_height_for_block_hash(hash).await?; - let blocks_at_height = storage.get_blocks_at_height(height).await?; + let height = provider.get_height_for_block_hash(hash).await?; + let blocks_at_height = provider.get_blocks_at_height(height).await?; for block in blocks_at_height { - if *hash != block && self.is_side_block_internal(storage, &block, current_topoheight).await? { + if *hash != block && self.is_side_block_internal(provider, &block, current_topoheight).await? 
{ side_blocks_count += 1; } } @@ -2469,13 +2469,13 @@ impl Blockchain { } // if a block is not ordered, it's an orphaned block and its transactions are not honoured - pub async fn is_block_orphaned_for_storage(&self, storage: &S, hash: &Hash) -> bool { + pub async fn is_block_orphaned_for_storage(&self, provider: &P, hash: &Hash) -> bool { trace!("is block {} orphaned", hash); - !storage.is_block_topological_ordered(hash).await + !provider.is_block_topological_ordered(hash).await } - pub async fn is_side_block(&self, storage: &S, hash: &Hash) -> Result { - self.is_side_block_internal(storage, hash, self.get_topo_height()).await + pub async fn is_side_block(&self, provider: &P, hash: &Hash) -> Result { + self.is_side_block_internal(provider, hash, self.get_topo_height()).await } // a block is a side block if its ordered and its block height is less than or equal to height of past 8 topographical blocks From c10d5a04f219efe7571a8fb2c187b33754946a6d Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 30 Sep 2024 21:35:52 +0200 Subject: [PATCH 23/25] daemon: simplify orphaned blocks search --- xelis_daemon/src/core/blockchain.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 268478d4..b9a973b5 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1519,26 +1519,22 @@ impl Blockchain { while let Some(hash) = queue.pop() { // if we already processed this block, skip it if !processed.insert(hash.clone()) { + debug!("Skipping block {} because it was already processed", hash); continue; } // if the block is not orphaned, we add its tips to the queue let block = provider.get_block_header_by_hash(&hash).await?; if block.get_height() <= height { + debug!("Block {} is not orphaned, skipping it", hash); continue; } - // if the block is orphaned, we add it to the list + // if the block is orphaned, we add it to the list and we check 
its tips if self.is_block_orphaned_for_storage(provider, &hash).await { + debug!("Block {} is orphaned, adding it to the list", hash); orphaned_blocks.push((hash.clone(), block.clone())); - } - - for tip in block.get_tips() { - if self.is_block_orphaned_for_storage(provider, &tip).await { - let block = provider.get_block_header_by_hash(&tip).await?; - orphaned_blocks.push((tip.clone(), block)); - queue.insert(tip.clone()); - } + queue.extend(block.get_tips().clone()); } } From 6c01a419646a194081d170c391a4e03c51a10dec Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 30 Sep 2024 22:08:20 +0200 Subject: [PATCH 24/25] misc: add v1.15.0 in changelog --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index db1bbc26..97d88e90 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,27 @@ This file contains all the changelogs to ensure that changes can be tracked and To see the full history and exact changes, please refer to the commits history directly. +## v1.15.0 + +Bug fixes for daemon & wallet, improvements & new features. + +Common: +- Improve error messages +- non-zero optional serializer fns +- allow nullable responses in RPC client + +Daemon: +- fix visual bugs for peerlist +- no connection to tempbanned peers +- `temp_ban_address` cli command +- `print_balance` cli command +- do not include txs present in orphaned tips +- `get_estimated_fee_rates` experimental rpc method + +Wallet: +- add `export_transactions` cli command +- more logs + ## v1.14.0 Moving to 1.14.0 due to breaking changes in peerlist storage. 
From 3998d6c61d39da7cdf09904422739db335eef99f Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 30 Sep 2024 22:09:07 +0200 Subject: [PATCH 25/25] all: update version to v1.15.0 --- Cargo.lock | 8 ++++---- xelis_common/Cargo.toml | 2 +- xelis_daemon/Cargo.toml | 2 +- xelis_miner/Cargo.toml | 2 +- xelis_wallet/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e44aaffc..20d34cc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3455,7 +3455,7 @@ dependencies = [ [[package]] name = "xelis_common" -version = "1.14.0" +version = "1.15.0" dependencies = [ "actix-rt", "actix-web", @@ -3498,7 +3498,7 @@ dependencies = [ [[package]] name = "xelis_daemon" -version = "1.14.0" +version = "1.15.0" dependencies = [ "actix", "actix-web", @@ -3530,7 +3530,7 @@ dependencies = [ [[package]] name = "xelis_miner" -version = "1.14.0" +version = "1.15.0" dependencies = [ "anyhow", "clap", @@ -3548,7 +3548,7 @@ dependencies = [ [[package]] name = "xelis_wallet" -version = "1.14.0" +version = "1.15.0" dependencies = [ "actix", "actix-web", diff --git a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index 0a2a68f7..8bfab196 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xelis_common" -version = "1.14.0" +version = "1.15.0" edition = "2021" authors = ["Slixe "] build = "build.rs" diff --git a/xelis_daemon/Cargo.toml b/xelis_daemon/Cargo.toml index aa23fff6..bf21d5ca 100644 --- a/xelis_daemon/Cargo.toml +++ b/xelis_daemon/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xelis_daemon" -version = "1.14.0" +version = "1.15.0" edition = "2021" authors = ["Slixe "] diff --git a/xelis_miner/Cargo.toml b/xelis_miner/Cargo.toml index c779d06b..27ae6d17 100644 --- a/xelis_miner/Cargo.toml +++ b/xelis_miner/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xelis_miner" -version = "1.14.0" +version = "1.15.0" edition = "2021" authors = ["Slixe "] diff --git a/xelis_wallet/Cargo.toml 
b/xelis_wallet/Cargo.toml index af513ddf..20a6763c 100644 --- a/xelis_wallet/Cargo.toml +++ b/xelis_wallet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xelis_wallet" -version = "1.14.0" +version = "1.15.0" edition = "2021" authors = ["Slixe "]