From 268b7c2cf26d6b06217f303e59b5848a989faa94 Mon Sep 17 00:00:00 2001 From: rustaceanrob Date: Sat, 1 Jun 2024 10:02:36 -1000 Subject: [PATCH] multi: [ci] add build and test [clippy] fix many errors --- .clippy.toml | 1 + .github/workflows/ci.yml | 35 +++++++++++++++ src/chain/chain.rs | 47 ++++++++------------ src/chain/checkpoints.rs | 2 +- src/chain/header_batch.rs | 2 +- src/chain/header_chain.rs | 26 ++++++----- src/chain/mod.rs | 1 + src/db/sqlite/header_db.rs | 8 ++-- src/db/sqlite/peer_db.rs | 2 +- src/db/traits.rs | 4 +- src/filters/cfheader_batch.rs | 6 +-- src/filters/cfheader_chain.rs | 6 +-- src/node/client.rs | 5 +-- src/node/mod.rs | 1 + src/node/node.rs | 84 +++++++++++++---------------------- src/node/node_messages.rs | 4 +- src/peers/peer.rs | 24 +++++----- src/prelude.rs | 10 ++--- src/tx/memory.rs | 2 +- 19 files changed, 138 insertions(+), 132 deletions(-) create mode 100644 .clippy.toml create mode 100644 .github/workflows/ci.yml diff --git a/.clippy.toml b/.clippy.toml new file mode 100644 index 0000000..56ce04e --- /dev/null +++ b/.clippy.toml @@ -0,0 +1 @@ +msrv = "1.56.1" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..be26670 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,35 @@ +name: Build & Test + +on: + push: + branches: + - master + pull_request: + +jobs: + node: + runs-on: ubuntu-latest + strategy: + matrix: + # Minumum Supported Rust Version (MSRV) is 1.56.1. + toolchain: [1.56.1, stable, beta, nightly] + steps: + - uses: actions/checkout@v3 + - name: Update Toolchain + run: | + rustup default ${{ matrix.toolchain }} + rustup component add --toolchain ${{ matrix.toolchain }} rustfmt + rustup component add --toolchain ${{ matrix.toolchain }} clippy + rustup update ${{ matrix.toolchain }} + - name: Lint + run: | + cargo clippy --package kyoto_light_client --all-targets + - name: Format + run: | + cargo fmt --package kyoto_light_client -- --check + - name: Build + run: | + cargo build --package kyoto_light_client --verbose + - name: Test + run: | + cargo test --package kyoto_light_client --verbose diff --git a/src/chain/chain.rs b/src/chain/chain.rs index 6295510..1a5ea19 100644 --- a/src/chain/chain.rs +++ b/src/chain/chain.rs @@ -28,7 +28,7 @@ use crate::{ }, node::{dialog::Dialog, node_messages::NodeMessage}, prelude::{params_from_network, MEDIAN_TIME_PAST}, - tx::{memory::MemoryTransactionCache, store::TransactionStore, types::IndexedTransaction}, + tx::types::IndexedTransaction, }; pub(crate) struct Chain { @@ -41,7 +41,6 @@ pub(crate) struct Chain { best_known_height: Option, scripts: HashSet, block_queue: BlockQueue, - tx_store: MemoryTransactionCache, dialog: Dialog, } @@ -49,7 +48,6 @@ impl Chain { pub(crate) async fn new( network: &Network, scripts: HashSet, - tx_store: MemoryTransactionCache, anchor: HeaderCheckpoint, mut checkpoints: HeaderCheckpoints, mut dialog: Dialog, @@ -103,7 +101,6 @@ impl Chain { best_known_height: None, scripts, block_queue: BlockQueue::new(), - tx_store, dialog, }) } @@ -174,11 +171,7 @@ impl Chain { // Do we have best known height and is our height equal to it pub(crate) fn is_synced(&self) -> bool { if let Some(height) = self.best_known_height { - if (self.height() as u32).ge(&height) { - true - } else { - false - } + (self.height() as u32).ge(&height) } else { false } @@ -313,6 +306,7 @@ impl Chain { "Peer is sending us malicious headers, restarting header sync.".into(), ) .await; + // We assume that this would be so rare that we just clear the whole header 
chain self.header_chain.clear_all(); return Err(HeaderSyncError::InvalidCheckpoint); } @@ -335,7 +329,7 @@ impl Chain { .inner() .iter() .filter(|header| !self.contains_header(**header)) - .map(|a| *a) + .copied() .collect(); let challenge_chainwork = uncommon .iter() @@ -350,23 +344,23 @@ impl Chain { .eq(&stem.block_hash()) }); if let Some(stem) = stem_position { - let current_chainwork = self.chainwork_after_height(stem); + let current_chainwork = self.header_chain.chainwork_after_index(stem); if current_chainwork.lt(&challenge_chainwork) { self.dialog .send_dialog("Valid reorganization found".into()) .await; self.header_chain.extend(&uncommon); - return Ok(()); + Ok(()) } else { self.dialog .send_warning( "Peer sent us a fork with less work than the current chain".into(), ) .await; - return Err(HeaderSyncError::LessWorkFork); + Err(HeaderSyncError::LessWorkFork) } } else { - return Err(HeaderSyncError::FloatingHeaders); + Err(HeaderSyncError::FloatingHeaders) } } @@ -476,7 +470,7 @@ impl Chain { let mut filter = Filter::new(filter_message.filter, filter_message.block_hash); let expected_filter_hash = self.cf_header_chain.hash_at(&filter_message.block_hash); if let Some(ref_hash) = expected_filter_hash { - if filter.filter_hash().await.ne(&ref_hash) { + if filter.filter_hash().await.ne(ref_hash) { return Err(CFilterSyncError::MisalignedFilterHash); } } @@ -484,14 +478,14 @@ impl Chain { && filter .contains_any(&self.scripts) .await - .map_err(|e| CFilterSyncError::Filter(e))? + .map_err(CFilterSyncError::Filter)? { // Add to the block queue self.block_queue.add(filter_message.block_hash); self.dialog .send_dialog(format!( "Found script at block: {}", - filter_message.block_hash.to_string() + filter_message.block_hash )) .await; } @@ -503,7 +497,7 @@ impl Chain { Ok(None) } } else { - return Err(CFilterSyncError::UnrequestedStophash); + Err(CFilterSyncError::UnrequestedStophash) } } @@ -559,10 +553,10 @@ impl Chain { let height_of_block = self.height_of_hash(block.block_hash()).await; for tx in &block.txdata { if self.scan_inputs(&tx.input) || self.scan_outputs(&tx.output) { - self.tx_store - .add_transaction(&tx, height_of_block, &block.block_hash()) - .await - .unwrap(); + // self.tx_store + // .add_transaction(&tx, height_of_block, &block.block_hash()) + // .await + // .unwrap(); self.dialog .send_data(NodeMessage::Block(block.clone())) .await; @@ -574,23 +568,20 @@ impl Chain { ))) .await; self.dialog - .send_dialog(format!( - "Found transaction: {}", - tx.compute_txid().to_string() - )) + .send_dialog(format!("Found transaction: {}", tx.compute_txid())) .await; } } Ok(()) } - fn scan_inputs(&mut self, inputs: &Vec) -> bool { + fn scan_inputs(&mut self, inputs: &[TxIn]) -> bool { inputs .iter() .any(|input| self.scripts.contains(&input.script_sig)) } - fn scan_outputs(&mut self, inputs: &Vec) -> bool { + fn scan_outputs(&mut self, inputs: &[TxOut]) -> bool { inputs .iter() .any(|out| self.scripts.contains(&out.script_pubkey)) diff --git a/src/chain/checkpoints.rs b/src/chain/checkpoints.rs index 0a8be62..c1c1116 100644 --- a/src/chain/checkpoints.rs +++ b/src/chain/checkpoints.rs @@ -127,7 +127,7 @@ impl HeaderCheckpoints { cp_list.iter().for_each(|(height, hash)| { checkpoints.push_back(HeaderCheckpoint { height: *height, - hash: BlockHash::from_str(&hash).unwrap(), + hash: BlockHash::from_str(hash).unwrap(), }) }); let last = *checkpoints.back().unwrap(); diff --git a/src/chain/header_batch.rs b/src/chain/header_batch.rs index c01b17f..1ca971e 100644 --- a/src/chain/header_batch.rs 
+++ b/src/chain/header_batch.rs @@ -11,7 +11,7 @@ pub(crate) struct HeadersBatch { // This struct provides basic sanity checks and helper methods. impl HeadersBatch { pub(crate) fn new(batch: Vec<Header>
) -> Result { - if batch.len() < 1 { + if batch.is_empty() { return Err(HeadersBatchError::EmptyVec); } Ok(HeadersBatch { batch }) diff --git a/src/chain/header_chain.rs b/src/chain/header_chain.rs index b7d0fa6..55262bd 100644 --- a/src/chain/header_chain.rs +++ b/src/chain/header_chain.rs @@ -75,10 +75,7 @@ impl HeaderChain { .headers .iter() .position(|header| header.block_hash().eq(&blockhash)); - match offset_pos { - Some(index) => Some(self.anchor_checkpoint.height + index + 1), - None => None, - } + offset_pos.map(|index| self.anchor_checkpoint.height + index + 1) } // This header chain contains a block hash @@ -135,6 +132,17 @@ impl HeaderChain { } } + pub(crate) fn chainwork_after_index(&self, index: usize) -> Work { + let work = self + .headers + .iter() + .enumerate() + .filter(|(h, _)| h.gt(&index)) + .map(|(_, header)| header.work()) + .reduce(|acc, next| acc + next); + work.unwrap_or(self.chainwork()) + } + // Human readable chainwork pub(crate) fn log2_work(&self) -> f64 { let work = self @@ -142,10 +150,7 @@ impl HeaderChain { .iter() .map(|header| header.work().log2()) .reduce(|acc, next| acc + next); - match work { - Some(w) => w, - None => 0.0, - } + work.unwrap_or(0.0) } // The last 11 headers, if we have that many @@ -155,7 +160,7 @@ impl HeaderChain { .rev() .take(MEDIAN_TIME_PAST) .rev() - .map(|header_ref| (*header_ref).clone()) + .copied() .collect() } @@ -213,7 +218,7 @@ impl HeaderChain { } self.headers.extend(batch); } - reorged.iter().rev().map(|header| *header).collect() + reorged.iter().rev().copied().collect() } // Clear all the headers from our chain. Only to be used when a peer has feed us faulty checkpoints @@ -304,6 +309,7 @@ mod tests { chain.chainwork_after_height(190_001), block_190_002.work() + block_190_003.work() ); + assert_eq!(chain.chainwork_after_index(1), block_190_003.work()); assert_eq!(chain.tip(), block_190_003.block_hash()); } diff --git a/src/chain/mod.rs b/src/chain/mod.rs index ed31888..e134179 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -1,4 +1,5 @@ pub(crate) mod block_queue; +#[allow(clippy::module_inception)] pub(crate) mod chain; pub mod checkpoints; pub(crate) mod error; diff --git a/src/db/sqlite/header_db.rs b/src/db/sqlite/header_db.rs index 6a6cabc..f5a2d74 100644 --- a/src/db/sqlite/header_db.rs +++ b/src/db/sqlite/header_db.rs @@ -68,7 +68,7 @@ impl HeaderStore for SqliteHeaderDb { let stmt = "SELECT * FROM headers ORDER BY height"; let write_lock = self.conn.lock().await; let mut query = write_lock - .prepare(&stmt) + .prepare(stmt) .map_err(|_| HeaderDatabaseError::LoadError)?; let mut rows = query .query([]) @@ -119,7 +119,7 @@ impl HeaderStore for SqliteHeaderDb { Ok(headers) } - async fn write(&mut self, header_chain: &Vec
) -> Result<(), HeaderDatabaseError> { + async fn write<'a>(&mut self, header_chain: &'a [Header]) -> Result<(), HeaderDatabaseError> { let mut write_lock = self.conn.lock().await; let tx = write_lock .transaction() @@ -127,7 +127,7 @@ impl HeaderStore for SqliteHeaderDb { let count: u64 = tx .query_row("SELECT COUNT(*) FROM headers", [], |row| row.get(0)) .map_err(|_| HeaderDatabaseError::WriteError)?; - let adjusted_count = count.checked_sub(1).unwrap_or(0) + self.anchor_height; + let adjusted_count = count.saturating_sub(1) + self.anchor_height; for (height, header) in header_chain.iter().enumerate() { let adjusted_height = self.anchor_height + 1 + height as u64; if adjusted_height.ge(&(adjusted_count)) { @@ -145,7 +145,7 @@ impl HeaderStore for SqliteHeaderDb { "INSERT OR REPLACE INTO headers (height, block_hash, version, prev_hash, merkle_root, time, bits, nonce) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)" }; tx.execute( - &stmt, + stmt, params![ adjusted_height, hash, diff --git a/src/db/sqlite/peer_db.rs b/src/db/sqlite/peer_db.rs index 486063e..cd47440 100644 --- a/src/db/sqlite/peer_db.rs +++ b/src/db/sqlite/peer_db.rs @@ -172,7 +172,7 @@ impl SqlitePeerDb { if let Some(row) = rows.next()? { let ip_addr: String = row.get(0)?; let port: u16 = row.get(1)?; - lock.execute("DELETE FROM peers WHERE ip_addr = ?1", &[&ip_addr])?; + lock.execute("DELETE FROM peers WHERE ip_addr = ?1", [&ip_addr])?; let ip = ip_addr .parse::() .map_err(|_| rusqlite::Error::InvalidQuery)?; diff --git a/src/db/traits.rs b/src/db/traits.rs index 6d75ca1..76bb359 100644 --- a/src/db/traits.rs +++ b/src/db/traits.rs @@ -6,7 +6,7 @@ use super::error::HeaderDatabaseError; #[async_trait] pub(crate) trait HeaderStore { async fn load(&mut self) -> Result, HeaderDatabaseError>; - async fn write(&mut self, header_chain: &Vec
<Header>) -> Result<(), HeaderDatabaseError>; + async fn write<'a>(&mut self, header_chain: &'a [Header]) -> Result<(), HeaderDatabaseError>; } #[async_trait] @@ -14,7 +14,7 @@ impl HeaderStore for () { async fn load(&mut self) -> Result<Vec<Header>, HeaderDatabaseError> { Ok(Vec::new()) } - async fn write(&mut self, _header_chain: &Vec<Header>
) -> Result<(), HeaderDatabaseError> { + async fn write<'a>(&mut self, _header_chain: &'a [Header]) -> Result<(), HeaderDatabaseError> { Ok(()) } } diff --git a/src/filters/cfheader_batch.rs b/src/filters/cfheader_batch.rs index 0ea6c83..7e8548b 100644 --- a/src/filters/cfheader_batch.rs +++ b/src/filters/cfheader_batch.rs @@ -42,8 +42,8 @@ impl CFHeaderBatch { } } -impl Into for CFHeaders { - fn into(self) -> CFHeaderBatch { - CFHeaderBatch::new(self) +impl From for CFHeaderBatch { + fn from(val: CFHeaders) -> Self { + CFHeaderBatch::new(val) } } diff --git a/src/filters/cfheader_chain.rs b/src/filters/cfheader_chain.rs index 73780d7..9fc9392 100644 --- a/src/filters/cfheader_chain.rs +++ b/src/filters/cfheader_chain.rs @@ -52,7 +52,7 @@ impl CFHeaderChain { peer_id: u32, cf_headers: CFHeaderBatch, ) -> Result { - if self.merged_queue.get(&peer_id).is_some() { + if self.merged_queue.contains_key(&peer_id) { return Err(CFHeaderSyncError::UnexpectedCFHeaderMessage); } self.merged_queue.insert(peer_id, cf_headers.inner()); @@ -90,7 +90,7 @@ impl CFHeaderChain { } } // Made it through without finding any conflicts, we can extend the current chain by the reference - self.header_chain.extend_from_slice(&reference_peer); + self.header_chain.extend_from_slice(reference_peer); // Reset the merge queue self.merged_queue.clear(); Ok(AppendAttempt::Extended) @@ -142,7 +142,7 @@ impl CFHeaderChain { self.merged_queue.clear() } - pub(crate) async fn join(&mut self, headers: &Vec
) { + pub(crate) async fn join(&mut self, headers: &[Header]) { headers .iter() .zip(self.header_chain.iter().map(|(_, hash)| hash)) diff --git a/src/node/client.rs b/src/node/client.rs index 512b1c3..0cac387 100644 --- a/src/node/client.rs +++ b/src/node/client.rs @@ -89,9 +89,8 @@ impl Client { pub async fn wait_until_synced(&mut self) { loop { while let Some(message) = self.nrx.recv().await { - match message { - NodeMessage::Synced(_) => return, - _ => (), + if let NodeMessage::Synced(_) = message { + return; } } } diff --git a/src/node/mod.rs b/src/node/mod.rs index 9867d2b..e183a1c 100644 --- a/src/node/mod.rs +++ b/src/node/mod.rs @@ -4,6 +4,7 @@ pub mod client; pub mod config; pub(crate) mod dialog; pub mod error; +#[allow(clippy::module_inception)] pub mod node; pub mod node_messages; mod peer_map; diff --git a/src/node/node.rs b/src/node/node.rs index 29ad404..72e414f 100644 --- a/src/node/node.rs +++ b/src/node/node.rs @@ -31,7 +31,6 @@ use crate::{ filters::cfheader_chain::CFHeaderSyncResult, node::{error::PersistenceError, peer_map::PeerMap}, peers::dns::Dns, - tx::memory::MemoryTransactionCache, }; use super::{ @@ -107,15 +106,12 @@ impl Node { // Take the canonical Bitcoin addresses and map them to a script we can scan for let mut scripts = HashSet::new(); scripts.extend(addresses.iter().map(|address| address.script_pubkey())); - // An in-memory quick access to found transactions - let in_memory_cache = MemoryTransactionCache::new(); // A structured way to talk to the client let mut dialog = Dialog::new(ntx.clone()); // Build the chain let loaded_chain = Chain::new( &network, scripts, - in_memory_cache, checkpoint, checkpoints, dialog.clone(), @@ -172,7 +168,7 @@ impl Node { self.is_running .store(true, std::sync::atomic::Ordering::Relaxed); let (mtx, mut mrx) = mpsc::channel::(32); - let mut node_map = PeerMap::new(mtx, self.network.clone()); + let mut node_map = PeerMap::new(mtx, self.network); loop { self.advance_state().await; node_map.clean().await; @@ -253,7 +249,7 @@ impl Node { self.dialog.send_dialog(format!("[Peer {}]: inv", peer_thread.nonce)) .await; for block in blocks { - self.dialog.send_dialog(format!("New block: {}", block.to_string())) + self.dialog.send_dialog(format!("New block: {}", block)) .await; } node_map.add_one_height(peer_thread.nonce); @@ -279,7 +275,7 @@ impl Node { if let Some(message) = message { match message { ClientMessage::Shutdown => return Ok(()), - _ => (), + ClientMessage::Broadcast(tx) => drop(tx), } } } @@ -299,7 +295,6 @@ impl Node { header_chain.flush_to_disk().await; *state = NodeState::HeadersSynced; } - return; } NodeState::HeadersSynced => { let header_chain = self.chain.lock().await; @@ -309,7 +304,6 @@ impl Node { .await; *state = NodeState::FilterHeadersSynced; } - return; } NodeState::FilterHeadersSynced => { let header_chain = self.chain.lock().await; @@ -319,7 +313,6 @@ impl Node { .await; *state = NodeState::FiltersSynced; } - return; } NodeState::FiltersSynced => { let header_chain = self.chain.lock().await; @@ -333,9 +326,8 @@ impl Node { ))) .await; } - return; } - NodeState::TransactionsSynced => return, + NodeState::TransactionsSynced => (), } } @@ -379,8 +371,7 @@ impl Node { locators: chain.locators(), stop_hash: None, }; - let response = MainThreadMessage::GetHeaders(next_headers); - response + MainThreadMessage::GetHeaders(next_headers) } async fn handle_new_addrs(&mut self, new_peers: Vec
) { @@ -389,7 +380,7 @@ impl Node { self.dialog .send_warning(format!( "Encountered error adding peer to the database: {}", - e.to_string() + e )) .await; } @@ -411,10 +402,7 @@ impl Node { } _ => { self.dialog - .send_warning(format!( - "Unexpected header syncing error: {}", - e.to_string() - )) + .send_warning(format!("Unexpected header syncing error: {}", e)) .await; return Some(MainThreadMessage::Disconnect); } @@ -450,17 +438,17 @@ impl Node { CFHeaderSyncResult::ReadyForNext => { // We added a batch to the queue and still are not at the required height if !chain.is_cf_headers_synced() { - return Some(MainThreadMessage::GetFilterHeaders( + Some(MainThreadMessage::GetFilterHeaders( chain.next_cf_header_message().await.unwrap(), - )); + )) } else if !chain.is_filters_synced() { // The header chain and filter header chain are in sync, but we need filters still - return Some(MainThreadMessage::GetFilters( + Some(MainThreadMessage::GetFilters( chain.next_filter_message().await.unwrap(), - )); + )) } else { // Should be unreachable if we just added filter headers - return None; + None } } CFHeaderSyncResult::Dispute(_) => { @@ -477,10 +465,10 @@ impl Node { self.dialog .send_warning(format!( "Compact filter header syncing encountered an error: {}", - e.to_string() + e )) .await; - return Some(MainThreadMessage::Disconnect); + Some(MainThreadMessage::Disconnect) } } } @@ -488,15 +476,12 @@ impl Node { async fn handle_filter(&mut self, _peer_id: u32, filter: CFilter) -> Option { let mut chain = self.chain.lock().await; match chain.sync_filter(filter).await { - Ok(potential_message) => match potential_message { - Some(message) => Some(MainThreadMessage::GetFilters(message)), - None => None, - }, + Ok(potential_message) => potential_message.map(MainThreadMessage::GetFilters), Err(e) => { self.dialog .send_warning(format!( "Compact filter syncing encountered an error: {}", - e.to_string() + e )) .await; Some(MainThreadMessage::Disconnect) @@ -517,10 +502,7 @@ impl Node { NodeState::FiltersSynced => { if let Err(e) = chain.scan_block(&block).await { self.dialog - .send_warning(format!( - "Unexpected block scanning error: {}", - e.to_string() - )) + .send_warning(format!("Unexpected block scanning error: {}", e)) .await; } None @@ -539,7 +521,7 @@ impl Node { match next_block_hash { Some(block_hash) => { self.dialog - .send_dialog(format!("Next block in queue: {}", block_hash.to_string())) + .send_dialog(format!("Next block in queue: {}", block_hash)) .await; Some(MainThreadMessage::GetBlock(GetBlockConfig { locator: block_hash, @@ -555,7 +537,7 @@ impl Node { async fn handle_inventory_blocks(&mut self, new_height: u32) -> Option { let mut state = self.state.write().await; match *state { - NodeState::Behind => return None, + NodeState::Behind => None, _ => { *state = NodeState::Behind; let mut chain = self.chain.lock().await; @@ -567,7 +549,7 @@ impl Node { chain.set_best_known_height(new_height).await; } chain.clear_filter_header_queue(); - return Some(MainThreadMessage::GetHeaders(next_headers)); + Some(MainThreadMessage::GetHeaders(next_headers)) } } } @@ -584,7 +566,7 @@ impl Node { Some(peer) => Ok(peer), None => self.any_peer().await, }, - Err(e) => return Err(e), + Err(e) => Err(e), }, } // self.any_peer().await @@ -605,16 +587,13 @@ impl Node { async fn any_peer(&mut self) -> Result<(IpAddr, Option), NodeError> { // Rmpty the whitelist, if there is one if let Some(whitelist) = &mut self.white_list { - match whitelist.pop() { - Some((ip, port)) => { - return { - self.dialog - 
.send_dialog("Using a peer from the white list".into()) - .await; - Ok((ip, Some(port))) - } - } - None => (), + if let Some((ip, port)) = whitelist.pop() { + return { + self.dialog + .send_dialog("Using a peer from the white list".into()) + .await; + Ok((ip, Some(port))) + }; } } let mut chain = self.peer_db.lock().await; @@ -627,10 +606,7 @@ impl Node { // We found some peer to use but may not be reachable Some(peer) => { self.dialog - .send_dialog(format!( - "Loaded peer from the database {}", - peer.0.to_string() - )) + .send_dialog(format!("Loaded peer from the database {}", peer.0)) .await; Ok((peer.0, Some(peer.1))) } @@ -648,7 +624,7 @@ impl Node { self.dialog .send_warning(format!( "Encountered error adding a peer to the database: {}", - e.to_string() + e )) .await; } diff --git a/src/node/node_messages.rs b/src/node/node_messages.rs index 695fbe1..7c97784 100644 --- a/src/node/node_messages.rs +++ b/src/node/node_messages.rs @@ -3,7 +3,7 @@ pub use bitcoin::{Block, Transaction}; use crate::{chain::checkpoints::HeaderCheckpoint, tx::types::IndexedTransaction}; /// Messages receivable by a running node -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum NodeMessage { /// A human readable dialog Dialog(String), @@ -18,7 +18,7 @@ pub enum NodeMessage { } /// Commands to issue a node -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum ClientMessage { /// Stop the node Shutdown, diff --git a/src/peers/peer.rs b/src/peers/peer.rs index fecdbdc..ab9c03b 100644 --- a/src/peers/peer.rs +++ b/src/peers/peer.rs @@ -83,10 +83,8 @@ impl Peer { let mut peer_reader = Reader::new(reader, tx, self.network); let read_handle = tokio::spawn(async move { match peer_reader.read_from_remote().await { - Ok(_) => return Ok(()), - Err(_) => { - return Err(PeerError::Reader); - } + Ok(_) => Ok(()), + Err(_) => Err(PeerError::Reader), } }); loop { @@ -158,7 +156,7 @@ impl Peer { // .await // .map_err(|_| PeerError::BufferWrite)?; // can ask for addresses here depending on if we need them - return Ok(()); + Ok(()) } PeerMessage::Addr(addrs) => { self.main_thread_sender @@ -168,7 +166,7 @@ impl Peer { }) .await .map_err(|_| PeerError::ThreadChannel)?; - return Ok(()); + Ok(()) } PeerMessage::Headers(headers) => { self.main_thread_sender @@ -178,7 +176,7 @@ impl Peer { }) .await .map_err(|_| PeerError::ThreadChannel)?; - return Ok(()); + Ok(()) } PeerMessage::FilterHeaders(cf_headers) => { self.main_thread_sender @@ -188,7 +186,7 @@ impl Peer { }) .await .map_err(|_| PeerError::ThreadChannel)?; - return Ok(()); + Ok(()) } PeerMessage::Filter(filter) => { self.main_thread_sender @@ -198,7 +196,7 @@ impl Peer { }) .await .map_err(|_| PeerError::ThreadChannel)?; - return Ok(()); + Ok(()) } PeerMessage::Block(block) => { self.main_thread_sender @@ -208,7 +206,7 @@ impl Peer { }) .await .map_err(|_| PeerError::ThreadChannel)?; - return Ok(()); + Ok(()) } PeerMessage::NewBlocks(block_hashes) => { self.main_thread_sender @@ -218,7 +216,7 @@ impl Peer { }) .await .map_err(|_| PeerError::ThreadChannel)?; - return Ok(()); + Ok(()) } PeerMessage::Verack => Ok(()), PeerMessage::Ping(nonce) => { @@ -237,7 +235,7 @@ impl Peer { }) .await .map_err(|_| PeerError::ThreadChannel)?; - return Err(PeerError::DisconnectCommand); + Err(PeerError::DisconnectCommand) } } } @@ -296,7 +294,7 @@ pub(crate) struct PeerConfig { pub(crate) enum FindAddresses { None, - CPF, + Cpf, Any, } diff --git a/src/prelude.rs b/src/prelude.rs index 5583a07..7c43396 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -54,23 +54,21 @@ impl Median for Vec 
{ } pub(crate) fn params_from_network(network: &Network) -> Params { - let params = match network { + match network { Network::Bitcoin => panic!("unimplemented network"), Network::Testnet => Params::new(*network), Network::Signet => Params::new(*network), Network::Regtest => Params::new(*network), _ => unreachable!(), - }; - params + } } pub(crate) fn default_port_from_network(network: &Network) -> u16 { - let default_port = match network { + match network { Network::Bitcoin => 8333, Network::Testnet => 18333, Network::Signet => 38333, Network::Regtest => 18444, _ => unreachable!(), - }; - default_port + } } diff --git a/src/tx/memory.rs b/src/tx/memory.rs index 8b66fc8..f0e166e 100644 --- a/src/tx/memory.rs +++ b/src/tx/memory.rs @@ -27,7 +27,7 @@ impl TransactionStore for MemoryTransactionCache { self.transactions.push(IndexedTransaction { transaction: transaction.clone(), height, - hash: hash.clone(), + hash: *hash, }); Ok(()) }
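A note on the Into -> From swap in src/filters/cfheader_batch.rs above: clippy's from_over_into lint prefers implementing From because the standard library's blanket impl<T, U> Into<U> for T where U: From<T> derives the Into direction automatically, while implementing only Into gives nothing in return. A minimal, self-contained sketch of the same pattern, using placeholder types Raw and Wrapped rather than anything from this crate:

struct Raw(u32);
struct Wrapped {
    inner: u32,
}

// Implementing `From` on the target type...
impl From<Raw> for Wrapped {
    fn from(val: Raw) -> Self {
        Wrapped { inner: val.0 }
    }
}

fn main() {
    // ...also provides `Into` through the std blanket impl, so existing
    // `.into()` call sites keep compiling after the swap.
    let a = Wrapped::from(Raw(1));
    let b: Wrapped = Raw(2).into();
    assert_eq!(a.inner + b.inner, 3);
}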