From 8db4c8405bf48d5fa47b4d681bc6b3da61cf6e51 Mon Sep 17 00:00:00 2001 From: Shigeru Myamoto Date: Thu, 19 Dec 2024 20:22:01 -0300 Subject: [PATCH] Fix all typos (#313) Fix all typos in both docstrings and identifiers in all crates --- .../src/pruned_utreexo/chainparams.rs | 2 +- .../floresta-chain/src/pruned_utreexo/mod.rs | 6 ++-- .../src/pruned_utreexo/partial_chain.rs | 22 ++++++------- crates/floresta-cli/src/rpc.rs | 6 ++-- crates/floresta-cli/src/rpc_types.rs | 26 +++++++-------- crates/floresta-common/src/spsc.rs | 6 ++-- .../src/flat_filters_store.rs | 24 +++++++------- crates/floresta-compact-filters/src/lib.rs | 32 +++++++++---------- .../src/network_filters.rs | 16 +++++----- .../src/electrum_protocol.rs | 8 ++--- crates/floresta-watch-only/src/lib.rs | 2 +- crates/floresta-wire/src/cli_wire/mod.rs | 4 +-- .../src/p2p_wire/chain_selector.rs | 16 +++++----- crates/floresta-wire/src/p2p_wire/error.rs | 6 ++-- .../src/p2p_wire/node_context.rs | 4 +-- .../src/p2p_wire/running_node.rs | 2 +- .../floresta-wire/src/p2p_wire/sync_node.rs | 2 +- .../p2p_wire/tests/test_data/block_hashes.txt | 2 +- crates/floresta/src/lib.rs | 2 +- florestad/docs/tutorial(PT-BR).md | 2 +- florestad/src/cli.rs | 2 +- florestad/src/florestad.rs | 2 +- florestad/src/json_rpc/server.rs | 2 +- florestad/src/zmq.rs | 2 +- tests/example_test.py | 2 +- tests/prepare.sh | 6 ++-- 26 files changed, 103 insertions(+), 103 deletions(-) diff --git a/crates/floresta-chain/src/pruned_utreexo/chainparams.rs b/crates/floresta-chain/src/pruned_utreexo/chainparams.rs index 32f83d22..34beec1d 100644 --- a/crates/floresta-chain/src/pruned_utreexo/chainparams.rs +++ b/crates/floresta-chain/src/pruned_utreexo/chainparams.rs @@ -37,7 +37,7 @@ pub struct ChainParams { } /// A dns seed is a authoritative DNS server that returns the IP addresses of nodes that are -/// likely to be accepting incoming connections. This is our prefered way of finding new peers +/// likely to be accepting incoming connections. This is our preferred way of finding new peers /// on the first startup, as peers returned by seeds are likely to be online and accepting /// connections. We may use this as a fallback if we don't have any peers to connect in /// subsequent startups. diff --git a/crates/floresta-chain/src/pruned_utreexo/mod.rs b/crates/floresta-chain/src/pruned_utreexo/mod.rs index f1dded82..0822074d 100644 --- a/crates/floresta-chain/src/pruned_utreexo/mod.rs +++ b/crates/floresta-chain/src/pruned_utreexo/mod.rs @@ -54,7 +54,7 @@ pub trait BlockchainInterface { /// Register for receiving notifications for some event. Right now it only works for /// new blocks, but may work with transactions in the future too. /// if a module performs some heavy-lifting on the block's data, it should pass in a - /// vector or a channel where data can be transfered to the atual worker, otherwise + /// vector or a channel where data can be transferred to the atual worker, otherwise /// chainstate will be stuck for as long as you have work to do. fn subscribe(&self, tx: Arc); /// Tells whether or not we are on ibd @@ -136,7 +136,7 @@ pub trait UpdatableChainstate { /// Returns a partial chainstate from a range of blocks. /// /// [PartialChainState] is a simplified version of `ChainState` that is used during IBD. - /// It doesn't suport reorgs, only hold headers for a subset of blocks and isn't [Sync]. + /// It doesn't support reorgs, only hold headers for a subset of blocks and isn't [Sync]. 
/// The idea here is that you take a OS thread or some async task that will drive one /// [PartialChainState] to completion by downloading blocks inside that chainstate's range. /// If all goes right, it'll end without error, and you should mark blocks in this range as @@ -152,7 +152,7 @@ pub trait UpdatableChainstate { ) -> Result; /// Marks a chain as fully-valid /// - /// This mimics the behavour of checking every block before this block, and continues + /// This mimics the behaviour of checking every block before this block, and continues /// from this point fn mark_chain_as_assumed(&self, acc: Stump, tip: BlockHash) -> Result; } diff --git a/crates/floresta-chain/src/pruned_utreexo/partial_chain.rs b/crates/floresta-chain/src/pruned_utreexo/partial_chain.rs index 7c7579fc..c7950599 100644 --- a/crates/floresta-chain/src/pruned_utreexo/partial_chain.rs +++ b/crates/floresta-chain/src/pruned_utreexo/partial_chain.rs @@ -3,9 +3,9 @@ //! and then merge them together to get the full chain. This allows us to make //! Initial Block Download in parallel. //! -//! We use a [PartialChainState] insted of the useal ChainState, mainly for +//! We use a [PartialChainState] instead of the usual ChainState, mainly for //! performance. Because we assume that only one worker will hold a [PartialChainState] -//! at a given time, we can drop all syncronization primitives and make a really performatic +//! at a given time, we can drop all synchronization primitives and make a really performant //! ChainState that will consume and validate blocks as fast as we possibly can. //! //! This choice removes the use of costly atomic operations, but opens space for design flaws @@ -15,7 +15,7 @@ //! - Shared ownership is forbidden: if you have two threads or tasks owning this, you'll have //! data race. If you want to hold shared ownership for this module, you need to place a //! [PartialChainState] inside an `Arc` yourself. Don't just Arc this and expect it to -//! work, as you are garanteed to have data races. +//! work, as you are guaranteed to have data races. //! - The interior is toxic, so no peeking: no references, mutable or not, to any field should //! leak through the API, as we are not enforcing lifetime or borrowing rules at compile time. //! - Sending is fine: There's nothing in this module that makes it not sendable to between @@ -62,7 +62,7 @@ pub(crate) struct PartialChainStateInner { /// result in an error. pub(crate) final_height: u32, /// The error that occurred during validation, if any. It is here so we can - /// pull that afterwords. + /// pull that afterwards. pub(crate) error: Option, /// The consensus parameters, we need this to validate the blocks. pub(crate) consensus: Consensus, @@ -83,14 +83,14 @@ pub(crate) struct PartialChainStateInner { /// We could just use a mutex, but this is not required and very wateful. Partial chains /// differ from the normal chain because they only have one owner, the worker responsible /// for driving this chain to it's completion. Because of that, we can simply use a UnsafeCell -/// and forbit shared access between threads by not implementing [Clone]. +/// and forbid shared access between threads by not implementing [Clone]. pub struct PartialChainState(pub(crate) UnsafeCell); /// We need to send [PartialChainState] between threads/tasks, because the worker thread, once it /// finishes, needs to notify the main task and pass the final partial chain.
/// # Safety /// -/// All itens inside the [UnsafeCell] are [Send], most importantly, there are no references or +/// All items inside the [UnsafeCell] are [Send], most importantly, there are no references or /// smart pointers inside it, so sending shouldn't be a problem. unsafe impl Send for PartialChainState {} unsafe impl Sync for PartialChainState {} @@ -255,14 +255,14 @@ impl PartialChainStateInner { } impl PartialChainState { - /// Borrows the inner content as immutable referece. + /// Borrows the inner content as immutable reference. /// /// # Safety /// We can assume this [UnsafeCell] is initialized because the only way to get a /// [PartialChainState] is through our APIs, and we make sure this [UnsafeCell] is /// always valid. /// The reference returned here **should not** leak through the API, as there's no - /// syncronization mechanims for it. + /// synchronization mechanisms for it. #[inline(always)] #[must_use] #[doc(hidden)] @@ -270,14 +270,14 @@ impl PartialChainState { unsafe { self.0.get().as_ref().expect("this pointer is valid") } } - /// Borrows the inner content as a mutable referece. + /// Borrows the inner content as a mutable reference. /// /// # Safety /// We can assume this [UnsafeCell] is initialized because the only way to get a /// [PartialChainState] is through our APIs, and we make sure this [UnsafeCell] is /// always valid. /// The reference returned here **should not** leak through the API, as there's no - /// syncronization mechanims for it. + /// synchronization mechanisms for it. #[inline(always)] #[allow(clippy::mut_from_ref)] #[must_use] @@ -471,7 +471,7 @@ impl BlockchainInterface for PartialChainState { } fn subscribe(&self, _tx: sync::Arc) { - unimplemented!("partialChainState::subscibe") + unimplemented!("partialChainState::subscribe") } fn estimate_fee(&self, _target: usize) -> Result { diff --git a/crates/floresta-cli/src/rpc.rs b/crates/floresta-cli/src/rpc.rs index 451baa03..951c610d 100644 --- a/crates/floresta-cli/src/rpc.rs +++ b/crates/floresta-cli/src/rpc.rs @@ -19,7 +19,7 @@ pub trait FlorestaRPC { /// designed for efficient light client synchronization. This method returns the filter /// for a given block height, encoded as a hexadecimal string. /// You need to have enabled block filters by setting the `blockfilters=1` option - fn get_block_filter(&self, heigth: u32) -> Result; + fn get_block_filter(&self, height: u32) -> Result; /// Returns general information about the chain we are on /// /// This method returns a bunch of information about the chain we are on, including @@ -214,8 +214,8 @@ impl FlorestaRPC for T { self.call("loaddescriptor", &[Value::String(descriptor)]) } - fn get_block_filter(&self, heigth: u32) -> Result { - self.call("getblockfilter", &[Value::Number(Number::from(heigth))]) + fn get_block_filter(&self, height: u32) -> Result { + self.call("getblockfilter", &[Value::Number(Number::from(height))]) } fn get_block_header(&self, hash: BlockHash) -> Result { diff --git a/crates/floresta-cli/src/rpc_types.rs b/crates/floresta-cli/src/rpc_types.rs index 98afc4d0..e9ddda0c 100644 --- a/crates/floresta-cli/src/rpc_types.rs +++ b/crates/floresta-cli/src/rpc_types.rs @@ -61,7 +61,7 @@ pub struct RawTx { pub size: u32, /// The virtual size of this transaction, as define by the segwit soft-fork pub vsize: u32, - /// The weight of this transacion, as defined by the segwit soft-fork + /// The weight of this transaction, as defined by the segwit soft-fork pub weight: u32, /// This transaction's version.
The current bigger version is 2 pub version: u32, @@ -93,7 +93,7 @@ pub struct TxOut { pub value: u64, /// This utxo's index inside the transaction pub n: u32, - /// The loking script of this utxo + /// The locking script of this utxo pub script_pub_key: ScriptPubKey, } @@ -159,7 +159,7 @@ pub struct PeerInfo { /// User agent is a string that represents the client being used by our peer. E.g. /// /Satoshi-26.0/ for bitcoin core version 26 pub user_agent: String, - /// This peer's height at the time we've openned a connection with them + /// This peer's height at the time we've opened a connection with them pub initial_height: u32, /// The connection type of this peer /// @@ -197,7 +197,7 @@ pub struct GetBlockRes { /// /// Currently, blocks have version 2 (see BIP34), but it may also flip some of the LSB for /// either consensus reason (see BIPs 8 and 9) or for version rolling mining, usually bits - /// after the 24th are not touched. Therefore, the actual version is likelly the result of + /// after the 24th are not touched. Therefore, the actual version is likely the result of /// version & ~(1 << 24). /// This is encoded as a number, see `version_hex` for a hex-encoded version pub version: i32, @@ -213,13 +213,13 @@ pub struct GetBlockRes { pub merkleroot: String, /// A list of hex-encoded transaction id for the tx's in this block pub tx: Vec, - /// The timestamp commited to in this block's header + /// The timestamp committed to in this block's header /// /// Since there's no central clock that can tell time precisely in Bitcoin, this value is /// reported by miners and only constrained by a couple of consensus rules. More sensibly, it - /// is **not** garanteed to be monotonical. So a block n might have a lower timestamp than + /// is **not** guaranteed to be monotonic. So a block n might have a lower timestamp than /// block `n - 1`. - /// If you need it to be monotonical, see `mediantime` insted + /// If you need it to be monotonic, see `mediantime` instead pub time: u32, /// The meadian of the last 11 blocktimes. /// @@ -234,13 +234,13 @@ pub struct GetBlockRes { pub nonce: u32, /// Bits is a compact representation for the target. /// - /// This is a exponential format (with well-define rouding) used by openssl that Satoshi + /// This is an exponential format (with well-defined rounding) used by openssl that Satoshi /// decided to make consensus critical :/ pub bits: String, /// The difficulty is derived from the current target and is defined as how many hashes, on /// average, one has to make before finding a valid block /// - /// This is computed as 1 / (target / 2 ^ 256). In most softwares (this one inclued) the + /// This is computed as 1 / (target / 2 ^ 256). In most software (this one included) the /// difficulty is a multiple of the smallest possible difficulty. So to find the actual /// difficulty you have to multiply this by the min_diff.
/// For mainnet, mindiff is 2 ^ 32 @@ -251,10 +251,10 @@ pub struct GetBlockRes { pub chainwork: String, /// How many transactions in this block pub n_tx: usize, - /// The hash of the block comming before this one + /// The hash of the block coming before this one pub previousblockhash: String, #[serde(skip_serializing_if = "Option::is_none")] - /// The hash of the block comming after this one, if any + /// The hash of the block coming after this one, if any pub nextblockhash: Option, } @@ -269,7 +269,7 @@ pub enum Error { /// An error internal to our jsonrpc server Api(serde_json::Value), /// The server sent an empty response - EmtpyResponse, + EmptyResponse, } impl From for Error { @@ -292,7 +292,7 @@ impl Display for Error { Error::JsonRpc(e) => write!(f, "JsonRpc returned an error {e}"), Error::Api(e) => write!(f, "general jsonrpc error: {e}"), Error::Serde(e) => write!(f, "error while deserializing the response: {e}"), - Error::EmtpyResponse => write!(f, "got an empty response from server"), + Error::EmptyResponse => write!(f, "got an empty response from server"), } } } diff --git a/crates/floresta-common/src/spsc.rs b/crates/floresta-common/src/spsc.rs index 4dcb9533..0a6f321e 100644 --- a/crates/floresta-common/src/spsc.rs +++ b/crates/floresta-common/src/spsc.rs @@ -1,13 +1,13 @@ //! A no-std Single Producer, Single Consumer channel for unidirectional message exchange between //! modules. This module don't use anything from the standard lib and can be easily used in no-std -//! enviroments. We only use mem::take from [core]. +//! environments. We only use mem::take from [core]. use core::mem::take; use crate::prelude::Vec; /// A (Send + Sync) single producer, single consumer channel to notify modules about things. -/// The api is super minimalistic to reduce external dependecies, including from the std-lib +/// The api is super minimalistic to reduce external dependencies, including from the std-lib /// /// One notable difference from the standard mspc channel is that this channel's ends are't /// two different types, while this is possible, there's no reason to do that. Specially @@ -72,7 +72,7 @@ impl Channel { /// An iterator issued every time someone calls `recv`. /// -/// This iterator takes all itens available for reading in a channel +/// This iterator takes all items available for reading in a channel /// and lets the consumer iterate over them, without acquiring the lock /// every time (the mutex is only locked when `recv` is called). 
/// diff --git a/crates/floresta-compact-filters/src/flat_filters_store.rs b/crates/floresta-compact-filters/src/flat_filters_store.rs index 4489c0bd..5c02f0e8 100644 --- a/crates/floresta-compact-filters/src/flat_filters_store.rs +++ b/crates/floresta-compact-filters/src/flat_filters_store.rs @@ -10,8 +10,8 @@ use std::sync::Mutex; use std::sync::MutexGuard; use std::sync::PoisonError; -use crate::IteratableFilterStore; -use crate::IteratableFilterStoreError; +use crate::IterableFilterStore; +use crate::IterableFilterStoreError; pub struct FiltersIterator { reader: BufReader, @@ -50,9 +50,9 @@ struct FlatFiltersStoreInner { path: PathBuf, } -impl From>> for IteratableFilterStoreError { +impl From>> for IterableFilterStoreError { fn from(_: PoisonError>) -> Self { - IteratableFilterStoreError::Poisoned + IterableFilterStoreError::Poisoned } } @@ -126,9 +126,9 @@ impl IntoIterator for FlatFiltersStore { } } -impl IteratableFilterStore for FlatFiltersStore { +impl IterableFilterStore for FlatFiltersStore { type I = FiltersIterator; - fn set_height(&self, height: u32) -> Result<(), IteratableFilterStoreError> { + fn set_height(&self, height: u32) -> Result<(), IterableFilterStoreError> { let mut inner = self.0.lock()?; inner.file.seek(SeekFrom::Start(0))?; inner.file.write_all(&height.to_le_bytes())?; @@ -136,7 +136,7 @@ impl IteratableFilterStore for FlatFiltersStore { Ok(()) } - fn get_height(&self) -> Result { + fn get_height(&self) -> Result { let mut inner = self.0.lock()?; let mut buf = [0; 4]; @@ -146,7 +146,7 @@ impl IteratableFilterStore for FlatFiltersStore { Ok(u32::from_le_bytes(buf)) } - fn iter(&self, start_height: Option) -> Result { + fn iter(&self, start_height: Option) -> Result { let mut inner = self.0.lock()?; let new_file = File::open(inner.path.clone())?; let mut reader = BufReader::new(new_file); @@ -176,11 +176,11 @@ impl IteratableFilterStore for FlatFiltersStore { &self, block_filter: crate::bip158::BlockFilter, height: u32, - ) -> Result<(), IteratableFilterStoreError> { + ) -> Result<(), IterableFilterStoreError> { let length = block_filter.content.len() as u32; if length > 1_000_000 { - return Err(IteratableFilterStoreError::FilterTooLarge); + return Err(IterableFilterStoreError::FilterTooLarge); } let mut inner = self.0.lock()?; @@ -210,7 +210,7 @@ mod tests { use super::FlatFiltersStore; use crate::bip158::BlockFilter; - use crate::IteratableFilterStore; + use crate::IterableFilterStore; #[test] fn test_filter_store() { @@ -218,7 +218,7 @@ mod tests { let store = FlatFiltersStore::new(path.into()); let res = store.get_height().unwrap_err(); - assert!(matches!(res, crate::IteratableFilterStoreError::Io(_))); + assert!(matches!(res, crate::IterableFilterStoreError::Io(_))); store.set_height(1).expect("could not set height"); assert_eq!(store.get_height().unwrap(), 1); diff --git a/crates/floresta-compact-filters/src/lib.rs b/crates/floresta-compact-filters/src/lib.rs index 6c1712d8..189c9be3 100644 --- a/crates/floresta-compact-filters/src/lib.rs +++ b/crates/floresta-compact-filters/src/lib.rs @@ -33,7 +33,7 @@ pub trait BlockFilterStore: Send + Sync { fn get_height(&self) -> Option; } -pub enum IteratableFilterStoreError { +pub enum IterableFilterStoreError { /// I/O error Io(std::io::Error), /// End of the file @@ -44,50 +44,50 @@ pub enum IteratableFilterStoreError { FilterTooLarge, } -impl Debug for IteratableFilterStoreError { +impl Debug for IterableFilterStoreError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - 
IteratableFilterStoreError::Io(e) => write!(f, "I/O error: {e}"), - IteratableFilterStoreError::Eof => write!(f, "End of file"), - IteratableFilterStoreError::Poisoned => write!(f, "Lock poisoned"), - IteratableFilterStoreError::FilterTooLarge => write!(f, "Filter too large"), + IterableFilterStoreError::Io(e) => write!(f, "I/O error: {e}"), + IterableFilterStoreError::Eof => write!(f, "End of file"), + IterableFilterStoreError::Poisoned => write!(f, "Lock poisoned"), + IterableFilterStoreError::FilterTooLarge => write!(f, "Filter too large"), } } } -impl From for IteratableFilterStoreError { +impl From for IterableFilterStoreError { fn from(e: std::io::Error) -> Self { - IteratableFilterStoreError::Io(e) + IterableFilterStoreError::Io(e) } } -impl Display for IteratableFilterStoreError { +impl Display for IterableFilterStoreError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Debug::fmt(self, f) } } -impl From>> for IteratableFilterStoreError { +impl From>> for IterableFilterStoreError { fn from(_: PoisonError>) -> Self { - IteratableFilterStoreError::Poisoned + IterableFilterStoreError::Poisoned } } -pub trait IteratableFilterStore: +pub trait IterableFilterStore: Send + Sync + IntoIterator { type I: Iterator; /// Fetches the first filter and sets our internal cursor to the first filter, /// succeeding calls to [next] will return the next filter until we reach the end - fn iter(&self, start_height: Option) -> Result; + fn iter(&self, start_height: Option) -> Result; /// Writes a new filter to the store fn put_filter( &self, block_filter: bip158::BlockFilter, height: u32, - ) -> Result<(), IteratableFilterStoreError>; + ) -> Result<(), IterableFilterStoreError>; /// Persists the height of the last filter we have - fn set_height(&self, height: u32) -> Result<(), IteratableFilterStoreError>; + fn set_height(&self, height: u32) -> Result<(), IterableFilterStoreError>; /// Fetches the height of the last filter we have - fn get_height(&self) -> Result; + fn get_height(&self) -> Result; } diff --git a/crates/floresta-compact-filters/src/network_filters.rs b/crates/floresta-compact-filters/src/network_filters.rs index 23cfe45c..9f23f19b 100644 --- a/crates/floresta-compact-filters/src/network_filters.rs +++ b/crates/floresta-compact-filters/src/network_filters.rs @@ -2,15 +2,15 @@ use bitcoin::bip158::BlockFilter; use bitcoin::BlockHash; use floresta_chain::pruned_utreexo::BlockchainInterface; -use crate::IteratableFilterStore; -use crate::IteratableFilterStoreError; +use crate::IterableFilterStore; +use crate::IterableFilterStoreError; #[derive(Debug)] -pub struct NetworkFilters { +pub struct NetworkFilters { filters: Storage, } -impl NetworkFilters { +impl NetworkFilters { pub fn new(filters: Storage) -> Self { if filters.get_height().is_err() { filters.set_height(0).unwrap(); @@ -24,7 +24,7 @@ impl NetworkFilters { query: Vec<&[u8]>, start_height: Option, chain: impl BlockchainInterface, - ) -> Result, IteratableFilterStoreError> { + ) -> Result, IterableFilterStoreError> { let mut blocks = Vec::new(); let iter = query.into_iter(); for (height, filter) in self.filters.iter(start_height)? 
{ @@ -40,15 +40,15 @@ impl NetworkFilters { &self, filter: BlockFilter, height: u32, - ) -> Result<(), IteratableFilterStoreError> { + ) -> Result<(), IterableFilterStoreError> { self.filters.put_filter(filter, height) } - pub fn get_height(&self) -> Result { + pub fn get_height(&self) -> Result { self.filters.get_height() } - pub fn save_height(&self, height: u32) -> Result<(), IteratableFilterStoreError> { + pub fn save_height(&self, height: u32) -> Result<(), IterableFilterStoreError> { self.filters.set_height(height) } } diff --git a/crates/floresta-electrum/src/electrum_protocol.rs b/crates/floresta-electrum/src/electrum_protocol.rs index d61077a3..b0566fc9 100644 --- a/crates/floresta-electrum/src/electrum_protocol.rs +++ b/crates/floresta-electrum/src/electrum_protocol.rs @@ -548,7 +548,7 @@ impl ElectrumServer { /// If a user adds a new address that we didn't have cached, this method /// will look for historical transactions for it. /// - /// Usually, we'll relly on compact block filters to speed things up. If + /// Usually, we'll rely on compact block filters to speed things up. If /// we don't have compact block filters, we may rescan using the older, /// more bandwidth-intensive method of actually downloading blocks. async fn rescan_for_addresses( @@ -1020,7 +1020,7 @@ mod test { eprintln!("Error reading from socket: {}", e); Err(e) } - Err(_) => Err(io::Error::new(io::ErrorKind::TimedOut, "Timeout occured")), + Err(_) => Err(io::Error::new(io::ErrorKind::TimedOut, "Timeout occurred")), } } @@ -1131,14 +1131,14 @@ mod test { /// blockchain.scripthash.listunspent * /// blockchain.scripthash.subscribe * /// blockchain.scripthash.unsubscribe * - /// blockchain.transaction.broadcast * + /// blockchain.transaction.broadcast * /// blockchain.transaction.get * /// blockchain.transaction.get_merkle * /// mempool.get_fee_histogram * /// server.add_peer * /// server.donation_address * /// server.features * - /// sserver.peers.subscribe * + /// server.peers.subscribe * /// server.ping * /// server.version * fn generate_request(req_params: &mut Vec) -> Value { diff --git a/crates/floresta-watch-only/src/lib.rs b/crates/floresta-watch-only/src/lib.rs index f5d57495..c9eb6de4 100644 --- a/crates/floresta-watch-only/src/lib.rs +++ b/crates/floresta-watch-only/src/lib.rs @@ -617,7 +617,7 @@ impl AddressCache { Ok(known_descs.contains(desc)) } - /// Tells wheter an address is already cached + /// Tells whether an address is already cached pub fn is_address_cached(&self, script_hash: &Hash) -> bool { let inner = self.inner.read().expect("poisoned lock"); inner.address_map.contains_key(script_hash) diff --git a/crates/floresta-wire/src/cli_wire/mod.rs b/crates/floresta-wire/src/cli_wire/mod.rs index b2502102..c94a05a3 100644 --- a/crates/floresta-wire/src/cli_wire/mod.rs +++ b/crates/floresta-wire/src/cli_wire/mod.rs @@ -268,7 +268,7 @@ impl UtreexodBackend { pub async fn run(self) { try_and_log!(self.get_headers()); if self.is_shutting_down() { - info!("Shuting blockchain down"); + info!("Shutting blockchain down"); try_and_log!(self.chainstate.flush()); return; } @@ -281,7 +281,7 @@ impl UtreexodBackend { loop { std::thread::sleep(Duration::from_secs(1)).await; if self.is_shutting_down() { - info!("Shuting blockchain down"); + info!("Shutting blockchain down"); try_and_log!(self.chainstate.flush()); return; } diff --git a/crates/floresta-wire/src/p2p_wire/chain_selector.rs b/crates/floresta-wire/src/p2p_wire/chain_selector.rs index 9e8d158b..eeb827a6 100644 --- 
a/crates/floresta-wire/src/p2p_wire/chain_selector.rs +++ b/crates/floresta-wire/src/p2p_wire/chain_selector.rs @@ -27,7 +27,7 @@ //! The most critial part of syncing-up a Bitcoin node is making sure you know about the most-work //! chain. If someone can eclypse you, they can make you start following a chain that only you and //! the attacker care about. If you get paid in this chain, you can't pay someone else outside this -//! chain, because they will be following other chains. Luckly, we only need one honest peer, to +//! chain, because they will be following other chains. Luckily, we only need one honest peer, to //! find the best-work chain and avoid any attacker to fools us into accepting payments in a "fake //! Bitcoin" //! @@ -35,10 +35,10 @@ //! //! In Floresta, we try to pick a good balance between data downloaded and security. We could //! simply download all chains from all peers and pick the most work one. But each header is -//! 80 bytes-long, with ~800k blocks, that's arround 60 MBs. If we have 10 peers, that's 600MBs +//! 80 bytes-long, with ~800k blocks, that's around 60 MBs. If we have 10 peers, that's 600MBs //! (excluding overhead by the p2p messages). Moreover, it's very uncommon to actually have peers //! in different chains. So we can optmistically download all headers from one random peer, and -//! then check with the others if they aggree. If they have another chain for us, we download that +//! then check with the others if they agree. If they have another chain for us, we download that //! chain, and pick whichever has more work. //! //! Most likely we'll only download one chain and all peers will agree with it. Then we can start @@ -78,7 +78,7 @@ use crate::node_context::NodeContext; use crate::node_context::PeerId; #[derive(Debug, Default, Clone)] -/// A p2p driver that attemps to connect with multiple peers, ask which chain are them following +/// A p2p driver that attempts to connect with multiple peers, ask which chain are them following /// and download and verify the headers, **not** the actual blocks. This is the first part of a /// loger IBD pipeline. /// The actual blocks should be downloaded by a SyncPeer. 
@@ -281,7 +281,7 @@ where hash = self.chain.get_block_hash(height).unwrap(); } - info!("Fork point is arround height={height} hash={hash}"); + info!("Fork point is around height={height} hash={hash}"); // at the end, this variable should hold the last block where they agreed let mut fork = 0; @@ -290,7 +290,7 @@ where .grab_both_peers_version(peer1, peer2, hash, height) .await?; - // Intializing the agree bool for the block on which we landed on + // Initializing the agree bool for the block on which we landed on let agree = peer1_acc == peer2_acc; if agree { @@ -347,11 +347,11 @@ where return Ok(None); }; - let (aggreed, _) = self + let (agreed, _) = self .grab_both_peers_version(peer1, peer2, hash, fork) .await?; - let agreed = match aggreed { + let agreed = match agreed { Some(acc) => Self::parse_acc(acc)?, None => return Ok(None), }; diff --git a/crates/floresta-wire/src/p2p_wire/error.rs b/crates/floresta-wire/src/p2p_wire/error.rs index 3fb44991..5f0fb780 100644 --- a/crates/floresta-wire/src/p2p_wire/error.rs +++ b/crates/floresta-wire/src/p2p_wire/error.rs @@ -5,7 +5,7 @@ use std::io; use floresta_chain::BlockchainError; use floresta_common::impl_error_from; -use floresta_compact_filters::IteratableFilterStoreError; +use floresta_compact_filters::IterableFilterStoreError; use thiserror::Error; use super::peer::PeerError; @@ -36,7 +36,7 @@ pub enum WireError { #[error("Peer timed out")] PeerTimeout, #[error("Compact block filters error")] - CompactBlockFiltersError(IteratableFilterStoreError), + CompactBlockFiltersError(IterableFilterStoreError), #[error("Poisoned lock")] PoisonedLock, #[error("We couldn't parse the provided address due to: {0}")] @@ -47,7 +47,7 @@ impl_error_from!(WireError, PeerError, PeerError); impl_error_from!(WireError, BlockchainError, Blockchain); impl_error_from!( WireError, - IteratableFilterStoreError, + IterableFilterStoreError, CompactBlockFiltersError ); impl_error_from!(WireError, AddrParseError, InvalidAddress); diff --git a/crates/floresta-wire/src/p2p_wire/node_context.rs b/crates/floresta-wire/src/p2p_wire/node_context.rs index 5029b0f5..dea4ac4e 100644 --- a/crates/floresta-wire/src/p2p_wire/node_context.rs +++ b/crates/floresta-wire/src/p2p_wire/node_context.rs @@ -4,11 +4,11 @@ //! create a massive amount of if's in the code, taking different paths depending on which state //! are we in. For that reason, we define the basics of a node, like code shared by all the //! states into one base struct called `UtreexoNode`, we then further refine this struct using -//! fine-tunned `Contexts`, that should implement [NodeContext] and are passed-in as a generic +//! fine-tuned `Contexts`, that should implement [NodeContext] and are passed-in as a generic //! parameter by the caller. //! //! The three flavors of node are: -//! - ChainSelector: This finds the best PoW chain, by downloding multiple candidates and taking +//! - ChainSelector: This finds the best PoW chain, by downloading multiple candidates and taking //! the one with more PoW. It should do it's job quickly, as it blocks our main //! client and can't proceed without this information. //! - SyncNode: Used to download and verify all blocks in a chain. 
This is computationally diff --git a/crates/floresta-wire/src/p2p_wire/running_node.rs b/crates/floresta-wire/src/p2p_wire/running_node.rs index f4075426..59d1fd4d 100644 --- a/crates/floresta-wire/src/p2p_wire/running_node.rs +++ b/crates/floresta-wire/src/p2p_wire/running_node.rs @@ -383,7 +383,7 @@ where continue; } - // Aks our peers for new addresses + // Ask our peers for new addresses periodic_job!( self.ask_for_addresses().await, self.last_get_address_request, diff --git a/crates/floresta-wire/src/p2p_wire/sync_node.rs b/crates/floresta-wire/src/p2p_wire/sync_node.rs index a41dbd53..2cc252b3 100644 --- a/crates/floresta-wire/src/p2p_wire/sync_node.rs +++ b/crates/floresta-wire/src/p2p_wire/sync_node.rs @@ -1,4 +1,4 @@ -//! A node that downlaods and validates the blockchain. +//! A node that downloads and validates the blockchain. use std::sync::Arc; use std::time::Duration; diff --git a/crates/floresta-wire/src/p2p_wire/tests/test_data/block_hashes.txt b/crates/floresta-wire/src/p2p_wire/tests/test_data/block_hashes.txt index 87100afd..2d9de76b 100644 --- a/crates/floresta-wire/src/p2p_wire/tests/test_data/block_hashes.txt +++ b/crates/floresta-wire/src/p2p_wire/tests/test_data/block_hashes.txt @@ -1,4 +1,4 @@ -Below are signet block hashes of corresponding height aquired from the UTREEXOD node +Below are signet block hashes of corresponding height acquired from the UTREEXOD node 0: 00000008819873e925422c1ff0f99f7cc9bbb232af63a077a480a3633bee1ef6 1: 00000086d6b2636cb2a392d45edc4ec544a10024d30141c9adf4bfd9de533b53 diff --git a/crates/floresta/src/lib.rs b/crates/floresta/src/lib.rs index 2fb1d465..443034d9 100644 --- a/crates/floresta/src/lib.rs +++ b/crates/floresta/src/lib.rs @@ -4,7 +4,7 @@ //! Bitcoin nodes and wallets, powered by Utreexo, a novel accumulator to represent //! the Bitcoin UTXO set. //! -//! This project is layed out as a collection of crates, each implementing one functionality. +//! This project is laid out as a collection of crates, each implementing one functionality. //! They are all named floresta-*. The main crate is floresta, which is a meta-crate //! that depends on all the others. It is meant to be used as a dependency in other projects. //! diff --git a/florestad/docs/tutorial(PT-BR).md b/florestad/docs/tutorial(PT-BR).md index 37eea206..e880d304 100644 --- a/florestad/docs/tutorial(PT-BR).md +++ b/florestad/docs/tutorial(PT-BR).md @@ -2,7 +2,7 @@ ### Introdução -Este programa é uma pequena implementação de node com um Electrum Server acoplado. Ela se comporta semelhante a um setup com Bitcoin Core + Electrum Personal Server, porém com algumas diferenças chave. +Este programa é uma pequena implementação de node com um Electrum Server acoplado. Ela se comporta semelhante a um setup com Bitcoin Core + Electrum Personal Server, porém com algumas diferenças chave. - Node e Electrum Server estão no mesmo binário, tornando o processo mais simples e com menos erros. - O full node utiliza uma tecnologia nova chamada `Utreexo` para reduzir o consumo de recursos, você consegue rodar o node com menos de 1GB de disco e RAM. diff --git a/florestad/src/cli.rs b/florestad/src/cli.rs index d1f4399a..58f29545 100644 --- a/florestad/src/cli.rs +++ b/florestad/src/cli.rs @@ -116,7 +116,7 @@ pub struct Cli { /// Assume blocks before this one as having valid scripts /// /// Assume that blocks that are buried under a considerable work have valid scripts. - /// We still do other checks, like amounts, UTXO existance, reward... 
the only check we + /// We still do other checks, like amounts, UTXO existence, reward... the only check we /// skip is the script validation pub assume_valid: Option, diff --git a/florestad/src/florestad.rs b/florestad/src/florestad.rs index e1da2307..371b124d 100644 --- a/florestad/src/florestad.rs +++ b/florestad/src/florestad.rs @@ -88,7 +88,7 @@ pub struct Config { pub wallet_xpub: Option>, /// An output descriptor to cache /// - /// This should be a list of ouptut descriptors that we should add to our watch-only wallet. + /// This should be a list of output descriptors that we should add to our watch-only wallet. /// This works just like wallet_xpub, but with a descriptor. pub wallet_descriptor: Option>, /// Where should we read from a config file diff --git a/florestad/src/json_rpc/server.rs b/florestad/src/json_rpc/server.rs index 4f4f7e6d..aa092d6e 100644 --- a/florestad/src/json_rpc/server.rs +++ b/florestad/src/json_rpc/server.rs @@ -297,7 +297,7 @@ impl Rpc for RpcImpl { }); }; - // It's ok to unwrap bacause we know there is at least one element in the vector + // It's ok to unwrap because we know there is at least one element in the vector let addresses = parsed.pop().unwrap(); let addresses = (0..100) .map(|index| { diff --git a/florestad/src/zmq.rs b/florestad/src/zmq.rs index ff30c8fc..1c203376 100644 --- a/florestad/src/zmq.rs +++ b/florestad/src/zmq.rs @@ -14,7 +14,7 @@ use bitcoin::consensus::serialize; /// ```ignore /// use zmq::{Context, Socket}; /// let ctx = Context::new(); -/// // The oposite of PUSH is PULL +/// // The opposite of PUSH is PULL /// let socket = ctx.socket(zmq::SocketType::PULL).unwrap(); /// /// socket.connect(addr).unwrap(); diff --git a/tests/example_test.py b/tests/example_test.py index fc4238e2..e769a53c 100644 --- a/tests/example_test.py +++ b/tests/example_test.py @@ -1,5 +1,5 @@ """ - This is an example of how tests should look lke, see the class bellow for more info + This is an example of how tests should look like, see the class below for more info """ import time import os diff --git a/tests/prepare.sh b/tests/prepare.sh index 17cb05d3..27069a5a 100644 --- a/tests/prepare.sh +++ b/tests/prepare.sh @@ -1,10 +1,10 @@ -# Prepares our enviroment to run our tests +# Prepares our environment to run our tests # # This script shold be executed once, before running our functinal test -# for the first time. It'll download and build all needed dependecies +# for the first time. It'll download and build all needed dependencies # to make sure we are not missing anything during our tests. -# Check for dependecies, we need Golang for Utreexod and Rust for Floresta +# Check for dependencies, we need Golang for Utreexod and Rust for Floresta go version &>/dev/null if [ $? -ne 0 ]