diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 00000000000..0f1b1bb104e --- /dev/null +++ b/.codespellrc @@ -0,0 +1,3 @@ +[codespell] +quiet-level = 2 +ignore-words = .github/workflows/ignore-words.txt \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c9eac7317c9..c71cc07344c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -89,12 +89,32 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} + spellcheck: + name: Run codespell + runs-on: buildjet-4vcpu-ubuntu-2204 + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Install codespell + run: pip install codespell + + - name: Run codespell + run: codespell + cargo-verifications: needs: - lint-toml-files - prevent-openssl - rustfmt - check-changelog + - spellcheck runs-on: buildjet-4vcpu-ubuntu-2204 env: RUSTFLAGS: -D warnings diff --git a/.github/workflows/ignore-words.txt b/.github/workflows/ignore-words.txt new file mode 100644 index 00000000000..6e1cc0122d3 --- /dev/null +++ b/.github/workflows/ignore-words.txt @@ -0,0 +1,8 @@ +crate +inout +implementor +implementors +ser +fot +mis-match +re-use \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2bcf94db711..58d1b082f4d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -105,7 +105,7 @@ This is a rough outline of what a contributor's workflow looks like: - If the PR contains any breaking changes, add the breaking label to your PR. - If you are part of the FuelLabs Github org, please open a PR from the repository itself. - Otherwise, push your changes to a branch in your fork of the repository and submit a pull request. - - Make sure mention the issue, which is created at step 1, in the commit message. 
+ - Make sure to mention the issue, which is created at step 1, in the commit message. - Your PR will be reviewed and some changes may be requested. - Once you've made changes, your PR must be re-reviewed and approved. - If the PR becomes out of date, you can use GitHub's 'update branch' button. @@ -120,7 +120,7 @@ Thanks for your contributions! For beginners, we have prepared many suitable tasks for you. Checkout our [Help Wanted issues](https://github.com/FuelLabs/fuel-core/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) for a list. -If you are planning something big, for example, relates to multiple components or changes current behaviors, make sure to open an issue to discuss with us before going on. +If you are planning something big, for example, something that relates to multiple components or changes current behavior, make sure to open an issue to discuss with us before going on. The Client team actively develops and maintains several dependencies used in Fuel Core, which you may be also interested in: diff --git a/Makefile.toml b/Makefile.toml index 7f46f6b8041..4c440da7904 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -2,9 +2,9 @@ # https://github.com/sagiegurari/cargo-make/blob/0.36.0/src/lib/descriptor/makefiles/stable.toml # This is a configuration file for the cargo plugin `cargo-make`. We use this plugin because of it's handling around -# cargo workspaces. Specifically, each task is run on workspace members indepedently, avoiding potential issues that +# cargo workspaces. Specifically, each task is run on workspace members independently, avoiding potential issues that # arise from feature unification (https://doc.rust-lang.org/cargo/reference/features.html#feature-unification). -# Feature unification allows two unrelated crates with the same depedency to enable features on eachother. +# Feature unification allows two unrelated crates with the same dependency to enable features on each other. 
# This is problematic when a crate is built independently (when publishing / being consumed from crates.io), # and it implicitly depended on features enabled by other crates in the same workspace. # While feature resolver v2 attempted to resolve this problem, it still comes up in certain scenarios. diff --git a/crates/chain-config/src/serialization.rs b/crates/chain-config/src/serialization.rs index eac42499cdf..a17d1ee507f 100644 --- a/crates/chain-config/src/serialization.rs +++ b/crates/chain-config/src/serialization.rs @@ -143,7 +143,7 @@ macro_rules! impl_hex_number { let pad = SIZE.checked_sub(bytes.len()) .ok_or(D::Error::custom(format!( - "value cant exceed {WORD_SIZE} bytes" + "value can't exceed {WORD_SIZE} bytes" )))?; if pad != 0 { diff --git a/crates/fuel-core/src/database/storage.rs b/crates/fuel-core/src/database/storage.rs index 6ceab3a776b..c826fb4a880 100644 --- a/crates/fuel-core/src/database/storage.rs +++ b/crates/fuel-core/src/database/storage.rs @@ -243,7 +243,7 @@ pub trait ToDatabaseKey { where Self: 'a; - /// Coverts the key into database key that supports byte presentation. + /// Converts the key into database key that supports byte presentation. 
fn database_key(&self) -> Self::Type<'_>; } diff --git a/crates/fuel-core/src/graphql_api/service.rs b/crates/fuel-core/src/graphql_api/service.rs index 6c6879ae308..39a24b413a2 100644 --- a/crates/fuel-core/src/graphql_api/service.rs +++ b/crates/fuel-core/src/graphql_api/service.rs @@ -158,7 +158,7 @@ impl RunnableTask for Task { } } -// Need a seperate Data Object for each Query endpoint, cannot be avoided +// Need a separate Data Object for each Query endpoint, cannot be avoided #[allow(clippy::too_many_arguments)] pub fn new_service( config: Config, diff --git a/crates/fuel-core/src/state/rocks_db.rs b/crates/fuel-core/src/state/rocks_db.rs index 85b37faab3a..f8b30db1a23 100644 --- a/crates/fuel-core/src/state/rocks_db.rs +++ b/crates/fuel-core/src/state/rocks_db.rs @@ -77,7 +77,7 @@ impl ShallowTempDir { Self { path } } - /// Returns the path of teh directory. + /// Returns the path of the directory. pub fn path(&self) -> &PathBuf { &self.path } diff --git a/crates/services/consensus_module/poa/src/deadline_clock.rs b/crates/services/consensus_module/poa/src/deadline_clock.rs index e652e551e30..bf69991dcd8 100644 --- a/crates/services/consensus_module/poa/src/deadline_clock.rs +++ b/crates/services/consensus_module/poa/src/deadline_clock.rs @@ -141,7 +141,7 @@ impl DeadlineClock { } /// Clears the timeout, so that now event is produced when it expires. - /// If the event has alread occurred, it will not be removed. + /// If the event has already occurred, it will not be removed. 
pub async fn clear(&self) { self.control .send(ControlMessage::Clear) diff --git a/crates/services/p2p/src/config/fuel_upgrade.rs b/crates/services/p2p/src/config/fuel_upgrade.rs index af507e55c40..596f60bc827 100644 --- a/crates/services/p2p/src/config/fuel_upgrade.rs +++ b/crates/services/p2p/src/config/fuel_upgrade.rs @@ -12,4 +12,4 @@ impl From<[u8; 32]> for Checksum { fn from(value: [u8; 32]) -> Self { Self(value) } -} +} \ No newline at end of file diff --git a/crates/services/p2p/src/gossipsub/topics.rs b/crates/services/p2p/src/gossipsub/topics.rs index a63e4f60bb9..f56b2c38f0a 100644 --- a/crates/services/p2p/src/gossipsub/topics.rs +++ b/crates/services/p2p/src/gossipsub/topics.rs @@ -42,7 +42,7 @@ impl GossipsubTopics { } } - /// Given a `GossipsubBroadcastRequest` retruns a `GossipTopic` + /// Given a `GossipsubBroadcastRequest` returns a `GossipTopic` /// which is broadcast over the network with the serialized inner value of `GossipsubBroadcastRequest` pub fn get_gossipsub_topic( &self, diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 7cf3b788ac6..92358521123 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1397,7 +1397,7 @@ mod tests { p2p_config.bootstrap_nodes = node_b.multiaddrs(); let mut node_c = build_service_from_config(p2p_config.clone()).await; - // Node C does not connecto to Node A + // Node C does not connect to Node A // it should receive the propagated message from Node B if `GossipsubMessageAcceptance` is `Accept` node_c .swarm @@ -1444,7 +1444,7 @@ mod tests { // Node B received the correct message // If we try to publish it again we will get `PublishError::Duplicate` - // This asserts that our MessageId calculation is consistant irrespective of which Peer sends it + // This asserts that our MessageId calculation is consistent irrespective of which Peer sends it let broadcast_request = broadcast_request.clone(); 
matches!(node_b.publish_message(broadcast_request), Err(PublishError::Duplicate)); diff --git a/crates/services/p2p/src/peer_report.rs b/crates/services/p2p/src/peer_report.rs index 176f2246755..d58338d9c0b 100644 --- a/crates/services/p2p/src/peer_report.rs +++ b/crates/services/p2p/src/peer_report.rs @@ -59,7 +59,7 @@ pub enum PeerReportEvent { // `Behaviour` that reports events about peers pub struct PeerReportBehaviour { pending_events: VecDeque, - // regulary checks if reserved nodes are connected + // regularly checks if reserved nodes are connected health_check: Interval, decay_interval: Interval, } diff --git a/crates/services/producer/src/block_producer.rs b/crates/services/producer/src/block_producer.rs index 3e57c794195..2aac3081113 100644 --- a/crates/services/producer/src/block_producer.rs +++ b/crates/services/producer/src/block_producer.rs @@ -107,7 +107,7 @@ where gas_limit: max_gas, }; - // Store the context string incase we error. + // Store the context string in case we error. let context_string = format!("Failed to produce block {height:?} due to execution failure"); let result = self @@ -121,7 +121,7 @@ where } // TODO: Support custom `block_time` for `dry_run`. - /// Simulate a transaction without altering any state. Does not aquire the production lock + /// Simulate a transaction without altering any state. Does not acquire the production lock /// since it is basically a "read only" operation and shouldn't get in the way of normal /// production. pub async fn dry_run( diff --git a/crates/services/relayer/README.md b/crates/services/relayer/README.md index 37c13ced9c1..498965981f1 100644 --- a/crates/services/relayer/README.md +++ b/crates/services/relayer/README.md @@ -11,7 +11,7 @@ Ethereum blocks are considered final after two epochs. Each epoch contains 32 sl Second finality that we have is related to fuel block attestation time limit, how long are we going to wait until challenge comes. It should be at least longer than ethereum finality. 
Not relevant for first version. -* Problem: Validator deposit to ethereum gets reverted by block reorg. (Eth clients usually have priority for reverted txs but this does not mean it cant happen). It can potentially rearrange order of transactions +* Problem: Validator deposit to ethereum gets reverted by block reorg. (Eth clients usually have priority for reverted txs but this does not mean it can't happen). It can potentially rearrange order of transactions * Solution: Introduce sliding window, only deposits that are at least eth finality long can be finalized and included in validators leader selection. * Problem: How to choose when bridge message event gets enabled for use in fuel, at what exact fuel block does this happen? (Note that we have sliding window) diff --git a/crates/services/txpool/src/lib.rs b/crates/services/txpool/src/lib.rs index 607be531308..0f71450d13c 100644 --- a/crates/services/txpool/src/lib.rs +++ b/crates/services/txpool/src/lib.rs @@ -55,7 +55,7 @@ impl TxInfo { pub fn new(tx: ArcPoolTx) -> Self { let since_epoch = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) - .expect("Now is bellow of the `UNIX_EPOCH`"); + .expect("Now is below the `UNIX_EPOCH`"); Self { tx, diff --git a/crates/types/src/services/txpool.rs b/crates/types/src/services/txpool.rs index c323761ec82..5d24d2ce492 100644 --- a/crates/types/src/services/txpool.rs +++ b/crates/types/src/services/txpool.rs @@ -186,7 +186,7 @@ pub enum TransactionStatus { /// Why this happened reason: String, }, - /// Transaction was included in a block, but the exection was reverted + /// Transaction was included in a block, but the execution was reverted Failed { /// Included in this block block_id: BlockId, diff --git a/deployment/Dockerfile b/deployment/Dockerfile index f201b3ee60d..c566c144c4a 100644 --- a/deployment/Dockerfile +++ b/deployment/Dockerfile @@ -36,7 +36,7 @@ ENV BUILD_FEATURES=$FEATURES COPY --from=planner /build/recipe.json recipe.json RUN echo 
$CARGO_PROFILE_RELEASE_DEBUG RUN echo $BUILD_FEATURES -# Build our project dependecies, not our application! +# Build our project dependencies, not our application! RUN xx-cargo chef cook --release --no-default-features --features "${BUILD_FEATURES}" -p fuel-core-bin --recipe-path recipe.json # Up to this point, if our dependency tree stays the same, # all layers should be cached. diff --git a/deployment/e2e-client.Dockerfile b/deployment/e2e-client.Dockerfile index 27d455a9383..5423f25bf40 100644 --- a/deployment/e2e-client.Dockerfile +++ b/deployment/e2e-client.Dockerfile @@ -16,7 +16,7 @@ RUN cargo chef prepare --recipe-path recipe.json FROM chef as builder ENV CARGO_NET_GIT_FETCH_WITH_CLI=true COPY --from=planner /build/recipe.json recipe.json -# Build our project dependecies, not our application! +# Build our project dependencies, not our application! RUN cargo chef cook --release -p fuel-core-e2e-client --features p2p --recipe-path recipe.json # Up to this point, if our dependency tree stays the same, # all layers should be cached. diff --git a/docs/fee_calculations.md b/docs/fee_calculations.md index aa35d988349..f37ec40da4a 100644 --- a/docs/fee_calculations.md +++ b/docs/fee_calculations.md @@ -9,7 +9,7 @@ to include an additional cost to op codes that write new data to storage or to transactions that add new contracts to the chain. There are a number of ways we might calculate this value; we have decided to go -with a simple calculatoin based on our target storage growth and working +with a simple calculation based on our target storage growth and working backward from there. #### Pessimistic Estimate @@ -23,7 +23,7 @@ This gives us this graph: | 500,000,000,000 | 10,000,000 | 31536000 | **15,855** | **630.72** | This is a harsh estimate that isn't taking into account the additional base cost of tx -execution and the cost of any additional op codes. It is also assuming that +execution and the cost of any additional op codes. 
It is also assumed that all blocks would be maxing out the storage. #### Generous Estimate diff --git a/docs/poa/flows.md b/docs/poa/flows.md index eea9ffe530e..e00ddd6127a 100644 --- a/docs/poa/flows.md +++ b/docs/poa/flows.md @@ -1,7 +1,7 @@ # Flows ## PoA Primary Production Flow -When the node is configured with a POA key, produce blocks and notify network. +When the node is configured with a POA key, it produces blocks and notifies the network. ```mermaid sequenceDiagram @@ -99,7 +99,7 @@ sequenceDiagram S->>+POA: verify signed block header POA->>+R: await new block da height R-->>-POA: - note right of POA: verify signature against current authority key + note right of POA: verify the signature against current authority key POA->>-S: S->>+BI: commit sealed block BI->>+R: check_da_height for message inclusion diff --git a/tests/tests/trigger_integration/interval.rs b/tests/tests/trigger_integration/interval.rs index 5e54d71b515..2376903f5ac 100644 --- a/tests/tests/trigger_integration/interval.rs +++ b/tests/tests/trigger_integration/interval.rs @@ -83,7 +83,7 @@ async fn poa_interval_produces_empty_blocks_at_correct_rate() { round_time_seconds <= secs_per_round && secs_per_round <= round_time_seconds + 2 * (rounds as u64) / round_time_seconds, - "Round time not within treshold" + "Round time not within threshold" ); } @@ -168,7 +168,7 @@ async fn poa_interval_produces_nonempty_blocks_at_correct_rate() { round_time_seconds <= secs_per_round && secs_per_round <= round_time_seconds + 2 * (rounds as u64) / round_time_seconds, - "Round time not within treshold" + "Round time not within threshold" ); // Make sure all txs got produced