diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index 6ff767d7b5a9..0577e408e9d4 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -144,11 +144,11 @@ impl Command { tx.clear::<tables::AccountsHistory>()?; tx.clear::<tables::StoragesHistory>()?; tx.put::<tables::SyncStage>( - StageId::IndexAccountHistory.to_string(), + StageId::IndexAccountsHistory.to_string(), Default::default(), )?; tx.put::<tables::SyncStage>( - StageId::IndexStorageHistory.to_string(), + StageId::IndexStoragesHistory.to_string(), Default::default(), )?; } diff --git a/bin/reth/src/commands/stage/run.rs b/bin/reth/src/commands/stage/run.rs index 931cd901dae3..12fb085df708 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/bin/reth/src/commands/stage/run.rs @@ -23,7 +23,7 @@ use reth_provider::{ProviderFactory, StageCheckpointReader, StageCheckpointWrite use reth_stages::{ stages::{ AccountHashingStage, BodyStage, ExecutionStage, ExecutionStageThresholds, - IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, + IndexAccountsHistoryStage, IndexStoragesHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, TransactionLookupStage, }, ExecInput, ExecOutput, Stage, StageExt, UnwindInput, UnwindOutput, @@ -234,8 +234,8 @@ impl Command { Box::new(MerkleStage::default_execution()), Some(Box::new(MerkleStage::default_unwind())), ), - StageEnum::AccountHistory => (Box::<IndexAccountHistoryStage>::default(), None), - StageEnum::StorageHistory => (Box::<IndexStorageHistoryStage>::default(), None), + StageEnum::AccountHistory => (Box::<IndexAccountsHistoryStage>::default(), None), + StageEnum::StorageHistory => (Box::<IndexStoragesHistoryStage>::default(), None), _ => return Ok(()), }; if let Some(unwind_stage) = &unwind_stage { diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index de52bd451d16..9066b541250b 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -55,8 +55,8 @@ use reth_revm::EvmProcessorFactory; use reth_stages::{ prelude::*, stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, IndexAccountHistoryStage, - IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, + AccountHashingStage, ExecutionStage, ExecutionStageThresholds, IndexAccountsHistoryStage, + IndexStoragesHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, TotalDifficultyStage, TransactionLookupStage, }, MetricEvent, @@ -878,11 +878,11 @@ impl NodeConfig { stage_config.transaction_lookup.commit_threshold, prune_modes.transaction_lookup, )) - .set(IndexAccountHistoryStage::new( + .set(IndexAccountsHistoryStage::new( stage_config.index_account_history.commit_threshold, prune_modes.account_history, )) - .set(IndexStorageHistoryStage::new( + .set(IndexStoragesHistoryStage::new( stage_config.index_storage_history.commit_threshold, prune_modes.storage_history, )), diff --git a/crates/primitives/src/stage/id.rs b/crates/primitives/src/stage/id.rs index ac44a5e46d9f..50f9c88e2ceb 100644 --- a/crates/primitives/src/stage/id.rs +++ b/crates/primitives/src/stage/id.rs @@ -24,9 +24,9 @@ pub enum StageId { /// Transaction lookup stage in the process. TransactionLookup, /// Index storage history stage in the process. - IndexStorageHistory, + IndexStoragesHistory, /// Index account history stage in the process. - IndexAccountHistory, + IndexAccountsHistory, /// Finish stage in the process. Finish, /// Other custom stage with a provided string identifier.
@@ -46,8 +46,8 @@ impl StageId { StageId::StorageHashing, StageId::MerkleExecute, StageId::TransactionLookup, - StageId::IndexStorageHistory, - StageId::IndexAccountHistory, + StageId::IndexStoragesHistory, + StageId::IndexAccountsHistory, StageId::Finish, ]; @@ -64,8 +64,8 @@ impl StageId { StageId::StorageHashing => "StorageHashing", StageId::MerkleExecute => "MerkleExecute", StageId::TransactionLookup => "TransactionLookup", - StageId::IndexAccountHistory => "IndexAccountHistory", - StageId::IndexStorageHistory => "IndexStorageHistory", + StageId::IndexAccountsHistory => "IndexAccountHistory", + StageId::IndexStoragesHistory => "IndexStorageHistory", StageId::Finish => "Finish", StageId::Other(s) => s, } @@ -103,8 +103,8 @@ mod tests { assert_eq!(StageId::AccountHashing.to_string(), "AccountHashing"); assert_eq!(StageId::StorageHashing.to_string(), "StorageHashing"); assert_eq!(StageId::MerkleExecute.to_string(), "MerkleExecute"); - assert_eq!(StageId::IndexAccountHistory.to_string(), "IndexAccountHistory"); - assert_eq!(StageId::IndexStorageHistory.to_string(), "IndexStorageHistory"); + assert_eq!(StageId::IndexAccountsHistory.to_string(), "IndexAccountHistory"); + assert_eq!(StageId::IndexStoragesHistory.to_string(), "IndexStorageHistory"); assert_eq!(StageId::TransactionLookup.to_string(), "TransactionLookup"); assert_eq!(StageId::Finish.to_string(), "Finish"); diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 7879c20d86b0..ce01617c3ce4 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -41,7 +41,7 @@ use crate::{ stages::{ AccountHashingStage, BodyStage, ExecutionStage, FinishStage, HeaderStage, - IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, + IndexAccountsHistoryStage, IndexStoragesHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, TotalDifficultyStage, TransactionLookupStage, }, StageSet, StageSetBuilder, @@ -73,8 +73,8 @@ use std::sync::Arc; /// - [`StorageHashingStage`] /// - [`MerkleStage`] (execute) /// - [`TransactionLookupStage`] -/// - [`IndexStorageHistoryStage`] -/// - [`IndexAccountHistoryStage`] +/// - [`IndexStoragesHistoryStage`] +/// - [`IndexAccountsHistoryStage`] /// - [`FinishStage`] #[derive(Debug)] pub struct DefaultStages { @@ -292,7 +292,7 @@ impl<DB: Database> StageSet<DB> for HistoryIndexingStages { fn builder(self) -> StageSetBuilder<DB> { StageSetBuilder::default() .add_stage(TransactionLookupStage::default()) - .add_stage(IndexStorageHistoryStage::default()) - .add_stage(IndexAccountHistoryStage::default()) + .add_stage(IndexStoragesHistoryStage::default()) + .add_stage(IndexAccountsHistoryStage::default()) } } diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 71c9c33e27e9..ccefff8a1064 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -14,7 +14,7 @@ use std::fmt::Debug; /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information /// on index sharding take a look at [`reth_db::tables::AccountsHistory`] #[derive(Debug)] -pub struct IndexAccountHistoryStage { +pub struct IndexAccountsHistoryStage { /// Number of blocks after which the control /// flow will be returned to the pipeline for commit. pub commit_threshold: u64, @@ -22,23 +22,23 @@ pub struct IndexAccountHistoryStage { pub prune_mode: Option<PruneMode>, } -impl IndexAccountHistoryStage { - /// Create new instance of [IndexAccountHistoryStage].
+impl IndexAccountsHistoryStage { + /// Create new instance of [IndexAccountsHistoryStage]. pub fn new(commit_threshold: u64, prune_mode: Option<PruneMode>) -> Self { Self { commit_threshold, prune_mode } } } -impl Default for IndexAccountHistoryStage { +impl Default for IndexAccountsHistoryStage { fn default() -> Self { Self { commit_threshold: 100_000, prune_mode: None } } } -impl<DB: Database> Stage<DB> for IndexAccountHistoryStage { +impl<DB: Database> Stage<DB> for IndexAccountsHistoryStage { /// Return the id of the stage fn id(&self) -> StageId { - StageId::IndexAccountHistory + StageId::IndexAccountsHistory } /// Execute the stage. @@ -177,7 +177,7 @@ mod tests { checkpoint: input_checkpoint .map(|block_number| StageCheckpoint { block_number, stage_checkpoint: None }), }; - let mut stage = IndexAccountHistoryStage::default(); + let mut stage = IndexAccountsHistoryStage::default(); let provider = db.factory.provider_rw().unwrap(); let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(run_to), done: true }); @@ -190,7 +190,7 @@ mod tests { unwind_to, ..Default::default() }; - let mut stage = IndexAccountHistoryStage::default(); + let mut stage = IndexAccountsHistoryStage::default(); let provider = db.factory.provider_rw().unwrap(); let out = stage.unwind(&provider, input).unwrap(); assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); @@ -434,7 +434,7 @@ mod tests { // run let input = ExecInput { target: Some(20000), ..Default::default() }; - let mut stage = IndexAccountHistoryStage { + let mut stage = IndexAccountsHistoryStage { prune_mode: Some(PruneMode::Before(36)), ..Default::default() }; @@ -455,22 +455,22 @@ mod tests { assert!(table.is_empty()); } - stage_test_suite_ext!(IndexAccountHistoryTestRunner, index_account_history); + stage_test_suite_ext!(IndexAccountsHistoryTestRunner, index_account_history); - struct IndexAccountHistoryTestRunner { + struct IndexAccountsHistoryTestRunner { pub(crate) db: TestStageDB, commit_threshold: u64, prune_mode: Option<PruneMode>, } - impl Default for IndexAccountHistoryTestRunner { + impl Default for IndexAccountsHistoryTestRunner { fn default() -> Self { Self { db: TestStageDB::default(), commit_threshold: 1000, prune_mode: None } } } - impl StageTestRunner for IndexAccountHistoryTestRunner { - type S = IndexAccountHistoryStage; + impl StageTestRunner for IndexAccountsHistoryTestRunner { + type S = IndexAccountsHistoryStage; fn db(&self) -> &TestStageDB { &self.db @@ -481,7 +481,7 @@ mod tests { } } - impl ExecuteStageTestRunner for IndexAccountHistoryTestRunner { + impl ExecuteStageTestRunner for IndexAccountsHistoryTestRunner { type Seed = (); fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> { @@ -578,7 +578,7 @@ mod tests { } } - impl UnwindStageTestRunner for IndexAccountHistoryTestRunner { + impl UnwindStageTestRunner for IndexAccountsHistoryTestRunner { fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { let table = self.db.table::<tables::AccountsHistory>().unwrap(); assert!(table.is_empty()); diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index ff552a399d24..e50079f7ce24 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -13,7 +13,7 @@ use std::fmt::Debug; /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information /// on index sharding take a look at [`reth_db::tables::StorageHistory`].
#[derive(Debug)] -pub struct IndexStorageHistoryStage { +pub struct IndexStoragesHistoryStage { /// Number of blocks after which the control /// flow will be returned to the pipeline for commit. pub commit_threshold: u64, @@ -21,23 +21,23 @@ pub struct IndexStorageHistoryStage { pub prune_mode: Option<PruneMode>, } -impl IndexStorageHistoryStage { - /// Create new instance of [IndexStorageHistoryStage]. +impl IndexStoragesHistoryStage { + /// Create new instance of [IndexStoragesHistoryStage]. pub fn new(commit_threshold: u64, prune_mode: Option<PruneMode>) -> Self { Self { commit_threshold, prune_mode } } } -impl Default for IndexStorageHistoryStage { +impl Default for IndexStoragesHistoryStage { fn default() -> Self { Self { commit_threshold: 100_000, prune_mode: None } } } -impl<DB: Database> Stage<DB> for IndexStorageHistoryStage { +impl<DB: Database> Stage<DB> for IndexStoragesHistoryStage { /// Return the id of the stage fn id(&self) -> StageId { - StageId::IndexStorageHistory + StageId::IndexStoragesHistory } /// Execute the stage. @@ -188,7 +188,7 @@ mod tests { checkpoint: input_checkpoint .map(|block_number| StageCheckpoint { block_number, stage_checkpoint: None }), }; - let mut stage = IndexStorageHistoryStage::default(); + let mut stage = IndexStoragesHistoryStage::default(); let provider = db.factory.provider_rw().unwrap(); let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(run_to), done: true }); @@ -201,7 +201,7 @@ mod tests { unwind_to, ..Default::default() }; - let mut stage = IndexStorageHistoryStage::default(); + let mut stage = IndexStoragesHistoryStage::default(); let provider = db.factory.provider_rw().unwrap(); let out = stage.unwind(&provider, input).unwrap(); assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); @@ -448,7 +448,7 @@ mod tests { // run let input = ExecInput { target: Some(20000), ..Default::default() }; - let mut stage = IndexStorageHistoryStage { + let mut stage = IndexStoragesHistoryStage { prune_mode: Some(PruneMode::Before(36)), ..Default::default() }; @@ -469,22 +469,22 @@ mod tests { assert!(table.is_empty()); } - stage_test_suite_ext!(IndexStorageHistoryTestRunner, index_storage_history); + stage_test_suite_ext!(IndexStoragesHistoryTestRunner, index_storage_history); - struct IndexStorageHistoryTestRunner { + struct IndexStoragesHistoryTestRunner { pub(crate) db: TestStageDB, commit_threshold: u64, prune_mode: Option<PruneMode>, } - impl Default for IndexStorageHistoryTestRunner { + impl Default for IndexStoragesHistoryTestRunner { fn default() -> Self { Self { db: TestStageDB::default(), commit_threshold: 1000, prune_mode: None } } } - impl StageTestRunner for IndexStorageHistoryTestRunner { - type S = IndexStorageHistoryStage; + impl StageTestRunner for IndexStoragesHistoryTestRunner { + type S = IndexStoragesHistoryStage; fn db(&self) -> &TestStageDB { &self.db @@ -495,7 +495,7 @@ mod tests { } } - impl ExecuteStageTestRunner for IndexStorageHistoryTestRunner { + impl ExecuteStageTestRunner for IndexStoragesHistoryTestRunner { type Seed = (); fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> { @@ -600,7 +600,7 @@ mod tests { } } - impl UnwindStageTestRunner for IndexStorageHistoryTestRunner { + impl UnwindStageTestRunner for IndexStoragesHistoryTestRunner { fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { let table = self.db.table::<tables::StoragesHistory>().unwrap(); assert!(table.is_empty()); diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index a48b8d2d516a..79335d502265 100644
--- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -155,7 +155,7 @@ mod tests { ); // Check AccountHistory - let mut acc_indexing_stage = IndexAccountHistoryStage { + let mut acc_indexing_stage = IndexAccountsHistoryStage { prune_mode: prune_modes.account_history, ..Default::default() }; @@ -171,7 +171,7 @@ mod tests { } // Check StorageHistory - let mut storage_indexing_stage = IndexStorageHistoryStage { + let mut storage_indexing_stage = IndexStoragesHistoryStage { prune_mode: prune_modes.storage_history, ..Default::default() }; diff --git a/docs/crates/stages.md b/docs/crates/stages.md index 1ea64aaab146..e4f7cb3d12e2 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -3,6 +3,7 @@ The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are the `HeaderStage`, `BodyStage`, `SenderRecoveryStage`, and `ExecutionStage` (note that this list is non-exhaustive, and more pipeline stages will be added in the near future). Each of these stages is queued up and stored within the Reth pipeline. [File: crates/stages/src/pipeline/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/pipeline/mod.rs) + ```rust,ignore pub struct Pipeline<DB: Database> { stages: Vec<BoxedStage<DB>>, @@ -14,12 +15,12 @@ pub struct Pipeline<DB: Database> { } ``` - -When the node is first started, a new `Pipeline` is initialized and all of the stages are added into `Pipeline.stages`. Then, the `Pipeline::run` function is called, which starts the pipeline, executing all of the stages continuously in an infinite loop. This process syncs the chain, keeping everything up to date with the chain tip. +When the node is first started, a new `Pipeline` is initialized and all of the stages are added into `Pipeline.stages`. Then, the `Pipeline::run` function is called, which starts the pipeline, executing all of the stages continuously in an infinite loop. This process syncs the chain, keeping everything up to date with the chain tip. Each stage within the pipeline implements the `Stage` trait which provides function interfaces to get the stage id, execute the stage and unwind the changes to the database if there was an issue during the stage execution. [File: crates/stages/src/stage.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/stage.rs) + ```rust,ignore pub trait Stage<DB: Database>: Send + Sync { /// Get the ID of the stage. @@ -47,13 +48,14 @@ To get a better idea of what is happening at each part of the pipeline, lets wal
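+In sketch form, the loop that `Pipeline::run` drives looks roughly like the following. This is a hypothetical, heavily simplified model written for this walkthrough (the `SimpleStage` trait and `run_once` function are invented here, not reth APIs); the real pipeline also persists checkpoints, commits between stages and computes unwind targets:
+
+```rust,ignore
+// Invented for illustration: a stripped-down version of the `Stage` trait above.
+trait SimpleStage {
+    fn id(&self) -> &'static str;
+    /// Try to advance from `checkpoint` towards `target`, returning the block reached.
+    fn execute(&mut self, checkpoint: u64, target: u64) -> Result<u64, String>;
+    /// Roll the stage's database changes back to `to`.
+    fn unwind(&mut self, to: u64);
+}
+
+/// One pass over every stage in order; a failing stage is unwound back to the
+/// old checkpoint before the error is bubbled up to the pipeline.
+fn run_once(
+    stages: &mut [Box<dyn SimpleStage>],
+    checkpoint: u64,
+    target: u64,
+) -> Result<(), String> {
+    for stage in stages.iter_mut() {
+        match stage.execute(checkpoint, target) {
+            Ok(reached) => println!("{} reached block {reached}", stage.id()),
+            Err(err) => {
+                stage.unwind(checkpoint);
+                return Err(err);
+            }
+        }
+    }
+    Ok(())
+}
+```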
- ## HeaderStage + The `HeaderStage` is responsible for syncing the block headers, validating the header integrity and writing the headers to the database. When the `execute()` function is called, the local head of the chain is updated to the most recent block height previously executed by the stage. At this point, the node status is also updated with that block's height, hash and total difficulty. These values are used during any new eth/65 handshakes. After updating the head, a stream is established with other peers in the network to sync the missing chain headers between the most recent state stored in the database and the chain tip. The `HeaderStage` contains a `downloader` attribute, which is a type that implements the `HeaderDownloader` trait. A `HeaderDownloader` is a `Stream` that returns batches of headers. [File: crates/interfaces/src/p2p/headers/downloader.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/interfaces/src/p2p/headers/downloader.rs) + ```rust,ignore pub trait HeaderDownloader: Send + Sync + Stream<Item = Vec<SealedHeader>> + Unpin { /// Updates the gap to sync which ranges from local head to the sync target @@ -75,9 +77,10 @@ pub trait HeaderDownloader: Send + Sync + Stream<Item = Vec<SealedHeader>> + Unp } ``` -The `HeaderStage` relies on the downloader stream to return the headers in descending order starting from the chain tip down to the latest block in the database. While other stages in the `Pipeline` start from the most recent block in the database up to the chain tip, the `HeaderStage` works in reverse to avoid [long-range attacks](https://messari.io/report/long-range-attack). When a node downloads headers in ascending order, it will not know if it is being subjected to a long-range attack until it reaches the most recent blocks. To combat this, the `HeaderStage` starts by getting the chain tip from the Consensus Layer, verifies the tip, and then walks backwards by the parent hash. Each value yielded from the stream is a `SealedHeader`. +The `HeaderStage` relies on the downloader stream to return the headers in descending order starting from the chain tip down to the latest block in the database. While other stages in the `Pipeline` start from the most recent block in the database up to the chain tip, the `HeaderStage` works in reverse to avoid [long-range attacks](https://messari.io/report/long-range-attack). When a node downloads headers in ascending order, it will not know if it is being subjected to a long-range attack until it reaches the most recent blocks. To combat this, the `HeaderStage` starts by getting the chain tip from the Consensus Layer, verifies the tip, and then walks backwards by the parent hash. Each value yielded from the stream is a `SealedHeader`. [File: crates/primitives/src/header.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/header.rs) + ```rust,ignore pub struct SealedHeader { /// Locked Header fields. @@ -88,19 +91,21 @@ pub struct SealedHeader { ``` + Each `SealedHeader` is then validated to ensure that it has the proper parent. Note that this is only a basic response validation, and the `HeaderDownloader` uses the `validate` method during the `stream`, so that each header is validated according to the consensus specification before the header is yielded from the stream. After this, each header is then written to the database.
If a header is not valid or the stream encounters any other error, the error is propagated up through the stage execution, the changes to the database are unwound and the stage is resumed from the most recent valid state. -This process continues until all of the headers have been downloaded and written to the database. Finally, the total difficulty of the chain's head is updated and the function returns `Ok(ExecOutput { stage_progress, done: true })`, signaling that the header sync has completed successfully. +This process continues until all of the headers have been downloaded and written to the database. Finally, the total difficulty of the chain's head is updated and the function returns `Ok(ExecOutput { stage_progress, done: true })`, signaling that the header sync has completed successfully.
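+To make the walk-backwards validation concrete, here is a minimal sketch with simplified stand-in types (this `SealedHeader` struct and `validate_descending` function are illustrative only, not reth's actual types or consensus checks):
+
+```rust,ignore
+// Hypothetical, pared-down header: reth's `SealedHeader` wraps a full `Header`
+// together with its pre-computed ("sealed") hash.
+#[derive(Clone)]
+struct SealedHeader {
+    number: u64,
+    hash: [u8; 32],
+    parent_hash: [u8; 32],
+}
+
+/// Check that a batch yielded tip-first attaches to the already-verified tip:
+/// each header must be the parent of the previously accepted one.
+fn validate_descending(tip: &SealedHeader, batch: &[SealedHeader]) -> Result<(), String> {
+    let mut child = tip.clone();
+    for header in batch {
+        if header.hash != child.parent_hash || header.number + 1 != child.number {
+            return Err(format!("header {} does not attach to {}", header.number, child.number));
+        }
+        child = header.clone();
+    }
+    Ok(())
+}
+```
+
+Because the tip itself is fetched from the Consensus Layer and verified first, a bogus batch would only be accepted if an attacker could forge this parent-hash chain all the way down.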
## TotalDifficultyStage -* TODO: explain stage -
+ +- TODO: explain stage +
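+Until this section is written, a useful mental model: total difficulty is a running sum over headers, `td(n) = td(n - 1) + difficulty(n)`. A minimal sketch of that accumulation (illustrative only, not the stage's actual code):
+
+```rust,ignore
+/// Compute the total difficulty at each header in a range, given the total
+/// difficulty of the parent of the first header.
+fn total_difficulties(parent_td: u128, difficulties: &[u128]) -> Vec<u128> {
+    difficulties
+        .iter()
+        .scan(parent_td, |running, difficulty| {
+            *running += difficulty;
+            Some(*running)
+        })
+        .collect()
+}
+```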
## BodyStage -Once the `HeaderStage` completes successfully, the `BodyStage` will start execution. The body stage downloads block bodies for all of the new block headers that were stored locally in the database. The `BodyStage` first determines which block bodies to download by checking if the block body has an ommers hash and transaction root. +Once the `HeaderStage` completes successfully, the `BodyStage` will start execution. The body stage downloads block bodies for all of the new block headers that were stored locally in the database. The `BodyStage` first determines which block bodies to download by checking if the block body has an ommers hash and transaction root. An ommers hash is the Keccak 256-bit hash of the ommers list portion of the block. If you are unfamiliar with ommers blocks, you can [click here to learn more](https://ethereum.org/en/glossary/#ommer). Note that while ommers blocks were important for new blocks created during Ethereum's proof of work chain, Ethereum's proof of stake chain selects exactly one block proposer at a time, so ommers blocks are no longer needed in post-merge Ethereum. @@ -111,6 +116,7 @@ When the `BodyStage` is looking at the headers to determine which block to downl Once the `BodyStage` determines which block bodies to fetch, a new `bodies_stream` is created which downloads all of the bodies from the `starting_block`, up until the `target_block` specified. Each time the `bodies_stream` yields a value, a `SealedBlock` is created using the block header, the ommers hash and the newly downloaded block body. [File: crates/primitives/src/block.rs](https://github.com/paradigmxyz/reth/blob/main/crates/primitives/src/block.rs) + ```rust,ignore pub struct SealedBlock { /// Locked block header. @@ -122,7 +128,7 @@ pub struct SealedBlock { } ``` -The new block is then pre-validated, checking that the ommers hash and transactions root in the block header are the same in the block body. Following a successful pre-validation, the `BodyStage` loops through each transaction in the `block.body`, adding the transaction to the database. This process is repeated for every downloaded block body, with the `BodyStage` returning `Ok(ExecOutput { stage_progress, done: true })` signaling it successfully completed. +The new block is then pre-validated, checking that the ommers hash and transactions root in the block header are the same in the block body. Following a successful pre-validation, the `BodyStage` loops through each transaction in the `block.body`, adding the transaction to the database. This process is repeated for every downloaded block body, with the `BodyStage` returning `Ok(ExecOutput { stage_progress, done: true })` signaling it successfully completed.
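+A rough sketch of that pre-validation step, assuming `Header` and `BlockBody` from `reth_primitives`; the `calculate_ommers_root`/`calculate_tx_root` helper names are an assumption here, so treat this as illustrative rather than the stage's exact code:
+
+```rust,ignore
+use reth_primitives::{BlockBody, Header};
+
+fn pre_validate(header: &Header, body: &BlockBody) -> Result<(), String> {
+    // The header commits to the body: recompute both roots from the downloaded
+    // body and compare them against what the header claims.
+    if body.calculate_ommers_root() != header.ommers_hash {
+        return Err("ommers hash mismatch".to_string());
+    }
+    if body.calculate_tx_root() != header.transactions_root {
+        return Err("transactions root mismatch".to_string());
+    }
+    Ok(())
+}
+```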
@@ -131,6 +137,7 @@ The new block is then pre-validated, checking that the ommers hash and transacti ## SenderRecoveryStage Following a successful `BodyStage`, the `SenderRecoveryStage` starts to execute. The `SenderRecoveryStage` is responsible for recovering the transaction sender for each of the newly added transactions to the database. At the beginning of the execution function, all of the transactions are first retrieved from the database. Then the `SenderRecoveryStage` goes through each transaction and recovers the signer from the transaction signature and hash. The transaction hash is derived by taking the Keccak 256-bit hash of the RLP encoded transaction bytes. This hash is then passed into the `recover_signer` function. [File: crates/primitives/src/transaction/signature.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/transaction/signature.rs) + ```rust,ignore pub(crate) fn recover_signer(&self, hash: B256) -> Option<Address> {
let mut sig: [u8; 65] = [0; 65]; @@ -158,6 +165,7 @@ Once the transaction signer has been recovered, the signer is then added to the Finally, after all headers, bodies and senders are added to the database, the `ExecutionStage` starts to execute. This stage is responsible for executing all of the transactions and updating the state stored in the database. For every new block header added to the database, the corresponding transactions have their signers attached to them and `reth_blockchain_tree::executor::execute_and_verify_receipt()` is called, pushing the state changes resulting from the execution to a `Vec`. [File: crates/stages/src/stages/execution.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/stages/execution.rs) + ```rust,ignore pub fn execute_and_verify_receipt( block: &Block, @@ -168,44 +176,51 @@ pub fn execute_and_verify_receipt( ) -> Result ``` -After all headers and their corresponding transactions have been executed, all of the resulting state changes are applied to the database, updating account balances, account bytecode and other state changes. After applying all of the execution state changes, if there was a block reward, it is applied to the validator's account. +After all headers and their corresponding transactions have been executed, all of the resulting state changes are applied to the database, updating account balances, account bytecode and other state changes. After applying all of the execution state changes, if there was a block reward, it is applied to the validator's account. At the end of the `execute()` function, a familiar value is returned, `Ok(ExecOutput { stage_progress, done: true })` signaling a successful completion of the `ExecutionStage`.
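+Reduced to a sketch, that execute-then-apply flow looks like the following (the closures stand in for the EVM executor and the batched database write and are hypothetical; the real stage also bounds its batches with commit thresholds):
+
+```rust,ignore
+type BlockNumber = u64;
+
+/// Placeholder for the per-block state diff produced by execution.
+struct StateChanges;
+
+/// Execute every block in `range`, buffering each block's state changes and
+/// committing them only after the whole batch has executed, so a mid-batch
+/// failure never leaves half-applied state behind.
+fn execute_range<E, C>(
+    range: std::ops::RangeInclusive<BlockNumber>,
+    mut execute: E,
+    mut commit: C,
+) -> Result<(), String>
+where
+    E: FnMut(BlockNumber) -> Result<StateChanges, String>,
+    C: FnMut(Vec<StateChanges>),
+{
+    let mut buffered = Vec::new();
+    for number in range {
+        buffered.push(execute(number)?);
+    }
+    commit(buffered);
+    Ok(())
+}
+```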
## MerkleUnwindStage -* TODO: explain stage -
+ +- TODO: explain stage +
## AccountHashingStage -* TODO: explain stage -
+ +- TODO: explain stage +
## StorageHashingStage -* TODO: explain stage -
+ +- TODO: explain stage +
## MerkleExecuteStage -* TODO: explain stage -
+ +- TODO: explain stage +
## TransactionLookupStage -* TODO: explain stage -
-## IndexStorageHistoryStage -* TODO: explain stage -
+- TODO: explain stage +
-## IndexAccountHistoryStage -* TODO: explain stage -
+## IndexStoragesHistoryStage + +- TODO: explain stage +
+ +## IndexAccountsHistoryStage + +- TODO: explain stage +
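+Until this section is written, a loose model of the sharded index the stage maintains: a `BTreeMap` stands in for the `AccountsHistory` table, and the `u64::MAX` sentinel key mirrors reth's sharding scheme, but the shard size and key/value types are simplified for illustration:
+
+```rust,ignore
+use std::collections::BTreeMap;
+
+type Address = [u8; 20];
+
+/// Tiny shard size for illustration; real shards hold many more entries.
+const SHARD_LEN: usize = 4;
+
+/// Record that `address` changed at `block`. Shards are keyed by
+/// (address, highest block in shard); the open shard uses u64::MAX so that a
+/// range seek for (address, n) lands on the first shard covering block n.
+fn append_change(index: &mut BTreeMap<(Address, u64), Vec<u64>>, address: Address, block: u64) {
+    let shard = index.entry((address, u64::MAX)).or_default();
+    shard.push(block);
+    if shard.len() >= SHARD_LEN {
+        // Close the full shard by re-keying it to its highest block number.
+        let closed = index.remove(&(address, u64::MAX)).unwrap();
+        let highest = *closed.last().unwrap();
+        index.insert((address, highest), closed);
+    }
+}
+```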
## FinishStage -* TODO: explain stage -
+- TODO: explain stage +
# Next Chapter diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 20aa89d01864..2ec2d8428472 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -647,8 +647,8 @@ "Execution": 4, "Finish": 12, "Headers": 0, - "IndexAccountHistory": 11, - "IndexStorageHistory": 10, + "IndexAccountsHistory": 11, + "IndexStoragesHistory": 10, "MerkleExecute": 8, "MerkleUnwind": 5, "SenderRecovery": 3,