From 069f3e3039d510ca56d2058e1be82b6f128784c3 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 16 Sep 2024 12:53:25 +0300 Subject: [PATCH 01/10] wip --- crates/stages/api/src/pipeline/builder.rs | 27 +++--- crates/stages/api/src/pipeline/mod.rs | 21 ++-- crates/stages/api/src/pipeline/set.rs | 44 ++++----- crates/stages/api/src/stage.rs | 26 +++-- crates/stages/api/src/test_utils.rs | 2 +- crates/stages/stages/src/sets.rs | 97 +++++++++++++------ crates/stages/stages/src/stages/bodies.rs | 36 ++++--- crates/stages/stages/src/stages/execution.rs | 46 +++++---- crates/stages/stages/src/stages/finish.rs | 8 +- .../stages/src/stages/hashing_account.rs | 22 ++--- .../stages/src/stages/hashing_storage.rs | 20 ++-- crates/stages/stages/src/stages/headers.rs | 21 ++-- .../src/stages/index_account_history.rs | 14 +-- .../src/stages/index_storage_history.rs | 14 +-- crates/stages/stages/src/stages/merkle.rs | 30 +++--- crates/stages/stages/src/stages/prune.rs | 45 +++++---- .../stages/src/stages/sender_recovery.rs | 34 ++++--- crates/stages/stages/src/stages/tx_lookup.rs | 26 +++-- crates/stages/stages/src/stages/utils.rs | 14 +-- .../src/providers/database/provider.rs | 2 +- 20 files changed, 302 insertions(+), 247 deletions(-) diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 8493504939f3..6b0dc0e93228 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -1,19 +1,15 @@ use crate::{pipeline::BoxedStage, MetricEventsSender, Pipeline, Stage, StageId, StageSet}; use alloy_primitives::{BlockNumber, B256}; use reth_db_api::database::Database; -use reth_node_types::NodeTypesWithDB; -use reth_provider::ProviderFactory; +use reth_provider::{providers::ProviderNodeTypes, DatabaseProviderFactory, ProviderFactory}; use reth_static_file::StaticFileProducer; use tokio::sync::watch; /// Builds a [`Pipeline`]. #[must_use = "call `build` to construct the pipeline"] -pub struct PipelineBuilder -where - DB: Database, -{ +pub struct PipelineBuilder { /// All configured stages in the order they will be executed. - stages: Vec>, + stages: Vec>, /// The maximum block number to sync to. max_block: Option, /// A receiver for the current chain tip to sync to. @@ -21,14 +17,11 @@ where metrics_tx: Option, } -impl PipelineBuilder -where - DB: Database, -{ +impl PipelineBuilder { /// Add a stage to the pipeline. pub fn add_stage(mut self, stage: S) -> Self where - S: Stage + 'static, + S: Stage + 'static, { self.stages.push(Box::new(stage)); self @@ -41,7 +34,7 @@ where /// To customize the stages in the set (reorder, disable, insert a stage) call /// [`builder`][StageSet::builder] on the set which will convert it to a /// [`StageSetBuilder`][crate::StageSetBuilder]. - pub fn add_stages>(mut self, set: Set) -> Self { + pub fn add_stages>(mut self, set: Set) -> Self { for stage in set.builder().build() { self.stages.push(stage); } @@ -69,11 +62,15 @@ where } /// Builds the final [`Pipeline`] using the given database. 
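// Aside: a minimal sketch of assembling a pipeline against the now
// provider-generic builder. `Pipeline::builder()` as the entry point and the
// `my_stage`/`my_stage_set` values are assumptions for illustration; only
// `add_stage`, `add_stages`, and `build` are visible in this diff. Every
// added stage must implement Stage over the factory's read-write provider,
// i.e. Stage<<ProviderFactory<N> as DatabaseProviderFactory>::ProviderRW>.
let pipeline = Pipeline::<N>::builder()
    .add_stage(my_stage)
    .add_stages(my_stage_set)
    .build(provider_factory, static_file_producer);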
- pub fn build>( + pub fn build( self, provider_factory: ProviderFactory, static_file_producer: StaticFileProducer>, - ) -> Pipeline { + ) -> Pipeline + where + N: ProviderNodeTypes, + ProviderFactory: DatabaseProviderFactory, + { let Self { stages, max_block, tip_tx, metrics_tx } = self; Pipeline { provider_factory, diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index a94112396af5..73f6b134d2ed 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -5,12 +5,11 @@ use crate::{PipelineTarget, StageCheckpoint, StageId}; use alloy_primitives::{BlockNumber, B256}; pub use event::*; use futures_util::Future; -use reth_node_types::NodeTypesWithDB; use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ - providers::ProviderNodeTypes, writer::UnifiedStorageWriter, FinalizedBlockReader, - FinalizedBlockWriter, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, - StaticFileProviderFactory, + providers::ProviderNodeTypes, writer::UnifiedStorageWriter, DatabaseProviderFactory, + FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory, StageCheckpointReader, + StageCheckpointWriter, StaticFileProviderFactory, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -64,11 +63,11 @@ pub type PipelineWithResult = (Pipeline, Result { +pub struct Pipeline { /// Provider factory. provider_factory: ProviderFactory, /// All configured stages in the order they will be executed. - stages: Vec>, + stages: Vec as DatabaseProviderFactory>::ProviderRW>>, /// The maximum block number to sync to. max_block: Option, static_file_producer: StaticFileProducer>, @@ -276,7 +275,7 @@ impl Pipeline { // Unwind stages in reverse order of execution let unwind_pipeline = self.stages.iter_mut().rev(); - let mut provider_rw = self.provider_factory.provider_rw()?; + let mut provider_rw = self.provider_factory.database_provider_rw()?; for stage in unwind_pipeline { let stage_id = stage.id(); @@ -354,7 +353,7 @@ impl Pipeline { stage.post_unwind_commit()?; - provider_rw = self.provider_factory.provider_rw()?; + provider_rw = self.provider_factory.database_provider_rw()?; } Err(err) => { self.event_sender.notify(PipelineEvent::Error { stage_id }); @@ -423,7 +422,7 @@ impl Pipeline { }; } - let provider_rw = self.provider_factory.provider_rw()?; + let provider_rw = self.provider_factory.database_provider_rw()?; self.event_sender.notify(PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { @@ -513,7 +512,7 @@ fn on_stage_error( // FIXME: When handling errors, we do not commit the database transaction. This // leads to the Merkle stage not clearing its checkpoint, and restarting from an // invalid place. 
- let provider_rw = factory.provider_rw()?; + let provider_rw = factory.database_provider_rw()?; provider_rw.save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?; provider_rw.save_stage_checkpoint( StageId::MerkleExecute, @@ -574,7 +573,7 @@ fn on_stage_error( } } -impl std::fmt::Debug for Pipeline { +impl std::fmt::Debug for Pipeline { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Pipeline") .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) diff --git a/crates/stages/api/src/pipeline/set.rs b/crates/stages/api/src/pipeline/set.rs index baa9b0f3fcda..d3a9b17893d7 100644 --- a/crates/stages/api/src/pipeline/set.rs +++ b/crates/stages/api/src/pipeline/set.rs @@ -1,5 +1,4 @@ use crate::{Stage, StageId}; -use reth_db_api::database::Database; use std::{ collections::HashMap, fmt::{Debug, Formatter}, @@ -11,26 +10,26 @@ use std::{ /// individual stage sets to determine what kind of configuration they expose. /// /// Individual stages in the set can be added, removed and overridden using [`StageSetBuilder`]. -pub trait StageSet: Sized { +pub trait StageSet: Sized { /// Configures the stages in the set. - fn builder(self) -> StageSetBuilder; + fn builder(self) -> StageSetBuilder; /// Overrides the given [`Stage`], if it is in this set. /// /// # Panics /// /// Panics if the [`Stage`] is not in this set. - fn set + 'static>(self, stage: S) -> StageSetBuilder { + fn set + 'static>(self, stage: S) -> StageSetBuilder { self.builder().set(stage) } } -struct StageEntry { - stage: Box>, +struct StageEntry { + stage: Box>, enabled: bool, } -impl Debug for StageEntry { +impl Debug for StageEntry { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("StageEntry") .field("stage", &self.stage.id()) @@ -45,18 +44,18 @@ impl Debug for StageEntry { /// to the final sync pipeline before/after their dependencies. /// /// Stages inside the set can be disabled, enabled, overridden and reordered. -pub struct StageSetBuilder { - stages: HashMap>, +pub struct StageSetBuilder { + stages: HashMap>, order: Vec, } -impl Default for StageSetBuilder { +impl Default for StageSetBuilder { fn default() -> Self { Self { stages: HashMap::new(), order: Vec::new() } } } -impl Debug for StageSetBuilder { +impl Debug for StageSetBuilder { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("StageSetBuilder") .field("stages", &self.stages) @@ -65,17 +64,14 @@ impl Debug for StageSetBuilder { } } -impl StageSetBuilder -where - DB: Database, -{ +impl StageSetBuilder { fn index_of(&self, stage_id: StageId) -> usize { let index = self.order.iter().position(|&id| id == stage_id); index.unwrap_or_else(|| panic!("Stage does not exist in set: {stage_id}")) } - fn upsert_stage_state(&mut self, stage: Box>, added_at_index: usize) { + fn upsert_stage_state(&mut self, stage: Box>, added_at_index: usize) { let stage_id = stage.id(); if self.stages.insert(stage.id(), StageEntry { stage, enabled: true }).is_some() { if let Some(to_remove) = self @@ -95,7 +91,7 @@ where /// # Panics /// /// Panics if the [`Stage`] is not in this set. - pub fn set + 'static>(mut self, stage: S) -> Self { + pub fn set + 'static>(mut self, stage: S) -> Self { let entry = self .stages .get_mut(&stage.id()) @@ -107,7 +103,7 @@ where /// Adds the given [`Stage`] at the end of this set. /// /// If the stage was already in the group, it is removed from its previous place. 
- pub fn add_stage + 'static>(mut self, stage: S) -> Self { + pub fn add_stage + 'static>(mut self, stage: S) -> Self { let target_index = self.order.len(); self.order.push(stage.id()); self.upsert_stage_state(Box::new(stage), target_index); @@ -117,7 +113,7 @@ where /// Adds the given [`Stage`] at the end of this set if it's [`Some`]. /// /// If the stage was already in the group, it is removed from its previous place. - pub fn add_stage_opt + 'static>(self, stage: Option) -> Self { + pub fn add_stage_opt + 'static>(self, stage: Option) -> Self { if let Some(stage) = stage { self.add_stage(stage) } else { @@ -129,7 +125,7 @@ where /// /// If a stage is in both sets, it is removed from its previous place in this set. Because of /// this, it is advisable to merge sets first and re-order stages after if needed. - pub fn add_set>(mut self, set: Set) -> Self { + pub fn add_set>(mut self, set: Set) -> Self { for stage in set.builder().build() { let target_index = self.order.len(); self.order.push(stage.id()); @@ -145,7 +141,7 @@ where /// # Panics /// /// Panics if the dependency stage is not in this set. - pub fn add_before + 'static>(mut self, stage: S, before: StageId) -> Self { + pub fn add_before + 'static>(mut self, stage: S, before: StageId) -> Self { let target_index = self.index_of(before); self.order.insert(target_index, stage.id()); self.upsert_stage_state(Box::new(stage), target_index); @@ -159,7 +155,7 @@ where /// # Panics /// /// Panics if the dependency stage is not in this set. - pub fn add_after + 'static>(mut self, stage: S, after: StageId) -> Self { + pub fn add_after + 'static>(mut self, stage: S, after: StageId) -> Self { let target_index = self.index_of(after) + 1; self.order.insert(target_index, stage.id()); self.upsert_stage_state(Box::new(stage), target_index); @@ -240,7 +236,7 @@ where } /// Consumes the builder and returns the contained [`Stage`]s in the order specified. - pub fn build(mut self) -> Vec>> { + pub fn build(mut self) -> Vec>> { let mut stages = Vec::new(); for id in &self.order { if let Some(entry) = self.stages.remove(id) { @@ -253,7 +249,7 @@ where } } -impl StageSet for StageSetBuilder { +impl StageSet for StageSetBuilder { fn builder(self) -> Self { self } diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs index 73d69b18177c..162f55d7afec 100644 --- a/crates/stages/api/src/stage.rs +++ b/crates/stages/api/src/stage.rs @@ -1,7 +1,6 @@ use crate::{error::StageError, StageCheckpoint, StageId}; use alloy_primitives::{BlockNumber, TxNumber}; -use reth_db_api::database::Database; -use reth_provider::{BlockReader, DatabaseProviderRW, ProviderError, TransactionsProvider}; +use reth_provider::{BlockReader, ProviderError}; use std::{ cmp::{max, min}, future::{poll_fn, Future}, @@ -71,11 +70,14 @@ impl ExecInput { /// Return the next block range determined the number of transactions within it. /// This function walks the block indices until either the end of the range is reached or /// the number of transactions exceeds the threshold. - pub fn next_block_range_with_transaction_threshold( + pub fn next_block_range_with_transaction_threshold( &self, - provider: &DatabaseProviderRW, + provider: &Provider, tx_threshold: u64, - ) -> Result<(Range, RangeInclusive, bool), StageError> { + ) -> Result<(Range, RangeInclusive, bool), StageError> + where + Provider: BlockReader, + { let start_block = self.next_block(); let target_block = self.target(); @@ -188,7 +190,7 @@ pub struct UnwindOutput { /// /// Stages receive [`DatabaseProviderRW`]. 
#[auto_impl::auto_impl(Box)] -pub trait Stage: Send + Sync { +pub trait Stage: Send + Sync { /// Get the ID of the stage. /// /// Stage IDs must be unique. @@ -229,11 +231,7 @@ pub trait Stage: Send + Sync { /// Execute the stage. /// It is expected that the stage will write all necessary data to the database /// upon invoking this method. - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result; + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result; /// Post execution commit hook. /// @@ -247,7 +245,7 @@ pub trait Stage: Send + Sync { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result; @@ -262,7 +260,7 @@ pub trait Stage: Send + Sync { } /// [Stage] trait extension. -pub trait StageExt: Stage { +pub trait StageExt: Stage { /// Utility extension for the `Stage` trait that invokes `Stage::poll_execute_ready` /// with [`poll_fn`] context. For more information see [`Stage::poll_execute_ready`]. fn execute_ready( @@ -273,4 +271,4 @@ pub trait StageExt: Stage { } } -impl> StageExt for S {} +impl> StageExt for S {} diff --git a/crates/stages/api/src/test_utils.rs b/crates/stages/api/src/test_utils.rs index 8d76cee31bef..3a7a244dbced 100644 --- a/crates/stages/api/src/test_utils.rs +++ b/crates/stages/api/src/test_utils.rs @@ -44,7 +44,7 @@ impl TestStage { } } -impl Stage for TestStage { +impl Stage for TestStage { fn id(&self) -> StageId { self.id } diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index d97b31e061c7..62fb13f86232 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -43,12 +43,18 @@ use crate::{ }; use reth_config::config::StageConfig; use reth_consensus::Consensus; +use reth_db::transaction::DbTxMut; use reth_db_api::database::Database; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}; use reth_primitives::B256; -use reth_provider::HeaderSyncGapProvider; +use reth_provider::{ + AccountExtReader, BlockReader, DBProvider, HashingWriter, HeaderProvider, + HeaderSyncGapProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, + StaticFileProviderFactory, StatsReader, StorageReader, TrieWriter, +}; use reth_prune_types::PruneModes; +use reth_stages_api::Stage; use std::{ops::Not, sync::Arc}; use tokio::sync::watch; @@ -119,17 +125,20 @@ impl DefaultStages { } } -impl DefaultStages +impl DefaultStages where E: BlockExecutorProvider, { /// Appends the default offline stages and default finish stage to the given builder. 
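// Aside: a hedged sketch of a custom stage written against the reworked,
// provider-generic trait. `MyStage` is hypothetical, and the `StatsReader`
// bound is just one example of requesting a capability; the shape mirrors
// the FinishStage and TestStage implementations elsewhere in this series.
struct MyStage;

impl<Provider: StatsReader> Stage<Provider> for MyStage {
    fn id(&self) -> StageId {
        StageId::Other("MyStage")
    }

    fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
        // The stage sees only the capabilities named in its bounds.
        let _headers = provider.count_entries::<tables::Headers>()?;
        Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true })
    }

    fn unwind(&mut self, _provider: &Provider, input: UnwindInput) -> Result<UnwindOutput, StageError> {
        Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
    }
}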
- pub fn add_offline_stages( - default_offline: StageSetBuilder, + pub fn add_offline_stages( + default_offline: StageSetBuilder, executor_factory: E, stages_config: StageConfig, prune_modes: PruneModes, - ) -> StageSetBuilder { + ) -> StageSetBuilder + where + OfflineStages: StageSet, + { StageSetBuilder::default() .add_set(default_offline) .add_set(OfflineStages::new(executor_factory, stages_config, prune_modes)) @@ -137,15 +146,16 @@ where } } -impl StageSet for DefaultStages +impl StageSet for DefaultStages where - Provider: HeaderSyncGapProvider + 'static, + P: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, E: BlockExecutorProvider, - DB: Database + 'static, + OnlineStages: StageSet, + OfflineStages: StageSet, { - fn builder(self) -> StageSetBuilder { + fn builder(self) -> StageSetBuilder { Self::add_offline_stages( self.online.builder(), self.executor_factory, @@ -189,29 +199,37 @@ impl OnlineStages { } } -impl OnlineStages +impl OnlineStages where - Provider: HeaderSyncGapProvider + 'static, + P: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, { /// Create a new builder using the given headers stage. - pub fn builder_with_headers( - headers: HeaderStage, + pub fn builder_with_headers( + headers: HeaderStage, body_downloader: B, - ) -> StageSetBuilder { + ) -> StageSetBuilder + where + HeaderStage: Stage, + BodyStage: Stage, + { StageSetBuilder::default().add_stage(headers).add_stage(BodyStage::new(body_downloader)) } /// Create a new builder using the given bodies stage. - pub fn builder_with_bodies( + pub fn builder_with_bodies( bodies: BodyStage, - provider: Provider, + provider: P, tip: watch::Receiver, header_downloader: H, consensus: Arc, stages_config: StageConfig, - ) -> StageSetBuilder { + ) -> StageSetBuilder + where + BodyStage: Stage, + HeaderStage: Stage, + { StageSetBuilder::default() .add_stage(HeaderStage::new( provider, @@ -224,14 +242,15 @@ where } } -impl StageSet for OnlineStages +impl StageSet for OnlineStages where - DB: Database, - Provider: HeaderSyncGapProvider + 'static, + P: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, + HeaderStage: Stage, + BodyStage: Stage, { - fn builder(self) -> StageSetBuilder { + fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(HeaderStage::new( self.provider, @@ -275,12 +294,16 @@ impl OfflineStages { } } -impl StageSet for OfflineStages +impl StageSet for OfflineStages where E: BlockExecutorProvider, - DB: Database + 'static, + ExecutionStages: StageSet, + PruneSenderRecoveryStage: Stage, + HashingStages: StageSet, + HistoryIndexingStages: StageSet, + PruneStage: Stage, { - fn builder(self) -> StageSetBuilder { + fn builder(self) -> StageSetBuilder { ExecutionStages::new( self.executor_factory, self.stages_config.clone(), @@ -328,12 +351,13 @@ impl ExecutionStages { } } -impl StageSet for ExecutionStages +impl StageSet for ExecutionStages where - DB: Database, E: BlockExecutorProvider, + SenderRecoveryStage: Stage, + ExecutionStage: Stage, { - fn builder(self) -> StageSetBuilder { + fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(SenderRecoveryStage::new(self.stages_config.sender_recovery)) .add_stage(ExecutionStage::from_config( @@ -353,8 +377,14 @@ pub struct HashingStages { stages_config: StageConfig, } -impl StageSet for HashingStages { - fn builder(self) -> StageSetBuilder { +impl StageSet for HashingStages +where + MerkleStage: Stage, 
+ AccountHashingStage: Stage, + StorageHashingStage: Stage, + MerkleStage: Stage, +{ + fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(MerkleStage::default_unwind()) .add_stage(AccountHashingStage::new( @@ -379,8 +409,13 @@ pub struct HistoryIndexingStages { prune_modes: PruneModes, } -impl StageSet for HistoryIndexingStages { - fn builder(self) -> StageSetBuilder { +impl StageSet for HistoryIndexingStages +where + TransactionLookupStage: Stage, + IndexStorageHistoryStage: Stage, + IndexAccountHistoryStage: Stage, +{ + fn builder(self) -> StageSetBuilder { StageSetBuilder::default() .add_stage(TransactionLookupStage::new( self.stages_config.transaction_lookup, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 17515473809f..08a834a609f7 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -17,7 +17,8 @@ use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockRespon use reth_primitives::{StaticFileSegment, TxNumber}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError, StatsReader, + BlockReader, DBProvider, DatabaseProviderRW, HeaderProvider, ProviderError, + StaticFileProviderFactory, StatsReader, }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, @@ -70,7 +71,10 @@ impl BodyStage { } } -impl Stage for BodyStage { +impl Stage for BodyStage +where + Provider: DBProvider + StaticFileProviderFactory + StatsReader + BlockReader, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::Bodies @@ -106,11 +110,7 @@ impl Stage for BodyStage { /// Download block bodies from the last checkpoint for this stage up until the latest synced /// header, limited by the stage's batch size. - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -155,7 +155,7 @@ impl Stage for BodyStage { Ordering::Less => { return Err(missing_static_data_error( next_static_file_tx_num.saturating_sub(1), - static_file_provider, + &static_file_provider, provider, )?) } @@ -264,7 +264,7 @@ impl Stage for BodyStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { self.buffer.take(); @@ -327,7 +327,7 @@ impl Stage for BodyStage { if db_tx_num > static_file_tx_num { return Err(missing_static_data_error( static_file_tx_num, - static_file_provider, + &static_file_provider, provider, )?) } @@ -343,11 +343,14 @@ impl Stage for BodyStage { } } -fn missing_static_data_error( +fn missing_static_data_error( last_tx_num: TxNumber, static_file_provider: &StaticFileProvider, - provider: &DatabaseProviderRW, -) -> Result { + provider: &Provider, +) -> Result +where + Provider: BlockReader, +{ let mut last_block = static_file_provider .get_highest_static_file_block(StaticFileSegment::Transactions) .unwrap_or_default(); @@ -377,9 +380,10 @@ fn missing_static_data_error( // TODO(alexey): ideally, we want to measure Bodies stage progress in bytes, but it's hard to know // beforehand how many bytes we need to download. So the good solution would be to measure the // progress in gas as a proxy to size. Execution stage uses a similar approach. 
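// Aside: a usage sketch of the StageSet machinery under the new scheme.
// `MyStage`, `AnotherStage`, and `my_offline_set` are placeholders; the
// builder calls are the ones defined in set.rs above, and the `where`
// bounds mirror how sets.rs now constrains each stage against the provider.
fn build_stages<Provider>(my_offline_set: impl StageSet<Provider>) -> Vec<Box<dyn Stage<Provider>>>
where
    MyStage: Stage<Provider> + 'static,
    AnotherStage: Stage<Provider> + 'static,
{
    StageSetBuilder::default()
        .add_set(my_offline_set)                   // merge a whole set, keeping its order
        .add_stage(MyStage)                        // append at the end
        .add_before(AnotherStage, StageId::Finish) // splice in before a dependency
        .build()                                   // boxed stages in execution order
}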
-fn stage_checkpoint( - provider: &DatabaseProviderRW, -) -> ProviderResult { +fn stage_checkpoint(provider: &Provider) -> ProviderResult +where + Provider: StatsReader + StaticFileProviderFactory, +{ Ok(EntitiesCheckpoint { processed: provider.count_entries::()? as u64, // Count only static files entries. If we count the database entries too, we may have diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index dddb892f1c94..44c6b3fc5fdd 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -14,8 +14,9 @@ use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, writer::UnifiedStorageWriter, - BlockReader, DatabaseProviderRW, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, StateWriter, StatsReader, TransactionVariant, + BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, + ProviderError, StateWriter, StaticFileProviderFactory, StatsReader, + TransactionVariant, }; use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; @@ -149,9 +150,9 @@ impl ExecutionStage { /// Given that `start_block` changes with each checkpoint, it's necessary to inspect /// [`tables::AccountsTrie`] to ensure that [`super::MerkleStage`] hasn't /// been previously executed. - fn adjust_prune_modes( + fn adjust_prune_modes( &self, - provider: &DatabaseProviderRW, + provider: &impl StatsReader, start_block: u64, max_block: u64, ) -> Result { @@ -169,10 +170,11 @@ impl ExecutionStage { } } -impl Stage for ExecutionStage +impl Stage for ExecutionStage where - DB: Database, E: BlockExecutorProvider, + Provider: DBProvider + BlockReader + StaticFileProviderFactory + StatsReader, + for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a>>: StateWriter, { /// Return the id of the stage fn id(&self) -> StageId { @@ -190,11 +192,7 @@ where } /// Execute the stage - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -209,7 +207,8 @@ where self.prune_modes.receipts_log_filter.is_empty() { debug!(target: "sync::stages::execution", start = start_block, "Preparing static file producer"); - let mut producer = prepare_static_file_producer(provider, start_block)?; + let mut producer = + prepare_static_file_producer(provider, &static_file_provider, start_block)?; // Since there might be a database <-> static file inconsistency (read // `prepare_static_file_producer` for context), we commit the change straight away. producer.commit()?; @@ -228,8 +227,12 @@ where // Progress tracking let mut stage_progress = start_block; - let mut stage_checkpoint = - execution_checkpoint(static_file_provider, start_block, max_block, input.checkpoint())?; + let mut stage_checkpoint = execution_checkpoint( + &static_file_provider, + start_block, + max_block, + input.checkpoint(), + )?; let mut fetch_block_duration = Duration::default(); let mut execution_duration = Duration::default(); @@ -390,7 +393,7 @@ where /// Unwind the stage. 
fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { let (range, unwind_to, _) = @@ -432,7 +435,11 @@ where // prepare_static_file_producer does a consistency check that will unwind static files // if the expected highest receipt in the files is higher than the database. // Which is essentially what happens here when we unwind this stage. - let _static_file_producer = prepare_static_file_producer(provider, *range.start())?; + let _static_file_producer = prepare_static_file_producer( + provider, + &provider.static_file_provider(), + *range.start(), + )?; } else { // If there is any kind of receipt pruning/filtering we use the database, since static // files do not support filters. @@ -572,11 +579,13 @@ fn calculate_gas_used_from_headers( /// the height in the static file is higher**, it rolls back (unwinds) the static file. /// **Conversely, if the height in the database is lower**, it triggers a rollback in the database /// (by returning [`StageError`]) until the heights in both the database and static file match. -fn prepare_static_file_producer<'a, 'b, DB: Database>( - provider: &'b DatabaseProviderRW, +fn prepare_static_file_producer<'a, 'b, Provider>( + provider: &'b Provider, + static_file_provider: &'a StaticFileProvider, start_block: u64, ) -> Result, StageError> where + Provider: DBProvider + BlockReader + HeaderProvider, 'b: 'a, { // Get next expected receipt number @@ -588,7 +597,6 @@ where .unwrap_or(0); // Get next expected receipt number in static files - let static_file_provider = provider.static_file_provider(); let next_static_file_receipt_num = static_file_provider .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|num| num + 1) diff --git a/crates/stages/stages/src/stages/finish.rs b/crates/stages/stages/src/stages/finish.rs index 9eb3a6d762b8..0b8f8f9ee1ce 100644 --- a/crates/stages/stages/src/stages/finish.rs +++ b/crates/stages/stages/src/stages/finish.rs @@ -1,5 +1,3 @@ -use reth_db_api::database::Database; -use reth_provider::DatabaseProviderRW; use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; @@ -12,14 +10,14 @@ use reth_stages_api::{ #[non_exhaustive] pub struct FinishStage; -impl Stage for FinishStage { +impl Stage for FinishStage { fn id(&self) -> StageId { StageId::Finish } fn execute( &mut self, - _provider: &DatabaseProviderRW, + _provider: &Provider, input: ExecInput, ) -> Result { Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }) @@ -27,7 +25,7 @@ impl Stage for FinishStage { fn unwind( &mut self, - _provider: &DatabaseProviderRW, + _provider: &Provider, input: UnwindInput, ) -> Result { Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) }) diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 36cdde023a7e..99b7a068ab9c 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -3,12 +3,11 @@ use reth_config::config::{EtlConfig, HashingConfig}; use reth_db::{tables, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, - database::Database, transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; use reth_primitives::{keccak256, Account, B256}; -use reth_provider::{AccountExtReader, DatabaseProviderRW, HashingWriter, StatsReader}; +use reth_provider::{AccountExtReader, DBProvider, HashingWriter, StatsReader}; 
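// Aside: the consistency check in prepare_static_file_producer above boils
// down to a three-way comparison of the "next expected receipt number" on
// each side. This self-contained distillation is illustrative only; the real
// code unwinds via the static file producer or returns a StageError that
// makes the pipeline unwind the database.
enum Reconcile {
    UnwindStaticFile, // static files are ahead: roll them back to the database height
    UnwindDatabase,   // database is ahead: error out so the pipeline unwinds the db
    InSync,           // heights agree; nothing to reconcile
}

fn reconcile(next_db_receipt: u64, next_static_file_receipt: u64) -> Reconcile {
    match next_static_file_receipt.cmp(&next_db_receipt) {
        std::cmp::Ordering::Greater => Reconcile::UnwindStaticFile,
        std::cmp::Ordering::Less => Reconcile::UnwindDatabase,
        std::cmp::Ordering::Equal => Reconcile::InSync,
    }
}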
use reth_stages_api::{ AccountHashingCheckpoint, EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, @@ -59,7 +58,7 @@ impl AccountHashingStage { /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the /// account state in the `AccountChangeSets` table. pub fn seed( - provider: &DatabaseProviderRW, + provider: &Provider, opts: SeedOpts, ) -> Result, StageError> { use reth_db_api::models::AccountBeforeTx; @@ -125,18 +124,17 @@ impl Default for AccountHashingStage { } } -impl Stage for AccountHashingStage { +impl Stage for AccountHashingStage +where + Provider: DBProvider + HashingWriter + AccountExtReader + StatsReader, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::AccountHashing } /// Execute the stage. - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -225,7 +223,7 @@ impl Stage for AccountHashingStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = @@ -282,9 +280,7 @@ pub struct SeedOpts { pub txs: Range, } -fn stage_checkpoint_progress( - provider: &DatabaseProviderRW, -) -> ProviderResult { +fn stage_checkpoint_progress(provider: &impl StatsReader) -> ProviderResult { Ok(EntitiesCheckpoint { processed: provider.count_entries::()? as u64, total: provider.count_entries::()? as u64, diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index 0ff453f543ae..747745164533 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -3,14 +3,13 @@ use reth_config::config::{EtlConfig, HashingConfig}; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRW}, - database::Database, models::{BlockNumberAddress, CompactU256}, table::Decompress, transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; use reth_primitives::{keccak256, BufMut, StorageEntry, B256}; -use reth_provider::{DatabaseProviderRW, HashingWriter, StatsReader, StorageReader}; +use reth_provider::{DBProvider, DatabaseProviderRW, HashingWriter, StatsReader, StorageReader}; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, StorageHashingCheckpoint, UnwindInput, UnwindOutput, @@ -62,18 +61,17 @@ impl Default for StorageHashingStage { } } -impl Stage for StorageHashingStage { +impl Stage for StorageHashingStage +where + Provider: DBProvider + StorageReader + HashingWriter + StatsReader, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::StorageHashing } /// Execute the stage. - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { let tx = provider.tx_ref(); if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) @@ -164,7 +162,7 @@ impl Stage for StorageHashingStage { /// Unwind the stage. 
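// Aside: both hashing stages report progress through the entity-count helper
// above. A hedged sketch of reading such a checkpoint as a completion ratio
// (`processed` and `total` are the EntitiesCheckpoint fields used throughout):
fn fmt_entities_progress(checkpoint: &EntitiesCheckpoint) -> String {
    if checkpoint.total == 0 {
        return String::from("100.0%") // nothing to process
    }
    format!("{:.1}%", 100.0 * checkpoint.processed as f64 / checkpoint.total as f64)
}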
fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = @@ -199,9 +197,7 @@ fn collect( Ok(()) } -fn stage_checkpoint_progress( - provider: &DatabaseProviderRW, -) -> ProviderResult { +fn stage_checkpoint_progress(provider: &impl StatsReader) -> ProviderResult { Ok(EntitiesCheckpoint { processed: provider.count_entries::()? as u64, total: provider.count_entries::()? as u64, diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index dfc183e26363..e313827c5ae3 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -13,7 +13,8 @@ use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDown use reth_primitives::{BlockHash, BlockNumber, SealedHeader, StaticFileSegment, B256}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockHashReader, DatabaseProviderRW, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, + BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, + StaticFileProviderFactory, }; use reth_stages_api::{ BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, @@ -88,9 +89,9 @@ where /// /// Writes to static files ( `Header | HeaderTD | HeaderHash` ) and [`tables::HeaderNumbers`] /// database table. - fn write_headers( + fn write_headers( &mut self, - provider: &DatabaseProviderRW, + provider: &impl DBProvider, static_file_provider: StaticFileProvider, ) -> Result { let total_headers = self.header_collector.len(); @@ -183,11 +184,11 @@ where } } -impl Stage for HeaderStage +impl Stage for HeaderStage where - DB: Database, - Provider: HeaderSyncGapProvider, + P: HeaderSyncGapProvider, D: HeaderDownloader, + Provider: DBProvider + StaticFileProviderFactory, { /// Return the id of the stage fn id(&self) -> StageId { @@ -259,11 +260,7 @@ where /// Download the headers in reverse order (falling block numbers) /// starting from the tip of the chain - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { let current_checkpoint = input.checkpoint(); if self.sync_gap.as_ref().ok_or(StageError::MissingSyncGap)?.is_closed() { @@ -310,7 +307,7 @@ where /// Unwind the stage. 
fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { self.sync_gap.take(); diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 3b3622d4f35a..a01cf7caaf7d 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -3,9 +3,7 @@ use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db::tables; use reth_db_api::{database::Database, models::ShardedKey, table::Decode, transaction::DbTxMut}; use reth_primitives::Address; -use reth_provider::{ - DatabaseProviderRW, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, -}; +use reth_provider::{DBProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, @@ -44,7 +42,11 @@ impl Default for IndexAccountHistoryStage { } } -impl Stage for IndexAccountHistoryStage { +impl Stage for IndexAccountHistoryStage +where + Provider: + DBProvider + HistoryWriter + PruneCheckpointReader + PruneCheckpointWriter, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::IndexAccountHistory @@ -53,7 +55,7 @@ impl Stage for IndexAccountHistoryStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -126,7 +128,7 @@ impl Stage for IndexAccountHistoryStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index 20d7491212a3..12a0fca58d03 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -8,9 +8,7 @@ use reth_db_api::{ table::Decode, transaction::DbTxMut, }; -use reth_provider::{ - DatabaseProviderRW, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, -}; +use reth_provider::{DBProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use std::fmt::Debug; @@ -47,7 +45,11 @@ impl Default for IndexStorageHistoryStage { } } -impl Stage for IndexStorageHistoryStage { +impl Stage for IndexStorageHistoryStage +where + Provider: + DBProvider + PruneCheckpointWriter + HistoryWriter + PruneCheckpointReader, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::IndexStorageHistory @@ -56,7 +58,7 @@ impl Stage for IndexStorageHistoryStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -133,7 +135,7 @@ impl Stage for IndexStorageHistoryStage { /// Unwind the stage. 
fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index b78f41719064..c26b9961a548 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -7,8 +7,8 @@ use reth_db_api::{ }; use reth_primitives::{BlockNumber, GotExpected, SealedHeader, B256}; use reth_provider::{ - DatabaseProviderRW, HeaderProvider, ProviderError, StageCheckpointReader, - StageCheckpointWriter, StatsReader, TrieWriter, + DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, StageCheckpointWriter, + StatsReader, TrieWriter, }; use reth_stages_api::{ BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage, @@ -98,9 +98,9 @@ impl MerkleStage { } /// Gets the hashing progress - pub fn get_execution_checkpoint( + pub fn get_execution_checkpoint( &self, - provider: &DatabaseProviderRW, + provider: &impl StageCheckpointReader, ) -> Result, StageError> { let buf = provider.get_stage_checkpoint_progress(StageId::MerkleExecute)?.unwrap_or_default(); @@ -114,9 +114,9 @@ impl MerkleStage { } /// Saves the hashing progress - pub fn save_execution_checkpoint( + pub fn save_execution_checkpoint( &self, - provider: &DatabaseProviderRW, + provider: &impl StageCheckpointWriter, checkpoint: Option, ) -> Result<(), StageError> { let mut buf = vec![]; @@ -132,7 +132,15 @@ impl MerkleStage { } } -impl Stage for MerkleStage { +impl Stage for MerkleStage +where + Provider: DBProvider + + TrieWriter + + StatsReader + + HeaderProvider + + StageCheckpointReader + + StageCheckpointWriter, +{ /// Return the id of the stage fn id(&self) -> StageId { match self { @@ -144,11 +152,7 @@ impl Stage for MerkleStage { } /// Execute the stage. - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { let threshold = match self { Self::Unwind => { info!(target: "sync::stages::merkle::unwind", "Stage is always skipped"); @@ -286,7 +290,7 @@ impl Stage for MerkleStage { /// Unwind the stage. 
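// Aside: the two helpers above round-trip the checkpoint through Compact
// encoding; a condensed sketch, assuming the Compact trait is in scope. An
// empty progress blob is treated as "no checkpoint", exactly as
// get_execution_checkpoint does.
let mut buf = Vec::new();
checkpoint.to_compact(&mut buf); // save path: serialize into the stage-progress blob
let restored = if buf.is_empty() {
    None
} else {
    Some(MerkleCheckpoint::from_compact(&buf, buf.len()).0) // load path
};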
fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { let tx = provider.tx_ref(); diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 407073bcef0e..7e5b8becda21 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -1,5 +1,8 @@ -use reth_db_api::database::Database; -use reth_provider::{DatabaseProviderRW, PruneCheckpointReader, PruneCheckpointWriter}; +use reth_db::transaction::DbTxMut; +use reth_provider::{ + BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, + StaticFileProviderFactory, +}; use reth_prune::{ PruneMode, PruneModes, PruneSegment, PrunerBuilder, SegmentOutput, SegmentOutputCheckpoint, }; @@ -32,22 +35,25 @@ impl PruneStage { } } -impl Stage for PruneStage { +impl Stage for PruneStage +where + Provider: DBProvider + + PruneCheckpointReader + + PruneCheckpointWriter + + BlockReader + + StaticFileProviderFactory, +{ fn id(&self) -> StageId { StageId::Prune } - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { let mut pruner = PrunerBuilder::default() .segments(self.prune_modes.clone()) .delete_limit(self.commit_threshold) - .build(provider.static_file_provider().clone()); + .build::(provider.static_file_provider().clone()); - let result = pruner.run_with_provider(&provider.0, input.target())?; + let result = pruner.run_with_provider(&provider, input.target())?; if result.progress.is_finished() { Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }) } else { @@ -87,7 +93,7 @@ impl Stage for PruneStage { fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { // We cannot recover the data that was pruned in `execute`, so we just update the @@ -118,16 +124,19 @@ impl PruneSenderRecoveryStage { } } -impl Stage for PruneSenderRecoveryStage { +impl Stage for PruneSenderRecoveryStage +where + Provider: DBProvider + + PruneCheckpointReader + + PruneCheckpointWriter + + BlockReader + + StaticFileProviderFactory, +{ fn id(&self) -> StageId { StageId::PruneSenderRecovery } - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { let mut result = self.0.execute(provider, input)?; // Adjust the checkpoint to the highest pruned block number of the Sender Recovery segment @@ -146,7 +155,7 @@ impl Stage for PruneSenderRecoveryStage { fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { self.0.unwind(provider, input) diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 40cc05f10f71..50e7316d4c20 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -8,8 +8,8 @@ use reth_db_api::{ }; use reth_primitives::{Address, GotExpected, StaticFileSegment, TransactionSignedNoHash, TxNumber}; use reth_provider::{ - BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError, PruneCheckpointReader, - StatsReader, + BlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, + StaticFileProviderFactory, StatsReader, }; use reth_prune_types::PruneSegment; use reth_stages_api::{ @@ -51,7 +51,14 @@ impl 
Default for SenderRecoveryStage { } } -impl Stage for SenderRecoveryStage { +impl Stage for SenderRecoveryStage +where + Provider: DBProvider + + BlockReader + + StaticFileProviderFactory + + StatsReader + + PruneCheckpointReader, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::SenderRecovery @@ -61,11 +68,7 @@ impl Stage for SenderRecoveryStage { /// [`BlockBodyIndices`][reth_db::tables::BlockBodyIndices], /// collect transactions within that range, recover signer for each transaction and store /// entries in the [`TransactionSenders`][reth_db::tables::TransactionSenders] table. - fn execute( - &mut self, - provider: &DatabaseProviderRW, - input: ExecInput, - ) -> Result { + fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } @@ -110,7 +113,7 @@ impl Stage for SenderRecoveryStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold); @@ -129,13 +132,13 @@ impl Stage for SenderRecoveryStage { } } -fn recover_range( +fn recover_range( tx_range: Range, - provider: &DatabaseProviderRW, + provider: &Provider, senders_cursor: &mut CURSOR, ) -> Result<(), StageError> where - DB: Database, + Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, CURSOR: DbCursorRW, { debug!(target: "sync::stages::sender_recovery", ?tx_range, "Recovering senders batch"); @@ -287,9 +290,10 @@ fn recover_sender( Ok((tx_id, sender)) } -fn stage_checkpoint( - provider: &DatabaseProviderRW, -) -> Result { +fn stage_checkpoint(provider: &Provider) -> Result +where + Provider: StatsReader + StaticFileProviderFactory + PruneCheckpointReader, +{ let pruned_entries = provider .get_prune_checkpoint(PruneSegment::SenderRecovery)? .and_then(|checkpoint| checkpoint.tx_number) diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 0e516e69bff8..6d729f9d6d15 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -9,8 +9,8 @@ use reth_db_api::{ use reth_etl::Collector; use reth_primitives::{TxHash, TxNumber}; use reth_provider::{ - BlockReader, DatabaseProviderRW, PruneCheckpointReader, PruneCheckpointWriter, StatsReader, - TransactionsProvider, TransactionsProviderExt, + BlockReader, DBProvider, DatabaseProviderRW, PruneCheckpointReader, PruneCheckpointWriter, + StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ @@ -54,7 +54,16 @@ impl TransactionLookupStage { } } -impl Stage for TransactionLookupStage { +impl Stage for TransactionLookupStage +where + Provider: DBProvider + + PruneCheckpointWriter + + BlockReader + + PruneCheckpointReader + + StatsReader + + StaticFileProviderFactory + + TransactionsProviderExt, +{ /// Return the id of the stage fn id(&self) -> StageId { StageId::TransactionLookup @@ -63,7 +72,7 @@ impl Stage for TransactionLookupStage { /// Write transaction hash -> id entries fn execute( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -178,7 +187,7 @@ impl Stage for TransactionLookupStage { /// Unwind the stage. 
fn unwind( &mut self, - provider: &DatabaseProviderRW, + provider: &Provider, input: UnwindInput, ) -> Result { let tx = provider.tx_ref(); @@ -212,9 +221,10 @@ impl Stage for TransactionLookupStage { } } -fn stage_checkpoint( - provider: &DatabaseProviderRW, -) -> Result { +fn stage_checkpoint(provider: &Provider) -> Result +where + Provider: PruneCheckpointReader + StaticFileProviderFactory + StatsReader, +{ let pruned_entries = provider .get_prune_checkpoint(PruneSegment::TransactionLookup)? .and_then(|checkpoint| checkpoint.tx_number) diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 15e88a284011..2a30457a915e 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -10,7 +10,7 @@ use reth_db_api::{ }; use reth_etl::Collector; use reth_primitives::BlockNumber; -use reth_provider::DatabaseProviderRW; +use reth_provider::{DBProvider, DatabaseProviderRW}; use reth_stages_api::StageError; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; @@ -35,15 +35,15 @@ const DEFAULT_CACHE_THRESHOLD: u64 = 100_000; /// /// As a result, the `Collector` will contain entries such as `(Address1.3, [1,2,3])` and /// `(Address1.300, [100,300])`. The entries may be stored across one or more files. -pub(crate) fn collect_history_indices( - provider: &DatabaseProviderRW, +pub(crate) fn collect_history_indices( + provider: &Provider, range: impl RangeBounds, sharded_key_factory: impl Fn(P, BlockNumber) -> H::Key, partial_key_factory: impl Fn((CS::Key, CS::Value)) -> (u64, P), etl_config: &EtlConfig, ) -> Result, StageError> where - DB: Database, + Provider: DBProvider, CS: Table, H: Table, P: Copy + Eq + Hash, @@ -102,8 +102,8 @@ where /// `Address.StorageKey`). It flushes indices to disk when reaching a shard's max length /// (`NUM_OF_INDICES_IN_SHARD`) or when the partial key changes, ensuring the last previous partial /// key shard is stored. 
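// Aside: a self-contained, illustrative model of the sharding behaviour
// described above, using only std. SHARD_LIMIT stands in for
// NUM_OF_INDICES_IN_SHARD; full shards are keyed by their highest block
// number, and the final partial shard by u64::MAX, mirroring how the loader
// below guarantees that lookups always find a terminal shard.
use std::collections::BTreeMap;

const SHARD_LIMIT: usize = 3; // illustrative; the real constant is far larger

fn shard_indices(changesets: &[(u64, &'static str)]) -> BTreeMap<(&'static str, u64), Vec<u64>> {
    let mut shards = BTreeMap::new();
    let mut open: BTreeMap<&'static str, Vec<u64>> = BTreeMap::new();
    for &(block, address) in changesets {
        let list = open.entry(address).or_default();
        list.push(block);
        if list.len() == SHARD_LIMIT {
            // Shard is full: store it under (address, last_block) and start a new one.
            shards.insert((address, block), std::mem::take(list));
        }
    }
    for (address, list) in open {
        if !list.is_empty() {
            shards.insert((address, u64::MAX), list); // last, partial shard
        }
    }
    shards
}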
-pub(crate) fn load_history_indices( - provider: &DatabaseProviderRW, +pub(crate) fn load_history_indices( + provider: &Provider, mut collector: Collector, append_only: bool, sharded_key_factory: impl Clone + Fn(P, u64) -> ::Key, @@ -111,7 +111,7 @@ pub(crate) fn load_history_indices( get_partial: impl Fn(::Key) -> P, ) -> Result<(), StageError> where - DB: Database, + Provider: DBProvider, H: Table, P: Copy + Default + Eq, { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index b7bc0cd80910..fa825f597f8f 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -45,7 +45,7 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::TryIntoHistoricalStateProvider; +use reth_storage_api::{TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, From 2a3f4ecabe0a088a12e5a1fc3c81771ef2321599 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 16 Sep 2024 13:21:49 +0300 Subject: [PATCH 02/10] DbTxUnwindExt --- Cargo.lock | 1 - crates/stages/api/Cargo.toml | 1 - crates/stages/stages/src/stages/headers.rs | 17 +++-- .../stages/src/stages/sender_recovery.rs | 3 +- crates/storage/db-api/src/lib.rs | 3 + crates/storage/db-api/src/unwind.rs | 63 ++++++++++++++++++ .../src/providers/database/provider.rs | 65 +------------------ 7 files changed, 82 insertions(+), 71 deletions(-) create mode 100644 crates/storage/db-api/src/unwind.rs diff --git a/Cargo.lock b/Cargo.lock index 5b4ccbd7e37a..6d2c85743b46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8641,7 +8641,6 @@ dependencies = [ "reth-errors", "reth-metrics", "reth-network-p2p", - "reth-node-types", "reth-primitives-traits", "reth-provider", "reth-prune", diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index 8b74b8c5ae02..a5db5b9fb20c 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -23,7 +23,6 @@ reth-prune.workspace = true reth-errors.workspace = true reth-stages-types.workspace = true reth-static-file-types.workspace = true -reth-node-types.workspace = true alloy-primitives.workspace = true diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index e313827c5ae3..e1ebe763c2fb 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -7,6 +7,7 @@ use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, transaction::DbTxMut, + DbTxUnwindExt, }; use reth_etl::Collector; use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}; @@ -315,13 +316,17 @@ where // First unwind the db tables, until the unwind_to block number. 
use the walker to unwind // HeaderNumbers based on the index in CanonicalHeaders // unwind from the next block number since the unwind_to block is exclusive - provider.unwind_table_by_walker::( - (input.unwind_to + 1).., - )?; - provider.unwind_table_by_num::(input.unwind_to)?; - provider.unwind_table_by_num::(input.unwind_to)?; + provider + .tx_ref() + .unwind_table_by_walker::( + (input.unwind_to + 1).., + )?; + provider.tx_ref().unwind_table_by_num::(input.unwind_to)?; + provider + .tx_ref() + .unwind_table_by_num::(input.unwind_to)?; let unfinalized_headers_unwound = - provider.unwind_table_by_num::(input.unwind_to)?; + provider.tx_ref().unwind_table_by_num::(input.unwind_to)?; // determine how many headers to unwind from the static files based on the highest block and // the unwind_to block diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 50e7316d4c20..75c691634e2a 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -5,6 +5,7 @@ use reth_db_api::{ cursor::DbCursorRW, database::Database, transaction::{DbTx, DbTxMut}, + DbTxUnwindExt, }; use reth_primitives::{Address, GotExpected, StaticFileSegment, TransactionSignedNoHash, TxNumber}; use reth_provider::{ @@ -123,7 +124,7 @@ where .block_body_indices(unwind_to)? .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))? .last_tx_num(); - provider.unwind_table_by_num::(latest_tx_id)?; + provider.tx_ref().unwind_table_by_num::(latest_tx_id)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) diff --git a/crates/storage/db-api/src/lib.rs b/crates/storage/db-api/src/lib.rs index cd25b3c65fa0..db4cb1000a38 100644 --- a/crates/storage/db-api/src/lib.rs +++ b/crates/storage/db-api/src/lib.rs @@ -81,3 +81,6 @@ mod scale; mod utils; pub use database::Database; + +mod unwind; +pub use unwind::DbTxUnwindExt; diff --git a/crates/storage/db-api/src/unwind.rs b/crates/storage/db-api/src/unwind.rs new file mode 100644 index 000000000000..19d08506f756 --- /dev/null +++ b/crates/storage/db-api/src/unwind.rs @@ -0,0 +1,63 @@ +use crate::{cursor::DbCursorRO, table::Table, transaction::DbTxMut}; +use reth_storage_errors::db::DatabaseError; +use std::ops::RangeBounds; + +/// Extension trait for [`DbTxMut`] that provides unwind functionality. +pub trait DbTxUnwindExt: DbTxMut { + /// Unwind table by some number key. + /// Returns number of rows unwound. + /// + /// Note: Key is not inclusive and specified key would stay in db. + #[inline] + fn unwind_table_by_num(&self, num: u64) -> Result + where + T: Table, + { + self.unwind_table::(num, |key| key) + } + + /// Unwind the table to a provided number key. + /// Returns number of rows unwound. + /// + /// Note: Key is not inclusive and specified key would stay in db. + fn unwind_table(&self, key: u64, mut selector: F) -> Result + where + T: Table, + F: FnMut(T::Key) -> u64, + { + let mut cursor = self.cursor_write::()?; + let mut reverse_walker = cursor.walk_back(None)?; + let mut deleted = 0; + + while let Some(Ok((entry_key, _))) = reverse_walker.next() { + if selector(entry_key.clone()) <= key { + break + } + reverse_walker.delete_current()?; + deleted += 1; + } + + Ok(deleted) + } + + /// Unwind a table forward by a [`Walker`][reth_db_api::cursor::Walker] on another table. + /// + /// Note: Range is inclusive and first key in the range is removed. 
+ fn unwind_table_by_walker( + &self, + range: impl RangeBounds, + ) -> Result<(), DatabaseError> + where + T1: Table, + T2: Table, + { + let mut cursor = self.cursor_write::()?; + let mut walker = cursor.walk_range(range)?; + while let Some((_, value)) = walker.next().transpose()? { + self.delete::(value, None)?; + } + Ok(()) + } +} + +impl DbTxUnwindExt for T where T: DbTxMut {} diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index fa825f597f8f..43a6ae6b8a0c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -31,7 +31,7 @@ use reth_db_api::{ }, table::Table, transaction::{DbTx, DbTxMut}, - DatabaseError, + DatabaseError, DbTxUnwindExt, }; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; @@ -1355,7 +1355,7 @@ impl DatabaseProvider { return Ok(()) } - self.unwind_table_by_walker::( + self.tx.unwind_table_by_walker::( range.clone(), )?; self.remove::(range.clone())?; @@ -1398,7 +1398,7 @@ impl DatabaseProvider { return Ok(Vec::new()) } - self.unwind_table_by_walker::( + self.tx.unwind_table_by_walker::( range.clone(), )?; let block_header_hashes = self.take::(range.clone())?; @@ -1478,65 +1478,6 @@ impl DatabaseProvider { Ok(blocks) } - /// Unwind table by some number key. - /// Returns number of rows unwound. - /// - /// Note: Key is not inclusive and specified key would stay in db. - #[inline] - pub fn unwind_table_by_num(&self, num: u64) -> Result - where - T: Table, - { - self.unwind_table::(num, |key| key) - } - - /// Unwind the table to a provided number key. - /// Returns number of rows unwound. - /// - /// Note: Key is not inclusive and specified key would stay in db. - pub(crate) fn unwind_table( - &self, - key: u64, - mut selector: F, - ) -> Result - where - T: Table, - F: FnMut(T::Key) -> u64, - { - let mut cursor = self.tx.cursor_write::()?; - let mut reverse_walker = cursor.walk_back(None)?; - let mut deleted = 0; - - while let Some(Ok((entry_key, _))) = reverse_walker.next() { - if selector(entry_key.clone()) <= key { - break - } - reverse_walker.delete_current()?; - deleted += 1; - } - - Ok(deleted) - } - - /// Unwind a table forward by a [`Walker`][reth_db_api::cursor::Walker] on another table. - /// - /// Note: Range is inclusive and first key in the range is removed. - pub fn unwind_table_by_walker( - &self, - range: impl RangeBounds, - ) -> Result<(), DatabaseError> - where - T1: Table, - T2: Table, - { - let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk_range(range)?; - while let Some((_, value)) = walker.next().transpose()? { - self.tx.delete::(value, None)?; - } - Ok(()) - } - /// Load shard and remove it. If list is empty, last shard was full or /// there are no shards at all. 
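// Aside: usage of the extracted trait, as the headers and sender-recovery
// stages above now call it. Because of the blanket impl, every DbTxMut gets
// these methods, so callers reach them through the provider's transaction.
use reth_db_api::DbTxUnwindExt;

// Delete all TransactionSenders rows above `latest_tx_id` (the key itself is
// not inclusive and stays in the database); returns the number of rows removed.
let _deleted = provider.tx_ref().unwind_table_by_num::<tables::TransactionSenders>(latest_tx_id)?;

// Walk CanonicalHeaders over the block range and delete the HeaderNumbers
// entries keyed by the walked hashes (the range is inclusive of its first key).
provider
    .tx_ref()
    .unwind_table_by_walker::<tables::CanonicalHeaders, tables::HeaderNumbers>((unwind_to + 1)..)?;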
fn take_shard(&self, key: T::Key) -> ProviderResult> From ca15e5fd93af86620bac21e2139eb60c7b69d7c4 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 16 Sep 2024 14:56:31 +0300 Subject: [PATCH 03/10] reth compiles --- Cargo.lock | 2 - .../commands/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/commands/debug_cmd/merkle.rs | 7 +- crates/cli/commands/src/stage/drop.rs | 2 +- .../cli/commands/src/stage/dump/execution.rs | 6 +- .../src/stage/dump/hashing_account.rs | 6 +- .../src/stage/dump/hashing_storage.rs | 6 +- crates/cli/commands/src/stage/dump/merkle.rs | 6 +- crates/cli/commands/src/stage/run.rs | 10 +- crates/consensus/beacon/src/engine/sync.rs | 7 +- crates/engine/tree/Cargo.toml | 1 - crates/engine/tree/src/backfill.rs | 7 +- crates/engine/tree/src/persistence.rs | 8 +- crates/node/builder/src/launch/common.rs | 4 +- crates/stages/api/Cargo.toml | 1 - crates/stages/api/src/pipeline/builder.rs | 5 +- crates/stages/api/src/pipeline/mod.rs | 3 +- crates/stages/stages/src/sets.rs | 8 +- crates/stages/stages/src/stages/bodies.rs | 4 +- crates/stages/stages/src/stages/execution.rs | 18 +- .../stages/src/stages/hashing_storage.rs | 2 +- crates/stages/stages/src/stages/headers.rs | 1 - .../src/stages/index_account_history.rs | 2 +- .../src/stages/index_storage_history.rs | 1 - crates/stages/stages/src/stages/merkle.rs | 5 +- .../stages/src/stages/sender_recovery.rs | 1 - crates/stages/stages/src/stages/tx_lookup.rs | 3 +- crates/stages/stages/src/stages/utils.rs | 4 +- crates/storage/db-common/src/init.rs | 2 +- .../src/providers/database/provider.rs | 435 +++++++++--------- crates/storage/provider/src/traits/state.rs | 7 + crates/storage/provider/src/writer/mod.rs | 11 +- 32 files changed, 285 insertions(+), 302 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d2c85743b46..666c6e353f7f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6980,7 +6980,6 @@ dependencies = [ "reth-exex-types", "reth-metrics", "reth-network-p2p", - "reth-node-types", "reth-payload-builder", "reth-payload-primitives", "reth-payload-validator", @@ -8637,7 +8636,6 @@ dependencies = [ "futures-util", "metrics", "reth-consensus", - "reth-db-api", "reth-errors", "reth-metrics", "reth-network-p2p", diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 8c6b3f443b29..d46665e48137 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -173,7 +173,7 @@ impl> Command { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw); + let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw.0); storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plain_state_storages(storage_lists)?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 654d82cffcb9..0eb77ae0ab7b 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -20,8 +20,9 @@ use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ - writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, - LatestStateProviderRef, 
OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, + writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, + DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, + ProviderError, ProviderFactory, StateWriter, StaticFileProviderFactory, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -84,7 +85,7 @@ impl> Command { let Environment { provider_factory, config, data_dir } = self.env.init::(AccessRights::RW)?; - let provider_rw = provider_factory.provider_rw()?; + let provider_rw = provider_factory.database_provider_rw()?; // Configure and build network let network_secret_path = diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index d62a340bfcdc..917903da3744 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -179,7 +179,7 @@ impl> Command { tx.put::(StageId::Finish.to_string(), Default::default())?; - UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw.0, static_file_provider)?; Ok(()) } diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index dfc320f15d64..c807ac94145d 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -10,7 +10,7 @@ use reth_db_common::DbTool; use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_provider::{providers::StaticFileProvider, DatabaseProviderFactory, ProviderFactory}; use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; @@ -135,7 +135,7 @@ fn unwind_and_copy>( tip_block_number: u64, output_db: &DatabaseEnv, ) -> eyre::Result<()> { - let provider = db_tool.provider_factory.provider_rw()?; + let provider = db_tool.provider_factory.database_provider_rw()?; let mut exec_stage = ExecutionStage::new_with_executor(NoopBlockExecutorProvider::default()); @@ -175,7 +175,7 @@ where let input = reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) }; - exec_stage.execute(&output_provider_factory.provider_rw()?, input)?; + exec_stage.execute(&output_provider_factory.database_provider_rw()?, input)?; info!(target: "reth::cli", "Success"); diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 1d96de778d30..94d8129e0382 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -9,7 +9,7 @@ use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_provider::{providers::StaticFileProvider, DatabaseProviderFactory, ProviderFactory}; use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; @@ -55,7 +55,7 @@ fn unwind_and_copy>( tip_block_number: u64, output_db: &DatabaseEnv, ) -> eyre::Result<()> { - let provider = db_tool.provider_factory.provider_rw()?; + let provider = 
db_tool.provider_factory.database_provider_rw()?; let mut exec_stage = AccountHashingStage::default(); exec_stage.unwind( @@ -81,7 +81,7 @@ fn dry_run>( ) -> eyre::Result<()> { info!(target: "reth::cli", "Executing stage."); - let provider = output_provider_factory.provider_rw()?; + let provider = output_provider_factory.database_provider_rw()?; let mut stage = AccountHashingStage { clean_threshold: 1, // Forces hashing from scratch ..Default::default() diff --git a/crates/cli/commands/src/stage/dump/hashing_storage.rs b/crates/cli/commands/src/stage/dump/hashing_storage.rs index 57f0ed53ac5a..16a90eeedcb3 100644 --- a/crates/cli/commands/src/stage/dump/hashing_storage.rs +++ b/crates/cli/commands/src/stage/dump/hashing_storage.rs @@ -8,7 +8,7 @@ use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_provider::{providers::StaticFileProvider, DatabaseProviderFactory, ProviderFactory}; use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; @@ -45,7 +45,7 @@ fn unwind_and_copy>( tip_block_number: u64, output_db: &DatabaseEnv, ) -> eyre::Result<()> { - let provider = db_tool.provider_factory.provider_rw()?; + let provider = db_tool.provider_factory.database_provider_rw()?; let mut exec_stage = StorageHashingStage::default(); @@ -76,7 +76,7 @@ fn dry_run>( ) -> eyre::Result<()> { info!(target: "reth::cli", "Executing stage."); - let provider = output_provider_factory.provider_rw()?; + let provider = output_provider_factory.database_provider_rw()?; let mut stage = StorageHashingStage { clean_threshold: 1, // Forces hashing from scratch ..Default::default() diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index bcd05ca9477b..4b3d9c30331e 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -12,7 +12,7 @@ use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, ProviderFactory}; +use reth_provider::{providers::StaticFileProvider, DatabaseProviderFactory, ProviderFactory}; use reth_prune::PruneModes; use reth_stages::{ stages::{ @@ -73,7 +73,7 @@ fn unwind_and_copy>( output_db: &DatabaseEnv, ) -> eyre::Result<()> { let (from, to) = range; - let provider = db_tool.provider_factory.provider_rw()?; + let provider = db_tool.provider_factory.database_provider_rw()?; let unwind = UnwindInput { unwind_to: from, @@ -150,7 +150,7 @@ fn dry_run>( from: u64, ) -> eyre::Result<()> { info!(target: "reth::cli", "Executing stage."); - let provider = output_provider_factory.provider_rw()?; + let provider = output_provider_factory.database_provider_rw()?; let mut stage = MerkleStage::Execution { // Forces updating the root instead of calculating from scratch diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index b80c2724c4df..1d2ca1d54500 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -34,8 +34,8 @@ use reth_node_metrics::{ version::VersionInfo, }; use reth_provider::{ - writer::UnifiedStorageWriter, ChainSpecProvider, StageCheckpointReader, 
StageCheckpointWriter, - StaticFileProviderFactory, + writer::UnifiedStorageWriter, ChainSpecProvider, DatabaseProviderFactory, + StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, }; use reth_stages::{ stages::{ @@ -117,7 +117,7 @@ impl> Command { let Environment { provider_factory, config, data_dir } = self.env.init::(AccessRights::RW)?; - let mut provider_rw = provider_factory.provider_rw()?; + let mut provider_rw = provider_factory.database_provider_rw()?; if let Some(listen_addr) = self.metrics { info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr); @@ -333,7 +333,7 @@ impl> Command { provider_rw, provider_factory.static_file_provider(), )?; - provider_rw = provider_factory.provider_rw()?; + provider_rw = provider_factory.database_provider_rw()?; } } } @@ -356,7 +356,7 @@ impl> Command { } if self.commit { UnifiedStorageWriter::commit(provider_rw, provider_factory.static_file_provider())?; - provider_rw = provider_factory.provider_rw()?; + provider_rw = provider_factory.database_provider_rw()?; } if done { diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 1a5bca49de33..bf80986e99e4 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -10,7 +10,6 @@ use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, BlockClient, }; -use reth_node_types::NodeTypesWithDB; use reth_primitives::SealedBlock; use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; @@ -34,7 +33,7 @@ use tracing::trace; /// database while the pipeline is still active. pub(crate) struct EngineSyncController where - N: NodeTypesWithDB, + N: ProviderNodeTypes, Client: BlockClient, { /// A downloader that can download full blocks from the network. @@ -394,14 +393,14 @@ pub(crate) enum EngineSyncEvent { /// running, it acquires the write lock over the database. This means that we cannot forward to the /// blockchain tree any messages that would result in database writes, since it would result in a /// deadlock. -enum PipelineState { +enum PipelineState { /// Pipeline is idle. Idle(Option>), /// Pipeline is running and waiting for a response Running(oneshot::Receiver>), } -impl PipelineState { +impl PipelineState { /// Returns `true` if the state matches idle. const fn is_idle(&self) -> bool { matches!(self, Self::Idle(_)) diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 2c8d1922a039..35d0dddc2488 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -31,7 +31,6 @@ reth-revm.workspace = true reth-rpc-types.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true -reth-node-types.workspace = true reth-trie.workspace = true reth-trie-parallel.workspace = true diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index 9aee45f83b47..4ae244ed2edf 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs @@ -8,7 +8,6 @@ //! These modes are mutually exclusive and the node can only be in one mode at a time. use futures::FutureExt; -use reth_node_types::NodeTypesWithDB; use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; @@ -79,7 +78,7 @@ pub enum BackfillEvent { /// Pipeline sync. 
#[derive(Debug)] -pub struct PipelineSync { +pub struct PipelineSync { /// The type that can spawn the pipeline task. pipeline_task_spawner: Box, /// The current state of the pipeline. @@ -213,14 +212,14 @@ impl BackfillSync for PipelineSync { /// blockchain tree any messages that would result in database writes, since it would result in a /// deadlock. #[derive(Debug)] -enum PipelineState { +enum PipelineState { /// Pipeline is idle. Idle(Option>), /// Pipeline is running and waiting for a response Running(oneshot::Receiver>), } -impl PipelineState { +impl PipelineState { /// Returns `true` if the state matches idle. const fn is_idle(&self) -> bool { matches!(self, Self::Idle(_)) diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 1a116b781372..74d2c297ccd2 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -3,8 +3,8 @@ use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; use reth_primitives::BlockNumHash; use reth_provider::{ - providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ProviderFactory, - StaticFileProviderFactory, + providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, + DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, }; use reth_prune::{PrunerError, PrunerOutput, PrunerWithFactory}; use reth_stages_api::{MetricEvent, MetricEventsSender}; @@ -103,7 +103,7 @@ impl PersistenceService { ) -> Result, PersistenceError> { debug!(target: "engine::persistence", ?new_tip_num, "Removing blocks"); let start_time = Instant::now(); - let provider_rw = self.provider.provider_rw()?; + let provider_rw = self.provider.database_provider_rw()?; let sf_provider = self.provider.static_file_provider(); let new_tip_hash = provider_rw.block_hash(new_tip_num)?; @@ -126,7 +126,7 @@ impl PersistenceService { .map(|block| BlockNumHash { hash: block.block().hash(), number: block.block().number }); if last_block_hash_num.is_some() { - let provider_rw = self.provider.provider_rw()?; + let provider_rw = self.provider.database_provider_rw()?; let static_file_provider = self.provider.static_file_provider(); UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(&blocks)?; diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 1fc0d8cae705..04494e5d0920 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -48,7 +48,7 @@ use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_api::clients::EthApiClient; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; -use reth_stages::{sets::DefaultStages, MetricEvent, Pipeline, PipelineTarget, StageId}; +use reth_stages::{sets::DefaultStages, MetricEvent, PipelineBuilder, PipelineTarget, StageId}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; @@ -422,7 +422,7 @@ where let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); // Builds an unwind-only pipeline - let pipeline = Pipeline::::builder() + let pipeline = PipelineBuilder::default() .add_stages(DefaultStages::new( factory.clone(), tip_rx, diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index a5db5b9fb20c..352d3e024765 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -14,7 +14,6 @@ workspace = true # reth reth-primitives-traits.workspace = true reth-provider.workspace 
= true -reth-db-api.workspace = true reth-static-file.workspace = true reth-network-p2p.workspace = true reth-tokio-util.workspace = true diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 6b0dc0e93228..68ca887fe790 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -1,6 +1,5 @@ use crate::{pipeline::BoxedStage, MetricEventsSender, Pipeline, Stage, StageId, StageSet}; use alloy_primitives::{BlockNumber, B256}; -use reth_db_api::database::Database; use reth_provider::{providers::ProviderNodeTypes, DatabaseProviderFactory, ProviderFactory}; use reth_static_file::StaticFileProducer; use tokio::sync::watch; @@ -85,13 +84,13 @@ impl PipelineBuilder { } } -impl Default for PipelineBuilder { +impl Default for PipelineBuilder { fn default() -> Self { Self { stages: Vec::new(), max_block: None, tip_tx: None, metrics_tx: None } } } -impl std::fmt::Debug for PipelineBuilder { +impl std::fmt::Debug for PipelineBuilder { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("PipelineBuilder") .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 73f6b134d2ed..928c43fb62f2 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -82,7 +82,8 @@ pub struct Pipeline { impl Pipeline { /// Construct a pipeline using a [`PipelineBuilder`]. - pub fn builder() -> PipelineBuilder { + pub fn builder() -> PipelineBuilder< as DatabaseProviderFactory>::ProviderRW> + { PipelineBuilder::default() } diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 62fb13f86232..f45ee3b78d62 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -43,16 +43,10 @@ use crate::{ }; use reth_config::config::StageConfig; use reth_consensus::Consensus; -use reth_db::transaction::DbTxMut; -use reth_db_api::database::Database; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}; use reth_primitives::B256; -use reth_provider::{ - AccountExtReader, BlockReader, DBProvider, HashingWriter, HeaderProvider, - HeaderSyncGapProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter, - StaticFileProviderFactory, StatsReader, StorageReader, TrieWriter, -}; +use reth_provider::HeaderSyncGapProvider; use reth_prune_types::PruneModes; use reth_stages_api::Stage; use std::{ops::Not, sync::Arc}; diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 08a834a609f7..4152df807547 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -9,7 +9,6 @@ use tracing::*; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, - database::Database, models::{StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, transaction::DbTxMut, }; @@ -17,8 +16,7 @@ use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockRespon use reth_primitives::{StaticFileSegment, TxNumber}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DBProvider, DatabaseProviderRW, HeaderProvider, ProviderError, - StaticFileProviderFactory, StatsReader, + BlockReader, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, }; use reth_stages_api::{ 
EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 44c6b3fc5fdd..8365b836a2d4 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -2,7 +2,7 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; use num_traits::Zero; use reth_config::config::ExecutionConfig; use reth_db::{static_file::HeaderMask, tables}; -use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_evm::{ execute::{BatchExecutor, BlockExecutorProvider}, metrics::ExecutorMetrics, @@ -15,7 +15,7 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, writer::UnifiedStorageWriter, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, StateWriter, StaticFileProviderFactory, StatsReader, + ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, StatsReader, TransactionVariant, }; use reth_prune_types::PruneModes; @@ -173,7 +173,8 @@ impl ExecutionStage { impl Stage for ExecutionStage where E: BlockExecutorProvider, - Provider: DBProvider + BlockReader + StaticFileProviderFactory + StatsReader, + Provider: + DBProvider + BlockReader + StaticFileProviderFactory + StatsReader + StateChangeWriter, for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a>>: StateWriter, { /// Return the id of the stage @@ -357,7 +358,7 @@ where let time = Instant::now(); // write output - let mut writer = UnifiedStorageWriter::new(&provider, static_file_producer); + let mut writer = UnifiedStorageWriter::new(provider, static_file_producer); writer.write_to_storage(state, OriginalValuesKnown::Yes)?; let db_write_duration = time.elapsed(); @@ -428,6 +429,8 @@ where } } + let static_file_provider = provider.static_file_provider(); + // Unwind all receipts for transactions in the block range if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { // We only use static files for Receipts, if there is no receipt pruning of any kind. @@ -435,11 +438,8 @@ where // prepare_static_file_producer does a consistency check that will unwind static files // if the expected highest receipt in the files is higher than the database. // Which is essentially what happens here when we unwind this stage. - let _static_file_producer = prepare_static_file_producer( - provider, - &provider.static_file_provider(), - *range.start(), - )?; + let _static_file_producer = + prepare_static_file_producer(provider, &static_file_provider, *range.start())?; } else { // If there is any kind of receipt pruning/filtering we use the database, since static // files do not support filters. 
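The execution-stage hunks above capture the core of this refactor: a stage is now generic over a provider type bound by capability traits (`DBProvider`, `StaticFileProviderFactory`, `StateChangeWriter`, ...) rather than over `Database`, and `static_file_provider()` returns an owned `StaticFileProvider`, which is why it gets bound to a local before use. A minimal sketch of a stage written against the new bounds — the stage itself is hypothetical, trait and type names follow this diff, and exact signatures may differ from the final API:

use reth_provider::{DBProvider, StaticFileProviderFactory};
use reth_stages_api::{
    ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput,
};

/// A no-op stage that is generic over any provider with the required
/// capabilities, instead of over a `Database` implementation.
#[derive(Debug, Default)]
struct NoopStage;

impl<Provider> Stage<Provider> for NoopStage
where
    Provider: DBProvider + StaticFileProviderFactory,
{
    fn id(&self) -> StageId {
        StageId::Other("Noop")
    }

    fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
        // `static_file_provider()` now returns an owned provider, so bind it to a
        // local when a writer borrowed from it must outlive the expression.
        let _static_files = provider.static_file_provider();
        Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true })
    }

    fn unwind(&mut self, _provider: &Provider, input: UnwindInput) -> Result<UnwindOutput, StageError> {
        Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
    }
}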
diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index 747745164533..0d28211288ff 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -9,7 +9,7 @@ use reth_db_api::{ }; use reth_etl::Collector; use reth_primitives::{keccak256, BufMut, StorageEntry, B256}; -use reth_provider::{DBProvider, DatabaseProviderRW, HashingWriter, StatsReader, StorageReader}; +use reth_provider::{DBProvider, HashingWriter, StatsReader, StorageReader}; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, StorageHashingCheckpoint, UnwindInput, UnwindOutput, diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index e1ebe763c2fb..e04c08604d2f 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -5,7 +5,6 @@ use reth_consensus::Consensus; use reth_db::{tables, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, - database::Database, transaction::DbTxMut, DbTxUnwindExt, }; diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index a01cf7caaf7d..55e504ac34cf 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -1,7 +1,7 @@ use super::{collect_history_indices, load_history_indices}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db::tables; -use reth_db_api::{database::Database, models::ShardedKey, table::Decode, transaction::DbTxMut}; +use reth_db_api::{models::ShardedKey, table::Decode, transaction::DbTxMut}; use reth_primitives::Address; use reth_provider::{DBProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter}; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index 12a0fca58d03..b26ef1826e05 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -3,7 +3,6 @@ use crate::{StageCheckpoint, StageId}; use reth_config::config::{EtlConfig, IndexHistoryConfig}; use reth_db::tables; use reth_db_api::{ - database::Database, models::{storage_sharded_key::StorageShardedKey, AddressStorageKey, BlockNumberAddress}, table::Decode, transaction::DbTxMut, diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index c26b9961a548..39dc493acd92 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,10 +1,7 @@ use reth_codecs::Compact; use reth_consensus::ConsensusError; use reth_db::tables; -use reth_db_api::{ - database::Database, - transaction::{DbTx, DbTxMut}, -}; +use reth_db_api::transaction::{DbTx, DbTxMut}; use reth_primitives::{BlockNumber, GotExpected, SealedHeader, B256}; use reth_provider::{ DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, StageCheckpointWriter, diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 75c691634e2a..855c0bc52661 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -3,7 +3,6 @@ use 
reth_consensus::ConsensusError; use reth_db::{static_file::TransactionMask, tables, RawValue}; use reth_db_api::{ cursor::DbCursorRW, - database::Database, transaction::{DbTx, DbTxMut}, DbTxUnwindExt, }; diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 6d729f9d6d15..6f395af73dee 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -3,13 +3,12 @@ use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{tables, RawKey, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, - database::Database, transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; use reth_primitives::{TxHash, TxNumber}; use reth_provider::{ - BlockReader, DBProvider, DatabaseProviderRW, PruneCheckpointReader, PruneCheckpointWriter, + BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 2a30457a915e..cb9c729aa234 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -1,6 +1,6 @@ //! Utils for `stages`. use reth_config::config::EtlConfig; -use reth_db::{BlockNumberList, Database}; +use reth_db::BlockNumberList; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, models::sharded_key::NUM_OF_INDICES_IN_SHARD, @@ -10,7 +10,7 @@ use reth_db_api::{ }; use reth_etl::Collector; use reth_primitives::BlockNumber; -use reth_provider::{DBProvider, DatabaseProviderRW}; +use reth_provider::DBProvider; use reth_stages_api::StageError; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 2fa2026b849c..9c068aa4b82d 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -124,7 +124,7 @@ pub fn init_genesis>( // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. 
- UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw.0, static_file_provider)?; Ok(hash) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 43a6ae6b8a0c..e00ca9e09ec8 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -12,8 +12,8 @@ use crate::{ HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, RevertsInit, StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, - StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, + TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use itertools::{izip, Itertools}; use rayon::slice::ParallelSliceMut; @@ -45,7 +45,7 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{TryIntoHistoricalStateProvider}; +use reth_storage_api::TryIntoHistoricalStateProvider; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -131,17 +131,19 @@ pub struct DatabaseProvider { } impl DatabaseProvider { - /// Returns a static file provider - pub const fn static_file_provider(&self) -> &StaticFileProvider { - &self.static_file_provider - } - /// Returns reference to prune modes. pub const fn prune_modes_ref(&self) -> &PruneModes { &self.prune_modes } } +impl StaticFileProviderFactory for DatabaseProvider { + /// Returns a static file provider + fn static_file_provider(&self) -> StaticFileProvider { + self.static_file_provider.clone() + } +} + impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. pub const fn new_rw( @@ -952,216 +954,6 @@ impl DatabaseProvider { Ok(self.tx.commit()?) } - /// Remove the last N blocks of state. - /// - /// The latest state will be unwound - /// - /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the - /// transaction ids. - /// 2. Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the - /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct - /// the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. - /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the plain state - /// 3. Save the old value to the local state - /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the local state - /// 3. Set the local state to the value in the changeset - pub fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()> { - if range.is_empty() { - return Ok(()) - } - - // We are not removing block meta as it is used to get block changesets. 
- let block_bodies = self.get::(range.clone())?; - - // get transaction receipts - let from_transaction_num = - block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); - let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); - - let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.take::(storage_range)?; - let account_changeset = self.take::(range)?; - - // This is not working for blocks that are not at tip. as plain state is not the last - // state of end range. We should rename the functions or add support to access - // History state. Accessing history state can be tricky but we are not gaining - // anything. - let mut plain_accounts_cursor = self.tx.cursor_write::()?; - let mut plain_storage_cursor = self.tx.cursor_dup_write::()?; - - let (state, _) = self.populate_bundle_state( - account_changeset, - storage_changeset, - &mut plain_accounts_cursor, - &mut plain_storage_cursor, - )?; - - // iterate over local plain state remove all account and all storages. - for (address, (old_account, new_account, storage)) in &state { - // revert account if needed. - if old_account != new_account { - let existing_entry = plain_accounts_cursor.seek_exact(*address)?; - if let Some(account) = old_account { - plain_accounts_cursor.upsert(*address, *account)?; - } else if existing_entry.is_some() { - plain_accounts_cursor.delete_current()?; - } - } - - // revert storages - for (storage_key, (old_storage_value, _new_storage_value)) in storage { - let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; - // delete previous value - // TODO: This does not use dupsort features - if plain_storage_cursor - .seek_by_key_subkey(*address, *storage_key)? - .filter(|s| s.key == *storage_key) - .is_some() - { - plain_storage_cursor.delete_current()? - } - - // insert value if needed - if !old_storage_value.is_zero() { - plain_storage_cursor.upsert(*address, storage_entry)?; - } - } - } - - // iterate over block body and remove receipts - self.remove::(from_transaction_num..=to_transaction_num)?; - - Ok(()) - } - - /// Take the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// The latest state will be unwound and returned back with all the blocks - /// - /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the - /// transaction ids. - /// 2. Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the - /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct - /// the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. - /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the plain state - /// 3. Save the old value to the local state - /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the local state - /// 3. 
Set the local state to the value in the changeset - pub fn take_state( - &self, - range: RangeInclusive, - ) -> ProviderResult { - if range.is_empty() { - return Ok(ExecutionOutcome::default()) - } - let start_block_number = *range.start(); - - // We are not removing block meta as it is used to get block changesets. - let block_bodies = self.get::(range.clone())?; - - // get transaction receipts - let from_transaction_num = - block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); - let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); - - let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.take::(storage_range)?; - let account_changeset = self.take::(range)?; - - // This is not working for blocks that are not at tip. as plain state is not the last - // state of end range. We should rename the functions or add support to access - // History state. Accessing history state can be tricky but we are not gaining - // anything. - let mut plain_accounts_cursor = self.tx.cursor_write::()?; - let mut plain_storage_cursor = self.tx.cursor_dup_write::()?; - - // populate bundle state and reverts from changesets / state cursors, to iterate over, - // remove, and return later - let (state, reverts) = self.populate_bundle_state( - account_changeset, - storage_changeset, - &mut plain_accounts_cursor, - &mut plain_storage_cursor, - )?; - - // iterate over local plain state remove all account and all storages. - for (address, (old_account, new_account, storage)) in &state { - // revert account if needed. - if old_account != new_account { - let existing_entry = plain_accounts_cursor.seek_exact(*address)?; - if let Some(account) = old_account { - plain_accounts_cursor.upsert(*address, *account)?; - } else if existing_entry.is_some() { - plain_accounts_cursor.delete_current()?; - } - } - - // revert storages - for (storage_key, (old_storage_value, _new_storage_value)) in storage { - let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; - // delete previous value - // TODO: This does not use dupsort features - if plain_storage_cursor - .seek_by_key_subkey(*address, *storage_key)? - .filter(|s| s.key == *storage_key) - .is_some() - { - plain_storage_cursor.delete_current()? - } - - // insert value if needed - if !old_storage_value.is_zero() { - plain_storage_cursor.upsert(*address, storage_entry)?; - } - } - } - - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.take::(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::new(); - // loop break if we are at the end of the blocks. - for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); - } - } - receipts.push(block_receipts); - } - - Ok(ExecutionOutcome::new_init( - state, - reverts, - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - )) - } - /// Remove list of entries from the table. Returns the number of entries removed. #[inline] pub fn remove( @@ -2648,6 +2440,213 @@ impl StateChangeWriter for DatabaseProvider { Ok(()) } + + /// Remove the last N blocks of state. + /// + /// The latest state will be unwound + /// + /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the + /// transaction ids. 
+ /// 2. Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the + /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct + /// the changesets. + /// - In order to have both the old and new values in the changesets, we also access the + /// plain state tables. + /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, + /// we: + /// 1. Take the old value from the changeset + /// 2. Take the new value from the plain state + /// 3. Save the old value to the local state + /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we + /// have seen before we: + /// 1. Take the old value from the changeset + /// 2. Take the new value from the local state + /// 3. Set the local state to the value in the changeset + fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()> { + if range.is_empty() { + return Ok(()) + } + + // We are not removing block meta as it is used to get block changesets. + let block_bodies = self.get::(range.clone())?; + + // get transaction receipts + let from_transaction_num = + block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); + let to_transaction_num = + block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); + + let storage_range = BlockNumberAddress::range(range.clone()); + + let storage_changeset = self.take::(storage_range)?; + let account_changeset = self.take::(range)?; + + // This is not working for blocks that are not at tip. as plain state is not the last + // state of end range. We should rename the functions or add support to access + // History state. Accessing history state can be tricky but we are not gaining + // anything. + let mut plain_accounts_cursor = self.tx.cursor_write::()?; + let mut plain_storage_cursor = self.tx.cursor_dup_write::()?; + + let (state, _) = self.populate_bundle_state( + account_changeset, + storage_changeset, + &mut plain_accounts_cursor, + &mut plain_storage_cursor, + )?; + + // iterate over local plain state remove all account and all storages. + for (address, (old_account, new_account, storage)) in &state { + // revert account if needed. + if old_account != new_account { + let existing_entry = plain_accounts_cursor.seek_exact(*address)?; + if let Some(account) = old_account { + plain_accounts_cursor.upsert(*address, *account)?; + } else if existing_entry.is_some() { + plain_accounts_cursor.delete_current()?; + } + } + + // revert storages + for (storage_key, (old_storage_value, _new_storage_value)) in storage { + let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; + // delete previous value + // TODO: This does not use dupsort features + if plain_storage_cursor + .seek_by_key_subkey(*address, *storage_key)? + .filter(|s| s.key == *storage_key) + .is_some() + { + plain_storage_cursor.delete_current()? + } + + // insert value if needed + if !old_storage_value.is_zero() { + plain_storage_cursor.upsert(*address, storage_entry)?; + } + } + } + + // iterate over block body and remove receipts + self.remove::(from_transaction_num..=to_transaction_num)?; + + Ok(()) + } + + /// Take the last N blocks of state, recreating the [`ExecutionOutcome`]. + /// + /// The latest state will be unwound and returned back with all the blocks + /// + /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the + /// transaction ids. + /// 2. 
Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the + /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct + /// the changesets. + /// - In order to have both the old and new values in the changesets, we also access the + /// plain state tables. + /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, + /// we: + /// 1. Take the old value from the changeset + /// 2. Take the new value from the plain state + /// 3. Save the old value to the local state + /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we + /// have seen before we: + /// 1. Take the old value from the changeset + /// 2. Take the new value from the local state + /// 3. Set the local state to the value in the changeset + fn take_state(&self, range: RangeInclusive) -> ProviderResult { + if range.is_empty() { + return Ok(ExecutionOutcome::default()) + } + let start_block_number = *range.start(); + + // We are not removing block meta as it is used to get block changesets. + let block_bodies = self.get::(range.clone())?; + + // get transaction receipts + let from_transaction_num = + block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); + let to_transaction_num = + block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); + + let storage_range = BlockNumberAddress::range(range.clone()); + + let storage_changeset = self.take::(storage_range)?; + let account_changeset = self.take::(range)?; + + // This is not working for blocks that are not at tip. as plain state is not the last + // state of end range. We should rename the functions or add support to access + // History state. Accessing history state can be tricky but we are not gaining + // anything. + let mut plain_accounts_cursor = self.tx.cursor_write::()?; + let mut plain_storage_cursor = self.tx.cursor_dup_write::()?; + + // populate bundle state and reverts from changesets / state cursors, to iterate over, + // remove, and return later + let (state, reverts) = self.populate_bundle_state( + account_changeset, + storage_changeset, + &mut plain_accounts_cursor, + &mut plain_storage_cursor, + )?; + + // iterate over local plain state remove all account and all storages. + for (address, (old_account, new_account, storage)) in &state { + // revert account if needed. + if old_account != new_account { + let existing_entry = plain_accounts_cursor.seek_exact(*address)?; + if let Some(account) = old_account { + plain_accounts_cursor.upsert(*address, *account)?; + } else if existing_entry.is_some() { + plain_accounts_cursor.delete_current()?; + } + } + + // revert storages + for (storage_key, (old_storage_value, _new_storage_value)) in storage { + let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; + // delete previous value + // TODO: This does not use dupsort features + if plain_storage_cursor + .seek_by_key_subkey(*address, *storage_key)? + .filter(|s| s.key == *storage_key) + .is_some() + { + plain_storage_cursor.delete_current()? + } + + // insert value if needed + if !old_storage_value.is_zero() { + plain_storage_cursor.upsert(*address, storage_entry)?; + } + } + } + + // iterate over block body and create ExecutionResult + let mut receipt_iter = + self.take::(from_transaction_num..=to_transaction_num)?.into_iter(); + + let mut receipts = Vec::new(); + // loop break if we are at the end of the blocks. 
+ for (_, block_body) in block_bodies { + let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); + for _ in block_body.tx_num_range() { + if let Some((_, receipt)) = receipt_iter.next() { + block_receipts.push(Some(receipt)); + } + } + receipts.push(block_receipts); + } + + Ok(ExecutionOutcome::new_init( + state, + reverts, + Vec::new(), + receipts.into(), + start_block_number, + Vec::new(), + )) + } } impl TrieWriter for DatabaseProvider { diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 8c68c2acdfe8..2f565a64d7cb 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -6,6 +6,7 @@ use revm::db::{ states::{PlainStateReverts, StateChangeset}, OriginalValuesKnown, }; +use std::ops::RangeInclusive; /// A helper trait for [`ExecutionOutcome`] to write state and receipts to storage. pub trait StateWriter { @@ -34,4 +35,10 @@ pub trait StateChangeWriter { /// Writes the hashed state changes to the database fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()>; + + /// Remove the block range of state. + fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()>; + + /// Take the block range of state, recreating the [`ExecutionOutcome`]. + fn take_state(&self, range: RangeInclusive) -> ProviderResult; } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 1af542382744..247c2d86b7d6 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -47,11 +47,8 @@ impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF /// # Parameters /// - `database`: An optional reference to a database provider. /// - `static_file`: An optional mutable reference to a static file instance. - pub fn new
(database: &'a P, static_file: Option) -> Self - where - P: AsRef, - { - Self { database: database.as_ref(), static_file } + pub fn new(database: &'a ProviderDB, static_file: Option) -> Self { + Self { database, static_file } } /// Creates a new instance of [`UnifiedStorageWriter`] from a database provider and a static @@ -60,7 +57,7 @@ impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF where P: AsRef, { - Self::new(database, Some(static_file)) + Self::new(database.as_ref(), Some(static_file)) } /// Creates a new instance of [`UnifiedStorageWriter`] from a database provider. @@ -68,7 +65,7 @@ impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF where P: AsRef, { - Self::new(database, None) + Self::new(database.as_ref(), None) } /// Returns a reference to the database writer. From 4e675a5cd995005b2e494df97af32effe09b9997 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 16 Sep 2024 16:23:41 +0300 Subject: [PATCH 04/10] fix tests --- crates/exex/exex/src/backfill/test_utils.rs | 6 ++-- .../cli/src/commands/import_receipts.rs | 4 +-- crates/stages/api/src/test_utils.rs | 14 ++------- crates/stages/stages/benches/criterion.rs | 9 +++--- crates/stages/stages/benches/setup/mod.rs | 16 ++++++---- crates/stages/stages/src/sets.rs | 1 - crates/stages/stages/src/stages/execution.rs | 29 ++++++++++--------- .../stages/src/stages/hashing_account.rs | 12 +++++--- crates/stages/stages/src/stages/headers.rs | 3 +- .../src/stages/index_account_history.rs | 8 ++--- .../src/stages/index_storage_history.rs | 8 ++--- crates/stages/stages/src/stages/mod.rs | 2 +- crates/stages/stages/src/stages/prune.rs | 4 +-- .../stages/src/stages/sender_recovery.rs | 8 ++--- crates/stages/stages/src/stages/tx_lookup.rs | 6 ++-- crates/stages/stages/src/test_utils/runner.rs | 7 ++--- crates/stages/stages/src/test_utils/set.rs | 5 ++-- .../src/providers/blockchain_provider.rs | 7 +++-- .../provider/src/providers/database/mod.rs | 3 +- .../src/providers/database/provider.rs | 2 +- crates/storage/provider/src/writer/mod.rs | 9 +++--- testing/ef-tests/src/cases/blockchain_test.rs | 11 ++++--- 22 files changed, 88 insertions(+), 86 deletions(-) diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 26f92ea87f09..3dbcb5363158 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -12,7 +12,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, - ProviderFactory, + ProviderFactory, StaticFileProviderFactory, }; use reth_revm::database::StateProviderDatabase; use reth_testing_utils::generators::sign_tx_with_key_pair; @@ -63,7 +63,7 @@ where let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) .executor(StateProviderDatabase::new(LatestStateProviderRef::new( provider.tx_ref(), - provider.static_file_provider().clone(), + provider.static_file_provider(), ))) .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; block_execution_output.state.reverts.sort(); @@ -187,7 +187,7 @@ where let executor = EthExecutorProvider::ethereum(chain_spec).batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new(provider.tx_ref(), provider.static_file_provider().clone()), + LatestStateProviderRef::new(provider.tx_ref(), provider.static_file_provider()), )); let mut execution_outcome = executor.execute_and_verify_batch(vec![ diff --git 
a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 8074fa92f4d9..fca900f96f5f 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -232,13 +232,13 @@ where static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?; // finally, write the receipts - let mut storage_writer = UnifiedStorageWriter::from(&provider, static_file_producer); + let mut storage_writer = UnifiedStorageWriter::from(&provider.0, static_file_producer); storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; } // as static files works in file ranges, internally it will be committing when creating the // next file range already, so we only need to call explicitly at the end. - UnifiedStorageWriter::commit(provider, static_file_provider)?; + UnifiedStorageWriter::commit(provider.0, static_file_provider)?; Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns }) } diff --git a/crates/stages/api/src/test_utils.rs b/crates/stages/api/src/test_utils.rs index 3a7a244dbced..3cd2f4bc4096 100644 --- a/crates/stages/api/src/test_utils.rs +++ b/crates/stages/api/src/test_utils.rs @@ -1,8 +1,6 @@ #![allow(missing_docs)] use crate::{ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, UnwindOutput}; -use reth_db_api::database::Database; -use reth_provider::DatabaseProviderRW; use std::collections::VecDeque; /// A test stage that can be used for testing. @@ -49,21 +47,13 @@ impl Stage for TestStage { self.id } - fn execute( - &mut self, - _: &DatabaseProviderRW, - _input: ExecInput, - ) -> Result { + fn execute(&mut self, _: &Provider, _input: ExecInput) -> Result { self.exec_outputs .pop_front() .unwrap_or_else(|| panic!("Test stage {} executed too many times.", self.id)) } - fn unwind( - &mut self, - _: &DatabaseProviderRW, - _input: UnwindInput, - ) -> Result { + fn unwind(&mut self, _: &Provider, _input: UnwindInput) -> Result { self.unwind_outputs .pop_front() .unwrap_or_else(|| panic!("Test stage {} unwound too many times.", self.id)) diff --git a/crates/stages/stages/benches/criterion.rs b/crates/stages/stages/benches/criterion.rs index 0e13437553b5..667caf2ab4a2 100644 --- a/crates/stages/stages/benches/criterion.rs +++ b/crates/stages/stages/benches/criterion.rs @@ -3,16 +3,17 @@ use criterion::{criterion_main, measurement::WallTime, BenchmarkGroup, Criterion #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; use reth_config::config::{EtlConfig, TransactionLookupConfig}; -use reth_db::{test_utils::TempDatabase, DatabaseEnv}; +use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; use reth_primitives::BlockNumber; +use reth_provider::{DatabaseProvider, DatabaseProviderFactory}; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TransactionLookupStage}, test_utils::TestStageDB, StageCheckpoint, }; use reth_stages_api::{ExecInput, Stage, StageExt, UnwindInput}; -use std::{ops::RangeInclusive, sync::Arc}; +use std::ops::RangeInclusive; use tokio::runtime::Runtime; mod setup; @@ -146,7 +147,7 @@ fn measure_stage( block_interval: RangeInclusive, label: String, ) where - S: Clone + Stage>>, + S: Clone + Stage as Database>::TXMut>>, F: Fn(S, &TestStageDB, StageRange), { let stage_range = ( @@ -170,7 +171,7 @@ fn measure_stage( }, |_| async { let mut stage = stage.clone(); - let provider = db.factory.provider_rw().unwrap(); + let provider = 
db.factory.database_provider_rw().unwrap(); stage .execute_ready(input) .await diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index be55207836a2..e0c4a960e179 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,12 +1,12 @@ #![allow(unreachable_pub)] use itertools::concat; -use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; +use reth_db::{tables, test_utils::TempDatabase, Database, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, Address, SealedBlock, B256, U256}; -use reth_provider::TrieWriter; +use reth_provider::{DatabaseProvider, DatabaseProviderFactory, TrieWriter}; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, @@ -16,7 +16,7 @@ use reth_testing_utils::generators::{ random_eoa_accounts, BlockRangeParams, }; use reth_trie::StateRoot; -use std::{collections::BTreeMap, fs, path::Path, sync::Arc}; +use std::{collections::BTreeMap, fs, path::Path}; use tokio::runtime::Handle; mod constants; @@ -28,7 +28,9 @@ use reth_trie_db::DatabaseStateRoot; pub(crate) type StageRange = (ExecInput, UnwindInput); -pub(crate) fn stage_unwind>>>( +pub(crate) fn stage_unwind< + S: Clone + Stage as Database>::TXMut>>, +>( stage: S, db: &TestStageDB, range: StageRange, @@ -57,7 +59,9 @@ pub(crate) fn stage_unwind>>>( }); } -pub(crate) fn unwind_hashes>>>( +pub(crate) fn unwind_hashes< + S: Clone + Stage as Database>::TXMut>>, +>( stage: S, db: &TestStageDB, range: StageRange, @@ -65,7 +69,7 @@ pub(crate) fn unwind_hashes>>>( let (input, unwind) = range; let mut stage = stage; - let provider = db.factory.provider_rw().unwrap(); + let provider = db.factory.database_provider_rw().unwrap(); StorageHashingStage::default().unwind(&provider, unwind).unwrap(); AccountHashingStage::default().unwind(&provider, unwind).unwrap(); diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index f45ee3b78d62..5a527fbc42c2 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -376,7 +376,6 @@ where MerkleStage: Stage, AccountHashingStage: Stage, StorageHashingStage: Stage, - MerkleStage: Stage, { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 8365b836a2d4..a97fd4b76843 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -220,7 +220,7 @@ where let db = StateProviderDatabase(LatestStateProviderRef::new( provider.tx_ref(), - provider.static_file_provider().clone(), + provider.static_file_provider(), )); let mut executor = self.executor_provider.batch_executor(db); executor.set_tip(max_block); @@ -665,8 +665,8 @@ mod tests { StorageEntry, B256, U256, }; use reth_provider::{ - test_utils::create_test_provider_factory, AccountReader, ReceiptProvider, - StaticFileProviderFactory, + test_utils::create_test_provider_factory, AccountReader, DatabaseProviderFactory, + ReceiptProvider, StaticFileProviderFactory, }; use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; use reth_stages_api::StageUnitCheckpoint; @@ -853,8 +853,9 @@ mod tests { .commit() .unwrap(); { + let static_file_provider = provider.static_file_provider(); let mut receipts_writer = - provider.static_file_provider().latest_writer(StaticFileSegment::Receipts).unwrap(); + 
diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs
index f45ee3b78d62..5a527fbc42c2 100644
--- a/crates/stages/stages/src/sets.rs
+++ b/crates/stages/stages/src/sets.rs
@@ -376,7 +376,6 @@ where
     MerkleStage: Stage<Provider>,
     AccountHashingStage: Stage<Provider>,
     StorageHashingStage: Stage<Provider>,
-    MerkleStage: Stage<Provider>,
 {
     fn builder(self) -> StageSetBuilder<Provider> {
         StageSetBuilder::default()
diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs
index 8365b836a2d4..a97fd4b76843 100644
--- a/crates/stages/stages/src/stages/execution.rs
+++ b/crates/stages/stages/src/stages/execution.rs
@@ -220,7 +220,7 @@ where
         let db = StateProviderDatabase(LatestStateProviderRef::new(
             provider.tx_ref(),
-            provider.static_file_provider().clone(),
+            provider.static_file_provider(),
         ));
         let mut executor = self.executor_provider.batch_executor(db);
         executor.set_tip(max_block);
@@ -665,8 +665,8 @@ mod tests {
         StorageEntry, B256, U256,
     };
     use reth_provider::{
-        test_utils::create_test_provider_factory, AccountReader, ReceiptProvider,
-        StaticFileProviderFactory,
+        test_utils::create_test_provider_factory, AccountReader, DatabaseProviderFactory,
+        ReceiptProvider, StaticFileProviderFactory,
     };
     use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig};
     use reth_stages_api::StageUnitCheckpoint;
@@ -853,8 +853,9 @@ mod tests {
             .commit()
             .unwrap();
         {
+            let static_file_provider = provider.static_file_provider();
             let mut receipts_writer =
-                provider.static_file_provider().latest_writer(StaticFileSegment::Receipts).unwrap();
+                static_file_provider.latest_writer(StaticFileSegment::Receipts).unwrap();
             receipts_writer.increment_block(0).unwrap();
             receipts_writer.commit().unwrap();
         }
@@ -894,7 +895,7 @@ mod tests {

         // Tests node with database and node with static files
         for mut mode in modes {
-            let provider = factory.provider_rw().unwrap();
+            let provider = factory.database_provider_rw().unwrap();

             if let Some(mode) = &mut mode {
                 // Simulating a full node where we write receipts to database
@@ -967,7 +968,7 @@ mod tests {
                 "Post changed of a account"
             );

-            let provider = factory.provider_rw().unwrap();
+            let provider = factory.database_provider_rw().unwrap();
             let mut stage = stage();
             stage.prune_modes = mode.unwrap_or_default();

@@ -999,8 +1000,9 @@ mod tests {
             .commit()
             .unwrap();
         {
+            let static_file_provider = provider.static_file_provider();
             let mut receipts_writer =
-                provider.static_file_provider().latest_writer(StaticFileSegment::Receipts).unwrap();
+                static_file_provider.latest_writer(StaticFileSegment::Receipts).unwrap();
             receipts_writer.increment_block(0).unwrap();
             receipts_writer.commit().unwrap();
         }
@@ -1025,7 +1027,7 @@ mod tests {
         provider.commit().unwrap();

         // execute
-        let mut provider = factory.provider_rw().unwrap();
+        let mut provider = factory.database_provider_rw().unwrap();

         // If there is a pruning configuration, then it's forced to use the database.
         // This way we test both cases.
@@ -1048,7 +1050,7 @@ mod tests {
         provider.commit().unwrap();

         // Test Unwind
-        provider = factory.provider_rw().unwrap();
+        provider = factory.database_provider_rw().unwrap();
         let mut stage = stage();
         stage.prune_modes = mode.unwrap_or_default();

@@ -1101,7 +1103,7 @@ mod tests {
     #[tokio::test]
     async fn test_selfdestruct() {
         let test_db = TestStageDB::default();
-        let provider = test_db.factory.provider_rw().unwrap();
+        let provider = test_db.factory.database_provider_rw().unwrap();
         let input = ExecInput { target: Some(1), checkpoint: None };
         let mut genesis_rlp = hex!("f901f8f901f3a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0c9ceb8372c88cb461724d8d3d87e8b933f6fc5f679d4841800e662f4428ffd0da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080830f4240808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice();
         let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap();
@@ -1116,8 +1118,9 @@ mod tests {
             .commit()
             .unwrap();
         {
+            let static_file_provider = provider.static_file_provider();
             let mut receipts_writer =
-                provider.static_file_provider().latest_writer(StaticFileSegment::Receipts).unwrap();
+                static_file_provider.latest_writer(StaticFileSegment::Receipts).unwrap();
             receipts_writer.increment_block(0).unwrap();
             receipts_writer.commit().unwrap();
         }
@@ -1167,13 +1170,13 @@ mod tests {
         provider.commit().unwrap();

         // execute
-        let provider = test_db.factory.provider_rw().unwrap();
+        let provider = test_db.factory.database_provider_rw().unwrap();
         let mut execution_stage = stage();
         let _ = execution_stage.execute(&provider, input).unwrap();
         provider.commit().unwrap();

         // assert unwind stage
-        let provider = test_db.factory.provider_rw().unwrap();
+        let provider = test_db.factory.database_provider_rw().unwrap();
         assert_eq!(provider.basic_account(destroyed_address), Ok(None), "Account was destroyed");

         assert_eq!(
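
The recurring test edit above — binding `provider.static_file_provider()` to a local before taking a writer — is the usual fix once `static_file_provider()` returns the provider by value: a writer borrowed from the temporary would be dropped at the end of the statement (E0716). A minimal standalone illustration of the pattern (simplified toy types, not the reth ones):

    struct StaticFileProvider;
    struct Writer<'a>(&'a StaticFileProvider);

    impl StaticFileProvider {
        /// Hands out a writer that borrows from the provider.
        fn latest_writer(&self) -> Writer<'_> {
            Writer(self)
        }
    }

    fn static_file_provider() -> StaticFileProvider {
        StaticFileProvider
    }

    fn main() {
        // Does not compile: the temporary provider is dropped while the
        // writer still borrows it.
        // let writer = static_file_provider().latest_writer();

        // Binding the provider first keeps it alive for the writer's lifetime.
        let provider = static_file_provider();
        let writer = provider.latest_writer();
        let _ = writer;
    }
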
diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs
index 99b7a068ab9c..8e7c319a1ba7 100644
--- a/crates/stages/stages/src/stages/hashing_account.rs
+++ b/crates/stages/stages/src/stages/hashing_account.rs
@@ -7,7 +7,10 @@ use reth_db_api::{
 };
 use reth_etl::Collector;
 use reth_primitives::{keccak256, Account, B256};
-use reth_provider::{AccountExtReader, DBProvider, HashingWriter, StatsReader};
+use reth_provider::{
+    AccountExtReader, DBProvider, DatabaseProvider, HashingWriter, StaticFileProviderFactory,
+    StatsReader,
+};
 use reth_stages_api::{
     AccountHashingCheckpoint, EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint,
     StageError, StageId, UnwindInput, UnwindOutput,
@@ -57,8 +60,8 @@ impl AccountHashingStage {
     ///
     /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the
     /// account state in the `AccountChangeSets` table.
-    pub fn seed(
-        provider: &Provider,
+    pub fn seed(
+        provider: &DatabaseProvider,
         opts: SeedOpts,
     ) -> Result<Vec<(Address, Account)>, StageError> {
         use reth_db_api::models::AccountBeforeTx;
@@ -346,6 +349,7 @@ mod tests {
     use super::*;
     use crate::test_utils::TestStageDB;
     use reth_primitives::Address;
+    use reth_provider::DatabaseProviderFactory;

     pub(crate) struct AccountHashingTestRunner {
         pub(crate) db: TestStageDB,
@@ -440,7 +444,7 @@ mod tests {
         type Seed = Vec<(Address, Account)>;

         fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
-            let provider = self.db.factory.provider_rw()?;
+            let provider = self.db.factory.database_provider_rw()?;
             let res = Ok(AccountHashingStage::seed(
                 &provider,
                 SeedOpts { blocks: 1..=input.target(), accounts: 10, txs: 0..3 },
diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs
index e04c08604d2f..bcc490d67c1b 100644
--- a/crates/stages/stages/src/stages/headers.rs
+++ b/crates/stages/stages/src/stages/headers.rs
@@ -278,8 +278,7 @@ where

         // Write the headers and related tables to DB from ETL space
         let to_be_processed = self.hash_collector.len() as u64;
-        let last_header_number =
-            self.write_headers(provider, provider.static_file_provider().clone())?;
+        let last_header_number = self.write_headers(provider, provider.static_file_provider())?;

         // Clear ETL collectors
         self.hash_collector.clear();
diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs
index 55e504ac34cf..8ca8d173fd85 100644
--- a/crates/stages/stages/src/stages/index_account_history.rs
+++ b/crates/stages/stages/src/stages/index_account_history.rs
@@ -159,7 +159,7 @@ mod tests {
         transaction::DbTx,
     };
     use reth_primitives::{address, BlockNumber, B256};
-    use reth_provider::providers::StaticFileWriter;
+    use reth_provider::{providers::StaticFileWriter, DatabaseProviderFactory};
     use reth_testing_utils::generators::{
         self, random_block_range, random_changeset_range, random_contract_account_range,
         BlockRangeParams,
@@ -219,7 +219,7 @@ mod tests {
                 .map(|block_number| StageCheckpoint { block_number, stage_checkpoint: None }),
         };
         let mut stage = IndexAccountHistoryStage::default();
-        let provider = db.factory.provider_rw().unwrap();
+        let provider = db.factory.database_provider_rw().unwrap();
         let out = stage.execute(&provider, input).unwrap();
         assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(run_to), done: true });
         provider.commit().unwrap();
@@ -232,7 +232,7 @@ mod tests {
             ..Default::default()
         };
         let mut stage = IndexAccountHistoryStage::default();
-        let provider = db.factory.provider_rw().unwrap();
+        let provider = db.factory.database_provider_rw().unwrap();
         let out = stage.unwind(&provider, input).unwrap();
         assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) });
         provider.commit().unwrap();
@@ -479,7 +479,7 @@ mod tests {
             prune_mode: Some(PruneMode::Before(36)),
             ..Default::default()
         };
-        let provider = db.factory.provider_rw().unwrap();
+        let provider = db.factory.database_provider_rw().unwrap();
         let out = stage.execute(&provider, input).unwrap();
         assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(20000), done: true });
         provider.commit().unwrap();
diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs
index b26ef1826e05..00646da2fd22 100644
--- a/crates/stages/stages/src/stages/index_storage_history.rs
+++ b/crates/stages/stages/src/stages/index_storage_history.rs
@@ -164,7 +164,7 @@ mod tests {
         transaction::DbTx,
     };
     use reth_primitives::{address, b256, Address, BlockNumber, StorageEntry, B256, U256};
-    use reth_provider::providers::StaticFileWriter;
+    use reth_provider::{providers::StaticFileWriter, DatabaseProviderFactory};
     use reth_testing_utils::generators::{
         self, random_block_range, random_changeset_range, random_contract_account_range,
         BlockRangeParams,
@@ -237,7 +237,7 @@ mod tests {
                 .map(|block_number| StageCheckpoint { block_number, stage_checkpoint: None }),
         };
         let mut stage = IndexStorageHistoryStage::default();
-        let provider = db.factory.provider_rw().unwrap();
+        let provider = db.factory.database_provider_rw().unwrap();
         let out = stage.execute(&provider, input).unwrap();
         assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(run_to), done: true });
         provider.commit().unwrap();
@@ -250,7 +250,7 @@ mod tests {
             ..Default::default()
         };
         let mut stage = IndexStorageHistoryStage::default();
-        let provider = db.factory.provider_rw().unwrap();
+        let provider = db.factory.database_provider_rw().unwrap();
         let out = stage.unwind(&provider, input).unwrap();
         assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) });
         provider.commit().unwrap();
@@ -500,7 +500,7 @@ mod tests {
             prune_mode: Some(PruneMode::Before(36)),
             ..Default::default()
         };
-        let provider = db.factory.provider_rw().unwrap();
+        let provider = db.factory.database_provider_rw().unwrap();
         let out = stage.execute(&provider, input).unwrap();
         assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(20000), done: true });
         provider.commit().unwrap();
diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs
index 694f3e472241..a5d537da42c7 100644
--- a/crates/stages/stages/src/stages/mod.rs
+++ b/crates/stages/stages/src/stages/mod.rs
@@ -144,7 +144,7 @@ mod tests {
                                expect_num_receipts: usize,
                                expect_num_acc_changesets: usize,
                                expect_num_storage_changesets: usize| async move {
-            let provider = factory.provider_rw().unwrap();
+            let provider = factory.database_provider_rw().unwrap();

             // Check execution and create receipts and changesets according to the pruning
             // configuration
diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs
index 7e5b8becda21..1995bc000a29 100644
--- a/crates/stages/stages/src/stages/prune.rs
+++ b/crates/stages/stages/src/stages/prune.rs
@@ -51,9 +51,9 @@ where
         let mut pruner = PrunerBuilder::default()
             .segments(self.prune_modes.clone())
             .delete_limit(self.commit_threshold)
-            .build::(provider.static_file_provider().clone());
+            .build::(provider.static_file_provider());

-        let result = pruner.run_with_provider(&provider, input.target())?;
+        let result = pruner.run_with_provider(provider, input.target())?;
         if result.progress.is_finished() {
             Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true })
         } else {
diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs
index 855c0bc52661..cf61a1e31461 100644
--- a/crates/stages/stages/src/stages/sender_recovery.rs
+++ b/crates/stages/stages/src/stages/sender_recovery.rs
@@ -155,7 +155,7 @@ where
         })
         .unzip();

-    let static_file_provider = provider.static_file_provider().clone();
+    let static_file_provider = provider.static_file_provider();

    // We do not use `tokio::task::spawn_blocking` because, during a shutdown,
    // there will be a timeout grace period in which Tokio does not allow spawning
@@ -339,8 +339,8 @@ mod tests {
     use reth_db_api::cursor::DbCursorRO;
     use reth_primitives::{BlockNumber, SealedBlock, TransactionSigned, B256};
     use reth_provider::{
-        providers::StaticFileWriter, PruneCheckpointWriter, StaticFileProviderFactory,
-        TransactionsProvider,
+        providers::StaticFileWriter, DatabaseProviderFactory, PruneCheckpointWriter,
+        StaticFileProviderFactory, TransactionsProvider,
     };
     use reth_prune_types::{PruneCheckpoint, PruneMode};
     use reth_stages_api::StageUnitCheckpoint;
@@ -533,7 +533,7 @@ mod tests {
             .expect("save stage checkpoint");
         provider.commit().expect("commit");

-        let provider = db.factory.provider_rw().unwrap();
+        let provider = db.factory.database_provider_rw().unwrap();
         assert_eq!(
             stage_checkpoint(&provider).expect("stage checkpoint"),
             EntitiesCheckpoint {
diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs
index 6f395af73dee..35c0d8637df4 100644
--- a/crates/stages/stages/src/stages/tx_lookup.rs
+++ b/crates/stages/stages/src/stages/tx_lookup.rs
@@ -252,7 +252,9 @@ mod tests {
     };
     use assert_matches::assert_matches;
     use reth_primitives::{BlockNumber, SealedBlock, B256};
-    use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory};
+    use reth_provider::{
+        providers::StaticFileWriter, DatabaseProviderFactory, StaticFileProviderFactory,
+    };
     use reth_stages_api::StageUnitCheckpoint;
     use reth_testing_utils::generators::{
         self, random_block, random_block_range, BlockParams, BlockRangeParams,
@@ -406,7 +408,7 @@ mod tests {
             .expect("save stage checkpoint");
         provider.commit().expect("commit");

-        let provider = db.factory.provider_rw().unwrap();
+        let provider = db.factory.database_provider_rw().unwrap();
         assert_eq!(
             stage_checkpoint(&provider).expect("stage checkpoint"),
             EntitiesCheckpoint {
diff --git a/crates/stages/stages/src/test_utils/runner.rs b/crates/stages/stages/src/test_utils/runner.rs
index d0003ff12d90..7b36bb2e8f43 100644
--- a/crates/stages/stages/src/test_utils/runner.rs
+++ b/crates/stages/stages/src/test_utils/runner.rs
@@ -1,11 +1,10 @@
 use super::TestStageDB;
-use reth_db::{test_utils::TempDatabase, DatabaseEnv};
-use reth_provider::ProviderError;
+use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv};
+use reth_provider::{DatabaseProvider, ProviderError};
 use reth_stages_api::{
     ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput,
 };
 use reth_storage_errors::db::DatabaseError;
-use std::sync::Arc;
 use tokio::sync::oneshot;

 #[derive(thiserror::Error, Debug)]
@@ -20,7 +19,7 @@ pub(crate) enum TestRunnerError {

 /// A generic test runner for stages.
 pub(crate) trait StageTestRunner {
-    type S: Stage<DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>>> + 'static;
+    type S: Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut>> + 'static;

     /// Return a reference to the database.
     fn db(&self) -> &TestStageDB;
diff --git a/crates/stages/stages/src/test_utils/set.rs b/crates/stages/stages/src/test_utils/set.rs
index d17695168e2c..ef6d278ba448 100644
--- a/crates/stages/stages/src/test_utils/set.rs
+++ b/crates/stages/stages/src/test_utils/set.rs
@@ -1,6 +1,5 @@
 use super::TEST_STAGE_ID;
 use crate::{StageSet, StageSetBuilder};
-use reth_db_api::database::Database;
 use reth_stages_api::{test_utils::TestStage, ExecOutput, StageError, UnwindOutput};
 use std::collections::VecDeque;

@@ -19,8 +18,8 @@ impl TestStages {
     }
 }

-impl<DB: Database> StageSet<DB> for TestStages {
-    fn builder(self) -> StageSetBuilder<DB> {
+impl<Provider> StageSet<Provider> for TestStages {
+    fn builder(self) -> StageSetBuilder<Provider> {
         StageSetBuilder::default().add_stage(
             TestStage::new(TEST_STAGE_ID)
                 .with_exec(self.exec_outputs)
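
Most of the mechanical churn in these tests is `provider_rw()` becoming `database_provider_rw()`. The difference, schematically (shapes inferred from the call sites in this series, not the actual reth definitions): `provider_rw()` yields the `DatabaseProviderRW` tuple wrapper, while `DatabaseProviderFactory::database_provider_rw()` yields the inner read-write provider directly — which is why the `.0` projections disappear again in the later patches.

    // Schematic only; the real types carry transaction and node-type parameters.
    struct DatabaseProvider;                         // concrete read-write provider
    struct DatabaseProviderRW(pub DatabaseProvider); // tuple wrapper around it

    struct ProviderFactory;

    impl ProviderFactory {
        /// Old call sites reached the inner provider via `.0`.
        fn provider_rw(&self) -> DatabaseProviderRW {
            DatabaseProviderRW(DatabaseProvider)
        }

        /// New call sites get the inner provider directly.
        fn database_provider_rw(&self) -> DatabaseProvider {
            DatabaseProvider
        }
    }
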
e00ca9e09ec8..c62f72a270bf 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -205,7 +205,7 @@ impl TryIntoHistoricalStateProvider for DatabaseProvider
     }
 }

-impl DatabaseProviderRW {
+impl DatabaseProvider {
     // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev.
     // #[cfg(any(test, feature = "test-utils"))]
     /// Inserts an historical block. **Used for setting up test environments**
diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs
index 247c2d86b7d6..099a2b0c8ff6 100644
--- a/crates/storage/provider/src/writer/mod.rs
+++ b/crates/storage/provider/src/writer/mod.rs
@@ -549,6 +549,7 @@ mod tests {
     use reth_primitives::{
         keccak256, Account, Address, Receipt, Receipts, StorageEntry, B256, U256,
     };
+    use reth_storage_api::DatabaseProviderFactory;
     use reth_trie::{
         test_utils::{state_root, storage_root_prehashed},
         HashedPostState, HashedStorage, StateRoot, StorageRoot,
@@ -755,7 +756,7 @@ mod tests {
     #[test]
     fn write_to_db_storage() {
         let factory = create_test_provider_factory();
-        let provider = factory.provider_rw().unwrap();
+        let provider = factory.database_provider_rw().unwrap();

         let address_a = Address::ZERO;
         let address_b = Address::repeat_byte(0xff);
@@ -950,7 +951,7 @@ mod tests {
     #[test]
     fn write_to_db_multiple_selfdestructs() {
         let factory = create_test_provider_factory();
-        let provider = factory.provider_rw().unwrap();
+        let provider = factory.database_provider_rw().unwrap();

         let address1 = Address::random();
         let account_info = RevmAccountInfo { nonce: 1, ..Default::default() };
@@ -1265,7 +1266,7 @@ mod tests {
     #[test]
     fn storage_change_after_selfdestruct_within_block() {
         let factory = create_test_provider_factory();
-        let provider = factory.provider_rw().unwrap();
+        let provider = factory.database_provider_rw().unwrap();

         let address1 = Address::random();
         let account1 = RevmAccountInfo { nonce: 1, ..Default::default() };
@@ -1415,7 +1416,7 @@ mod tests {
         .collect();

         let provider_factory = create_test_provider_factory();
-        let provider_rw = provider_factory.provider_rw().unwrap();
+        let provider_rw = provider_factory.database_provider_rw().unwrap();

         // insert initial state to the database
         let tx = provider_rw.tx_ref();
diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs
index eb08914cb723..d29aafa82120 100644
--- a/testing/ef-tests/src/cases/blockchain_test.rs
+++ b/testing/ef-tests/src/cases/blockchain_test.rs
@@ -9,7 +9,7 @@ use rayon::iter::{ParallelBridge, ParallelIterator};
 use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment};
 use reth_provider::{
     providers::StaticFileWriter, test_utils::create_test_provider_factory_with_chain_spec,
-    HashingWriter,
+    DatabaseProviderFactory, HashingWriter, StaticFileProviderFactory,
 };
 use reth_stages::{stages::ExecutionStage, ExecInput, Stage};
 use std::{collections::BTreeMap, fs, path::Path, sync::Arc};
@@ -86,7 +86,7 @@ impl Case for BlockchainTestCase {
                 let provider = create_test_provider_factory_with_chain_spec(Arc::new(
                     case.network.clone().into(),
                 ))
-                .provider_rw()
+                .database_provider_rw()
                 .unwrap();

                 // Insert initial test state into the provider.
@@ -102,10 +102,9 @@ impl Case for BlockchainTestCase {

                 // Initialize receipts static file with genesis
                 {
-                    let mut receipts_writer = provider
-                        .static_file_provider()
-                        .latest_writer(StaticFileSegment::Receipts)
-                        .unwrap();
+                    let static_file_provider = provider.static_file_provider();
+                    let mut receipts_writer =
+                        static_file_provider.latest_writer(StaticFileSegment::Receipts).unwrap();
                     receipts_writer.increment_block(0).unwrap();
                     receipts_writer.commit_without_sync_all().unwrap();
                 }

From 0c4b032b5cf2cd3da88dcf929d4f051532e20e4f Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Mon, 16 Sep 2024 17:35:36 +0300
Subject: [PATCH 05/10] clippy

---
 crates/storage/provider/src/writer/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs
index 099a2b0c8ff6..9c207e3a9e14 100644
--- a/crates/storage/provider/src/writer/mod.rs
+++ b/crates/storage/provider/src/writer/mod.rs
@@ -47,7 +47,7 @@ impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF
     /// # Parameters
     /// - `database`: An optional reference to a database provider.
     /// - `static_file`: An optional mutable reference to a static file instance.
-    pub fn new(database: &'a ProviderDB, static_file: Option<ProviderSF>) -> Self {
+    pub const fn new(database: &'a ProviderDB, static_file: Option<ProviderSF>) -> Self {
         Self { database, static_file }
     }
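
Making the constructor `const` costs nothing here — the body only moves its arguments into the struct — and it additionally allows a writer to be built in const contexts. A standalone illustration with toy types (not `UnifiedStorageWriter` itself):

    struct Writer<'a> {
        database: &'a str,
        static_file: Option<u32>,
    }

    impl<'a> Writer<'a> {
        /// A `const fn` constructor can be evaluated at compile time.
        const fn new(database: &'a str, static_file: Option<u32>) -> Self {
            Self { database, static_file }
        }
    }

    // ...which permits, for example, a compile-time constant writer:
    const WRITER: Writer<'static> = Writer::new("db", None);

    fn main() {
        let _ = WRITER.database;
    }
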
From 1e8e5f2bfb3aeeb7f0e14a0b830ed63f3a9dc45c Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Mon, 16 Sep 2024 17:40:30 +0300
Subject: [PATCH 06/10] fix test-utils

---
 crates/stages/stages/src/stages/hashing_account.rs | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs
index 8e7c319a1ba7..1524a78df222 100644
--- a/crates/stages/stages/src/stages/hashing_account.rs
+++ b/crates/stages/stages/src/stages/hashing_account.rs
@@ -7,10 +7,7 @@ use reth_db_api::{
 };
 use reth_etl::Collector;
 use reth_primitives::{keccak256, Account, B256};
-use reth_provider::{
-    AccountExtReader, DBProvider, DatabaseProvider, HashingWriter, StaticFileProviderFactory,
-    StatsReader,
-};
+use reth_provider::{AccountExtReader, DBProvider, HashingWriter, StatsReader};
 use reth_stages_api::{
     AccountHashingCheckpoint, EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint,
     StageError, StageId, UnwindInput, UnwindOutput,
@@ -66,7 +63,9 @@ impl AccountHashingStage {
     ) -> Result<Vec<(Address, Account)>, StageError> {
         use reth_db_api::models::AccountBeforeTx;
         use reth_primitives::U256;
-        use reth_provider::providers::StaticFileWriter;
+        use reth_provider::{
+            providers::StaticFileWriter, DatabaseProvider, StaticFileProviderFactory,
+        };
         use reth_testing_utils::{
             generators,
             generators::{random_block_range, random_eoa_accounts, BlockRangeParams},

From 05be3b81ba87d59a6331cbafb48bb0d90770b763 Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Mon, 16 Sep 2024 17:48:32 +0300
Subject: [PATCH 07/10] fixes

---
 crates/optimism/cli/src/commands/import_receipts.rs | 4 ++--
 crates/stages/api/src/stage.rs                      | 2 +-
 crates/stages/stages/src/stages/hashing_account.rs  | 6 ++----
 crates/storage/db-api/src/unwind.rs                 | 2 +-
 4 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs
index fca900f96f5f..8074fa92f4d9 100644
--- a/crates/optimism/cli/src/commands/import_receipts.rs
+++ b/crates/optimism/cli/src/commands/import_receipts.rs
@@ -232,13 +232,13 @@ where
             static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?;

         // finally, write the receipts
-        let mut storage_writer = UnifiedStorageWriter::from(&provider.0, static_file_producer);
+        let mut storage_writer = UnifiedStorageWriter::from(&provider, static_file_producer);
         storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?;
     }

     // as static files works in file ranges, internally it will be committing when creating the
     // next file range already, so we only need to call explicitly at the end.
-    UnifiedStorageWriter::commit(provider.0, static_file_provider)?;
+    UnifiedStorageWriter::commit(provider, static_file_provider)?;

     Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns })
 }
diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs
index 162f55d7afec..1e201aee6635 100644
--- a/crates/stages/api/src/stage.rs
+++ b/crates/stages/api/src/stage.rs
@@ -188,7 +188,7 @@ pub struct UnwindOutput {
 ///
 /// Stages are executed as part of a pipeline where they are executed serially.
 ///
-/// Stages receive [`DatabaseProviderRW`].
+/// Stages receive [`DBProvider`](reth_provider::DBProvider).
 #[auto_impl::auto_impl(Box)]
 pub trait Stage<Provider>: Send + Sync {
     /// Get the ID of the stage.
diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs
index 1524a78df222..16510ac88324 100644
--- a/crates/stages/stages/src/stages/hashing_account.rs
+++ b/crates/stages/stages/src/stages/hashing_account.rs
@@ -58,14 +58,12 @@ impl AccountHashingStage {
     /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the
     /// account state in the `AccountChangeSets` table.
     pub fn seed(
-        provider: &DatabaseProvider,
+        provider: &reth_provider::DatabaseProvider,
         opts: SeedOpts,
     ) -> Result<Vec<(Address, Account)>, StageError> {
         use reth_db_api::models::AccountBeforeTx;
         use reth_primitives::U256;
-        use reth_provider::{
-            providers::StaticFileWriter, DatabaseProvider, StaticFileProviderFactory,
-        };
+        use reth_provider::{StaticFileProviderFactory, StaticFileWriter};
         use reth_testing_utils::{
             generators,
             generators::{random_block_range, random_eoa_accounts, BlockRangeParams},
diff --git a/crates/storage/db-api/src/unwind.rs b/crates/storage/db-api/src/unwind.rs
index 19d08506f756..79cf585a62c7 100644
--- a/crates/storage/db-api/src/unwind.rs
+++ b/crates/storage/db-api/src/unwind.rs
@@ -40,7 +40,7 @@ pub trait DbTxUnwindExt: DbTxMut {
         Ok(deleted)
     }

-    /// Unwind a table forward by a [`Walker`][reth_db_api::cursor::Walker] on another table.
+    /// Unwind a table forward by a [`Walker`][crate::cursor::Walker] on another table.
     ///
     /// Note: Range is inclusive and first key in the range is removed.
     fn unwind_table_by_walker(
From ff8f9de6237f1a72a956fa6af9e5358b90fed4ce Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Wed, 18 Sep 2024 13:53:37 +0300
Subject: [PATCH 08/10] fix

---
 crates/cli/commands/src/stage/drop.rs                      | 2 +-
 crates/storage/db-common/src/init.rs                       | 2 +-
 crates/storage/provider/src/providers/database/provider.rs | 5 +----
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs
index 60232e18c6a6..8fcbaf044009 100644
--- a/crates/cli/commands/src/stage/drop.rs
+++ b/crates/cli/commands/src/stage/drop.rs
@@ -179,7 +179,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {

         tx.put::<tables::StageCheckpoints>(StageId::Finish.to_string(), Default::default())?;

-        UnifiedStorageWriter::commit_unwind(provider_rw.0, static_file_provider)?;
+        UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?;

         Ok(())
     }
diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs
index cd8a5bb2e81e..864849ebe5fe 100644
--- a/crates/storage/db-common/src/init.rs
+++ b/crates/storage/db-common/src/init.rs
@@ -133,7 +133,7 @@ where

     // `commit_unwind`` will first commit the DB and then the static file provider, which is
     // necessary on `init_genesis`.
-    UnifiedStorageWriter::commit_unwind(provider_rw.0, static_file_provider)?;
+    UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?;

     Ok(hash)
 }
diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index bb4f906217bd..240ca123e3e9 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -2555,10 +2555,7 @@ impl StateChangeWriter for DatabaseProvider
     /// 1. Take the old value from the changeset
     /// 2. Take the new value from the local state
     /// 3. Set the local state to the value in the changeset
-    fn take_state(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<ExecutionOutcome> {
+    fn take_state(&self, range: RangeInclusive<BlockNumber>) -> ProviderResult<ExecutionOutcome> {
         if range.is_empty() {
             return Ok(ExecutionOutcome::default())
         }

From f5d8bfaf27d78f0b477d7442542cd5dd24e21d20 Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Wed, 18 Sep 2024 17:18:28 +0300
Subject: [PATCH 09/10] Update crates/stages/stages/src/stages/execution.rs

Co-authored-by: Matthias Seitz
---
 crates/stages/stages/src/stages/execution.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs
index a97fd4b76843..8c42648ef66c 100644
--- a/crates/stages/stages/src/stages/execution.rs
+++ b/crates/stages/stages/src/stages/execution.rs
@@ -152,7 +152,7 @@ impl ExecutionStage {
     /// been previously executed.
     fn adjust_prune_modes(
         &self,
-        provider: &impl StatsReader,
+        provider: impl StatsReader,
         start_block: u64,
         max_block: u64,
     ) -> Result<PruneModes, StageError> {
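
Taking `provider: impl StatsReader` by value still lets callers pass `&provider`, as long as the trait is also implemented for references — reth commonly derives such impls with `auto_impl`, as the `#[auto_impl::auto_impl(Box)]` attribute on `Stage` earlier in this series suggests; whether `StatsReader` carries one is assumed here, not shown in the patch. A standalone sketch of why both call styles compile:

    trait StatsReader {
        fn count(&self) -> usize;
    }

    // The blanket impl that an `#[auto_impl(&)]` derive would expand to.
    impl<'a, T: StatsReader + ?Sized> StatsReader for &'a T {
        fn count(&self) -> usize {
            (**self).count()
        }
    }

    fn adjust_prune_modes(provider: impl StatsReader) -> usize {
        provider.count()
    }

    struct Provider;
    impl StatsReader for Provider {
        fn count(&self) -> usize {
            42
        }
    }

    fn main() {
        let provider = Provider;
        assert_eq!(adjust_prune_modes(&provider), 42); // by reference
        assert_eq!(adjust_prune_modes(provider), 42); // by value
    }
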
From e8a7b1befea258322c25512938b4a51db6de6adc Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Thu, 19 Sep 2024 13:36:22 +0300
Subject: [PATCH 10/10] fmt

---
 crates/storage/provider/src/writer/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs
index 224fa53b4e84..63bbbf3e06d7 100644
--- a/crates/storage/provider/src/writer/mod.rs
+++ b/crates/storage/provider/src/writer/mod.rs
@@ -546,8 +546,8 @@ mod tests {
         models::{AccountBeforeTx, BlockNumberAddress},
         transaction::{DbTx, DbTxMut},
     };
-    use reth_storage_api::DatabaseProviderFactory;
     use reth_primitives::{Account, Address, Receipt, Receipts, StorageEntry};
+    use reth_storage_api::DatabaseProviderFactory;
     use reth_trie::{
         test_utils::{state_root, storage_root_prehashed},
         HashedPostState, HashedStorage, StateRoot, StorageRoot,