From 9d2791b60256f9629ccd3d4775dd07afda68c3c3 Mon Sep 17 00:00:00 2001 From: refcell Date: Wed, 24 Apr 2024 14:18:37 -0400 Subject: [PATCH 1/6] fixes feat(primitives): refactor out non-derivation-specific types from kona-derive fix(derive): remove unused type mods fix(derive): remove allow dead from ecotone --- Cargo.lock | 16 +++ crates/derive/Cargo.toml | 3 +- crates/derive/src/params.rs | 14 +-- crates/derive/src/sources/factory.rs | 39 +++++-- crates/derive/src/sources/plasma.rs | 165 ++++++++++++++++++++++++--- crates/derive/src/sources/source.rs | 13 ++- crates/derive/src/types/errors.rs | 9 ++ crates/plasma/Cargo.toml | 33 ++++++ crates/plasma/README.md | 27 +++++ crates/plasma/src/lib.rs | 21 ++++ crates/plasma/src/traits.rs | 65 +++++++++++ crates/plasma/src/types.rs | 84 ++++++++++++++ 12 files changed, 456 insertions(+), 33 deletions(-) create mode 100644 crates/plasma/Cargo.toml create mode 100644 crates/plasma/README.md create mode 100644 crates/plasma/src/lib.rs create mode 100644 crates/plasma/src/traits.rs create mode 100644 crates/plasma/src/types.rs diff --git a/Cargo.lock b/Cargo.lock index 5c5cf35f1..5f2704e7f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1556,6 +1556,7 @@ dependencies = [ "async-trait", "c-kzg", "hashbrown", + "kona-plasma", "kona-primitives", "lru", "miniz_oxide", @@ -1604,6 +1605,21 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "kona-plasma" +version = "0.0.1" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=e3f2f07)", + "alloy-primitives", + "anyhow", + "async-trait", + "kona-primitives", + "serde", + "serde_json", + "tracing", + "tracing-subscriber", +] + [[package]] name = "kona-preimage" version = "0.0.1" diff --git a/crates/derive/Cargo.toml b/crates/derive/Cargo.toml index 794865904..202f36680 100644 --- a/crates/derive/Cargo.toml +++ b/crates/derive/Cargo.toml @@ -18,6 +18,7 @@ alloy-rlp = { workspace = true, features = ["derive"] } # Local kona-primitives = { path = "../primitives", version = "0.0.1" } +kona-plasma = { path = "../plasma", version = "0.0.1" } # External alloy-sol-types = { version = "0.7.1", default-features = false } @@ -51,7 +52,7 @@ serde_json = { version = "1.0.116", default-features = false } [features] default = ["serde", "k256"] -serde = ["dep:serde", "alloy-primitives/serde", "alloy-consensus/serde", "op-alloy-consensus/serde"] +serde = ["dep:serde", "kona-plasma/serde", "alloy-primitives/serde", "alloy-consensus/serde", "op-alloy-consensus/serde"] k256 = ["alloy-primitives/k256", "alloy-consensus/k256", "op-alloy-consensus/k256"] online = [ "dep:revm-primitives", diff --git a/crates/derive/src/params.rs b/crates/derive/src/params.rs index e2c8b60cf..697d87f0a 100644 --- a/crates/derive/src/params.rs +++ b/crates/derive/src/params.rs @@ -30,13 +30,6 @@ pub const CHANNEL_ID_LENGTH: usize = 16; /// [ChannelID] is an opaque identifier for a channel. pub type ChannelID = [u8; CHANNEL_ID_LENGTH]; -/// `keccak256("ConfigUpdate(uint256,uint8,bytes)")` -pub const CONFIG_UPDATE_TOPIC: B256 = - b256!("1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be"); - -/// The initial version of the system config event log. -pub const CONFIG_UPDATE_EVENT_VERSION_0: B256 = B256::ZERO; - /// Frames cannot be larger than 1MB. /// Data transactions that carry frames are generally not larger than 128 KB due to L1 network /// conditions, but we leave space to grow larger anyway (gas limit allows for more data). 
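An aside on the constant relocated by this hunk: `CONFIG_UPDATE_TOPIC` is documented as `keccak256("ConfigUpdate(uint256,uint8,bytes)")`, and that relationship can be sanity-checked in isolation. The sketch below is illustrative only and assumes nothing beyond `alloy-primitives`, which the workspace already depends on:

```rust
use alloy_primitives::{b256, keccak256, B256};

/// `keccak256("ConfigUpdate(uint256,uint8,bytes)")`, as declared in `params.rs`.
const CONFIG_UPDATE_TOPIC: B256 =
    b256!("1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be");

fn main() {
    // The topic is simply the keccak256 hash of the ABI event signature string.
    assert_eq!(keccak256("ConfigUpdate(uint256,uint8,bytes)"), CONFIG_UPDATE_TOPIC);
}
```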
@@ -45,6 +38,13 @@ pub const MAX_FRAME_LEN: usize = 1000; /// Deposit log event abi signature. pub const DEPOSIT_EVENT_ABI: &str = "TransactionDeposited(address,address,uint256,bytes)"; +/// `keccak256("ConfigUpdate(uint256,uint8,bytes)")` +pub const CONFIG_UPDATE_TOPIC: B256 = + b256!("1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be"); + +/// The initial version of the system config event log. +pub const CONFIG_UPDATE_EVENT_VERSION_0: B256 = B256::ZERO; + /// Deposit event abi hash. /// /// This is the keccak256 hash of the deposit event ABI signature. diff --git a/crates/derive/src/sources/factory.rs b/crates/derive/src/sources/factory.rs index 1d39d4b27..5e5647e94 100644 --- a/crates/derive/src/sources/factory.rs +++ b/crates/derive/src/sources/factory.rs @@ -3,24 +3,34 @@ use crate::{ sources::{BlobSource, CalldataSource, DataSource, PlasmaSource}, traits::{BlobProvider, ChainProvider, DataAvailabilityProvider}, - types::{BlockInfo, RollupConfig}, + types::{BlockID, BlockInfo, RollupConfig}, }; use alloc::{boxed::Box, fmt::Debug}; use alloy_primitives::{Address, Bytes}; use anyhow::{anyhow, Result}; use async_trait::async_trait; +use kona_plasma::traits::{ChainProvider as PlasmaChainProvider, PlasmaInputFetcher}; /// A factory for creating a calldata and blob provider. #[derive(Debug, Clone, Copy)] -pub struct DataSourceFactory +pub struct DataSourceFactory where C: ChainProvider + Clone, B: BlobProvider + Clone, + PCP: PlasmaChainProvider + Send + Clone, + PIF: PlasmaInputFetcher + Clone, + I: Iterator + Send + Clone, { /// The chain provider to use for the factory. pub chain_provider: C, + /// The plasma chain provider. + pub plasma_chain_provider: PCP, + /// The plasma iterator. + pub plasma_source: I, /// The blob provider pub blob_provider: B, + /// The plasma input fetcher. + pub plasma_input_fetcher: PIF, /// The ecotone timestamp. pub ecotone_timestamp: Option, /// Whether or not plasma is enabled. @@ -29,16 +39,22 @@ where pub signer: Address, } -impl DataSourceFactory +impl DataSourceFactory where C: ChainProvider + Clone + Debug, B: BlobProvider + Clone + Debug, + PCP: PlasmaChainProvider + Send + Clone + Debug, + PIF: PlasmaInputFetcher + Clone + Debug, + I: Iterator + Send + Clone, { /// Creates a new factory. 
- pub fn new(provider: C, blobs: B, cfg: &RollupConfig) -> Self { + pub fn new(provider: C, blobs: B, pcp: PCP, pif: PIF, s: I, cfg: &RollupConfig) -> Self { Self { chain_provider: provider, + plasma_chain_provider: pcp, + plasma_source: s, blob_provider: blobs, + plasma_input_fetcher: pif, ecotone_timestamp: cfg.ecotone_time, plasma_enabled: cfg.is_plasma_enabled(), signer: cfg.genesis.system_config.batcher_addr, @@ -47,13 +63,16 @@ where } #[async_trait] -impl DataAvailabilityProvider for DataSourceFactory +impl DataAvailabilityProvider for DataSourceFactory where C: ChainProvider + Send + Sync + Clone + Debug, B: BlobProvider + Send + Sync + Clone + Debug, + PCP: PlasmaChainProvider + Send + Sync + Clone + Debug, + PIF: PlasmaInputFetcher + Send + Sync + Clone + Debug, + I: Iterator + Send + Sync + Clone + Debug, { type Item = Bytes; - type DataIter = DataSource; + type DataIter = DataSource; async fn open_data( &self, @@ -81,7 +100,13 @@ where }); Ok(source) } else if self.plasma_enabled { - Ok(DataSource::Plasma(PlasmaSource::new(self.chain_provider.clone()))) + let id = BlockID { hash: block_ref.hash, number: block_ref.number }; + Ok(DataSource::Plasma(PlasmaSource::new( + self.plasma_chain_provider.clone(), + self.plasma_input_fetcher.clone(), + self.plasma_source.clone(), + id, + ))) } else { Err(anyhow!("No data source available")) } diff --git a/crates/derive/src/sources/plasma.rs b/crates/derive/src/sources/plasma.rs index d3cac8b6c..f20f00528 100644 --- a/crates/derive/src/sources/plasma.rs +++ b/crates/derive/src/sources/plasma.rs @@ -1,42 +1,177 @@ //! Plasma Data Source use crate::{ - traits::{AsyncIterator, ChainProvider}, - types::StageResult, + traits::AsyncIterator, + types::{ResetError, StageError, StageResult}, }; use alloc::boxed::Box; use alloy_primitives::Bytes; use async_trait::async_trait; +use kona_plasma::{ + traits::{ChainProvider, PlasmaInputFetcher}, + types::{ + decode_keccak256, Keccak256Commitment, PlasmaError, MAX_INPUT_SIZE, TX_DATA_VERSION_1, + }, +}; +use kona_primitives::block::BlockID; /// A plasma data iterator. #[derive(Debug, Clone)] -#[allow(dead_code)] -pub struct PlasmaSource +pub struct PlasmaSource where CP: ChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator, { + /// The plasma input fetcher. + input_fetcher: PIF, /// The chain provider to use for the plasma source. chain_provider: CP, - /// The plasma commitment. - commitment: Bytes, - /// The block number. - block_number: u64, - /// Whether the plasma source is open. - open: bool, + /// A source data iterator. + source: I, + /// Keeps track of a pending commitment so we can keep trying to fetch the input. + commitment: Option, + /// The block Id. + id: BlockID, } -impl PlasmaSource { +impl PlasmaSource +where + CP: ChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator, +{ /// Instantiates a new plasma data source. 
- pub fn new(chain_provider: CP) -> Self { - Self { chain_provider, commitment: Bytes::default(), block_number: 0, open: false } + pub fn new(chain_provider: CP, input_fetcher: PIF, source: I, id: BlockID) -> Self { + Self { chain_provider, input_fetcher, source, id, commitment: None } } } #[async_trait] -impl AsyncIterator for PlasmaSource { +impl AsyncIterator for PlasmaSource +where + CP: ChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator + Send, +{ type Item = Bytes; async fn next(&mut self) -> Option> { - unimplemented!("Plasma will not be supported until further notice."); + // Process origin syncs the challenge contract events and updates the local challenge states + // before we can proceed to fetch the input data. This function can be called multiple times + // for the same origin and noop if the origin was already processed. It is also called if + // there is not commitment in the current origin. + match self.input_fetcher.advance_l1_origin(&self.chain_provider, self.id).await { + Some(Ok(_)) => (), + Some(Err(PlasmaError::ReorgRequired)) => { + tracing::error!("new expired challenge"); + return Some(StageResult::Err(StageError::Custom(anyhow::anyhow!( + "new expired challenge" + )))); + } + Some(Err(e)) => { + tracing::error!("failed to advance plasma L1 origin: {:?}", e); + return Some(StageResult::Err(StageError::Plasma(e))); + } + None => { + tracing::warn!("l1 origin advance returned None"); + } + } + + // Set the commitment if it isn't available. + if self.commitment.is_none() { + // The l1 source returns the input commitment for the batch. + let data = match self.source.next().ok_or(PlasmaError::NotEnoughData) { + Ok(d) => d, + Err(e) => { + tracing::warn!("failed to pull next data from the plasma source iterator"); + return Some(Err(StageError::Plasma(e))); + } + }; + + // If the data is empty, + if data.is_empty() { + return Some(Err(StageError::Plasma(PlasmaError::NotEnoughData))); + } + + // If the tx data type is not plasma, we forward it downstream to let the next + // steps validate and potentially parse it as L1 DA inputs. + if data[0] != TX_DATA_VERSION_1 { + return Some(Ok(data)); + } + + // Validate that the batcher inbox data is a commitment. + self.commitment = match decode_keccak256(&data[1..]) { + Ok(c) => Some(c), + Err(e) => { + tracing::warn!("invalid commitment: {}, err: {}", data, e); + return self.next().await; + } + }; + } + + // Use the commitment to fetch the input from the plasma DA provider. + let commitment = self.commitment.as_ref().expect("the commitment must be set"); + + // Fetch the input data from the plasma DA provider. + let data = match self + .input_fetcher + .get_input(&self.chain_provider, commitment.clone(), self.id) + .await + { + Some(Ok(data)) => data, + Some(Err(PlasmaError::ReorgRequired)) => { + // The plasma fetcher may call for a reorg if the pipeline is stalled and the plasma + // DA manager continued syncing origins detached from the pipeline + // origin. + tracing::warn!("challenge for a new previously derived commitment expired"); + return Some(Err(StageError::Reset(ResetError::ReorgRequired))); + } + Some(Err(PlasmaError::ChallengeExpired)) => { + // This commitment was challenged and the challenge expired. + tracing::warn!("challenge expired, skipping batch"); + self.commitment = None; + // Skip the input. 
+ return self.next().await + } + Some(Err(PlasmaError::MissingPastWindow)) => { + return Some(Err(StageError::Custom(anyhow::anyhow!( + "data for comm {:?} not available", + commitment + )))); + } + Some(Err(PlasmaError::ChallengePending)) => { + // Continue stepping without slowing down. + return Some(Err(StageError::NotEnoughData)); + } + Some(Err(e)) => { + // Return temporary error so we can keep retrying. + return Some(Err(StageError::Custom(anyhow::anyhow!( + "failed to fetch input data with comm {:?} from da service: {:?}", + commitment, + e + )))); + } + None => { + // Return temporary error so we can keep retrying. + return Some(Err(StageError::Custom(anyhow::anyhow!( + "failed to fetch input data with comm {:?} from da service", + commitment + )))); + } + }; + + // The data length is limited to a max size to ensure they can be challenged in the DA + // contract. + if data.len() > MAX_INPUT_SIZE { + tracing::warn!("input data (len {}) exceeds max size {MAX_INPUT_SIZE}", data.len()); + self.commitment = None; + return self.next().await; + } + + // Reset the commitment so we can fetch the next one from the source at the next iteration. + self.commitment = None; + + return Some(Ok(data)); } } diff --git a/crates/derive/src/sources/source.rs b/crates/derive/src/sources/source.rs index 95129e20b..8dd392a85 100644 --- a/crates/derive/src/sources/source.rs +++ b/crates/derive/src/sources/source.rs @@ -8,27 +8,34 @@ use crate::{ use alloc::boxed::Box; use alloy_primitives::Bytes; use async_trait::async_trait; +use kona_plasma::traits::{ChainProvider as PlasmaChainProvider, PlasmaInputFetcher}; /// An enum over the various data sources. #[derive(Debug, Clone)] -pub enum DataSource +pub enum DataSource where CP: ChainProvider + Send, B: BlobProvider + Send, + PCP: PlasmaChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator + Send, { /// A calldata source. Calldata(CalldataSource), /// A blob source. Blob(BlobSource), /// A plasma source. - Plasma(PlasmaSource), + Plasma(PlasmaSource), } #[async_trait] -impl AsyncIterator for DataSource +impl AsyncIterator for DataSource where CP: ChainProvider + Send, B: BlobProvider + Send, + PCP: PlasmaChainProvider + Send, + PIF: PlasmaInputFetcher + Send, + I: Iterator + Send, { type Item = Bytes; diff --git a/crates/derive/src/types/errors.rs b/crates/derive/src/types/errors.rs index d677cdba7..fcac6dcd4 100644 --- a/crates/derive/src/types/errors.rs +++ b/crates/derive/src/types/errors.rs @@ -5,6 +5,7 @@ use crate::types::{BlockID, Frame}; use alloc::vec::Vec; use alloy_primitives::{Bytes, B256}; use core::fmt::Display; +use kona_plasma::types::PlasmaError; /// A result type for the derivation pipeline stages. pub type StageResult = Result; @@ -14,6 +15,8 @@ pub type StageResult = Result; pub enum StageError { /// There is no data to read from the channel bank. Eof, + /// Plasma data source error. + Plasma(PlasmaError), /// There is not enough data progress, but if we wait, the stage will eventually return data /// or produce an EOF error. 
NotEnoughData, @@ -61,6 +64,7 @@ impl PartialEq for StageError { matches!( (self, other), (StageError::Eof, StageError::Eof) | + (StageError::Plasma(_), StageError::Plasma(_)) | (StageError::NotEnoughData, StageError::NotEnoughData) | (StageError::NoChannelsAvailable, StageError::NoChannelsAvailable) | (StageError::NoChannel, StageError::NoChannel) | @@ -93,6 +97,7 @@ impl Display for StageError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { StageError::Eof => write!(f, "End of file"), + StageError::Plasma(e) => write!(f, "Plasma error: {:?}", e), StageError::NotEnoughData => write!(f, "Not enough data"), StageError::BlockFetch(hash) => { write!(f, "Failed to fetch block info and transactions by hash: {}", hash) @@ -126,6 +131,8 @@ pub enum ResetError { /// The first argument is the expected timestamp, and the second argument is the actual /// timestamp. BadTimestamp(u64, u64), + /// A reorg is required. + ReorgRequired, } impl PartialEq for ResetError { @@ -137,6 +144,7 @@ impl PartialEq for ResetError { (ResetError::BadTimestamp(e1, a1), ResetError::BadTimestamp(e2, a2)) => { e1 == e2 && a1 == a2 } + (ResetError::ReorgRequired, ResetError::ReorgRequired) => true, _ => false, } } @@ -151,6 +159,7 @@ impl Display for ResetError { ResetError::BadTimestamp(expected, actual) => { write!(f, "Bad timestamp: expected {}, got {}", expected, actual) } + ResetError::ReorgRequired => write!(f, "Reorg required"), } } } diff --git a/crates/plasma/Cargo.toml b/crates/plasma/Cargo.toml new file mode 100644 index 000000000..283e6e475 --- /dev/null +++ b/crates/plasma/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "kona-plasma" +description = "Plasma Data Availability Adapter" +version = "0.0.1" +edition.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true +homepage.workspace = true + +[dependencies] +# Workspace +anyhow.workspace = true +tracing.workspace = true + +# Local +kona-primitives = { path = "../primitives", verison = "0.0.1" } + +# External +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "e3f2f07", default-features = false } +alloy-primitives = { workspace = true, features = ["rlp"] } +async-trait = "0.1.77" + +# `serde` feature dependencies +serde = { version = "1.0.197", default-features = false, features = ["derive"], optional = true } + +[dev-dependencies] +tracing-subscriber = "0.3.18" +serde_json = { version = "1.0.68", default-features = false } + +[features] +default = ["serde"] +serde = ["dep:serde"] diff --git a/crates/plasma/README.md b/crates/plasma/README.md new file mode 100644 index 000000000..4eadf2ea9 --- /dev/null +++ b/crates/plasma/README.md @@ -0,0 +1,27 @@ +# `kona-plasma` + +Plasma Data Availability Adapter for `kona-derive`. + +[plasma]: https://specs.optimism.io/experimental/plasma.html + +`kona-plasma` is an implementation of the [Plasma][plasma] OP Stack Specification in rust. + +## Usage + +Add `kona-plasma` to your `Cargo.toml`. + +```ignore +[dependencies] +kona-plasma = "0.0.1" + +# Serde is enabled by default and can be disabled by toggling default-features off +# kona-plasma = { version = "0.0.1", default-features = false } +``` + +## Features + +### Serde + +[`serde`] serialization and deserialization support for `kona-plasma` types. + +By default, the `serde` feature is enabled on `kona-plasma`. 
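Before the new crate's sources, a short illustration of how `kona-plasma` is meant to be consumed. The sketch mirrors the commitment-parsing step that `PlasmaSource::next` performs in `kona-derive` and uses only items introduced in `crates/plasma/src/types.rs` below (`decode_keccak256`, `TX_DATA_VERSION_1`, `Keccak256Commitment`, `PlasmaError`); the wrapper function itself is hypothetical:

```rust
use kona_plasma::types::{decode_keccak256, Keccak256Commitment, PlasmaError, TX_DATA_VERSION_1};

/// Splits raw batcher-inbox tx data into an optional plasma commitment, mirroring the
/// checks `PlasmaSource::next` performs: a version byte followed by a typed commitment.
fn parse_batcher_data(data: &[u8]) -> Result<Option<Keccak256Commitment>, PlasmaError> {
    if data.is_empty() {
        return Err(PlasmaError::NotEnoughData);
    }
    // A different version byte means the data is not plasma; the pipeline forwards it
    // downstream untouched so later stages can treat it as ordinary L1 DA input.
    if data[0] != TX_DATA_VERSION_1 {
        return Ok(None);
    }
    // The remainder must be a keccak256 commitment: a 0x00 type byte plus a 32-byte hash.
    decode_keccak256(&data[1..]).map(Some)
}
```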
diff --git a/crates/plasma/src/lib.rs b/crates/plasma/src/lib.rs new file mode 100644 index 000000000..42a7d9d63 --- /dev/null +++ b/crates/plasma/src/lib.rs @@ -0,0 +1,21 @@ +#![doc = include_str!("../README.md")] +#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)] +#![deny(unused_must_use, rust_2018_idioms)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![no_std] + +extern crate alloc; + +pub mod traits; +pub mod types; + +// Re-export kona primitives. +pub use kona_primitives::prelude::*; + +/// The prelude exports common types and traits. +pub mod prelude { + pub use crate::{ + traits::{ChainProvider, PlasmaInputFetcher}, + types::{FinalizedHeadSignal, Keccak256Commitment, PlasmaError, SystemConfig}, + }; +} diff --git a/crates/plasma/src/traits.rs b/crates/plasma/src/traits.rs new file mode 100644 index 000000000..a86769b81 --- /dev/null +++ b/crates/plasma/src/traits.rs @@ -0,0 +1,65 @@ +//! Traits for plasma sources and internal components. + +use crate::types::{FinalizedHeadSignal, PlasmaError}; +use alloc::{boxed::Box, vec::Vec}; +use alloy_consensus::{Header, Receipt, TxEnvelope}; +use alloy_primitives::{Bytes, B256}; +use async_trait::async_trait; +use kona_primitives::{ + block::{BlockID, BlockInfo}, + system_config::SystemConfig, +}; + +/// Describes the functionality of a data source that can provide information from the blockchain. +#[async_trait] +pub trait ChainProvider { + /// Fetch the L1 [Header] for the given [B256] hash. + async fn header_by_hash(&mut self, hash: B256) -> anyhow::Result
; + + /// Returns the block at the given number, or an error if the block does not exist in the data + /// source. + async fn block_info_by_number(&mut self, number: u64) -> anyhow::Result; + + /// Returns all receipts in the block with the given hash, or an error if the block does not + /// exist in the data source. + async fn receipts_by_hash(&mut self, hash: B256) -> anyhow::Result>; + + /// Returns the [BlockInfo] and list of [TxEnvelope]s from the given block hash. + async fn block_info_and_transactions_by_hash( + &mut self, + hash: B256, + ) -> anyhow::Result<(BlockInfo, Vec)>; +} + +/// A plasma input fetcher. +#[async_trait] +pub trait PlasmaInputFetcher { + /// Get the input for the given commitment at the given block number from the DA storage + /// service. + async fn get_input( + &mut self, + fetcher: &CP, + commitment: Bytes, + block: BlockID, + ) -> Option>; + + /// Advance the L1 origin to the given block number, syncing the DA challenge events. + async fn advance_l1_origin( + &mut self, + fetcher: &CP, + block: BlockID, + ) -> Option>; + + /// Reset the challenge origin in case of L1 reorg. + async fn reset( + &mut self, + block_number: BlockInfo, + cfg: SystemConfig, + ) -> Option>; + + /// Notify L1 finalized head so plasma finality is always behind L1. + async fn finalize(&mut self, block_number: BlockInfo) -> Option>; + + /// Set the engine finalization signal callback. + fn on_finalized_head_signal(&mut self, callback: FinalizedHeadSignal); +} diff --git a/crates/plasma/src/types.rs b/crates/plasma/src/types.rs new file mode 100644 index 000000000..fe449d0c1 --- /dev/null +++ b/crates/plasma/src/types.rs @@ -0,0 +1,84 @@ +//! Types for the Kona Plasma crate. + +use alloc::boxed::Box; +use alloy_primitives::{Address, Bytes, U256}; +use core::fmt::Display; +use kona_primitives::block::BlockInfo; + +/// A plasma error. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum PlasmaError { + /// A reorg is required. + ReorgRequired, + /// Not enough data. + NotEnoughData, + /// The commitment was challenge, but the challenge period expired. + ChallengeExpired, + /// Missing data past the challenge period. + MissingPastWindow, + /// A challenge is pending for the given commitment + ChallengePending, +} + +impl Display for PlasmaError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::ReorgRequired => write!(f, "reorg required"), + Self::NotEnoughData => write!(f, "not enough data"), + Self::ChallengeExpired => write!(f, "challenge expired"), + Self::MissingPastWindow => write!(f, "missing past window"), + Self::ChallengePending => write!(f, "challenge pending"), + } + } +} + +/// Max input size ensures the canonical chain cannot include input batches too large to +/// challenge in the Data Availability Challenge contract. Value in number of bytes. +/// This value can only be changed in a hard fork. +pub const MAX_INPUT_SIZE: usize = 130672; + +/// TxDataVersion1 is the version number for batcher transactions containing +/// plasma commitments. It should not collide with DerivationVersion which is still +/// used downstream when parsing the frames. +pub const TX_DATA_VERSION_1: u8 = 1; + +/// The default commitment type. +pub type Keccak256Commitment = Bytes; + +/// The default commitment type for the DA storage. +pub const KECCAK_256_COMMITMENT_TYPE: u8 = 0; + +/// DecodeKeccak256 validates and casts the commitment into a Keccak256Commitment. 
+pub fn decode_keccak256(commitment: &[u8]) -> Result { + if commitment.is_empty() { + return Err(PlasmaError::NotEnoughData); + } + if commitment[0] != KECCAK_256_COMMITMENT_TYPE { + return Err(PlasmaError::NotEnoughData); + } + let c = &commitment[1..]; + if c.len() != 32 { + return Err(PlasmaError::NotEnoughData); + } + Ok(Bytes::copy_from_slice(c)) +} + +/// A callback method for the finalized head signal. +pub type FinalizedHeadSignal = Box; + +/// Optimism system config contract values +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +pub struct SystemConfig { + /// Batch sender address + pub batcher_addr: Address, + /// L2 gas limit + pub gas_limit: U256, + /// Fee overhead + #[cfg_attr(feature = "serde", serde(rename = "overhead"))] + pub l1_fee_overhead: U256, + /// Fee scalar + #[cfg_attr(feature = "serde", serde(rename = "scalar"))] + pub l1_fee_scalar: U256, +} From dfd0358dbba42a189f960b795baed33ad24d575d Mon Sep 17 00:00:00 2001 From: refcell Date: Thu, 25 Apr 2024 08:21:59 -0700 Subject: [PATCH 2/6] fix(plasma): merge --- crates/plasma/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/plasma/Cargo.toml b/crates/plasma/Cargo.toml index 283e6e475..f9e198f80 100644 --- a/crates/plasma/Cargo.toml +++ b/crates/plasma/Cargo.toml @@ -14,7 +14,7 @@ anyhow.workspace = true tracing.workspace = true # Local -kona-primitives = { path = "../primitives", verison = "0.0.1" } +kona-primitives = { path = "../primitives" } # External alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "e3f2f07", default-features = false } From 96d4ae55c4154a5c4b1ee8af8f105e4adf13e465 Mon Sep 17 00:00:00 2001 From: refcell Date: Thu, 25 Apr 2024 09:18:43 -0700 Subject: [PATCH 3/6] fix(derive): plasma temporary and critical errors --- crates/derive/src/sources/plasma.rs | 10 ++++++---- crates/derive/src/types/errors.rs | 8 ++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/crates/derive/src/sources/plasma.rs b/crates/derive/src/sources/plasma.rs index f20f00528..c6d2e6373 100644 --- a/crates/derive/src/sources/plasma.rs +++ b/crates/derive/src/sources/plasma.rs @@ -135,18 +135,20 @@ where return self.next().await } Some(Err(PlasmaError::MissingPastWindow)) => { - return Some(Err(StageError::Custom(anyhow::anyhow!( - "data for comm {:?} not available", + tracing::warn!("missing past window, skipping batch"); + return Some(Err(StageError::Critical(anyhow::anyhow!( + "data for commitment {:?} not available", commitment )))); } Some(Err(PlasmaError::ChallengePending)) => { // Continue stepping without slowing down. + tracing::debug!("plasma challenge pending, proceeding"); return Some(Err(StageError::NotEnoughData)); } Some(Err(e)) => { // Return temporary error so we can keep retrying. - return Some(Err(StageError::Custom(anyhow::anyhow!( + return Some(Err(StageError::Temporary(anyhow::anyhow!( "failed to fetch input data with comm {:?} from da service: {:?}", commitment, e @@ -154,7 +156,7 @@ where } None => { // Return temporary error so we can keep retrying. 
- return Some(Err(StageError::Custom(anyhow::anyhow!( + return Some(Err(StageError::Temporary(anyhow::anyhow!( "failed to fetch input data with comm {:?} from da service", commitment )))); diff --git a/crates/derive/src/types/errors.rs b/crates/derive/src/types/errors.rs index fcac6dcd4..44af7d206 100644 --- a/crates/derive/src/types/errors.rs +++ b/crates/derive/src/types/errors.rs @@ -15,6 +15,10 @@ pub type StageResult = Result; pub enum StageError { /// There is no data to read from the channel bank. Eof, + /// A temporary error that allows the operation to be retried. + Temporary(anyhow::Error), + /// A critical error. + Critical(anyhow::Error), /// Plasma data source error. Plasma(PlasmaError), /// There is not enough data progress, but if we wait, the stage will eventually return data @@ -64,6 +68,8 @@ impl PartialEq for StageError { matches!( (self, other), (StageError::Eof, StageError::Eof) | + (StageError::Temporary(_), StageError::Temporary(_)) | + (StageError::Critical(_), StageError::Critical(_)) | (StageError::Plasma(_), StageError::Plasma(_)) | (StageError::NotEnoughData, StageError::NotEnoughData) | (StageError::NoChannelsAvailable, StageError::NoChannelsAvailable) | @@ -97,6 +103,8 @@ impl Display for StageError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { StageError::Eof => write!(f, "End of file"), + StageError::Temporary(e) => write!(f, "Temporary error: {}", e), + StageError::Critical(e) => write!(f, "Critical error: {}", e), StageError::Plasma(e) => write!(f, "Plasma error: {:?}", e), StageError::NotEnoughData => write!(f, "Not enough data"), StageError::BlockFetch(hash) => { From df46f23d5abd0fbbf484450188808ddf96e54599 Mon Sep 17 00:00:00 2001 From: refcell Date: Thu, 25 Apr 2024 10:18:31 -0700 Subject: [PATCH 4/6] fix(derive): plasma source tests --- crates/derive/Cargo.toml | 1 + crates/derive/src/sources/plasma.rs | 115 ++++++++++++++++++++- crates/derive/src/types/errors.rs | 4 + crates/plasma/Cargo.toml | 1 + crates/plasma/src/lib.rs | 3 + crates/plasma/src/test_utils.rs | 150 ++++++++++++++++++++++++++++ 6 files changed, 269 insertions(+), 5 deletions(-) create mode 100644 crates/plasma/src/test_utils.rs diff --git a/crates/derive/Cargo.toml b/crates/derive/Cargo.toml index 202f36680..e729b768d 100644 --- a/crates/derive/Cargo.toml +++ b/crates/derive/Cargo.toml @@ -43,6 +43,7 @@ alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "e3f2f reqwest = { version = "0.12", default-features = false, optional = true } [dev-dependencies] +kona-plasma = { path = "../plasma", version = "0.0.1", features = ["default", "test-utils"] } tokio = { version = "1.37", features = ["full"] } proptest = "1.4.0" tracing-subscriber = "0.3.18" diff --git a/crates/derive/src/sources/plasma.rs b/crates/derive/src/sources/plasma.rs index c6d2e6373..7c3f572ed 100644 --- a/crates/derive/src/sources/plasma.rs +++ b/crates/derive/src/sources/plasma.rs @@ -62,16 +62,19 @@ where // for the same origin and noop if the origin was already processed. It is also called if // there is not commitment in the current origin. 
match self.input_fetcher.advance_l1_origin(&self.chain_provider, self.id).await { - Some(Ok(_)) => (), + Some(Ok(_)) => { + tracing::debug!("plasma input fetcher - l1 origin advanced"); + } Some(Err(PlasmaError::ReorgRequired)) => { tracing::error!("new expired challenge"); - return Some(StageResult::Err(StageError::Custom(anyhow::anyhow!( - "new expired challenge" - )))); + return Some(StageResult::Err(StageError::Reset(ResetError::NewExpiredChallenge))); } Some(Err(e)) => { tracing::error!("failed to advance plasma L1 origin: {:?}", e); - return Some(StageResult::Err(StageError::Plasma(e))); + return Some(StageResult::Err(StageError::Temporary(anyhow::anyhow!( + "failed to advance plasma L1 origin: {:?}", + e + )))); } None => { tracing::warn!("l1 origin advance returned None"); @@ -91,12 +94,14 @@ where // If the data is empty, if data.is_empty() { + tracing::warn!("empty data from plasma source"); return Some(Err(StageError::Plasma(PlasmaError::NotEnoughData))); } // If the tx data type is not plasma, we forward it downstream to let the next // steps validate and potentially parse it as L1 DA inputs. if data[0] != TX_DATA_VERSION_1 { + tracing::info!("non-plasma tx data, forwarding downstream"); return Some(Ok(data)); } @@ -177,3 +182,103 @@ where return Some(Ok(data)); } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::stages::test_utils::{CollectingLayer, TraceStorage}; + use alloc::vec; + use kona_plasma::test_utils::{TestChainProvider, TestPlasmaInputFetcher}; + use tracing::Level; + use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + + #[tokio::test] + async fn test_next_plasma_advance_origin_reorg_error() { + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { + advances: vec![Err(PlasmaError::ReorgRequired)], + ..Default::default() + }; + let source = vec![Bytes::from("hello"), Bytes::from("world")].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let err = plasma_source.next().await.unwrap().unwrap_err(); + assert_eq!(err, StageError::Reset(ResetError::NewExpiredChallenge)); + } + + #[tokio::test] + async fn test_next_plasma_advance_origin_other_error() { + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { + advances: vec![Err(PlasmaError::NotEnoughData)], + ..Default::default() + }; + let source = vec![Bytes::from("hello"), Bytes::from("world")].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let err = plasma_source.next().await.unwrap().unwrap_err(); + matches!(err, StageError::Temporary(_)); + } + + #[tokio::test] + async fn test_next_plasma_not_enough_source_data() { + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { advances: vec![Ok(())], ..Default::default() }; + let source = vec![].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let err = plasma_source.next().await.unwrap().unwrap_err(); + assert_eq!(err, StageError::Plasma(PlasmaError::NotEnoughData)); + } + + #[tokio::test] + async fn test_next_plasma_empty_source_data() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + 
tracing_subscriber::Registry::default().with(layer).init(); + + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { advances: vec![Ok(())], ..Default::default() }; + let source = vec![Bytes::from("")].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let err = plasma_source.next().await.unwrap().unwrap_err(); + assert_eq!(err, StageError::Plasma(PlasmaError::NotEnoughData)); + + let logs = trace_store.get_by_level(Level::WARN); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("empty data from plasma source")); + } + + #[tokio::test] + async fn test_next_plasma_non_plasma_tx_data_forwards() { + let trace_store: TraceStorage = Default::default(); + let layer = CollectingLayer::new(trace_store.clone()); + tracing_subscriber::Registry::default().with(layer).init(); + + let chain_provider = TestChainProvider::default(); + let input_fetcher = TestPlasmaInputFetcher { advances: vec![Ok(())], ..Default::default() }; + let first = Bytes::copy_from_slice(&[2u8]); + let source = vec![first.clone()].into_iter(); + let id = BlockID { number: 1, ..Default::default() }; + + let mut plasma_source = PlasmaSource::new(chain_provider, input_fetcher, source, id); + + let data = plasma_source.next().await.unwrap().unwrap(); + assert_eq!(data, first); + + let logs = trace_store.get_by_level(Level::INFO); + assert_eq!(logs.len(), 1); + assert!(logs[0].contains("non-plasma tx data, forwarding downstream")); + } + + // TODO: more tests +} diff --git a/crates/derive/src/types/errors.rs b/crates/derive/src/types/errors.rs index 44af7d206..bedd54b36 100644 --- a/crates/derive/src/types/errors.rs +++ b/crates/derive/src/types/errors.rs @@ -141,6 +141,8 @@ pub enum ResetError { BadTimestamp(u64, u64), /// A reorg is required. ReorgRequired, + /// A new expired challenge. + NewExpiredChallenge, } impl PartialEq for ResetError { @@ -153,6 +155,7 @@ impl PartialEq for ResetError { e1 == e2 && a1 == a2 } (ResetError::ReorgRequired, ResetError::ReorgRequired) => true, + (ResetError::NewExpiredChallenge, ResetError::NewExpiredChallenge) => true, _ => false, } } @@ -168,6 +171,7 @@ impl Display for ResetError { write!(f, "Bad timestamp: expected {}, got {}", expected, actual) } ResetError::ReorgRequired => write!(f, "Reorg required"), + ResetError::NewExpiredChallenge => write!(f, "New expired challenge"), } } } diff --git a/crates/plasma/Cargo.toml b/crates/plasma/Cargo.toml index f9e198f80..4bacdf937 100644 --- a/crates/plasma/Cargo.toml +++ b/crates/plasma/Cargo.toml @@ -31,3 +31,4 @@ serde_json = { version = "1.0.68", default-features = false } [features] default = ["serde"] serde = ["dep:serde"] +test-utils = [] diff --git a/crates/plasma/src/lib.rs b/crates/plasma/src/lib.rs index 42a7d9d63..4fee90dbf 100644 --- a/crates/plasma/src/lib.rs +++ b/crates/plasma/src/lib.rs @@ -19,3 +19,6 @@ pub mod prelude { types::{FinalizedHeadSignal, Keccak256Commitment, PlasmaError, SystemConfig}, }; } + +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; diff --git a/crates/plasma/src/test_utils.rs b/crates/plasma/src/test_utils.rs new file mode 100644 index 000000000..aaa7cab93 --- /dev/null +++ b/crates/plasma/src/test_utils.rs @@ -0,0 +1,150 @@ +//! Test utilities for the Plasma crate. 
+ +use crate::{ + traits::{ChainProvider, PlasmaInputFetcher}, + types::{FinalizedHeadSignal, PlasmaError}, +}; +use alloc::{boxed::Box, vec::Vec}; +use alloy_consensus::{Header, Receipt, TxEnvelope}; +use alloy_primitives::{Bytes, B256}; +use anyhow::Result; +use async_trait::async_trait; +use kona_primitives::{ + block::{BlockID, BlockInfo}, + system_config::SystemConfig, +}; + +/// A mock plasma input fetcher for testing. +#[derive(Debug, Clone, Default)] +pub struct TestPlasmaInputFetcher { + /// Inputs to return. + pub inputs: Vec>, + /// Advance L1 origin results. + pub advances: Vec>, + /// Reset results. + pub resets: Vec>, +} + +#[async_trait] +impl PlasmaInputFetcher for TestPlasmaInputFetcher { + async fn get_input( + &mut self, + _fetcher: &TestChainProvider, + _commitment: Bytes, + _block: BlockID, + ) -> Option> { + self.inputs.pop() + } + + async fn advance_l1_origin( + &mut self, + _fetcher: &TestChainProvider, + _block: BlockID, + ) -> Option> { + self.advances.pop() + } + + async fn reset( + &mut self, + _block_number: BlockInfo, + _cfg: SystemConfig, + ) -> Option> { + self.resets.pop() + } + + async fn finalize(&mut self, _block_number: BlockInfo) -> Option> { + None + } + + fn on_finalized_head_signal(&mut self, _block_number: FinalizedHeadSignal) {} +} + +/// A mock chain provider for testing. +#[derive(Debug, Clone, Default)] +pub struct TestChainProvider { + /// Maps block numbers to block information using a tuple list. + pub blocks: Vec<(u64, BlockInfo)>, + /// Maps block hashes to header information using a tuple list. + pub headers: Vec<(B256, Header)>, + /// Maps block hashes to receipts using a tuple list. + pub receipts: Vec<(B256, Vec)>, +} + +impl TestChainProvider { + /// Insert a block into the mock chain provider. + pub fn insert_block(&mut self, number: u64, block: BlockInfo) { + self.blocks.push((number, block)); + } + + /// Insert receipts into the mock chain provider. + pub fn insert_receipts(&mut self, hash: B256, receipts: Vec) { + self.receipts.push((hash, receipts)); + } + + /// Insert a header into the mock chain provider. + pub fn insert_header(&mut self, hash: B256, header: Header) { + self.headers.push((hash, header)); + } + + /// Clears headers from the mock chain provider. + pub fn clear_headers(&mut self) { + self.headers.clear(); + } + + /// Clears blocks from the mock chain provider. + pub fn clear_blocks(&mut self) { + self.blocks.clear(); + } + + /// Clears receipts from the mock chain provider. + pub fn clear_receipts(&mut self) { + self.receipts.clear(); + } + + /// Clears all blocks and receipts from the mock chain provider. + pub fn clear(&mut self) { + self.clear_blocks(); + self.clear_receipts(); + self.clear_headers(); + } +} + +#[async_trait] +impl ChainProvider for TestChainProvider { + async fn header_by_hash(&mut self, hash: B256) -> Result
{ + if let Some((_, header)) = self.headers.iter().find(|(_, b)| b.hash_slow() == hash) { + Ok(header.clone()) + } else { + Err(anyhow::anyhow!("Header not found")) + } + } + + async fn block_info_by_number(&mut self, _number: u64) -> Result { + if let Some((_, block)) = self.blocks.iter().find(|(n, _)| *n == _number) { + Ok(*block) + } else { + Err(anyhow::anyhow!("Block not found")) + } + } + + async fn receipts_by_hash(&mut self, _hash: B256) -> Result> { + if let Some((_, receipts)) = self.receipts.iter().find(|(h, _)| *h == _hash) { + Ok(receipts.clone()) + } else { + Err(anyhow::anyhow!("Receipts not found")) + } + } + + async fn block_info_and_transactions_by_hash( + &mut self, + hash: B256, + ) -> Result<(BlockInfo, Vec)> { + let block = self + .blocks + .iter() + .find(|(_, b)| b.hash == hash) + .map(|(_, b)| *b) + .ok_or_else(|| anyhow::anyhow!("Block not found"))?; + Ok((block, Vec::new())) + } +} From 1e95fb2970b61f327cb72622ca77b6af310c6068 Mon Sep 17 00:00:00 2001 From: refcell Date: Sat, 27 Apr 2024 09:40:19 -0700 Subject: [PATCH 5/6] fix(derive): providers refactor and trait dedup --- Cargo.lock | 22 +++ crates/derive/Cargo.toml | 4 +- crates/derive/src/lib.rs | 5 +- crates/derive/src/online/alloy_providers.rs | 9 +- crates/derive/src/params.rs | 13 -- crates/derive/src/sources/blobs.rs | 3 +- crates/derive/src/sources/calldata.rs | 3 +- crates/derive/src/sources/factory.rs | 33 ++-- crates/derive/src/sources/plasma.rs | 6 +- crates/derive/src/sources/source.rs | 17 +- .../src/stages/attributes_queue/builder.rs | 4 +- .../src/stages/attributes_queue/deposits.rs | 6 +- crates/derive/src/stages/batch_queue.rs | 15 +- crates/derive/src/stages/l1_traversal.rs | 9 +- .../stages/test_utils/sys_config_fetcher.rs | 6 +- crates/derive/src/traits/data_sources.rs | 50 +----- crates/derive/src/traits/test_utils.rs | 60 ++++++- .../traits/test_utils/data_availability.rs | 59 ------- crates/derive/src/types/batch/mod.rs | 6 +- .../src/types/batch/span_batch/batch.rs | 60 ++++--- crates/derive/src/types/errors.rs | 139 --------------- crates/derive/src/types/mod.rs | 74 +------- crates/plasma/Cargo.toml | 7 +- crates/plasma/src/lib.rs | 17 +- crates/plasma/src/test_utils.rs | 96 +---------- crates/plasma/src/traits.rs | 27 +-- .../src/block_info.rs} | 0 .../src/types => primitives/src}/deposits.rs | 162 +++++++++++++++++- crates/primitives/src/lib.rs | 36 ++-- .../src/types => primitives/src}/payload.rs | 5 +- crates/primitives/src/raw_tx.rs | 47 +++++ crates/providers/Cargo.toml | 41 +++++ crates/providers/README.md | 3 + crates/providers/src/chain_provider.rs | 29 ++++ crates/providers/src/l2_chain_provider.rs | 28 +++ crates/providers/src/lib.rs | 16 ++ .../src/test_utils.rs} | 124 +++++++------- 37 files changed, 596 insertions(+), 645 deletions(-) delete mode 100644 crates/derive/src/traits/test_utils/data_availability.rs rename crates/{derive/src/types/l1_block_info.rs => primitives/src/block_info.rs} (100%) rename crates/{derive/src/types => primitives/src}/deposits.rs (74%) rename crates/{derive/src/types => primitives/src}/payload.rs (98%) create mode 100644 crates/primitives/src/raw_tx.rs create mode 100644 crates/providers/Cargo.toml create mode 100644 crates/providers/README.md create mode 100644 crates/providers/src/chain_provider.rs create mode 100644 crates/providers/src/l2_chain_provider.rs create mode 100644 crates/providers/src/lib.rs rename crates/{derive/src/traits/test_utils/data_sources.rs => providers/src/test_utils.rs} (93%) diff --git a/Cargo.lock b/Cargo.lock 
index 5f2704e7f..832cc30b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1558,6 +1558,7 @@ dependencies = [ "hashbrown", "kona-plasma", "kona-primitives", + "kona-providers", "lru", "miniz_oxide", "op-alloy-consensus", @@ -1614,6 +1615,7 @@ dependencies = [ "anyhow", "async-trait", "kona-primitives", + "kona-providers", "serde", "serde_json", "tracing", @@ -1647,6 +1649,26 @@ dependencies = [ "serde_json", ] +[[package]] +name = "kona-providers" +version = "0.0.1" +dependencies = [ + "alloy-consensus 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=e3f2f07)", + "alloy-primitives", + "alloy-rlp", + "alloy-sol-types", + "anyhow", + "async-trait", + "hashbrown", + "kona-primitives", + "op-alloy-consensus", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "lazy_static" version = "1.4.0" diff --git a/crates/derive/Cargo.toml b/crates/derive/Cargo.toml index e729b768d..703bcbf91 100644 --- a/crates/derive/Cargo.toml +++ b/crates/derive/Cargo.toml @@ -19,6 +19,7 @@ alloy-rlp = { workspace = true, features = ["derive"] } # Local kona-primitives = { path = "../primitives", version = "0.0.1" } kona-plasma = { path = "../plasma", version = "0.0.1" } +kona-providers = { path = "../providers", version = "0.0.1" } # External alloy-sol-types = { version = "0.7.1", default-features = false } @@ -53,7 +54,7 @@ serde_json = { version = "1.0.116", default-features = false } [features] default = ["serde", "k256"] -serde = ["dep:serde", "kona-plasma/serde", "alloy-primitives/serde", "alloy-consensus/serde", "op-alloy-consensus/serde"] +serde = ["dep:serde", "kona-plasma/serde", "kona-providers/serde", "kona-primitives/serde", "alloy-primitives/serde", "alloy-consensus/serde", "op-alloy-consensus/serde"] k256 = ["alloy-primitives/k256", "alloy-consensus/k256", "op-alloy-consensus/k256"] online = [ "dep:revm-primitives", @@ -68,3 +69,4 @@ online = [ "revm-primitives/serde", "revm-primitives/c-kzg", ] +test-utils = [ "kona-providers/test-utils", "kona-plasma/test-utils" ] diff --git a/crates/derive/src/lib.rs b/crates/derive/src/lib.rs index 087ba4ac6..b63821783 100644 --- a/crates/derive/src/lib.rs +++ b/crates/derive/src/lib.rs @@ -9,9 +9,8 @@ extern crate alloc; mod params; pub use params::{ ChannelID, CHANNEL_ID_LENGTH, CONFIG_UPDATE_EVENT_VERSION_0, CONFIG_UPDATE_TOPIC, - DEPOSIT_EVENT_ABI, DEPOSIT_EVENT_ABI_HASH, DEPOSIT_EVENT_VERSION_0, DERIVATION_VERSION_0, - FRAME_OVERHEAD, MAX_CHANNEL_BANK_SIZE, MAX_FRAME_LEN, MAX_RLP_BYTES_PER_CHANNEL, - MAX_SPAN_BATCH_BYTES, SEQUENCER_FEE_VAULT_ADDRESS, + DERIVATION_VERSION_0, FRAME_OVERHEAD, MAX_CHANNEL_BANK_SIZE, MAX_FRAME_LEN, + MAX_RLP_BYTES_PER_CHANNEL, MAX_SPAN_BATCH_BYTES, SEQUENCER_FEE_VAULT_ADDRESS, }; pub mod builder; diff --git a/crates/derive/src/online/alloy_providers.rs b/crates/derive/src/online/alloy_providers.rs index ce7b11ddc..133d016eb 100644 --- a/crates/derive/src/online/alloy_providers.rs +++ b/crates/derive/src/online/alloy_providers.rs @@ -1,12 +1,8 @@ //! This module contains concrete implementations of the data provider traits, using an alloy //! provider on the backend. 
-use crate::{ - traits::{ChainProvider, L2ChainProvider}, - types::{ - Block, BlockInfo, L2BlockInfo, L2ExecutionPayloadEnvelope, OpBlock, RollupConfig, - SystemConfig, - }, +use crate::types::{ + Block, BlockInfo, L2BlockInfo, L2ExecutionPayloadEnvelope, OpBlock, RollupConfig, SystemConfig, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{Header, Receipt, ReceiptWithBloom, TxEnvelope, TxType}; @@ -17,6 +13,7 @@ use alloy_transport_http::Http; use anyhow::{anyhow, Result}; use async_trait::async_trait; use core::num::NonZeroUsize; +use kona_providers::{ChainProvider, L2ChainProvider}; use lru::LruCache; const CACHE_SIZE: usize = 16; diff --git a/crates/derive/src/params.rs b/crates/derive/src/params.rs index 697d87f0a..555fdb144 100644 --- a/crates/derive/src/params.rs +++ b/crates/derive/src/params.rs @@ -35,22 +35,9 @@ pub type ChannelID = [u8; CHANNEL_ID_LENGTH]; /// conditions, but we leave space to grow larger anyway (gas limit allows for more data). pub const MAX_FRAME_LEN: usize = 1000; -/// Deposit log event abi signature. -pub const DEPOSIT_EVENT_ABI: &str = "TransactionDeposited(address,address,uint256,bytes)"; - /// `keccak256("ConfigUpdate(uint256,uint8,bytes)")` pub const CONFIG_UPDATE_TOPIC: B256 = b256!("1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be"); /// The initial version of the system config event log. pub const CONFIG_UPDATE_EVENT_VERSION_0: B256 = B256::ZERO; - -/// Deposit event abi hash. -/// -/// This is the keccak256 hash of the deposit event ABI signature. -/// `keccak256("TransactionDeposited(address,address,uint256,bytes)")` -pub const DEPOSIT_EVENT_ABI_HASH: B256 = - b256!("b3813568d9991fc951961fcb4c784893574240a28925604d09fc577c55bb7c32"); - -/// The initial version of the deposit event log. -pub const DEPOSIT_EVENT_VERSION_0: B256 = B256::ZERO; diff --git a/crates/derive/src/sources/blobs.rs b/crates/derive/src/sources/blobs.rs index 245168dff..c9fd534ad 100644 --- a/crates/derive/src/sources/blobs.rs +++ b/crates/derive/src/sources/blobs.rs @@ -1,7 +1,7 @@ //! Blob Data Source use crate::{ - traits::{AsyncIterator, BlobProvider, ChainProvider, SignedRecoverable}, + traits::{AsyncIterator, BlobProvider, SignedRecoverable}, types::{BlobData, BlockInfo, IndexedBlobHash, StageError, StageResult}, }; use alloc::{boxed::Box, vec::Vec}; @@ -9,6 +9,7 @@ use alloy_consensus::{Transaction, TxEip4844Variant, TxEnvelope, TxType}; use alloy_primitives::{Address, Bytes, TxKind}; use anyhow::Result; use async_trait::async_trait; +use kona_providers::ChainProvider; use tracing::warn; /// A data iterator that reads from a blob. diff --git a/crates/derive/src/sources/calldata.rs b/crates/derive/src/sources/calldata.rs index f9a4000e5..1099ecd05 100644 --- a/crates/derive/src/sources/calldata.rs +++ b/crates/derive/src/sources/calldata.rs @@ -1,13 +1,14 @@ //! CallData Source use crate::{ - traits::{AsyncIterator, ChainProvider, SignedRecoverable}, + traits::{AsyncIterator, SignedRecoverable}, types::{BlockInfo, StageError, StageResult}, }; use alloc::{boxed::Box, collections::VecDeque}; use alloy_consensus::{Transaction, TxEnvelope}; use alloy_primitives::{Address, Bytes, TxKind}; use async_trait::async_trait; +use kona_providers::ChainProvider; /// A data iterator that reads from calldata. 
#[derive(Debug, Clone)] diff --git a/crates/derive/src/sources/factory.rs b/crates/derive/src/sources/factory.rs index 5e5647e94..d313eb956 100644 --- a/crates/derive/src/sources/factory.rs +++ b/crates/derive/src/sources/factory.rs @@ -2,29 +2,27 @@ use crate::{ sources::{BlobSource, CalldataSource, DataSource, PlasmaSource}, - traits::{BlobProvider, ChainProvider, DataAvailabilityProvider}, + traits::{BlobProvider, DataAvailabilityProvider}, types::{BlockID, BlockInfo, RollupConfig}, }; use alloc::{boxed::Box, fmt::Debug}; use alloy_primitives::{Address, Bytes}; use anyhow::{anyhow, Result}; use async_trait::async_trait; -use kona_plasma::traits::{ChainProvider as PlasmaChainProvider, PlasmaInputFetcher}; +use kona_plasma::traits::PlasmaInputFetcher; +use kona_providers::ChainProvider; /// A factory for creating a calldata and blob provider. #[derive(Debug, Clone, Copy)] -pub struct DataSourceFactory +pub struct DataSourceFactory where - C: ChainProvider + Clone, + C: ChainProvider + Send + Clone, B: BlobProvider + Clone, - PCP: PlasmaChainProvider + Send + Clone, - PIF: PlasmaInputFetcher + Clone, + PIF: PlasmaInputFetcher + Clone, I: Iterator + Send + Clone, { /// The chain provider to use for the factory. pub chain_provider: C, - /// The plasma chain provider. - pub plasma_chain_provider: PCP, /// The plasma iterator. pub plasma_source: I, /// The blob provider @@ -39,19 +37,17 @@ where pub signer: Address, } -impl DataSourceFactory +impl DataSourceFactory where - C: ChainProvider + Clone + Debug, + C: ChainProvider + Send + Clone + Debug, B: BlobProvider + Clone + Debug, - PCP: PlasmaChainProvider + Send + Clone + Debug, - PIF: PlasmaInputFetcher + Clone + Debug, + PIF: PlasmaInputFetcher + Clone + Debug, I: Iterator + Send + Clone, { /// Creates a new factory. 
- pub fn new(provider: C, blobs: B, pcp: PCP, pif: PIF, s: I, cfg: &RollupConfig) -> Self { + pub fn new(provider: C, blobs: B, pif: PIF, s: I, cfg: &RollupConfig) -> Self { Self { chain_provider: provider, - plasma_chain_provider: pcp, plasma_source: s, blob_provider: blobs, plasma_input_fetcher: pif, @@ -63,16 +59,15 @@ where } #[async_trait] -impl DataAvailabilityProvider for DataSourceFactory +impl DataAvailabilityProvider for DataSourceFactory where C: ChainProvider + Send + Sync + Clone + Debug, B: BlobProvider + Send + Sync + Clone + Debug, - PCP: PlasmaChainProvider + Send + Sync + Clone + Debug, - PIF: PlasmaInputFetcher + Send + Sync + Clone + Debug, + PIF: PlasmaInputFetcher + Send + Sync + Clone + Debug, I: Iterator + Send + Sync + Clone + Debug, { type Item = Bytes; - type DataIter = DataSource; + type DataIter = DataSource; async fn open_data( &self, @@ -102,7 +97,7 @@ where } else if self.plasma_enabled { let id = BlockID { hash: block_ref.hash, number: block_ref.number }; Ok(DataSource::Plasma(PlasmaSource::new( - self.plasma_chain_provider.clone(), + self.chain_provider.clone(), self.plasma_input_fetcher.clone(), self.plasma_source.clone(), id, diff --git a/crates/derive/src/sources/plasma.rs b/crates/derive/src/sources/plasma.rs index 7c3f572ed..4c222fb14 100644 --- a/crates/derive/src/sources/plasma.rs +++ b/crates/derive/src/sources/plasma.rs @@ -8,12 +8,13 @@ use alloc::boxed::Box; use alloy_primitives::Bytes; use async_trait::async_trait; use kona_plasma::{ - traits::{ChainProvider, PlasmaInputFetcher}, + traits::PlasmaInputFetcher, types::{ decode_keccak256, Keccak256Commitment, PlasmaError, MAX_INPUT_SIZE, TX_DATA_VERSION_1, }, }; use kona_primitives::block::BlockID; +use kona_providers::ChainProvider; /// A plasma data iterator. #[derive(Debug, Clone)] @@ -188,7 +189,8 @@ mod tests { use super::*; use crate::stages::test_utils::{CollectingLayer, TraceStorage}; use alloc::vec; - use kona_plasma::test_utils::{TestChainProvider, TestPlasmaInputFetcher}; + use kona_plasma::test_utils::TestPlasmaInputFetcher; + use kona_providers::test_utils::TestChainProvider; use tracing::Level; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; diff --git a/crates/derive/src/sources/source.rs b/crates/derive/src/sources/source.rs index 8dd392a85..49dfb0683 100644 --- a/crates/derive/src/sources/source.rs +++ b/crates/derive/src/sources/source.rs @@ -2,22 +2,22 @@ use crate::{ sources::{BlobSource, CalldataSource, PlasmaSource}, - traits::{AsyncIterator, BlobProvider, ChainProvider}, + traits::{AsyncIterator, BlobProvider}, types::StageResult, }; use alloc::boxed::Box; use alloy_primitives::Bytes; use async_trait::async_trait; -use kona_plasma::traits::{ChainProvider as PlasmaChainProvider, PlasmaInputFetcher}; +use kona_plasma::traits::PlasmaInputFetcher; +use kona_providers::ChainProvider; /// An enum over the various data sources. #[derive(Debug, Clone)] -pub enum DataSource +pub enum DataSource where CP: ChainProvider + Send, B: BlobProvider + Send, - PCP: PlasmaChainProvider + Send, - PIF: PlasmaInputFetcher + Send, + PIF: PlasmaInputFetcher + Send, I: Iterator + Send, { /// A calldata source. @@ -25,16 +25,15 @@ where /// A blob source. Blob(BlobSource), /// A plasma source. 
- Plasma(PlasmaSource), + Plasma(PlasmaSource), } #[async_trait] -impl AsyncIterator for DataSource +impl AsyncIterator for DataSource where CP: ChainProvider + Send, B: BlobProvider + Send, - PCP: PlasmaChainProvider + Send, - PIF: PlasmaInputFetcher + Send, + PIF: PlasmaInputFetcher + Send, I: Iterator + Send, { type Item = Bytes; diff --git a/crates/derive/src/stages/attributes_queue/builder.rs b/crates/derive/src/stages/attributes_queue/builder.rs index 4282671da..f24778441 100644 --- a/crates/derive/src/stages/attributes_queue/builder.rs +++ b/crates/derive/src/stages/attributes_queue/builder.rs @@ -3,7 +3,6 @@ use super::derive_deposits; use crate::{ params::SEQUENCER_FEE_VAULT_ADDRESS, - traits::{ChainProvider, L2ChainProvider}, types::{ BlockID, BuilderError, EcotoneTransactionBuilder, L1BlockInfoTx, L2BlockInfo, L2PayloadAttributes, RawTransaction, RollupConfig, @@ -12,6 +11,7 @@ use crate::{ use alloc::{boxed::Box, fmt::Debug, sync::Arc, vec, vec::Vec}; use alloy_rlp::Encodable; use async_trait::async_trait; +use kona_providers::{ChainProvider, L2ChainProvider}; /// The [AttributesBuilder] is responsible for preparing [L2PayloadAttributes] /// that can be used to construct an L2 Block containing only deposits. @@ -173,11 +173,11 @@ mod tests { use super::*; use crate::{ stages::test_utils::MockSystemConfigL2Fetcher, - traits::test_utils::TestChainProvider, types::{BlockInfo, SystemConfig}, }; use alloy_consensus::Header; use alloy_primitives::B256; + use kona_providers::test_utils::TestChainProvider; #[tokio::test] async fn test_prepare_payload_block_mismatch_epoch_reset() { diff --git a/crates/derive/src/stages/attributes_queue/deposits.rs b/crates/derive/src/stages/attributes_queue/deposits.rs index d5ffbd656..797dcedd2 100644 --- a/crates/derive/src/stages/attributes_queue/deposits.rs +++ b/crates/derive/src/stages/attributes_queue/deposits.rs @@ -1,12 +1,10 @@ //! Contains a helper method to derive deposit transactions from L1 Receipts. -use crate::{ - params::DEPOSIT_EVENT_ABI_HASH, - types::{decode_deposit, DepositError, RawTransaction}, -}; +use crate::types::{DepositError, RawTransaction}; use alloc::vec::Vec; use alloy_consensus::Receipt; use alloy_primitives::{Address, Log, B256}; +use kona_primitives::{decode_deposit, DEPOSIT_EVENT_ABI_HASH}; /// Derive deposits for transaction receipts. /// diff --git a/crates/derive/src/stages/batch_queue.rs b/crates/derive/src/stages/batch_queue.rs index ca4bd31f2..c2e884e6f 100644 --- a/crates/derive/src/stages/batch_queue.rs +++ b/crates/derive/src/stages/batch_queue.rs @@ -2,7 +2,7 @@ use crate::{ stages::attributes_queue::AttributesProvider, - traits::{L2ChainProvider, OriginAdvancer, OriginProvider, PreviousStage, ResettableStage}, + traits::{OriginAdvancer, OriginProvider, PreviousStage, ResettableStage}, types::{ Batch, BatchValidity, BatchWithInclusionBlock, BlockInfo, L2BlockInfo, RollupConfig, SingleBatch, StageError, StageResult, SystemConfig, @@ -12,6 +12,7 @@ use alloc::{boxed::Box, sync::Arc, vec::Vec}; use anyhow::anyhow; use async_trait::async_trait; use core::fmt::Debug; +use kona_providers::L2ChainProvider; use tracing::{error, info, warn}; /// Provides [Batch]es for the [BatchQueue] stage. 
@@ -434,7 +435,6 @@ mod tests { channel_reader::BatchReader, test_utils::{CollectingLayer, MockBatchQueueProvider, TraceStorage}, }, - traits::test_utils::MockBlockFetcher, types::{ BatchType, BlockID, Genesis, L1BlockInfoBedrock, L1BlockInfoTx, L2ExecutionPayload, L2ExecutionPayloadEnvelope, @@ -443,6 +443,7 @@ mod tests { use alloc::vec; use alloy_primitives::{address, b256, Address, Bytes, TxKind, B256, U256}; use alloy_rlp::{BytesMut, Encodable}; + use kona_providers::test_utils::TestL2ChainProvider; use miniz_oxide::deflate::compress_to_vec_zlib; use op_alloy_consensus::{OpTxType, TxDeposit}; use tracing::Level; @@ -461,7 +462,7 @@ mod tests { let data = vec![Ok(Batch::Single(SingleBatch::default()))]; let cfg = Arc::new(RollupConfig::default()); let mock = MockBatchQueueProvider::new(data); - let fetcher = MockBlockFetcher::default(); + let fetcher = TestL2ChainProvider::default(); let mut bq = BatchQueue::new(cfg, mock, fetcher); let parent = L2BlockInfo::default(); let result = bq.derive_next_batch(false, parent).await.unwrap_err(); @@ -474,7 +475,7 @@ mod tests { let cfg = Arc::new(RollupConfig::default()); let batch = reader.next_batch(cfg.as_ref()).unwrap(); let mock = MockBatchQueueProvider::new(vec![Ok(batch)]); - let fetcher = MockBlockFetcher::default(); + let fetcher = TestL2ChainProvider::default(); let mut bq = BatchQueue::new(cfg, mock, fetcher); let res = bq.next_batch(L2BlockInfo::default()).await.unwrap_err(); assert_eq!(res, StageError::NotEnoughData); @@ -491,7 +492,7 @@ mod tests { } let mut mock = MockBatchQueueProvider::new(batch_vec); mock.origin = Some(BlockInfo::default()); - let fetcher = MockBlockFetcher::default(); + let fetcher = TestL2ChainProvider::default(); let mut bq = BatchQueue::new(cfg, mock, fetcher); let parent = L2BlockInfo { l1_origin: BlockID { number: 10, ..Default::default() }, @@ -619,7 +620,7 @@ mod tests { ..Default::default() }, }; - let fetcher = MockBlockFetcher { + let fetcher = TestL2ChainProvider { blocks: vec![block_nine, block_seven], payloads: vec![payload, second], ..Default::default() @@ -653,7 +654,7 @@ mod tests { let data = vec![Ok(Batch::Single(SingleBatch::default()))]; let cfg = Arc::new(RollupConfig::default()); let mock = MockBatchQueueProvider::new(data); - let fetcher = MockBlockFetcher::default(); + let fetcher = TestL2ChainProvider::default(); let mut bq = BatchQueue::new(cfg, mock, fetcher); let parent = L2BlockInfo::default(); let batch = bq.next_batch(parent).await.unwrap(); diff --git a/crates/derive/src/stages/l1_traversal.rs b/crates/derive/src/stages/l1_traversal.rs index 65c975108..532b72457 100644 --- a/crates/derive/src/stages/l1_traversal.rs +++ b/crates/derive/src/stages/l1_traversal.rs @@ -2,12 +2,13 @@ use crate::{ stages::L1RetrievalProvider, - traits::{ChainProvider, OriginAdvancer, OriginProvider, PreviousStage, ResettableStage}, + traits::{OriginAdvancer, OriginProvider, PreviousStage, ResettableStage}, types::{BlockInfo, RollupConfig, StageError, StageResult, SystemConfig}, }; use alloc::{boxed::Box, sync::Arc}; use alloy_primitives::Address; use async_trait::async_trait; +use kona_providers::ChainProvider; use tracing::warn; /// The [L1Traversal] stage of the derivation pipeline. 
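// Illustrative sketch (not part of the patch): the test hunks above swap
// `MockBlockFetcher` for `kona_providers::test_utils::TestL2ChainProvider`,
// which keeps the same `blocks` / `payloads` / `system_configs` /
// `short_circuit` fields, so only the type name and import path change. A
// downstream stage test would now look roughly like this. Paths and method
// names are taken from the hunks in this patch; the test body itself is
// invented, and the `test-utils` feature of `kona-providers` is assumed to be
// enabled.

#[cfg(test)]
mod provider_migration_example {
    use kona_providers::{test_utils::TestL2ChainProvider, L2ChainProvider};

    #[tokio::test]
    async fn missing_block_surfaces_an_error() {
        // An empty provider behaves like the old `MockBlockFetcher::default()`:
        // every lookup misses and the error propagates to the stage under test.
        let mut fetcher = TestL2ChainProvider::default();
        assert!(fetcher.l2_block_info_by_number(10).await.is_err());
    }
}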
@@ -135,13 +136,11 @@ impl ResettableStage for L1Traversal { #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{ - params::{CONFIG_UPDATE_EVENT_VERSION_0, CONFIG_UPDATE_TOPIC}, - traits::test_utils::TestChainProvider, - }; + use crate::params::{CONFIG_UPDATE_EVENT_VERSION_0, CONFIG_UPDATE_TOPIC}; use alloc::vec; use alloy_consensus::Receipt; use alloy_primitives::{address, b256, hex, Bytes, Log, LogData, B256}; + use kona_providers::test_utils::TestChainProvider; const L1_SYS_CONFIG_ADDR: Address = address!("1337000000000000000000000000000000000000"); diff --git a/crates/derive/src/stages/test_utils/sys_config_fetcher.rs b/crates/derive/src/stages/test_utils/sys_config_fetcher.rs index 53489faa9..2ef05326d 100644 --- a/crates/derive/src/stages/test_utils/sys_config_fetcher.rs +++ b/crates/derive/src/stages/test_utils/sys_config_fetcher.rs @@ -1,13 +1,11 @@ //! Implements a mock [L2SystemConfigFetcher] for testing. -use crate::{ - traits::L2ChainProvider, - types::{L2BlockInfo, L2ExecutionPayloadEnvelope, RollupConfig, SystemConfig}, -}; +use crate::types::{L2BlockInfo, L2ExecutionPayloadEnvelope, RollupConfig, SystemConfig}; use alloc::{boxed::Box, sync::Arc}; use anyhow::Result; use async_trait::async_trait; use hashbrown::HashMap; +use kona_providers::L2ChainProvider; /// A mock implementation of the [`SystemConfigL2Fetcher`] for testing. #[derive(Debug, Default)] diff --git a/crates/derive/src/traits/data_sources.rs b/crates/derive/src/traits/data_sources.rs index 1061b84ea..10640a4cc 100644 --- a/crates/derive/src/traits/data_sources.rs +++ b/crates/derive/src/traits/data_sources.rs @@ -1,56 +1,12 @@ //! Contains traits that describe the functionality of various data sources used in the derivation //! pipeline's stages. -use crate::types::{ - Blob, BlockInfo, IndexedBlobHash, L2BlockInfo, L2ExecutionPayloadEnvelope, RollupConfig, - StageResult, SystemConfig, -}; -use alloc::{boxed::Box, fmt::Debug, sync::Arc, vec::Vec}; -use alloy_consensus::{Header, Receipt, TxEnvelope}; -use alloy_primitives::{Address, Bytes, B256}; +use crate::types::{Blob, BlockInfo, IndexedBlobHash, StageResult}; +use alloc::{boxed::Box, fmt::Debug, vec::Vec}; +use alloy_primitives::{Address, Bytes}; use anyhow::Result; use async_trait::async_trait; -/// Describes the functionality of a data source that can provide information from the blockchain. -#[async_trait] -pub trait ChainProvider { - /// Fetch the L1 [Header] for the given [B256] hash. - async fn header_by_hash(&mut self, hash: B256) -> Result
; - - /// Returns the block at the given number, or an error if the block does not exist in the data - /// source. - async fn block_info_by_number(&mut self, number: u64) -> Result; - - /// Returns all receipts in the block with the given hash, or an error if the block does not - /// exist in the data source. - async fn receipts_by_hash(&mut self, hash: B256) -> Result>; - - /// Returns the [BlockInfo] and list of [TxEnvelope]s from the given block hash. - async fn block_info_and_transactions_by_hash( - &mut self, - hash: B256, - ) -> Result<(BlockInfo, Vec)>; -} - -/// Describes the functionality of a data source that fetches safe blocks. -#[async_trait] -pub trait L2ChainProvider { - /// Returns the L2 block info given a block number. - /// Errors if the block does not exist. - async fn l2_block_info_by_number(&mut self, number: u64) -> Result; - - /// Returns an execution payload for a given number. - /// Errors if the execution payload does not exist. - async fn payload_by_number(&mut self, number: u64) -> Result; - - /// Returns the [SystemConfig] by L2 number. - async fn system_config_by_number( - &mut self, - number: u64, - rollup_config: Arc, - ) -> Result; -} - /// The BlobProvider trait specifies the functionality of a data source that can provide blobs. #[async_trait] pub trait BlobProvider { diff --git a/crates/derive/src/traits/test_utils.rs b/crates/derive/src/traits/test_utils.rs index af6a910d8..6ee6205cc 100644 --- a/crates/derive/src/traits/test_utils.rs +++ b/crates/derive/src/traits/test_utils.rs @@ -1,7 +1,59 @@ //! Test Utilities for derive traits -pub mod data_sources; -pub use data_sources::{MockBlockFetcher, TestChainProvider}; +use crate::{ + traits::{AsyncIterator, DataAvailabilityProvider}, + types::{BlockInfo, StageError, StageResult}, +}; +use alloc::{boxed::Box, vec, vec::Vec}; +use alloy_primitives::{Address, Bytes}; +use anyhow::Result; +use async_trait::async_trait; +use core::fmt::Debug; -pub mod data_availability; -pub use data_availability::{TestDAP, TestIter}; +/// Mock data iterator +#[derive(Debug, Default, PartialEq)] +pub struct TestIter { + /// Holds open data calls with args for assertions. + pub(crate) open_data_calls: Vec<(BlockInfo, Address)>, + /// A queue of results to return as the next iterated data. + pub(crate) results: Vec>, +} + +#[async_trait] +impl AsyncIterator for TestIter { + type Item = Bytes; + + async fn next(&mut self) -> Option> { + Some(self.results.pop().unwrap_or_else(|| Err(StageError::Eof))) + } +} + +/// Mock data availability provider +#[derive(Debug, Default)] +pub struct TestDAP { + /// Specifies the stage results the test iter returns as data. + pub(crate) results: Vec>, +} + +#[async_trait] +impl DataAvailabilityProvider for TestDAP { + type Item = Bytes; + type DataIter = TestIter; + + async fn open_data( + &self, + block_ref: &BlockInfo, + batcher_address: Address, + ) -> Result { + // Construct a new vec of results to return. + let results = self + .results + .iter() + .map(|i| match i { + Ok(r) => Ok(r.clone()), + Err(_) => Err(StageError::Eof), + }) + .collect::>>(); + Ok(TestIter { open_data_calls: vec![(*block_ref, batcher_address)], results }) + } +} diff --git a/crates/derive/src/traits/test_utils/data_availability.rs b/crates/derive/src/traits/test_utils/data_availability.rs deleted file mode 100644 index dbc7aa256..000000000 --- a/crates/derive/src/traits/test_utils/data_availability.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! Test utilities for data availability. 
- -use crate::{ - traits::{AsyncIterator, DataAvailabilityProvider}, - types::{BlockInfo, StageError, StageResult}, -}; -use alloc::{boxed::Box, vec, vec::Vec}; -use alloy_primitives::{Address, Bytes}; -use anyhow::Result; -use async_trait::async_trait; -use core::fmt::Debug; - -/// Mock data iterator -#[derive(Debug, Default, PartialEq)] -pub struct TestIter { - /// Holds open data calls with args for assertions. - pub(crate) open_data_calls: Vec<(BlockInfo, Address)>, - /// A queue of results to return as the next iterated data. - pub(crate) results: Vec>, -} - -#[async_trait] -impl AsyncIterator for TestIter { - type Item = Bytes; - - async fn next(&mut self) -> Option> { - Some(self.results.pop().unwrap_or_else(|| Err(StageError::Eof))) - } -} - -/// Mock data availability provider -#[derive(Debug, Default)] -pub struct TestDAP { - /// Specifies the stage results the test iter returns as data. - pub(crate) results: Vec>, -} - -#[async_trait] -impl DataAvailabilityProvider for TestDAP { - type Item = Bytes; - type DataIter = TestIter; - - async fn open_data( - &self, - block_ref: &BlockInfo, - batcher_address: Address, - ) -> Result { - // Construct a new vec of results to return. - let results = self - .results - .iter() - .map(|i| match i { - Ok(r) => Ok(r.clone()), - Err(_) => Err(StageError::Eof), - }) - .collect::>>(); - Ok(TestIter { open_data_calls: vec![(*block_ref, batcher_address)], results }) - } -} diff --git a/crates/derive/src/types/batch/mod.rs b/crates/derive/src/types/batch/mod.rs index 58541377f..66407630e 100644 --- a/crates/derive/src/types/batch/mod.rs +++ b/crates/derive/src/types/batch/mod.rs @@ -2,11 +2,9 @@ //! [SingleBatch]. use super::DecodeError; -use crate::{ - traits::L2ChainProvider, - types::{BlockInfo, L2BlockInfo, RollupConfig}, -}; +use crate::types::{BlockInfo, L2BlockInfo, RollupConfig}; use alloy_rlp::{Buf, Decodable}; +use kona_providers::L2ChainProvider; mod batch_type; pub use batch_type::BatchType; diff --git a/crates/derive/src/types/batch/span_batch/batch.rs b/crates/derive/src/types/batch/span_batch/batch.rs index 0ae4bac48..9037d89bb 100644 --- a/crates/derive/src/types/batch/span_batch/batch.rs +++ b/crates/derive/src/types/batch/span_batch/batch.rs @@ -1,15 +1,13 @@ //! 
The Span Batch Type use super::{SpanBatchError, SpanBatchTransactions}; -use crate::{ - traits::L2ChainProvider, - types::{ - BatchValidity, BlockInfo, L2BlockInfo, RollupConfig, SingleBatch, SpanBatchBits, - SpanBatchElement, - }, +use crate::types::{ + BatchValidity, BlockInfo, L2BlockInfo, RollupConfig, SingleBatch, SpanBatchBits, + SpanBatchElement, }; use alloc::vec::Vec; use alloy_primitives::FixedBytes; +use kona_providers::L2ChainProvider; use op_alloy_consensus::OpTxType; use tracing::{info, warn}; @@ -414,11 +412,11 @@ mod tests { use super::*; use crate::{ stages::test_utils::{CollectingLayer, TraceStorage}, - traits::test_utils::MockBlockFetcher, types::{BlockID, Genesis, L2ExecutionPayload, L2ExecutionPayloadEnvelope, RawTransaction}, }; use alloc::vec; use alloy_primitives::{b256, Bytes, B256}; + use kona_providers::test_utils::TestL2ChainProvider; use op_alloy_consensus::OpTxType; use tracing::Level; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; @@ -473,7 +471,7 @@ mod tests { let l1_blocks = vec![]; let l2_safe_head = L2BlockInfo::default(); let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let batch = SpanBatch::default(); assert_eq!( batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, @@ -494,7 +492,7 @@ mod tests { let l1_blocks = vec![BlockInfo::default()]; let l2_safe_head = L2BlockInfo::default(); let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let batch = SpanBatch::default(); assert_eq!( batch.check_batch(&cfg, &l1_blocks, l2_safe_head, &inclusion_block, &mut fetcher).await, @@ -516,7 +514,7 @@ mod tests { let l1_blocks = vec![block]; let l2_safe_head = L2BlockInfo::default(); let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let first = SpanBatchElement { epoch_num: 10, ..Default::default() }; let batch = SpanBatch { batches: vec![first], ..Default::default() }; assert_eq!( @@ -543,7 +541,7 @@ mod tests { let l1_blocks = vec![block]; let l2_safe_head = L2BlockInfo::default(); let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let batch = SpanBatch { batches: vec![first], ..Default::default() }; assert_eq!( @@ -574,7 +572,7 @@ mod tests { ..Default::default() }; let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let first = SpanBatchElement { epoch_num: 10, timestamp: 21, ..Default::default() }; let batch = SpanBatch { batches: vec![first], ..Default::default() }; assert_eq!( @@ -602,7 +600,7 @@ mod tests { ..Default::default() }; let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let batch = SpanBatch { batches: vec![first], ..Default::default() }; assert_eq!( @@ -628,7 +626,7 @@ mod tests { ..Default::default() }; let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let first = 
SpanBatchElement { epoch_num: 10, timestamp: 11, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 21, ..Default::default() }; let batch = SpanBatch { batches: vec![first, second], ..Default::default() }; @@ -655,7 +653,7 @@ mod tests { ..Default::default() }; let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let first = SpanBatchElement { epoch_num: 10, timestamp: 8, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { batches: vec![first, second], ..Default::default() }; @@ -682,7 +680,7 @@ mod tests { ..Default::default() }; let inclusion_block = BlockInfo::default(); - let mut fetcher = MockBlockFetcher::default(); + let mut fetcher = TestL2ChainProvider::default(); let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { batches: vec![first, second], ..Default::default() }; @@ -715,7 +713,7 @@ mod tests { l1_origin: BlockID { number: 9, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; fetcher.short_circuit = true; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; @@ -755,7 +753,7 @@ mod tests { block_info: BlockInfo { number: 40, parent_hash, timestamp: 10, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -799,7 +797,7 @@ mod tests { l1_origin: BlockID { number: 8, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -850,7 +848,7 @@ mod tests { l1_origin: BlockID { number: 9, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -900,7 +898,7 @@ mod tests { l1_origin: BlockID { number: 9, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, 
..Default::default() }; let batch = SpanBatch { @@ -947,7 +945,7 @@ mod tests { l1_origin: BlockID { number: 14, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -1004,7 +1002,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 20, ..Default::default() }; let second = SpanBatchElement { epoch_num: 10, timestamp: 20, ..Default::default() }; let third = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; @@ -1054,7 +1052,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 20, transactions: vec![] }; let second = SpanBatchElement { epoch_num: 10, timestamp: 20, transactions: vec![] }; let third = SpanBatchElement { epoch_num: 11, timestamp: 20, transactions: vec![] }; @@ -1107,7 +1105,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 20, @@ -1170,7 +1168,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 20, @@ -1229,7 +1227,7 @@ mod tests { block_info: BlockInfo { number: 40, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let filler_bytes = RawTransaction(Bytes::copy_from_slice(&[OpTxType::Eip1559 as u8])); let first = SpanBatchElement { epoch_num: 10, @@ -1288,7 +1286,7 @@ mod tests { l1_origin: BlockID { number: 9, ..Default::default() }, ..Default::default() }; - let mut fetcher = MockBlockFetcher { blocks: vec![l2_block], ..Default::default() }; + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], ..Default::default() }; let first = SpanBatchElement { epoch_num: 10, timestamp: 10, ..Default::default() }; let second = SpanBatchElement { epoch_num: 11, timestamp: 20, ..Default::default() }; let batch = SpanBatch { @@ -1343,7 +1341,7 @@ mod tests { parent_beacon_block_root: None, execution_payload: L2ExecutionPayload { block_number: 41, ..Default::default() }, }; - let mut fetcher = MockBlockFetcher { + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], payloads: vec![payload], 
..Default::default() @@ -1412,7 +1410,7 @@ mod tests { ..Default::default() }, }; - let mut fetcher = MockBlockFetcher { + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], payloads: vec![payload], ..Default::default() @@ -1478,7 +1476,7 @@ mod tests { ..Default::default() }, }; - let mut fetcher = MockBlockFetcher { + let mut fetcher = TestL2ChainProvider { blocks: vec![l2_block], payloads: vec![payload], ..Default::default() diff --git a/crates/derive/src/types/errors.rs b/crates/derive/src/types/errors.rs index bedd54b36..f7a41c0d7 100644 --- a/crates/derive/src/types/errors.rs +++ b/crates/derive/src/types/errors.rs @@ -284,142 +284,3 @@ impl Display for BuilderError { } } } - -/// An [op_alloy_consensus::TxDeposit] validation error. -#[derive(Debug)] -pub enum DepositError { - /// Unexpected number of deposit event log topics. - UnexpectedTopicsLen(usize), - /// Invalid deposit event selector. - /// Expected: [B256] (deposit event selector), Actual: [B256] (event log topic). - InvalidSelector(B256, B256), - /// Incomplete opaqueData slice header (incomplete length). - IncompleteOpaqueData(usize), - /// The log data is not aligned to 32 bytes. - UnalignedData(usize), - /// Failed to decode the `from` field of the deposit event (the second topic). - FromDecode(B256), - /// Failed to decode the `to` field of the deposit event (the third topic). - ToDecode(B256), - /// Invalid opaque data content offset. - InvalidOpaqueDataOffset(Bytes), - /// Invalid opaque data content length. - InvalidOpaqueDataLength(Bytes), - /// Opaque data length exceeds the deposit log event data length. - /// Specified: [usize] (data length), Actual: [usize] (opaque data length). - OpaqueDataOverflow(usize, usize), - /// Opaque data with padding exceeds the specified data length. - PaddedOpaqueDataOverflow(usize, usize), - /// An invalid deposit version. - InvalidVersion(B256), - /// Unexpected opaque data length - UnexpectedOpaqueDataLen(usize), - /// Failed to decode the deposit mint value. - MintDecode(Bytes), - /// Failed to decode the deposit gas value. - GasDecode(Bytes), - /// A custom error wrapping [anyhow::Error]. 
- Custom(anyhow::Error), -} - -impl PartialEq for DepositError { - fn eq(&self, other: &DepositError) -> bool { - match (self, other) { - (DepositError::UnexpectedTopicsLen(l1), DepositError::UnexpectedTopicsLen(l2)) => { - l1 == l2 - } - (DepositError::InvalidSelector(e1, t1), DepositError::InvalidSelector(e2, t2)) => { - e1 == e2 && t1 == t2 - } - (DepositError::IncompleteOpaqueData(l1), DepositError::IncompleteOpaqueData(l2)) => { - l1 == l2 - } - (DepositError::UnalignedData(d1), DepositError::UnalignedData(d2)) => d1 == d2, - (DepositError::FromDecode(e1), DepositError::FromDecode(e2)) => e1 == e2, - (DepositError::ToDecode(e1), DepositError::ToDecode(e2)) => e1 == e2, - ( - DepositError::InvalidOpaqueDataOffset(o1), - DepositError::InvalidOpaqueDataOffset(o2), - ) => o1 == o2, - ( - DepositError::InvalidOpaqueDataLength(o1), - DepositError::InvalidOpaqueDataLength(o2), - ) => o1 == o2, - ( - DepositError::OpaqueDataOverflow(l1, l2), - DepositError::OpaqueDataOverflow(l3, l4), - ) => l1 == l3 && l2 == l4, - ( - DepositError::PaddedOpaqueDataOverflow(l1, l2), - DepositError::PaddedOpaqueDataOverflow(l3, l4), - ) => l1 == l3 && l2 == l4, - (DepositError::InvalidVersion(v1), DepositError::InvalidVersion(v2)) => v1 == v2, - ( - DepositError::UnexpectedOpaqueDataLen(a), - DepositError::UnexpectedOpaqueDataLen(b), - ) => a == b, - (DepositError::MintDecode(a), DepositError::MintDecode(b)) => a == b, - (DepositError::GasDecode(a), DepositError::GasDecode(b)) => a == b, - (DepositError::Custom(_), DepositError::Custom(_)) => true, - _ => false, - } - } -} - -impl Display for DepositError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - DepositError::UnexpectedTopicsLen(len) => { - write!(f, "Unexpected number of deposit event log topics: {}", len) - } - DepositError::InvalidSelector(expected, actual) => { - write!(f, "Invalid deposit event selector: {}, expected {}", actual, expected) - } - DepositError::IncompleteOpaqueData(len) => { - write!(f, "Incomplete opaqueData slice header (incomplete length): {}", len) - } - DepositError::UnalignedData(data) => { - write!(f, "Unaligned log data, expected multiple of 32 bytes, got: {}", data) - } - DepositError::FromDecode(topic) => { - write!(f, "Failed to decode the `from` address of the deposit log topic: {}", topic) - } - DepositError::ToDecode(topic) => { - write!(f, "Failed to decode the `to` address of the deposit log topic: {}", topic) - } - DepositError::InvalidOpaqueDataOffset(offset) => { - write!(f, "Invalid u64 opaque data content offset: {:?}", offset) - } - DepositError::InvalidOpaqueDataLength(length) => { - write!(f, "Invalid u64 opaque data content length: {:?}", length) - } - DepositError::OpaqueDataOverflow(data_len, opaque_len) => { - write!( - f, - "Specified opaque data length {} exceeds the deposit log event data length {}", - opaque_len, data_len - ) - } - DepositError::PaddedOpaqueDataOverflow(data_len, opaque_len) => { - write!( - f, - "Opaque data with padding exceeds the specified data length: {} > {}", - opaque_len, data_len - ) - } - DepositError::InvalidVersion(version) => { - write!(f, "Invalid deposit version: {}", version) - } - DepositError::UnexpectedOpaqueDataLen(len) => { - write!(f, "Unexpected opaque data length: {}", len) - } - DepositError::MintDecode(data) => { - write!(f, "Failed to decode the u128 deposit mint value: {:?}", data) - } - DepositError::GasDecode(data) => { - write!(f, "Failed to decode the u64 deposit gas value: {:?}", data) - } - 
DepositError::Custom(e) => write!(f, "Custom error: {}", e), - } - } -} diff --git a/crates/derive/src/types/mod.rs b/crates/derive/src/types/mod.rs index afb1c85ef..35a1977fe 100644 --- a/crates/derive/src/types/mod.rs +++ b/crates/derive/src/types/mod.rs @@ -1,19 +1,11 @@ //! This module contains all of the types used within the derivation pipeline. -/// Re-export the kona primitive prelude -pub use kona_primitives::prelude::*; - -use alloc::vec::Vec; -pub use alloy_consensus::Receipt; -use alloy_primitives::Bytes; -use alloy_rlp::{Decodable, Encodable}; +/// Re-export the kona primitives. +pub use kona_primitives::*; mod attributes; pub use attributes::{L2AttributesWithParent, L2PayloadAttributes}; -mod deposits; -pub use deposits::*; - pub mod batch; pub use batch::{ Batch, BatchType, BatchValidity, BatchWithInclusionBlock, RawSpanBatch, SingleBatch, SpanBatch, @@ -26,14 +18,6 @@ pub use batch::{ mod ecotone; pub use ecotone::*; -mod payload; -pub use payload::{ - L2ExecutionPayload, L2ExecutionPayloadEnvelope, PAYLOAD_MEM_FIXED_COST, PAYLOAD_TX_MEM_OVERHEAD, -}; - -mod l1_block_info; -pub use l1_block_info::{L1BlockInfoBedrock, L1BlockInfoEcotone, L1BlockInfoTx}; - mod blob; pub use blob::{Blob, BlobData, BlobDecodingError, IndexedBlobHash}; @@ -52,57 +36,3 @@ pub use channel::Channel; mod errors; pub use errors::*; - -/// A raw transaction -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Debug, Default, Clone, PartialEq, Eq)] -pub struct RawTransaction(pub Bytes); - -impl RawTransaction { - /// Returns if the transaction is empty - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns if the transaction is a deposit - pub fn is_deposit(&self) -> bool { - !self.0.is_empty() && self.0[0] == 0x7E - } -} - -impl> From for RawTransaction { - fn from(bytes: T) -> Self { - Self(bytes.into()) - } -} - -impl Encodable for RawTransaction { - fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { - self.0.encode(out) - } -} - -impl Decodable for RawTransaction { - /// Decodes RLP encoded bytes into [RawTransaction] bytes - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let tx_bytes = Bytes::decode(buf)?; - Ok(Self(tx_bytes)) - } -} - -impl AsRef<[u8]> for RawTransaction { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -/// A single L2 block derived from a batch. 
-#[derive(Debug, Clone)] -pub struct BlockInput { - /// Timestamp of the L2 block - pub timestamp: u64, - /// Transactions included in this block - pub transactions: Vec, - /// The L1 block this batch was fully derived from - pub l1_inclusion_block: u64, -} diff --git a/crates/plasma/Cargo.toml b/crates/plasma/Cargo.toml index 4bacdf937..ef9f73d4b 100644 --- a/crates/plasma/Cargo.toml +++ b/crates/plasma/Cargo.toml @@ -15,6 +15,7 @@ tracing.workspace = true # Local kona-primitives = { path = "../primitives" } +kona-providers = { path = "../providers" } # External alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "e3f2f07", default-features = false } @@ -29,6 +30,6 @@ tracing-subscriber = "0.3.18" serde_json = { version = "1.0.68", default-features = false } [features] -default = ["serde"] -serde = ["dep:serde"] -test-utils = [] +default = [ "serde" ] +serde = [ "dep:serde", "kona-providers/serde", "kona-primitives/serde" ] +test-utils = [ "kona-providers/test-utils" ] diff --git a/crates/plasma/src/lib.rs b/crates/plasma/src/lib.rs index 4fee90dbf..8f46bbff3 100644 --- a/crates/plasma/src/lib.rs +++ b/crates/plasma/src/lib.rs @@ -6,19 +6,14 @@ extern crate alloc; -pub mod traits; -pub mod types; - // Re-export kona primitives. -pub use kona_primitives::prelude::*; +pub use kona_primitives::*; -/// The prelude exports common types and traits. -pub mod prelude { - pub use crate::{ - traits::{ChainProvider, PlasmaInputFetcher}, - types::{FinalizedHeadSignal, Keccak256Commitment, PlasmaError, SystemConfig}, - }; -} +pub mod traits; +pub use traits::PlasmaInputFetcher; + +pub mod types; +pub use types::{FinalizedHeadSignal, Keccak256Commitment, PlasmaError, SystemConfig}; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; diff --git a/crates/plasma/src/test_utils.rs b/crates/plasma/src/test_utils.rs index aaa7cab93..31d7f4ca8 100644 --- a/crates/plasma/src/test_utils.rs +++ b/crates/plasma/src/test_utils.rs @@ -1,18 +1,18 @@ //! Test utilities for the Plasma crate. use crate::{ - traits::{ChainProvider, PlasmaInputFetcher}, + traits::PlasmaInputFetcher, types::{FinalizedHeadSignal, PlasmaError}, }; use alloc::{boxed::Box, vec::Vec}; -use alloy_consensus::{Header, Receipt, TxEnvelope}; -use alloy_primitives::{Bytes, B256}; +use alloy_primitives::Bytes; use anyhow::Result; use async_trait::async_trait; use kona_primitives::{ block::{BlockID, BlockInfo}, system_config::SystemConfig, }; +use kona_providers::test_utils::TestChainProvider; /// A mock plasma input fetcher for testing. #[derive(Debug, Clone, Default)] @@ -58,93 +58,3 @@ impl PlasmaInputFetcher for TestPlasmaInputFetcher { fn on_finalized_head_signal(&mut self, _block_number: FinalizedHeadSignal) {} } - -/// A mock chain provider for testing. -#[derive(Debug, Clone, Default)] -pub struct TestChainProvider { - /// Maps block numbers to block information using a tuple list. - pub blocks: Vec<(u64, BlockInfo)>, - /// Maps block hashes to header information using a tuple list. - pub headers: Vec<(B256, Header)>, - /// Maps block hashes to receipts using a tuple list. - pub receipts: Vec<(B256, Vec)>, -} - -impl TestChainProvider { - /// Insert a block into the mock chain provider. - pub fn insert_block(&mut self, number: u64, block: BlockInfo) { - self.blocks.push((number, block)); - } - - /// Insert receipts into the mock chain provider. - pub fn insert_receipts(&mut self, hash: B256, receipts: Vec) { - self.receipts.push((hash, receipts)); - } - - /// Insert a header into the mock chain provider. 
- pub fn insert_header(&mut self, hash: B256, header: Header) { - self.headers.push((hash, header)); - } - - /// Clears headers from the mock chain provider. - pub fn clear_headers(&mut self) { - self.headers.clear(); - } - - /// Clears blocks from the mock chain provider. - pub fn clear_blocks(&mut self) { - self.blocks.clear(); - } - - /// Clears receipts from the mock chain provider. - pub fn clear_receipts(&mut self) { - self.receipts.clear(); - } - - /// Clears all blocks and receipts from the mock chain provider. - pub fn clear(&mut self) { - self.clear_blocks(); - self.clear_receipts(); - self.clear_headers(); - } -} - -#[async_trait] -impl ChainProvider for TestChainProvider { - async fn header_by_hash(&mut self, hash: B256) -> Result
{ - if let Some((_, header)) = self.headers.iter().find(|(_, b)| b.hash_slow() == hash) { - Ok(header.clone()) - } else { - Err(anyhow::anyhow!("Header not found")) - } - } - - async fn block_info_by_number(&mut self, _number: u64) -> Result { - if let Some((_, block)) = self.blocks.iter().find(|(n, _)| *n == _number) { - Ok(*block) - } else { - Err(anyhow::anyhow!("Block not found")) - } - } - - async fn receipts_by_hash(&mut self, _hash: B256) -> Result> { - if let Some((_, receipts)) = self.receipts.iter().find(|(h, _)| *h == _hash) { - Ok(receipts.clone()) - } else { - Err(anyhow::anyhow!("Receipts not found")) - } - } - - async fn block_info_and_transactions_by_hash( - &mut self, - hash: B256, - ) -> Result<(BlockInfo, Vec)> { - let block = self - .blocks - .iter() - .find(|(_, b)| b.hash == hash) - .map(|(_, b)| *b) - .ok_or_else(|| anyhow::anyhow!("Block not found"))?; - Ok((block, Vec::new())) - } -} diff --git a/crates/plasma/src/traits.rs b/crates/plasma/src/traits.rs index a86769b81..f4960bdf8 100644 --- a/crates/plasma/src/traits.rs +++ b/crates/plasma/src/traits.rs @@ -1,35 +1,14 @@ //! Traits for plasma sources and internal components. use crate::types::{FinalizedHeadSignal, PlasmaError}; -use alloc::{boxed::Box, vec::Vec}; -use alloy_consensus::{Header, Receipt, TxEnvelope}; -use alloy_primitives::{Bytes, B256}; +use alloc::boxed::Box; +use alloy_primitives::Bytes; use async_trait::async_trait; use kona_primitives::{ block::{BlockID, BlockInfo}, system_config::SystemConfig, }; - -/// Describes the functionality of a data source that can provide information from the blockchain. -#[async_trait] -pub trait ChainProvider { - /// Fetch the L1 [Header] for the given [B256] hash. - async fn header_by_hash(&mut self, hash: B256) -> anyhow::Result
; - - /// Returns the block at the given number, or an error if the block does not exist in the data - /// source. - async fn block_info_by_number(&mut self, number: u64) -> anyhow::Result; - - /// Returns all receipts in the block with the given hash, or an error if the block does not - /// exist in the data source. - async fn receipts_by_hash(&mut self, hash: B256) -> anyhow::Result>; - - /// Returns the [BlockInfo] and list of [TxEnvelope]s from the given block hash. - async fn block_info_and_transactions_by_hash( - &mut self, - hash: B256, - ) -> anyhow::Result<(BlockInfo, Vec)>; -} +use kona_providers::ChainProvider; /// A plasma input fetcher. #[async_trait] diff --git a/crates/derive/src/types/l1_block_info.rs b/crates/primitives/src/block_info.rs similarity index 100% rename from crates/derive/src/types/l1_block_info.rs rename to crates/primitives/src/block_info.rs diff --git a/crates/derive/src/types/deposits.rs b/crates/primitives/src/deposits.rs similarity index 74% rename from crates/derive/src/types/deposits.rs rename to crates/primitives/src/deposits.rs index 75c148dd8..8c40dbd2a 100644 --- a/crates/derive/src/types/deposits.rs +++ b/crates/primitives/src/deposits.rs @@ -1,14 +1,164 @@ //! Contains deposit transaction types and helper methods. use alloc::{string::String, vec::Vec}; -use alloy_primitives::{keccak256, Address, Bytes, Log, TxKind, B256, U256, U64}; +use alloy_primitives::{b256, keccak256, Address, Bytes, Log, TxKind, B256, U256, U64}; use alloy_rlp::Encodable; +use core::fmt::Display; use op_alloy_consensus::TxDeposit; -use crate::{ - params::DEPOSIT_EVENT_ABI_HASH, - types::{DepositError, RawTransaction}, -}; +use crate::RawTransaction; + +/// Deposit log event abi signature. +pub const DEPOSIT_EVENT_ABI: &str = "TransactionDeposited(address,address,uint256,bytes)"; + +/// Deposit event abi hash. +/// +/// This is the keccak256 hash of the deposit event ABI signature. +/// `keccak256("TransactionDeposited(address,address,uint256,bytes)")` +pub const DEPOSIT_EVENT_ABI_HASH: B256 = + b256!("b3813568d9991fc951961fcb4c784893574240a28925604d09fc577c55bb7c32"); + +/// The initial version of the deposit event log. +pub const DEPOSIT_EVENT_VERSION_0: B256 = B256::ZERO; + +/// An [op_alloy_consensus::TxDeposit] validation error. +#[derive(Debug)] +pub enum DepositError { + /// Unexpected number of deposit event log topics. + UnexpectedTopicsLen(usize), + /// Invalid deposit event selector. + /// Expected: [B256] (deposit event selector), Actual: [B256] (event log topic). + InvalidSelector(B256, B256), + /// Incomplete opaqueData slice header (incomplete length). + IncompleteOpaqueData(usize), + /// The log data is not aligned to 32 bytes. + UnalignedData(usize), + /// Failed to decode the `from` field of the deposit event (the second topic). + FromDecode(B256), + /// Failed to decode the `to` field of the deposit event (the third topic). + ToDecode(B256), + /// Invalid opaque data content offset. + InvalidOpaqueDataOffset(Bytes), + /// Invalid opaque data content length. + InvalidOpaqueDataLength(Bytes), + /// Opaque data length exceeds the deposit log event data length. + /// Specified: [usize] (data length), Actual: [usize] (opaque data length). + OpaqueDataOverflow(usize, usize), + /// Opaque data with padding exceeds the specified data length. + PaddedOpaqueDataOverflow(usize, usize), + /// An invalid deposit version. + InvalidVersion(B256), + /// Unexpected opaque data length + UnexpectedOpaqueDataLen(usize), + /// Failed to decode the deposit mint value. 
+ MintDecode(Bytes), + /// Failed to decode the deposit gas value. + GasDecode(Bytes), + /// A custom error wrapping [anyhow::Error]. + Custom(anyhow::Error), +} + +impl PartialEq for DepositError { + fn eq(&self, other: &DepositError) -> bool { + match (self, other) { + (DepositError::UnexpectedTopicsLen(l1), DepositError::UnexpectedTopicsLen(l2)) => { + l1 == l2 + } + (DepositError::InvalidSelector(e1, t1), DepositError::InvalidSelector(e2, t2)) => { + e1 == e2 && t1 == t2 + } + (DepositError::IncompleteOpaqueData(l1), DepositError::IncompleteOpaqueData(l2)) => { + l1 == l2 + } + (DepositError::UnalignedData(d1), DepositError::UnalignedData(d2)) => d1 == d2, + (DepositError::FromDecode(e1), DepositError::FromDecode(e2)) => e1 == e2, + (DepositError::ToDecode(e1), DepositError::ToDecode(e2)) => e1 == e2, + ( + DepositError::InvalidOpaqueDataOffset(o1), + DepositError::InvalidOpaqueDataOffset(o2), + ) => o1 == o2, + ( + DepositError::InvalidOpaqueDataLength(o1), + DepositError::InvalidOpaqueDataLength(o2), + ) => o1 == o2, + ( + DepositError::OpaqueDataOverflow(l1, l2), + DepositError::OpaqueDataOverflow(l3, l4), + ) => l1 == l3 && l2 == l4, + ( + DepositError::PaddedOpaqueDataOverflow(l1, l2), + DepositError::PaddedOpaqueDataOverflow(l3, l4), + ) => l1 == l3 && l2 == l4, + (DepositError::InvalidVersion(v1), DepositError::InvalidVersion(v2)) => v1 == v2, + ( + DepositError::UnexpectedOpaqueDataLen(a), + DepositError::UnexpectedOpaqueDataLen(b), + ) => a == b, + (DepositError::MintDecode(a), DepositError::MintDecode(b)) => a == b, + (DepositError::GasDecode(a), DepositError::GasDecode(b)) => a == b, + (DepositError::Custom(_), DepositError::Custom(_)) => true, + _ => false, + } + } +} + +impl Display for DepositError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + DepositError::UnexpectedTopicsLen(len) => { + write!(f, "Unexpected number of deposit event log topics: {}", len) + } + DepositError::InvalidSelector(expected, actual) => { + write!(f, "Invalid deposit event selector: {}, expected {}", actual, expected) + } + DepositError::IncompleteOpaqueData(len) => { + write!(f, "Incomplete opaqueData slice header (incomplete length): {}", len) + } + DepositError::UnalignedData(data) => { + write!(f, "Unaligned log data, expected multiple of 32 bytes, got: {}", data) + } + DepositError::FromDecode(topic) => { + write!(f, "Failed to decode the `from` address of the deposit log topic: {}", topic) + } + DepositError::ToDecode(topic) => { + write!(f, "Failed to decode the `to` address of the deposit log topic: {}", topic) + } + DepositError::InvalidOpaqueDataOffset(offset) => { + write!(f, "Invalid u64 opaque data content offset: {:?}", offset) + } + DepositError::InvalidOpaqueDataLength(length) => { + write!(f, "Invalid u64 opaque data content length: {:?}", length) + } + DepositError::OpaqueDataOverflow(data_len, opaque_len) => { + write!( + f, + "Specified opaque data length {} exceeds the deposit log event data length {}", + opaque_len, data_len + ) + } + DepositError::PaddedOpaqueDataOverflow(data_len, opaque_len) => { + write!( + f, + "Opaque data with padding exceeds the specified data length: {} > {}", + opaque_len, data_len + ) + } + DepositError::InvalidVersion(version) => { + write!(f, "Invalid deposit version: {}", version) + } + DepositError::UnexpectedOpaqueDataLen(len) => { + write!(f, "Unexpected opaque data length: {}", len) + } + DepositError::MintDecode(data) => { + write!(f, "Failed to decode the u128 deposit mint value: {:?}", data) + 
} + DepositError::GasDecode(data) => { + write!(f, "Failed to decode the u64 deposit gas value: {:?}", data) + } + DepositError::Custom(e) => write!(f, "Custom error: {}", e), + } + } +} /// Source domain identifiers for deposit transactions. #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -144,7 +294,7 @@ impl UpgradeDepositSource { /// bytes opaqueData /// ); /// ``` -pub(crate) fn decode_deposit( +pub fn decode_deposit( block_hash: B256, index: usize, log: &Log, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 2fa6228c4..1196c13cf 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -7,17 +7,33 @@ extern crate alloc; pub mod block; +pub use block::{Block, BlockID, BlockInfo, BlockKind, L2BlockInfo, OpBlock, Withdrawal}; + +pub mod block_info; +pub use block_info::{L1BlockInfoBedrock, L1BlockInfoEcotone, L1BlockInfoTx}; + +pub mod raw_tx; +pub use raw_tx::RawTransaction; + +pub mod deposits; +pub use deposits::{ + decode_deposit, DepositError, DepositSourceDomain, DepositSourceDomainIdentifier, + L1InfoDepositSource, UpgradeDepositSource, UserDepositSource, DEPOSIT_EVENT_ABI_HASH, +}; + pub mod genesis; +pub use genesis::Genesis; + pub mod params; +pub use params::*; + +pub mod payload; +pub use payload::{ + L2ExecutionPayload, L2ExecutionPayloadEnvelope, PAYLOAD_MEM_FIXED_COST, PAYLOAD_TX_MEM_OVERHEAD, +}; + pub mod rollup_config; -pub mod system_config; +pub use rollup_config::RollupConfig; -/// The prelude exports common types and traits. -pub mod prelude { - pub use crate::{ - block::{Block, BlockID, BlockInfo, BlockKind, L2BlockInfo, OpBlock, Withdrawal}, - genesis::Genesis, - rollup_config::RollupConfig, - system_config::{SystemAccounts, SystemConfig, SystemConfigUpdateType}, - }; -} +pub mod system_config; +pub use system_config::{SystemAccounts, SystemConfig, SystemConfigUpdateType}; diff --git a/crates/derive/src/types/payload.rs b/crates/primitives/src/payload.rs similarity index 98% rename from crates/derive/src/types/payload.rs rename to crates/primitives/src/payload.rs index 3c5bea111..bf730db46 100644 --- a/crates/derive/src/types/payload.rs +++ b/crates/primitives/src/payload.rs @@ -13,10 +13,9 @@ pub const PAYLOAD_MEM_FIXED_COST: u64 = 1000; /// 24 bytes per tx overhead (size of slice header in memory). pub const PAYLOAD_TX_MEM_OVERHEAD: u64 = 24; -use crate::types::{L1BlockInfoBedrock, L1BlockInfoEcotone}; - use super::{ - Block, BlockInfo, L1BlockInfoTx, L2BlockInfo, OpBlock, RollupConfig, SystemConfig, Withdrawal, + Block, BlockInfo, L1BlockInfoBedrock, L1BlockInfoEcotone, L1BlockInfoTx, L2BlockInfo, OpBlock, + RollupConfig, SystemConfig, Withdrawal, }; use alloy_rlp::{Decodable, Encodable}; diff --git a/crates/primitives/src/raw_tx.rs b/crates/primitives/src/raw_tx.rs new file mode 100644 index 000000000..cb785b5cb --- /dev/null +++ b/crates/primitives/src/raw_tx.rs @@ -0,0 +1,47 @@ +//! Contains the [RawTransaction] type. 
+ +use alloy_primitives::Bytes; +use alloy_rlp::{Decodable, Encodable}; + +/// A raw transaction +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct RawTransaction(pub Bytes); + +impl RawTransaction { + /// Returns if the transaction is empty + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns if the transaction is a deposit + pub fn is_deposit(&self) -> bool { + !self.0.is_empty() && self.0[0] == 0x7E + } +} + +impl> From for RawTransaction { + fn from(bytes: T) -> Self { + Self(bytes.into()) + } +} + +impl Encodable for RawTransaction { + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + self.0.encode(out) + } +} + +impl Decodable for RawTransaction { + /// Decodes RLP encoded bytes into [RawTransaction] bytes + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let tx_bytes = Bytes::decode(buf)?; + Ok(Self(tx_bytes)) + } +} + +impl AsRef<[u8]> for RawTransaction { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} diff --git a/crates/providers/Cargo.toml b/crates/providers/Cargo.toml new file mode 100644 index 000000000..4df33b1c7 --- /dev/null +++ b/crates/providers/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "kona-providers" +description = "Provider traits and implementations for kona crates" +version = "0.0.1" +edition.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true +homepage.workspace = true + +[dependencies] +anyhow.workspace = true +tracing.workspace = true +alloy-rlp = { workspace = true, features = ["derive"] } +alloy-primitives = { workspace = true, features = ["rlp"] } + +# Local +kona-primitives = { path = "../primitives", version = "0.0.1" } + +# External +async-trait = "0.1.77" +hashbrown = { version = "0.14.3", optional = true } + +# Alloy Types +alloy-sol-types = { version = "0.7.1", default-features = false } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "e3f2f07", default-features = false } +op-alloy-consensus = { git = "https://github.com/clabby/op-alloy", branch = "refcell/consensus-port", default-features = false } + +# `serde` feature dependencies +serde = { version = "1.0.197", default-features = false, features = ["derive"], optional = true } + +[dev-dependencies] +hashbrown = "0.14.3" +tokio = { version = "1.37", features = ["full"] } +tracing-subscriber = "0.3.18" +serde_json = { version = "1.0.68", default-features = false } + +[features] +default = ["serde"] +serde = ["dep:serde"] +test-utils = [ "dep:hashbrown" ] diff --git a/crates/providers/README.md b/crates/providers/README.md new file mode 100644 index 000000000..9a5df05c3 --- /dev/null +++ b/crates/providers/README.md @@ -0,0 +1,3 @@ +# `kona-providers` + +Providers traits and implementations for kona crates. diff --git a/crates/providers/src/chain_provider.rs b/crates/providers/src/chain_provider.rs new file mode 100644 index 000000000..18e667d9f --- /dev/null +++ b/crates/providers/src/chain_provider.rs @@ -0,0 +1,29 @@ +//! This module defines the [ChainProvider] trait. + +use alloc::{boxed::Box, vec::Vec}; +use alloy_consensus::{Header, Receipt, TxEnvelope}; +use alloy_primitives::B256; +use anyhow::Result; +use async_trait::async_trait; +use kona_primitives::block::BlockInfo; + +/// Describes the functionality of a data source that can provide information from the blockchain. +#[async_trait] +pub trait ChainProvider { + /// Fetch the L1 [Header] for the given [B256] hash. 
+ async fn header_by_hash(&mut self, hash: B256) -> Result<Header>
; + + /// Returns the block at the given number, or an error if the block does not exist in the data + /// source. + async fn block_info_by_number(&mut self, number: u64) -> Result; + + /// Returns all receipts in the block with the given hash, or an error if the block does not + /// exist in the data source. + async fn receipts_by_hash(&mut self, hash: B256) -> Result>; + + /// Returns the [BlockInfo] and list of [TxEnvelope]s from the given block hash. + async fn block_info_and_transactions_by_hash( + &mut self, + hash: B256, + ) -> Result<(BlockInfo, Vec)>; +} diff --git a/crates/providers/src/l2_chain_provider.rs b/crates/providers/src/l2_chain_provider.rs new file mode 100644 index 000000000..6167f3342 --- /dev/null +++ b/crates/providers/src/l2_chain_provider.rs @@ -0,0 +1,28 @@ +//! This module defines the [L2ChainProvider] trait. + +use alloc::{boxed::Box, sync::Arc}; +use anyhow::Result; +use async_trait::async_trait; +use kona_primitives::{ + block::L2BlockInfo, payload::L2ExecutionPayloadEnvelope, rollup_config::RollupConfig, + system_config::SystemConfig, +}; + +/// Describes the functionality of a data source that fetches safe blocks. +#[async_trait] +pub trait L2ChainProvider { + /// Returns the L2 block info given a block number. + /// Errors if the block does not exist. + async fn l2_block_info_by_number(&mut self, number: u64) -> Result; + + /// Returns an execution payload for a given number. + /// Errors if the execution payload does not exist. + async fn payload_by_number(&mut self, number: u64) -> Result; + + /// Returns the [SystemConfig] by L2 number. + async fn system_config_by_number( + &mut self, + number: u64, + rollup_config: Arc, + ) -> Result; +} diff --git a/crates/providers/src/lib.rs b/crates/providers/src/lib.rs new file mode 100644 index 000000000..5d13137bd --- /dev/null +++ b/crates/providers/src/lib.rs @@ -0,0 +1,16 @@ +#![doc = include_str!("../README.md")] +#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)] +#![deny(unused_must_use, rust_2018_idioms)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![no_std] + +extern crate alloc; + +pub mod chain_provider; +pub use chain_provider::ChainProvider; + +pub mod l2_chain_provider; +pub use l2_chain_provider::L2ChainProvider; + +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; diff --git a/crates/derive/src/traits/test_utils/data_sources.rs b/crates/providers/src/test_utils.rs similarity index 93% rename from crates/derive/src/traits/test_utils/data_sources.rs rename to crates/providers/src/test_utils.rs index 5540f1a40..357641329 100644 --- a/crates/derive/src/traits/test_utils/data_sources.rs +++ b/crates/providers/src/test_utils.rs @@ -1,72 +1,15 @@ -//! Data Sources Test Utilities +//! Test utilities for kona providers. -use crate::{ - traits::{ChainProvider, L2ChainProvider}, - types::{BlockInfo, L2BlockInfo, L2ExecutionPayloadEnvelope, RollupConfig, SystemConfig}, -}; +use crate::{ChainProvider, L2ChainProvider}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{Header, Receipt, TxEnvelope}; use alloy_primitives::B256; use anyhow::Result; use async_trait::async_trait; use hashbrown::HashMap; - -/// A mock block fetcher. -#[derive(Debug, Default)] -pub struct MockBlockFetcher { - /// Blocks - pub blocks: Vec, - /// Short circuit the block return to be the first block. 
- pub short_circuit: bool, - /// Payloads - pub payloads: Vec, - /// System configs - pub system_configs: HashMap, -} - -impl MockBlockFetcher { - /// Creates a new [MockBlockFetcher] with the given origin and batches. - pub fn new( - blocks: Vec, - payloads: Vec, - system_configs: HashMap, - ) -> Self { - Self { blocks, short_circuit: false, payloads, system_configs } - } -} - -#[async_trait] -impl L2ChainProvider for MockBlockFetcher { - async fn l2_block_info_by_number(&mut self, number: u64) -> Result { - if self.short_circuit { - return self.blocks.first().copied().ok_or_else(|| anyhow::anyhow!("Block not found")); - } - self.blocks - .iter() - .find(|b| b.block_info.number == number) - .cloned() - .ok_or_else(|| anyhow::anyhow!("Block not found")) - } - - async fn payload_by_number(&mut self, number: u64) -> Result { - self.payloads - .iter() - .find(|p| p.execution_payload.block_number == number) - .cloned() - .ok_or_else(|| anyhow::anyhow!("Payload not found")) - } - - async fn system_config_by_number( - &mut self, - number: u64, - _: Arc, - ) -> Result { - self.system_configs - .get(&number) - .ok_or_else(|| anyhow::anyhow!("System config not found")) - .cloned() - } -} +use kona_primitives::{ + BlockInfo, L2BlockInfo, L2ExecutionPayloadEnvelope, RollupConfig, SystemConfig, +}; /// A mock chain provider for testing. #[derive(Debug, Clone, Default)] @@ -157,3 +100,60 @@ impl ChainProvider for TestChainProvider { Ok((block, Vec::new())) } } + +/// An [L2ChainProvider] implementation for testing. +#[derive(Debug, Default)] +pub struct TestL2ChainProvider { + /// Blocks + pub blocks: Vec, + /// Short circuit the block return to be the first block. + pub short_circuit: bool, + /// Payloads + pub payloads: Vec, + /// System configs + pub system_configs: HashMap, +} + +impl TestL2ChainProvider { + /// Creates a new [MockBlockFetcher] with the given origin and batches. 
+ pub fn new( + blocks: Vec, + payloads: Vec, + system_configs: HashMap, + ) -> Self { + Self { blocks, short_circuit: false, payloads, system_configs } + } +} + +#[async_trait] +impl L2ChainProvider for TestL2ChainProvider { + async fn l2_block_info_by_number(&mut self, number: u64) -> Result { + if self.short_circuit { + return self.blocks.first().copied().ok_or_else(|| anyhow::anyhow!("Block not found")); + } + self.blocks + .iter() + .find(|b| b.block_info.number == number) + .cloned() + .ok_or_else(|| anyhow::anyhow!("Block not found")) + } + + async fn payload_by_number(&mut self, number: u64) -> Result { + self.payloads + .iter() + .find(|p| p.execution_payload.block_number == number) + .cloned() + .ok_or_else(|| anyhow::anyhow!("Payload not found")) + } + + async fn system_config_by_number( + &mut self, + number: u64, + _: Arc, + ) -> Result { + self.system_configs + .get(&number) + .ok_or_else(|| anyhow::anyhow!("System config not found")) + .cloned() + } +} From d4bfc05d42bcbf0d4f30cad727d1ecebd322d057 Mon Sep 17 00:00:00 2001 From: refcell Date: Sat, 27 Apr 2024 10:02:04 -0700 Subject: [PATCH 6/6] fix(derive): batch queue test fix --- crates/derive/src/stages/batch_queue.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crates/derive/src/stages/batch_queue.rs b/crates/derive/src/stages/batch_queue.rs index c2e884e6f..47931c549 100644 --- a/crates/derive/src/stages/batch_queue.rs +++ b/crates/derive/src/stages/batch_queue.rs @@ -638,14 +638,13 @@ mod tests { }; let res = bq.next_batch(parent).await.unwrap_err(); let logs = trace_store.get_by_level(Level::INFO); - assert_eq!(logs.len(), 4); + assert_eq!(logs.len(), 2); let str = alloc::format!("Advancing batch queue origin: {:?}", origin); assert!(logs[0].contains(&str)); - assert!(logs[1].contains("need more l1 blocks to check entire origins of span batch")); - assert!(logs[2].contains("Deriving next batch for epoch: 16988980031808077784")); - assert!(logs[3].contains("need more l1 blocks to check entire origins of span batch")); + assert!(logs[1].contains("Deriving next batch for epoch: 16988980031808077784")); let warns = trace_store.get_by_level(Level::WARN); - assert_eq!(warns.len(), 0); + assert_eq!(warns.len(), 1); + assert!(warns[0].contains("batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid")); assert_eq!(res, StageError::NotEnoughData); }
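// Illustrative sketch (not part of the patch): after the series lands, the
// deposit helpers and `RawTransaction` are flat re-exports of `kona-primitives`
// (the `prelude` module is gone), so downstream code uses them directly. Only
// item paths and behaviors shown in the diff are relied on here:
// `DEPOSIT_EVENT_ABI_HASH` is documented above as the keccak256 of the deposit
// event signature, and `RawTransaction::is_deposit` checks the 0x7E type byte.

use alloy_primitives::keccak256;
use kona_primitives::{deposits::DEPOSIT_EVENT_ABI, RawTransaction, DEPOSIT_EVENT_ABI_HASH};

fn main() {
    // The ABI hash constant is the keccak256 of the event signature string.
    assert_eq!(keccak256(DEPOSIT_EVENT_ABI.as_bytes()), DEPOSIT_EVENT_ABI_HASH);

    // A transaction whose first byte is the deposit type byte (0x7E) is a deposit.
    let deposit = RawTransaction::from(vec![0x7E_u8, 0x01, 0x02]);
    let regular = RawTransaction::from(vec![0x02_u8, 0x01]);
    assert!(deposit.is_deposit());
    assert!(!regular.is_deposit());
}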