From 01391143ea226b6decbe1d57b9c83a52e3d71e0b Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 24 May 2022 18:27:36 +0200
Subject: [PATCH 001/102] refactor: move backend to separate module

---
 cli/src/cmd/cast/run.rs | 7 ++--
 cli/src/cmd/forge/run.rs | 17 +++++-----
 evm/src/executor/backend.rs | 62 ++++++++++++++++++++++++++++++++++
 evm/src/executor/builder.rs | 62 +++-------------------------------
 evm/src/executor/fork/mod.rs | 2 ++
 evm/src/executor/fork/multi.rs | 4 +++
 evm/src/executor/mod.rs | 42 +++++++++++------------
 forge/src/lib.rs | 10 +++---
 forge/src/multi_runner.rs | 9 ++---
 9 files changed, 115 insertions(+), 100 deletions(-)
 create mode 100644 evm/src/executor/backend.rs
 create mode 100644 evm/src/executor/fork/multi.rs

diff --git a/cli/src/cmd/cast/run.rs b/cli/src/cmd/cast/run.rs
index 3350c9052b5a..b8a3535778d7 100644
--- a/cli/src/cmd/cast/run.rs
+++ b/cli/src/cmd/cast/run.rs
@@ -9,8 +9,8 @@ use ethers::{
 };
 use forge::{
     debug::DebugArena,
-    executor::{builder::Backend, opts::EvmOpts, DeployResult, ExecutorBuilder, RawCallResult},
-    trace::{identifier::EtherscanIdentifier, CallTraceArena, CallTraceDecoderBuilder, TraceKind},
+    executor::{DeployResult, ExecutorBuilder, opts::EvmOpts, RawCallResult},
+    trace::{CallTraceArena, CallTraceDecoderBuilder, identifier::EtherscanIdentifier, TraceKind},
 };
 use foundry_config::Config;
 use std::{
@@ -18,8 +18,9 @@ use std::{
     str::FromStr,
     time::Duration,
 };
-use ui::{TUIExitReason, Tui, Ui};
+use ui::{Tui, TUIExitReason, Ui};
 use yansi::Paint;
+use foundry_evm::executor::Backend;
 
 #[derive(Debug, Clone, Parser)]
 pub struct RunArgs {
diff --git a/cli/src/cmd/forge/run.rs b/cli/src/cmd/forge/run.rs
index 23764c1bfa88..f8af28fa95c9 100644
--- a/cli/src/cmd/forge/run.rs
+++ b/cli/src/cmd/forge/run.rs
@@ -1,5 +1,5 @@
 use crate::{
-    cmd::{forge::build::CoreBuildArgs, Cmd},
+    cmd::{Cmd, forge::build::CoreBuildArgs},
     compile, utils,
 };
 use clap::{Parser, ValueHint};
@@ -8,27 +8,28 @@ use ethers::{
     prelude::ArtifactId,
     solc::{
         artifacts::{CompactContractBytecode, ContractBytecode, ContractBytecodeSome},
-        utils::RuntimeOrHandle, Project,
+        Project,
+        utils::RuntimeOrHandle,
     },
     types::{Address, Bytes, Log, U256},
 };
 use forge::{
+    CALLER,
     debug::DebugArena,
     decode::decode_console_logs,
     executor::{
-        builder::Backend, opts::EvmOpts, CallResult, DatabaseRef, DeployResult, EvmError, Executor,
-        ExecutorBuilder, RawCallResult,
+        CallResult, DatabaseRef, DeployResult, EvmError, Executor, ExecutorBuilder,
+        opts::EvmOpts, RawCallResult,
     },
-    trace::{identifier::LocalTraceIdentifier, CallTraceArena, CallTraceDecoderBuilder, TraceKind},
-    CALLER,
+    trace::{CallTraceArena, CallTraceDecoderBuilder, identifier::LocalTraceIdentifier, TraceKind},
 };
 use foundry_common::evm::EvmArgs;
-use foundry_config::{figment::Figment, Config};
+use foundry_config::{Config, figment::Figment};
 use foundry_utils::{encode_args, format_token, IntoFunction, PostLinkInput};
 use std::{collections::BTreeMap, path::PathBuf};
-use ui::{TUIExitReason, Tui, Ui};
+use ui::{Tui, TUIExitReason, Ui};
 use yansi::Paint;
+use foundry_evm::executor::Backend;
 
 // Loads project's figment and merges the build cli arguments into it
 foundry_config::impl_figment_convert!(RunArgs, opts, evm_opts);
diff --git a/evm/src/executor/backend.rs b/evm/src/executor/backend.rs
new file mode 100644
index 000000000000..1eb9a2781c6e
--- /dev/null
+++ b/evm/src/executor/backend.rs
@@ -0,0 +1,62 @@
+use revm::db::{DatabaseRef, EmptyDB};
+use ethers::prelude::{H160, H256, U256};
+use revm::{AccountInfo, Env};
+use 
crate::executor::Fork; +use crate::executor::fork::SharedBackend; + +/// Variants of a [revm::Database] +#[derive(Debug, Clone)] +pub enum Backend { + /// Simple in memory [revm::Database] + Simple(EmptyDB), + /// A [revm::Database] that forks of a remote location and can have multiple consumers of the + /// same data + Forked(SharedBackend), + + // TODO +} + +impl Backend { + /// Instantiates a new backend union based on whether there was or not a fork url specified + pub async fn new(fork: Option, env: &Env) -> Self { + if let Some(fork) = fork { + Backend::Forked(fork.spawn_backend(env).await) + } else { + Self::simple() + } + } + + pub fn simple() -> Self { + Backend::Simple(EmptyDB()) + } +} + +impl DatabaseRef for Backend { + fn basic(&self, address: H160) -> AccountInfo { + match self { + Backend::Simple(inner) => inner.basic(address), + Backend::Forked(inner) => inner.basic(address), + } + } + + fn code_by_hash(&self, address: H256) -> bytes::Bytes { + match self { + Backend::Simple(inner) => inner.code_by_hash(address), + Backend::Forked(inner) => inner.code_by_hash(address), + } + } + + fn storage(&self, address: H160, index: U256) -> U256 { + match self { + Backend::Simple(inner) => inner.storage(address, index), + Backend::Forked(inner) => inner.storage(address, index), + } + } + + fn block_hash(&self, number: U256) -> H256 { + match self { + Backend::Simple(inner) => inner.block_hash(number), + Backend::Forked(inner) => inner.block_hash(number), + } + } +} diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index 4d07063b8f54..6f9d43359ab7 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -1,21 +1,21 @@ use ethers::prelude::Provider; use revm::{ - db::{DatabaseRef, EmptyDB}, Env, SpecId, }; use std::{path::PathBuf, sync::Arc}; use super::{ + Executor, fork::SharedBackend, inspector::{Cheatcodes, InspectorStackConfig}, - Executor, }; -use ethers::types::{H160, H256, U256}; +use ethers::types::{U256}; use crate::executor::fork::{BlockchainDb, BlockchainDbMeta}; -use revm::AccountInfo; + +use crate::executor::backend::Backend; #[derive(Default, Debug)] pub struct ExecutorBuilder { @@ -69,60 +69,6 @@ impl Fork { SharedBackend::spawn_backend(provider, db, pin_block.map(Into::into)).await } } -/// Variants of a [revm::Database] -#[derive(Debug, Clone)] -pub enum Backend { - /// Simple in memory [revm::Database] - Simple(EmptyDB), - /// A [revm::Database] that forks of a remote location and can have multiple consumers of the - /// same data - Forked(SharedBackend), -} - -impl Backend { - /// Instantiates a new backend union based on whether there was or not a fork url specified - pub async fn new(fork: Option, env: &Env) -> Self { - if let Some(fork) = fork { - Backend::Forked(fork.spawn_backend(env).await) - } else { - Self::simple() - } - } - - pub fn simple() -> Self { - Backend::Simple(EmptyDB()) - } -} - -impl DatabaseRef for Backend { - fn basic(&self, address: H160) -> AccountInfo { - match self { - Backend::Simple(inner) => inner.basic(address), - Backend::Forked(inner) => inner.basic(address), - } - } - - fn code_by_hash(&self, address: H256) -> bytes::Bytes { - match self { - Backend::Simple(inner) => inner.code_by_hash(address), - Backend::Forked(inner) => inner.code_by_hash(address), - } - } - - fn storage(&self, address: H160, index: U256) -> U256 { - match self { - Backend::Simple(inner) => inner.storage(address, index), - Backend::Forked(inner) => inner.storage(address, index), - } - } - - fn block_hash(&self, number: 
U256) -> H256 { - match self { - Backend::Simple(inner) => inner.block_hash(number), - Backend::Forked(inner) => inner.block_hash(number), - } - } -} impl ExecutorBuilder { #[must_use] diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index 7be7a30e4da3..a56c16400a17 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -6,3 +6,5 @@ pub use init::environment; mod cache; pub use cache::{BlockchainDb, BlockchainDbMeta, JsonBlockCacheDB, MemDb}; + +mod multi; \ No newline at end of file diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs new file mode 100644 index 000000000000..4ef5ec5f208a --- /dev/null +++ b/evm/src/executor/fork/multi.rs @@ -0,0 +1,4 @@ +//! Support for running multiple fork backend + +// TODO move some types from avil fork to evm +// \ No newline at end of file diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index b0dd076ff7e9..14ebd7240017 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -4,28 +4,6 @@ pub use abi::{ patch_hardhat_console_selector, HardhatConsoleCalls, CHEATCODE_ADDRESS, CONSOLE_ABI, HARDHAT_CONSOLE_ABI, HARDHAT_CONSOLE_ADDRESS, }; - -/// Executor configuration -pub mod opts; - -/// Executor inspectors -pub mod inspector; - -/// Forking provider -pub mod fork; - -/// Executor builder -pub mod builder; -pub use builder::{ExecutorBuilder, Fork}; - -/// Executor EVM spec identifiers -pub use revm::SpecId; - -/// Executor database trait -pub use revm::db::DatabaseRef; - -pub use revm::Env; - use self::inspector::{InspectorData, InspectorStackConfig}; use crate::{debug::DebugArena, trace::CallTraceArena, CALLER}; use bytes::Bytes; @@ -42,6 +20,25 @@ use revm::{ return_ok, Account, BlockEnv, CreateScheme, Return, TransactOut, TransactTo, TxEnv, EVM, }; use std::collections::BTreeMap; +/// Reexport commonly used revm types +pub use revm::{ + db::DatabaseRef, + SpecId, + Env +}; + +/// Executor configuration +pub mod opts; +/// Executor inspectors +pub mod inspector; +/// Forking provider +pub mod fork; +/// Executor builder +pub mod builder; +/// custom revm database implementations +mod backend; + +pub use builder::{ExecutorBuilder, Fork, Backend}; /// A mapping of addresses to their changed state. pub type StateChangeset = HashMap; @@ -164,6 +161,7 @@ pub struct Executor { // Also, if we stored the VM here we would still need to // take `&mut self` when we are not committing to the database, since // we need to set `evm.env`. 
+ // TODO in order to support multiforks we need to move this to the `Backend` pub db: CacheDB, env: Env, inspector_config: InspectorStackConfig, diff --git a/forge/src/lib.rs b/forge/src/lib.rs index 71cc0042b33d..1c1c7edea244 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -23,19 +23,19 @@ pub mod test_helpers { use crate::TestFilter; use ethers::{ prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, - solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, + solc::{artifacts::Libraries, Project, ProjectPathsConfig, utils::RuntimeOrHandle}, types::{Address, U256}, }; use foundry_evm::{ + CALLER, executor::{ - builder::Backend, - opts::{Env, EvmOpts}, - DatabaseRef, Executor, ExecutorBuilder, + DatabaseRef, + Executor, ExecutorBuilder, opts::{Env, EvmOpts}, }, fuzz::FuzzedExecutor, - CALLER, }; use std::str::FromStr; + use foundry_evm::executor::backend::Backend; pub static PROJECT: Lazy = Lazy::new(|| { let paths = ProjectPathsConfig::builder() diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index df0a3d78bc34..297031c85865 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -1,18 +1,19 @@ use crate::{ContractRunner, SuiteResult, TestFilter}; use ethers::{ abi::Abi, - prelude::{artifacts::CompactContractBytecode, ArtifactId, ArtifactOutput}, - solc::{utils::RuntimeOrHandle, Artifact, ProjectCompileOutput}, + prelude::{ArtifactId, ArtifactOutput, artifacts::CompactContractBytecode}, + solc::{Artifact, ProjectCompileOutput, utils::RuntimeOrHandle}, types::{Address, Bytes, U256}, }; use eyre::Result; use foundry_evm::executor::{ - builder::Backend, opts::EvmOpts, DatabaseRef, Executor, ExecutorBuilder, Fork, SpecId, + DatabaseRef, Executor, ExecutorBuilder, Fork, opts::EvmOpts, SpecId, }; use foundry_utils::PostLinkInput; use proptest::test_runner::TestRunner; use rayon::prelude::*; use std::{collections::BTreeMap, marker::Sync, path::Path, sync::mpsc::Sender}; +use foundry_evm::executor::backend::Backend; /// Builder used for instantiating the multi-contract runner #[derive(Debug, Default)] @@ -316,7 +317,7 @@ mod tests { use crate::{ decode::decode_console_logs, test_helpers::{ - filter::Filter, COMPILED, COMPILED_WITH_LIBS, EVM_OPTS, LIBS_PROJECT, PROJECT, + COMPILED, COMPILED_WITH_LIBS, EVM_OPTS, filter::Filter, LIBS_PROJECT, PROJECT, }, }; use foundry_evm::trace::TraceKind; From 74059f4b775ec0faa0f30f485a89882528bf19d5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 24 May 2022 19:02:45 +0200 Subject: [PATCH 002/102] refactor: move fork db to evm crate --- anvil/src/eth/backend/fork.rs | 2 +- anvil/src/eth/backend/mem/fork_db.rs | 232 +---------------- anvil/src/eth/backend/mem/in_memory_db.rs | 3 +- anvil/src/eth/backend/mem/mod.rs | 1 - evm/src/executor/backend.rs | 10 +- evm/src/executor/builder.rs | 22 +- evm/src/executor/fork/database.rs | 245 ++++++++++++++++++ evm/src/executor/fork/mod.rs | 4 +- evm/src/executor/fork/multi.rs | 1 - evm/src/executor/mod.rs | 33 ++- .../mem => evm/src/executor}/snapshot.rs | 2 +- forge/src/multi_runner.rs | 2 +- 12 files changed, 288 insertions(+), 269 deletions(-) create mode 100644 evm/src/executor/fork/database.rs rename {anvil/src/eth/backend/mem => evm/src/executor}/snapshot.rs (92%) diff --git a/anvil/src/eth/backend/fork.rs b/anvil/src/eth/backend/fork.rs index 678a80aa7bd3..db226aa183be 100644 --- a/anvil/src/eth/backend/fork.rs +++ b/anvil/src/eth/backend/fork.rs @@ -49,7 +49,7 @@ impl ClientFork { block_number: Option, ) -> Result<(), 
BlockchainError> { { - self.database.write().reset(url.clone(), block_number)?; + self.database.write().reset(url.clone(), block_number).map_err(BlockchainError::Internal)?; } if let Some(url) = url { diff --git a/anvil/src/eth/backend/mem/fork_db.rs b/anvil/src/eth/backend/mem/fork_db.rs index 0cacb0690236..0da9e9d4449f 100644 --- a/anvil/src/eth/backend/mem/fork_db.rs +++ b/anvil/src/eth/backend/mem/fork_db.rs @@ -1,251 +1,31 @@ use crate::{ eth::{ backend::db::{Db, StateDb}, - error::BlockchainError, }, - mem::snapshot::Snapshots, - revm::{db::DatabaseRef, Account, AccountInfo, Database, DatabaseCommit}, + revm::{AccountInfo}, Address, U256, }; -use bytes::Bytes; -use ethers::prelude::{H160, H256}; -use forge::HashMap as Map; -use foundry_evm::{ - executor::fork::{BlockchainDb, SharedBackend}, - revm::db::CacheDB, -}; -use parking_lot::Mutex; -use std::{collections::BTreeMap, sync::Arc}; -use tracing::{trace, warn}; +pub use foundry_evm::executor::fork::database::ForkedDatabase; /// Implement the helper for the fork database impl Db for ForkedDatabase { fn insert_account(&mut self, address: Address, account: AccountInfo) { - self.cache_db.insert_account(address, account) + self.database_mut().insert_account(address, account) } fn set_storage_at(&mut self, address: Address, slot: U256, val: U256) { - self.cache_db.set_storage_at(address, slot, val) + self.database_mut().set_storage_at(address, slot, val) } fn snapshot(&mut self) -> U256 { - let snapshot = self.create_snapshot(); - let mut snapshots = self.snapshots.lock(); - let id = snapshots.insert(snapshot); - trace!(target: "backend::forkdb", "Created new snapshot {}", id); - id + self.insert_snapshot() } fn revert(&mut self, id: U256) -> bool { - let snapshot = { self.snapshots.lock().remove(id) }; - if let Some(snapshot) = snapshot { - let DbSnapshot { accounts, storage, block_hashes, local } = snapshot; - let db = self.db.db(); - { - let mut accounts_lock = db.accounts.write(); - accounts_lock.clear(); - accounts_lock.extend(accounts); - } - { - let mut storage_lock = db.storage.write(); - storage_lock.clear(); - storage_lock.extend(storage); - } - { - let mut block_hashes_lock = db.block_hashes.write(); - block_hashes_lock.clear(); - block_hashes_lock.extend(block_hashes); - } - - self.cache_db = local; - - trace!(target: "backend::forkdb", "Reverted snapshot {}", id); - true - } else { - warn!(target: "backend::forkdb", "No snapshot to revert for {}", id); - false - } + self.revert_snapshot(id) } fn current_state(&self) -> StateDb { StateDb::new(self.create_snapshot()) } } - -/// a [revm::Database] that's forked off another client -/// -/// The `backend` is used to retrieve (missing) data, which is then fetched from the remote -/// endpoint. The inner in-memory database holds this storage and will be used for write operations. -/// This database uses the `backend` for read and the `db` for write operations. But note the -/// `backend` will also write (missing) data to the `db` in the background -#[derive(Debug, Clone)] -pub struct ForkedDatabase { - /// responsible for fetching missing data - /// - /// This is responsible for getting data - backend: SharedBackend, - /// Cached Database layer, ensures that changes are not written to the database that - /// exclusively stores the state of the remote client. 
- /// - /// This separates Read/Write operations - /// - reads from the `SharedBackend as DatabaseRef` writes to the internal cache storage - cache_db: CacheDB, - /// Contains all the data already fetched - /// - /// This exclusively stores the _unchanged_ remote client state - db: BlockchainDb, - /// holds the snapshot state of a blockchain - snapshots: Arc>>, -} - -impl ForkedDatabase { - /// Creates a new instance of this DB - pub fn new(backend: SharedBackend, db: BlockchainDb) -> Self { - Self { - cache_db: CacheDB::new(backend.clone()), - backend, - db, - snapshots: Arc::new(Mutex::new(Default::default())), - } - } - - /// Reset the fork to a fresh forked state, and optionally update the fork config - pub fn reset( - &mut self, - _url: Option, - block_number: Option, - ) -> Result<(), BlockchainError> { - if let Some(block_number) = block_number { - self.backend - .set_pinned_block(block_number) - .map_err(|err| BlockchainError::Internal(err.to_string()))?; - } - - // TODO need to find a way to update generic provider via url - - // wipe the storage retrieved from remote - self.db.db().clear(); - // create a fresh `CacheDB`, effectively wiping modified state - self.cache_db = CacheDB::new(self.backend.clone()); - trace!(target: "backend::forkdb", "Cleared database"); - Ok(()) - } - - /// Flushes the cache to disk if configured - pub fn flush_cache(&self) { - self.db.cache().flush() - } - - /// Returns the database that holds the remote state - pub fn inner(&self) -> &BlockchainDb { - &self.db - } - - fn create_snapshot(&self) -> DbSnapshot { - let db = self.db.db(); - DbSnapshot { - local: self.cache_db.clone(), - accounts: db.accounts.read().clone(), - storage: db.storage.read().clone(), - block_hashes: db.block_hashes.read().clone(), - } - } -} - -impl Database for ForkedDatabase { - fn basic(&mut self, address: Address) -> AccountInfo { - self.cache_db.basic(address) - } - - fn code_by_hash(&mut self, code_hash: H256) -> bytes::Bytes { - self.cache_db.code_by_hash(code_hash) - } - - fn storage(&mut self, address: Address, index: U256) -> U256 { - Database::storage(&mut self.cache_db, address, index) - } - - fn block_hash(&mut self, number: U256) -> H256 { - self.cache_db.block_hash(number) - } -} - -impl DatabaseRef for ForkedDatabase { - fn basic(&self, address: Address) -> AccountInfo { - self.cache_db.basic(address) - } - - fn code_by_hash(&self, code_hash: H256) -> bytes::Bytes { - self.cache_db.code_by_hash(code_hash) - } - - fn storage(&self, address: Address, index: U256) -> U256 { - DatabaseRef::storage(&self.cache_db, address, index) - } - - fn block_hash(&self, number: U256) -> H256 { - self.cache_db.block_hash(number) - } -} - -impl DatabaseCommit for ForkedDatabase { - fn commit(&mut self, changes: Map) { - self.cache_db.commit(changes) - } -} - -/// Represents a snapshot of the database -#[derive(Debug)] -struct DbSnapshot { - local: CacheDB, - accounts: BTreeMap, - storage: BTreeMap>, - block_hashes: BTreeMap, -} - -// === impl DbSnapshot === - -impl DbSnapshot { - fn get_storage(&self, address: H160, index: U256) -> Option { - self.local.storage().get(&address).and_then(|entry| entry.get(&index)).copied() - } -} - -// This `DatabaseRef` implementation works similar to `CacheDB` which prioritizes modified elements, -// and uses another db as fallback -// We prioritize stored changed accounts/storage -impl DatabaseRef for DbSnapshot { - fn basic(&self, address: H160) -> AccountInfo { - match self.local.cache().get(&address) { - Some(info) => info.clone(), - None => { 
- self.accounts.get(&address).cloned().unwrap_or_else(|| self.local.basic(address)) - } - } - } - - fn code_by_hash(&self, code_hash: H256) -> Bytes { - self.local.code_by_hash(code_hash) - } - - fn storage(&self, address: H160, index: U256) -> U256 { - match self.local.storage().get(&address) { - Some(entry) => match entry.get(&index) { - Some(entry) => *entry, - None => self - .get_storage(address, index) - .unwrap_or_else(|| DatabaseRef::storage(&self.local, address, index)), - }, - None => self - .get_storage(address, index) - .unwrap_or_else(|| DatabaseRef::storage(&self.local, address, index)), - } - } - - fn block_hash(&self, number: U256) -> H256 { - self.block_hashes - .get(&number.as_u64()) - .copied() - .unwrap_or_else(|| self.local.block_hash(number)) - } -} diff --git a/anvil/src/eth/backend/mem/in_memory_db.rs b/anvil/src/eth/backend/mem/in_memory_db.rs index 6c1f150334a7..ed453f4b6b5c 100644 --- a/anvil/src/eth/backend/mem/in_memory_db.rs +++ b/anvil/src/eth/backend/mem/in_memory_db.rs @@ -2,7 +2,7 @@ use crate::{ eth::backend::db::{Db, StateDb}, - mem::{snapshot::Snapshots, state::state_merkle_trie_root}, + mem::{state::state_merkle_trie_root}, revm::{db::DatabaseRef, Account, AccountInfo, Database, DatabaseCommit}, Address, U256, }; @@ -10,6 +10,7 @@ use bytes::Bytes; use ethers::prelude::{H160, H256}; use foundry_evm::{revm::InMemoryDB, HashMap as Map}; use tracing::{trace, warn}; +use foundry_evm::executor::snapshot::Snapshots; /// In memory Database for anvil /// diff --git a/anvil/src/eth/backend/mem/mod.rs b/anvil/src/eth/backend/mem/mod.rs index f17c3e8f9576..4109f6fbf53e 100644 --- a/anvil/src/eth/backend/mem/mod.rs +++ b/anvil/src/eth/backend/mem/mod.rs @@ -57,7 +57,6 @@ use tracing::{trace, warn}; pub mod fork_db; pub mod in_memory_db; -pub mod snapshot; pub mod state; pub mod storage; diff --git a/evm/src/executor/backend.rs b/evm/src/executor/backend.rs index 1eb9a2781c6e..3f810c8d21bc 100644 --- a/evm/src/executor/backend.rs +++ b/evm/src/executor/backend.rs @@ -1,8 +1,9 @@ -use revm::db::{DatabaseRef, EmptyDB}; +use crate::executor::{fork::SharedBackend, Fork}; use ethers::prelude::{H160, H256, U256}; -use revm::{AccountInfo, Env}; -use crate::executor::Fork; -use crate::executor::fork::SharedBackend; +use revm::{ + db::{DatabaseRef, EmptyDB}, + AccountInfo, Env, +}; /// Variants of a [revm::Database] #[derive(Debug, Clone)] @@ -12,7 +13,6 @@ pub enum Backend { /// A [revm::Database] that forks of a remote location and can have multiple consumers of the /// same data Forked(SharedBackend), - // TODO } diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index 6f9d43359ab7..ee1e24dafe72 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -1,21 +1,15 @@ -use ethers::prelude::Provider; -use revm::{ - Env, SpecId, -}; -use std::{path::PathBuf, sync::Arc}; - use super::{ - Executor, fork::SharedBackend, inspector::{Cheatcodes, InspectorStackConfig}, + Executor, }; - -use ethers::types::{U256}; - -use crate::executor::fork::{BlockchainDb, BlockchainDbMeta}; - - -use crate::executor::backend::Backend; +use crate::executor::{ + backend::Backend, + fork::{BlockchainDb, BlockchainDbMeta}, +}; +use ethers::{prelude::Provider, types::U256}; +use revm::{Env, SpecId}; +use std::{path::PathBuf, sync::Arc}; #[derive(Default, Debug)] pub struct ExecutorBuilder { diff --git a/evm/src/executor/fork/database.rs b/evm/src/executor/fork/database.rs new file mode 100644 index 000000000000..f982935915ce --- /dev/null +++ 
b/evm/src/executor/fork/database.rs @@ -0,0 +1,245 @@ +//! A revm database that forks off a remote client + +use crate::{ + executor::{ + fork::{BlockchainDb, SharedBackend}, + snapshot::Snapshots, + }, + revm::db::CacheDB, +}; +use bytes::Bytes; +use ethers::prelude::{Address, H256, U256}; +use hashbrown::HashMap as Map; +use parking_lot::Mutex; +use revm::{db::DatabaseRef, Account, AccountInfo, Database, DatabaseCommit}; +use std::{collections::BTreeMap, sync::Arc}; +use tracing::{trace, warn}; + +/// a [revm::Database] that's forked off another client +/// +/// The `backend` is used to retrieve (missing) data, which is then fetched from the remote +/// endpoint. The inner in-memory database holds this storage and will be used for write operations. +/// This database uses the `backend` for read and the `db` for write operations. But note the +/// `backend` will also write (missing) data to the `db` in the background +#[derive(Debug, Clone)] +pub struct ForkedDatabase { + /// responsible for fetching missing data + /// + /// This is responsible for getting data + backend: SharedBackend, + /// Cached Database layer, ensures that changes are not written to the database that + /// exclusively stores the state of the remote client. + /// + /// This separates Read/Write operations + /// - reads from the `SharedBackend as DatabaseRef` writes to the internal cache storage + cache_db: CacheDB, + /// Contains all the data already fetched + /// + /// This exclusively stores the _unchanged_ remote client state + db: BlockchainDb, + /// holds the snapshot state of a blockchain + snapshots: Arc>>, +} + +impl ForkedDatabase { + /// Creates a new instance of this DB + pub fn new(backend: SharedBackend, db: BlockchainDb) -> Self { + Self { + cache_db: CacheDB::new(backend.clone()), + backend, + db, + snapshots: Arc::new(Mutex::new(Default::default())), + } + } + + pub fn database(&self) -> &CacheDB { + &self.cache_db + } + + pub fn database_mut(&mut self) -> &mut CacheDB { + &mut self.cache_db + } + + pub fn snapshots(&self) -> &Arc>> { + &self.snapshots + } + + /// Reset the fork to a fresh forked state, and optionally update the fork config + pub fn reset( + &mut self, + _url: Option, + block_number: Option, + ) -> Result<(), String> { + if let Some(block_number) = block_number { + self.backend + .set_pinned_block(block_number) + .map_err(|err| err.to_string())?; + } + + // TODO need to find a way to update generic provider via url + + // wipe the storage retrieved from remote + self.inner().db().clear(); + // create a fresh `CacheDB`, effectively wiping modified state + self.cache_db = CacheDB::new(self.backend.clone()); + trace!(target: "backend::forkdb", "Cleared database"); + Ok(()) + } + + /// Flushes the cache to disk if configured + pub fn flush_cache(&self) { + self.db.cache().flush() + } + + /// Returns the database that holds the remote state + pub fn inner(&self) -> &BlockchainDb { + &self.db + } + + pub fn create_snapshot(&self) -> DbSnapshot { + let db = self.db.db(); + DbSnapshot { + local: self.cache_db.clone(), + accounts: db.accounts.read().clone(), + storage: db.storage.read().clone(), + block_hashes: db.block_hashes.read().clone(), + } + } + + pub fn insert_snapshot(&self) -> U256 { + let snapshot = self.create_snapshot(); + let mut snapshots = self.snapshots().lock(); + let id = snapshots.insert(snapshot); + trace!(target: "backend::forkdb", "Created new snapshot {}", id); + id + } + + pub fn revert_snapshot(&mut self, id: U256) -> bool { + let snapshot = { 
self.snapshots().lock().remove(id) }; + if let Some(snapshot) = snapshot { + let DbSnapshot { accounts, storage, block_hashes, local } = snapshot; + let db = self.inner().db(); + { + let mut accounts_lock = db.accounts.write(); + accounts_lock.clear(); + accounts_lock.extend(accounts); + } + { + let mut storage_lock = db.storage.write(); + storage_lock.clear(); + storage_lock.extend(storage); + } + { + let mut block_hashes_lock = db.block_hashes.write(); + block_hashes_lock.clear(); + block_hashes_lock.extend(block_hashes); + } + + self.cache_db = local; + + trace!(target: "backend::forkdb", "Reverted snapshot {}", id); + true + } else { + warn!(target: "backend::forkdb", "No snapshot to revert for {}", id); + false + } + } +} + +impl Database for ForkedDatabase { + fn basic(&mut self, address: Address) -> AccountInfo { + self.cache_db.basic(address) + } + + fn code_by_hash(&mut self, code_hash: H256) -> bytes::Bytes { + self.cache_db.code_by_hash(code_hash) + } + + fn storage(&mut self, address: Address, index: U256) -> U256 { + Database::storage(&mut self.cache_db, address, index) + } + + fn block_hash(&mut self, number: U256) -> H256 { + self.cache_db.block_hash(number) + } +} + +impl DatabaseRef for ForkedDatabase { + fn basic(&self, address: Address) -> AccountInfo { + self.cache_db.basic(address) + } + + fn code_by_hash(&self, code_hash: H256) -> bytes::Bytes { + self.cache_db.code_by_hash(code_hash) + } + + fn storage(&self, address: Address, index: U256) -> U256 { + DatabaseRef::storage(&self.cache_db, address, index) + } + + fn block_hash(&self, number: U256) -> H256 { + self.cache_db.block_hash(number) + } +} + +impl DatabaseCommit for ForkedDatabase { + fn commit(&mut self, changes: Map) { + self.database_mut().commit(changes) + } +} + +/// Represents a snapshot of the database +#[derive(Debug)] +pub struct DbSnapshot { + local: CacheDB, + accounts: BTreeMap, + storage: BTreeMap>, + block_hashes: BTreeMap, +} + +// === impl DbSnapshot === + +impl DbSnapshot { + fn get_storage(&self, address: Address, index: U256) -> Option { + self.local.storage().get(&address).and_then(|entry| entry.get(&index)).copied() + } +} + +// This `DatabaseRef` implementation works similar to `CacheDB` which prioritizes modified elements, +// and uses another db as fallback +// We prioritize stored changed accounts/storage +impl DatabaseRef for DbSnapshot { + fn basic(&self, address: Address) -> AccountInfo { + match self.local.cache().get(&address) { + Some(info) => info.clone(), + None => { + self.accounts.get(&address).cloned().unwrap_or_else(|| self.local.basic(address)) + } + } + } + + fn code_by_hash(&self, code_hash: H256) -> Bytes { + self.local.code_by_hash(code_hash) + } + + fn storage(&self, address: Address, index: U256) -> U256 { + match self.local.storage().get(&address) { + Some(entry) => match entry.get(&index) { + Some(entry) => *entry, + None => self + .get_storage(address, index) + .unwrap_or_else(|| DatabaseRef::storage(&self.local, address, index)), + }, + None => self + .get_storage(address, index) + .unwrap_or_else(|| DatabaseRef::storage(&self.local, address, index)), + } + } + + fn block_hash(&self, number: U256) -> H256 { + self.block_hashes + .get(&number.as_u64()) + .copied() + .unwrap_or_else(|| self.local.block_hash(number)) + } +} diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index a56c16400a17..bff41d746962 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -7,4 +7,6 @@ pub use init::environment; mod cache; pub use 
cache::{BlockchainDb, BlockchainDbMeta, JsonBlockCacheDB, MemDb}; -mod multi; \ No newline at end of file +pub mod database; + +mod multi; diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 4ef5ec5f208a..72b499b8f4fe 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -1,4 +1,3 @@ //! Support for running multiple fork backend // TODO move some types from avil fork to evm -// \ No newline at end of file diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 14ebd7240017..ee4add3a8556 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -1,11 +1,11 @@ /// ABIs used internally in the executor pub mod abi; +use self::inspector::{InspectorData, InspectorStackConfig}; +use crate::{debug::DebugArena, trace::CallTraceArena, CALLER}; pub use abi::{ patch_hardhat_console_selector, HardhatConsoleCalls, CHEATCODE_ADDRESS, CONSOLE_ABI, HARDHAT_CONSOLE_ABI, HARDHAT_CONSOLE_ADDRESS, }; -use self::inspector::{InspectorData, InspectorStackConfig}; -use crate::{debug::DebugArena, trace::CallTraceArena, CALLER}; use bytes::Bytes; use ethers::{ abi::{Abi, Detokenize, Tokenize}, @@ -15,30 +15,29 @@ use ethers::{ use eyre::Result; use foundry_utils::IntoFunction; use hashbrown::HashMap; +/// Reexport commonly used revm types +pub use revm::{db::DatabaseRef, Env, SpecId}; use revm::{ db::{CacheDB, DatabaseCommit, EmptyDB}, return_ok, Account, BlockEnv, CreateScheme, Return, TransactOut, TransactTo, TxEnv, EVM, }; use std::collections::BTreeMap; -/// Reexport commonly used revm types -pub use revm::{ - db::DatabaseRef, - SpecId, - Env -}; -/// Executor configuration -pub mod opts; -/// Executor inspectors -pub mod inspector; -/// Forking provider -pub mod fork; -/// Executor builder -pub mod builder; /// custom revm database implementations mod backend; +/// Executor builder +pub mod builder; +/// Forking provider +pub mod fork; +/// Executor inspectors +pub mod inspector; +/// Executor configuration +pub mod opts; +pub use backend::Backend; + +pub mod snapshot; -pub use builder::{ExecutorBuilder, Fork, Backend}; +pub use builder::{ExecutorBuilder, Fork}; /// A mapping of addresses to their changed state. pub type StateChangeset = HashMap; diff --git a/anvil/src/eth/backend/mem/snapshot.rs b/evm/src/executor/snapshot.rs similarity index 92% rename from anvil/src/eth/backend/mem/snapshot.rs rename to evm/src/executor/snapshot.rs index 53188736fc75..8d30d8ce6eee 100644 --- a/anvil/src/eth/backend/mem/snapshot.rs +++ b/evm/src/executor/snapshot.rs @@ -1,4 +1,4 @@ -//! support for snapshotting the state of the blockchain in memory +//! 
support for snapshotting different states use ethers::types::U256; use std::collections::HashMap; diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 297031c85865..627335b177da 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -13,7 +13,7 @@ use foundry_utils::PostLinkInput; use proptest::test_runner::TestRunner; use rayon::prelude::*; use std::{collections::BTreeMap, marker::Sync, path::Path, sync::mpsc::Sender}; -use foundry_evm::executor::backend::Backend; +use foundry_evm::executor::Backend; /// Builder used for instantiating the multi-contract runner #[derive(Debug, Default)] From b04b3fb2f8b258a071e365e4679224c2271f9775 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 24 May 2022 19:51:34 +0200 Subject: [PATCH 003/102] feat: design multifork --- evm/src/executor/fork/database.rs | 10 +--- evm/src/executor/fork/multi.rs | 78 +++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 8 deletions(-) diff --git a/evm/src/executor/fork/database.rs b/evm/src/executor/fork/database.rs index f982935915ce..43f4adc9828e 100644 --- a/evm/src/executor/fork/database.rs +++ b/evm/src/executor/fork/database.rs @@ -65,15 +65,9 @@ impl ForkedDatabase { } /// Reset the fork to a fresh forked state, and optionally update the fork config - pub fn reset( - &mut self, - _url: Option, - block_number: Option, - ) -> Result<(), String> { + pub fn reset(&mut self, _url: Option, block_number: Option) -> Result<(), String> { if let Some(block_number) = block_number { - self.backend - .set_pinned_block(block_number) - .map_err(|err| err.to_string())?; + self.backend.set_pinned_block(block_number).map_err(|err| err.to_string())?; } // TODO need to find a way to update generic provider via url diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 72b499b8f4fe..23e7dcc1b12d 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -1,3 +1,81 @@ //! Support for running multiple fork backend +//! +//! The design is similar to the single `SharedBackend`, `BackendHandler` but supports multiple +//! concurrently active pairs at once. + +use crate::executor::fork::{database::ForkedDatabase, BackendHandler}; +use ethers::providers::{Http, Provider}; +use futures::{ + channel::mpsc::{channel, Receiver, Sender}, + stream::Stream, + task::{Context, Poll}, + Future, FutureExt, +}; +use std::{collections::HashMap, pin::Pin}; // TODO move some types from avil fork to evm + +/// The identifier for a specific fork, this could be the name of the network a custom descriptive +/// name. 
+#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct ForkId(pub String); + +/// A database type that maintains multiple forks +#[derive(Debug, Clone)] +pub struct MutltiFork { + /// Channel to send `Request`s to the handler + handler: Sender, + /// All created databases for forks identified by their `ForkId` + forks: HashMap, + /// The currently active Database + active: ForkId, +} + +// === impl MultiFork === + +impl MutltiFork { + /// Creates a new pair of `MutltiFork` and its handler `MutltiForkHandler` + pub fn new(id: ForkId, db: ForkedDatabase) -> (MutltiFork, MutltiForkHandler) { + todo!() + } + + /// Creates a new pair and spawns the `MutltiForkHandler` on a background thread + pub fn spawn(id: ForkId, db: ForkedDatabase) -> MutltiFork { + todo!() + } + + /// Returns the identifier of the currently active fork + pub fn active_id(&self) -> &ForkId { + &self.active + } + + /// Returns the currently active database + pub fn active(&self) -> &ForkedDatabase { + &self.forks[self.active_id()] + } +} + +/// The type that manages connections in the background +#[derive(Debug)] +pub struct MutltiForkHandler { + /// Incoming requests from the `MultiFork`. + incoming: Receiver, + /// All active handlers + /// + /// It's expected that this list will be rather small + handlers: Vec<(ForkId, BackendHandler>)>, +} + +// Drives all handler to completion +// This future will finish once all underlying BackendHandler are completed +impl Future for MutltiForkHandler { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + todo!() + } +} + +/// Request that's send to the handler +#[derive(Debug)] +enum Request {} From 90e626b7d2f839052cba7009105074dd25344379 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 25 May 2022 00:52:42 +0200 Subject: [PATCH 004/102] feat: more multi handler work --- evm/src/executor/fork/multi.rs | 77 +++++++++++++++++++++++++++++++--- 1 file changed, 71 insertions(+), 6 deletions(-) diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 23e7dcc1b12d..7850da4ebb4e 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -12,6 +12,9 @@ use futures::{ Future, FutureExt, }; use std::{collections::HashMap, pin::Pin}; +use ethers::types::BlockId; +use futures::stream::Fuse; +use tracing::trace; // TODO move some types from avil fork to evm @@ -55,15 +58,40 @@ impl MutltiFork { } } +/// Request that's send to the handler +#[derive(Debug)] +enum Request { + Create { + fork_id: ForkId, + + endpoint: String, + + chain_id: Option, + + block: Option, + } +} + +type RequestFuture = +Pin >>; + /// The type that manages connections in the background #[derive(Debug)] pub struct MutltiForkHandler { /// Incoming requests from the `MultiFork`. 
- incoming: Receiver, + incoming: Fuse>, /// All active handlers /// /// It's expected that this list will be rather small handlers: Vec<(ForkId, BackendHandler>)>, + // requests currently in progress + requests: Vec +} + +// === impl MutltiForkHandler === + +impl MutltiForkHandler { + fn on_request(&mut self, req: Request) {} } // Drives all handler to completion @@ -72,10 +100,47 @@ impl Future for MutltiForkHandler { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - todo!() + let pin = self.get_mut(); + + // receive new requests + loop { + match Pin::new(&mut pin.incoming).poll_next(cx) { + Poll::Ready(Some(req)) => { + pin.on_request(req); + } + Poll::Ready(None) => { + // channel closed, but we still need to drive the fork handlers to completion + trace!(target: "fork::multi", "request channel closed"); + break + } + Poll::Pending => break, + } + } + + // advance all jobs + for n in (0..pin.requests.len()).rev() { + let mut request = pin.requests.swap_remove(n); + // TODO poll future + } + + // advance all handlers + for n in (0..pin.handlers.len()).rev() { + let (id, mut handler) = pin.handlers.swap_remove(n); + match handler.poll_unpin(cx) { + Poll::Ready(_) => { + trace!(target: "fork::multi", "fork {:?} completed", id); + } + Poll::Pending => { + pin.handlers.push((id, handler)); + } + } + } + + if pin.handlers.is_empty() && pin.incoming.is_done() { + trace!(target: "fork::multi", "completed"); + return Poll::Ready(()) + } + + Poll::Pending } } - -/// Request that's send to the handler -#[derive(Debug)] -enum Request {} From e065ad70a1bd7b5755e72d033cab624cf933ebb7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 25 May 2022 09:09:10 +0200 Subject: [PATCH 005/102] fix: use new paths --- anvil/src/eth/backend/fork.rs | 5 ++++- anvil/src/eth/backend/mem/fork_db.rs | 8 +++---- anvil/src/eth/backend/mem/in_memory_db.rs | 5 ++--- cli/src/cmd/cast/run.rs | 7 +++--- cli/src/cmd/forge/run.rs | 17 +++++++------- evm/src/executor/fork/multi.rs | 27 ++++++++--------------- forge/src/lib.rs | 10 ++++----- forge/src/multi_runner.rs | 9 ++++---- 8 files changed, 38 insertions(+), 50 deletions(-) diff --git a/anvil/src/eth/backend/fork.rs b/anvil/src/eth/backend/fork.rs index db226aa183be..33b3c959b97c 100644 --- a/anvil/src/eth/backend/fork.rs +++ b/anvil/src/eth/backend/fork.rs @@ -49,7 +49,10 @@ impl ClientFork { block_number: Option, ) -> Result<(), BlockchainError> { { - self.database.write().reset(url.clone(), block_number).map_err(BlockchainError::Internal)?; + self.database + .write() + .reset(url.clone(), block_number) + .map_err(BlockchainError::Internal)?; } if let Some(url) = url { diff --git a/anvil/src/eth/backend/mem/fork_db.rs b/anvil/src/eth/backend/mem/fork_db.rs index 0da9e9d4449f..0a85e4fa434f 100644 --- a/anvil/src/eth/backend/mem/fork_db.rs +++ b/anvil/src/eth/backend/mem/fork_db.rs @@ -1,8 +1,6 @@ use crate::{ - eth::{ - backend::db::{Db, StateDb}, - }, - revm::{AccountInfo}, + eth::backend::db::{Db, StateDb}, + revm::AccountInfo, Address, U256, }; pub use foundry_evm::executor::fork::database::ForkedDatabase; @@ -22,7 +20,7 @@ impl Db for ForkedDatabase { } fn revert(&mut self, id: U256) -> bool { - self.revert_snapshot(id) + self.revert_snapshot(id) } fn current_state(&self) -> StateDb { diff --git a/anvil/src/eth/backend/mem/in_memory_db.rs b/anvil/src/eth/backend/mem/in_memory_db.rs index ed453f4b6b5c..df21dc146cc2 100644 --- a/anvil/src/eth/backend/mem/in_memory_db.rs +++ b/anvil/src/eth/backend/mem/in_memory_db.rs @@ -2,15 
+2,14 @@ use crate::{ eth::backend::db::{Db, StateDb}, - mem::{state::state_merkle_trie_root}, + mem::state::state_merkle_trie_root, revm::{db::DatabaseRef, Account, AccountInfo, Database, DatabaseCommit}, Address, U256, }; use bytes::Bytes; use ethers::prelude::{H160, H256}; -use foundry_evm::{revm::InMemoryDB, HashMap as Map}; +use foundry_evm::{executor::snapshot::Snapshots, revm::InMemoryDB, HashMap as Map}; use tracing::{trace, warn}; -use foundry_evm::executor::snapshot::Snapshots; /// In memory Database for anvil /// diff --git a/cli/src/cmd/cast/run.rs b/cli/src/cmd/cast/run.rs index b8a3535778d7..dc4a666165bf 100644 --- a/cli/src/cmd/cast/run.rs +++ b/cli/src/cmd/cast/run.rs @@ -9,8 +9,8 @@ use ethers::{ }; use forge::{ debug::DebugArena, - executor::{DeployResult, ExecutorBuilder, opts::EvmOpts, RawCallResult}, - trace::{CallTraceArena, CallTraceDecoderBuilder, identifier::EtherscanIdentifier, TraceKind}, + executor::{opts::EvmOpts, Backend, DeployResult, ExecutorBuilder, RawCallResult}, + trace::{identifier::EtherscanIdentifier, CallTraceArena, CallTraceDecoderBuilder, TraceKind}, }; use foundry_config::Config; use std::{ @@ -18,9 +18,8 @@ use std::{ str::FromStr, time::Duration, }; -use ui::{Tui, TUIExitReason, Ui}; +use ui::{TUIExitReason, Tui, Ui}; use yansi::Paint; -use foundry_evm::executor::Backend; #[derive(Debug, Clone, Parser)] pub struct RunArgs { diff --git a/cli/src/cmd/forge/run.rs b/cli/src/cmd/forge/run.rs index f8af28fa95c9..bd23a291e505 100644 --- a/cli/src/cmd/forge/run.rs +++ b/cli/src/cmd/forge/run.rs @@ -1,5 +1,5 @@ use crate::{ - cmd::{Cmd, forge::build::CoreBuildArgs}, + cmd::{forge::build::CoreBuildArgs, Cmd}, compile, utils, }; use clap::{Parser, ValueHint}; @@ -8,28 +8,27 @@ use ethers::{ prelude::ArtifactId, solc::{ artifacts::{CompactContractBytecode, ContractBytecode, ContractBytecodeSome}, - Project, utils::RuntimeOrHandle, + Project, }, types::{Address, Bytes, Log, U256}, }; use forge::{ - CALLER, debug::DebugArena, decode::decode_console_logs, executor::{ - CallResult, DatabaseRef, DeployResult, EvmError, Executor, ExecutorBuilder, - opts::EvmOpts, RawCallResult, + opts::EvmOpts, Backend, CallResult, DatabaseRef, DeployResult, EvmError, Executor, + ExecutorBuilder, RawCallResult, }, - trace::{CallTraceArena, CallTraceDecoderBuilder, identifier::LocalTraceIdentifier, TraceKind}, + trace::{identifier::LocalTraceIdentifier, CallTraceArena, CallTraceDecoderBuilder, TraceKind}, + CALLER, }; use foundry_common::evm::EvmArgs; -use foundry_config::{Config, figment::Figment}; +use foundry_config::{figment::Figment, Config}; use foundry_utils::{encode_args, format_token, IntoFunction, PostLinkInput}; use std::{collections::BTreeMap, path::PathBuf}; -use ui::{Tui, TUIExitReason, Ui}; +use ui::{TUIExitReason, Tui, Ui}; use yansi::Paint; -use foundry_evm::executor::Backend; // Loads project's figment and merges the build cli arguments into it foundry_config::impl_figment_convert!(RunArgs, opts, evm_opts); diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 7850da4ebb4e..028920f032ad 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -4,16 +4,17 @@ //! concurrently active pairs at once. 
use crate::executor::fork::{database::ForkedDatabase, BackendHandler}; -use ethers::providers::{Http, Provider}; +use ethers::{ + providers::{Http, Provider}, + types::BlockId, +}; use futures::{ channel::mpsc::{channel, Receiver, Sender}, - stream::Stream, + stream::{Fuse, Stream}, task::{Context, Poll}, Future, FutureExt, }; use std::{collections::HashMap, pin::Pin}; -use ethers::types::BlockId; -use futures::stream::Fuse; use tracing::trace; // TODO move some types from avil fork to evm @@ -61,22 +62,12 @@ impl MutltiFork { /// Request that's send to the handler #[derive(Debug)] enum Request { - Create { - fork_id: ForkId, - - endpoint: String, - - chain_id: Option, - - block: Option, - } + Create { fork_id: ForkId, endpoint: String, chain_id: Option, block: Option }, } -type RequestFuture = -Pin >>; +type RequestFuture = Pin + 'static + Send>>; /// The type that manages connections in the background -#[derive(Debug)] pub struct MutltiForkHandler { /// Incoming requests from the `MultiFork`. incoming: Fuse>, @@ -85,7 +76,7 @@ pub struct MutltiForkHandler { /// It's expected that this list will be rather small handlers: Vec<(ForkId, BackendHandler>)>, // requests currently in progress - requests: Vec + requests: Vec, } // === impl MutltiForkHandler === @@ -136,7 +127,7 @@ impl Future for MutltiForkHandler { } } - if pin.handlers.is_empty() && pin.incoming.is_done() { + if pin.handlers.is_empty() && pin.incoming.is_done() { trace!(target: "fork::multi", "completed"); return Poll::Ready(()) } diff --git a/forge/src/lib.rs b/forge/src/lib.rs index 1c1c7edea244..253ef57acb7a 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -23,19 +23,19 @@ pub mod test_helpers { use crate::TestFilter; use ethers::{ prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, - solc::{artifacts::Libraries, Project, ProjectPathsConfig, utils::RuntimeOrHandle}, + solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, types::{Address, U256}, }; use foundry_evm::{ - CALLER, executor::{ - DatabaseRef, - Executor, ExecutorBuilder, opts::{Env, EvmOpts}, + backend::Backend, + opts::{Env, EvmOpts}, + DatabaseRef, Executor, ExecutorBuilder, }, fuzz::FuzzedExecutor, + CALLER, }; use std::str::FromStr; - use foundry_evm::executor::backend::Backend; pub static PROJECT: Lazy = Lazy::new(|| { let paths = ProjectPathsConfig::builder() diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 627335b177da..19f4a6a9ae19 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -1,19 +1,18 @@ use crate::{ContractRunner, SuiteResult, TestFilter}; use ethers::{ abi::Abi, - prelude::{ArtifactId, ArtifactOutput, artifacts::CompactContractBytecode}, - solc::{Artifact, ProjectCompileOutput, utils::RuntimeOrHandle}, + prelude::{artifacts::CompactContractBytecode, ArtifactId, ArtifactOutput}, + solc::{utils::RuntimeOrHandle, Artifact, ProjectCompileOutput}, types::{Address, Bytes, U256}, }; use eyre::Result; use foundry_evm::executor::{ - DatabaseRef, Executor, ExecutorBuilder, Fork, opts::EvmOpts, SpecId, + opts::EvmOpts, Backend, DatabaseRef, Executor, ExecutorBuilder, Fork, SpecId, }; use foundry_utils::PostLinkInput; use proptest::test_runner::TestRunner; use rayon::prelude::*; use std::{collections::BTreeMap, marker::Sync, path::Path, sync::mpsc::Sender}; -use foundry_evm::executor::Backend; /// Builder used for instantiating the multi-contract runner #[derive(Debug, Default)] @@ -317,7 +316,7 @@ mod tests { use crate::{ decode::decode_console_logs, 
test_helpers::{ - COMPILED, COMPILED_WITH_LIBS, EVM_OPTS, filter::Filter, LIBS_PROJECT, PROJECT, + filter::Filter, COMPILED, COMPILED_WITH_LIBS, EVM_OPTS, LIBS_PROJECT, PROJECT, }, }; use foundry_evm::trace::TraceKind; From 9088be55ad9718c7402d687bfaf6bd2fa7d19c29 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 2 Jun 2022 17:15:20 +0200 Subject: [PATCH 006/102] describe cheatcodes --- evm/src/executor/abi.rs | 2 ++ evm/src/executor/fork/multi.rs | 4 ++-- testdata/cheats/Cheats.sol | 13 +++++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index 8ce415a8bfa8..48bb45cba39b 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -68,6 +68,8 @@ ethers::contract::abigen!( startBroadcast() startBroadcast(address) stopBroadcast() + snapshot()(uint256) + revertTo(uint256) ]"#, ); pub use hevm_mod::{HEVMCalls, HEVM_ABI}; diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 028920f032ad..695a7535d499 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -73,13 +73,13 @@ pub struct MutltiForkHandler { incoming: Fuse>, /// All active handlers /// - /// It's expected that this list will be rather small + /// It's expected that this list will be rather small (<10) handlers: Vec<(ForkId, BackendHandler>)>, // requests currently in progress requests: Vec, } -// === impl MutltiForkHandler === +// === impl MultiForkHandler === impl MutltiForkHandler { fn on_request(&mut self, req: Request) {} diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index c2b89c6d9085..978f0838a9bc 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -103,4 +103,17 @@ interface Cheats { function startBroadcast(address) external; // Stops collecting onchain transactions function stopBroadcast() external; + // Snapshot the current state of the evm. + // Returns the id of the snapshot that was created. + // To revert a snapshot use `evmRevert` + function snapshot() external returns(uint256); + // Revert the state of the evm to a previous snapshot + // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. + function revertTo(uint256) external; + // manually enables forking mode for the current test + function setFork(string,uint256) external; + // manually enables forking mode for the current test with the latest block number + function setFork(string) external; + // forks the `block` variable from the given endpoint + function forkBlockVariable(string, uint256) external; } From 3db5c4b021879d714a7b8446c80b92db4b3ebc00 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 Jun 2022 13:09:51 +0200 Subject: [PATCH 007/102] chore: tune cheatcodes --- testdata/cheats/Cheats.sol | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 978f0838a9bc..e0073f29a402 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -105,15 +105,17 @@ interface Cheats { function stopBroadcast() external; // Snapshot the current state of the evm. // Returns the id of the snapshot that was created. - // To revert a snapshot use `evmRevert` + // To revert a snapshot use `revertTo` function snapshot() external returns(uint256); // Revert the state of the evm to a previous snapshot // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. 
function revertTo(uint256) external; - // manually enables forking mode for the current test - function setFork(string,uint256) external; - // manually enables forking mode for the current test with the latest block number - function setFork(string) external; + // Creates a new fork with the given endpoint and block and returns the identifier of the fork + function createFork(string,uint256) external returns(uint256); + // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork + function createFork(string) external returns(uint256); + // takes a fork identifier created by `createFork` and changes the state + function switchFork(uint256) external; // forks the `block` variable from the given endpoint function forkBlockVariable(string, uint256) external; } From 1e811693d6dbbed2a882c93713b6da8b7ed26bf0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 Jun 2022 14:23:20 +0200 Subject: [PATCH 008/102] refactor: move in memory db to evm --- anvil/src/eth/backend/mem/in_memory_db.rs | 65 ++----------------- evm/src/executor/backend/in_memory_db.rs | 65 +++++++++++++++++++ .../executor/{backend.rs => backend/mod.rs} | 21 +++++- evm/src/executor/builder.rs | 5 +- evm/src/executor/fork/multi.rs | 10 +-- evm/src/executor/mod.rs | 2 +- 6 files changed, 96 insertions(+), 72 deletions(-) create mode 100644 evm/src/executor/backend/in_memory_db.rs rename evm/src/executor/{backend.rs => backend/mod.rs} (73%) diff --git a/anvil/src/eth/backend/mem/in_memory_db.rs b/anvil/src/eth/backend/mem/in_memory_db.rs index df21dc146cc2..e3028f55a41c 100644 --- a/anvil/src/eth/backend/mem/in_memory_db.rs +++ b/anvil/src/eth/backend/mem/in_memory_db.rs @@ -3,70 +3,13 @@ use crate::{ eth::backend::db::{Db, StateDb}, mem::state::state_merkle_trie_root, - revm::{db::DatabaseRef, Account, AccountInfo, Database, DatabaseCommit}, + revm::{db::DatabaseRef, AccountInfo, Database}, Address, U256, }; -use bytes::Bytes; -use ethers::prelude::{H160, H256}; -use foundry_evm::{executor::snapshot::Snapshots, revm::InMemoryDB, HashMap as Map}; +use ethers::prelude::H256; use tracing::{trace, warn}; - -/// In memory Database for anvil -/// -/// This acts like a wrapper type for [InMemoryDB] but is capable of applying snapshots -#[derive(Debug)] -pub struct MemDb { - inner: InMemoryDB, - snapshots: Snapshots, -} - -impl Default for MemDb { - fn default() -> Self { - Self { inner: InMemoryDB::default(), snapshots: Default::default() } - } -} - -impl DatabaseRef for MemDb { - fn basic(&self, address: H160) -> AccountInfo { - DatabaseRef::basic(&self.inner, address) - } - - fn code_by_hash(&self, code_hash: H256) -> Bytes { - DatabaseRef::code_by_hash(&self.inner, code_hash) - } - - fn storage(&self, address: H160, index: U256) -> U256 { - DatabaseRef::storage(&self.inner, address, index) - } - - fn block_hash(&self, number: U256) -> H256 { - DatabaseRef::block_hash(&self.inner, number) - } -} - -impl Database for MemDb { - fn basic(&mut self, address: H160) -> AccountInfo { - Database::basic(&mut self.inner, address) - } - - fn code_by_hash(&mut self, code_hash: H256) -> Bytes { - Database::code_by_hash(&mut self.inner, code_hash) - } - - fn storage(&mut self, address: H160, index: U256) -> U256 { - Database::storage(&mut self.inner, address, index) - } - - fn block_hash(&mut self, number: U256) -> H256 { - Database::block_hash(&mut self.inner, number) - } -} - -impl DatabaseCommit for MemDb { - fn commit(&mut self, changes: Map) { - DatabaseCommit::commit(&mut self.inner, changes) 
- } -} +// reexport for convenience +pub use foundry_evm::executor::backend::MemDb; impl Db for MemDb { fn insert_account(&mut self, address: Address, account: AccountInfo) { diff --git a/evm/src/executor/backend/in_memory_db.rs b/evm/src/executor/backend/in_memory_db.rs new file mode 100644 index 000000000000..be30bfca1600 --- /dev/null +++ b/evm/src/executor/backend/in_memory_db.rs @@ -0,0 +1,65 @@ +//! The in memory DB + +use bytes::Bytes; +use ethers::prelude::{H160, H256, U256}; +use hashbrown::HashMap as Map; +use revm::{db::DatabaseRef, Account, AccountInfo, Database, DatabaseCommit, InMemoryDB}; + +use crate::executor::snapshot::Snapshots; + +/// In memory Database for anvil +/// +/// This acts like a wrapper type for [InMemoryDB] but is capable of applying snapshots +#[derive(Debug)] +pub struct MemDb { + pub inner: InMemoryDB, + pub snapshots: Snapshots, +} + +impl Default for MemDb { + fn default() -> Self { + Self { inner: InMemoryDB::default(), snapshots: Default::default() } + } +} + +impl DatabaseRef for MemDb { + fn basic(&self, address: H160) -> AccountInfo { + DatabaseRef::basic(&self.inner, address) + } + + fn code_by_hash(&self, code_hash: H256) -> Bytes { + DatabaseRef::code_by_hash(&self.inner, code_hash) + } + + fn storage(&self, address: H160, index: U256) -> U256 { + DatabaseRef::storage(&self.inner, address, index) + } + + fn block_hash(&self, number: U256) -> H256 { + DatabaseRef::block_hash(&self.inner, number) + } +} + +impl Database for MemDb { + fn basic(&mut self, address: H160) -> AccountInfo { + Database::basic(&mut self.inner, address) + } + + fn code_by_hash(&mut self, code_hash: H256) -> Bytes { + Database::code_by_hash(&mut self.inner, code_hash) + } + + fn storage(&mut self, address: H160, index: U256) -> U256 { + Database::storage(&mut self.inner, address, index) + } + + fn block_hash(&mut self, number: U256) -> H256 { + Database::block_hash(&mut self.inner, number) + } +} + +impl DatabaseCommit for MemDb { + fn commit(&mut self, changes: Map) { + DatabaseCommit::commit(&mut self.inner, changes) + } +} diff --git a/evm/src/executor/backend.rs b/evm/src/executor/backend/mod.rs similarity index 73% rename from evm/src/executor/backend.rs rename to evm/src/executor/backend/mod.rs index 3f810c8d21bc..e6a005130a4a 100644 --- a/evm/src/executor/backend.rs +++ b/evm/src/executor/backend/mod.rs @@ -5,6 +5,24 @@ use revm::{ AccountInfo, Env, }; +mod in_memory_db; +pub use in_memory_db::MemDb; + +/// Provides the underlying `revm::Database` implementation. +/// +/// A `Backend` can be initialised in two forms: +/// +/// # 1. Empty in-memory Database +/// This is the default variant: an empty `revm::Database` +/// +/// # 2. Forked Database +/// A `revm::Database` that forks off a remote client +/// +/// In addition to that we support forking manually on the fly. +/// Additional forks can be created and their state can be switched manually. 
+#[derive(Debug, Clone)] +pub struct Backend2 {} + /// Variants of a [revm::Database] #[derive(Debug, Clone)] pub enum Backend { @@ -26,8 +44,9 @@ impl Backend { } } + /// Creates an empty in memory database pub fn simple() -> Self { - Backend::Simple(EmptyDB()) + Backend::Simple(EmptyDB::default()) } } diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index d436bd1e9386..83f04778fc42 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -11,10 +11,7 @@ use ethers::{ providers::{Http, Provider, RetryClient}, types::U256, }; -use revm::{ - db::{DatabaseRef, EmptyDB}, - Env, SpecId, -}; +use revm::{Env, SpecId}; use std::{path::PathBuf, sync::Arc}; #[derive(Default, Debug)] diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 695a7535d499..a0da1c99166a 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -9,7 +9,7 @@ use ethers::{ types::BlockId, }; use futures::{ - channel::mpsc::{channel, Receiver, Sender}, + channel::mpsc::{Receiver, Sender}, stream::{Fuse, Stream}, task::{Context, Poll}, Future, FutureExt, @@ -39,12 +39,12 @@ pub struct MutltiFork { impl MutltiFork { /// Creates a new pair of `MutltiFork` and its handler `MutltiForkHandler` - pub fn new(id: ForkId, db: ForkedDatabase) -> (MutltiFork, MutltiForkHandler) { + pub fn new(_id: ForkId, _db: ForkedDatabase) -> (MutltiFork, MutltiForkHandler) { todo!() } /// Creates a new pair and spawns the `MutltiForkHandler` on a background thread - pub fn spawn(id: ForkId, db: ForkedDatabase) -> MutltiFork { + pub fn spawn(_id: ForkId, _db: ForkedDatabase) -> MutltiFork { todo!() } @@ -82,7 +82,7 @@ pub struct MutltiForkHandler { // === impl MultiForkHandler === impl MutltiForkHandler { - fn on_request(&mut self, req: Request) {} + fn on_request(&mut self, _req: Request) {} } // Drives all handler to completion @@ -110,7 +110,7 @@ impl Future for MutltiForkHandler { // advance all jobs for n in (0..pin.requests.len()).rev() { - let mut request = pin.requests.swap_remove(n); + let _request = pin.requests.swap_remove(n); // TODO poll future } diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index a13df8e2e85d..23ad837c2562 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -24,7 +24,7 @@ use revm::{ use std::collections::{BTreeMap, VecDeque}; /// custom revm database implementations -mod backend; +pub mod backend; /// Executor builder pub mod builder; /// Forking provider From 2426feb14935d031dc2765ea007852650f98f362 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 Jun 2022 14:36:38 +0200 Subject: [PATCH 009/102] docs: add some docs --- evm/src/executor/backend/mod.rs | 7 ++++++- evm/src/executor/fork/mod.rs | 1 + evm/src/executor/fork/multi.rs | 4 +--- evm/src/executor/mod.rs | 1 - forge/src/multi_runner.rs | 8 +++++++- 5 files changed, 15 insertions(+), 6 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index e6a005130a4a..779cf661d3a4 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -6,6 +6,7 @@ use revm::{ }; mod in_memory_db; +use crate::executor::fork::MutltiFork; pub use in_memory_db::MemDb; /// Provides the underlying `revm::Database` implementation. @@ -20,8 +21,12 @@ pub use in_memory_db::MemDb; /// /// In addition to that we support forking manually on the fly. /// Additional forks can be created and their state can be switched manually. 
+/// #[derive(Debug, Clone)] -pub struct Backend2 {} +pub struct Backend2 { + /// The access point for managing forks + forks: MutltiFork, +} /// Variants of a [revm::Database] #[derive(Debug, Clone)] diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index bff41d746962..bee6e8830806 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -10,3 +10,4 @@ pub use cache::{BlockchainDb, BlockchainDbMeta, JsonBlockCacheDB, MemDb}; pub mod database; mod multi; +pub use multi::{MutltiFork, MutltiForkHandler}; diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index a0da1c99166a..f2dab78506dc 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -17,14 +17,12 @@ use futures::{ use std::{collections::HashMap, pin::Pin}; use tracing::trace; -// TODO move some types from avil fork to evm - /// The identifier for a specific fork, this could be the name of the network a custom descriptive /// name. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct ForkId(pub String); -/// A database type that maintains multiple forks +/// A database type that can maintain multiple forks #[derive(Debug, Clone)] pub struct MutltiFork { /// Channel to send `Request`s to the handler diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 23ad837c2562..ce6b3b13ca98 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -166,7 +166,6 @@ pub struct Executor { // Also, if we stored the VM here we would still need to // take `&mut self` when we are not committing to the database, since // we need to set `evm.env`. - // TODO in order to support multiforks we need to move this to the `Backend` pub db: CacheDB, env: Env, inspector_config: InspectorStackConfig, diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 784019307eb9..d6adcc391ad5 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -229,6 +229,12 @@ impl MultiContractRunner { }) } + /// Executes all tests that match the given `filter` + /// + /// This will create the runtime based on the configured `evm` ops and create the `Backend` + /// before executing all contracts and their tests in _parallel_. + /// + /// Each Executor gets its own instance of the `Backend`. 
pub fn test( &mut self, filter: &(impl TestFilter + Send + Sync), @@ -238,7 +244,7 @@ impl MultiContractRunner { let runtime = RuntimeOrHandle::new(); let env = runtime.block_on(self.evm_opts.evm_env()); - // the db backend that serves all the data + // the db backend that serves all the data, each contract gets its own clone let db = runtime.block_on(Backend::new(self.fork.take(), &env)); let results = self From ae54fb07c78c08dddf7afeee29f26e5cb946d887 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 Jun 2022 17:03:19 +0200 Subject: [PATCH 010/102] refactor: redesign multifork backend --- Cargo.lock | 37 +++++++++++++++++++-- evm/Cargo.toml | 2 +- evm/src/executor/backend/mod.rs | 54 ++++++++++++++++++++++++++++--- evm/src/executor/fork/database.rs | 16 ++++----- evm/src/executor/fork/mod.rs | 2 +- evm/src/executor/fork/multi.rs | 40 ++++++++++------------- 6 files changed, 110 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a89c88fd6ffb..4a0351c27fd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1995,7 +1995,7 @@ dependencies = [ "once_cell", "parking_lot 0.12.0", "proptest", - "revm", + "revm 1.3.1", "serde", "serde_json", "tempfile", @@ -4018,12 +4018,43 @@ dependencies = [ "hex", "num_enum", "primitive-types", - "revm_precompiles", + "revm_precompiles 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "rlp", "serde", "sha3 0.10.1", ] +[[package]] +name = "revm" +version = "1.3.1" +dependencies = [ + "arrayref", + "auto_impl", + "bytes", + "hashbrown 0.12.0", + "hex", + "num_enum", + "primitive-types", + "revm_precompiles 1.0.0", + "rlp", + "serde", + "sha3 0.10.1", +] + +[[package]] +name = "revm_precompiles" +version = "1.0.0" +dependencies = [ + "bytes", + "k256", + "num 0.4.0", + "primitive-types", + "ripemd", + "sha2 0.10.2", + "sha3 0.10.1", + "substrate-bn", +] + [[package]] name = "revm_precompiles" version = "1.0.0" @@ -5197,7 +5228,7 @@ dependencies = [ "eyre", "forge", "hex", - "revm", + "revm 1.3.0", "tui", ] diff --git a/evm/Cargo.toml b/evm/Cargo.toml index 8be0758b6a03..403a66e67778 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -36,7 +36,7 @@ once_cell = "1.9.0" # EVM bytes = "1.1.0" hashbrown = "0.12" -revm = { version="1.3", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } +revm = { path = "../../revm/crates/revm", version="1.3", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } # Fuzzer proptest = "1.0.0" diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 779cf661d3a4..a1393cf33fca 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -1,12 +1,15 @@ use crate::executor::{fork::SharedBackend, Fork}; use ethers::prelude::{H160, H256, U256}; use revm::{ - db::{DatabaseRef, EmptyDB}, - AccountInfo, Env, + db::{CacheDB, DatabaseRef, EmptyDB}, + AccountInfo, Env, InMemoryDB, }; mod in_memory_db; -use crate::executor::fork::MutltiFork; +use crate::executor::{ + fork::{database::ForkDbSnapshot, ForkId, MultiFork}, + snapshot::Snapshots, +}; pub use in_memory_db::MemDb; /// Provides the underlying `revm::Database` implementation. @@ -21,11 +24,52 @@ pub use in_memory_db::MemDb; /// /// In addition to that we support forking manually on the fly. /// Additional forks can be created and their state can be switched manually. 
-/// #[derive(Debug, Clone)] pub struct Backend2 { /// The access point for managing forks - forks: MutltiFork, + pub forks: MultiFork, + /// The database that holds the entire state, uses an internal database depending on current + /// state + pub db: CacheDB, + /// Contains snapshots made at a certain point + snapshots: Snapshots, +} + +// === impl Backend === + +impl Backend2 { + /// Creates a new instance of `Backend` + /// + /// This will spawn a new background thread that manages forks and will establish a fork if + /// `fork` is `Some`. If `fork` is `None` this `Backend` will launch with an in-memory + /// Database + pub fn new(fork: Option, env: &Env) -> Self { + todo!() + } + + pub fn insert_snapshot(&self) -> U256 { + todo!() + } + + pub fn revert_snapshot(&mut self, id: U256) -> bool { + todo!() + } +} + +/// The Database that holds the state +#[derive(Debug, Clone)] +enum BackendDatabase { + /// Backend is an in-memory `revm::Database` + Memory(InMemoryDB), + /// Backed is currently serving data from the remote endpoint identified by the `ForkId` + Fork(SharedBackend, ForkId), +} + +/// Represents a snapshot of the entire state +#[derive(Debug, Clone)] +enum BackendSnapshot { + Memory(InMemoryDB), + Fork(ForkDbSnapshot), } /// Variants of a [revm::Database] diff --git a/evm/src/executor/fork/database.rs b/evm/src/executor/fork/database.rs index 43f4adc9828e..a9ab6fbe4846 100644 --- a/evm/src/executor/fork/database.rs +++ b/evm/src/executor/fork/database.rs @@ -38,7 +38,7 @@ pub struct ForkedDatabase { /// This exclusively stores the _unchanged_ remote client state db: BlockchainDb, /// holds the snapshot state of a blockchain - snapshots: Arc>>, + snapshots: Arc>>, } impl ForkedDatabase { @@ -60,7 +60,7 @@ impl ForkedDatabase { &mut self.cache_db } - pub fn snapshots(&self) -> &Arc>> { + pub fn snapshots(&self) -> &Arc>> { &self.snapshots } @@ -90,9 +90,9 @@ impl ForkedDatabase { &self.db } - pub fn create_snapshot(&self) -> DbSnapshot { + pub fn create_snapshot(&self) -> ForkDbSnapshot { let db = self.db.db(); - DbSnapshot { + ForkDbSnapshot { local: self.cache_db.clone(), accounts: db.accounts.read().clone(), storage: db.storage.read().clone(), @@ -111,7 +111,7 @@ impl ForkedDatabase { pub fn revert_snapshot(&mut self, id: U256) -> bool { let snapshot = { self.snapshots().lock().remove(id) }; if let Some(snapshot) = snapshot { - let DbSnapshot { accounts, storage, block_hashes, local } = snapshot; + let ForkDbSnapshot { accounts, storage, block_hashes, local } = snapshot; let db = self.inner().db(); { let mut accounts_lock = db.accounts.write(); @@ -184,7 +184,7 @@ impl DatabaseCommit for ForkedDatabase { /// Represents a snapshot of the database #[derive(Debug)] -pub struct DbSnapshot { +pub struct ForkDbSnapshot { local: CacheDB, accounts: BTreeMap, storage: BTreeMap>, @@ -193,7 +193,7 @@ pub struct DbSnapshot { // === impl DbSnapshot === -impl DbSnapshot { +impl ForkDbSnapshot { fn get_storage(&self, address: Address, index: U256) -> Option { self.local.storage().get(&address).and_then(|entry| entry.get(&index)).copied() } @@ -202,7 +202,7 @@ impl DbSnapshot { // This `DatabaseRef` implementation works similar to `CacheDB` which prioritizes modified elements, // and uses another db as fallback // We prioritize stored changed accounts/storage -impl DatabaseRef for DbSnapshot { +impl DatabaseRef for ForkDbSnapshot { fn basic(&self, address: Address) -> AccountInfo { match self.local.cache().get(&address) { Some(info) => info.clone(), diff --git a/evm/src/executor/fork/mod.rs 
b/evm/src/executor/fork/mod.rs index bee6e8830806..94f846df0c59 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -10,4 +10,4 @@ pub use cache::{BlockchainDb, BlockchainDbMeta, JsonBlockCacheDB, MemDb}; pub mod database; mod multi; -pub use multi::{MutltiFork, MutltiForkHandler}; +pub use multi::{ForkId, MultiFork, MultiForkHandler}; diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index f2dab78506dc..25adeeb736c9 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -3,7 +3,13 @@ //! The design is similar to the single `SharedBackend`, `BackendHandler` but supports multiple //! concurrently active pairs at once. -use crate::executor::fork::{database::ForkedDatabase, BackendHandler}; +use crate::executor::{ + fork::{ + database::{ForkDbSnapshot, ForkedDatabase}, + BackendHandler, SharedBackend, + }, + snapshot::Snapshots, +}; use ethers::{ providers::{Http, Provider}, types::BlockId, @@ -24,37 +30,25 @@ pub struct ForkId(pub String); /// A database type that can maintain multiple forks #[derive(Debug, Clone)] -pub struct MutltiFork { +pub struct MultiFork { /// Channel to send `Request`s to the handler handler: Sender, /// All created databases for forks identified by their `ForkId` forks: HashMap, - /// The currently active Database - active: ForkId, } -// === impl MultiFork === +// === impl MultiForkBackend === -impl MutltiFork { - /// Creates a new pair of `MutltiFork` and its handler `MutltiForkHandler` - pub fn new(_id: ForkId, _db: ForkedDatabase) -> (MutltiFork, MutltiForkHandler) { +impl MultiFork { + /// Creates a new pair of `MutltiFork` and its handler `MultiForkHandler` + pub fn new(_id: ForkId, _db: ForkedDatabase) -> (MultiFork, MultiForkHandler) { todo!() } - /// Creates a new pair and spawns the `MutltiForkHandler` on a background thread - pub fn spawn(_id: ForkId, _db: ForkedDatabase) -> MutltiFork { + /// Creates a new pair and spawns the `MultiForkHandler` on a background thread + pub fn spawn(_id: ForkId, _db: ForkedDatabase) -> MultiFork { todo!() } - - /// Returns the identifier of the currently active fork - pub fn active_id(&self) -> &ForkId { - &self.active - } - - /// Returns the currently active database - pub fn active(&self) -> &ForkedDatabase { - &self.forks[self.active_id()] - } } /// Request that's send to the handler @@ -66,7 +60,7 @@ enum Request { type RequestFuture = Pin + 'static + Send>>; /// The type that manages connections in the background -pub struct MutltiForkHandler { +pub struct MultiForkHandler { /// Incoming requests from the `MultiFork`. 
incoming: Fuse>, /// All active handlers @@ -79,13 +73,13 @@ pub struct MutltiForkHandler { // === impl MultiForkHandler === -impl MutltiForkHandler { +impl MultiForkHandler { fn on_request(&mut self, _req: Request) {} } // Drives all handler to completion // This future will finish once all underlying BackendHandler are completed -impl Future for MutltiForkHandler { +impl Future for MultiForkHandler { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { From f10b2c067330bbde0c97b61afa549f762faa9b15 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 Jun 2022 17:32:18 +0200 Subject: [PATCH 011/102] feat: api improvements --- evm/src/executor/backend/mod.rs | 20 ++++++++++++++++++-- evm/src/executor/fork/mod.rs | 14 ++++++++++++++ evm/src/executor/fork/multi.rs | 8 ++++++-- 3 files changed, 38 insertions(+), 4 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index a1393cf33fca..63934654481b 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -7,7 +7,7 @@ use revm::{ mod in_memory_db; use crate::executor::{ - fork::{database::ForkDbSnapshot, ForkId, MultiFork}, + fork::{database::ForkDbSnapshot, CreateFork, ForkId, MultiFork}, snapshot::Snapshots, }; pub use in_memory_db::MemDb; @@ -27,7 +27,7 @@ pub use in_memory_db::MemDb; #[derive(Debug, Clone)] pub struct Backend2 { /// The access point for managing forks - pub forks: MultiFork, + forks: MultiFork, /// The database that holds the entire state, uses an internal database depending on current /// state pub db: CacheDB, @@ -54,6 +54,22 @@ impl Backend2 { pub fn revert_snapshot(&mut self, id: U256) -> bool { todo!() } + + /// Creates a new fork but does _not_ select it + pub fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { + self.forks.create_fork(fork) + } + + /// Selects the fork's state + /// + /// **Note**: this does not change the local state, but swaps the remote state + /// + /// # Errors + /// + /// Returns an error if no fork with the given `id` exists + pub fn select_fork(&mut self, id: ForkId) -> eyre::Result<()> { + todo!() + } } /// The Database that holds the state diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index 94f846df0c59..9c9dcf407a0e 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -1,5 +1,8 @@ mod backend; + pub use backend::{BackendHandler, SharedBackend}; +use ethers::types::BlockNumber; +use std::path::PathBuf; mod init; pub use init::environment; @@ -11,3 +14,14 @@ pub mod database; mod multi; pub use multi::{ForkId, MultiFork, MultiForkHandler}; + +/// Represents a _fork_ of a remote chain whose data is available only via the `url` endpoint. 
+#[derive(Debug)] +pub struct CreateFork { + /// Where to read the cached storage from + pub cache_path: Option, + /// The URL to a node for fetching remote state + pub url: String, + /// The block to fork against + pub block: BlockNumber, +} diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 25adeeb736c9..ec048902e05e 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -6,7 +6,7 @@ use crate::executor::{ fork::{ database::{ForkDbSnapshot, ForkedDatabase}, - BackendHandler, SharedBackend, + BackendHandler, CreateFork, SharedBackend, }, snapshot::Snapshots, }; @@ -49,12 +49,16 @@ impl MultiFork { pub fn spawn(_id: ForkId, _db: ForkedDatabase) -> MultiFork { todo!() } + + pub fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { + todo!() + } } /// Request that's send to the handler #[derive(Debug)] enum Request { - Create { fork_id: ForkId, endpoint: String, chain_id: Option, block: Option }, + Create(CreateFork), } type RequestFuture = Pin + 'static + Send>>; From d7b1119c967a8293ea6c146b3de1b30b55d2f244 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 Jun 2022 17:33:05 +0200 Subject: [PATCH 012/102] chore: bump revm --- Cargo.lock | 41 +++++------------------------------------ evm/Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4a0351c27fd0..d99341bed089 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1995,7 +1995,7 @@ dependencies = [ "once_cell", "parking_lot 0.12.0", "proptest", - "revm 1.3.1", + "revm", "serde", "serde_json", "tempfile", @@ -4007,9 +4007,9 @@ dependencies = [ [[package]] name = "revm" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806e90d2a967e11e3d704fac1ccf39978756991c26cf1dcecc80d9a763d6b559" +checksum = "61c4fff7e8fb0b20699c4ff4a132c342763883004abca680a25037a07cb8a828" dependencies = [ "arrayref", "auto_impl", @@ -4018,43 +4018,12 @@ dependencies = [ "hex", "num_enum", "primitive-types", - "revm_precompiles 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "revm_precompiles", "rlp", "serde", "sha3 0.10.1", ] -[[package]] -name = "revm" -version = "1.3.1" -dependencies = [ - "arrayref", - "auto_impl", - "bytes", - "hashbrown 0.12.0", - "hex", - "num_enum", - "primitive-types", - "revm_precompiles 1.0.0", - "rlp", - "serde", - "sha3 0.10.1", -] - -[[package]] -name = "revm_precompiles" -version = "1.0.0" -dependencies = [ - "bytes", - "k256", - "num 0.4.0", - "primitive-types", - "ripemd", - "sha2 0.10.2", - "sha3 0.10.1", - "substrate-bn", -] - [[package]] name = "revm_precompiles" version = "1.0.0" @@ -5228,7 +5197,7 @@ dependencies = [ "eyre", "forge", "hex", - "revm 1.3.0", + "revm", "tui", ] diff --git a/evm/Cargo.toml b/evm/Cargo.toml index 403a66e67778..c83e8a055f6d 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -36,7 +36,7 @@ once_cell = "1.9.0" # EVM bytes = "1.1.0" hashbrown = "0.12" -revm = { path = "../../revm/crates/revm", version="1.3", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } +revm = { version="1.4", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } # Fuzzer proptest = "1.0.0" From 7c7928c55fba2e0423f625b0bf53cb5c76013893 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 3 Jun 2022 17:59:22 +0200 Subject: [PATCH 013/102] docs: more backend docs --- evm/src/executor/backend/mod.rs | 45 ++++++++++++++++++++++++++------- 1 file changed, 36 
insertions(+), 9 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 63934654481b..0adaa46b11ba 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -22,8 +22,42 @@ pub use in_memory_db::MemDb; /// # 2. Forked Database /// A `revm::Database` that forks off a remote client /// +/// /// In addition to that we support forking manually on the fly. -/// Additional forks can be created and their state can be switched manually. +/// Additional forks can be created. Each unique fork is identified by its unique `ForkId`. We treat +/// forks as unique if they have the same `(endpoint, block number)` pair. +/// +/// When it comes to testing, it's intended that each contract will use its own `Backend` +/// (`Backend::clone`). This way each contract uses its own encapsulated evm state. For in-memory +/// testing, the database is just an owned `revm::InMemoryDB`. +/// +/// The `db` if fork-mode basically consists of 2 halves: +/// - everything fetched from the remote is readonly +/// - all local changes (instructed by the contract) are written to the backend's `db` and don't +/// alter the state of the remote client. This way a fork (`SharedBackend`), can be used by +/// multiple contracts at the same time. +/// +/// # Fork swapping +/// +/// Multiple "forks" can be created `Backend::create_fork()`, however only 1 can be used by the +/// `db`. However, their state can be hot-swapped by swapping the read half of `db` from one fork to +/// another. +/// +/// **Note:** this only affects the readonly half of the `db`, local changes are persistent across +/// fork-state swaps. +/// +/// # Snapshotting +/// +/// A snapshot of the current overall state can be taken at any point in time. A snapshot is +/// identified by a unique id that's returned when a snapshot is created. A snapshot can only be +/// reverted _once_. After a successful revert, the same snapshot id cannot be used again. Reverting +/// a snapshot replaces the current active state with the snapshot state, the snapshot is deleted +/// afterwards, as well as any snapshots taken after the reverted snapshot, (e.g.: reverting to id +/// 0x1 will delete snapshots with ids 0x1, 0x2, etc.) +/// +/// **Note:** Snapshots work across fork-swaps, e.g. if fork `A` is currently active, then a +/// snapshot is created before fork `B` is selected, then fork `A` will be the active fork again +/// after reverting the snapshot. 
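// A sketch of the fork-swap and snapshot semantics described above, assuming the
// `create_fork`/`select_fork`/`snapshot`/`revert` methods added to this struct in
// the surrounding patches; `fork_a` and `fork_b` are placeholder configs.
fn fork_swap_example(
    backend: &mut Backend2,
    fork_a: CreateFork,
    fork_b: CreateFork,
) -> eyre::Result<()> {
    let a = backend.create_fork(fork_a)?;
    let b = backend.create_fork(fork_b)?;

    backend.select_fork(a)?;       // serve read-only remote state from fork A
    let snap = backend.snapshot(); // capture the overall state, including the active fork
    backend.select_fork(b)?;       // hot-swap the read half to fork B; local writes persist

    // reverting restores fork A as the active fork and consumes the snapshot id
    assert!(backend.revert(snap));
    assert!(!backend.revert(snap)); // a snapshot can only be reverted once
    Ok(())
}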
#[derive(Debug, Clone)] pub struct Backend2 { /// The access point for managing forks @@ -32,7 +66,7 @@ pub struct Backend2 { /// state pub db: CacheDB, /// Contains snapshots made at a certain point - snapshots: Snapshots, + snapshots: Snapshots>, } // === impl Backend === @@ -81,13 +115,6 @@ enum BackendDatabase { Fork(SharedBackend, ForkId), } -/// Represents a snapshot of the entire state -#[derive(Debug, Clone)] -enum BackendSnapshot { - Memory(InMemoryDB), - Fork(ForkDbSnapshot), -} - /// Variants of a [revm::Database] #[derive(Debug, Clone)] pub enum Backend { From c27657be06288ab3e4351ac07b250660ad295de7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 Jun 2022 16:31:17 +0200 Subject: [PATCH 014/102] feat: implement multifork creation --- evm/src/executor/backend/mod.rs | 43 ++++++---- evm/src/executor/fork/mod.rs | 3 + evm/src/executor/fork/multi.rs | 143 ++++++++++++++++++++++++++------ forge/src/runner.rs | 7 ++ 4 files changed, 154 insertions(+), 42 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 0adaa46b11ba..bfafb38ccc9a 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -2,12 +2,13 @@ use crate::executor::{fork::SharedBackend, Fork}; use ethers::prelude::{H160, H256, U256}; use revm::{ db::{CacheDB, DatabaseRef, EmptyDB}, - AccountInfo, Env, InMemoryDB, + AccountInfo, Env, }; +use tracing::{trace, warn}; mod in_memory_db; use crate::executor::{ - fork::{database::ForkDbSnapshot, CreateFork, ForkId, MultiFork}, + fork::{CreateFork, ForkId, MultiFork}, snapshot::Snapshots, }; pub use in_memory_db::MemDb; @@ -77,21 +78,36 @@ impl Backend2 { /// This will spawn a new background thread that manages forks and will establish a fork if /// `fork` is `Some`. If `fork` is `None` this `Backend` will launch with an in-memory /// Database - pub fn new(fork: Option, env: &Env) -> Self { + pub fn new(_fork: Option, _env: &Env) -> Self { todo!() } - pub fn insert_snapshot(&self) -> U256 { - todo!() + /// Creates a new snapshot + pub fn snapshot(&mut self) -> U256 { + let id = self.snapshots.insert(self.db.clone()); + trace!(target: "backend", "Created new snapshot {}", id); + id } - pub fn revert_snapshot(&mut self, id: U256) -> bool { - todo!() + /// Reverts the snapshot if it exists + /// + /// Returns `true` if the snapshot was successfully reverted, `false` if no snapshot for that id + /// exists. 
+ pub fn revert(&mut self, id: U256) -> bool { + if let Some(snapshot) = self.snapshots.remove(id) { + self.db = snapshot; + trace!(target: "backend", "Reverted snapshot {}", id); + true + } else { + warn!(target: "backend", "No snapshot to revert for {}", id); + false + } } /// Creates a new fork but does _not_ select it pub fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - self.forks.create_fork(fork) + self.forks.create_fork(fork); + todo!() } /// Selects the fork's state @@ -101,20 +117,11 @@ impl Backend2 { /// # Errors /// /// Returns an error if no fork with the given `id` exists - pub fn select_fork(&mut self, id: ForkId) -> eyre::Result<()> { + pub fn select_fork(&mut self, _id: ForkId) -> eyre::Result<()> { todo!() } } -/// The Database that holds the state -#[derive(Debug, Clone)] -enum BackendDatabase { - /// Backend is an in-memory `revm::Database` - Memory(InMemoryDB), - /// Backed is currently serving data from the remote endpoint identified by the `ForkId` - Fork(SharedBackend, ForkId), -} - /// Variants of a [revm::Database] #[derive(Debug, Clone)] pub enum Backend { diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index 9c9dcf407a0e..641b6c104f82 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -2,6 +2,7 @@ mod backend; pub use backend::{BackendHandler, SharedBackend}; use ethers::types::BlockNumber; +use revm::Env; use std::path::PathBuf; mod init; @@ -24,4 +25,6 @@ pub struct CreateFork { pub url: String, /// The block to fork against pub block: BlockNumber, + /// The env to create this fork, main purpose is to provide some metadata for the fork + pub env: Env, } diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index ec048902e05e..ec682d163f1b 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -3,24 +3,28 @@ //! The design is similar to the single `SharedBackend`, `BackendHandler` but supports multiple //! concurrently active pairs at once. 
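// A sketch of registering one of these concurrently active forks, assuming the
// `CreateFork` config from fork/mod.rs and an already spawned `MultiFork` handle;
// the endpoint below is a placeholder.
fn add_fork(forks: &MultiFork, env: Env) -> eyre::Result<(ForkId, SharedBackend)> {
    let fork = CreateFork {
        cache_path: None,                         // skip the on-disk JSON cache here
        url: "http://localhost:8545".to_string(), // placeholder endpoint
        block: BlockNumber::Latest,
        env,                                      // metadata (chain id, block env) for the fork
    };
    // requests for the same (endpoint, block) pair are answered from the existing fork
    forks.create_fork(fork)
}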
-use crate::executor::{ - fork::{ - database::{ForkDbSnapshot, ForkedDatabase}, - BackendHandler, CreateFork, SharedBackend, - }, - snapshot::Snapshots, -}; +use crate::executor::fork::{database::ForkedDatabase, BackendHandler, CreateFork, SharedBackend}; use ethers::{ - providers::{Http, Provider}, - types::BlockId, + providers::{Http, Provider, RetryClient}, + types::BlockNumber, }; + +use crate::executor::fork::{BlockchainDb, BlockchainDbMeta}; +use ethers::prelude::Middleware; use futures::{ channel::mpsc::{Receiver, Sender}, stream::{Fuse, Stream}, task::{Context, Poll}, Future, FutureExt, }; -use std::{collections::HashMap, pin::Pin}; +use std::{ + collections::HashMap, + pin::Pin, + sync::{ + mpsc::{channel as oneshot_channel, Sender as OneshotSender}, + Arc, + }, +}; use tracing::trace; /// The identifier for a specific fork, this could be the name of the network a custom descriptive @@ -33,8 +37,6 @@ pub struct ForkId(pub String); pub struct MultiFork { /// Channel to send `Request`s to the handler handler: Sender, - /// All created databases for forks identified by their `ForkId` - forks: HashMap, } // === impl MultiForkBackend === @@ -50,18 +52,33 @@ impl MultiFork { todo!() } - pub fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - todo!() + pub fn create_fork(&self, fork: CreateFork) -> eyre::Result<(ForkId, SharedBackend)> { + let (sender, rx) = oneshot_channel(); + let req = Request::CreateFork(Box::new(fork), sender); + self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; + rx.recv()? } } +type Handler = BackendHandler>>>; + +type CreateFuture = Pin> + Send>>; + +type CreateSender = OneshotSender>; + /// Request that's send to the handler #[derive(Debug)] enum Request { - Create(CreateFork), + /// Creates a new ForkBackend + CreateFork(Box, CreateSender), + /// Returns the Fork backend for the `ForkId` if it exists + GetBacked(ForkId, OneshotSender>), } -type RequestFuture = Pin + 'static + Send>>; +enum ForkTask { + /// Contains the future that will establish a new fork + Create(CreateFuture, ForkId, CreateSender), +} /// The type that manages connections in the background pub struct MultiForkHandler { @@ -70,15 +87,41 @@ pub struct MultiForkHandler { /// All active handlers /// /// It's expected that this list will be rather small (<10) - handlers: Vec<(ForkId, BackendHandler>)>, - // requests currently in progress - requests: Vec, + handlers: Vec<(ForkId, Handler)>, + // tasks currently in progress + pending_tasks: Vec, + /// All created Forks + forks: HashMap, + + /// The retries to allow for new providers + retries: u32, + /// Initial backoff delay for requests + backoff: u64, } // === impl MultiForkHandler === impl MultiForkHandler { - fn on_request(&mut self, _req: Request) {} + fn on_request(&mut self, req: Request) { + match req { + Request::CreateFork(fork, sender) => { + let fork_id = create_fork_id(&fork.url, fork.block); + if let Some(fork) = self.forks.get(&fork_id).cloned() { + let _ = sender.send(Ok((fork_id, fork))); + } else { + let retries = self.retries; + let backoff = self.backoff; + // need to create a new fork + let task = Box::pin(async move { create_fork(*fork, retries, backoff).await }); + self.pending_tasks.push(ForkTask::Create(task, fork_id, sender)); + } + } + Request::GetBacked(fork_id, sender) => { + let fork = self.forks.get(&fork_id).cloned(); + let _ = sender.send(fork); + } + } + } } // Drives all handler to completion @@ -104,10 +147,27 @@ impl Future for MultiForkHandler { } } - // advance all jobs - 
for n in (0..pin.requests.len()).rev() { - let _request = pin.requests.swap_remove(n); - // TODO poll future + // advance all tasks + for n in (0..pin.pending_tasks.len()).rev() { + let task = pin.pending_tasks.swap_remove(n); + match task { + ForkTask::Create(mut fut, id, sender) => { + if let Poll::Ready(resp) = fut.poll_unpin(cx) { + match resp { + Ok((fork, handler)) => { + pin.handlers.push((id.clone(), handler)); + pin.forks.insert(id.clone(), fork.clone()); + let _ = sender.send(Ok((id, fork))); + } + Err(err) => { + let _ = sender.send(Err(err)); + } + } + } else { + pin.pending_tasks.push(ForkTask::Create(fut, id, sender)); + } + } + } } // advance all handlers @@ -131,3 +191,38 @@ impl Future for MultiForkHandler { Poll::Pending } } + +/// Returns the identifier for a Fork which consists of the url and the block number +fn create_fork_id(url: &str, num: BlockNumber) -> ForkId { + ForkId(format!("{url}@{num:?}")) +} + +/// Creates a new fork +/// +/// This will establish a new `Provider` to the endpoint and return the Fork Backend +async fn create_fork( + fork: CreateFork, + retries: u32, + backoff: u64, +) -> eyre::Result<(SharedBackend, Handler)> { + let CreateFork { cache_path, url, block: block_number, env } = fork; + let provider = Arc::new(Provider::>::new_client( + url.clone().as_str(), + retries, + backoff, + )?); + let mut meta = BlockchainDbMeta::new(env, url); + + // update the meta to match the forked config + meta.cfg_env.chain_id = provider.get_chainid().await?; + + let number = match block_number { + BlockNumber::Pending | BlockNumber::Latest => provider.get_block_number().await?.as_u64(), + BlockNumber::Earliest => 0, + BlockNumber::Number(num) => num.as_u64(), + }; + meta.block_env.number = number.into(); + + let db = BlockchainDb::new(meta, cache_path); + todo!() +} diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 65c63057faef..568d4fd7e5bc 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -385,6 +385,8 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { .map(|func| (func, func.name.starts_with("testFail"))) .collect(); + // TODO(mattsse): while tests don't modify the state we have cheatcodes that affect the state (fork cheat codes, snapshots), so in order to execute all tests in parallel they need their own copy of the `Executor`, + let test_results = tests .par_iter() .filter_map(|(func, should_fail)| { @@ -413,6 +415,11 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { Ok(SuiteResult::new(duration, test_results, warnings)) } + /// Runs a single test + /// + /// Calls the given functions and returns the `TestResult`. + /// + /// State modifications are not committed to the evm database but discarded after the call, similar to `eth_call`. #[tracing::instrument(name = "test", skip_all, fields(name = %func.signature(), %should_fail))] pub fn run_test( &self, From 2a3507f2a2821a39dafe18e39d48682c8fa77db7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 Jun 2022 21:20:58 +0200 Subject: [PATCH 015/102] style: simplify locking --- evm/src/executor/fork/backend.rs | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/evm/src/executor/fork/backend.rs b/evm/src/executor/fork/backend.rs index cbd7cbe53605..468d7a7f21e8 100644 --- a/evm/src/executor/fork/backend.rs +++ b/evm/src/executor/fork/backend.rs @@ -1,7 +1,7 @@ //! 
Smart caching and deduplication of requests when using a forking provider use revm::{db::DatabaseRef, AccountInfo, KECCAK_EMPTY}; -use crate::executor::fork::BlockchainDb; +use crate::executor::fork::{cache::FlushJsonBlockCacheDB, BlockchainDb}; use ethers::{ core::abi::ethereum_types::BigEndianHash, providers::Middleware, @@ -14,8 +14,6 @@ use futures::{ task::{Context, Poll}, Future, FutureExt, }; - -use crate::executor::fork::cache::FlushJsonBlockCacheDB; use std::{ collections::{hash_map::Entry, HashMap, VecDeque}, pin::Pin, @@ -110,21 +108,15 @@ where match req { BackendRequest::Basic(addr, sender) => { trace!(target: "backendhandler", "received request basic address={:?}", addr); - let lock = self.db.accounts().read(); - let basic = lock.get(&addr).cloned(); - // release the lock - drop(lock); - if let Some(basic) = basic { + let acc = self.db.accounts().read().get(&addr).cloned(); + if let Some(basic) = acc { let _ = sender.send(basic); } else { self.request_account(addr, sender); } } BackendRequest::BlockHash(number, sender) => { - let lock = self.db.block_hashes().read(); - let hash = lock.get(&number).cloned(); - // release the lock - drop(lock); + let hash = self.db.block_hashes().read().get(&number).cloned(); if let Some(hash) = hash { let _ = sender.send(hash); } else { @@ -132,13 +124,9 @@ where } } BackendRequest::Storage(addr, idx, sender) => { - let lock = self.db.storage().read(); - let acc = lock.get(&addr); - let value = acc.and_then(|acc| acc.get(&idx).copied()); - // release the lock - drop(lock); - // account is already stored in the cache + let value = + self.db.storage().read().get(&addr).and_then(|acc| acc.get(&idx).copied()); if let Some(value) = value { let _ = sender.send(value); } else { From 28769a48db89086669a369facd17355a2ce4ee5f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 Jun 2022 21:21:15 +0200 Subject: [PATCH 016/102] feat: add spawning --- evm/src/executor/fork/multi.rs | 57 +++++++++++++++++++++++++++------- 1 file changed, 45 insertions(+), 12 deletions(-) diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index ec682d163f1b..e6f9736f7db0 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -3,19 +3,19 @@ //! The design is similar to the single `SharedBackend`, `BackendHandler` but supports multiple //! concurrently active pairs at once. 
-use crate::executor::fork::{database::ForkedDatabase, BackendHandler, CreateFork, SharedBackend}; +use crate::executor::fork::{ + BackendHandler, BlockchainDb, BlockchainDbMeta, CreateFork, SharedBackend, +}; use ethers::{ + prelude::Middleware, providers::{Http, Provider, RetryClient}, types::BlockNumber, }; - -use crate::executor::fork::{BlockchainDb, BlockchainDbMeta}; -use ethers::prelude::Middleware; use futures::{ - channel::mpsc::{Receiver, Sender}, + channel::mpsc::{channel, Receiver, Sender}, stream::{Fuse, Stream}, task::{Context, Poll}, - Future, FutureExt, + Future, FutureExt, StreamExt, }; use std::{ collections::HashMap, @@ -42,14 +42,30 @@ pub struct MultiFork { // === impl MultiForkBackend === impl MultiFork { - /// Creates a new pair of `MutltiFork` and its handler `MultiForkHandler` - pub fn new(_id: ForkId, _db: ForkedDatabase) -> (MultiFork, MultiForkHandler) { - todo!() + /// Creates a new pair multi fork pair + pub fn new() -> (Self, MultiForkHandler) { + let (handler, handler_rx) = channel(1); + (Self { handler }, MultiForkHandler::new(handler_rx)) } /// Creates a new pair and spawns the `MultiForkHandler` on a background thread - pub fn spawn(_id: ForkId, _db: ForkedDatabase) -> MultiFork { - todo!() + pub fn spawn() -> Self { + let (fork, handler) = Self::new(); + // spawn a light-weight thread with a thread-local async runtime just for + // sending and receiving data from the remote client(s) + let _ = std::thread::Builder::new() + .name("multi-fork-backend-thread".to_string()) + .spawn(move || { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("failed to create multi-fork-backend-thread tokio runtime"); + + rt.block_on(async move { handler.await }); + }) + .expect("failed to spawn multi fork handler thread"); + trace!(target: "fork::multi", "spawned MultiForkHandler thread"); + fork } pub fn create_fork(&self, fork: CreateFork) -> eyre::Result<(ForkId, SharedBackend)> { @@ -81,20 +97,25 @@ enum ForkTask { } /// The type that manages connections in the background +#[must_use = "MultiForkHandler does nothing unless polled."] pub struct MultiForkHandler { /// Incoming requests from the `MultiFork`. 
incoming: Fuse>, + /// All active handlers /// /// It's expected that this list will be rather small (<10) handlers: Vec<(ForkId, Handler)>, + // tasks currently in progress pending_tasks: Vec, - /// All created Forks + + /// All created Forks in order to reuse them forks: HashMap, /// The retries to allow for new providers retries: u32, + /// Initial backoff delay for requests backoff: u64, } @@ -102,6 +123,18 @@ pub struct MultiForkHandler { // === impl MultiForkHandler === impl MultiForkHandler { + fn new(incoming: Receiver) -> Self { + Self { + incoming: incoming.fuse(), + handlers: Default::default(), + pending_tasks: Default::default(), + forks: Default::default(), + retries: 8, + // 800ms + backoff: 800, + } + } + fn on_request(&mut self, req: Request) { match req { Request::CreateFork(fork, sender) => { From a35a5e63c038bc81fcdbe320b950cfea1ea42479 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 4 Jun 2022 22:43:07 +0200 Subject: [PATCH 017/102] feat: refactor backend types --- evm/src/executor/backend/mod.rs | 61 ++++++++++++++++++++++++++++----- evm/src/executor/fork/multi.rs | 11 +++--- 2 files changed, 60 insertions(+), 12 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index bfafb38ccc9a..97dd5df1b96b 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -65,9 +65,9 @@ pub struct Backend2 { forks: MultiFork, /// The database that holds the entire state, uses an internal database depending on current /// state - pub db: CacheDB, + pub db: CacheDB, /// Contains snapshots made at a certain point - snapshots: Snapshots>, + snapshots: Snapshots>, } // === impl Backend === @@ -75,11 +75,17 @@ pub struct Backend2 { impl Backend2 { /// Creates a new instance of `Backend` /// - /// This will spawn a new background thread that manages forks and will establish a fork if - /// `fork` is `Some`. 
If `fork` is `None` this `Backend` will launch with an in-memory - /// Database - pub fn new(_fork: Option, _env: &Env) -> Self { - todo!() + /// if `fork` is `Some` this will launch with a `fork` database, otherwise with an in-memory + /// database + pub fn new(forks: MultiFork, fork: Option) -> Self { + let db = if let Some(f) = fork { + let (id, fork) = forks.create_fork(f).expect("Unable to fork"); + CacheDB::new(BackendDatabase::Forked(fork, id)) + } else { + CacheDB::new(BackendDatabase::Simple(EmptyDB())) + }; + + Self { forks, db, snapshots: Default::default() } } /// Creates a new snapshot @@ -122,6 +128,46 @@ impl Backend2 { } } +/// Variants of a [revm::Database] +#[derive(Debug, Clone)] +pub enum BackendDatabase { + /// Simple in memory [revm::Database] + Simple(EmptyDB), + /// A [revm::Database] that forks of a remote location and can have multiple consumers of the + /// same data + Forked(SharedBackend, ForkId), +} + +impl DatabaseRef for BackendDatabase { + fn basic(&self, address: H160) -> AccountInfo { + match self { + BackendDatabase::Simple(inner) => inner.basic(address), + BackendDatabase::Forked(inner, _) => inner.basic(address), + } + } + + fn code_by_hash(&self, address: H256) -> bytes::Bytes { + match self { + BackendDatabase::Simple(inner) => inner.code_by_hash(address), + BackendDatabase::Forked(inner, _) => inner.code_by_hash(address), + } + } + + fn storage(&self, address: H160, index: U256) -> U256 { + match self { + BackendDatabase::Simple(inner) => inner.storage(address, index), + BackendDatabase::Forked(inner, _) => inner.storage(address, index), + } + } + + fn block_hash(&self, number: U256) -> H256 { + match self { + BackendDatabase::Simple(inner) => inner.block_hash(number), + BackendDatabase::Forked(inner, _) => inner.block_hash(number), + } + } +} + /// Variants of a [revm::Database] #[derive(Debug, Clone)] pub enum Backend { @@ -130,7 +176,6 @@ pub enum Backend { /// A [revm::Database] that forks of a remote location and can have multiple consumers of the /// same data Forked(SharedBackend), - // TODO } impl Backend { diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index e6f9736f7db0..adf0fca1f2a4 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -32,7 +32,8 @@ use tracing::trace; #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct ForkId(pub String); -/// A database type that can maintain multiple forks +/// The Sender half of multi fork pair. +/// Can send requests to the `MultiForkHandler` to create forks #[derive(Debug, Clone)] pub struct MultiFork { /// Channel to send `Request`s to the handler @@ -49,11 +50,13 @@ impl MultiFork { } /// Creates a new pair and spawns the `MultiForkHandler` on a background thread - pub fn spawn() -> Self { + /// + /// Also returns the `JoinHandle` of the spawned thread. 
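// Illustrative wiring of the two halves described above: spawn the `MultiFork`
// pair on its background thread, then hand the sender half to the `Backend`;
// `fork_config` is a placeholder `Option<CreateFork>`.
fn spawn_test_backend(fork_config: Option<CreateFork>) -> Backend2 {
    // keeping the join handle makes the handler thread joinable; dropping it detaches
    let (forks, _handle) = MultiFork::spawn();
    // `None` starts on the empty in-memory database, `Some` establishes the first fork
    Backend2::new(forks, fork_config)
}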
+ pub fn spawn() -> (Self, std::thread::JoinHandle<()>) { let (fork, handler) = Self::new(); // spawn a light-weight thread with a thread-local async runtime just for // sending and receiving data from the remote client(s) - let _ = std::thread::Builder::new() + let handle = std::thread::Builder::new() .name("multi-fork-backend-thread".to_string()) .spawn(move || { let rt = tokio::runtime::Builder::new_current_thread() @@ -65,7 +68,7 @@ impl MultiFork { }) .expect("failed to spawn multi fork handler thread"); trace!(target: "fork::multi", "spawned MultiForkHandler thread"); - fork + (fork, handle) } pub fn create_fork(&self, fork: CreateFork) -> eyre::Result<(ForkId, SharedBackend)> { From 8d8839ff29f42b69db2bf0a0a62d6be31aeb52c9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 16:25:50 +0200 Subject: [PATCH 018/102] feat: complete fork api --- Cargo.lock | 26 ++++++++--------- evm/src/executor/backend/mod.rs | 35 +++++++++++++++-------- evm/src/executor/fork/multi.rs | 49 +++++++++++++++++++++++++++++---- 3 files changed, 80 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 509b28acb386..ebb4e9d2c8e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1450,7 +1450,7 @@ dependencies = [ [[package]] name = "ethers" version = "0.6.2" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "ethers-addressbook", "ethers-contract", @@ -1465,7 +1465,7 @@ dependencies = [ [[package]] name = "ethers-addressbook" version = "0.1.0" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "ethers-core", "once_cell", @@ -1476,7 +1476,7 @@ dependencies = [ [[package]] name = "ethers-contract" version = "0.6.2" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "ethers-contract-abigen", "ethers-contract-derive", @@ -1494,7 +1494,7 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" version = "0.6.3" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "Inflector", "cfg-if 1.0.0", @@ -1516,7 +1516,7 @@ dependencies = [ [[package]] name = "ethers-contract-derive" version = "0.6.3" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "ethers-contract-abigen", "ethers-core", @@ -1530,7 +1530,7 @@ dependencies = [ [[package]] name = "ethers-core" version = "0.6.3" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "arrayvec 0.7.2", "bytes", @@ -1560,7 +1560,7 @@ dependencies = [ [[package]] name = "ethers-etherscan" version = "0.2.2" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "ethers-core", "ethers-solc", @@ 
-1576,7 +1576,7 @@ dependencies = [ [[package]] name = "ethers-middleware" version = "0.6.2" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "async-trait", "ethers-contract", @@ -1600,7 +1600,7 @@ dependencies = [ [[package]] name = "ethers-providers" version = "0.6.2" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "async-trait", "auto_impl 1.0.1", @@ -1635,7 +1635,7 @@ dependencies = [ [[package]] name = "ethers-signers" version = "0.6.2" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "async-trait", "coins-bip32", @@ -1658,7 +1658,7 @@ dependencies = [ [[package]] name = "ethers-solc" version = "0.3.0" -source = "git+https://github.com/gakonst/ethers-rs#00b38c437a7cae6fbe9f846ffbb12a03fbed0df5" +source = "git+https://github.com/gakonst/ethers-rs#a532eb4d29c963f5b7baa404c3a821e26561ed94" dependencies = [ "cfg-if 1.0.0", "colored", @@ -4707,9 +4707,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942" +checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" dependencies = [ "proc-macro2", "quote", diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 97dd5df1b96b..d4e42b14614a 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -4,6 +4,7 @@ use revm::{ db::{CacheDB, DatabaseRef, EmptyDB}, AccountInfo, Env, }; +use std::collections::HashMap; use tracing::{trace, warn}; mod in_memory_db; @@ -63,6 +64,8 @@ pub use in_memory_db::MemDb; pub struct Backend2 { /// The access point for managing forks forks: MultiFork, + /// tracks all created forks + created_forks: HashMap, /// The database that holds the entire state, uses an internal database depending on current /// state pub db: CacheDB, @@ -82,10 +85,10 @@ impl Backend2 { let (id, fork) = forks.create_fork(f).expect("Unable to fork"); CacheDB::new(BackendDatabase::Forked(fork, id)) } else { - CacheDB::new(BackendDatabase::Simple(EmptyDB())) + CacheDB::new(BackendDatabase::InMemory(EmptyDB())) }; - Self { forks, db, snapshots: Default::default() } + Self { forks, db, created_forks: Default::default(), snapshots: Default::default() } } /// Creates a new snapshot @@ -112,8 +115,9 @@ impl Backend2 { /// Creates a new fork but does _not_ select it pub fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - self.forks.create_fork(fork); - todo!() + let (id, fork) = self.forks.create_fork(fork)?; + self.created_forks.insert(id.clone(), fork); + Ok(id) } /// Selects the fork's state @@ -123,16 +127,23 @@ impl Backend2 { /// # Errors /// /// Returns an error if no fork with the given `id` exists - pub fn select_fork(&mut self, _id: ForkId) -> eyre::Result<()> { - todo!() + pub fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { + let id = id.into(); + let fork = self + .created_forks + .get(&id) + .cloned() + .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; + *self.db.db_mut() = 
BackendDatabase::Forked(fork, id); + Ok(()) } } /// Variants of a [revm::Database] #[derive(Debug, Clone)] pub enum BackendDatabase { - /// Simple in memory [revm::Database] - Simple(EmptyDB), + /// Simple in-memory [revm::Database] + InMemory(EmptyDB), /// A [revm::Database] that forks of a remote location and can have multiple consumers of the /// same data Forked(SharedBackend, ForkId), @@ -141,28 +152,28 @@ pub enum BackendDatabase { impl DatabaseRef for BackendDatabase { fn basic(&self, address: H160) -> AccountInfo { match self { - BackendDatabase::Simple(inner) => inner.basic(address), + BackendDatabase::InMemory(inner) => inner.basic(address), BackendDatabase::Forked(inner, _) => inner.basic(address), } } fn code_by_hash(&self, address: H256) -> bytes::Bytes { match self { - BackendDatabase::Simple(inner) => inner.code_by_hash(address), + BackendDatabase::InMemory(inner) => inner.code_by_hash(address), BackendDatabase::Forked(inner, _) => inner.code_by_hash(address), } } fn storage(&self, address: H160, index: U256) -> U256 { match self { - BackendDatabase::Simple(inner) => inner.storage(address, index), + BackendDatabase::InMemory(inner) => inner.storage(address, index), BackendDatabase::Forked(inner, _) => inner.storage(address, index), } } fn block_hash(&self, number: U256) -> H256 { match self { - BackendDatabase::Simple(inner) => inner.block_hash(number), + BackendDatabase::InMemory(inner) => inner.block_hash(number), BackendDatabase::Forked(inner, _) => inner.block_hash(number), } } diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index adf0fca1f2a4..3dc19ffe168a 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -7,9 +7,10 @@ use crate::executor::fork::{ BackendHandler, BlockchainDb, BlockchainDbMeta, CreateFork, SharedBackend, }; use ethers::{ + abi::{AbiDecode, AbiEncode, AbiError}, prelude::Middleware, providers::{Http, Provider, RetryClient}, - types::BlockNumber, + types::{BlockId, BlockNumber}, }; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -19,6 +20,7 @@ use futures::{ }; use std::{ collections::HashMap, + fmt, pin::Pin, sync::{ mpsc::{channel as oneshot_channel, Sender as OneshotSender}, @@ -32,6 +34,30 @@ use tracing::trace; #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct ForkId(pub String); +impl fmt::Display for ForkId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl> From for ForkId { + fn from(id: T) -> Self { + Self(id.into()) + } +} + +impl AbiEncode for ForkId { + fn encode(self) -> Vec { + AbiEncode::encode(self.0) + } +} + +impl AbiDecode for ForkId { + fn decode(bytes: impl AsRef<[u8]>) -> Result { + Ok(Self(String::decode(bytes)?)) + } +} + /// The Sender half of multi fork pair. /// Can send requests to the `MultiForkHandler` to create forks #[derive(Debug, Clone)] @@ -71,12 +97,25 @@ impl MultiFork { (fork, handle) } + /// Returns a fork backend + /// + /// If no matching fork backend exists it will be created pub fn create_fork(&self, fork: CreateFork) -> eyre::Result<(ForkId, SharedBackend)> { let (sender, rx) = oneshot_channel(); let req = Request::CreateFork(Box::new(fork), sender); self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; rx.recv()? } + + /// Returns the corresponding fork if it exist + /// + /// Returns `None` if no matching fork backend is available. 
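// A sketch of how fork identifiers are resolved, assuming the `url@block` ids
// produced by `create_fork_id`: the same (endpoint, block) pair maps to the same
// `ForkId`, which `get_fork` can later turn back into the shared backend.
fn lookup_example(forks: &MultiFork, fork: CreateFork) -> eyre::Result<()> {
    let (id, _backend) = forks.create_fork(fork)?;   // id embeds the endpoint and block
    assert!(forks.get_fork(id)?.is_some());          // known ids resolve to their backend
    assert!(forks.get_fork("unknown@0")?.is_none()); // unknown ids resolve to `None`
    Ok(())
}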
+ pub fn get_fork(&self, id: impl Into) -> eyre::Result> { + let (sender, rx) = oneshot_channel(); + let req = Request::GetFork(id.into(), sender); + self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; + Ok(rx.recv()?) + } } type Handler = BackendHandler>>>; @@ -91,7 +130,7 @@ enum Request { /// Creates a new ForkBackend CreateFork(Box, CreateSender), /// Returns the Fork backend for the `ForkId` if it exists - GetBacked(ForkId, OneshotSender>), + GetFork(ForkId, OneshotSender>), } enum ForkTask { @@ -152,7 +191,7 @@ impl MultiForkHandler { self.pending_tasks.push(ForkTask::Create(task, fork_id, sender)); } } - Request::GetBacked(fork_id, sender) => { + Request::GetFork(fork_id, sender) => { let fork = self.forks.get(&fork_id).cloned(); let _ = sender.send(fork); } @@ -230,7 +269,7 @@ impl Future for MultiForkHandler { /// Returns the identifier for a Fork which consists of the url and the block number fn create_fork_id(url: &str, num: BlockNumber) -> ForkId { - ForkId(format!("{url}@{num:?}")) + ForkId(format!("{url}@{num}")) } /// Creates a new fork @@ -260,5 +299,5 @@ async fn create_fork( meta.block_env.number = number.into(); let db = BlockchainDb::new(meta, cache_path); - todo!() + Ok(SharedBackend::new(provider, db, Some(BlockId::Number(BlockNumber::Number(number.into()))))) } From 7b32b73e9048c654dd7ea7532d50c4dc5cb5e67d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 16:42:15 +0200 Subject: [PATCH 019/102] refactor: simplify TestFilter --- forge/src/lib.rs | 2 +- forge/src/multi_runner.rs | 6 +++--- forge/src/runner.rs | 7 +++++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/forge/src/lib.rs b/forge/src/lib.rs index 166f8b91276c..480733199908 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -12,7 +12,7 @@ pub use multi_runner::{MultiContractRunner, MultiContractRunnerBuilder}; mod utils; pub use utils::deploy_create2_deployer; -pub trait TestFilter { +pub trait TestFilter: Send + Sync { fn matches_test(&self, test_name: impl AsRef) -> bool; fn matches_contract(&self, contract_name: impl AsRef) -> bool; fn matches_path(&self, path: impl AsRef) -> bool; diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index d6adcc391ad5..3c1a3bf33893 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -188,7 +188,7 @@ impl MultiContractRunner { } // Get all tests of matching path and contract - pub fn get_tests(&self, filter: &(impl TestFilter + Send + Sync)) -> Vec { + pub fn get_tests(&self, filter: &impl TestFilter) -> Vec { self.contracts .iter() .filter(|(id, _)| { @@ -202,7 +202,7 @@ impl MultiContractRunner { pub fn list( &self, - filter: &(impl TestFilter + Send + Sync), + filter: &impl TestFilter, ) -> BTreeMap>> { self.contracts .iter() @@ -237,7 +237,7 @@ impl MultiContractRunner { /// Each Executor gets its own instance of the `Backend`. 
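// A minimal `TestFilter` implementation sketch matching the tightened trait bounds
// above (`Send + Sync`, so a filter can be shared across the parallel runners);
// the substring semantics are illustrative only.
struct SubstringFilter {
    pattern: String,
}

impl TestFilter for SubstringFilter {
    fn matches_test(&self, test_name: impl AsRef<str>) -> bool {
        test_name.as_ref().contains(&self.pattern)
    }

    fn matches_contract(&self, contract_name: impl AsRef<str>) -> bool {
        contract_name.as_ref().contains(&self.pattern)
    }

    fn matches_path(&self, _path: impl AsRef<str>) -> bool {
        true // match every source path in this sketch
    }
}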
pub fn test( &mut self, - filter: &(impl TestFilter + Send + Sync), + filter: &impl TestFilter, stream_result: Option>, include_fuzz_tests: bool, ) -> Result> { diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 568d4fd7e5bc..f7319463272a 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -385,7 +385,9 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { .map(|func| (func, func.name.starts_with("testFail"))) .collect(); - // TODO(mattsse): while tests don't modify the state we have cheatcodes that affect the state (fork cheat codes, snapshots), so in order to execute all tests in parallel they need their own copy of the `Executor`, + // TODO(mattsse): while tests don't modify the state we have cheatcodes that affect the + // state (fork cheat codes, snapshots), so in order to execute all tests in parallel they + // need their own copy of the `Executor`, let test_results = tests .par_iter() @@ -419,7 +421,8 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { /// /// Calls the given functions and returns the `TestResult`. /// - /// State modifications are not committed to the evm database but discarded after the call, similar to `eth_call`. + /// State modifications are not committed to the evm database but discarded after the call, + /// similar to `eth_call`. #[tracing::instrument(name = "test", skip_all, fields(name = %func.signature(), %should_fail))] pub fn run_test( &self, From 35ddffe03a153b39f2f5188fac7a707b703817a2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 16:57:04 +0200 Subject: [PATCH 020/102] refactor: extract helper types --- cli/src/cmd/forge/snapshot.rs | 2 +- cli/src/cmd/forge/test.rs | 13 +-- forge/src/lib.rs | 138 ++----------------------------- forge/src/multi_runner.rs | 2 +- forge/src/result.rs | 147 ++++++++++++++++++++++++++++++++++ forge/src/runner.rs | 144 ++------------------------------- forge/src/types.rs | 132 ++++++++++++++++++++++++++++++ 7 files changed, 299 insertions(+), 279 deletions(-) create mode 100644 forge/src/result.rs create mode 100644 forge/src/types.rs diff --git a/cli/src/cmd/forge/snapshot.rs b/cli/src/cmd/forge/snapshot.rs index 1aae9f2f52c4..1d536727e7fc 100644 --- a/cli/src/cmd/forge/snapshot.rs +++ b/cli/src/cmd/forge/snapshot.rs @@ -9,7 +9,7 @@ use crate::cmd::{ }; use clap::{Parser, ValueHint}; use eyre::Context; -use forge::TestKindGas; +use forge::result::TestKindGas; use once_cell::sync::Lazy; use regex::Regex; use std::{ diff --git a/cli/src/cmd/forge/test.rs b/cli/src/cmd/forge/test.rs index cf3417d68c1b..c9d7b2d2bdb1 100644 --- a/cli/src/cmd/forge/test.rs +++ b/cli/src/cmd/forge/test.rs @@ -1,8 +1,8 @@ //! 
Test command use crate::{ cmd::{ - forge::{build::CoreBuildArgs, debug::DebugArgs, watch::WatchArgs}, Cmd, + forge::{build::CoreBuildArgs, debug::DebugArgs, watch::WatchArgs}, }, compile::ProjectCompiler, suggestions, utils, @@ -14,14 +14,14 @@ use forge::{ decode::decode_console_logs, executor::opts::EvmOpts, gas_report::GasReport, - trace::{ - identifier::{EtherscanIdentifier, LocalTraceIdentifier}, - CallTraceDecoderBuilder, TraceKind, + MultiContractRunner, + MultiContractRunnerBuilder, TestFilter, trace::{ + CallTraceDecoderBuilder, + identifier::{EtherscanIdentifier, LocalTraceIdentifier}, TraceKind, }, - MultiContractRunner, MultiContractRunnerBuilder, SuiteResult, TestFilter, TestKind, }; use foundry_common::evm::EvmArgs; -use foundry_config::{figment::Figment, Config}; +use foundry_config::{Config, figment::Figment}; use regex::Regex; use std::{ collections::BTreeMap, @@ -33,6 +33,7 @@ use std::{ }; use watchexec::config::{InitConfig, RuntimeConfig}; use yansi::Paint; +use forge::result::{SuiteResult, TestKind}; #[derive(Debug, Clone, Parser)] pub struct Filter { diff --git a/forge/src/lib.rs b/forge/src/lib.rs index 480733199908..5a9f3b6bb364 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -3,7 +3,7 @@ pub mod gas_report; /// The Forge test runner mod runner; -pub use runner::{ContractRunner, SuiteResult, TestKind, TestKindGas, TestResult}; +pub use runner::ContractRunner; /// Forge test runners for multiple contracts mod multi_runner; @@ -12,137 +12,11 @@ pub use multi_runner::{MultiContractRunner, MultiContractRunnerBuilder}; mod utils; pub use utils::deploy_create2_deployer; -pub trait TestFilter: Send + Sync { - fn matches_test(&self, test_name: impl AsRef) -> bool; - fn matches_contract(&self, contract_name: impl AsRef) -> bool; - fn matches_path(&self, path: impl AsRef) -> bool; -} +mod types; +pub use types::*; + +mod result; +pub use result::*; /// The Forge EVM backend pub use foundry_evm::*; - -#[cfg(test)] -pub mod test_helpers { - use crate::TestFilter; - use ethers::{ - prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, - solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, - types::{Address, U256}, - }; - use foundry_evm::{ - executor::{ - backend::Backend, - opts::{Env, EvmOpts}, - DatabaseRef, Executor, ExecutorBuilder, - }, - fuzz::FuzzedExecutor, - CALLER, - }; - use std::str::FromStr; - - pub static PROJECT: Lazy = Lazy::new(|| { - let paths = ProjectPathsConfig::builder() - .root("../testdata") - .sources("../testdata") - .build() - .unwrap(); - Project::builder().paths(paths).ephemeral().no_artifacts().build().unwrap() - }); - - pub static LIBS_PROJECT: Lazy = Lazy::new(|| { - let paths = ProjectPathsConfig::builder() - .root("../testdata") - .sources("../testdata") - .build() - .unwrap(); - let libs = - ["fork/Fork.t.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string()]; - - let settings = - Settings { libraries: Libraries::parse(&libs).unwrap(), ..Default::default() }; - - let solc_config = SolcConfig::builder().settings(settings).build(); - Project::builder() - .paths(paths) - .ephemeral() - .no_artifacts() - .solc_config(solc_config) - .build() - .unwrap() - }); - - pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); - - pub static COMPILED_WITH_LIBS: Lazy = - Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); - - pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { - env: Env { - gas_limit: 18446744073709551615, - chain_id: Some(foundry_common::DEV_CHAIN_ID), - 
tx_origin: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), - block_number: 1, - block_timestamp: 1, - ..Default::default() - }, - sender: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), - initial_balance: U256::MAX, - ffi: true, - memory_limit: 2u64.pow(24), - ..Default::default() - }); - - pub fn test_executor() -> Executor { - let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); - ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) - } - - pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { - let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() }; - - FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) - } - - pub mod filter { - use super::*; - use regex::Regex; - - pub struct Filter { - test_regex: Regex, - contract_regex: Regex, - path_regex: Regex, - } - - impl Filter { - pub fn new(test_pattern: &str, contract_pattern: &str, path_pattern: &str) -> Self { - Filter { - test_regex: Regex::new(test_pattern).unwrap(), - contract_regex: Regex::new(contract_pattern).unwrap(), - path_regex: Regex::new(path_pattern).unwrap(), - } - } - - pub fn matches_all() -> Self { - Filter { - test_regex: Regex::new(".*").unwrap(), - contract_regex: Regex::new(".*").unwrap(), - path_regex: Regex::new(".*").unwrap(), - } - } - } - - impl TestFilter for Filter { - fn matches_test(&self, test_name: impl AsRef) -> bool { - self.test_regex.is_match(test_name.as_ref()) - } - - fn matches_contract(&self, contract_name: impl AsRef) -> bool { - self.contract_regex.is_match(contract_name.as_ref()) - } - - fn matches_path(&self, path: impl AsRef) -> bool { - self.path_regex.is_match(path.as_ref()) - } - } - } -} diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 3c1a3bf33893..10dd6f303c35 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -1,4 +1,4 @@ -use crate::{ContractRunner, SuiteResult, TestFilter}; +use crate::{result::SuiteResult, ContractRunner, TestFilter}; use ethers::{ abi::Abi, prelude::{artifacts::CompactContractBytecode, ArtifactId, ArtifactOutput}, diff --git a/forge/src/result.rs b/forge/src/result.rs new file mode 100644 index 000000000000..f6c144c60dda --- /dev/null +++ b/forge/src/result.rs @@ -0,0 +1,147 @@ +//! test outcomes + +use crate::Address; +use ethers::prelude::Log; +use foundry_evm::{ + fuzz::{CounterExample, FuzzedCases}, + trace::{CallTraceArena, TraceKind}, +}; +use std::{collections::BTreeMap, fmt, time::Duration}; + +/// Results and duration for a set of tests included in the same test contract +#[derive(Clone, Serialize)] +pub struct SuiteResult { + /// Total duration of the test run for this block of tests + pub duration: Duration, + /// Individual test results. `test method name -> TestResult` + pub test_results: BTreeMap, + // Warnings + pub warnings: Vec, +} + +impl SuiteResult { + pub fn new( + duration: Duration, + test_results: BTreeMap, + warnings: Vec, + ) -> Self { + Self { duration, test_results, warnings } + } + + pub fn is_empty(&self) -> bool { + self.test_results.is_empty() + } + + pub fn len(&self) -> usize { + self.test_results.len() + } +} + +/// The result of an executed solidity test +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct TestResult { + /// Whether the test case was successful. 
This means that the transaction executed + /// properly, or that there was a revert and that the test was expected to fail + /// (prefixed with `testFail`) + pub success: bool, + + /// If there was a revert, this field will be populated. Note that the test can + /// still be successful (i.e self.success == true) when it's expected to fail. + pub reason: Option, + + /// Minimal reproduction test case for failing fuzz tests + pub counterexample: Option, + + /// Any captured & parsed as strings logs along the test's execution which should + /// be printed to the user. + #[serde(skip)] + pub logs: Vec, + + /// What kind of test this was + pub kind: TestKind, + + /// Traces + pub traces: Vec<(TraceKind, CallTraceArena)>, + + /// Labeled addresses + pub labeled_addresses: BTreeMap, +} + +impl TestResult { + /// Returns `true` if this is the result of a fuzz test + pub fn is_fuzz(&self) -> bool { + matches!(self.kind, TestKind::Fuzz(_)) + } +} + +/// Used gas by a test +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum TestKindGas { + Standard(u64), + Fuzz { runs: usize, mean: u64, median: u64 }, +} + +impl fmt::Display for TestKindGas { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TestKindGas::Standard(gas) => { + write!(f, "(gas: {})", gas) + } + TestKindGas::Fuzz { runs, mean, median } => { + write!(f, "(runs: {}, μ: {}, ~: {})", runs, mean, median) + } + } + } +} + +impl TestKindGas { + /// Returns the main gas value to compare against + pub fn gas(&self) -> u64 { + match self { + TestKindGas::Standard(gas) => *gas, + // We use the median for comparisons + TestKindGas::Fuzz { median, .. } => *median, + } + } +} + +/// Various types of tests +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum TestKind { + /// A standard test that consists of calling the defined solidity function + /// + /// Holds the consumed gas + Standard(u64), + /// A solidity fuzz test, that stores all test cases + Fuzz(FuzzedCases), +} + +impl TestKind { + /// The gas consumed by this test + pub fn gas_used(&self) -> TestKindGas { + match self { + TestKind::Standard(gas) => TestKindGas::Standard(*gas), + TestKind::Fuzz(fuzzed) => TestKindGas::Fuzz { + runs: fuzzed.cases().len(), + median: fuzzed.median_gas(false), + mean: fuzzed.mean_gas(false), + }, + } + } +} + +#[derive(Clone, Debug, Default)] +pub struct TestSetup { + /// The address at which the test contract was deployed + pub address: Address, + /// The logs emitted during setup + pub logs: Vec, + /// Call traces of the setup + pub traces: Vec<(TraceKind, CallTraceArena)>, + /// Addresses labeled during setup + pub labeled_addresses: BTreeMap, + /// Whether the setup failed + pub setup_failed: bool, + /// The reason the setup failed + pub reason: Option, +} diff --git a/forge/src/runner.rs b/forge/src/runner.rs index f7319463272a..ee4f9529e9b5 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -1,4 +1,8 @@ -use crate::{deploy_create2_deployer, TestFilter}; +use crate::{ + deploy_create2_deployer, + result::{SuiteResult, TestKind, TestResult, TestSetup}, + TestFilter, +}; use ethers::{ abi::{Abi, Function}, types::{Address, Bytes, Log, U256}, @@ -19,144 +23,6 @@ use std::{ time::{Duration, Instant}, }; -/// Results and duration for a set of tests included in the same test contract -#[derive(Clone, Serialize)] -pub struct SuiteResult { - /// Total duration of the test run for this block of tests - pub duration: Duration, - /// Individual test results. 
`test method name -> TestResult` - pub test_results: BTreeMap, - // Warnings - pub warnings: Vec, -} - -impl SuiteResult { - pub fn new( - duration: Duration, - test_results: BTreeMap, - warnings: Vec, - ) -> Self { - Self { duration, test_results, warnings } - } - - pub fn is_empty(&self) -> bool { - self.test_results.is_empty() - } - - pub fn len(&self) -> usize { - self.test_results.len() - } -} - -/// The result of an executed solidity test -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct TestResult { - /// Whether the test case was successful. This means that the transaction executed - /// properly, or that there was a revert and that the test was expected to fail - /// (prefixed with `testFail`) - pub success: bool, - - /// If there was a revert, this field will be populated. Note that the test can - /// still be successful (i.e self.success == true) when it's expected to fail. - pub reason: Option, - - /// Minimal reproduction test case for failing fuzz tests - pub counterexample: Option, - - /// Any captured & parsed as strings logs along the test's execution which should - /// be printed to the user. - #[serde(skip)] - pub logs: Vec, - - /// What kind of test this was - pub kind: TestKind, - - /// Traces - pub traces: Vec<(TraceKind, CallTraceArena)>, - - /// Labeled addresses - pub labeled_addresses: BTreeMap, -} - -impl TestResult { - /// Returns `true` if this is the result of a fuzz test - pub fn is_fuzz(&self) -> bool { - matches!(self.kind, TestKind::Fuzz(_)) - } -} - -/// Used gas by a test -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum TestKindGas { - Standard(u64), - Fuzz { runs: usize, mean: u64, median: u64 }, -} - -impl fmt::Display for TestKindGas { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - TestKindGas::Standard(gas) => { - write!(f, "(gas: {})", gas) - } - TestKindGas::Fuzz { runs, mean, median } => { - write!(f, "(runs: {}, μ: {}, ~: {})", runs, mean, median) - } - } - } -} - -impl TestKindGas { - /// Returns the main gas value to compare against - pub fn gas(&self) -> u64 { - match self { - TestKindGas::Standard(gas) => *gas, - // We use the median for comparisons - TestKindGas::Fuzz { median, .. } => *median, - } - } -} - -/// Various types of tests -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum TestKind { - /// A standard test that consists of calling the defined solidity function - /// - /// Holds the consumed gas - Standard(u64), - /// A solidity fuzz test, that stores all test cases - Fuzz(FuzzedCases), -} - -impl TestKind { - /// The gas consumed by this test - pub fn gas_used(&self) -> TestKindGas { - match self { - TestKind::Standard(gas) => TestKindGas::Standard(*gas), - TestKind::Fuzz(fuzzed) => TestKindGas::Fuzz { - runs: fuzzed.cases().len(), - median: fuzzed.median_gas(false), - mean: fuzzed.mean_gas(false), - }, - } - } -} - -#[derive(Clone, Debug, Default)] -pub struct TestSetup { - /// The address at which the test contract was deployed - pub address: Address, - /// The logs emitted during setup - pub logs: Vec, - /// Call traces of the setup - pub traces: Vec<(TraceKind, CallTraceArena)>, - /// Addresses labeled during setup - pub labeled_addresses: BTreeMap, - /// Whether the setup failed - pub setup_failed: bool, - /// The reason the setup failed - pub reason: Option, -} - pub struct ContractRunner<'a, DB: DatabaseRef> { /// The executor used by the runner. 
pub executor: Executor, diff --git a/forge/src/types.rs b/forge/src/types.rs new file mode 100644 index 000000000000..ae1c4810db33 --- /dev/null +++ b/forge/src/types.rs @@ -0,0 +1,132 @@ +/// Extension trait for matching tests +pub trait TestFilter: Send + Sync { + fn matches_test(&self, test_name: impl AsRef) -> bool; + fn matches_contract(&self, contract_name: impl AsRef) -> bool; + fn matches_path(&self, path: impl AsRef) -> bool; +} + +#[cfg(test)] +pub mod test_helpers { + use super::*; + use ethers::{ + prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, + solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, + types::{Address, U256}, + }; + use foundry_evm::{ + executor::{ + backend::Backend, + opts::{Env, EvmOpts}, + DatabaseRef, Executor, ExecutorBuilder, + }, + fuzz::FuzzedExecutor, + CALLER, + }; + use std::str::FromStr; + + pub static PROJECT: Lazy = Lazy::new(|| { + let paths = ProjectPathsConfig::builder() + .root("../testdata") + .sources("../testdata") + .build() + .unwrap(); + Project::builder().paths(paths).ephemeral().no_artifacts().build().unwrap() + }); + + pub static LIBS_PROJECT: Lazy = Lazy::new(|| { + let paths = ProjectPathsConfig::builder() + .root("../testdata") + .sources("../testdata") + .build() + .unwrap(); + let libs = + ["fork/Fork.t.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string()]; + + let settings = + Settings { libraries: Libraries::parse(&libs).unwrap(), ..Default::default() }; + + let solc_config = SolcConfig::builder().settings(settings).build(); + Project::builder() + .paths(paths) + .ephemeral() + .no_artifacts() + .solc_config(solc_config) + .build() + .unwrap() + }); + + pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); + + pub static COMPILED_WITH_LIBS: Lazy = + Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); + + pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { + env: Env { + gas_limit: 18446744073709551615, + chain_id: Some(foundry_common::DEV_CHAIN_ID), + tx_origin: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), + block_number: 1, + block_timestamp: 1, + ..Default::default() + }, + sender: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), + initial_balance: U256::MAX, + ffi: true, + memory_limit: 2u64.pow(24), + ..Default::default() + }); + + pub fn test_executor() -> Executor { + let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); + ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) + } + + pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { + let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() }; + + FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) + } + + pub mod filter { + use super::*; + use regex::Regex; + + pub struct Filter { + test_regex: Regex, + contract_regex: Regex, + path_regex: Regex, + } + + impl Filter { + pub fn new(test_pattern: &str, contract_pattern: &str, path_pattern: &str) -> Self { + Filter { + test_regex: Regex::new(test_pattern).unwrap(), + contract_regex: Regex::new(contract_pattern).unwrap(), + path_regex: Regex::new(path_pattern).unwrap(), + } + } + + pub fn matches_all() -> Self { + Filter { + test_regex: Regex::new(".*").unwrap(), + contract_regex: Regex::new(".*").unwrap(), + path_regex: Regex::new(".*").unwrap(), + } + } + } + + impl TestFilter for Filter { + fn matches_test(&self, test_name: impl AsRef) -> bool { + 
self.test_regex.is_match(test_name.as_ref()) + } + + fn matches_contract(&self, contract_name: impl AsRef) -> bool { + self.contract_regex.is_match(contract_name.as_ref()) + } + + fn matches_path(&self, path: impl AsRef) -> bool { + self.path_regex.is_match(path.as_ref()) + } + } + } +} From c0d4c3467da92bd77879663adf601a92ba088053 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 16:58:28 +0200 Subject: [PATCH 021/102] refactor: restructure runner --- forge/src/multi_runner.rs | 266 +++++++++++++++++++------------------- 1 file changed, 133 insertions(+), 133 deletions(-) diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 10dd6f303c35..285d4c76c3cc 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -14,141 +14,8 @@ use proptest::test_runner::TestRunner; use rayon::prelude::*; use std::{collections::BTreeMap, marker::Sync, path::Path, sync::mpsc::Sender}; -/// Builder used for instantiating the multi-contract runner -#[derive(Debug, Default)] -pub struct MultiContractRunnerBuilder { - /// The fuzzer to be used for running fuzz tests - pub fuzzer: Option, - /// The address which will be used to deploy the initial contracts and send all - /// transactions - pub sender: Option
, - /// The initial balance for each one of the deployed smart contracts - pub initial_balance: U256, - /// The EVM spec to use - pub evm_spec: Option, - /// The fork config - pub fork: Option, -} - pub type DeployableContracts = BTreeMap)>; -impl MultiContractRunnerBuilder { - /// Given an EVM, proceeds to return a runner which is able to execute all tests - /// against that evm - pub fn build( - self, - root: impl AsRef, - output: ProjectCompileOutput, - evm_opts: EvmOpts, - ) -> Result - where - A: ArtifactOutput, - { - // This is just the contracts compiled, but we need to merge this with the read cached - // artifacts - let contracts = output - .with_stripped_file_prefixes(root) - .into_artifacts() - .map(|(i, c)| (i, c.into_contract_bytecode())) - .collect::>(); - - let mut known_contracts: BTreeMap)> = Default::default(); - let source_paths = contracts - .iter() - .map(|(i, _)| (i.identifier(), i.source.to_string_lossy().into())) - .collect::>(); - - // create a mapping of name => (abi, deployment code, Vec) - let mut deployable_contracts = DeployableContracts::default(); - - foundry_utils::link_with_nonce_or_address( - BTreeMap::from_iter(contracts), - &mut known_contracts, - Default::default(), - evm_opts.sender, - U256::one(), - &mut deployable_contracts, - |file, key| (format!("{key}.json:{key}"), file, key), - |post_link_input| { - let PostLinkInput { - contract, - known_contracts, - id, - extra: deployable_contracts, - dependencies, - } = post_link_input; - - // get bytes - let bytecode = - if let Some(b) = contract.bytecode.expect("No bytecode").object.into_bytes() { - b - } else { - return Ok(()) - }; - - let abi = contract.abi.expect("We should have an abi by now"); - // if its a test, add it to deployable contracts - if abi.constructor.as_ref().map(|c| c.inputs.is_empty()).unwrap_or(true) && - abi.functions().any(|func| func.name.starts_with("test")) - { - deployable_contracts - .insert(id.clone(), (abi.clone(), bytecode, dependencies.to_vec())); - } - - contract - .deployed_bytecode - .and_then(|d_bcode| d_bcode.bytecode) - .and_then(|bcode| bcode.object.into_bytes()) - .and_then(|bytes| known_contracts.insert(id.clone(), (abi, bytes.to_vec()))); - Ok(()) - }, - )?; - - let execution_info = foundry_utils::flatten_known_contracts(&known_contracts); - Ok(MultiContractRunner { - contracts: deployable_contracts, - known_contracts, - evm_opts, - evm_spec: self.evm_spec.unwrap_or(SpecId::LONDON), - sender: self.sender, - fuzzer: self.fuzzer, - errors: Some(execution_info.2), - source_paths, - fork: self.fork, - }) - } - - #[must_use] - pub fn sender(mut self, sender: Address) -> Self { - self.sender = Some(sender); - self - } - - #[must_use] - pub fn initial_balance(mut self, initial_balance: U256) -> Self { - self.initial_balance = initial_balance; - self - } - - #[must_use] - pub fn fuzzer(mut self, fuzzer: TestRunner) -> Self { - self.fuzzer = Some(fuzzer); - self - } - - #[must_use] - pub fn evm_spec(mut self, spec: SpecId) -> Self { - self.evm_spec = Some(spec); - self - } - - #[must_use] - pub fn with_fork(mut self, fork: Option) -> Self { - self.fork = fork; - self - } -} - /// A multi contract runner receives a set of contracts deployed in an EVM instance and proceeds /// to run all test functions in these contracts. 
pub struct MultiContractRunner { @@ -318,6 +185,139 @@ impl MultiContractRunner { } } +/// Builder used for instantiating the multi-contract runner +#[derive(Debug, Default)] +pub struct MultiContractRunnerBuilder { + /// The fuzzer to be used for running fuzz tests + pub fuzzer: Option, + /// The address which will be used to deploy the initial contracts and send all + /// transactions + pub sender: Option
, + /// The initial balance for each one of the deployed smart contracts + pub initial_balance: U256, + /// The EVM spec to use + pub evm_spec: Option, + /// The fork config + pub fork: Option, +} + +impl MultiContractRunnerBuilder { + /// Given an EVM, proceeds to return a runner which is able to execute all tests + /// against that evm + pub fn build( + self, + root: impl AsRef, + output: ProjectCompileOutput, + evm_opts: EvmOpts, + ) -> Result + where + A: ArtifactOutput, + { + // This is just the contracts compiled, but we need to merge this with the read cached + // artifacts + let contracts = output + .with_stripped_file_prefixes(root) + .into_artifacts() + .map(|(i, c)| (i, c.into_contract_bytecode())) + .collect::>(); + + let mut known_contracts: BTreeMap)> = Default::default(); + let source_paths = contracts + .iter() + .map(|(i, _)| (i.identifier(), i.source.to_string_lossy().into())) + .collect::>(); + + // create a mapping of name => (abi, deployment code, Vec) + let mut deployable_contracts = DeployableContracts::default(); + + foundry_utils::link_with_nonce_or_address( + BTreeMap::from_iter(contracts), + &mut known_contracts, + Default::default(), + evm_opts.sender, + U256::one(), + &mut deployable_contracts, + |file, key| (format!("{key}.json:{key}"), file, key), + |post_link_input| { + let PostLinkInput { + contract, + known_contracts, + id, + extra: deployable_contracts, + dependencies, + } = post_link_input; + + // get bytes + let bytecode = + if let Some(b) = contract.bytecode.expect("No bytecode").object.into_bytes() { + b + } else { + return Ok(()) + }; + + let abi = contract.abi.expect("We should have an abi by now"); + // if its a test, add it to deployable contracts + if abi.constructor.as_ref().map(|c| c.inputs.is_empty()).unwrap_or(true) && + abi.functions().any(|func| func.name.starts_with("test")) + { + deployable_contracts + .insert(id.clone(), (abi.clone(), bytecode, dependencies.to_vec())); + } + + contract + .deployed_bytecode + .and_then(|d_bcode| d_bcode.bytecode) + .and_then(|bcode| bcode.object.into_bytes()) + .and_then(|bytes| known_contracts.insert(id.clone(), (abi, bytes.to_vec()))); + Ok(()) + }, + )?; + + let execution_info = foundry_utils::flatten_known_contracts(&known_contracts); + Ok(MultiContractRunner { + contracts: deployable_contracts, + known_contracts, + evm_opts, + evm_spec: self.evm_spec.unwrap_or(SpecId::LONDON), + sender: self.sender, + fuzzer: self.fuzzer, + errors: Some(execution_info.2), + source_paths, + fork: self.fork, + }) + } + + #[must_use] + pub fn sender(mut self, sender: Address) -> Self { + self.sender = Some(sender); + self + } + + #[must_use] + pub fn initial_balance(mut self, initial_balance: U256) -> Self { + self.initial_balance = initial_balance; + self + } + + #[must_use] + pub fn fuzzer(mut self, fuzzer: TestRunner) -> Self { + self.fuzzer = Some(fuzzer); + self + } + + #[must_use] + pub fn evm_spec(mut self, spec: SpecId) -> Self { + self.evm_spec = Some(spec); + self + } + + #[must_use] + pub fn with_fork(mut self, fork: Option) -> Self { + self.fork = fork; + self + } +} + #[cfg(test)] mod tests { use super::*; From 37e5210a3683eeba3661fff73460c4717fb75d1c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 17:08:47 +0200 Subject: [PATCH 022/102] chore(clippy): make clippy happy --- forge/src/multi_runner.rs | 2 +- forge/src/result.rs | 1 + forge/src/runner.rs | 15 +++++---------- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs 
index 285d4c76c3cc..12542ccd3121 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -1037,7 +1037,7 @@ Reason: `setEnv` failed to set an environment variable `{}={}`", ); let suite_result = runner.test(&Filter::new(".*", ".*", ".*cheats"), None, true).unwrap(); - assert!(suite_result.len() > 0); + assert!(!suite_result.is_empty()); for (_, SuiteResult { test_results, .. }) in suite_result { for (test_name, result) in test_results { let logs = decode_console_logs(&result.logs); diff --git a/forge/src/result.rs b/forge/src/result.rs index f6c144c60dda..f4fd9c6d7e73 100644 --- a/forge/src/result.rs +++ b/forge/src/result.rs @@ -7,6 +7,7 @@ use foundry_evm::{ trace::{CallTraceArena, TraceKind}, }; use std::{collections::BTreeMap, fmt, time::Duration}; +use serde::{Serialize, Deserialize}; /// Results and duration for a set of tests included in the same test contract #[derive(Clone, Serialize)] diff --git a/forge/src/runner.rs b/forge/src/runner.rs index ee4f9529e9b5..37758273e601 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -5,22 +5,21 @@ use crate::{ }; use ethers::{ abi::{Abi, Function}, - types::{Address, Bytes, Log, U256}, + types::{Address, Bytes, U256}, }; use eyre::Result; use foundry_evm::{ executor::{CallResult, DatabaseRef, DeployResult, EvmError, Executor}, - fuzz::{CounterExample, FuzzedCases, FuzzedExecutor}, + fuzz::{FuzzedExecutor}, trace::{CallTraceArena, TraceKind}, CALLER, }; use proptest::test_runner::TestRunner; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; -use serde::{Deserialize, Serialize}; + use std::{ collections::BTreeMap, - fmt, - time::{Duration, Instant}, + time::{Instant}, }; pub struct ContractRunner<'a, DB: DatabaseRef> { @@ -88,11 +87,7 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { Err(EvmError::Execution { reason, traces, logs, labels, .. }) => { // If we failed to call the constructor, force the tracekind to be setup so // a trace is shown. - let traces = if let Some(traces) = traces { - vec![(TraceKind::Setup, traces)] - } else { - vec![] - }; + let traces = traces.map(|traces|vec![(TraceKind::Setup, traces)]).unwrap_or_default(); return Ok(TestSetup { address: Address::zero(), From 6eb09b5c3efd75ed870e10c1ad6964ed7aacfbc9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 18:15:27 +0200 Subject: [PATCH 023/102] refactor: extract types --- anvil/src/eth/backend/mem/in_memory_db.rs | 2 +- cli/src/cmd/forge/test.rs | 24 ++++++++--------- evm/src/executor/mod.rs | 1 + forge/src/lib.rs | 7 +++-- forge/src/result.rs | 2 +- forge/src/runner.rs | 32 +++++++++++------------ forge/src/{types.rs => traits.rs} | 15 +++++++++++ 7 files changed, 48 insertions(+), 35 deletions(-) rename forge/src/{types.rs => traits.rs} (91%) diff --git a/anvil/src/eth/backend/mem/in_memory_db.rs b/anvil/src/eth/backend/mem/in_memory_db.rs index e3028f55a41c..da8e733c59cb 100644 --- a/anvil/src/eth/backend/mem/in_memory_db.rs +++ b/anvil/src/eth/backend/mem/in_memory_db.rs @@ -3,7 +3,7 @@ use crate::{ eth::backend::db::{Db, StateDb}, mem::state::state_merkle_trie_root, - revm::{db::DatabaseRef, AccountInfo, Database}, + revm::AccountInfo, Address, U256, }; use ethers::prelude::H256; diff --git a/cli/src/cmd/forge/test.rs b/cli/src/cmd/forge/test.rs index c9d7b2d2bdb1..8a90be6454b7 100644 --- a/cli/src/cmd/forge/test.rs +++ b/cli/src/cmd/forge/test.rs @@ -1,8 +1,8 @@ //! 
Test command use crate::{ cmd::{ - Cmd, forge::{build::CoreBuildArgs, debug::DebugArgs, watch::WatchArgs}, + Cmd, }, compile::ProjectCompiler, suggestions, utils, @@ -14,14 +14,15 @@ use forge::{ decode::decode_console_logs, executor::opts::EvmOpts, gas_report::GasReport, - MultiContractRunner, - MultiContractRunnerBuilder, TestFilter, trace::{ - CallTraceDecoderBuilder, - identifier::{EtherscanIdentifier, LocalTraceIdentifier}, TraceKind, + result::{SuiteResult, TestKind, TestResult}, + trace::{ + identifier::{EtherscanIdentifier, LocalTraceIdentifier}, + CallTraceDecoderBuilder, TraceKind, }, + MultiContractRunner, MultiContractRunnerBuilder, TestFilter, }; use foundry_common::evm::EvmArgs; -use foundry_config::{Config, figment::Figment}; +use foundry_config::{figment::Figment, Config}; use regex::Regex; use std::{ collections::BTreeMap, @@ -33,7 +34,6 @@ use std::{ }; use watchexec::config::{InitConfig, RuntimeConfig}; use yansi::Paint; -use forge::result::{SuiteResult, TestKind}; #[derive(Debug, Clone, Parser)] pub struct Filter { @@ -331,7 +331,7 @@ pub struct Test { /// The signature of the solidity test pub signature: String, /// Result of the executed solidity test - pub result: forge::TestResult, + pub result: TestResult, } impl Test { @@ -364,17 +364,17 @@ impl TestOutcome { } /// Iterator over all succeeding tests and their names - pub fn successes(&self) -> impl Iterator { + pub fn successes(&self) -> impl Iterator { self.tests().filter(|(_, t)| t.success) } /// Iterator over all failing tests and their names - pub fn failures(&self) -> impl Iterator { + pub fn failures(&self) -> impl Iterator { self.tests().filter(|(_, t)| !t.success) } /// Iterator over all tests and their names - pub fn tests(&self) -> impl Iterator { + pub fn tests(&self) -> impl Iterator { self.results.values().flat_map(|SuiteResult { test_results, .. }| test_results.iter()) } @@ -431,7 +431,7 @@ impl TestOutcome { } } -fn short_test_result(name: &str, result: &forge::TestResult) { +fn short_test_result(name: &str, result: &TestResult) { let status = if result.success { Paint::green("[PASS]".to_string()) } else { diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index ce6b3b13ca98..0e4280193f86 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -157,6 +157,7 @@ impl Default for RawCallResult { } } +#[derive(Debug)] pub struct Executor { // Note: We do not store an EVM here, since we are really // only interested in the database. 
REVM's `EVM` is a thin diff --git a/forge/src/lib.rs b/forge/src/lib.rs index 5a9f3b6bb364..7228920fe895 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -12,11 +12,10 @@ pub use multi_runner::{MultiContractRunner, MultiContractRunnerBuilder}; mod utils; pub use utils::deploy_create2_deployer; -mod types; -pub use types::*; +mod traits; +pub use traits::*; -mod result; -pub use result::*; +pub mod result; /// The Forge EVM backend pub use foundry_evm::*; diff --git a/forge/src/result.rs b/forge/src/result.rs index f4fd9c6d7e73..6a41423c15fc 100644 --- a/forge/src/result.rs +++ b/forge/src/result.rs @@ -6,8 +6,8 @@ use foundry_evm::{ fuzz::{CounterExample, FuzzedCases}, trace::{CallTraceArena, TraceKind}, }; +use serde::{Deserialize, Serialize}; use std::{collections::BTreeMap, fmt, time::Duration}; -use serde::{Serialize, Deserialize}; /// Results and duration for a set of tests included in the same test contract #[derive(Clone, Serialize)] diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 37758273e601..6e49bdc93536 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -10,18 +10,16 @@ use ethers::{ use eyre::Result; use foundry_evm::{ executor::{CallResult, DatabaseRef, DeployResult, EvmError, Executor}, - fuzz::{FuzzedExecutor}, - trace::{CallTraceArena, TraceKind}, + fuzz::FuzzedExecutor, + trace::TraceKind, CALLER, }; use proptest::test_runner::TestRunner; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; +use std::{collections::BTreeMap, time::Instant}; -use std::{ - collections::BTreeMap, - time::{Instant}, -}; - +/// A type that executes all tests of a contract +#[derive(Debug)] pub struct ContractRunner<'a, DB: DatabaseRef> { /// The executor used by the runner. pub executor: Executor, @@ -76,7 +74,7 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { self.executor.set_nonce(self.sender, 1); // Deploy libraries - let mut traces: Vec<(TraceKind, CallTraceArena)> = vec![]; + let mut traces = Vec::with_capacity(self.predeploy_libs.len()); for code in self.predeploy_libs.iter() { match self.executor.deploy(self.sender, code.0.clone(), 0u32.into(), self.errors) { Ok(DeployResult { traces: tmp_traces, .. }) => { @@ -87,7 +85,8 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { Err(EvmError::Execution { reason, traces, logs, labels, .. }) => { // If we failed to call the constructor, force the tracekind to be setup so // a trace is shown. - let traces = traces.map(|traces|vec![(TraceKind::Setup, traces)]).unwrap_or_default(); + let traces = + traces.map(|traces| vec![(TraceKind::Setup, traces)]).unwrap_or_default(); return Ok(TestSetup { address: Address::zero(), @@ -109,11 +108,8 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { { Ok(d) => d, Err(EvmError::Execution { reason, traces, logs, labels, .. 
}) => { - let traces = if let Some(traces) = traces { - vec![(TraceKind::Setup, traces)] - } else { - vec![] - }; + let traces = + traces.map(|traces| vec![(TraceKind::Setup, traces)]).unwrap_or_default(); return Ok(TestSetup { address: Address::zero(), @@ -137,7 +133,7 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { deploy_create2_deployer(&mut self.executor)?; // Optionally call the `setUp` function - Ok(if setup { + let setup = if setup { tracing::trace!("setting up"); let (setup_failed, setup_logs, setup_traces, labeled_addresses, reason) = match self .executor @@ -156,12 +152,14 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { ), }; traces.extend(setup_traces.map(|traces| (TraceKind::Setup, traces)).into_iter()); - logs.extend_from_slice(&setup_logs); + logs.extend(setup_logs); TestSetup { address, logs, traces, labeled_addresses, setup_failed, reason } } else { TestSetup { address, logs, traces, ..Default::default() } - }) + }; + + Ok(setup) } /// Runs all tests for a contract whose names match the provided regular expression diff --git a/forge/src/types.rs b/forge/src/traits.rs similarity index 91% rename from forge/src/types.rs rename to forge/src/traits.rs index ae1c4810db33..431d4850dcb0 100644 --- a/forge/src/types.rs +++ b/forge/src/traits.rs @@ -1,3 +1,5 @@ +use ethers::abi::Function; + /// Extension trait for matching tests pub trait TestFilter: Send + Sync { fn matches_test(&self, test_name: impl AsRef) -> bool; @@ -5,6 +7,19 @@ pub trait TestFilter: Send + Sync { fn matches_path(&self, path: impl AsRef) -> bool; } +/// Extension trait for `Function` +pub(crate) trait TestFunctionExt { + /// Whether this function should be executed as fuzz test + fn is_fuzz_test(&self) -> bool; +} + +impl TestFunctionExt for Function { + fn is_fuzz_test(&self) -> bool { + // test functions that have inputs are considered fuzz tests as those inputs will be fuzzed + !self.inputs.is_empty() + } +} + #[cfg(test)] pub mod test_helpers { use super::*; From da478bbfe39061da10d3d0b9caef16c643c5ed9f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 18:21:35 +0200 Subject: [PATCH 024/102] refactor: simplify create2 deployer fn --- cli/src/cmd/forge/script/runner.rs | 3 +-- evm/src/executor/mod.rs | 22 ++++++++++++++++++++++ forge/src/lib.rs | 3 --- forge/src/runner.rs | 3 +-- forge/src/utils.rs | 23 ----------------------- 5 files changed, 24 insertions(+), 30 deletions(-) delete mode 100644 forge/src/utils.rs diff --git a/cli/src/cmd/forge/script/runner.rs b/cli/src/cmd/forge/script/runner.rs index 75d363329fc3..c2624479a366 100644 --- a/cli/src/cmd/forge/script/runner.rs +++ b/cli/src/cmd/forge/script/runner.rs @@ -1,6 +1,5 @@ use ethers::types::{Address, Bytes, NameOrAddress, U256}; use forge::{ - deploy_create2_deployer, executor::{CallResult, DatabaseRef, DeployResult, EvmError, Executor, RawCallResult}, trace::{CallTraceArena, TraceKind}, CALLER, @@ -33,7 +32,7 @@ impl Runner { self.executor.set_balance(self.sender, U256::MAX); if need_create2_deployer { - deploy_create2_deployer(&mut self.executor)?; + self.executor.deploy_create2_deployer()?; } } diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 0e4280193f86..ae4f6af43472 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -37,6 +37,7 @@ pub use backend::Backend; pub mod snapshot; +use crate::executor::inspector::DEFAULT_CREATE2_DEPLOYER; pub use builder::{ExecutorBuilder, Fork}; /// A mapping of addresses to their changed state. 
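// Editor's note, not part of the original patch: the hunk below moves the CREATE2
// deployer setup onto `Executor` itself (`Executor::deploy_create2_deployer`), so
// tests and scripts get a deterministic deployer without the old free function in
// `forge/src/utils.rs` (deleted later in this patch). For background, CREATE2
// derives a deployment address purely from deployer, salt and init code, which is
// why predeploying a known deployer gives stable addresses across runs. The helper
// below is an illustrative sketch only (the function name is hypothetical) and
// assumes `ethers::utils::keccak256`.
use ethers::{types::Address, utils::keccak256};

// Illustrative CREATE2 address derivation (editorial sketch, not Foundry code):
// address = last 20 bytes of keccak256(0xff ++ deployer ++ salt ++ keccak256(init_code))
fn create2_address(deployer: Address, salt: [u8; 32], init_code: &[u8]) -> Address {
    let mut buf = Vec::with_capacity(1 + 20 + 32 + 32);
    buf.push(0xff);
    buf.extend_from_slice(deployer.as_bytes());
    buf.extend_from_slice(&salt);
    buf.extend_from_slice(&keccak256(init_code));
    Address::from_slice(&keccak256(&buf)[12..])
}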
@@ -176,6 +177,8 @@ pub struct Executor { gas_limit: U256, } +// === impl Executor === + impl Executor where DB: DatabaseRef, @@ -198,6 +201,25 @@ where Executor { db, env, inspector_config, gas_limit } } + /// Creates the default CREATE2 Contract Deployer for local tests and scripts. + pub fn deploy_create2_deployer(&mut self) -> eyre::Result<()> { + let create2_deployer_account = self.db.basic(DEFAULT_CREATE2_DEPLOYER); + + if create2_deployer_account.code.is_none() || + create2_deployer_account.code.as_ref().unwrap().is_empty() + { + let creator = "0x3fAB184622Dc19b6109349B94811493BF2a45362".parse().unwrap(); + self.set_balance(creator, U256::MAX); + self.deploy( + creator, + hex::decode("604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3").expect("Could not decode create2 deployer init_code").into(), + U256::zero(), + None + )?; + } + Ok(()) + } + /// Set the balance of an account. pub fn set_balance(&mut self, address: Address, amount: U256) -> &mut Self { let mut account = self.db.basic(address); diff --git a/forge/src/lib.rs b/forge/src/lib.rs index 7228920fe895..edaa0232b9fc 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -9,9 +9,6 @@ pub use runner::ContractRunner; mod multi_runner; pub use multi_runner::{MultiContractRunner, MultiContractRunnerBuilder}; -mod utils; -pub use utils::deploy_create2_deployer; - mod traits; pub use traits::*; diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 6e49bdc93536..42b213fa6d92 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -1,5 +1,4 @@ use crate::{ - deploy_create2_deployer, result::{SuiteResult, TestKind, TestResult, TestSetup}, TestFilter, }; @@ -130,7 +129,7 @@ impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { self.executor.set_balance(address, self.initial_balance); self.executor.set_balance(self.sender, self.initial_balance); - deploy_create2_deployer(&mut self.executor)?; + self.executor.deploy_create2_deployer()?; // Optionally call the `setUp` function let setup = if setup { diff --git a/forge/src/utils.rs b/forge/src/utils.rs deleted file mode 100644 index 389fa1188b21..000000000000 --- a/forge/src/utils.rs +++ /dev/null @@ -1,23 +0,0 @@ -use ethers::{abi::Address, types::U256}; -use foundry_evm::executor::{inspector::DEFAULT_CREATE2_DEPLOYER, DatabaseRef, Executor}; -use std::str::FromStr; - -/// Creates the default CREATE2 Contract Deployer for local tests and scripts. 
-pub fn deploy_create2_deployer(executor: &mut Executor) -> eyre::Result<()> { - let creator = Address::from_str("0x3fAB184622Dc19b6109349B94811493BF2a45362").unwrap(); - - let create2_deployer_account = executor.db.basic(DEFAULT_CREATE2_DEPLOYER); - - if create2_deployer_account.code.is_none() || - create2_deployer_account.code.as_ref().unwrap().is_empty() - { - executor.set_balance(creator, U256::MAX); - executor.deploy( - creator, - hex::decode("604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3").expect("Could not decode create2 deployer init_code").into(), - U256::zero(), - None - )?; - } - Ok(()) -} From 9ae19f5f7e6abb42c96a272dd2ebf909bf5ff0b5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 19:20:44 +0200 Subject: [PATCH 025/102] cleanup --- forge/src/lib.rs | 127 +++++++++++++++++++ forge/src/multi_runner.rs | 11 +- forge/src/traits.rs | 253 +++++++++++++++++++------------------- 3 files changed, 263 insertions(+), 128 deletions(-) diff --git a/forge/src/lib.rs b/forge/src/lib.rs index edaa0232b9fc..38ad6ab20181 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -16,3 +16,130 @@ pub mod result; /// The Forge EVM backend pub use foundry_evm::*; + + +#[cfg(test)] +pub mod test_helpers { + use crate::TestFilter; + use ethers::{ + prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, + solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, + types::{Address, U256}, + }; + use foundry_evm::{ + executor::{ + Backend, + opts::{Env, EvmOpts}, + DatabaseRef, Executor, ExecutorBuilder, + }, + fuzz::FuzzedExecutor, + CALLER, + }; + use std::str::FromStr; + + pub static PROJECT: Lazy = Lazy::new(|| { + let paths = ProjectPathsConfig::builder() + .root("../testdata") + .sources("../testdata") + .build() + .unwrap(); + Project::builder().paths(paths).ephemeral().no_artifacts().build().unwrap() + }); + + pub static LIBS_PROJECT: Lazy = Lazy::new(|| { + let paths = ProjectPathsConfig::builder() + .root("../testdata") + .sources("../testdata") + .build() + .unwrap(); + let libs = + ["fork/Fork.t.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string()]; + + let settings = + Settings { libraries: Libraries::parse(&libs).unwrap(), ..Default::default() }; + + let solc_config = SolcConfig::builder().settings(settings).build(); + Project::builder() + .paths(paths) + .ephemeral() + .no_artifacts() + .solc_config(solc_config) + .build() + .unwrap() + }); + + pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); + + pub static COMPILED_WITH_LIBS: Lazy = + Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); + + pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { + env: Env { + gas_limit: 18446744073709551615, + chain_id: Some(foundry_common::DEV_CHAIN_ID), + tx_origin: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), + block_number: 1, + block_timestamp: 1, + ..Default::default() + }, + sender: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), + initial_balance: U256::MAX, + ffi: true, + memory_limit: 2u64.pow(24), + ..Default::default() + }); + + pub fn test_executor() -> Executor { + let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); + ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) + } + + pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { + let cfg = proptest::test_runner::Config { 
failure_persistence: None, ..Default::default() }; + + FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) + } + + pub mod filter { + use super::*; + use regex::Regex; + + pub struct Filter { + test_regex: Regex, + contract_regex: Regex, + path_regex: Regex, + } + + impl Filter { + pub fn new(test_pattern: &str, contract_pattern: &str, path_pattern: &str) -> Self { + Filter { + test_regex: Regex::new(test_pattern).unwrap(), + contract_regex: Regex::new(contract_pattern).unwrap(), + path_regex: Regex::new(path_pattern).unwrap(), + } + } + + pub fn matches_all() -> Self { + Filter { + test_regex: Regex::new(".*").unwrap(), + contract_regex: Regex::new(".*").unwrap(), + path_regex: Regex::new(".*").unwrap(), + } + } + } + + impl TestFilter for Filter { + fn matches_test(&self, test_name: impl AsRef) -> bool { + self.test_regex.is_match(test_name.as_ref()) + } + + fn matches_contract(&self, contract_name: impl AsRef) -> bool { + self.contract_regex.is_match(contract_name.as_ref()) + } + + fn matches_path(&self, path: impl AsRef) -> bool { + self.path_regex.is_match(path.as_ref()) + } + } + } +} \ No newline at end of file diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 92fb97d87d6b..b5871a53c7a2 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -118,10 +118,14 @@ impl MultiContractRunner { .contracts .par_iter() .filter(|(id, _)| { + dbg!("filter matches"); filter.matches_path(id.source.to_string_lossy()) && filter.matches_contract(&id.name) }) - .filter(|(_, (abi, _, _))| abi.functions().any(|func| filter.matches_test(&func.name))) + .filter(|(_, (abi, _, _))| { + dbg!("filter funs"); + abi.functions().any(|func| filter.matches_test(&func.name)) + }) .map(|(id, (abi, deploy_code, libs))| { let mut builder = ExecutorBuilder::new() .with_cheatcodes(self.evm_opts.ffi) @@ -210,8 +214,8 @@ impl MultiContractRunnerBuilder { output: ProjectCompileOutput, evm_opts: EvmOpts, ) -> Result - where - A: ArtifactOutput, + where + A: ArtifactOutput, { // This is just the contracts compiled, but we need to merge this with the read cached // artifacts @@ -512,6 +516,7 @@ mod tests { #[test] fn test_logs() { let mut runner = runner(); + dbg!(runner.contracts.keys()); let results = runner.test(&Filter::new(".*", ".*", ".*logs"), None, true).unwrap(); assert_multiple( diff --git a/forge/src/traits.rs b/forge/src/traits.rs index 431d4850dcb0..efb31076166b 100644 --- a/forge/src/traits.rs +++ b/forge/src/traits.rs @@ -20,128 +20,131 @@ impl TestFunctionExt for Function { } } -#[cfg(test)] -pub mod test_helpers { - use super::*; - use ethers::{ - prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, - solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, - types::{Address, U256}, - }; - use foundry_evm::{ - executor::{ - backend::Backend, - opts::{Env, EvmOpts}, - DatabaseRef, Executor, ExecutorBuilder, - }, - fuzz::FuzzedExecutor, - CALLER, - }; - use std::str::FromStr; - - pub static PROJECT: Lazy = Lazy::new(|| { - let paths = ProjectPathsConfig::builder() - .root("../testdata") - .sources("../testdata") - .build() - .unwrap(); - Project::builder().paths(paths).ephemeral().no_artifacts().build().unwrap() - }); - - pub static LIBS_PROJECT: Lazy = Lazy::new(|| { - let paths = ProjectPathsConfig::builder() - .root("../testdata") - .sources("../testdata") - .build() - .unwrap(); - let libs = - ["fork/Fork.t.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string()]; - - let 
settings = - Settings { libraries: Libraries::parse(&libs).unwrap(), ..Default::default() }; - - let solc_config = SolcConfig::builder().settings(settings).build(); - Project::builder() - .paths(paths) - .ephemeral() - .no_artifacts() - .solc_config(solc_config) - .build() - .unwrap() - }); - - pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); - - pub static COMPILED_WITH_LIBS: Lazy = - Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); - - pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { - env: Env { - gas_limit: 18446744073709551615, - chain_id: Some(foundry_common::DEV_CHAIN_ID), - tx_origin: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), - block_number: 1, - block_timestamp: 1, - ..Default::default() - }, - sender: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), - initial_balance: U256::MAX, - ffi: true, - memory_limit: 2u64.pow(24), - ..Default::default() - }); - - pub fn test_executor() -> Executor { - let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); - ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) - } - - pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { - let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() }; - - FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) - } - - pub mod filter { - use super::*; - use regex::Regex; - - pub struct Filter { - test_regex: Regex, - contract_regex: Regex, - path_regex: Regex, - } - - impl Filter { - pub fn new(test_pattern: &str, contract_pattern: &str, path_pattern: &str) -> Self { - Filter { - test_regex: Regex::new(test_pattern).unwrap(), - contract_regex: Regex::new(contract_pattern).unwrap(), - path_regex: Regex::new(path_pattern).unwrap(), - } - } - - pub fn matches_all() -> Self { - Filter { - test_regex: Regex::new(".*").unwrap(), - contract_regex: Regex::new(".*").unwrap(), - path_regex: Regex::new(".*").unwrap(), - } - } - } - - impl TestFilter for Filter { - fn matches_test(&self, test_name: impl AsRef) -> bool { - self.test_regex.is_match(test_name.as_ref()) - } - - fn matches_contract(&self, contract_name: impl AsRef) -> bool { - self.contract_regex.is_match(contract_name.as_ref()) - } - - fn matches_path(&self, path: impl AsRef) -> bool { - self.path_regex.is_match(path.as_ref()) - } - } - } -} +// #[cfg(test)] +// pub mod test_helpers { +// use std::path::PathBuf; +// use super::*; +// use ethers::{ +// prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, +// solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, +// types::{Address, U256}, +// }; +// use foundry_evm::{ +// executor::{ +// backend::Backend, +// opts::{Env, EvmOpts}, +// DatabaseRef, Executor, ExecutorBuilder, +// }, +// fuzz::FuzzedExecutor, +// CALLER, +// }; +// use std::str::FromStr; +// +// pub static PROJECT: Lazy = Lazy::new(|| { +// let root = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../testdata"); +// let paths = ProjectPathsConfig::builder() +// .root(root.clone()) +// .sources(root.clone()) +// .build() +// .unwrap(); +// Project::builder().paths(paths).ephemeral().no_artifacts().build().unwrap() +// }); +// +// pub static LIBS_PROJECT: Lazy = Lazy::new(|| { +// let root = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../testdata"); +// let paths = ProjectPathsConfig::builder() +// .root(root.clone()) +// .sources(root.clone()) +// .build() +// .unwrap(); +// let libs = +// 
["fork/Fork.t.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string()]; +// +// let settings = +// Settings { libraries: Libraries::parse(&libs).unwrap(), ..Default::default() }; +// +// let solc_config = SolcConfig::builder().settings(settings).build(); +// Project::builder() +// .paths(paths) +// .ephemeral() +// .no_artifacts() +// .solc_config(solc_config) +// .build() +// .unwrap() +// }); +// +// pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); +// +// pub static COMPILED_WITH_LIBS: Lazy = +// Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); +// +// pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { +// env: Env { +// gas_limit: 18446744073709551615, +// chain_id: Some(foundry_common::DEV_CHAIN_ID), +// tx_origin: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), +// block_number: 1, +// block_timestamp: 1, +// ..Default::default() +// }, +// sender: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), +// initial_balance: U256::MAX, +// ffi: true, +// memory_limit: 2u64.pow(24), +// ..Default::default() +// }); +// +// pub fn test_executor() -> Executor { +// let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); +// ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) +// } +// +// pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { +// let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() }; +// +// FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) +// } +// +// pub mod filter { +// use super::*; +// use regex::Regex; +// +// pub struct Filter { +// test_regex: Regex, +// contract_regex: Regex, +// path_regex: Regex, +// } +// +// impl Filter { +// pub fn new(test_pattern: &str, contract_pattern: &str, path_pattern: &str) -> Self { +// Filter { +// test_regex: Regex::new(test_pattern).unwrap(), +// contract_regex: Regex::new(contract_pattern).unwrap(), +// path_regex: Regex::new(path_pattern).unwrap(), +// } +// } +// +// pub fn matches_all() -> Self { +// Filter { +// test_regex: Regex::new(".*").unwrap(), +// contract_regex: Regex::new(".*").unwrap(), +// path_regex: Regex::new(".*").unwrap(), +// } +// } +// } +// +// impl TestFilter for Filter { +// fn matches_test(&self, test_name: impl AsRef) -> bool { +// self.test_regex.is_match(test_name.as_ref()) +// } +// +// fn matches_contract(&self, contract_name: impl AsRef) -> bool { +// self.contract_regex.is_match(contract_name.as_ref()) +// } +// +// fn matches_path(&self, path: impl AsRef) -> bool { +// self.path_regex.is_match(path.as_ref()) +// } +// } +// } +// } From fa36c68817c096306dae7880fc3782a9b64a1b24 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 19:58:49 +0200 Subject: [PATCH 026/102] fix: failing tests --- forge/src/lib.rs | 130 +------------------------------------ forge/src/multi_runner.rs | 11 +--- forge/src/test_helpers.rs | 117 +++++++++++++++++++++++++++++++++ forge/src/traits.rs | 129 ------------------------------------ testdata/cheats/Cheats.sol | 30 ++++----- 5 files changed, 138 insertions(+), 279 deletions(-) create mode 100644 forge/src/test_helpers.rs diff --git a/forge/src/lib.rs b/forge/src/lib.rs index 38ad6ab20181..ed99b98fc78f 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -14,132 +14,8 @@ pub use traits::*; pub mod result; -/// The Forge EVM backend -pub use foundry_evm::*; - - #[cfg(test)] -pub mod test_helpers { - use crate::TestFilter; - use ethers::{ - 
prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, - solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, - types::{Address, U256}, - }; - use foundry_evm::{ - executor::{ - Backend, - opts::{Env, EvmOpts}, - DatabaseRef, Executor, ExecutorBuilder, - }, - fuzz::FuzzedExecutor, - CALLER, - }; - use std::str::FromStr; - - pub static PROJECT: Lazy = Lazy::new(|| { - let paths = ProjectPathsConfig::builder() - .root("../testdata") - .sources("../testdata") - .build() - .unwrap(); - Project::builder().paths(paths).ephemeral().no_artifacts().build().unwrap() - }); - - pub static LIBS_PROJECT: Lazy = Lazy::new(|| { - let paths = ProjectPathsConfig::builder() - .root("../testdata") - .sources("../testdata") - .build() - .unwrap(); - let libs = - ["fork/Fork.t.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string()]; - - let settings = - Settings { libraries: Libraries::parse(&libs).unwrap(), ..Default::default() }; - - let solc_config = SolcConfig::builder().settings(settings).build(); - Project::builder() - .paths(paths) - .ephemeral() - .no_artifacts() - .solc_config(solc_config) - .build() - .unwrap() - }); - - pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); - - pub static COMPILED_WITH_LIBS: Lazy = - Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); - - pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { - env: Env { - gas_limit: 18446744073709551615, - chain_id: Some(foundry_common::DEV_CHAIN_ID), - tx_origin: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), - block_number: 1, - block_timestamp: 1, - ..Default::default() - }, - sender: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), - initial_balance: U256::MAX, - ffi: true, - memory_limit: 2u64.pow(24), - ..Default::default() - }); +mod test_helpers; - pub fn test_executor() -> Executor { - let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); - ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) - } - - pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { - let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() }; - - FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) - } - - pub mod filter { - use super::*; - use regex::Regex; - - pub struct Filter { - test_regex: Regex, - contract_regex: Regex, - path_regex: Regex, - } - - impl Filter { - pub fn new(test_pattern: &str, contract_pattern: &str, path_pattern: &str) -> Self { - Filter { - test_regex: Regex::new(test_pattern).unwrap(), - contract_regex: Regex::new(contract_pattern).unwrap(), - path_regex: Regex::new(path_pattern).unwrap(), - } - } - - pub fn matches_all() -> Self { - Filter { - test_regex: Regex::new(".*").unwrap(), - contract_regex: Regex::new(".*").unwrap(), - path_regex: Regex::new(".*").unwrap(), - } - } - } - - impl TestFilter for Filter { - fn matches_test(&self, test_name: impl AsRef) -> bool { - self.test_regex.is_match(test_name.as_ref()) - } - - fn matches_contract(&self, contract_name: impl AsRef) -> bool { - self.contract_regex.is_match(contract_name.as_ref()) - } - - fn matches_path(&self, path: impl AsRef) -> bool { - self.path_regex.is_match(path.as_ref()) - } - } - } -} \ No newline at end of file +/// The Forge EVM backend +pub use foundry_evm::*; diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index b5871a53c7a2..92fb97d87d6b 100644 --- a/forge/src/multi_runner.rs +++ 
b/forge/src/multi_runner.rs @@ -118,14 +118,10 @@ impl MultiContractRunner { .contracts .par_iter() .filter(|(id, _)| { - dbg!("filter matches"); filter.matches_path(id.source.to_string_lossy()) && filter.matches_contract(&id.name) }) - .filter(|(_, (abi, _, _))| { - dbg!("filter funs"); - abi.functions().any(|func| filter.matches_test(&func.name)) - }) + .filter(|(_, (abi, _, _))| abi.functions().any(|func| filter.matches_test(&func.name))) .map(|(id, (abi, deploy_code, libs))| { let mut builder = ExecutorBuilder::new() .with_cheatcodes(self.evm_opts.ffi) @@ -214,8 +210,8 @@ impl MultiContractRunnerBuilder { output: ProjectCompileOutput, evm_opts: EvmOpts, ) -> Result - where - A: ArtifactOutput, + where + A: ArtifactOutput, { // This is just the contracts compiled, but we need to merge this with the read cached // artifacts @@ -516,7 +512,6 @@ mod tests { #[test] fn test_logs() { let mut runner = runner(); - dbg!(runner.contracts.keys()); let results = runner.test(&Filter::new(".*", ".*", ".*logs"), None, true).unwrap(); assert_multiple( diff --git a/forge/src/test_helpers.rs b/forge/src/test_helpers.rs new file mode 100644 index 000000000000..a0fb94943867 --- /dev/null +++ b/forge/src/test_helpers.rs @@ -0,0 +1,117 @@ +#![allow(unused)] + +use super::*; +use ethers::{ + prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, + solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, + types::{Address, U256}, +}; +use foundry_evm::{ + executor::{ + backend::Backend, + opts::{Env, EvmOpts}, + DatabaseRef, Executor, ExecutorBuilder, + }, + fuzz::FuzzedExecutor, + CALLER, +}; +use std::{path::PathBuf, str::FromStr}; + +pub static PROJECT: Lazy = Lazy::new(|| { + let root = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../testdata"); + let paths = ProjectPathsConfig::builder().root(root.clone()).sources(root).build().unwrap(); + Project::builder().paths(paths).ephemeral().no_artifacts().build().unwrap() +}); + +pub static LIBS_PROJECT: Lazy = Lazy::new(|| { + let root = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../testdata"); + let paths = ProjectPathsConfig::builder().root(root.clone()).sources(root).build().unwrap(); + let libs = + ["fork/Fork.t.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string()]; + + let settings = Settings { libraries: Libraries::parse(&libs).unwrap(), ..Default::default() }; + + let solc_config = SolcConfig::builder().settings(settings).build(); + Project::builder() + .paths(paths) + .ephemeral() + .no_artifacts() + .solc_config(solc_config) + .build() + .unwrap() +}); + +pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); + +pub static COMPILED_WITH_LIBS: Lazy = + Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); + +pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { + env: Env { + gas_limit: 18446744073709551615, + chain_id: Some(foundry_common::DEV_CHAIN_ID), + tx_origin: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), + block_number: 1, + block_timestamp: 1, + ..Default::default() + }, + sender: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), + initial_balance: U256::MAX, + ffi: true, + memory_limit: 2u64.pow(24), + ..Default::default() +}); + +pub fn test_executor() -> Executor { + let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); + ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) +} + +pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { + let cfg = 
proptest::test_runner::Config { failure_persistence: None, ..Default::default() }; + + FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) +} + +pub mod filter { + use super::*; + use regex::Regex; + + pub struct Filter { + test_regex: Regex, + contract_regex: Regex, + path_regex: Regex, + } + + impl Filter { + pub fn new(test_pattern: &str, contract_pattern: &str, path_pattern: &str) -> Self { + Filter { + test_regex: Regex::new(test_pattern).unwrap(), + contract_regex: Regex::new(contract_pattern).unwrap(), + path_regex: Regex::new(path_pattern).unwrap(), + } + } + + pub fn matches_all() -> Self { + Filter { + test_regex: Regex::new(".*").unwrap(), + contract_regex: Regex::new(".*").unwrap(), + path_regex: Regex::new(".*").unwrap(), + } + } + } + + impl TestFilter for Filter { + fn matches_test(&self, test_name: impl AsRef) -> bool { + self.test_regex.is_match(test_name.as_ref()) + } + + fn matches_contract(&self, contract_name: impl AsRef) -> bool { + self.contract_regex.is_match(contract_name.as_ref()) + } + + fn matches_path(&self, path: impl AsRef) -> bool { + self.path_regex.is_match(path.as_ref()) + } + } +} diff --git a/forge/src/traits.rs b/forge/src/traits.rs index efb31076166b..fa594f7bd62c 100644 --- a/forge/src/traits.rs +++ b/forge/src/traits.rs @@ -19,132 +19,3 @@ impl TestFunctionExt for Function { !self.inputs.is_empty() } } - -// #[cfg(test)] -// pub mod test_helpers { -// use std::path::PathBuf; -// use super::*; -// use ethers::{ -// prelude::{artifacts::Settings, Lazy, ProjectCompileOutput, SolcConfig}, -// solc::{artifacts::Libraries, utils::RuntimeOrHandle, Project, ProjectPathsConfig}, -// types::{Address, U256}, -// }; -// use foundry_evm::{ -// executor::{ -// backend::Backend, -// opts::{Env, EvmOpts}, -// DatabaseRef, Executor, ExecutorBuilder, -// }, -// fuzz::FuzzedExecutor, -// CALLER, -// }; -// use std::str::FromStr; -// -// pub static PROJECT: Lazy = Lazy::new(|| { -// let root = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../testdata"); -// let paths = ProjectPathsConfig::builder() -// .root(root.clone()) -// .sources(root.clone()) -// .build() -// .unwrap(); -// Project::builder().paths(paths).ephemeral().no_artifacts().build().unwrap() -// }); -// -// pub static LIBS_PROJECT: Lazy = Lazy::new(|| { -// let root = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../testdata"); -// let paths = ProjectPathsConfig::builder() -// .root(root.clone()) -// .sources(root.clone()) -// .build() -// .unwrap(); -// let libs = -// ["fork/Fork.t.sol:DssExecLib:0xfD88CeE74f7D78697775aBDAE53f9Da1559728E4".to_string()]; -// -// let settings = -// Settings { libraries: Libraries::parse(&libs).unwrap(), ..Default::default() }; -// -// let solc_config = SolcConfig::builder().settings(settings).build(); -// Project::builder() -// .paths(paths) -// .ephemeral() -// .no_artifacts() -// .solc_config(solc_config) -// .build() -// .unwrap() -// }); -// -// pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); -// -// pub static COMPILED_WITH_LIBS: Lazy = -// Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); -// -// pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { -// env: Env { -// gas_limit: 18446744073709551615, -// chain_id: Some(foundry_common::DEV_CHAIN_ID), -// tx_origin: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), -// block_number: 1, -// block_timestamp: 1, -// ..Default::default() -// }, -// sender: Address::from_str("00a329c0648769a73afac7f9381e08fb43dbea72").unwrap(), -// 
initial_balance: U256::MAX, -// ffi: true, -// memory_limit: 2u64.pow(24), -// ..Default::default() -// }); -// -// pub fn test_executor() -> Executor { -// let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); -// ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) -// } -// -// pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { -// let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() }; -// -// FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) -// } -// -// pub mod filter { -// use super::*; -// use regex::Regex; -// -// pub struct Filter { -// test_regex: Regex, -// contract_regex: Regex, -// path_regex: Regex, -// } -// -// impl Filter { -// pub fn new(test_pattern: &str, contract_pattern: &str, path_pattern: &str) -> Self { -// Filter { -// test_regex: Regex::new(test_pattern).unwrap(), -// contract_regex: Regex::new(contract_pattern).unwrap(), -// path_regex: Regex::new(path_pattern).unwrap(), -// } -// } -// -// pub fn matches_all() -> Self { -// Filter { -// test_regex: Regex::new(".*").unwrap(), -// contract_regex: Regex::new(".*").unwrap(), -// path_regex: Regex::new(".*").unwrap(), -// } -// } -// } -// -// impl TestFilter for Filter { -// fn matches_test(&self, test_name: impl AsRef) -> bool { -// self.test_regex.is_match(test_name.as_ref()) -// } -// -// fn matches_contract(&self, contract_name: impl AsRef) -> bool { -// self.contract_regex.is_match(contract_name.as_ref()) -// } -// -// fn matches_path(&self, path: impl AsRef) -> bool { -// self.path_regex.is_match(path.as_ref()) -// } -// } -// } -// } diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index e0073f29a402..66e7839f2933 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -103,19 +103,19 @@ interface Cheats { function startBroadcast(address) external; // Stops collecting onchain transactions function stopBroadcast() external; - // Snapshot the current state of the evm. - // Returns the id of the snapshot that was created. - // To revert a snapshot use `revertTo` - function snapshot() external returns(uint256); - // Revert the state of the evm to a previous snapshot - // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. - function revertTo(uint256) external; - // Creates a new fork with the given endpoint and block and returns the identifier of the fork - function createFork(string,uint256) external returns(uint256); - // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork - function createFork(string) external returns(uint256); - // takes a fork identifier created by `createFork` and changes the state - function switchFork(uint256) external; - // forks the `block` variable from the given endpoint - function forkBlockVariable(string, uint256) external; +// // Snapshot the current state of the evm. +// // Returns the id of the snapshot that was created. +// // To revert a snapshot use `revertTo` +// function snapshot() external returns(uint256); +// // Revert the state of the evm to a previous snapshot +// // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. 
+// function revertTo(uint256) external; +// // Creates a new fork with the given endpoint and block and returns the identifier of the fork +// function createFork(string,uint256) external returns(uint256); +// // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork +// function createFork(string) external returns(uint256); +// // takes a fork identifier created by `createFork` and changes the state +// function switchFork(uint256) external; +// // forks the `block` variable from the given endpoint +// function forkBlockVariable(string, uint256) external; } From 6acbfce6d63d17d6a02f306f6ba67fa9a3939716 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 5 Jun 2022 20:11:35 +0200 Subject: [PATCH 027/102] test: ensure solc finished successfully --- forge/src/multi_runner.rs | 2 ++ forge/src/test_helpers.rs | 19 ++++++++++++++++--- testdata/cheats/Cheats.sol | 30 +++++++++++++++--------------- 3 files changed, 33 insertions(+), 18 deletions(-) diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 92fb97d87d6b..dd47d744e273 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -337,6 +337,7 @@ mod tests { }, }; use foundry_evm::trace::TraceKind; + use foundry_utils::init_tracing_subscriber; use std::env; /// Builds a base runner @@ -511,6 +512,7 @@ mod tests { #[test] fn test_logs() { + init_tracing_subscriber(); let mut runner = runner(); let results = runner.test(&Filter::new(".*", ".*", ".*logs"), None, true).unwrap(); diff --git a/forge/src/test_helpers.rs b/forge/src/test_helpers.rs index a0fb94943867..a91df3763e39 100644 --- a/forge/src/test_helpers.rs +++ b/forge/src/test_helpers.rs @@ -41,10 +41,23 @@ pub static LIBS_PROJECT: Lazy = Lazy::new(|| { .unwrap() }); -pub static COMPILED: Lazy = Lazy::new(|| (*PROJECT).compile().unwrap()); +pub static COMPILED: Lazy = Lazy::new(|| { + let out = (*PROJECT).compile().unwrap(); + if out.has_compiler_errors() { + eprintln!("{}", out); + panic!("Compiled with errors"); + } + out +}); -pub static COMPILED_WITH_LIBS: Lazy = - Lazy::new(|| (*LIBS_PROJECT).compile().unwrap()); +pub static COMPILED_WITH_LIBS: Lazy = Lazy::new(|| { + let out = (*LIBS_PROJECT).compile().unwrap(); + if out.has_compiler_errors() { + eprintln!("{}", out); + panic!("Compiled with errors"); + } + out +}); pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { env: Env { diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 66e7839f2933..79522fc7b5aa 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -103,19 +103,19 @@ interface Cheats { function startBroadcast(address) external; // Stops collecting onchain transactions function stopBroadcast() external; -// // Snapshot the current state of the evm. -// // Returns the id of the snapshot that was created. -// // To revert a snapshot use `revertTo` -// function snapshot() external returns(uint256); -// // Revert the state of the evm to a previous snapshot -// // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. 
-// function revertTo(uint256) external; -// // Creates a new fork with the given endpoint and block and returns the identifier of the fork -// function createFork(string,uint256) external returns(uint256); -// // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork -// function createFork(string) external returns(uint256); -// // takes a fork identifier created by `createFork` and changes the state -// function switchFork(uint256) external; -// // forks the `block` variable from the given endpoint -// function forkBlockVariable(string, uint256) external; + // Snapshot the current state of the evm. + // Returns the id of the snapshot that was created. + // To revert a snapshot use `revertTo` + function snapshot() external returns(uint256); + // Revert the state of the evm to a previous snapshot + // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. + function revertTo(uint256) external; + // Creates a new fork with the given endpoint and block and returns the identifier of the fork + function createFork(string calldata,uint256) external returns(uint256); + // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork + function createFork(string calldata) external returns(uint256); + // takes a fork identifier created by `createFork` and changes the state + function switchFork(uint256) external; + // forks the `block` variable from the given endpoint + function forkBlockVariable(string calldata, uint256) external; } From ca75693df333f316e189e321a2388fb7be819903 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 13:20:01 +0200 Subject: [PATCH 028/102] refactor: introduce more types --- forge/src/lib.rs | 2 ++ forge/src/multi_runner.rs | 2 +- forge/src/traits.rs | 18 ++++++++++++++++++ forge/src/types.rs | 38 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 forge/src/types.rs diff --git a/forge/src/lib.rs b/forge/src/lib.rs index ed99b98fc78f..9bb6d0bf1369 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -12,6 +12,8 @@ pub use multi_runner::{MultiContractRunner, MultiContractRunnerBuilder}; mod traits; pub use traits::*; +mod types; + pub mod result; #[cfg(test)] diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index dd47d744e273..e302aec32d54 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -41,7 +41,7 @@ pub struct MultiContractRunner { } impl MultiContractRunner { - pub fn count_filtered_tests(&self, filter: &(impl TestFilter + Send + Sync)) -> usize { + pub fn count_filtered_tests(&self, filter: &impl TestFilter) -> usize { self.contracts .iter() .filter(|(id, _)| { diff --git a/forge/src/traits.rs b/forge/src/traits.rs index fa594f7bd62c..ecd5c4d922dc 100644 --- a/forge/src/traits.rs +++ b/forge/src/traits.rs @@ -11,6 +11,12 @@ pub trait TestFilter: Send + Sync { pub(crate) trait TestFunctionExt { /// Whether this function should be executed as fuzz test fn is_fuzz_test(&self) -> bool; + /// Whether this function is a test + fn is_test(&self) -> bool; + /// Whether this function is a test that should fail + fn is_test_fail(&self) -> bool; + /// Whether this function is a `setUp` function + fn is_setup(&self) -> bool; } impl TestFunctionExt for Function { @@ -18,4 +24,16 @@ impl TestFunctionExt for Function { // test functions that have inputs are considered fuzz tests as those inputs will be fuzzed 
!self.inputs.is_empty() } + + fn is_test(&self) -> bool { + self.name.starts_with("test") + } + + fn is_test_fail(&self) -> bool { + self.name.starts_with("testFail") + } + + fn is_setup(&self) -> bool { + self.name.to_lowercase() == "setup" + } } diff --git a/forge/src/types.rs b/forge/src/types.rs new file mode 100644 index 000000000000..595ca98c66f1 --- /dev/null +++ b/forge/src/types.rs @@ -0,0 +1,38 @@ +use std::collections::{BTreeMap, HashMap}; +use std::path::PathBuf; +use ethers::abi::{Event, Function, Abi, AbiError}; +use ethers::solc::artifacts::CompactContractBytecode; +use ethers::types::H256; + +/// Represents a solidity Contract that's a test target +#[derive(Debug, Clone)] +pub struct TestContract { + /// All functions keyed by their short signature + pub functions: BTreeMap<[u8; 4], TestFunction>, + + /// contract's bytecode objects + pub bytecode: CompactContractBytecode, + + /// location of the contract + pub source: PathBuf, + + /// all events of the contract + pub events: BTreeMap, + + /// all errors of the contract + pub errors: BTreeMap>, +} + +/// A solidity function that can be tested +#[derive(Debug, Clone)] +pub struct TestFunction { + pub function: Function, + /// the function's signature + pub signature: String, +} + +// === impl TestFunction === + +impl TestFunction { + +} \ No newline at end of file From a1e3f724430416599da59104918411db2e19c053 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 16:55:38 +0200 Subject: [PATCH 029/102] feat: add a bunch of revm trait impls --- evm/src/executor/backend/mod.rs | 60 ++++++- evm/src/executor/builder.rs | 106 ++++++------ evm/src/executor/fork/mod.rs | 2 + evm/src/executor/fork/multi.rs | 11 +- evm/src/executor/mod.rs | 281 ++++++++++++++++---------------- evm/src/fuzz/mod.rs | 20 +-- forge/src/multi_runner.rs | 155 +++++++++++++----- forge/src/types.rs | 18 +- 8 files changed, 394 insertions(+), 259 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index d4e42b14614a..9b7365decb6c 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -1,8 +1,10 @@ use crate::executor::{fork::SharedBackend, Fork}; +use bytes::Bytes; use ethers::prelude::{H160, H256, U256}; +use hashbrown::HashMap as Map; use revm::{ db::{CacheDB, DatabaseRef, EmptyDB}, - AccountInfo, Env, + Account, AccountInfo, Database, DatabaseCommit, Env, }; use std::collections::HashMap; use tracing::{trace, warn}; @@ -91,6 +93,18 @@ impl Backend2 { Self { forks, db, created_forks: Default::default(), snapshots: Default::default() } } + /// Creates a new instance with a `BackendDatabase::InMemory` cache layer for the `CacheDB` + pub fn clone_empty(&self) -> Self { + let mut db = self.db.clone(); + *db.db_mut() = BackendDatabase::InMemory(EmptyDB()); + Self { + forks: self.forks.clone(), + created_forks: Default::default(), + db, + snapshots: Default::default(), + } + } + /// Creates a new snapshot pub fn snapshot(&mut self) -> U256 { let id = self.snapshots.insert(self.db.clone()); @@ -139,6 +153,50 @@ impl Backend2 { } } +// a bunch of delegate revm trait implementations + +impl DatabaseRef for Backend2 { + fn basic(&self, address: H160) -> AccountInfo { + self.db.basic(address) + } + + fn code_by_hash(&self, address: H256) -> bytes::Bytes { + self.db.code_by_hash(address) + } + + fn storage(&self, address: H160, index: U256) -> U256 { + DatabaseRef::storage(&self.db, address, index) + } + + fn block_hash(&self, number: U256) -> H256 { + self.db.block_hash(number) + } +} 
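// Illustrative sketch, adapted from the `Executor` call path later in this
// series: the delegating impls above and below are what allow a `Backend2`
// to be handed to revm directly. The committing call site looks roughly like
// the following, reusing only calls that appear elsewhere in these patches
// (`build_env`, `inspector_config.stack()`, `inspect_commit`); the variable
// names are illustrative only:
//
//     let mut evm = revm::EVM::new();
//     evm.env = executor.build_env(from, TransactTo::Call(to), calldata, value);
//     // `&mut Backend2` satisfies `Database + DatabaseCommit`, so state
//     // changes are committed straight into the wrapped `CacheDB`.
//     evm.database(&mut executor.backend);
//     let mut inspector = executor.inspector_config.stack();
//     let (status, out, gas, _) = evm.inspect_commit(&mut inspector);
//
// The read-only path passes `&executor.backend` instead, which only needs the
// `DatabaseRef` impl above.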
+ +impl DatabaseCommit for Backend2 { + fn commit(&mut self, changes: Map) { + self.db.commit(changes) + } +} + +impl Database for Backend2 { + fn basic(&mut self, address: H160) -> AccountInfo { + self.db.basic(address) + } + + fn code_by_hash(&mut self, code_hash: H256) -> Bytes { + self.db.code_by_hash(address) + } + + fn storage(&mut self, address: H160, index: U256) -> U256 { + Database::storage(&mut self.db, address, index) + } + + fn block_hash(&mut self, number: U256) -> H256 { + self.db.block_hash(number) + } +} + /// Variants of a [revm::Database] #[derive(Debug, Clone)] pub enum BackendDatabase { diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index 83f04778fc42..df703b86bda4 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -4,7 +4,7 @@ use super::{ Executor, }; use crate::executor::{ - backend::Backend, + backend::{Backend, Backend2}, fork::{BlockchainDb, BlockchainDbMeta}, }; use ethers::{ @@ -23,51 +23,7 @@ pub struct ExecutorBuilder { gas_limit: Option, } -/// Represents a _fork_ of a live chain whose data is available only via the `url` endpoint. -/// -/// *Note:* this type intentionally does not implement `Clone` to prevent [Fork::spawn_backend()] -/// from being called multiple times. -#[derive(Debug)] -pub struct Fork { - /// Where to read the cached storage from - pub cache_path: Option, - /// The URL to a node for fetching remote state - pub url: String, - /// The block to fork against - pub pin_block: Option, - /// chain id retrieved from the endpoint - pub chain_id: u64, -} - -impl Fork { - /// Initialises and spawns the Storage Backend, the [revm::Database] - /// - /// If configured, then this will initialise the backend with the storage cache. - /// - /// The `SharedBackend` returned is connected to a background thread that communicates with the - /// endpoint via channels and is intended to be cloned when multiple [revm::Database] are - /// required. See also [crate::executor::fork::SharedBackend] - pub async fn spawn_backend(self, env: &Env) -> SharedBackend { - let Fork { cache_path, url, pin_block, chain_id } = self; - - let provider = Arc::new( - Provider::>::new_client(url.clone().as_str(), 10, 1000) - .expect("Failed to establish provider"), - ); - - let mut meta = BlockchainDbMeta::new(env.clone(), url); - - // update the meta to match the forked config - meta.cfg_env.chain_id = chain_id.into(); - if let Some(pin) = pin_block { - meta.block_env.number = pin.into(); - } - - let db = BlockchainDb::new(meta, cache_path); - - SharedBackend::spawn_backend(provider, db, pin_block.map(Into::into)).await - } -} +// === impl ExecutorBuilder === impl ExecutorBuilder { #[must_use] @@ -86,7 +42,13 @@ impl ExecutorBuilder { /// Enables tracing #[must_use] pub fn with_tracing(mut self) -> Self { - self.inspector_config.tracing = true; + self.set_tracing(true) + } + + /// Sets the tracing verbosity + #[must_use] + pub fn set_tracing(mut self, with_tracing: bool) -> Self { + self.inspector_config.tracing = with_tracing; self } @@ -123,8 +85,54 @@ impl ExecutorBuilder { } /// Builds the executor as configured. - pub fn build(self, db: impl Into) -> Executor { + pub fn build(self, db: Backend2) -> Executor { let gas_limit = self.gas_limit.unwrap_or(self.env.block.gas_limit); - Executor::new(db.into(), self.env, self.inspector_config, gas_limit) + Executor::new(db, self.env, self.inspector_config, gas_limit) + } +} + +/// Represents a _fork_ of a live chain whose data is available only via the `url` endpoint. 
+/// +/// *Note:* this type intentionally does not implement `Clone` to prevent [Fork::spawn_backend()] +/// from being called multiple times. +#[derive(Debug)] +pub struct Fork { + /// Where to read the cached storage from + pub cache_path: Option, + /// The URL to a node for fetching remote state + pub url: String, + /// The block to fork against + pub pin_block: Option, + /// chain id retrieved from the endpoint + pub chain_id: u64, +} + +impl Fork { + /// Initialises and spawns the Storage Backend, the [revm::Database] + /// + /// If configured, then this will initialise the backend with the storage cache. + /// + /// The `SharedBackend` returned is connected to a background thread that communicates with the + /// endpoint via channels and is intended to be cloned when multiple [revm::Database] are + /// required. See also [crate::executor::fork::SharedBackend] + pub async fn spawn_backend(self, env: &Env) -> SharedBackend { + let Fork { cache_path, url, pin_block, chain_id } = self; + + let provider = Arc::new( + Provider::>::new_client(url.clone().as_str(), 10, 1000) + .expect("Failed to establish provider"), + ); + + let mut meta = BlockchainDbMeta::new(env.clone(), url); + + // update the meta to match the forked config + meta.cfg_env.chain_id = chain_id.into(); + if let Some(pin) = pin_block { + meta.block_env.number = pin.into(); + } + + let db = BlockchainDb::new(meta, cache_path); + + SharedBackend::spawn_backend(provider, db, pin_block.map(Into::into)).await } } diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index 641b6c104f82..0ed46bfbbc2d 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -25,6 +25,8 @@ pub struct CreateFork { pub url: String, /// The block to fork against pub block: BlockNumber, + /// chain id to use, if `None` then the chain_id will be fetched from the endpoint + pub chain_id: Option, /// The env to create this fork, main purpose is to provide some metadata for the fork pub env: Env, } diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 3dc19ffe168a..2fa15f17251a 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -10,7 +10,7 @@ use ethers::{ abi::{AbiDecode, AbiEncode, AbiError}, prelude::Middleware, providers::{Http, Provider, RetryClient}, - types::{BlockId, BlockNumber}, + types::{BlockId, BlockNumber, U256}, }; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -280,7 +280,7 @@ async fn create_fork( retries: u32, backoff: u64, ) -> eyre::Result<(SharedBackend, Handler)> { - let CreateFork { cache_path, url, block: block_number, env } = fork; + let CreateFork { cache_path, url, block: block_number, env, chain_id } = fork; let provider = Arc::new(Provider::>::new_client( url.clone().as_str(), retries, @@ -289,7 +289,12 @@ async fn create_fork( let mut meta = BlockchainDbMeta::new(env, url); // update the meta to match the forked config - meta.cfg_env.chain_id = provider.get_chainid().await?; + let chain_id = if let Some(chain_id) = chain_id { + U256::from(chain_id) + } else { + provider.get_chainid().await? 
+ }; + meta.cfg_env.chain_id = chain_id; let number = match block_number { BlockNumber::Pending | BlockNumber::Latest => provider.get_block_number().await?.as_u64(), diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index ae4f6af43472..1db6610d5484 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -37,138 +37,21 @@ pub use backend::Backend; pub mod snapshot; -use crate::executor::inspector::DEFAULT_CREATE2_DEPLOYER; +use crate::executor::{backend::Backend2, inspector::DEFAULT_CREATE2_DEPLOYER}; pub use builder::{ExecutorBuilder, Fork}; /// A mapping of addresses to their changed state. pub type StateChangeset = HashMap; -#[derive(thiserror::Error, Debug)] -pub enum EvmError { - /// Error which occurred during execution of a transaction - #[error("Execution reverted: {reason} (gas: {gas})")] - Execution { - reverted: bool, - reason: String, - gas: u64, - stipend: u64, - logs: Vec, - traces: Option, - debug: Option, - labels: BTreeMap, - transactions: Option>, - state_changeset: Option, - }, - /// Error which occurred during ABI encoding/decoding - #[error(transparent)] - AbiError(#[from] ethers::contract::AbiError), - /// Any other error. - #[error(transparent)] - Eyre(#[from] eyre::Error), -} - -/// The result of a deployment. -#[derive(Debug)] -pub struct DeployResult { - /// The address of the deployed contract - pub address: Address, - /// The gas cost of the deployment - pub gas: u64, - /// The logs emitted during the deployment - pub logs: Vec, - /// The traces of the deployment - pub traces: Option, - /// The debug nodes of the call - pub debug: Option, -} - -/// The result of a call. -#[derive(Debug)] -pub struct CallResult { - /// Whether the call reverted or not - pub reverted: bool, - /// The decoded result of the call - pub result: D, - /// The gas used for the call - pub gas: u64, - /// The initial gas stipend for the transaction - pub stipend: u64, - /// The logs emitted during the call - pub logs: Vec, - /// The labels assigned to addresses during the call - pub labels: BTreeMap, - /// The traces of the call - pub traces: Option, - /// The debug nodes of the call - pub debug: Option, - /// Scripted transactions generated from this call - pub transactions: Option>, - /// The changeset of the state. - /// - /// This is only present if the changed state was not committed to the database (i.e. if you - /// used `call` and `call_raw` not `call_committing` or `call_raw_committing`). - pub state_changeset: Option, -} - -/// The result of a raw call. -#[derive(Debug)] -pub struct RawCallResult { - /// The status of the call - status: Return, - /// Whether the call reverted or not - pub reverted: bool, - /// The raw result of the call - pub result: Bytes, - /// The gas used for the call - pub gas: u64, - /// The initial gas stipend for the transaction - pub stipend: u64, - /// The logs emitted during the call - pub logs: Vec, - /// The labels assigned to addresses during the call - pub labels: BTreeMap, - /// The traces of the call - pub traces: Option, - /// The debug nodes of the call - pub debug: Option, - /// Scripted transactions generated from this call - pub transactions: Option>, - /// The changeset of the state. - /// - /// This is only present if the changed state was not committed to the database (i.e. if you - /// used `call` and `call_raw` not `call_committing` or `call_raw_committing`). 
- pub state_changeset: Option, -} - -impl Default for RawCallResult { - fn default() -> Self { - Self { - status: Return::Continue, - reverted: false, - result: Bytes::new(), - gas: 0, - stipend: 0, - logs: Vec::new(), - labels: BTreeMap::new(), - traces: None, - debug: None, - transactions: None, - state_changeset: None, - } - } -} - +/// #[derive(Debug)] -pub struct Executor { +pub struct Executor { + /// The underlying `revm::Database` that contains the EVM storage // Note: We do not store an EVM here, since we are really // only interested in the database. REVM's `EVM` is a thin // wrapper around spawning a new EVM on every call anyway, // so the performance difference should be negligible. - // - // Also, if we stored the VM here we would still need to - // take `&mut self` when we are not committing to the database, since - // we need to set `evm.env`. - pub db: CacheDB, + pub backend: Backend2, env: Env, inspector_config: InspectorStackConfig, /// The gas limit for calls and deployments. This is different from the gas limit imposed by @@ -179,31 +62,26 @@ pub struct Executor { // === impl Executor === -impl Executor -where - DB: DatabaseRef, -{ +impl Executor { pub fn new( - inner_db: DB, + mut backend: Backend2, env: Env, inspector_config: InspectorStackConfig, gas_limit: U256, ) -> Self { - let mut db = CacheDB::new(inner_db); - // Need to create a non-empty contract on the cheatcodes address so `extcodesize` checks // does not fail - db.insert_cache( + backend.db.insert_cache( CHEATCODE_ADDRESS, revm::AccountInfo { code: Some(Bytes::from_static(&[1])), ..Default::default() }, ); - Executor { db, env, inspector_config, gas_limit } + Executor { backend, env, inspector_config, gas_limit } } /// Creates the default CREATE2 Contract Deployer for local tests and scripts. pub fn deploy_create2_deployer(&mut self) -> eyre::Result<()> { - let create2_deployer_account = self.db.basic(DEFAULT_CREATE2_DEPLOYER); + let create2_deployer_account = self.backend.basic(DEFAULT_CREATE2_DEPLOYER); if create2_deployer_account.code.is_none() || create2_deployer_account.code.as_ref().unwrap().is_empty() @@ -222,24 +100,24 @@ where /// Set the balance of an account. pub fn set_balance(&mut self, address: Address, amount: U256) -> &mut Self { - let mut account = self.db.basic(address); + let mut account = self.backend.basic(address); account.balance = amount; - self.db.insert_cache(address, account); + self.backend.insert_cache(address, account); self } /// Gets the balance of an account pub fn get_balance(&self, address: Address) -> U256 { - self.db.basic(address).balance + self.backend.basic(address).balance } /// Set the nonce of an account. 
pub fn set_nonce(&mut self, address: Address, nonce: u64) -> &mut Self { - let mut account = self.db.basic(address); + let mut account = self.backend.basic(address); account.nonce = nonce; - self.db.insert_cache(address, account); + self.backend.insert_cache(address, account); self } @@ -345,7 +223,7 @@ where // Build VM let mut evm = EVM::new(); evm.env = self.build_env(from, TransactTo::Call(to), calldata, value); - evm.database(&mut self.db); + evm.database(&mut self.backend); // Run the call let mut inspector = self.inspector_config.stack(); @@ -472,7 +350,7 @@ where // Build VM let mut evm = EVM::new(); evm.env = self.build_env(from, TransactTo::Call(to), calldata, value); - evm.database(&self.db); + evm.database(&self.backend); // Run the call let mut inspector = self.inspector_config.stack(); @@ -520,7 +398,7 @@ where ) -> std::result::Result { let mut evm = EVM::new(); evm.env = self.build_env(from, TransactTo::Create(CreateScheme::Create), code, value); - evm.database(&mut self.db); + evm.database(&mut self.backend); let mut inspector = self.inspector_config.stack(); let (status, out, gas, _) = evm.inspect_commit(&mut inspector); @@ -585,7 +463,7 @@ where /// DSTest will not revert inside its `assertEq`-like functions which allows /// to test multiple assertions in 1 test function while also preserving logs. /// - /// Instead it sets `failed` to `true` which we must check. + /// Instead, it sets `failed` to `true` which we must check. pub fn is_success( &self, address: Address, @@ -594,9 +472,9 @@ where should_fail: bool, ) -> bool { // Construct a new VM with the state changeset - let mut db = CacheDB::new(EmptyDB()); - db.insert_cache(address, self.db.basic(address)); - db.commit(state_changeset); + let mut backend = self.backend.clone_empty(); + backend.insert_cache(address, self.backend.basic(address)); + backend.commit(state_changeset); let executor = Executor::new(db, self.env.clone(), self.inspector_config.clone(), self.gas_limit); @@ -641,6 +519,121 @@ where } } +#[derive(thiserror::Error, Debug)] +pub enum EvmError { + /// Error which occurred during execution of a transaction + #[error("Execution reverted: {reason} (gas: {gas})")] + Execution { + reverted: bool, + reason: String, + gas: u64, + stipend: u64, + logs: Vec, + traces: Option, + debug: Option, + labels: BTreeMap, + transactions: Option>, + state_changeset: Option, + }, + /// Error which occurred during ABI encoding/decoding + #[error(transparent)] + AbiError(#[from] ethers::contract::AbiError), + /// Any other error. + #[error(transparent)] + Eyre(#[from] eyre::Error), +} + +/// The result of a deployment. +#[derive(Debug)] +pub struct DeployResult { + /// The address of the deployed contract + pub address: Address, + /// The gas cost of the deployment + pub gas: u64, + /// The logs emitted during the deployment + pub logs: Vec, + /// The traces of the deployment + pub traces: Option, + /// The debug nodes of the call + pub debug: Option, +} + +/// The result of a call. 
+#[derive(Debug)] +pub struct CallResult { + /// Whether the call reverted or not + pub reverted: bool, + /// The decoded result of the call + pub result: D, + /// The gas used for the call + pub gas: u64, + /// The initial gas stipend for the transaction + pub stipend: u64, + /// The logs emitted during the call + pub logs: Vec, + /// The labels assigned to addresses during the call + pub labels: BTreeMap, + /// The traces of the call + pub traces: Option, + /// The debug nodes of the call + pub debug: Option, + /// Scripted transactions generated from this call + pub transactions: Option>, + /// The changeset of the state. + /// + /// This is only present if the changed state was not committed to the database (i.e. if you + /// used `call` and `call_raw` not `call_committing` or `call_raw_committing`). + pub state_changeset: Option, +} + +/// The result of a raw call. +#[derive(Debug)] +pub struct RawCallResult { + /// The status of the call + status: Return, + /// Whether the call reverted or not + pub reverted: bool, + /// The raw result of the call + pub result: Bytes, + /// The gas used for the call + pub gas: u64, + /// The initial gas stipend for the transaction + pub stipend: u64, + /// The logs emitted during the call + pub logs: Vec, + /// The labels assigned to addresses during the call + pub labels: BTreeMap, + /// The traces of the call + pub traces: Option, + /// The debug nodes of the call + pub debug: Option, + /// Scripted transactions generated from this call + pub transactions: Option>, + /// The changeset of the state. + /// + /// This is only present if the changed state was not committed to the database (i.e. if you + /// used `call` and `call_raw` not `call_committing` or `call_raw_committing`). + pub state_changeset: Option, +} + +impl Default for RawCallResult { + fn default() -> Self { + Self { + status: Return::Continue, + reverted: false, + result: Bytes::new(), + gas: 0, + stipend: 0, + logs: Vec::new(), + labels: BTreeMap::new(), + traces: None, + debug: None, + transactions: None, + state_changeset: None, + } + } +} + /// Calculates the initial gas stipend for a transaction fn stipend(calldata: &[u8], spec: SpecId) -> u64 { let non_zero_data_cost = if SpecId::enabled(spec, SpecId::ISTANBUL) { 16 } else { 68 }; diff --git a/evm/src/fuzz/mod.rs b/evm/src/fuzz/mod.rs index 15db8ef98469..fc3f4b8e4b7c 100644 --- a/evm/src/fuzz/mod.rs +++ b/evm/src/fuzz/mod.rs @@ -1,7 +1,3 @@ -mod strategies; - -pub use proptest::test_runner::{Config as FuzzConfig, Reason}; - use crate::{ executor::{Executor, RawCallResult}, trace::CallTraceArena, @@ -10,6 +6,7 @@ use ethers::{ abi::{Abi, Function, Token}, types::{Address, Bytes, Log}, }; +pub use proptest::test_runner::{Config as FuzzConfig, Reason}; use proptest::test_runner::{TestCaseError, TestError, TestRunner}; use revm::db::DatabaseRef; use serde::{Deserialize, Serialize}; @@ -19,6 +16,8 @@ use strategies::{ EvmFuzzState, }; +mod strategies; + /// Magic return code for the `assume` cheatcode pub const ASSUME_MAGIC_RETURN_CODE: &[u8] = b"FOUNDRY::ASSUME"; @@ -27,21 +26,18 @@ pub const ASSUME_MAGIC_RETURN_CODE: &[u8] = b"FOUNDRY::ASSUME"; /// After instantiation, calling `fuzz` will proceed to hammer the deployed smart contract with /// inputs, until it finds a counterexample. 
The provided [`TestRunner`] contains all the /// configuration which can be overridden via [environment variables](https://docs.rs/proptest/1.0.0/proptest/test_runner/struct.Config.html) -pub struct FuzzedExecutor<'a, DB: DatabaseRef> { +pub struct FuzzedExecutor<'a> { /// The VM - executor: &'a Executor, + executor: &'a Executor, /// The fuzzer runner: TestRunner, /// The account that calls tests sender: Address, } -impl<'a, DB> FuzzedExecutor<'a, DB> -where - DB: DatabaseRef, -{ +impl<'a> FuzzedExecutor<'a> { /// Instantiates a fuzzed executor given a testrunner - pub fn new(executor: &'a Executor, runner: TestRunner, sender: Address) -> Self { + pub fn new(executor: &'a Executor, runner: TestRunner, sender: Address) -> Self { Self { executor, runner, sender } } @@ -64,7 +60,7 @@ where let counterexample: RefCell<(Bytes, RawCallResult)> = RefCell::new(Default::default()); // Stores fuzz state for use with [fuzz_calldata_from_state] - let state: EvmFuzzState = build_initial_state(&self.executor.db); + let state: EvmFuzzState = build_initial_state(&self.executor.backend); // TODO: We should have a `FuzzerOpts` struct where we can configure the fuzzer. When we // have that, we should add a way to configure strategy weights diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index e302aec32d54..7c9a2d415024 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -7,7 +7,10 @@ use ethers::{ }; use eyre::Result; use foundry_evm::executor::{ - opts::EvmOpts, Backend, DatabaseRef, Executor, ExecutorBuilder, Fork, SpecId, + backend::Backend2, + fork::{CreateFork, MultiFork}, + opts::EvmOpts, + Backend, DatabaseRef, Executor, ExecutorBuilder, Fork, SpecId, }; use foundry_utils::PostLinkInput; use proptest::test_runner::TestRunner; @@ -38,6 +41,8 @@ pub struct MultiContractRunner { pub source_paths: BTreeMap, /// The fork config pub fork: Option, + /// The fork to use at launch + pub fork2: Option, } impl MultiContractRunner { @@ -96,66 +101,131 @@ impl MultiContractRunner { }) } - /// Executes all tests that match the given `filter` + /// Executes _all_ tests that match the given `filter` /// /// This will create the runtime based on the configured `evm` ops and create the `Backend` /// before executing all contracts and their tests in _parallel_. /// /// Each Executor gets its own instance of the `Backend`. 
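// Illustrative call-site sketch for the runner entry point declared just
// below, mirroring the invocation already used in this file's test module
// (`runner()` and `Filter` come from the test helpers; everything else is
// named only for illustration):
//
//     let mut runner = runner();
//     let results =
//         runner.test(&Filter::new(".*", ".*", ".*logs"), None, true).unwrap();
//
// With this patch the parallel implementation moves to `test2`, which takes
// the same arguments but gives every contract its own clone of the shared
// `Backend2` created from the spawned `MultiFork`.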
- pub fn test( + pub fn test2( &mut self, filter: &impl TestFilter, stream_result: Option>, include_fuzz_tests: bool, ) -> Result> { + // TODO move to builder let runtime = RuntimeOrHandle::new(); let env = runtime.block_on(self.evm_opts.evm_env()); - // the db backend that serves all the data, each contract gets its own clone - let db = runtime.block_on(Backend::new(self.fork.take(), &env)); + let (forks, fork_handler) = MultiFork::spawn(); - let results = self - .contracts - .par_iter() - .filter(|(id, _)| { - filter.matches_path(id.source.to_string_lossy()) && - filter.matches_contract(&id.name) - }) - .filter(|(_, (abi, _, _))| abi.functions().any(|func| filter.matches_test(&func.name))) - .map(|(id, (abi, deploy_code, libs))| { - let mut builder = ExecutorBuilder::new() - .with_cheatcodes(self.evm_opts.ffi) - .with_config(env.clone()) - .with_spec(self.evm_spec) - .with_gas_limit(self.evm_opts.gas_limit()); + { + // the db backend that serves all the data, each contract gets its own instance + let db = Backend2::new(forks, self.fork2.take()); - if self.evm_opts.verbosity >= 3 { - builder = builder.with_tracing(); - } + let results = self + .contracts + .par_iter() + .filter(|(id, _)| { + filter.matches_path(id.source.to_string_lossy()) && + filter.matches_contract(&id.name) + }) + .filter(|(_, (abi, _, _))| { + abi.functions().any(|func| filter.matches_test(&func.name)) + }) + .map(|(id, (abi, deploy_code, libs))| { + let mut executor = ExecutorBuilder::new() + .with_cheatcodes(self.evm_opts.ffi) + .with_config(env.clone()) + .with_spec(self.evm_spec) + .with_gas_limit(self.evm_opts.gas_limit()) + .set_tracing(self.evm_opts.verbosity >= 3) + .build(db.clone()); + + let result = self.run_tests( + &id.identifier(), + abi, + executor, + deploy_code.clone(), + libs, + (filter, include_fuzz_tests), + )?; + Ok((id.identifier(), result)) + }) + .filter_map(Result::<_>::ok) + .filter(|(_, results)| !results.is_empty()) + .map_with(stream_result, |stream_result, (name, result)| { + if let Some(stream_result) = stream_result.as_ref() { + stream_result.send((name.clone(), result.clone())).unwrap(); + } + (name, result) + }) + .collect::>(); + } + + // the spawned handler contains some resources, rpc caches, that will get flushed on drop, + // in order to ensure everything is flushed properly we wait for the thread to finish which + // will happen when all the channels (MultiFork) are dropped + fork_handler.join()?; - let executor = builder.build(db.clone()); - let result = self.run_tests( - &id.identifier(), - abi, - executor, - deploy_code.clone(), - libs, - (filter, include_fuzz_tests), - )?; - Ok((id.identifier(), result)) - }) - .filter_map(Result::<_>::ok) - .filter(|(_, results)| !results.is_empty()) - .map_with(stream_result, |stream_result, (name, result)| { - if let Some(stream_result) = stream_result.as_ref() { - stream_result.send((name.clone(), result.clone())).unwrap(); - } - (name, result) - }) - .collect::>(); Ok(results) } + pub fn test( + &mut self, + filter: &impl TestFilter, + stream_result: Option>, + include_fuzz_tests: bool, + ) -> Result> { + // let runtime = RuntimeOrHandle::new(); + // let env = runtime.block_on(self.evm_opts.evm_env()); + // + // // the db backend that serves all the data, each contract gets its own clone + // let db = runtime.block_on(Backend::new(self.fork.take(), &env)); + // + // let results = self + // .contracts + // .par_iter() + // .filter(|(id, _)| { + // filter.matches_path(id.source.to_string_lossy()) && + // filter.matches_contract(&id.name) 
+ // }) + // .filter(|(_, (abi, _, _))| abi.functions().any(|func| + // filter.matches_test(&func.name))) .map(|(id, (abi, deploy_code, libs))| { + // let mut builder = ExecutorBuilder::new() + // .with_cheatcodes(self.evm_opts.ffi) + // .with_config(env.clone()) + // .with_spec(self.evm_spec) + // .with_gas_limit(self.evm_opts.gas_limit()); + // + // if self.evm_opts.verbosity >= 3 { + // builder = builder.with_tracing(); + // } + // + // let executor = builder.build(db.clone()); + // let result = self.run_tests( + // &id.identifier(), + // abi, + // executor, + // deploy_code.clone(), + // libs, + // (filter, include_fuzz_tests), + // )?; + // Ok((id.identifier(), result)) + // }) + // .filter_map(Result::<_>::ok) + // .filter(|(_, results)| !results.is_empty()) + // .map_with(stream_result, |stream_result, (name, result)| { + // if let Some(stream_result) = stream_result.as_ref() { + // stream_result.send((name.clone(), result.clone())).unwrap(); + // } + // (name, result) + // }) + // .collect::>(); + // Ok(results) + todo!() + } + // The _name field is unused because we only want it for tracing #[tracing::instrument( name = "contract", @@ -293,6 +363,7 @@ impl MultiContractRunnerBuilder { errors: Some(execution_info.2), source_paths, fork: self.fork, + fork2: None, }) } diff --git a/forge/src/types.rs b/forge/src/types.rs index 595ca98c66f1..9f1ce767ddc7 100644 --- a/forge/src/types.rs +++ b/forge/src/types.rs @@ -1,8 +1,12 @@ -use std::collections::{BTreeMap, HashMap}; -use std::path::PathBuf; -use ethers::abi::{Event, Function, Abi, AbiError}; -use ethers::solc::artifacts::CompactContractBytecode; -use ethers::types::H256; +use ethers::{ + abi::{Abi, AbiError, Event, Function}, + solc::artifacts::CompactContractBytecode, + types::H256, +}; +use std::{ + collections::{BTreeMap, HashMap}, + path::PathBuf, +}; /// Represents a solidity Contract that's a test target #[derive(Debug, Clone)] @@ -33,6 +37,4 @@ pub struct TestFunction { // === impl TestFunction === -impl TestFunction { - -} \ No newline at end of file +impl TestFunction {} From b1a9833adf74f14f7eb163de9101f1c70dca7fda Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 16:58:23 +0200 Subject: [PATCH 030/102] clean up types --- evm/src/executor/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 1db6610d5484..babcdb762ae0 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -12,7 +12,6 @@ use ethers::{ prelude::{decode_function_data, encode_function_data, Address, U256}, types::{transaction::eip2718::TypedTransaction, Log}, }; -use eyre::Result; use foundry_utils::IntoFunction; use hashbrown::HashMap; /// Reexport commonly used revm types @@ -217,7 +216,7 @@ impl Executor { to: Address, calldata: Bytes, value: U256, - ) -> Result { + ) -> eyre::Result { let stipend = stipend(&calldata, self.env.cfg.spec_id); // Build VM @@ -344,7 +343,7 @@ impl Executor { to: Address, calldata: Bytes, value: U256, - ) -> Result { + ) -> eyre::Result { let stipend = stipend(&calldata, self.env.cfg.spec_id); // Build VM From a63c141d026d672eed8e8383dc72976762437a04 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 16:59:42 +0200 Subject: [PATCH 031/102] refactor: remove generic Inspector impl --- evm/src/executor/inspector/cheatcodes/mod.rs | 143 +++++++++---------- 1 file changed, 71 insertions(+), 72 deletions(-) diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs 
b/evm/src/executor/inspector/cheatcodes/mod.rs index df4730ed8baf..9d8d8239b8fa 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -34,6 +34,7 @@ use revm::{ Return, }; use std::collections::{BTreeMap, VecDeque}; +use crate::executor::backend::Backend2; /// An inspector that handles calls to various cheatcodes, each with their own behavior. /// @@ -98,9 +99,9 @@ impl Cheatcodes { } } - fn apply_cheatcode( + fn apply_cheatcode( &mut self, - data: &mut EVMData<'_, DB>, + data: &mut EVMData<'_, Backend2>, caller: Address, call: &CallInputs, ) -> Result { @@ -117,13 +118,74 @@ impl Cheatcodes { } } -impl Inspector for Cheatcodes -where - DB: Database, +impl Inspector for Cheatcodes { + fn initialize_interp( + &mut self, + _: &mut Interpreter, + data: &mut EVMData<'_, Backend2>, + _: bool, + ) -> Return { + // When the first interpreter is initialized we've circumvented the balance and gas checks, + // so we apply our actual block data with the correct fees and all. + if let Some(block) = self.block.take() { + data.env.block = block; + } + if let Some(gas_price) = self.gas_price.take() { + data.env.tx.gas_price = gas_price; + } + + Return::Continue + } + + fn step(&mut self, interpreter: &mut Interpreter, _: &mut EVMData<'_, Backend2>, _: bool) -> Return { + // Record writes and reads if `record` has been called + if let Some(storage_accesses) = &mut self.accesses { + match interpreter.contract.code[interpreter.program_counter()] { + opcode::SLOAD => { + let key = try_or_continue!(interpreter.stack().peek(0)); + storage_accesses + .reads + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + } + opcode::SSTORE => { + let key = try_or_continue!(interpreter.stack().peek(0)); + + // An SSTORE does an SLOAD internally + storage_accesses + .reads + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + storage_accesses + .writes + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + } + _ => (), + } + } + + Return::Continue + } + + fn log(&mut self, _: &mut EVMData<'_, Backend2>, address: &Address, topics: &[H256], data: &Bytes) { + // Match logs if `expectEmit` has been called + if !self.expected_emits.is_empty() { + handle_expect_emit( + self, + RawLog { topics: topics.to_vec(), data: data.to_vec() }, + address, + ); + } + } + fn call( &mut self, - data: &mut EVMData<'_, DB>, + data: &mut EVMData<'_, Backend2>, call: &mut CallInputs, is_static: bool, ) -> (Return, Gas, Bytes) { @@ -233,72 +295,9 @@ where } } - fn initialize_interp( - &mut self, - _: &mut Interpreter, - data: &mut EVMData<'_, DB>, - _: bool, - ) -> Return { - // When the first interpreter is initialized we've circumvented the balance and gas checks, - // so we apply our actual block data with the correct fees and all. 
- if let Some(block) = self.block.take() { - data.env.block = block; - } - if let Some(gas_price) = self.gas_price.take() { - data.env.tx.gas_price = gas_price; - } - - Return::Continue - } - - fn step(&mut self, interpreter: &mut Interpreter, _: &mut EVMData<'_, DB>, _: bool) -> Return { - // Record writes and reads if `record` has been called - if let Some(storage_accesses) = &mut self.accesses { - match interpreter.contract.code[interpreter.program_counter()] { - opcode::SLOAD => { - let key = try_or_continue!(interpreter.stack().peek(0)); - storage_accesses - .reads - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - } - opcode::SSTORE => { - let key = try_or_continue!(interpreter.stack().peek(0)); - - // An SSTORE does an SLOAD internally - storage_accesses - .reads - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - storage_accesses - .writes - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - } - _ => (), - } - } - - Return::Continue - } - - fn log(&mut self, _: &mut EVMData<'_, DB>, address: &Address, topics: &[H256], data: &Bytes) { - // Match logs if `expectEmit` has been called - if !self.expected_emits.is_empty() { - handle_expect_emit( - self, - RawLog { topics: topics.to_vec(), data: data.to_vec() }, - address, - ); - } - } - fn call_end( &mut self, - data: &mut EVMData<'_, DB>, + data: &mut EVMData<'_, Backend2>, call: &CallInputs, remaining_gas: Gas, status: Return, @@ -392,7 +391,7 @@ where fn create( &mut self, - data: &mut EVMData<'_, DB>, + data: &mut EVMData<'_, Backend2>, call: &mut CreateInputs, ) -> (Return, Option
, Gas, Bytes) { // Apply our prank @@ -438,7 +437,7 @@ where fn create_end( &mut self, - data: &mut EVMData<'_, DB>, + data: &mut EVMData<'_, Backend2>, _: &CreateInputs, status: Return, address: Option
, From 968c71a70c36521cfab350b13a3f0cb98dcd463f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 17:55:00 +0200 Subject: [PATCH 032/102] feat: introduce Backendtrait --- Cargo.lock | 1 + evm/Cargo.toml | 3 + evm/src/executor/backend/mod.rs | 23 ++- evm/src/executor/builder.rs | 4 +- evm/src/executor/inspector/cheatcodes/mod.rs | 143 ++++++++++--------- evm/src/executor/inspector/stack.rs | 5 +- evm/src/executor/mod.rs | 4 +- evm/src/fuzz/mod.rs | 4 +- 8 files changed, 105 insertions(+), 82 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ebb4e9d2c8e1..43fa38b5580d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1996,6 +1996,7 @@ dependencies = [ name = "foundry-evm" version = "0.2.0" dependencies = [ + "auto_impl 1.0.1", "bytes", "ethers", "eyre", diff --git a/evm/Cargo.toml b/evm/Cargo.toml index c83e8a055f6d..1e81bbcc9e6f 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -43,7 +43,10 @@ proptest = "1.0.0" # Display yansi = "0.5.1" + +# misc url = "2.2.2" +auto_impl = "1.0" [dev-dependencies] tempfile = "3.3.0" diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 9b7365decb6c..e188cf1af654 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -7,6 +7,7 @@ use revm::{ Account, AccountInfo, Database, DatabaseCommit, Env, }; use std::collections::HashMap; +use revm::db::RefDBWrapper; use tracing::{trace, warn}; mod in_memory_db; @@ -16,6 +17,17 @@ use crate::executor::{ }; pub use in_memory_db::MemDb; +#[auto_impl::auto_impl(&mut, Box)] +pub trait BackendTrait : Database { + +} + +impl BackendTrait for Backend2 {} + +impl<'a> BackendTrait for RefDBWrapper<'a> { + +} + /// Provides the underlying `revm::Database` implementation. /// /// A `Backend` can be initialised in two forms: @@ -151,8 +163,13 @@ impl Backend2 { *self.db.db_mut() = BackendDatabase::Forked(fork, id); Ok(()) } + + pub fn insert_cache(&mut self, address: H160, account: AccountInfo) { + self.db.insert_cache(address, account) + } } + // a bunch of delegate revm trait implementations impl DatabaseRef for Backend2 { @@ -160,8 +177,8 @@ impl DatabaseRef for Backend2 { self.db.basic(address) } - fn code_by_hash(&self, address: H256) -> bytes::Bytes { - self.db.code_by_hash(address) + fn code_by_hash(&self, code_hash: H256) -> bytes::Bytes { + self.db.code_by_hash(code_hash) } fn storage(&self, address: H160, index: U256) -> U256 { @@ -185,7 +202,7 @@ impl Database for Backend2 { } fn code_by_hash(&mut self, code_hash: H256) -> Bytes { - self.db.code_by_hash(address) + self.db.code_by_hash(code_hash) } fn storage(&mut self, address: H160, index: U256) -> U256 { diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index df703b86bda4..6ad36ea4b6e5 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -4,7 +4,7 @@ use super::{ Executor, }; use crate::executor::{ - backend::{Backend, Backend2}, + backend::{Backend2}, fork::{BlockchainDb, BlockchainDbMeta}, }; use ethers::{ @@ -41,7 +41,7 @@ impl ExecutorBuilder { /// Enables tracing #[must_use] - pub fn with_tracing(mut self) -> Self { + pub fn with_tracing(self) -> Self { self.set_tracing(true) } diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs b/evm/src/executor/inspector/cheatcodes/mod.rs index 9d8d8239b8fa..df4730ed8baf 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -34,7 +34,6 @@ use revm::{ Return, }; use std::collections::{BTreeMap, VecDeque}; -use 
crate::executor::backend::Backend2; /// An inspector that handles calls to various cheatcodes, each with their own behavior. /// @@ -99,9 +98,9 @@ impl Cheatcodes { } } - fn apply_cheatcode( + fn apply_cheatcode( &mut self, - data: &mut EVMData<'_, Backend2>, + data: &mut EVMData<'_, DB>, caller: Address, call: &CallInputs, ) -> Result { @@ -118,74 +117,13 @@ impl Cheatcodes { } } -impl Inspector for Cheatcodes +impl Inspector for Cheatcodes +where + DB: Database, { - fn initialize_interp( - &mut self, - _: &mut Interpreter, - data: &mut EVMData<'_, Backend2>, - _: bool, - ) -> Return { - // When the first interpreter is initialized we've circumvented the balance and gas checks, - // so we apply our actual block data with the correct fees and all. - if let Some(block) = self.block.take() { - data.env.block = block; - } - if let Some(gas_price) = self.gas_price.take() { - data.env.tx.gas_price = gas_price; - } - - Return::Continue - } - - fn step(&mut self, interpreter: &mut Interpreter, _: &mut EVMData<'_, Backend2>, _: bool) -> Return { - // Record writes and reads if `record` has been called - if let Some(storage_accesses) = &mut self.accesses { - match interpreter.contract.code[interpreter.program_counter()] { - opcode::SLOAD => { - let key = try_or_continue!(interpreter.stack().peek(0)); - storage_accesses - .reads - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - } - opcode::SSTORE => { - let key = try_or_continue!(interpreter.stack().peek(0)); - - // An SSTORE does an SLOAD internally - storage_accesses - .reads - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - storage_accesses - .writes - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - } - _ => (), - } - } - - Return::Continue - } - - fn log(&mut self, _: &mut EVMData<'_, Backend2>, address: &Address, topics: &[H256], data: &Bytes) { - // Match logs if `expectEmit` has been called - if !self.expected_emits.is_empty() { - handle_expect_emit( - self, - RawLog { topics: topics.to_vec(), data: data.to_vec() }, - address, - ); - } - } - fn call( &mut self, - data: &mut EVMData<'_, Backend2>, + data: &mut EVMData<'_, DB>, call: &mut CallInputs, is_static: bool, ) -> (Return, Gas, Bytes) { @@ -295,9 +233,72 @@ impl Inspector for Cheatcodes } } + fn initialize_interp( + &mut self, + _: &mut Interpreter, + data: &mut EVMData<'_, DB>, + _: bool, + ) -> Return { + // When the first interpreter is initialized we've circumvented the balance and gas checks, + // so we apply our actual block data with the correct fees and all. 
+ if let Some(block) = self.block.take() { + data.env.block = block; + } + if let Some(gas_price) = self.gas_price.take() { + data.env.tx.gas_price = gas_price; + } + + Return::Continue + } + + fn step(&mut self, interpreter: &mut Interpreter, _: &mut EVMData<'_, DB>, _: bool) -> Return { + // Record writes and reads if `record` has been called + if let Some(storage_accesses) = &mut self.accesses { + match interpreter.contract.code[interpreter.program_counter()] { + opcode::SLOAD => { + let key = try_or_continue!(interpreter.stack().peek(0)); + storage_accesses + .reads + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + } + opcode::SSTORE => { + let key = try_or_continue!(interpreter.stack().peek(0)); + + // An SSTORE does an SLOAD internally + storage_accesses + .reads + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + storage_accesses + .writes + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + } + _ => (), + } + } + + Return::Continue + } + + fn log(&mut self, _: &mut EVMData<'_, DB>, address: &Address, topics: &[H256], data: &Bytes) { + // Match logs if `expectEmit` has been called + if !self.expected_emits.is_empty() { + handle_expect_emit( + self, + RawLog { topics: topics.to_vec(), data: data.to_vec() }, + address, + ); + } + } + fn call_end( &mut self, - data: &mut EVMData<'_, Backend2>, + data: &mut EVMData<'_, DB>, call: &CallInputs, remaining_gas: Gas, status: Return, @@ -391,7 +392,7 @@ impl Inspector for Cheatcodes fn create( &mut self, - data: &mut EVMData<'_, Backend2>, + data: &mut EVMData<'_, DB>, call: &mut CreateInputs, ) -> (Return, Option
, Gas, Bytes) { // Apply our prank @@ -437,7 +438,7 @@ impl Inspector for Cheatcodes fn create_end( &mut self, - data: &mut EVMData<'_, Backend2>, + data: &mut EVMData<'_, DB>, _: &CreateInputs, status: Return, address: Option
, diff --git a/evm/src/executor/inspector/stack.rs b/evm/src/executor/inspector/stack.rs index 2ee44a0f579c..03fda4277d09 100644 --- a/evm/src/executor/inspector/stack.rs +++ b/evm/src/executor/inspector/stack.rs @@ -2,8 +2,9 @@ use super::{Cheatcodes, Debugger, LogCollector, Tracer}; use crate::{debug::DebugArena, trace::CallTraceArena}; use bytes::Bytes; use ethers::types::{Address, Log, H256}; -use revm::{db::Database, CallInputs, CreateInputs, EVMData, Gas, Inspector, Interpreter, Return}; +use revm::{CallInputs, CreateInputs, EVMData, Gas, Inspector, Interpreter, Return}; use std::collections::BTreeMap; +use crate::executor::backend::BackendTrait; /// Helper macro to call the same method on multiple inspectors without resorting to dynamic /// dispatch @@ -55,7 +56,7 @@ impl InspectorStack { impl Inspector for InspectorStack where - DB: Database, + DB: BackendTrait, { fn initialize_interp( &mut self, diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index babcdb762ae0..f1171c52bb4b 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -17,7 +17,7 @@ use hashbrown::HashMap; /// Reexport commonly used revm types pub use revm::{db::DatabaseRef, Env, SpecId}; use revm::{ - db::{CacheDB, DatabaseCommit, EmptyDB}, + db::{DatabaseCommit}, return_ok, Account, BlockEnv, CreateScheme, Return, TransactOut, TransactTo, TxEnv, EVM, }; use std::collections::{BTreeMap, VecDeque}; @@ -475,7 +475,7 @@ impl Executor { backend.insert_cache(address, self.backend.basic(address)); backend.commit(state_changeset); let executor = - Executor::new(db, self.env.clone(), self.inspector_config.clone(), self.gas_limit); + Executor::new(backend, self.env.clone(), self.inspector_config.clone(), self.gas_limit); let mut success = !reverted; if success { diff --git a/evm/src/fuzz/mod.rs b/evm/src/fuzz/mod.rs index fc3f4b8e4b7c..076752f2209a 100644 --- a/evm/src/fuzz/mod.rs +++ b/evm/src/fuzz/mod.rs @@ -8,7 +8,7 @@ use ethers::{ }; pub use proptest::test_runner::{Config as FuzzConfig, Reason}; use proptest::test_runner::{TestCaseError, TestError, TestRunner}; -use revm::db::DatabaseRef; + use serde::{Deserialize, Serialize}; use std::{cell::RefCell, collections::BTreeMap, fmt}; use strategies::{ @@ -60,7 +60,7 @@ impl<'a> FuzzedExecutor<'a> { let counterexample: RefCell<(Bytes, RawCallResult)> = RefCell::new(Default::default()); // Stores fuzz state for use with [fuzz_calldata_from_state] - let state: EvmFuzzState = build_initial_state(&self.executor.backend); + let state: EvmFuzzState = build_initial_state(&self.executor.backend.db); // TODO: We should have a `FuzzerOpts` struct where we can configure the fuzzer. 
When we // have that, we should add a way to configure strategy weights From 09e9b533a41e1673a04702ab7133c442fa255378 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 17:57:20 +0200 Subject: [PATCH 033/102] chore: remove old types --- evm/src/executor/backend/mod.rs | 69 ++--------------------------- evm/src/executor/builder.rs | 2 +- evm/src/executor/inspector/stack.rs | 3 +- evm/src/executor/mod.rs | 8 ++-- 4 files changed, 10 insertions(+), 72 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index e188cf1af654..e9078c8329a2 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -3,11 +3,10 @@ use bytes::Bytes; use ethers::prelude::{H160, H256, U256}; use hashbrown::HashMap as Map; use revm::{ - db::{CacheDB, DatabaseRef, EmptyDB}, + db::{CacheDB, DatabaseRef, EmptyDB, RefDBWrapper}, Account, AccountInfo, Database, DatabaseCommit, Env, }; use std::collections::HashMap; -use revm::db::RefDBWrapper; use tracing::{trace, warn}; mod in_memory_db; @@ -17,16 +16,13 @@ use crate::executor::{ }; pub use in_memory_db::MemDb; +/// An extension trait that allows us to easily extend the `revm::Inspector` capabilities #[auto_impl::auto_impl(&mut, Box)] -pub trait BackendTrait : Database { - -} +pub trait BackendTrait: Database {} impl BackendTrait for Backend2 {} -impl<'a> BackendTrait for RefDBWrapper<'a> { - -} +impl<'a> BackendTrait for RefDBWrapper<'a> {} /// Provides the underlying `revm::Database` implementation. /// @@ -169,7 +165,6 @@ impl Backend2 { } } - // a bunch of delegate revm trait implementations impl DatabaseRef for Backend2 { @@ -253,59 +248,3 @@ impl DatabaseRef for BackendDatabase { } } } - -/// Variants of a [revm::Database] -#[derive(Debug, Clone)] -pub enum Backend { - /// Simple in memory [revm::Database] - Simple(EmptyDB), - /// A [revm::Database] that forks of a remote location and can have multiple consumers of the - /// same data - Forked(SharedBackend), -} - -impl Backend { - /// Instantiates a new backend union based on whether there was or not a fork url specified - pub async fn new(fork: Option, env: &Env) -> Self { - if let Some(fork) = fork { - Backend::Forked(fork.spawn_backend(env).await) - } else { - Self::simple() - } - } - - /// Creates an empty in memory database - pub fn simple() -> Self { - Backend::Simple(EmptyDB::default()) - } -} - -impl DatabaseRef for Backend { - fn basic(&self, address: H160) -> AccountInfo { - match self { - Backend::Simple(inner) => inner.basic(address), - Backend::Forked(inner) => inner.basic(address), - } - } - - fn code_by_hash(&self, address: H256) -> bytes::Bytes { - match self { - Backend::Simple(inner) => inner.code_by_hash(address), - Backend::Forked(inner) => inner.code_by_hash(address), - } - } - - fn storage(&self, address: H160, index: U256) -> U256 { - match self { - Backend::Simple(inner) => inner.storage(address, index), - Backend::Forked(inner) => inner.storage(address, index), - } - } - - fn block_hash(&self, number: U256) -> H256 { - match self { - Backend::Simple(inner) => inner.block_hash(number), - Backend::Forked(inner) => inner.block_hash(number), - } - } -} diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index 6ad36ea4b6e5..9253fefb6b5e 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -4,7 +4,7 @@ use super::{ Executor, }; use crate::executor::{ - backend::{Backend2}, + backend::Backend2, fork::{BlockchainDb, BlockchainDbMeta}, }; use ethers::{ diff --git 
a/evm/src/executor/inspector/stack.rs b/evm/src/executor/inspector/stack.rs index 03fda4277d09..d365da2d8c43 100644 --- a/evm/src/executor/inspector/stack.rs +++ b/evm/src/executor/inspector/stack.rs @@ -1,10 +1,9 @@ use super::{Cheatcodes, Debugger, LogCollector, Tracer}; -use crate::{debug::DebugArena, trace::CallTraceArena}; +use crate::{debug::DebugArena, executor::backend::BackendTrait, trace::CallTraceArena}; use bytes::Bytes; use ethers::types::{Address, Log, H256}; use revm::{CallInputs, CreateInputs, EVMData, Gas, Inspector, Interpreter, Return}; use std::collections::BTreeMap; -use crate::executor::backend::BackendTrait; /// Helper macro to call the same method on multiple inspectors without resorting to dynamic /// dispatch diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index f1171c52bb4b..2e2adfd0a536 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -14,12 +14,12 @@ use ethers::{ }; use foundry_utils::IntoFunction; use hashbrown::HashMap; -/// Reexport commonly used revm types -pub use revm::{db::DatabaseRef, Env, SpecId}; use revm::{ - db::{DatabaseCommit}, - return_ok, Account, BlockEnv, CreateScheme, Return, TransactOut, TransactTo, TxEnv, EVM, + db::DatabaseCommit, return_ok, Account, BlockEnv, CreateScheme, Return, TransactOut, + TransactTo, TxEnv, EVM, }; +/// Reexport commonly used revm types +pub use revm::{db::DatabaseRef, Env, SpecId}; use std::collections::{BTreeMap, VecDeque}; /// custom revm database implementations From 13fadcf5e69b12d1b4ca9d1fac95891ae0ac126c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 17:57:53 +0200 Subject: [PATCH 034/102] refactor: rename Backend type --- evm/src/executor/backend/mod.rs | 12 ++++++------ evm/src/executor/builder.rs | 4 ++-- evm/src/executor/mod.rs | 6 +++--- forge/src/multi_runner.rs | 4 ++-- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index e9078c8329a2..80ef6a1ad8a7 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -20,7 +20,7 @@ pub use in_memory_db::MemDb; #[auto_impl::auto_impl(&mut, Box)] pub trait BackendTrait: Database {} -impl BackendTrait for Backend2 {} +impl BackendTrait for Backend {} impl<'a> BackendTrait for RefDBWrapper<'a> {} @@ -71,7 +71,7 @@ impl<'a> BackendTrait for RefDBWrapper<'a> {} /// snapshot is created before fork `B` is selected, then fork `A` will be the active fork again /// after reverting the snapshot. 
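A rough sketch of the snapshot/fork interplay described above, assuming a `MultiFork` handle and two placeholder `CreateFork` configs (`fork_a_cfg` and `fork_b_cfg` are illustrative, not part of the patch):

    let mut backend = Backend::new(multi_fork, None);

    // create and select fork A, then snapshot while it is the active fork
    let fork_a = backend.create_fork(fork_a_cfg)?;
    backend.select_fork(fork_a)?;
    let snap = backend.snapshot();

    // switch over to fork B
    let fork_b = backend.create_fork(fork_b_cfg)?;
    backend.select_fork(fork_b)?;

    // reverting the snapshot restores the db, and with it fork A as the active fork
    assert!(backend.revert(snap));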
#[derive(Debug, Clone)] -pub struct Backend2 { +pub struct Backend { /// The access point for managing forks forks: MultiFork, /// tracks all created forks @@ -85,7 +85,7 @@ pub struct Backend2 { // === impl Backend === -impl Backend2 { +impl Backend { /// Creates a new instance of `Backend` /// /// if `fork` is `Some` this will launch with a `fork` database, otherwise with an in-memory @@ -167,7 +167,7 @@ impl Backend2 { // a bunch of delegate revm trait implementations -impl DatabaseRef for Backend2 { +impl DatabaseRef for Backend { fn basic(&self, address: H160) -> AccountInfo { self.db.basic(address) } @@ -185,13 +185,13 @@ impl DatabaseRef for Backend2 { } } -impl DatabaseCommit for Backend2 { +impl DatabaseCommit for Backend { fn commit(&mut self, changes: Map) { self.db.commit(changes) } } -impl Database for Backend2 { +impl Database for Backend { fn basic(&mut self, address: H160) -> AccountInfo { self.db.basic(address) } diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index 9253fefb6b5e..cd5d0a996d58 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -4,7 +4,7 @@ use super::{ Executor, }; use crate::executor::{ - backend::Backend2, + backend::Backend, fork::{BlockchainDb, BlockchainDbMeta}, }; use ethers::{ @@ -85,7 +85,7 @@ impl ExecutorBuilder { } /// Builds the executor as configured. - pub fn build(self, db: Backend2) -> Executor { + pub fn build(self, db: Backend) -> Executor { let gas_limit = self.gas_limit.unwrap_or(self.env.block.gas_limit); Executor::new(db, self.env, self.inspector_config, gas_limit) } diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 2e2adfd0a536..28e6db4aa409 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -36,7 +36,7 @@ pub use backend::Backend; pub mod snapshot; -use crate::executor::{backend::Backend2, inspector::DEFAULT_CREATE2_DEPLOYER}; +use crate::executor::{backend::Backend, inspector::DEFAULT_CREATE2_DEPLOYER}; pub use builder::{ExecutorBuilder, Fork}; /// A mapping of addresses to their changed state. @@ -50,7 +50,7 @@ pub struct Executor { // only interested in the database. REVM's `EVM` is a thin // wrapper around spawning a new EVM on every call anyway, // so the performance difference should be negligible. - pub backend: Backend2, + pub backend: Backend, env: Env, inspector_config: InspectorStackConfig, /// The gas limit for calls and deployments. 
This is different from the gas limit imposed by @@ -63,7 +63,7 @@ pub struct Executor { impl Executor { pub fn new( - mut backend: Backend2, + mut backend: Backend, env: Env, inspector_config: InspectorStackConfig, gas_limit: U256, diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 7c9a2d415024..ab2223c3c003 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -7,7 +7,7 @@ use ethers::{ }; use eyre::Result; use foundry_evm::executor::{ - backend::Backend2, + backend::Backend, fork::{CreateFork, MultiFork}, opts::EvmOpts, Backend, DatabaseRef, Executor, ExecutorBuilder, Fork, SpecId, @@ -121,7 +121,7 @@ impl MultiContractRunner { { // the db backend that serves all the data, each contract gets its own instance - let db = Backend2::new(forks, self.fork2.take()); + let db = Backend::new(forks, self.fork2.take()); let results = self .contracts From 6f1ebcc17cb234e9ba078c5dde9da9e4f0b8371f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 17:59:00 +0200 Subject: [PATCH 035/102] refactor: rename DatbaseExt trait --- evm/src/executor/backend/mod.rs | 6 +- evm/src/executor/inspector/cheatcodes/mod.rs | 130 +++++++++---------- evm/src/executor/inspector/stack.rs | 4 +- 3 files changed, 70 insertions(+), 70 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 80ef6a1ad8a7..b2382b0e3ddc 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -18,11 +18,11 @@ pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities #[auto_impl::auto_impl(&mut, Box)] -pub trait BackendTrait: Database {} +pub trait DatabaseExt: Database {} -impl BackendTrait for Backend {} +impl DatabaseExt for Backend {} -impl<'a> BackendTrait for RefDBWrapper<'a> {} +impl<'a> DatabaseExt for RefDBWrapper<'a> {} /// Provides the underlying `revm::Database` implementation. /// diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs b/evm/src/executor/inspector/cheatcodes/mod.rs index df4730ed8baf..a4dac468adab 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -19,7 +19,7 @@ use self::{ }; use crate::{ abi::HEVMCalls, - executor::{CHEATCODE_ADDRESS, HARDHAT_CONSOLE_ADDRESS}, + executor::{backend::DatabaseExt, CHEATCODE_ADDRESS, HARDHAT_CONSOLE_ADDRESS}, }; use bytes::Bytes; use ethers::{ @@ -119,8 +119,71 @@ impl Cheatcodes { impl Inspector for Cheatcodes where - DB: Database, + DB: DatabaseExt, { + fn initialize_interp( + &mut self, + _: &mut Interpreter, + data: &mut EVMData<'_, DB>, + _: bool, + ) -> Return { + // When the first interpreter is initialized we've circumvented the balance and gas checks, + // so we apply our actual block data with the correct fees and all. 
+ if let Some(block) = self.block.take() { + data.env.block = block; + } + if let Some(gas_price) = self.gas_price.take() { + data.env.tx.gas_price = gas_price; + } + + Return::Continue + } + + fn step(&mut self, interpreter: &mut Interpreter, _: &mut EVMData<'_, DB>, _: bool) -> Return { + // Record writes and reads if `record` has been called + if let Some(storage_accesses) = &mut self.accesses { + match interpreter.contract.code[interpreter.program_counter()] { + opcode::SLOAD => { + let key = try_or_continue!(interpreter.stack().peek(0)); + storage_accesses + .reads + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + } + opcode::SSTORE => { + let key = try_or_continue!(interpreter.stack().peek(0)); + + // An SSTORE does an SLOAD internally + storage_accesses + .reads + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + storage_accesses + .writes + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + } + _ => (), + } + } + + Return::Continue + } + + fn log(&mut self, _: &mut EVMData<'_, DB>, address: &Address, topics: &[H256], data: &Bytes) { + // Match logs if `expectEmit` has been called + if !self.expected_emits.is_empty() { + handle_expect_emit( + self, + RawLog { topics: topics.to_vec(), data: data.to_vec() }, + address, + ); + } + } + fn call( &mut self, data: &mut EVMData<'_, DB>, @@ -233,69 +296,6 @@ where } } - fn initialize_interp( - &mut self, - _: &mut Interpreter, - data: &mut EVMData<'_, DB>, - _: bool, - ) -> Return { - // When the first interpreter is initialized we've circumvented the balance and gas checks, - // so we apply our actual block data with the correct fees and all. - if let Some(block) = self.block.take() { - data.env.block = block; - } - if let Some(gas_price) = self.gas_price.take() { - data.env.tx.gas_price = gas_price; - } - - Return::Continue - } - - fn step(&mut self, interpreter: &mut Interpreter, _: &mut EVMData<'_, DB>, _: bool) -> Return { - // Record writes and reads if `record` has been called - if let Some(storage_accesses) = &mut self.accesses { - match interpreter.contract.code[interpreter.program_counter()] { - opcode::SLOAD => { - let key = try_or_continue!(interpreter.stack().peek(0)); - storage_accesses - .reads - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - } - opcode::SSTORE => { - let key = try_or_continue!(interpreter.stack().peek(0)); - - // An SSTORE does an SLOAD internally - storage_accesses - .reads - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - storage_accesses - .writes - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - } - _ => (), - } - } - - Return::Continue - } - - fn log(&mut self, _: &mut EVMData<'_, DB>, address: &Address, topics: &[H256], data: &Bytes) { - // Match logs if `expectEmit` has been called - if !self.expected_emits.is_empty() { - handle_expect_emit( - self, - RawLog { topics: topics.to_vec(), data: data.to_vec() }, - address, - ); - } - } - fn call_end( &mut self, data: &mut EVMData<'_, DB>, diff --git a/evm/src/executor/inspector/stack.rs b/evm/src/executor/inspector/stack.rs index d365da2d8c43..a370278ed4a6 100644 --- a/evm/src/executor/inspector/stack.rs +++ b/evm/src/executor/inspector/stack.rs @@ -1,5 +1,5 @@ use super::{Cheatcodes, Debugger, LogCollector, Tracer}; -use crate::{debug::DebugArena, executor::backend::BackendTrait, trace::CallTraceArena}; +use crate::{debug::DebugArena, 
executor::backend::DatabaseExt, trace::CallTraceArena}; use bytes::Bytes; use ethers::types::{Address, Log, H256}; use revm::{CallInputs, CreateInputs, EVMData, Gas, Inspector, Interpreter, Return}; @@ -55,7 +55,7 @@ impl InspectorStack { impl Inspector for InspectorStack where - DB: BackendTrait, + DB: DatabaseExt, { fn initialize_interp( &mut self, From 30de43315e36de7593c676bf7f9e57cfafec97cf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 20:10:03 +0200 Subject: [PATCH 036/102] feat: integrate new Backend type --- Cargo.lock | 6 +-- evm/Cargo.toml | 2 +- evm/src/executor/backend/mod.rs | 69 ++++++++++++++++++++++++++------ evm/src/executor/mod.rs | 70 ++++++++++++++++++++++----------- evm/src/fuzz/mod.rs | 2 +- 5 files changed, 111 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 43fa38b5580d..c00bdaadefc3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4020,12 +4020,12 @@ dependencies = [ [[package]] name = "revm" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c4fff7e8fb0b20699c4ff4a132c342763883004abca680a25037a07cb8a828" +checksum = "c7b8f9b89afbb38c33ebe39c3c68faab1b8328b3de404c1a1531dda646897aec" dependencies = [ "arrayref", - "auto_impl 0.5.0", + "auto_impl 1.0.1", "bytes", "hashbrown 0.12.0", "hex", diff --git a/evm/Cargo.toml b/evm/Cargo.toml index 1e81bbcc9e6f..e6257de65ef3 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -36,7 +36,7 @@ once_cell = "1.9.0" # EVM bytes = "1.1.0" hashbrown = "0.12" -revm = { version="1.4", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } +revm = { version = "1.4", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } # Fuzzer proptest = "1.0.0" diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index b2382b0e3ddc..5952e0f3ceee 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -1,19 +1,20 @@ -use crate::executor::{fork::SharedBackend, Fork}; +use crate::executor::{ + fork::{CreateFork, ForkId, MultiFork, SharedBackend}, + snapshot::Snapshots, +}; use bytes::Bytes; -use ethers::prelude::{H160, H256, U256}; +use ethers::{ + prelude::{H160, H256, U256}, + types::Address, +}; use hashbrown::HashMap as Map; use revm::{ - db::{CacheDB, DatabaseRef, EmptyDB, RefDBWrapper}, - Account, AccountInfo, Database, DatabaseCommit, Env, + db::{CacheDB, DatabaseRef, EmptyDB}, + Account, AccountInfo, Database, DatabaseCommit, Env, Inspector, Log, Return, TransactOut, }; use std::collections::HashMap; use tracing::{trace, warn}; - mod in_memory_db; -use crate::executor::{ - fork::{CreateFork, ForkId, MultiFork}, - snapshot::Snapshots, -}; pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities @@ -22,8 +23,6 @@ pub trait DatabaseExt: Database {} impl DatabaseExt for Backend {} -impl<'a> DatabaseExt for RefDBWrapper<'a> {} - /// Provides the underlying `revm::Database` implementation. 
/// /// A `Backend` can be initialised in two forms: @@ -248,3 +247,51 @@ impl DatabaseRef for BackendDatabase { } } } + +/// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called +pub(crate) struct RefBackendWrapper<'a> { + pub inner: &'a mut Backend, +} + +// === impl RefBackendWrapper === + +impl<'a> RefBackendWrapper<'a> { + pub fn new(inner: &'a mut Backend) -> Self { + Self { inner } + } + + pub fn inspect_ref( + &mut self, + mut env: Env, + mut inspector: INSP, + ) -> (Return, TransactOut, u64, Map, Vec) + where + INSP: Inspector, + { + revm::evm_inner::(&mut env, self, &mut inspector).transact() + } +} + +impl<'a> DatabaseExt for RefBackendWrapper<'a> {} + +impl<'a> Drop for RefBackendWrapper<'a> { + fn drop(&mut self) { + // TODO revert all things snapshots etc created + } +} + +impl<'a> Database for RefBackendWrapper<'a> { + fn basic(&mut self, address: H160) -> AccountInfo { + DatabaseRef::basic(self.inner, address) + } + fn code_by_hash(&mut self, code_hash: H256) -> Bytes { + DatabaseRef::code_by_hash(self.inner, code_hash) + } + fn storage(&mut self, address: H160, index: U256) -> U256 { + DatabaseRef::storage(self.inner, address, index) + } + + fn block_hash(&mut self, number: U256) -> H256 { + DatabaseRef::block_hash(self.inner, number) + } +} diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 28e6db4aa409..7e8bea721dc2 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -1,5 +1,6 @@ /// ABIs used internally in the executor pub mod abi; + use self::inspector::{InspectorData, InspectorStackConfig}; use crate::{debug::DebugArena, trace::CallTraceArena, CALLER}; pub use abi::{ @@ -20,7 +21,10 @@ use revm::{ }; /// Reexport commonly used revm types pub use revm::{db::DatabaseRef, Env, SpecId}; -use std::collections::{BTreeMap, VecDeque}; +use std::{ + cell::{Ref, RefCell}, + collections::{BTreeMap, VecDeque}, +}; /// custom revm database implementations pub mod backend; @@ -32,25 +36,37 @@ pub mod fork; pub mod inspector; /// Executor configuration pub mod opts; -pub use backend::Backend; - pub mod snapshot; -use crate::executor::{backend::Backend, inspector::DEFAULT_CREATE2_DEPLOYER}; +use crate::executor::{ + backend::{Backend, RefBackendWrapper}, + inspector::DEFAULT_CREATE2_DEPLOYER, +}; pub use builder::{ExecutorBuilder, Fork}; /// A mapping of addresses to their changed state. pub type StateChangeset = HashMap; +/// A type that can execute calls /// -#[derive(Debug)] +/// The executor can be configured with various `revm::Inspector`s, like `Cheatcodes`. +/// +/// There are two ways of executing calls: +/// - `committing`: any state changes made during the call are recorded and are persisting +/// - `raw`: state changes only exist for the duration of the call and are discarded afterwards, in +/// other words: the state of the underlying database remains unchanged. +/// +/// Some more advanced cheatcodes (`fork`, `snapshot`) always require mutable access in the +/// `Backend`. However, the constraint `committing`/`raw` still applies. +/// Therefore, this type is not thread-safe. +#[derive(Debug, Clone)] pub struct Executor { /// The underlying `revm::Database` that contains the EVM storage // Note: We do not store an EVM here, since we are really // only interested in the database. REVM's `EVM` is a thin // wrapper around spawning a new EVM on every call anyway, // so the performance difference should be negligible. 
- pub backend: Backend, + backend: RefCell, env: Env, inspector_config: InspectorStackConfig, /// The gas limit for calls and deployments. This is different from the gas limit imposed by @@ -75,12 +91,21 @@ impl Executor { revm::AccountInfo { code: Some(Bytes::from_static(&[1])), ..Default::default() }, ); - Executor { backend, env, inspector_config, gas_limit } + Executor { backend: RefCell::new(backend), env, inspector_config, gas_limit } + } + + /// Returns a mutable reference to the Backend + pub fn backend_mut(&mut self) -> &mut Backend { + self.backend.get_mut() + } + + pub fn backend(&self) -> Ref<'_, Backend> { + self.backend.borrow() } /// Creates the default CREATE2 Contract Deployer for local tests and scripts. pub fn deploy_create2_deployer(&mut self) -> eyre::Result<()> { - let create2_deployer_account = self.backend.basic(DEFAULT_CREATE2_DEPLOYER); + let create2_deployer_account = self.backend_mut().basic(DEFAULT_CREATE2_DEPLOYER); if create2_deployer_account.code.is_none() || create2_deployer_account.code.as_ref().unwrap().is_empty() @@ -99,24 +124,24 @@ impl Executor { /// Set the balance of an account. pub fn set_balance(&mut self, address: Address, amount: U256) -> &mut Self { - let mut account = self.backend.basic(address); + let mut account = self.backend_mut().basic(address); account.balance = amount; - self.backend.insert_cache(address, account); + self.backend_mut().insert_cache(address, account); self } /// Gets the balance of an account pub fn get_balance(&self, address: Address) -> U256 { - self.backend.basic(address).balance + self.backend().basic(address).balance } /// Set the nonce of an account. pub fn set_nonce(&mut self, address: Address, nonce: u64) -> &mut Self { - let mut account = self.backend.basic(address); + let mut account = self.backend_mut().basic(address); account.nonce = nonce; - self.backend.insert_cache(address, account); + self.backend_mut().insert_cache(address, account); self } @@ -222,10 +247,10 @@ impl Executor { // Build VM let mut evm = EVM::new(); evm.env = self.build_env(from, TransactTo::Call(to), calldata, value); - evm.database(&mut self.backend); + let mut inspector = self.inspector_config.stack(); + evm.database(self.backend_mut()); // Run the call - let mut inspector = self.inspector_config.stack(); let (status, out, gas, _) = evm.inspect_commit(&mut inspector); let result = match out { TransactOut::Call(data) => data, @@ -347,13 +372,13 @@ impl Executor { let stipend = stipend(&calldata, self.env.cfg.spec_id); // Build VM - let mut evm = EVM::new(); - evm.env = self.build_env(from, TransactTo::Call(to), calldata, value); - evm.database(&self.backend); + let env = self.build_env(from, TransactTo::Call(to), calldata, value); + let mut db = self.backend.borrow_mut(); + let mut db = RefBackendWrapper::new(&mut *db); // Run the call let mut inspector = self.inspector_config.stack(); - let (status, out, gas, state_changeset, _) = evm.inspect_ref(&mut inspector); + let (status, out, gas, state_changeset, _) = db.inspect_ref(env, &mut inspector); let result = match out { TransactOut::Call(data) => data, _ => Bytes::default(), @@ -397,9 +422,10 @@ impl Executor { ) -> std::result::Result { let mut evm = EVM::new(); evm.env = self.build_env(from, TransactTo::Create(CreateScheme::Create), code, value); - evm.database(&mut self.backend); let mut inspector = self.inspector_config.stack(); + evm.database(self.backend_mut()); + let (status, out, gas, _) = evm.inspect_commit(&mut inspector); let InspectorData { logs, labels, traces, debug, 
cheatcodes, .. } = inspector.collect_inspector_states(); @@ -471,8 +497,8 @@ impl Executor { should_fail: bool, ) -> bool { // Construct a new VM with the state changeset - let mut backend = self.backend.clone_empty(); - backend.insert_cache(address, self.backend.basic(address)); + let mut backend = self.backend().clone_empty(); + backend.insert_cache(address, self.backend().basic(address)); backend.commit(state_changeset); let executor = Executor::new(backend, self.env.clone(), self.inspector_config.clone(), self.gas_limit); diff --git a/evm/src/fuzz/mod.rs b/evm/src/fuzz/mod.rs index 076752f2209a..0f39565a59b5 100644 --- a/evm/src/fuzz/mod.rs +++ b/evm/src/fuzz/mod.rs @@ -60,7 +60,7 @@ impl<'a> FuzzedExecutor<'a> { let counterexample: RefCell<(Bytes, RawCallResult)> = RefCell::new(Default::default()); // Stores fuzz state for use with [fuzz_calldata_from_state] - let state: EvmFuzzState = build_initial_state(&self.executor.backend.db); + let state: EvmFuzzState = build_initial_state(&self.executor.backend().db); // TODO: We should have a `FuzzerOpts` struct where we can configure the fuzzer. When we // have that, we should add a way to configure strategy weights From fa4e6940d84ca0092582708c5d97ec34f96caa24 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 6 Jun 2022 20:43:07 +0200 Subject: [PATCH 037/102] revertuse refcell again --- evm/src/executor/backend/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 5952e0f3ceee..526f8a3b3356 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -19,7 +19,11 @@ pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities #[auto_impl::auto_impl(&mut, Box)] -pub trait DatabaseExt: Database {} +pub trait DatabaseExt: Database { + fn snapshot(&mut self) -> U256 { + todo!() + } +} impl DatabaseExt for Backend {} From 752312b51e1a75c73e21c90b8858e5fb932a2b6b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jun 2022 18:40:20 +0200 Subject: [PATCH 038/102] refactor: change to dedicated fuzz backend --- evm/src/executor/backend/mod.rs | 221 ++++++++++++++++++++++---------- evm/src/executor/mod.rs | 160 +++++++++++++++++------ evm/src/executor/snapshot.rs | 5 + 3 files changed, 285 insertions(+), 101 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 526f8a3b3356..3ae93b7856c8 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -20,12 +20,62 @@ pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities #[auto_impl::auto_impl(&mut, Box)] pub trait DatabaseExt: Database { + /// Creates a new snapshot + fn snapshot(&mut self) -> U256; + /// Reverts the snapshot if it exists + /// + /// Returns `true` if the snapshot was successfully reverted, `false` if no snapshot for that id + /// exists. 
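Because the trait is annotated with `#[auto_impl(&mut, Box)]`, anything generic over `DB: DatabaseExt` (the cheatcode inspector, the inspector stack) also accepts `&mut Backend` or a boxed implementation. A minimal sketch of such a bound (the helper name is illustrative):

    // works for `Backend`, `&mut Backend`, or a boxed database alike
    fn snapshot_and_roll_back<DB: DatabaseExt>(db: &mut DB) -> bool {
        let id = db.snapshot();
        // ... execute some calls against `db` here ...
        db.revert(id)
    }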
+ fn revert(&mut self, id: U256) -> bool; + + /// Creates a new fork but does _not_ select it + fn create_fork(&mut self, fork: CreateFork) -> eyre::Result; + + /// Selects the fork's state + /// + /// **Note**: this does not change the local state, but swaps the remote state + /// + /// # Errors + /// + /// Returns an error if no fork with the given `id` exists + fn select_fork(&mut self, id: impl Into) -> eyre::Result<()>; +} + +impl DatabaseExt for Backend { fn snapshot(&mut self) -> U256 { - todo!() + let id = self.snapshots.insert(self.db.clone()); + trace!(target: "backend", "Created new snapshot {}", id); + id } -} -impl DatabaseExt for Backend {} + fn revert(&mut self, id: U256) -> bool { + if let Some(snapshot) = self.snapshots.remove(id) { + self.db = snapshot; + trace!(target: "backend", "Reverted snapshot {}", id); + true + } else { + warn!(target: "backend", "No snapshot to revert for {}", id); + false + } + } + + fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { + let (id, fork) = self.forks.create_fork(fork)?; + self.created_forks.insert(id.clone(), fork); + Ok(id) + } + + fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { + let id = id.into(); + let fork = self + .created_forks + .get(&id) + .cloned() + .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; + *self.db.db_mut() = BackendDatabase::Forked(fork, id); + Ok(()) + } +} /// Provides the underlying `revm::Database` implementation. /// @@ -116,53 +166,6 @@ impl Backend { } } - /// Creates a new snapshot - pub fn snapshot(&mut self) -> U256 { - let id = self.snapshots.insert(self.db.clone()); - trace!(target: "backend", "Created new snapshot {}", id); - id - } - - /// Reverts the snapshot if it exists - /// - /// Returns `true` if the snapshot was successfully reverted, `false` if no snapshot for that id - /// exists. - pub fn revert(&mut self, id: U256) -> bool { - if let Some(snapshot) = self.snapshots.remove(id) { - self.db = snapshot; - trace!(target: "backend", "Reverted snapshot {}", id); - true - } else { - warn!(target: "backend", "No snapshot to revert for {}", id); - false - } - } - - /// Creates a new fork but does _not_ select it - pub fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - let (id, fork) = self.forks.create_fork(fork)?; - self.created_forks.insert(id.clone(), fork); - Ok(id) - } - - /// Selects the fork's state - /// - /// **Note**: this does not change the local state, but swaps the remote state - /// - /// # Errors - /// - /// Returns an error if no fork with the given `id` exists - pub fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { - let id = id.into(); - let fork = self - .created_forks - .get(&id) - .cloned() - .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; - *self.db.db_mut() = BackendDatabase::Forked(fork, id); - Ok(()) - } - pub fn insert_cache(&mut self, address: H160, account: AccountInfo) { self.db.insert_cache(address, account) } @@ -252,16 +255,41 @@ impl DatabaseRef for BackendDatabase { } } -/// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called -pub(crate) struct RefBackendWrapper<'a> { - pub inner: &'a mut Backend, +/// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called. +/// +/// Any changes made during its existence that affect the caching layer of the underlying Database +/// will result in a clone of the initial Database. +/// +/// Main purpose for this type is for fuzzing. 
A test function fuzzer will repeatedly call the +/// function via immutable raw (no state changes). +/// +/// **N.B.**: we're assuming cheatcodes that alter the state (like multi fork swapping) are niche. +/// If they executed during fuzzing, it will require a clone of the initial input database. This way +/// we can support these cheatcodes in fuzzing cheaply without adding overhead for fuzz tests that +/// don't make use of them. Alternatively each test case would require its own `Backend` clone, +/// which would add significant overhead for large fuzz sets even if the Database is not big after +/// setup. +pub(crate) struct FuzzBackendWrapper<'a> { + pub inner: &'a Backend, + /// active database clone that holds the currently active db, like reverted snapshots, selected + /// fork, etc. + db_override: Option>, + /// tracks all created forks + created_forks: HashMap, + /// Contains snapshots made at a certain point + snapshots: Snapshots>, } // === impl RefBackendWrapper === -impl<'a> RefBackendWrapper<'a> { - pub fn new(inner: &'a mut Backend) -> Self { - Self { inner } +impl<'a> FuzzBackendWrapper<'a> { + pub fn new(inner: &'a Backend) -> Self { + Self { + inner, + db_override: None, + created_forks: Default::default(), + snapshots: Default::default(), + } } pub fn inspect_ref( @@ -274,28 +302,91 @@ impl<'a> RefBackendWrapper<'a> { { revm::evm_inner::(&mut env, self, &mut inspector).transact() } + + /// Returns the currently active database + fn active_db(&self) -> &CacheDB { + self.db_override.as_ref().unwrap_or(&self.inner.db) + } + + /// Sets the database override + fn set_active(&mut self, db: CacheDB) { + self.db_override = Some(db) + } } -impl<'a> DatabaseExt for RefBackendWrapper<'a> {} +impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { + fn snapshot(&mut self) -> U256 { + let id = self.snapshots.insert(self.active_db().clone()); + trace!(target: "backend", "Created new snapshot {}", id); + id + } + + fn revert(&mut self, id: U256) -> bool { + if let Some(snapshot) = + self.snapshots.remove(id).or_else(|| self.inner.snapshots.get(id).cloned()) + { + self.set_active(snapshot); + trace!(target: "backend", "Reverted snapshot {}", id); + true + } else { + warn!(target: "backend", "No snapshot to revert for {}", id); + false + } + } + + fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { + let (id, fork) = self.inner.forks.create_fork(fork)?; + self.created_forks.insert(id.clone(), fork); + Ok(id) + } -impl<'a> Drop for RefBackendWrapper<'a> { - fn drop(&mut self) { - // TODO revert all things snapshots etc created + fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { + let id = id.into(); + let fork = self + .created_forks + .get(&id) + .or_else(|| self.inner.created_forks.get(&id)) + .cloned() + .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; + if let Some(ref mut db) = self.db_override { + *db.db_mut() = BackendDatabase::Forked(fork, id); + } else { + let mut db = self.inner.db.clone(); + *db.db_mut() = BackendDatabase::Forked(fork, id); + self.set_active(db); + } + Ok(()) } } -impl<'a> Database for RefBackendWrapper<'a> { +impl<'a> Database for FuzzBackendWrapper<'a> { fn basic(&mut self, address: H160) -> AccountInfo { - DatabaseRef::basic(self.inner, address) + if let Some(ref db) = self.db_override { + DatabaseRef::basic(db, address) + } else { + DatabaseRef::basic(self.inner, address) + } } fn code_by_hash(&mut self, code_hash: H256) -> Bytes { - DatabaseRef::code_by_hash(self.inner, code_hash) + if let Some(ref db) = self.db_override { + 
DatabaseRef::code_by_hash(db, code_hash) + } else { + DatabaseRef::code_by_hash(self.inner, code_hash) + } } fn storage(&mut self, address: H160, index: U256) -> U256 { - DatabaseRef::storage(self.inner, address, index) + if let Some(ref db) = self.db_override { + DatabaseRef::storage(db, address, index) + } else { + DatabaseRef::storage(self.inner, address, index) + } } fn block_hash(&mut self, number: U256) -> H256 { - DatabaseRef::block_hash(self.inner, number) + if let Some(ref db) = self.db_override { + DatabaseRef::block_hash(db, number) + } else { + DatabaseRef::block_hash(self.inner, number) + } } } diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 7e8bea721dc2..1743e5dbdbce 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -9,7 +9,7 @@ pub use abi::{ }; use bytes::Bytes; use ethers::{ - abi::{Abi, Detokenize, Tokenize}, + abi::{Abi, Contract, Detokenize, Function, Tokenize}, prelude::{decode_function_data, encode_function_data, Address, U256}, types::{transaction::eip2718::TypedTransaction, Log}, }; @@ -21,10 +21,7 @@ use revm::{ }; /// Reexport commonly used revm types pub use revm::{db::DatabaseRef, Env, SpecId}; -use std::{ - cell::{Ref, RefCell}, - collections::{BTreeMap, VecDeque}, -}; +use std::collections::{BTreeMap, VecDeque}; /// custom revm database implementations pub mod backend; @@ -39,8 +36,8 @@ pub mod opts; pub mod snapshot; use crate::executor::{ - backend::{Backend, RefBackendWrapper}, - inspector::DEFAULT_CREATE2_DEPLOYER, + backend::{Backend, FuzzBackendWrapper}, + inspector::{InspectorStack, DEFAULT_CREATE2_DEPLOYER}, }; pub use builder::{ExecutorBuilder, Fork}; @@ -55,10 +52,6 @@ pub type StateChangeset = HashMap; /// - `committing`: any state changes made during the call are recorded and are persisting /// - `raw`: state changes only exist for the duration of the call and are discarded afterwards, in /// other words: the state of the underlying database remains unchanged. -/// -/// Some more advanced cheatcodes (`fork`, `snapshot`) always require mutable access in the -/// `Backend`. However, the constraint `committing`/`raw` still applies. -/// Therefore, this type is not thread-safe. #[derive(Debug, Clone)] pub struct Executor { /// The underlying `revm::Database` that contains the EVM storage @@ -66,7 +59,7 @@ pub struct Executor { // only interested in the database. REVM's `EVM` is a thin // wrapper around spawning a new EVM on every call anyway, // so the performance difference should be negligible. - backend: RefCell, + backend: Backend, env: Env, inspector_config: InspectorStackConfig, /// The gas limit for calls and deployments. This is different from the gas limit imposed by @@ -91,16 +84,16 @@ impl Executor { revm::AccountInfo { code: Some(Bytes::from_static(&[1])), ..Default::default() }, ); - Executor { backend: RefCell::new(backend), env, inspector_config, gas_limit } + Executor { backend, env, inspector_config, gas_limit } } /// Returns a mutable reference to the Backend pub fn backend_mut(&mut self) -> &mut Backend { - self.backend.get_mut() + &mut self.backend } - pub fn backend(&self) -> Ref<'_, Backend> { - self.backend.borrow() + pub fn backend(&self) -> &Backend { + &self.backend } /// Creates the default CREATE2 Contract Deployer for local tests and scripts. 
@@ -242,7 +235,7 @@ impl Executor { calldata: Bytes, value: U256, ) -> eyre::Result { - let stipend = stipend(&calldata, self.env.cfg.spec_id); + let stipend = calc_stipend(&calldata, self.env.cfg.spec_id); // Build VM let mut evm = EVM::new(); @@ -300,8 +293,8 @@ impl Executor { /// Performs a call to an account on the current state of the VM. /// /// The state after the call is not persisted. - pub fn call( - &self, + pub fn execute( + mut self, from: Address, to: Address, func: F, @@ -362,23 +355,12 @@ impl Executor { /// Performs a raw call to an account on the current state of the VM. /// /// The state after the call is not persisted. - pub fn call_raw( - &self, - from: Address, - to: Address, - calldata: Bytes, - value: U256, - ) -> eyre::Result { - let stipend = stipend(&calldata, self.env.cfg.spec_id); + fn execute_with(&self, mut inspector: InspectorStack, f: F) -> eyre::Result + where + F: FnOnce(&mut InspectorStack) -> ExecutedCall, + { + let ExecutedCall { status, out, gas, state_changeset, stipend, .. } = f(&mut inspector); - // Build VM - let env = self.build_env(from, TransactTo::Call(to), calldata, value); - let mut db = self.backend.borrow_mut(); - let mut db = RefBackendWrapper::new(&mut *db); - - // Run the call - let mut inspector = self.inspector_config.stack(); - let (status, out, gas, state_changeset, _) = db.inspect_ref(env, &mut inspector); let result = match out { TransactOut::Call(data) => data, _ => Bytes::default(), @@ -412,6 +394,47 @@ impl Executor { }) } + /// Performs a call to an account on the current state of the VM. + /// + /// The state after the call is not persisted. + pub fn call( + &self, + from: Address, + to: Address, + func: F, + args: T, + value: U256, + abi: Option<&Abi>, + ) -> std::result::Result, EvmError> { + let func = func.into(); + let calldata = Bytes::from(encode_function_data(&func, args)?.to_vec()); + let call_result = self.call_raw(from, to, calldata, value)?; + + convert_call_result(abi, &func, call_result) + } + + /// Performs a raw call to an account on the current state of the VM. + /// + /// The state after the call is not persisted. + pub fn call_raw( + &self, + from: Address, + to: Address, + calldata: Bytes, + value: U256, + ) -> eyre::Result { + // execute the call + let inspector = self.inspector_config.stack(); + self.execute_with(inspector, |inspector| { + let stipend = calc_stipend(&calldata, self.env.cfg.spec_id); + // Build VM + let env = self.build_env(from, TransactTo::Call(to), calldata, value); + let mut db = FuzzBackendWrapper::new(self.backend()); + let (status, out, gas, state_changeset, logs) = db.inspect_ref(env, inspector); + ExecutedCall { status, out, gas, state_changeset, logs, stipend } + }) + } + /// Deploys a contract and commits the new state to the underlying database. 
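To illustrate the `raw` flavour with the new wrapper (identifiers such as `executor`, `sender`, `target` and `calldata` are stand-ins): the state diff is handed back to the caller while the executor's own backend stays untouched, so repeating the call starts from identical state:

    // `call_raw` takes `&self`: changes are reported, never persisted
    let first = executor.call_raw(sender, target, calldata.clone(), U256::zero())?;
    let _diff = first.state_changeset.expect("raw calls return a changeset");

    // same inputs, same backend state, same outcome
    let second = executor.call_raw(sender, target, calldata, U256::zero())?;
    assert_eq!(first.result, second.result);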
pub fn deploy( &mut self, @@ -659,8 +682,73 @@ impl Default for RawCallResult { } } +/// Helper type to bundle all call related items +struct ExecutedCall { + status: Return, + out: TransactOut, + gas: u64, + state_changeset: HashMap, + #[allow(unused)] + logs: Vec, + stipend: u64, +} + /// Calculates the initial gas stipend for a transaction -fn stipend(calldata: &[u8], spec: SpecId) -> u64 { +fn calc_stipend(calldata: &[u8], spec: SpecId) -> u64 { let non_zero_data_cost = if SpecId::enabled(spec, SpecId::ISTANBUL) { 16 } else { 68 }; calldata.iter().fold(21000, |sum, byte| sum + if *byte == 0 { 4 } else { non_zero_data_cost }) } + +fn convert_call_result( + abi: Option<&Contract>, + func: &Function, + call_result: RawCallResult, +) -> Result, EvmError> { + let RawCallResult { + result, + status, + reverted, + gas, + stipend, + logs, + labels, + traces, + debug, + transactions, + state_changeset, + } = call_result; + + match status { + return_ok!() => { + let result = decode_function_data(func, result, false)?; + Ok(CallResult { + reverted, + result, + gas, + stipend, + logs, + labels, + traces, + debug, + transactions, + state_changeset, + }) + } + _ => { + let reason = foundry_utils::decode_revert(result.as_ref(), abi) + .unwrap_or_else(|_| format!("{:?}", status)); + Err(EvmError::Execution { + reverted, + reason, + gas, + stipend, + logs, + traces, + debug, + labels, + transactions, + state_changeset, + }) + } + } +} diff --git a/evm/src/executor/snapshot.rs b/evm/src/executor/snapshot.rs index 8d30d8ce6eee..7e00e88d5c69 100644 --- a/evm/src/executor/snapshot.rs +++ b/evm/src/executor/snapshot.rs @@ -19,6 +19,11 @@ impl Snapshots { id } + /// Returns the snapshot with the given id `id` + pub fn get(&self, id: U256) -> Option<&T> { + self.snapshots.get(&id) + } + /// Removes the snapshot with the given `id` pub fn remove(&mut self, id: U256) -> Option { self.snapshots.remove(&id) From b3f955bf156d318e05a1a86b71038370548a7c32 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jun 2022 18:59:09 +0200 Subject: [PATCH 039/102] refactor: refactor conversion --- evm/src/executor/backend/mod.rs | 29 +++++++++++ evm/src/executor/mod.rs | 90 ++++++++++----------------------- 2 files changed, 56 insertions(+), 63 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 3ae93b7856c8..92d11b3b51a5 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -169,6 +169,17 @@ impl Backend { pub fn insert_cache(&mut self, address: H160, account: AccountInfo) { self.db.insert_cache(address, account) } + + pub fn inspect_ref( + &mut self, + mut env: Env, + mut inspector: INSP, + ) -> (Return, TransactOut, u64, Map, Vec) + where + INSP: Inspector, + { + revm::evm_inner::(&mut env, self, &mut inspector).transact() + } } // a bunch of delegate revm trait implementations @@ -191,6 +202,24 @@ impl DatabaseRef for Backend { } } +impl<'a> DatabaseRef for &'a mut Backend { + fn basic(&self, address: H160) -> AccountInfo { + DatabaseRef::basic(&self.db, address) + } + + fn code_by_hash(&self, code_hash: H256) -> bytes::Bytes { + DatabaseRef::code_by_hash(&self.db, code_hash) + } + + fn storage(&self, address: H160, index: U256) -> U256 { + DatabaseRef::storage(&self.db, address, index) + } + + fn block_hash(&self, number: U256) -> H256 { + DatabaseRef::block_hash(&self.db, number) + } +} + impl DatabaseCommit for Backend { fn commit(&mut self, changes: Map) { self.db.commit(changes) diff --git a/evm/src/executor/mod.rs 
b/evm/src/executor/mod.rs index 1743e5dbdbce..80a68a00ea8e 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -290,9 +290,7 @@ impl Executor { }) } - /// Performs a call to an account on the current state of the VM. - /// - /// The state after the call is not persisted. + /// Executes the test function call pub fn execute( mut self, from: Address, @@ -304,62 +302,29 @@ impl Executor { ) -> std::result::Result, EvmError> { let func = func.into(); let calldata = Bytes::from(encode_function_data(&func, args)?.to_vec()); - let RawCallResult { - result, - status, - reverted, - gas, - stipend, - logs, - labels, - traces, - debug, - transactions, - state_changeset, - } = self.call_raw(from, to, calldata, value)?; - match status { - return_ok!() => { - let result = decode_function_data(&func, result, false)?; - Ok(CallResult { - reverted, - result, - gas, - stipend, - logs, - labels, - traces, - debug, - transactions, - state_changeset, - }) - } - _ => { - let reason = foundry_utils::decode_revert(result.as_ref(), abi) - .unwrap_or_else(|_| format!("{:?}", status)); - Err(EvmError::Execution { - reverted, - reason, - gas, - stipend, - logs, - traces, - debug, - labels, - transactions, - state_changeset, - }) - } - } + + // execute the call + let mut inspector = self.inspector_config.stack(); + let stipend = calc_stipend(&calldata, self.env.cfg.spec_id); + let env = self.build_env(from, TransactTo::Call(to), calldata, value); + let (status, out, gas, state_changeset, logs) = + self.backend_mut().inspect_ref(env, &mut inspector); + + let executed_call = ExecutedCall { status, out, gas, state_changeset, logs, stipend }; + let call_result = self.convert_executed_call(inspector, executed_call)?; + + convert_call_result(abi, &func, call_result) } /// Performs a raw call to an account on the current state of the VM. /// /// The state after the call is not persisted. - fn execute_with(&self, mut inspector: InspectorStack, f: F) -> eyre::Result - where - F: FnOnce(&mut InspectorStack) -> ExecutedCall, - { - let ExecutedCall { status, out, gas, state_changeset, stipend, .. } = f(&mut inspector); + fn convert_executed_call( + &self, + mut inspector: InspectorStack, + call: ExecutedCall, + ) -> eyre::Result { + let ExecutedCall { status, out, gas, state_changeset, stipend, .. } = call; let result = match out { TransactOut::Call(data) => data, @@ -424,15 +389,14 @@ impl Executor { value: U256, ) -> eyre::Result { // execute the call - let inspector = self.inspector_config.stack(); - self.execute_with(inspector, |inspector| { - let stipend = calc_stipend(&calldata, self.env.cfg.spec_id); - // Build VM - let env = self.build_env(from, TransactTo::Call(to), calldata, value); - let mut db = FuzzBackendWrapper::new(self.backend()); - let (status, out, gas, state_changeset, logs) = db.inspect_ref(env, inspector); - ExecutedCall { status, out, gas, state_changeset, logs, stipend } - }) + let mut inspector = self.inspector_config.stack(); + let stipend = calc_stipend(&calldata, self.env.cfg.spec_id); + // Build VM + let env = self.build_env(from, TransactTo::Call(to), calldata, value); + let mut db = FuzzBackendWrapper::new(self.backend()); + let (status, out, gas, state_changeset, logs) = db.inspect_ref(env, &mut inspector); + let executed_call = ExecutedCall { status, out, gas, state_changeset, logs, stipend }; + self.convert_executed_call(inspector, executed_call) } /// Deploys a contract and commits the new state to the underlying database. 
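For reference, the renamed `calc_stipend` helper charges the usual intrinsic gas: a 21000 base, 4 gas per zero calldata byte, and 16 gas per non-zero byte from Istanbul onwards (68 before). A quick worked case, valid inside the module since the helper is private, follows directly from that formula:

    // two zero bytes and one non-zero byte on a post-Istanbul spec:
    // 21000 + 2 * 4 + 1 * 16 = 21024
    assert_eq!(calc_stipend(&[0x00, 0x00, 0x01], SpecId::LONDON), 21_024);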
From f102cf1412efaf28d84987e9fc43a0cc19c94aac Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jun 2022 19:04:35 +0200 Subject: [PATCH 040/102] chore: some cleanup --- evm/src/executor/mod.rs | 87 ++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 45 deletions(-) diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 80a68a00ea8e..6b5055361863 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -311,54 +311,11 @@ impl Executor { self.backend_mut().inspect_ref(env, &mut inspector); let executed_call = ExecutedCall { status, out, gas, state_changeset, logs, stipend }; - let call_result = self.convert_executed_call(inspector, executed_call)?; + let call_result = convert_executed_call(inspector, executed_call)?; convert_call_result(abi, &func, call_result) } - /// Performs a raw call to an account on the current state of the VM. - /// - /// The state after the call is not persisted. - fn convert_executed_call( - &self, - mut inspector: InspectorStack, - call: ExecutedCall, - ) -> eyre::Result { - let ExecutedCall { status, out, gas, state_changeset, stipend, .. } = call; - - let result = match out { - TransactOut::Call(data) => data, - _ => Bytes::default(), - }; - - let InspectorData { logs, labels, traces, debug, cheatcodes, .. } = - inspector.collect_inspector_states(); - - let transactions = if let Some(cheats) = cheatcodes { - if !cheats.broadcastable_transactions.is_empty() { - Some(cheats.broadcastable_transactions) - } else { - None - } - } else { - None - }; - - Ok(RawCallResult { - status, - reverted: !matches!(status, return_ok!()), - result, - gas, - stipend, - logs: logs.to_vec(), - labels, - traces, - debug, - transactions, - state_changeset: Some(state_changeset), - }) - } - /// Performs a call to an account on the current state of the VM. /// /// The state after the call is not persisted. @@ -396,7 +353,7 @@ impl Executor { let mut db = FuzzBackendWrapper::new(self.backend()); let (status, out, gas, state_changeset, logs) = db.inspect_ref(env, &mut inspector); let executed_call = ExecutedCall { status, out, gas, state_changeset, logs, stipend }; - self.convert_executed_call(inspector, executed_call) + convert_executed_call(inspector, executed_call) } /// Deploys a contract and commits the new state to the underlying database. @@ -663,6 +620,46 @@ fn calc_stipend(calldata: &[u8], spec: SpecId) -> u64 { calldata.iter().fold(21000, |sum, byte| sum + if *byte == 0 { 4 } else { non_zero_data_cost }) } +/// Converts the data aggregated in the `inspector` and `call` to a `RawCallResult` +fn convert_executed_call( + inspector: InspectorStack, + call: ExecutedCall, +) -> eyre::Result { + let ExecutedCall { status, out, gas, state_changeset, stipend, .. } = call; + + let result = match out { + TransactOut::Call(data) => data, + _ => Bytes::default(), + }; + + let InspectorData { logs, labels, traces, debug, cheatcodes, .. 
} = + inspector.collect_inspector_states(); + + let transactions = if let Some(cheats) = cheatcodes { + if !cheats.broadcastable_transactions.is_empty() { + Some(cheats.broadcastable_transactions) + } else { + None + } + } else { + None + }; + + Ok(RawCallResult { + status, + reverted: !matches!(status, return_ok!()), + result, + gas, + stipend, + logs: logs.to_vec(), + labels, + traces, + debug, + transactions, + state_changeset: Some(state_changeset), + }) +} + fn convert_call_result( abi: Option<&Contract>, func: &Function, From 5258a5b9d909bc435dbfe8b63917e4fe75b440a8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jun 2022 16:22:58 +0200 Subject: [PATCH 041/102] refactor: extract Fuzzbackend wrapper --- evm/src/executor/backend/fuzz.rs | 153 ++++++++++++++++++++++ evm/src/executor/backend/mod.rs | 213 ++++++------------------------- evm/src/executor/mod.rs | 3 +- 3 files changed, 195 insertions(+), 174 deletions(-) create mode 100644 evm/src/executor/backend/fuzz.rs diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs new file mode 100644 index 000000000000..2284c09400a5 --- /dev/null +++ b/evm/src/executor/backend/fuzz.rs @@ -0,0 +1,153 @@ +use crate::{ + executor::{ + backend::{Backend, BackendDatabase, DatabaseExt}, + fork::{CreateFork, ForkId, SharedBackend}, + snapshot::Snapshots, + }, + Address, +}; +use bytes::Bytes; +use ethers::prelude::{H160, H256, U256}; +use hashbrown::HashMap as Map; +use revm::{ + db::{CacheDB, DatabaseRef}, + Account, AccountInfo, Database, Env, Inspector, Log, Return, TransactOut, +}; +use std::collections::HashMap; + +/// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called. +/// +/// Any changes made during its existence that affect the caching layer of the underlying Database +/// will result in a clone of the initial Database. +/// +/// Main purpose for this type is for fuzzing. A test function fuzzer will repeatedly call the +/// function via immutable raw (no state changes). +/// +/// **N.B.**: we're assuming cheatcodes that alter the state (like multi fork swapping) are niche. +/// If they executed during fuzzing, it will require a clone of the initial input database. This way +/// we can support these cheatcodes in fuzzing cheaply without adding overhead for fuzz tests that +/// don't make use of them. Alternatively each test case would require its own `Backend` clone, +/// which would add significant overhead for large fuzz sets even if the Database is not big after +/// setup. +pub(crate) struct FuzzBackendWrapper<'a> { + pub inner: &'a Backend, + /// active database clone that holds the currently active db, like reverted snapshots, selected + /// fork, etc. 
+ db_override: Option>, + /// tracks all created forks + created_forks: HashMap, + /// Contains snapshots made at a certain point + snapshots: Snapshots>, +} + +// === impl FuzzBackendWrapper === + +impl<'a> FuzzBackendWrapper<'a> { + pub fn new(inner: &'a Backend) -> Self { + Self { + inner, + db_override: None, + created_forks: Default::default(), + snapshots: Default::default(), + } + } + + /// Executes the configured transaction of the `env` without commiting state changes + pub fn inspect_ref( + &mut self, + mut env: Env, + mut inspector: INSP, + ) -> (Return, TransactOut, u64, Map, Vec) + where + INSP: Inspector, + { + revm::evm_inner::(&mut env, self, &mut inspector).transact() + } + + /// Returns the currently active database + fn active_db(&self) -> &CacheDB { + self.db_override.as_ref().unwrap_or(&self.inner.db) + } + + /// Sets the database override + fn set_active(&mut self, db: CacheDB) { + self.db_override = Some(db) + } +} + +impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { + fn snapshot(&mut self) -> U256 { + let id = self.snapshots.insert(self.active_db().clone()); + trace!(target: "backend", "Created new snapshot {}", id); + id + } + + fn revert(&mut self, id: U256) -> bool { + if let Some(snapshot) = + self.snapshots.remove(id).or_else(|| self.inner.snapshots.get(id).cloned()) + { + self.set_active(snapshot); + trace!(target: "backend", "Reverted snapshot {}", id); + true + } else { + warn!(target: "backend", "No snapshot to revert for {}", id); + false + } + } + + fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { + let (id, fork) = self.inner.forks.create_fork(fork)?; + self.created_forks.insert(id.clone(), fork); + Ok(id) + } + + fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { + let id = id.into(); + let fork = self + .created_forks + .get(&id) + .or_else(|| self.inner.created_forks.get(&id)) + .cloned() + .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; + if let Some(ref mut db) = self.db_override { + *db.db_mut() = BackendDatabase::Forked(fork, id); + } else { + let mut db = self.inner.db.clone(); + *db.db_mut() = BackendDatabase::Forked(fork, id); + self.set_active(db); + } + Ok(()) + } +} + +impl<'a> Database for FuzzBackendWrapper<'a> { + fn basic(&mut self, address: H160) -> AccountInfo { + if let Some(ref db) = self.db_override { + DatabaseRef::basic(db, address) + } else { + DatabaseRef::basic(self.inner, address) + } + } + fn code_by_hash(&mut self, code_hash: H256) -> Bytes { + if let Some(ref db) = self.db_override { + DatabaseRef::code_by_hash(db, code_hash) + } else { + DatabaseRef::code_by_hash(self.inner, code_hash) + } + } + fn storage(&mut self, address: H160, index: U256) -> U256 { + if let Some(ref db) = self.db_override { + DatabaseRef::storage(db, address, index) + } else { + DatabaseRef::storage(self.inner, address, index) + } + } + + fn block_hash(&mut self, number: U256) -> H256 { + if let Some(ref db) = self.db_override { + DatabaseRef::block_hash(db, number) + } else { + DatabaseRef::block_hash(self.inner, number) + } + } +} diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 92d11b3b51a5..93ee7e6a2aef 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -14,7 +14,9 @@ use revm::{ }; use std::collections::HashMap; use tracing::{trace, warn}; +mod fuzz; mod in_memory_db; + pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities @@ -41,42 +43,6 @@ pub trait DatabaseExt: 
Database { fn select_fork(&mut self, id: impl Into) -> eyre::Result<()>; } -impl DatabaseExt for Backend { - fn snapshot(&mut self) -> U256 { - let id = self.snapshots.insert(self.db.clone()); - trace!(target: "backend", "Created new snapshot {}", id); - id - } - - fn revert(&mut self, id: U256) -> bool { - if let Some(snapshot) = self.snapshots.remove(id) { - self.db = snapshot; - trace!(target: "backend", "Reverted snapshot {}", id); - true - } else { - warn!(target: "backend", "No snapshot to revert for {}", id); - false - } - } - - fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - let (id, fork) = self.forks.create_fork(fork)?; - self.created_forks.insert(id.clone(), fork); - Ok(id) - } - - fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { - let id = id.into(); - let fork = self - .created_forks - .get(&id) - .cloned() - .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; - *self.db.db_mut() = BackendDatabase::Forked(fork, id); - Ok(()) - } -} - /// Provides the underlying `revm::Database` implementation. /// /// A `Backend` can be initialised in two forms: @@ -170,6 +136,7 @@ impl Backend { self.db.insert_cache(address, account) } + /// Executes the configured transaction of the `env` without commiting state changes pub fn inspect_ref( &mut self, mut env: Env, @@ -182,7 +149,43 @@ impl Backend { } } -// a bunch of delegate revm trait implementations +// === impl a bunch of `revm::Database` adjacent implementations === + +impl DatabaseExt for Backend { + fn snapshot(&mut self) -> U256 { + let id = self.snapshots.insert(self.db.clone()); + trace!(target: "backend", "Created new snapshot {}", id); + id + } + + fn revert(&mut self, id: U256) -> bool { + if let Some(snapshot) = self.snapshots.remove(id) { + self.db = snapshot; + trace!(target: "backend", "Reverted snapshot {}", id); + true + } else { + warn!(target: "backend", "No snapshot to revert for {}", id); + false + } + } + + fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { + let (id, fork) = self.forks.create_fork(fork)?; + self.created_forks.insert(id.clone(), fork); + Ok(id) + } + + fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { + let id = id.into(); + let fork = self + .created_forks + .get(&id) + .cloned() + .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; + *self.db.db_mut() = BackendDatabase::Forked(fork, id); + Ok(()) + } +} impl DatabaseRef for Backend { fn basic(&self, address: H160) -> AccountInfo { @@ -283,139 +286,3 @@ impl DatabaseRef for BackendDatabase { } } } - -/// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called. -/// -/// Any changes made during its existence that affect the caching layer of the underlying Database -/// will result in a clone of the initial Database. -/// -/// Main purpose for this type is for fuzzing. A test function fuzzer will repeatedly call the -/// function via immutable raw (no state changes). -/// -/// **N.B.**: we're assuming cheatcodes that alter the state (like multi fork swapping) are niche. -/// If they executed during fuzzing, it will require a clone of the initial input database. This way -/// we can support these cheatcodes in fuzzing cheaply without adding overhead for fuzz tests that -/// don't make use of them. Alternatively each test case would require its own `Backend` clone, -/// which would add significant overhead for large fuzz sets even if the Database is not big after -/// setup. 
-pub(crate) struct FuzzBackendWrapper<'a> { - pub inner: &'a Backend, - /// active database clone that holds the currently active db, like reverted snapshots, selected - /// fork, etc. - db_override: Option>, - /// tracks all created forks - created_forks: HashMap, - /// Contains snapshots made at a certain point - snapshots: Snapshots>, -} - -// === impl RefBackendWrapper === - -impl<'a> FuzzBackendWrapper<'a> { - pub fn new(inner: &'a Backend) -> Self { - Self { - inner, - db_override: None, - created_forks: Default::default(), - snapshots: Default::default(), - } - } - - pub fn inspect_ref( - &mut self, - mut env: Env, - mut inspector: INSP, - ) -> (Return, TransactOut, u64, Map, Vec) - where - INSP: Inspector, - { - revm::evm_inner::(&mut env, self, &mut inspector).transact() - } - - /// Returns the currently active database - fn active_db(&self) -> &CacheDB { - self.db_override.as_ref().unwrap_or(&self.inner.db) - } - - /// Sets the database override - fn set_active(&mut self, db: CacheDB) { - self.db_override = Some(db) - } -} - -impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { - fn snapshot(&mut self) -> U256 { - let id = self.snapshots.insert(self.active_db().clone()); - trace!(target: "backend", "Created new snapshot {}", id); - id - } - - fn revert(&mut self, id: U256) -> bool { - if let Some(snapshot) = - self.snapshots.remove(id).or_else(|| self.inner.snapshots.get(id).cloned()) - { - self.set_active(snapshot); - trace!(target: "backend", "Reverted snapshot {}", id); - true - } else { - warn!(target: "backend", "No snapshot to revert for {}", id); - false - } - } - - fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - let (id, fork) = self.inner.forks.create_fork(fork)?; - self.created_forks.insert(id.clone(), fork); - Ok(id) - } - - fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { - let id = id.into(); - let fork = self - .created_forks - .get(&id) - .or_else(|| self.inner.created_forks.get(&id)) - .cloned() - .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; - if let Some(ref mut db) = self.db_override { - *db.db_mut() = BackendDatabase::Forked(fork, id); - } else { - let mut db = self.inner.db.clone(); - *db.db_mut() = BackendDatabase::Forked(fork, id); - self.set_active(db); - } - Ok(()) - } -} - -impl<'a> Database for FuzzBackendWrapper<'a> { - fn basic(&mut self, address: H160) -> AccountInfo { - if let Some(ref db) = self.db_override { - DatabaseRef::basic(db, address) - } else { - DatabaseRef::basic(self.inner, address) - } - } - fn code_by_hash(&mut self, code_hash: H256) -> Bytes { - if let Some(ref db) = self.db_override { - DatabaseRef::code_by_hash(db, code_hash) - } else { - DatabaseRef::code_by_hash(self.inner, code_hash) - } - } - fn storage(&mut self, address: H160, index: U256) -> U256 { - if let Some(ref db) = self.db_override { - DatabaseRef::storage(db, address, index) - } else { - DatabaseRef::storage(self.inner, address, index) - } - } - - fn block_hash(&mut self, number: U256) -> H256 { - if let Some(ref db) = self.db_override { - DatabaseRef::block_hash(db, number) - } else { - DatabaseRef::block_hash(self.inner, number) - } - } -} diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 6b5055361863..0b3b1076119a 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -7,6 +7,7 @@ pub use abi::{ patch_hardhat_console_selector, HardhatConsoleCalls, CHEATCODE_ADDRESS, CONSOLE_ABI, HARDHAT_CONSOLE_ABI, HARDHAT_CONSOLE_ADDRESS, }; +use backend::fuzz::FuzzBackendWrapper; use bytes::Bytes; use 
ethers::{ abi::{Abi, Contract, Detokenize, Function, Tokenize}, @@ -36,7 +37,7 @@ pub mod opts; pub mod snapshot; use crate::executor::{ - backend::{Backend, FuzzBackendWrapper}, + backend::Backend, inspector::{InspectorStack, DEFAULT_CREATE2_DEPLOYER}, }; pub use builder::{ExecutorBuilder, Fork}; From 112d05b6f329b501eea14cf27185581cdf81b06b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jun 2022 17:09:51 +0200 Subject: [PATCH 042/102] feat: implement cheat codes --- evm/src/executor/abi.rs | 5 +- evm/src/executor/backend/fuzz.rs | 3 +- evm/src/executor/backend/mod.rs | 2 +- evm/src/executor/inspector/cheatcodes/fork.rs | 49 +++++++++++++++++++ evm/src/executor/inspector/cheatcodes/mod.rs | 36 +++++++------- evm/src/executor/mod.rs | 9 +++- testdata/cheats/Cheats.sol | 8 +-- 7 files changed, 86 insertions(+), 26 deletions(-) create mode 100644 evm/src/executor/inspector/cheatcodes/fork.rs diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index 48bb45cba39b..d38197623ded 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -69,7 +69,10 @@ ethers::contract::abigen!( startBroadcast(address) stopBroadcast() snapshot()(uint256) - revertTo(uint256) + revertTo(uint256)(bool) + createFork(string,uint256)(string) + createFork(string)(string) + selectFork(string) ]"#, ); pub use hevm_mod::{HEVMCalls, HEVM_ABI}; diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 2284c09400a5..661c744465b3 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -14,6 +14,7 @@ use revm::{ Account, AccountInfo, Database, Env, Inspector, Log, Return, TransactOut, }; use std::collections::HashMap; +use tracing::{trace, warn}; /// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called. /// @@ -29,7 +30,7 @@ use std::collections::HashMap; /// don't make use of them. Alternatively each test case would require its own `Backend` clone, /// which would add significant overhead for large fuzz sets even if the Database is not big after /// setup. -pub(crate) struct FuzzBackendWrapper<'a> { +pub struct FuzzBackendWrapper<'a> { pub inner: &'a Backend, /// active database clone that holds the currently active db, like reverted snapshots, selected /// fork, etc. 
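The `FuzzBackendWrapper` introduced above boils down to a copy-on-write override: reads fall through to the shared `Backend` until a state-changing cheatcode (snapshot revert, fork selection) forces a private clone into `db_override`. A minimal, dependency-free sketch of that pattern, with illustrative types standing in for `Backend`/`CacheDB`:

use std::collections::HashMap;

/// Simplified stand-in for the copy-on-write idea behind `FuzzBackendWrapper`:
/// reads are served from the shared base database until a state-changing
/// operation forces a private clone. The types here are illustrative only; the
/// real code wraps `Backend`/`CacheDB` and revm's `Database` traits.
struct CowDb<'a> {
    base: &'a HashMap<String, u64>,
    db_override: Option<HashMap<String, u64>>,
}

impl<'a> CowDb<'a> {
    fn new(base: &'a HashMap<String, u64>) -> Self {
        Self { base, db_override: None }
    }

    /// Reads hit the override if one exists, otherwise the shared base.
    fn get(&self, key: &str) -> Option<u64> {
        self.db_override.as_ref().unwrap_or(self.base).get(key).copied()
    }

    /// The first write clones the base; later writes reuse that clone, so the
    /// shared database is never mutated.
    fn set(&mut self, key: &str, value: u64) {
        if self.db_override.is_none() {
            self.db_override = Some(self.base.clone());
        }
        self.db_override
            .as_mut()
            .expect("override was just created")
            .insert(key.to_string(), value);
    }
}

fn main() {
    let mut base = HashMap::new();
    base.insert("balance".to_string(), 100);

    let mut db = CowDb::new(&base);
    assert_eq!(db.get("balance"), Some(100)); // served by the base, no clone yet
    db.set("balance", 42);                    // forces the private clone
    assert_eq!(db.get("balance"), Some(42));  // the override wins
    assert_eq!(base["balance"], 100);         // the shared state is untouched
    println!("copy-on-write checks passed");
}

This is what keeps per-case overhead near zero for fuzz runs that never touch those cheatcodes: the clone only happens on the first mutation.
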
diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 93ee7e6a2aef..62dfb141081b 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -15,8 +15,8 @@ use revm::{ use std::collections::HashMap; use tracing::{trace, warn}; mod fuzz; +pub use fuzz::FuzzBackendWrapper; mod in_memory_db; - pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs new file mode 100644 index 000000000000..f7faf9bf1c2a --- /dev/null +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -0,0 +1,49 @@ +use super::Cheatcodes; +use crate::{ + abi::HEVMCalls, + executor::{backend::DatabaseExt, fork::CreateFork}, +}; +use bytes::Bytes; +use ethers::{abi::AbiEncode, types::BlockNumber}; +use revm::EVMData; + +/// Handles fork related cheatcodes +pub fn apply( + state: &mut Cheatcodes, + data: &mut EVMData<'_, DB>, + call: &HEVMCalls, +) -> Option> { + Some(match call { + HEVMCalls::Snapshot(_) => Ok(data.db.snapshot().encode().into()), + HEVMCalls::RevertTo(snapshot) => Ok(data.db.revert(snapshot.0).encode().into()), + HEVMCalls::CreateFork0(fork) => { + create_fork(state, data, fork.0.clone(), BlockNumber::Latest) + } + HEVMCalls::CreateFork1(fork) => { + create_fork(state, data, fork.0.clone(), fork.1.as_u64().into()) + } + HEVMCalls::SelectFork(fork_id) => match data.db.select_fork(fork_id.0.clone()) { + Ok(_) => Ok(Bytes::new()), + Err(err) => Err(err.to_string().encode().into()), + }, + _ => return None, + }) +} + +/// Creates a new fork +fn create_fork( + state: &mut Cheatcodes, + data: &mut EVMData<'_, DB>, + url: String, + block: BlockNumber, +) -> Result { + let create = CreateFork { + // TODO refactor rpc cache config + cache_path: None, + url, + block, + chain_id: None, + env: data.env.clone(), + }; + todo!() +} diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs b/evm/src/executor/inspector/cheatcodes/mod.rs index a4dac468adab..ceec3b4684d6 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -1,17 +1,3 @@ -/// Cheatcodes related to the execution environment. -mod env; -pub use env::{Prank, RecordAccess}; -/// Assertion helpers (such as `expectEmit`) -mod expect; -pub use expect::{ExpectedCallData, ExpectedEmit, ExpectedRevert, MockCallDataContext}; -/// Cheatcodes that interact with the external environment (FFI etc.) -mod ext; -/// Cheatcodes that configure the fuzzer -mod fuzz; -/// Utility cheatcodes (`sign` etc.) -mod util; -pub use util::{DEFAULT_CREATE2_DEPLOYER, MISSING_CREATE2_DEPLOYER}; - use self::{ env::Broadcast, expect::{handle_expect_emit, handle_expect_revert}, @@ -30,11 +16,26 @@ use ethers::{ }, }; use revm::{ - opcode, BlockEnv, CallInputs, CreateInputs, Database, EVMData, Gas, Inspector, Interpreter, - Return, + opcode, BlockEnv, CallInputs, CreateInputs, EVMData, Gas, Inspector, Interpreter, Return, }; use std::collections::{BTreeMap, VecDeque}; +/// Cheatcodes related to the execution environment. +mod env; +pub use env::{Prank, RecordAccess}; +/// Assertion helpers (such as `expectEmit`) +mod expect; +pub use expect::{ExpectedCallData, ExpectedEmit, ExpectedRevert, MockCallDataContext}; +/// Cheatcodes that interact with the external environment (FFI etc.) +mod ext; +/// Fork related cheatcodes +mod fork; +/// Cheatcodes that configure the fuzzer +mod fuzz; +/// Utility cheatcodes (`sign` etc.) 
+mod util; +pub use util::{DEFAULT_CREATE2_DEPLOYER, MISSING_CREATE2_DEPLOYER}; + /// An inspector that handles calls to various cheatcodes, each with their own behavior. /// /// Cheatcodes can be called by contracts during execution to modify the VM environment, such as @@ -98,7 +99,7 @@ impl Cheatcodes { } } - fn apply_cheatcode( + fn apply_cheatcode( &mut self, data: &mut EVMData<'_, DB>, caller: Address, @@ -113,6 +114,7 @@ impl Cheatcodes { .or_else(|| expect::apply(self, data, &decoded)) .or_else(|| fuzz::apply(data, &decoded)) .or_else(|| ext::apply(self.ffi, &decoded)) + .or_else(|| fork::apply(self, data, &decoded)) .ok_or_else(|| "Cheatcode was unhandled. This is a bug.".to_string().encode())? } } diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 0b3b1076119a..9e018c74cb1c 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -7,7 +7,7 @@ pub use abi::{ patch_hardhat_console_selector, HardhatConsoleCalls, CHEATCODE_ADDRESS, CONSOLE_ABI, HARDHAT_CONSOLE_ABI, HARDHAT_CONSOLE_ADDRESS, }; -use backend::fuzz::FuzzBackendWrapper; +use backend::FuzzBackendWrapper; use bytes::Bytes; use ethers::{ abi::{Abi, Contract, Detokenize, Function, Tokenize}, @@ -155,6 +155,8 @@ impl Executor { } /// Calls the `setUp()` function on a contract. + /// + /// This will commit any state changes to the underlying database pub fn setup( &mut self, from: Option
, @@ -338,7 +340,7 @@ impl Executor { /// Performs a raw call to an account on the current state of the VM. /// - /// The state after the call is not persisted. + /// Any state modifications made by the call are not committed. pub fn call_raw( &self, from: Address, @@ -358,6 +360,9 @@ impl Executor { } /// Deploys a contract and commits the new state to the underlying database. + /// + /// Executes a CREATE transaction with the contract `code` and persistent database state + /// modifications pub fn deploy( &mut self, from: Address, diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 79522fc7b5aa..975c63978667 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -109,13 +109,13 @@ interface Cheats { function snapshot() external returns(uint256); // Revert the state of the evm to a previous snapshot // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. - function revertTo(uint256) external; + function revertTo(uint256) external returns(bool); // Creates a new fork with the given endpoint and block and returns the identifier of the fork - function createFork(string calldata,uint256) external returns(uint256); + function createFork(string calldata,uint256) external returns(string memory); // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork - function createFork(string calldata) external returns(uint256); + function createFork(string calldata) external returns(string memory); // takes a fork identifier created by `createFork` and changes the state - function switchFork(uint256) external; + function selectFork(string calldata) external; // forks the `block` variable from the given endpoint function forkBlockVariable(string calldata, uint256) external; } From 30e0b8485490221fd28244e419511c4e36cb5c0d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jun 2022 17:59:18 +0200 Subject: [PATCH 043/102] feat: implement fork cheat codes --- Cargo.lock | 1 + cli/src/cmd/cast/run.rs | 2 +- cli/src/cmd/forge/script/executor.rs | 7 +++++-- evm/Cargo.toml | 10 ++++++---- evm/src/executor/builder.rs | 16 ++++++++-------- evm/src/executor/fork/mod.rs | 5 ++--- evm/src/executor/fork/multi.rs | 10 +++++++++- evm/src/executor/inspector/cheatcodes/fork.rs | 11 +++++++---- evm/src/executor/inspector/cheatcodes/mod.rs | 13 ++++++++++++- evm/src/executor/inspector/mod.rs | 1 + forge/src/multi_runner.rs | 18 +++++++++++++----- forge/src/runner.rs | 10 +++++----- forge/src/test_helpers.rs | 5 ++++- 13 files changed, 74 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d40164272d68..114c48b093f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2002,6 +2002,7 @@ dependencies = [ "ethers", "eyre", "foundry-common", + "foundry-config", "foundry-utils", "futures", "hashbrown 0.12.0", diff --git a/cli/src/cmd/cast/run.rs b/cli/src/cmd/cast/run.rs index aa720d88f97c..daa86a191ffd 100644 --- a/cli/src/cmd/cast/run.rs +++ b/cli/src/cmd/cast/run.rs @@ -73,7 +73,7 @@ impl RunArgs { let db = Backend::new(utils::get_fork(&evm_opts, &config.rpc_storage_caching), &env).await; - let builder = ExecutorBuilder::new() + let builder = ExecutorBuilder::default() .with_config(env) .with_spec(crate::utils::evm_spec(&config.evm_version)); diff --git a/cli/src/cmd/forge/script/executor.rs b/cli/src/cmd/forge/script/executor.rs index e3d07bf0d61f..700e046e24d5 100644 --- a/cli/src/cmd/forge/script/executor.rs +++ b/cli/src/cmd/forge/script/executor.rs @@ 
-156,8 +156,11 @@ impl ScriptArgs { ) .await; - let mut builder = ExecutorBuilder::new() - .with_cheatcodes(script_config.evm_opts.ffi) + let mut builder = ExecutorBuilder::default() + .with_cheatcodes( + script_config.evm_opts.ffi, + script_config.config.rpc_storage_caching.clone(), + ) .with_config(env) .with_spec(crate::utils::evm_spec(&script_config.config.evm_version)) .with_gas_limit(script_config.evm_opts.gas_limit()); diff --git a/evm/Cargo.toml b/evm/Cargo.toml index e6257de65ef3..2d051516bc02 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -9,14 +9,15 @@ keywords = ["ethereum", "web3", "evm"] # TODO: We can probably reduce dependencies here or in the forge crate [dependencies] +# foundry interal foundry-utils = { path = "./../utils" } foundry-common = { path = "./../common" } +foundry-config = { path = "./../config" } # Encoding/decoding serde_json = "1.0.67" serde = "1.0.130" hex = "0.4.3" -ethers = { git = "https://github.com/gakonst/ethers-rs", default-features = false, features = ["solc-full"] } # Error handling eyre = "0.6.5" @@ -27,16 +28,17 @@ tracing = "0.1.26" tracing-subscriber = "0.3.11" tracing-error = "0.2.0" -# Threading/futures +# Threading/futures/async tokio = { version = "1.10.1", features = ["time"] } parking_lot = "0.12.0" futures = "0.3.21" once_cell = "1.9.0" # EVM -bytes = "1.1.0" -hashbrown = "0.12" +ethers = { git = "https://github.com/gakonst/ethers-rs", default-features = false, features = ["solc-full"] } revm = { version = "1.4", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } +hashbrown = "0.12" +bytes = "1.1.0" # Fuzzer proptest = "1.0.0" diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index df94243b13e2..68535aa96b20 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -11,6 +11,7 @@ use ethers::{ providers::{Http, Provider, RetryClient}, types::U256, }; +use foundry_config::cache::StorageCachingConfig; use revm::{Env, SpecId}; use std::{path::PathBuf, sync::Arc}; @@ -26,16 +27,15 @@ pub struct ExecutorBuilder { // === impl ExecutorBuilder === impl ExecutorBuilder { - #[must_use] - pub fn new() -> Self { - Default::default() - } - /// Enables cheatcodes on the executor. #[must_use] - pub fn with_cheatcodes(mut self, ffi: bool) -> Self { - self.inspector_config.cheatcodes = - Some(Cheatcodes::new(ffi, self.env.block.clone(), self.env.tx.gas_price)); + pub fn with_cheatcodes(mut self, ffi: bool, rpc_storage_caching: StorageCachingConfig) -> Self { + self.inspector_config.cheatcodes = Some(Cheatcodes::new( + ffi, + self.env.block.clone(), + self.env.tx.gas_price, + rpc_storage_caching, + )); self } diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index 0ed46bfbbc2d..2db7c40cf9cb 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -3,7 +3,6 @@ mod backend; pub use backend::{BackendHandler, SharedBackend}; use ethers::types::BlockNumber; use revm::Env; -use std::path::PathBuf; mod init; pub use init::environment; @@ -19,8 +18,8 @@ pub use multi::{ForkId, MultiFork, MultiForkHandler}; /// Represents a _fork_ of a remote chain whose data is available only via the `url` endpoint. 
#[derive(Debug)] pub struct CreateFork { - /// Where to read the cached storage from - pub cache_path: Option, + /// Whether to enable rpc storage caching for this fork + pub enable_caching: bool, /// The URL to a node for fetching remote state pub url: String, /// The block to fork against diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 2fa15f17251a..9d6904daf5bb 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -12,6 +12,7 @@ use ethers::{ providers::{Http, Provider, RetryClient}, types::{BlockId, BlockNumber, U256}, }; +use foundry_config::Config; use futures::{ channel::mpsc::{channel, Receiver, Sender}, stream::{Fuse, Stream}, @@ -280,7 +281,7 @@ async fn create_fork( retries: u32, backoff: u64, ) -> eyre::Result<(SharedBackend, Handler)> { - let CreateFork { cache_path, url, block: block_number, env, chain_id } = fork; + let CreateFork { enable_caching, url, block: block_number, env, chain_id } = fork; let provider = Arc::new(Provider::>::new_client( url.clone().as_str(), retries, @@ -303,6 +304,13 @@ async fn create_fork( }; meta.block_env.number = number.into(); + // determine the cache path if caching is enabled + let cache_path = if enable_caching { + Config::foundry_block_cache_dir(meta.cfg_env.chain_id.as_u64(), number) + } else { + None + }; + let db = BlockchainDb::new(meta, cache_path); Ok(SharedBackend::new(provider, db, Some(BlockId::Number(BlockNumber::Number(number.into()))))) } diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index f7faf9bf1c2a..51c7590a5021 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -5,6 +5,7 @@ use crate::{ }; use bytes::Bytes; use ethers::{abi::AbiEncode, types::BlockNumber}; + use revm::EVMData; /// Handles fork related cheatcodes @@ -37,13 +38,15 @@ fn create_fork( url: String, block: BlockNumber, ) -> Result { - let create = CreateFork { - // TODO refactor rpc cache config - cache_path: None, + let fork = CreateFork { + enable_caching: state.rpc_storage_caching.enable_for_endpoint(&url), url, block, chain_id: None, env: data.env.clone(), }; - todo!() + match data.db.create_fork(fork) { + Ok(id) => Ok(id.encode().into()), + Err(err) => Err(err.to_string().encode().into()), + } } diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs b/evm/src/executor/inspector/cheatcodes/mod.rs index ceec3b4684d6..2788088d3eae 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -26,6 +26,8 @@ pub use env::{Prank, RecordAccess}; /// Assertion helpers (such as `expectEmit`) mod expect; pub use expect::{ExpectedCallData, ExpectedEmit, ExpectedRevert, MockCallDataContext}; +use foundry_config::cache::StorageCachingConfig; + /// Cheatcodes that interact with the external environment (FFI etc.) 
mod ext; /// Fork related cheatcodes @@ -86,15 +88,24 @@ pub struct Cheatcodes { /// Scripting based transactions pub broadcastable_transactions: VecDeque, + + /// RPC storage caching settings determines what chains and endpoints to cache + pub rpc_storage_caching: StorageCachingConfig, } impl Cheatcodes { - pub fn new(ffi: bool, block: BlockEnv, gas_price: U256) -> Self { + pub fn new( + ffi: bool, + block: BlockEnv, + gas_price: U256, + rpc_storage_caching: StorageCachingConfig, + ) -> Self { Self { ffi, corrected_nonce: false, block: Some(block), gas_price: Some(gas_price), + rpc_storage_caching, ..Default::default() } } diff --git a/evm/src/executor/inspector/mod.rs b/evm/src/executor/inspector/mod.rs index 197a00427197..4ea3d7eeb8ba 100644 --- a/evm/src/executor/inspector/mod.rs +++ b/evm/src/executor/inspector/mod.rs @@ -17,6 +17,7 @@ mod cheatcodes; pub use cheatcodes::{Cheatcodes, DEFAULT_CREATE2_DEPLOYER}; use ethers::types::U256; + use revm::BlockEnv; #[derive(Default, Clone, Debug)] diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 0395897e86df..0e54bd3bb6aa 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -6,6 +6,7 @@ use ethers::{ types::{Address, Bytes, U256}, }; use eyre::Result; +use foundry_config::cache::StorageCachingConfig; use foundry_evm::executor::{ backend::Backend, fork::{CreateFork, MultiFork}, @@ -43,6 +44,8 @@ pub struct MultiContractRunner { pub fork: Option, /// The fork to use at launch pub fork2: Option, + /// RPC storage caching settings determines what chains and endpoints to cache + pub rpc_storage_caching: StorageCachingConfig, } impl MultiContractRunner { @@ -134,8 +137,8 @@ impl MultiContractRunner { abi.functions().any(|func| filter.matches_test(&func.name)) }) .map(|(id, (abi, deploy_code, libs))| { - let mut executor = ExecutorBuilder::new() - .with_cheatcodes(self.evm_opts.ffi) + let mut executor = ExecutorBuilder::default() + .with_cheatcodes(self.evm_opts.ffi, self.rpc_storage_caching.clone()) .with_config(env.clone()) .with_spec(self.evm_spec) .with_gas_limit(self.evm_opts.gas_limit()) @@ -233,11 +236,11 @@ impl MultiContractRunner { err, fields(name = %_name) )] - fn run_tests( + fn run_tests( &self, _name: &str, contract: &Abi, - executor: Executor, + executor: Executor, deploy_code: Bytes, libs: &[Bytes], (filter, include_fuzz_tests): (&impl TestFilter, bool), @@ -269,6 +272,10 @@ pub struct MultiContractRunnerBuilder { pub evm_spec: Option, /// The fork config pub fork: Option, + /// The fork to use at launch + pub fork2: Option, + /// RPC storage caching settings determines what chains and endpoints to cache + pub rpc_storage_caching: StorageCachingConfig, } impl MultiContractRunnerBuilder { @@ -363,7 +370,8 @@ impl MultiContractRunnerBuilder { errors: Some(execution_info.2), source_paths, fork: self.fork, - fork2: None, + fork2: self.fork2, + rpc_storage_caching: self.rpc_storage_caching, }) } diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 42b213fa6d92..5fba71fd90b8 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -19,9 +19,9 @@ use std::{collections::BTreeMap, time::Instant}; /// A type that executes all tests of a contract #[derive(Debug)] -pub struct ContractRunner<'a, DB: DatabaseRef> { +pub struct ContractRunner<'a> { /// The executor used by the runner. 
- pub executor: Executor, + pub executor: Executor, /// Library contracts to be deployed before the test contract pub predeploy_libs: &'a [Bytes], @@ -38,10 +38,10 @@ pub struct ContractRunner<'a, DB: DatabaseRef> { pub sender: Address, } -impl<'a, DB: DatabaseRef> ContractRunner<'a, DB> { +impl<'a> ContractRunner<'a> { #[allow(clippy::too_many_arguments)] pub fn new( - executor: Executor, + executor: Executor, contract: &'a Abi, code: Bytes, initial_balance: U256, @@ -61,7 +61,7 @@ impl<'a, DB: DatabaseRef> ContractRunner<'a, DB> { } } -impl<'a, DB: DatabaseRef + Send + Sync> ContractRunner<'a, DB> { +impl<'a> ContractRunner<'a> { /// Deploys the test contract inside the runner from the sending account, and optionally runs /// the `setUp` function on the test contract. pub fn setup(&mut self, setup: bool) -> Result { diff --git a/forge/src/test_helpers.rs b/forge/src/test_helpers.rs index a91df3763e39..bb92fe6ecb27 100644 --- a/forge/src/test_helpers.rs +++ b/forge/src/test_helpers.rs @@ -77,7 +77,10 @@ pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { pub fn test_executor() -> Executor { let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); - ExecutorBuilder::new().with_cheatcodes(false).with_config(env).build(Backend::simple()) + ExecutorBuilder::default() + .with_cheatcodes(false, Default::default()) + .with_config(env) + .build(Backend::simple()) } pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { From 3f667c150fb7dcd52a9e0d4d0ca3b73627a1f690 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jun 2022 18:35:02 +0200 Subject: [PATCH 044/102] refactor: make it compile again --- forge/src/multi_runner.rs | 23 +++++++++++------------ forge/src/runner.rs | 2 +- forge/src/test_helpers.rs | 10 +--------- forge/src/types.rs | 6 +++--- 4 files changed, 16 insertions(+), 25 deletions(-) diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 0e54bd3bb6aa..3aa57a220164 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -10,13 +10,12 @@ use foundry_config::cache::StorageCachingConfig; use foundry_evm::executor::{ backend::Backend, fork::{CreateFork, MultiFork}, - opts::EvmOpts, - Backend, DatabaseRef, Executor, ExecutorBuilder, Fork, SpecId, + opts::EvmOpts, Executor, ExecutorBuilder, Fork, SpecId, }; use foundry_utils::PostLinkInput; use proptest::test_runner::TestRunner; use rayon::prelude::*; -use std::{collections::BTreeMap, marker::Sync, path::Path, sync::mpsc::Sender}; +use std::{collections::BTreeMap, path::Path, sync::mpsc::Sender}; pub type DeployableContracts = BTreeMap)>; @@ -122,11 +121,11 @@ impl MultiContractRunner { let (forks, fork_handler) = MultiFork::spawn(); - { + let results = { // the db backend that serves all the data, each contract gets its own instance let db = Backend::new(forks, self.fork2.take()); - let results = self + self .contracts .par_iter() .filter(|(id, _)| { @@ -137,7 +136,7 @@ impl MultiContractRunner { abi.functions().any(|func| filter.matches_test(&func.name)) }) .map(|(id, (abi, deploy_code, libs))| { - let mut executor = ExecutorBuilder::default() + let executor = ExecutorBuilder::default() .with_cheatcodes(self.evm_opts.ffi, self.rpc_storage_caching.clone()) .with_config(env.clone()) .with_spec(self.evm_spec) @@ -163,22 +162,22 @@ impl MultiContractRunner { } (name, result) }) - .collect::>(); - } + .collect::>() + }; // the spawned handler contains some resources, rpc caches, that will get flushed on drop, // in order to ensure everything is flushed properly we wait for the thread 
to finish which // will happen when all the channels (MultiFork) are dropped - fork_handler.join()?; + fork_handler.join().unwrap(); Ok(results) } pub fn test( &mut self, - filter: &impl TestFilter, - stream_result: Option>, - include_fuzz_tests: bool, + _filter: &impl TestFilter, + _stream_result: Option>, + _include_fuzz_tests: bool, ) -> Result> { // let runtime = RuntimeOrHandle::new(); // let env = runtime.block_on(self.evm_opts.evm_env()); diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 5fba71fd90b8..beddb034dee2 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -8,7 +8,7 @@ use ethers::{ }; use eyre::Result; use foundry_evm::{ - executor::{CallResult, DatabaseRef, DeployResult, EvmError, Executor}, + executor::{CallResult, DeployResult, EvmError, Executor}, fuzz::FuzzedExecutor, trace::TraceKind, CALLER, diff --git a/forge/src/test_helpers.rs b/forge/src/test_helpers.rs index bb92fe6ecb27..60f384e40f7a 100644 --- a/forge/src/test_helpers.rs +++ b/forge/src/test_helpers.rs @@ -75,15 +75,7 @@ pub static EVM_OPTS: Lazy = Lazy::new(|| EvmOpts { ..Default::default() }); -pub fn test_executor() -> Executor { - let env = RuntimeOrHandle::new().block_on((*EVM_OPTS).evm_env()); - ExecutorBuilder::default() - .with_cheatcodes(false, Default::default()) - .with_config(env) - .build(Backend::simple()) -} - -pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { +pub fn fuzz_executor(executor: &Executor) -> FuzzedExecutor { let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() }; FuzzedExecutor::new(executor, proptest::test_runner::TestRunner::new(cfg), *CALLER) diff --git a/forge/src/types.rs b/forge/src/types.rs index 9f1ce767ddc7..ff49a665100e 100644 --- a/forge/src/types.rs +++ b/forge/src/types.rs @@ -1,10 +1,10 @@ use ethers::{ - abi::{Abi, AbiError, Event, Function}, + abi::{Event, Function}, solc::artifacts::CompactContractBytecode, types::H256, }; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap}, path::PathBuf, }; @@ -24,7 +24,7 @@ pub struct TestContract { pub events: BTreeMap, /// all errors of the contract - pub errors: BTreeMap>, + pub errors: BTreeMap>, } /// A solidity function that can be tested From c4b4002f8aa0580e6fb93d2bfafae0b66ce2341b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jun 2022 19:39:58 +0200 Subject: [PATCH 045/102] refactor: add shutdown routine to Multifork --- cli/src/cmd/forge/script/build.rs | 7 +-- cli/src/cmd/forge/script/cmd.rs | 1 + cli/src/cmd/forge/script/executor.rs | 26 ++++----- cli/src/cmd/forge/script/mod.rs | 85 ++++++++++++++-------------- cli/src/cmd/forge/script/receipts.rs | 3 +- cli/src/cmd/forge/script/runner.rs | 10 ++-- evm/src/executor/backend/mod.rs | 5 ++ evm/src/executor/fork/cache.rs | 2 +- evm/src/executor/fork/multi.rs | 45 +++++++++++++-- forge/src/multi_runner.rs | 16 ++---- forge/src/types.rs | 5 +- 11 files changed, 113 insertions(+), 92 deletions(-) diff --git a/cli/src/cmd/forge/script/build.rs b/cli/src/cmd/forge/script/build.rs index af4a9219c054..28a09b70b583 100644 --- a/cli/src/cmd/forge/script/build.rs +++ b/cli/src/cmd/forge/script/build.rs @@ -1,10 +1,9 @@ +use super::*; use crate::{ cmd::{get_cached_entry_by_name, unwrap_contracts, VerifyBundle}, compile, opts::forge::ContractInfo, }; -use eyre::{Context, ContextCompat}; - use ethers::{ prelude::{ artifacts::Libraries, cache::SolFilesCache, ArtifactId, Graph, Project, @@ -13,13 +12,11 @@ use ethers::{ solc::artifacts::{CompactContractBytecode, ContractBytecode, 
ContractBytecodeSome}, types::{Address, U256}, }; - +use eyre::{Context, ContextCompat}; use foundry_utils::PostLinkInput; use std::{collections::BTreeMap, str::FromStr}; use tracing::warn; -use super::*; - impl ScriptArgs { /// Compiles the file or project and the verify metadata. pub fn compile( diff --git a/cli/src/cmd/forge/script/cmd.rs b/cli/src/cmd/forge/script/cmd.rs index 7ca0487e043d..180e3689cf9e 100644 --- a/cli/src/cmd/forge/script/cmd.rs +++ b/cli/src/cmd/forge/script/cmd.rs @@ -13,6 +13,7 @@ use foundry_config::{figment::Figment, Config}; use super::*; impl ScriptArgs { + /// Exxecutes the script pub async fn run_script(mut self) -> eyre::Result<()> { let figment: Figment = From::from(&self); let evm_opts = figment.extract::()?; diff --git a/cli/src/cmd/forge/script/executor.rs b/cli/src/cmd/forge/script/executor.rs index 6f5b4d0872ca..3ef688870cb4 100644 --- a/cli/src/cmd/forge/script/executor.rs +++ b/cli/src/cmd/forge/script/executor.rs @@ -1,5 +1,8 @@ -use crate::{cmd::needs_setup, utils}; - +use super::*; +use crate::{ + cmd::{forge::script::*, needs_setup}, + utils, +}; use cast::executor::inspector::DEFAULT_CREATE2_DEPLOYER; use ethers::{ prelude::NameOrAddress, @@ -10,11 +13,8 @@ use forge::{ executor::{Backend, ExecutorBuilder}, trace::CallTraceDecoder, }; - use std::collections::VecDeque; -use crate::cmd::forge::script::*; - impl ScriptArgs { /// Locally deploys and executes the contract method that will collect all broadcastable /// transactions. @@ -139,11 +139,8 @@ impl ScriptArgs { } } - async fn prepare_runner( - &self, - script_config: &ScriptConfig, - sender: Address, - ) -> Runner { + /// Creates the Runner that drives script execution + async fn prepare_runner(&self, script_config: &ScriptConfig, sender: Address) -> ScriptRunner { let env = script_config.evm_opts.evm_env().await; // the db backend that serves all the data @@ -159,17 +156,14 @@ impl ScriptArgs { script_config.config.rpc_storage_caching.clone(), ) .with_config(env) - .with_spec(crate::utils::evm_spec(&script_config.config.evm_version)) + .with_spec(utils::evm_spec(&script_config.config.evm_version)) + .set_tracing(script_config.evm_opts.verbosity >= 3) .with_gas_limit(script_config.evm_opts.gas_limit()); - if script_config.evm_opts.verbosity >= 3 { - builder = builder.with_tracing(); - } - if self.debug { builder = builder.with_tracing().with_debugger(); } - Runner::new(builder.build(db), script_config.evm_opts.initial_balance, sender) + ScriptRunner::new(builder.build(db), script_config.evm_opts.initial_balance, sender) } } diff --git a/cli/src/cmd/forge/script/mod.rs b/cli/src/cmd/forge/script/mod.rs index 7de1d28c0c55..60a1a027f6be 100644 --- a/cli/src/cmd/forge/script/mod.rs +++ b/cli/src/cmd/forge/script/mod.rs @@ -1,18 +1,4 @@ -mod build; -use build::BuildOutput; - -mod runner; -use runner::Runner; - -mod broadcast; -use ui::{TUIExitReason, Tui, Ui}; - -mod cmd; - -mod executor; - -mod receipts; - +//! 
script command use crate::{cmd::forge::build::BuildArgs, opts::MultiWallet, utils::parse_ether_value}; use clap::{Parser, ValueHint}; use ethers::{ @@ -32,21 +18,30 @@ use forge::{ CallTraceArena, CallTraceDecoder, CallTraceDecoderBuilder, TraceKind, }, }; - use foundry_common::evm::EvmArgs; use foundry_config::Config; use foundry_utils::{encode_args, format_token, IntoFunction}; - +use serde::{Deserialize, Serialize}; use std::{ - collections::{BTreeMap, VecDeque}, + collections::{BTreeMap, HashMap, VecDeque}, path::PathBuf, time::Duration, }; - -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; use yansi::Paint; +mod build; +use build::BuildOutput; + +mod runner; +use runner::ScriptRunner; + +mod broadcast; +use ui::{TUIExitReason, Tui, Ui}; + +mod cmd; +mod executor; +mod receipts; + // Loads project's figment and merges the build cli arguments into it foundry_config::impl_figment_convert!(ScriptArgs, opts, evm_opts); @@ -130,29 +125,7 @@ pub struct ScriptArgs { pub with_gas_price: Option, } -pub struct ScriptResult { - pub success: bool, - pub logs: Vec, - pub traces: Vec<(TraceKind, CallTraceArena)>, - pub debug: Option>, - pub gas: u64, - pub labeled_addresses: BTreeMap, - pub transactions: Option>, - pub returned: bytes::Bytes, -} - -#[derive(Serialize, Deserialize)] -pub struct JsonResult { - pub logs: Vec, - pub gas_used: u64, - pub results: HashMap, -} - -#[derive(Serialize, Deserialize)] -pub struct NestedValue { - pub internal_type: String, - pub value: String, -} +// === impl ScriptArgs === impl ScriptArgs { pub fn decode_traces( @@ -420,6 +393,30 @@ impl ScriptArgs { } } +pub struct ScriptResult { + pub success: bool, + pub logs: Vec, + pub traces: Vec<(TraceKind, CallTraceArena)>, + pub debug: Option>, + pub gas: u64, + pub labeled_addresses: BTreeMap, + pub transactions: Option>, + pub returned: bytes::Bytes, +} + +#[derive(Serialize, Deserialize)] +pub struct JsonResult { + pub logs: Vec, + pub gas_used: u64, + pub results: HashMap, +} + +#[derive(Serialize, Deserialize)] +pub struct NestedValue { + pub internal_type: String, + pub value: String, +} + pub struct ScriptConfig { pub config: foundry_config::Config, pub evm_opts: EvmOpts, diff --git a/cli/src/cmd/forge/script/receipts.rs b/cli/src/cmd/forge/script/receipts.rs index a109584dcd1b..171861763370 100644 --- a/cli/src/cmd/forge/script/receipts.rs +++ b/cli/src/cmd/forge/script/receipts.rs @@ -1,9 +1,8 @@ -use std::sync::Arc; - use crate::{cmd::ScriptSequence, init_progress, update_progress, utils::print_receipt}; use ethers::prelude::{Http, PendingTransaction, Provider, RetryClient, TxHash}; use futures::StreamExt; use indicatif::{ProgressBar, ProgressStyle}; +use std::sync::Arc; /// Gets the receipts of previously pending transactions. 
pub async fn wait_for_pending( diff --git a/cli/src/cmd/forge/script/runner.rs b/cli/src/cmd/forge/script/runner.rs index c2624479a366..bd8640035052 100644 --- a/cli/src/cmd/forge/script/runner.rs +++ b/cli/src/cmd/forge/script/runner.rs @@ -6,14 +6,16 @@ use forge::{ }; use super::*; -pub struct Runner { - pub executor: Executor, + +/// Drives script execution +pub struct ScriptRunner { + pub executor: Executor, pub initial_balance: U256, pub sender: Address, } -impl Runner { - pub fn new(executor: Executor, initial_balance: U256, sender: Address) -> Self { +impl ScriptRunner { + pub fn new(executor: Executor, initial_balance: U256, sender: Address) -> Self { Self { executor, initial_balance, sender } } diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 62dfb141081b..a0ffc5bf9f6e 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -105,6 +105,11 @@ pub struct Backend { // === impl Backend === impl Backend { + /// Creates a new Backend with a spawned multi fork thread + pub fn spawn(fork: Option) -> Self { + Self::new(MultiFork::spawn(), fork) + } + /// Creates a new instance of `Backend` /// /// if `fork` is `Some` this will launch with a `fork` database, otherwise with an in-memory diff --git a/evm/src/executor/fork/cache.rs b/evm/src/executor/fork/cache.rs index b861322fc2d5..afb196f7a719 100644 --- a/evm/src/executor/fork/cache.rs +++ b/evm/src/executor/fork/cache.rs @@ -378,7 +378,7 @@ impl<'de> Deserialize<'de> for JsonBlockCacheData { /// A type that flushes a `JsonBlockCacheDB` on drop /// -/// This type intentionally does not implement `Clone` since it's intendent that there's only once +/// This type intentionally does not implement `Clone` since it's intended that there's only once /// instance that will flush the cache. #[derive(Debug)] pub struct FlushJsonBlockCacheDB(pub Arc); diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 9d6904daf5bb..6d71d5d7a9b3 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -65,6 +65,8 @@ impl AbiDecode for ForkId { pub struct MultiFork { /// Channel to send `Request`s to the handler handler: Sender, + /// Ensures that all rpc resources get flushed properly + _shutdown: Arc, } // === impl MultiForkBackend === @@ -73,17 +75,18 @@ impl MultiFork { /// Creates a new pair multi fork pair pub fn new() -> (Self, MultiForkHandler) { let (handler, handler_rx) = channel(1); - (Self { handler }, MultiForkHandler::new(handler_rx)) + let _shutdown = Arc::new(ShutDownMultiFork { handler: Some(handler.clone()) }); + (Self { handler, _shutdown }, MultiForkHandler::new(handler_rx)) } /// Creates a new pair and spawns the `MultiForkHandler` on a background thread /// /// Also returns the `JoinHandle` of the spawned thread. 
- pub fn spawn() -> (Self, std::thread::JoinHandle<()>) { + pub fn spawn() -> Self { let (fork, handler) = Self::new(); // spawn a light-weight thread with a thread-local async runtime just for // sending and receiving data from the remote client(s) - let handle = std::thread::Builder::new() + let _ = std::thread::Builder::new() .name("multi-fork-backend-thread".to_string()) .spawn(move || { let rt = tokio::runtime::Builder::new_current_thread() @@ -95,7 +98,7 @@ impl MultiFork { }) .expect("failed to spawn multi fork handler thread"); trace!(target: "fork::multi", "spawned MultiForkHandler thread"); - (fork, handle) + fork } /// Returns a fork backend @@ -132,6 +135,8 @@ enum Request { CreateFork(Box, CreateSender), /// Returns the Fork backend for the `ForkId` if it exists GetFork(ForkId, OneshotSender>), + /// Shutdowns the entire `MultiForkHandler`, see `ShutDownMultiFork` + ShutDown(OneshotSender<()>), } enum ForkTask { @@ -196,6 +201,13 @@ impl MultiForkHandler { let fork = self.forks.get(&fork_id).cloned(); let _ = sender.send(fork); } + Request::ShutDown(sender) => { + trace!(target: "fork::multi", "received shutdown signal"); + // we're emptying all fork backends, this way we ensure all caches get flushed + self.forks.clear(); + self.handlers.clear(); + let _ = sender.send(()); + } } } } @@ -268,6 +280,31 @@ impl Future for MultiForkHandler { } } +/// A type that's used to signaling the `MultiForkHandler` when it's time to shutdown. +/// +/// This is essentially a sync on drop, so that the `MultiForkHandler` can flush all rpc cashes +/// +/// This type intentionally does not implement `Clone` since it's intended that there's only once +/// instance. +#[derive(Debug)] +struct ShutDownMultiFork { + handler: Option>, +} + +impl Drop for ShutDownMultiFork { + fn drop(&mut self) { + trace!(target: "fork::multi", "initiating shutdown"); + let (sender, rx) = oneshot_channel(); + let req = Request::ShutDown(sender); + if let Some(mut handler) = self.handler.take() { + if handler.try_send(req).is_ok() { + let _ = rx.recv(); + trace!(target: "fork::cache", "multifork backend shutdown"); + } + } + } +} + /// Returns the identifier for a Fork which consists of the url and the block number fn create_fork_id(url: &str, num: BlockNumber) -> ForkId { ForkId(format!("{url}@{num}")) diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 3aa57a220164..5550c3090dba 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -8,9 +8,7 @@ use ethers::{ use eyre::Result; use foundry_config::cache::StorageCachingConfig; use foundry_evm::executor::{ - backend::Backend, - fork::{CreateFork, MultiFork}, - opts::EvmOpts, Executor, ExecutorBuilder, Fork, SpecId, + backend::Backend, fork::CreateFork, opts::EvmOpts, Executor, ExecutorBuilder, Fork, SpecId, }; use foundry_utils::PostLinkInput; use proptest::test_runner::TestRunner; @@ -119,11 +117,10 @@ impl MultiContractRunner { let runtime = RuntimeOrHandle::new(); let env = runtime.block_on(self.evm_opts.evm_env()); - let (forks, fork_handler) = MultiFork::spawn(); + let db = Backend::spawn(self.fork2.take()); - let results = { + let results = // the db backend that serves all the data, each contract gets its own instance - let db = Backend::new(forks, self.fork2.take()); self .contracts @@ -163,12 +160,7 @@ impl MultiContractRunner { (name, result) }) .collect::>() - }; - - // the spawned handler contains some resources, rpc caches, that will get flushed on drop, - // in order to ensure everything is flushed properly we wait 
for the thread to finish which - // will happen when all the channels (MultiFork) are dropped - fork_handler.join().unwrap(); + ; Ok(results) } diff --git a/forge/src/types.rs b/forge/src/types.rs index ff49a665100e..3ea374153f60 100644 --- a/forge/src/types.rs +++ b/forge/src/types.rs @@ -3,10 +3,7 @@ use ethers::{ solc::artifacts::CompactContractBytecode, types::H256, }; -use std::{ - collections::{BTreeMap}, - path::PathBuf, -}; +use std::{collections::BTreeMap, path::PathBuf}; /// Represents a solidity Contract that's a test target #[derive(Debug, Clone)] From fa47f9c7fe8df009ecf597f69121d1df66e4dd5e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jun 2022 20:23:34 +0200 Subject: [PATCH 046/102] refactor: improve backend --- cli/src/cmd/cast/run.rs | 6 ++++-- cli/src/cmd/forge/script/executor.rs | 9 +++------ evm/src/executor/mod.rs | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/cli/src/cmd/cast/run.rs b/cli/src/cmd/cast/run.rs index daa86a191ffd..bef9cc8265ef 100644 --- a/cli/src/cmd/cast/run.rs +++ b/cli/src/cmd/cast/run.rs @@ -70,12 +70,14 @@ impl RunArgs { // Set up the execution environment let env = evm_opts.evm_env().await; + // TODO(mattsse) + // utils::get_fork(&evm_opts, &config.rpc_storage_caching), &env let db = - Backend::new(utils::get_fork(&evm_opts, &config.rpc_storage_caching), &env).await; + Backend::spawn(None); let builder = ExecutorBuilder::default() .with_config(env) - .with_spec(crate::utils::evm_spec(&config.evm_version)); + .with_spec(utils::evm_spec(&config.evm_version)); let mut executor = builder.build(db); diff --git a/cli/src/cmd/forge/script/executor.rs b/cli/src/cmd/forge/script/executor.rs index 3ef688870cb4..1ed5ec19512b 100644 --- a/cli/src/cmd/forge/script/executor.rs +++ b/cli/src/cmd/forge/script/executor.rs @@ -133,7 +133,7 @@ impl ScriptArgs { }); if failed { - Err(eyre::Report::msg("Simulated execution failed")) + eyre::bail!("Simulated execution failed") } else { Ok((final_txs, create2_contracts)) } @@ -144,11 +144,8 @@ impl ScriptArgs { let env = script_config.evm_opts.evm_env().await; // the db backend that serves all the data - let db = Backend::new( - utils::get_fork(&script_config.evm_opts, &script_config.config.rpc_storage_caching), - &env, - ) - .await; + // utils::get_fork(&script_config.evm_opts, &script_config.config.rpc_storage_caching), + let db = Backend::spawn(None); let mut builder = ExecutorBuilder::default() .with_cheatcodes( diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 9e018c74cb1c..254515751d75 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -26,6 +26,7 @@ use std::collections::{BTreeMap, VecDeque}; /// custom revm database implementations pub mod backend; +pub use backend::Backend; /// Executor builder pub mod builder; /// Forking provider @@ -37,7 +38,6 @@ pub mod opts; pub mod snapshot; use crate::executor::{ - backend::Backend, inspector::{InspectorStack, DEFAULT_CREATE2_DEPLOYER}, }; pub use builder::{ExecutorBuilder, Fork}; From 7a4792a1e61bfdfacbaee8557a4e769cc7e219c9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 9 Jun 2022 01:10:34 +0200 Subject: [PATCH 047/102] make everything compile again --- cli/src/cmd/cast/run.rs | 9 ++------- cli/src/cmd/forge/script/executor.rs | 5 +---- cli/src/cmd/forge/script/runner.rs | 2 +- evm/src/executor/mod.rs | 4 +--- 4 files changed, 5 insertions(+), 15 deletions(-) diff --git a/cli/src/cmd/cast/run.rs b/cli/src/cmd/cast/run.rs index bef9cc8265ef..df70260d8bf7 100644 --- 
a/cli/src/cmd/cast/run.rs +++ b/cli/src/cmd/cast/run.rs @@ -72,8 +72,7 @@ impl RunArgs { let env = evm_opts.evm_env().await; // TODO(mattsse) // utils::get_fork(&evm_opts, &config.rpc_storage_caching), &env - let db = - Backend::spawn(None); + let db = Backend::spawn(None); let builder = ExecutorBuilder::default() .with_config(env) @@ -108,11 +107,7 @@ impl RunArgs { // Execute our transaction let mut result = { - executor.set_tracing(true).set_gas_limit(tx.gas); - - if self.debug { - executor.set_debugger(true); - } + executor.set_tracing(true).set_gas_limit(tx.gas).set_debugger(self.debug); if let Some(to) = tx.to { let RawCallResult { reverted, gas, traces, debug: run_debug, .. } = diff --git a/cli/src/cmd/forge/script/executor.rs b/cli/src/cmd/forge/script/executor.rs index 1ed5ec19512b..ac030e0cc543 100644 --- a/cli/src/cmd/forge/script/executor.rs +++ b/cli/src/cmd/forge/script/executor.rs @@ -1,8 +1,5 @@ use super::*; -use crate::{ - cmd::{forge::script::*, needs_setup}, - utils, -}; +use crate::{cmd::needs_setup, utils}; use cast::executor::inspector::DEFAULT_CREATE2_DEPLOYER; use ethers::{ prelude::NameOrAddress, diff --git a/cli/src/cmd/forge/script/runner.rs b/cli/src/cmd/forge/script/runner.rs index bd8640035052..3f4e466d3780 100644 --- a/cli/src/cmd/forge/script/runner.rs +++ b/cli/src/cmd/forge/script/runner.rs @@ -1,6 +1,6 @@ use ethers::types::{Address, Bytes, NameOrAddress, U256}; use forge::{ - executor::{CallResult, DatabaseRef, DeployResult, EvmError, Executor, RawCallResult}, + executor::{CallResult, DeployResult, EvmError, Executor, RawCallResult}, trace::{CallTraceArena, TraceKind}, CALLER, }; diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 254515751d75..e36ae75b3084 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -37,9 +37,7 @@ pub mod inspector; pub mod opts; pub mod snapshot; -use crate::executor::{ - inspector::{InspectorStack, DEFAULT_CREATE2_DEPLOYER}, -}; +use crate::executor::inspector::{InspectorStack, DEFAULT_CREATE2_DEPLOYER}; pub use builder::{ExecutorBuilder, Fork}; /// A mapping of addresses to their changed state. 
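The patches up to this point settle the new entry points: `Backend::spawn` builds the database (optionally from a fork) and `ExecutorBuilder` consumes it. A minimal sketch of how a caller wires these together, mirroring the cast/run.rs call sites above; the exact import paths and the async context are assumptions, not part of the patch series:

    use foundry_evm::executor::{opts::EvmOpts, Backend, Executor, ExecutorBuilder};

    async fn build_executor(evm_opts: &EvmOpts) -> Executor {
        // environment comes from the fork endpoint when one is configured, otherwise local defaults
        let env = evm_opts.evm_env().await;
        // the backend is now spawned up front and handed to the builder
        let db = Backend::spawn(None);
        ExecutorBuilder::default().with_config(env).build(db)
    }
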
From ded855bd21ba8c392c7a0a761300ba92d9631cb6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 Jun 2022 12:38:51 +0200 Subject: [PATCH 048/102] add auto impl --- evm/Cargo.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/evm/Cargo.toml b/evm/Cargo.toml index c98912d81721..711307e51af0 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -43,7 +43,10 @@ proptest = "1.0.0" # Display yansi = "0.5.1" + +# Misc url = "2.2.2" +auto_impl = "1.0.1" [dev-dependencies] -tempfile = "3.3.0" \ No newline at end of file +tempfile = "3.3.0" From 54f42828c8276a59d3dd8013112e2a1bcba09cb6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 Jun 2022 12:39:19 +0200 Subject: [PATCH 049/102] add config --- Cargo.lock | 1 + evm/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 2dadf32e05b0..1d75259630b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1997,6 +1997,7 @@ dependencies = [ name = "foundry-evm" version = "0.2.0" dependencies = [ + "auto_impl 1.0.1", "bytes", "ethers", "eyre", diff --git a/evm/Cargo.toml b/evm/Cargo.toml index 711307e51af0..13864632d5d6 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -11,6 +11,7 @@ keywords = ["ethereum", "web3", "evm"] [dependencies] foundry-utils = { path = "./../utils" } foundry-common = { path = "./../common" } +foundry-common = { path = "./../config" } # Encoding/decoding serde_json = "1.0.67" From b2eb70b1a99f41dc2262c48f5247751847a0c558 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 Jun 2022 15:15:05 +0200 Subject: [PATCH 050/102] refactor: update outdated code --- Cargo.lock | 1 + cli/src/cmd/cast/run.rs | 4 +- cli/src/cmd/forge/script/executor.rs | 3 +- cli/src/cmd/forge/test.rs | 6 +- cli/src/utils.rs | 61 +------------- evm/Cargo.toml | 3 +- evm/src/executor/opts.rs | 121 +++++++++++++++++++-------- forge/src/multi_runner.rs | 115 +++++++------------------ 8 files changed, 129 insertions(+), 185 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d75259630b6..e301e97ca9e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2002,6 +2002,7 @@ dependencies = [ "ethers", "eyre", "foundry-common", + "foundry-config", "foundry-utils", "futures", "hashbrown 0.12.0", diff --git a/cli/src/cmd/cast/run.rs b/cli/src/cmd/cast/run.rs index df70260d8bf7..244e2e3e3ef5 100644 --- a/cli/src/cmd/cast/run.rs +++ b/cli/src/cmd/cast/run.rs @@ -70,9 +70,7 @@ impl RunArgs { // Set up the execution environment let env = evm_opts.evm_env().await; - // TODO(mattsse) - // utils::get_fork(&evm_opts, &config.rpc_storage_caching), &env - let db = Backend::spawn(None); + let db = Backend::spawn(evm_opts.get_fork(env.clone())); let builder = ExecutorBuilder::default() .with_config(env) diff --git a/cli/src/cmd/forge/script/executor.rs b/cli/src/cmd/forge/script/executor.rs index ac030e0cc543..f2b915f60e4f 100644 --- a/cli/src/cmd/forge/script/executor.rs +++ b/cli/src/cmd/forge/script/executor.rs @@ -141,8 +141,7 @@ impl ScriptArgs { let env = script_config.evm_opts.evm_env().await; // the db backend that serves all the data - // utils::get_fork(&script_config.evm_opts, &script_config.config.rpc_storage_caching), - let db = Backend::spawn(None); + let db = Backend::spawn(script_config.evm_opts.get_fork(env.clone())); let mut builder = ExecutorBuilder::default() .with_cheatcodes( diff --git a/cli/src/cmd/forge/test.rs b/cli/src/cmd/forge/test.rs index 942f3a1a50f7..3e358868a3b1 100644 --- a/cli/src/cmd/forge/test.rs +++ b/cli/src/cmd/forge/test.rs @@ -488,6 +488,8 @@ pub fn custom_run(args: 
TestArgs, include_fuzz_tests: bool) -> eyre::Result eyre::Result(future: F) -> F::Output { rt.block_on(future) } -/// Helper function that returns the [Fork] to use, if any. -/// -/// storage caching for the [Fork] will be enabled if -/// - `fork_url` is present -/// - `fork_block_number` is present -/// - [StorageCachingConfig] allows the `fork_url` + chain id pair -/// - storage is allowed (`no_storage_caching = false`) -/// -/// If all these criteria are met, then storage caching is enabled and storage info will be written -/// to [Config::foundry_cache_dir()]///storage.json -/// -/// for `mainnet` and `--fork-block-number 14435000` on mac the corresponding storage cache will be -/// at `~/.foundry/cache/mainnet/14435000/storage.json` -pub fn get_fork(evm_opts: &EvmOpts, config: &StorageCachingConfig) -> Option { - /// Returns the path where the cache file should be stored - /// - /// or `None` if caching should not be enabled - /// - /// See also [ Config::foundry_block_cache_file()] - fn get_block_storage_path( - evm_opts: &EvmOpts, - config: &StorageCachingConfig, - chain_id: u64, - ) -> Option { - if evm_opts.no_storage_caching { - // storage caching explicitly opted out of - return None - } - let url = evm_opts.fork_url.as_ref()?; - // cache only if block explicitly pinned - let block = evm_opts.fork_block_number?; - - if config.enable_for_endpoint(url) && config.enable_for_chain_id(chain_id) { - return Config::foundry_block_cache_file(chain_id, block) - } - - None - } - - if let Some(ref url) = evm_opts.fork_url { - let chain_id = evm_opts.get_chain_id(); - let cache_storage = get_block_storage_path(evm_opts, config, chain_id); - let fork = Fork { - url: url.clone(), - pin_block: evm_opts.fork_block_number, - cache_path: cache_storage, - chain_id, - initial_backoff: evm_opts.fork_retry_backoff.unwrap_or(50), - }; - return Some(fork) - } - - None -} - /// Conditionally print a message /// /// This macro accepts a predicate and the message to print if the predicate is tru diff --git a/evm/Cargo.toml b/evm/Cargo.toml index 13864632d5d6..f4e90ebeffb0 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -7,11 +7,10 @@ readme = "../README.md" repository = "https://github.com/foundry-rs/foundry" keywords = ["ethereum", "web3", "evm"] -# TODO: We can probably reduce dependencies here or in the forge crate [dependencies] foundry-utils = { path = "./../utils" } foundry-common = { path = "./../common" } -foundry-common = { path = "./../config" } +foundry-config = { path = "./../config" } # Encoding/decoding serde_json = "1.0.67" diff --git a/evm/src/executor/opts.rs b/evm/src/executor/opts.rs index 13a9295888da..4733cbe24ebc 100644 --- a/evm/src/executor/opts.rs +++ b/evm/src/executor/opts.rs @@ -1,4 +1,5 @@ use ethers::{ + prelude::BlockNumber, providers::{Middleware, Provider}, solc::utils::RuntimeOrHandle, types::{Address, Chain, U256}, @@ -6,6 +7,7 @@ use ethers::{ use revm::{BlockEnv, CfgEnv, SpecId, TxEnv}; use serde::{Deserialize, Deserializer, Serialize}; +use crate::executor::fork::CreateFork; use foundry_common; use super::fork::environment; @@ -45,46 +47,97 @@ pub struct EvmOpts { } impl EvmOpts { + /// Configures a new `revm::Env` + /// + /// If a `fork_url` is set, it gets configured with settings fetched from the endpoint (chain + /// id, ) pub async fn evm_env(&self) -> revm::Env { if let Some(ref fork_url) = self.fork_url { - let provider = - Provider::try_from(fork_url.as_str()).expect("could not instantiated provider"); - environment( - &provider, - self.memory_limit, - 
self.env.gas_price, - self.env.chain_id, - self.fork_block_number, - self.sender, - ) - .await - .expect("could not instantiate forked environment") + self.fork_evm_env(fork_url).await } else { - revm::Env { - block: BlockEnv { - number: self.env.block_number.into(), - coinbase: self.env.block_coinbase, - timestamp: self.env.block_timestamp.into(), - difficulty: self.env.block_difficulty.into(), - basefee: self.env.block_base_fee_per_gas.into(), - gas_limit: self.gas_limit(), - }, - cfg: CfgEnv { - chain_id: self.env.chain_id.unwrap_or(foundry_common::DEV_CHAIN_ID).into(), - spec_id: SpecId::LONDON, - perf_all_precompiles_have_balance: false, - memory_limit: self.memory_limit, - }, - tx: TxEnv { - gas_price: self.env.gas_price.unwrap_or_default().into(), - gas_limit: self.gas_limit().as_u64(), - caller: self.sender, - ..Default::default() - }, - } + self.local_evm_env() + } + } + + /// Convenience implementation to configure a `revm::Env` from non async code + /// + /// This only attaches are creates a temporary tokio runtime if `fork_url` is set + pub fn evm_env_blocking(&self) -> revm::Env { + if let Some(ref fork_url) = self.fork_url { + RuntimeOrHandle::new().block_on(async { self.fork_evm_env(fork_url).await }) + } else { + self.local_evm_env() + } + } + + /// Returns the `revm::Env` configured with settings retrieved from the endpoints + async fn fork_evm_env(&self, fork_url: impl AsRef) -> revm::Env { + let provider = + Provider::try_from(fork_url.as_ref()).expect("could not instantiated provider"); + environment( + &provider, + self.memory_limit, + self.env.gas_price, + self.env.chain_id, + self.fork_block_number, + self.sender, + ) + .await + .expect("could not instantiate forked environment") + } + + /// Returns the `revm::Env` configured with only local settings + pub fn local_evm_env(&self) -> revm::Env { + revm::Env { + block: BlockEnv { + number: self.env.block_number.into(), + coinbase: self.env.block_coinbase, + timestamp: self.env.block_timestamp.into(), + difficulty: self.env.block_difficulty.into(), + basefee: self.env.block_base_fee_per_gas.into(), + gas_limit: self.gas_limit(), + }, + cfg: CfgEnv { + chain_id: self.env.chain_id.unwrap_or(foundry_common::DEV_CHAIN_ID).into(), + spec_id: SpecId::LONDON, + perf_all_precompiles_have_balance: false, + memory_limit: self.memory_limit, + }, + tx: TxEnv { + gas_price: self.env.gas_price.unwrap_or_default().into(), + gas_limit: self.gas_limit().as_u64(), + caller: self.sender, + ..Default::default() + }, } } + /// Helper function that returns the [CreateFork] to use, if any. 
+ /// + /// storage caching for the [CreateFork] will be enabled if + /// - `fork_url` is present + /// - `fork_block_number` is present + /// - [StorageCachingConfig] allows the `fork_url` + chain id pair + /// - storage is allowed (`no_storage_caching = false`) + /// + /// If all these criteria are met, then storage caching is enabled and storage info will be + /// written to [Config::foundry_cache_dir()]///storage.json + /// + /// for `mainnet` and `--fork-block-number 14435000` on mac the corresponding storage cache will + /// be at `~/.foundry/cache/mainnet/14435000/storage.json` + pub fn get_fork(&self, env: revm::Env) -> Option { + Some(CreateFork { + url: self.fork_url.clone()?, + enable_caching: self.no_storage_caching, + block: self + .fork_block_number + .map(|num| BlockNumber::Number(num.into())) + .unwrap_or(BlockNumber::Latest), + chain_id: self.env.chain_id, + env, + }) + } + /// Returns the gas limit to use pub fn gas_limit(&self) -> U256 { self.env.block_gas_limit.unwrap_or(self.env.gas_limit).into() diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 5550c3090dba..75012dace259 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -2,13 +2,16 @@ use crate::{result::SuiteResult, ContractRunner, TestFilter}; use ethers::{ abi::Abi, prelude::{artifacts::CompactContractBytecode, ArtifactId, ArtifactOutput}, - solc::{utils::RuntimeOrHandle, Artifact, ProjectCompileOutput}, + solc::{Artifact, ProjectCompileOutput}, types::{Address, Bytes, U256}, }; use eyre::Result; use foundry_config::cache::StorageCachingConfig; -use foundry_evm::executor::{ - backend::Backend, fork::CreateFork, opts::EvmOpts, Executor, ExecutorBuilder, Fork, SpecId, +use foundry_evm::{ + executor::{ + backend::Backend, fork::CreateFork, opts::EvmOpts, Executor, ExecutorBuilder, SpecId, + }, + revm, }; use foundry_utils::PostLinkInput; use proptest::test_runner::TestRunner; @@ -27,6 +30,8 @@ pub struct MultiContractRunner { pub known_contracts: BTreeMap)>, /// The EVM instance used in the test runner pub evm_opts: EvmOpts, + /// The configured evm + pub env: revm::Env, /// The EVM spec pub evm_spec: SpecId, /// All known errors, used for decoding reverts @@ -37,10 +42,8 @@ pub struct MultiContractRunner { sender: Option
, /// A map of contract names to absolute source file paths pub source_paths: BTreeMap, - /// The fork config - pub fork: Option, /// The fork to use at launch - pub fork2: Option, + pub fork: Option, /// RPC storage caching settings determines what chains and endpoints to cache pub rpc_storage_caching: StorageCachingConfig, } @@ -107,17 +110,13 @@ impl MultiContractRunner { /// before executing all contracts and their tests in _parallel_. /// /// Each Executor gets its own instance of the `Backend`. - pub fn test2( + pub fn test( &mut self, filter: &impl TestFilter, stream_result: Option>, include_fuzz_tests: bool, ) -> Result> { - // TODO move to builder - let runtime = RuntimeOrHandle::new(); - let env = runtime.block_on(self.evm_opts.evm_env()); - - let db = Backend::spawn(self.fork2.take()); + let db = Backend::spawn(self.fork.take()); let results = // the db backend that serves all the data, each contract gets its own instance @@ -135,7 +134,7 @@ impl MultiContractRunner { .map(|(id, (abi, deploy_code, libs))| { let executor = ExecutorBuilder::default() .with_cheatcodes(self.evm_opts.ffi, self.rpc_storage_caching.clone()) - .with_config(env.clone()) + .with_config(self.env.clone()) .with_spec(self.evm_spec) .with_gas_limit(self.evm_opts.gas_limit()) .set_tracing(self.evm_opts.verbosity >= 3) @@ -165,61 +164,6 @@ impl MultiContractRunner { Ok(results) } - pub fn test( - &mut self, - _filter: &impl TestFilter, - _stream_result: Option>, - _include_fuzz_tests: bool, - ) -> Result> { - // let runtime = RuntimeOrHandle::new(); - // let env = runtime.block_on(self.evm_opts.evm_env()); - // - // // the db backend that serves all the data, each contract gets its own clone - // let db = runtime.block_on(Backend::new(self.fork.take(), &env)); - // - // let results = self - // .contracts - // .par_iter() - // .filter(|(id, _)| { - // filter.matches_path(id.source.to_string_lossy()) && - // filter.matches_contract(&id.name) - // }) - // .filter(|(_, (abi, _, _))| abi.functions().any(|func| - // filter.matches_test(&func.name))) .map(|(id, (abi, deploy_code, libs))| { - // let mut builder = ExecutorBuilder::new() - // .with_cheatcodes(self.evm_opts.ffi) - // .with_config(env.clone()) - // .with_spec(self.evm_spec) - // .with_gas_limit(self.evm_opts.gas_limit()); - // - // if self.evm_opts.verbosity >= 3 { - // builder = builder.with_tracing(); - // } - // - // let executor = builder.build(db.clone()); - // let result = self.run_tests( - // &id.identifier(), - // abi, - // executor, - // deploy_code.clone(), - // libs, - // (filter, include_fuzz_tests), - // )?; - // Ok((id.identifier(), result)) - // }) - // .filter_map(Result::<_>::ok) - // .filter(|(_, results)| !results.is_empty()) - // .map_with(stream_result, |stream_result, (name, result)| { - // if let Some(stream_result) = stream_result.as_ref() { - // stream_result.send((name.clone(), result.clone())).unwrap(); - // } - // (name, result) - // }) - // .collect::>(); - // Ok(results) - todo!() - } - // The _name field is unused because we only want it for tracing #[tracing::instrument( name = "contract", @@ -261,10 +205,8 @@ pub struct MultiContractRunnerBuilder { pub initial_balance: U256, /// The EVM spec to use pub evm_spec: Option, - /// The fork config - pub fork: Option, /// The fork to use at launch - pub fork2: Option, + pub fork: Option, /// RPC storage caching settings determines what chains and endpoints to cache pub rpc_storage_caching: StorageCachingConfig, } @@ -276,6 +218,7 @@ impl MultiContractRunnerBuilder { self, root: 
impl AsRef, output: ProjectCompileOutput, + env: revm::Env, evm_opts: EvmOpts, ) -> Result where @@ -355,13 +298,13 @@ impl MultiContractRunnerBuilder { contracts: deployable_contracts, known_contracts, evm_opts, + env, evm_spec: self.evm_spec.unwrap_or(SpecId::LONDON), sender: self.sender, fuzzer: self.fuzzer, errors: Some(execution_info.2), source_paths, fork: self.fork, - fork2: self.fork2, rpc_storage_caching: self.rpc_storage_caching, }) } @@ -391,7 +334,7 @@ impl MultiContractRunnerBuilder { } #[must_use] - pub fn with_fork(mut self, fork: Option) -> Self { + pub fn with_fork(mut self, fork: Option) -> Self { self.fork = fork; self } @@ -417,14 +360,23 @@ mod tests { /// Builds a non-tracing runner fn runner() -> MultiContractRunner { - base_runner().build(&(*PROJECT).paths.root, (*COMPILED).clone(), EVM_OPTS.clone()).unwrap() + base_runner() + .build( + &(*PROJECT).paths.root, + (*COMPILED).clone(), + EVM_OPTS.evm_env_blocking(), + EVM_OPTS.clone(), + ) + .unwrap() } /// Builds a tracing runner fn tracing_runner() -> MultiContractRunner { let mut opts = EVM_OPTS.clone(); opts.verbosity = 5; - base_runner().build(&(*PROJECT).paths.root, (*COMPILED).clone(), opts).unwrap() + base_runner() + .build(&(*PROJECT).paths.root, (*COMPILED).clone(), EVM_OPTS.evm_env_blocking(), opts) + .unwrap() } // Builds a runner that runs against forked state @@ -433,18 +385,13 @@ mod tests { opts.env.chain_id = None; // clear chain id so the correct one gets fetched from the RPC opts.fork_url = Some(rpc.to_string()); - let chain_id = opts.get_chain_id(); - let fork = Some(Fork { - cache_path: None, - url: rpc.to_string(), - pin_block: None, - chain_id, - initial_backoff: 50, - }); + let env = opts.evm_env_blocking(); + let fork = opts.get_fork(env.clone()); + base_runner() .with_fork(fork) - .build(&(*LIBS_PROJECT).paths.root, (*COMPILED_WITH_LIBS).clone(), opts) + .build(&(*LIBS_PROJECT).paths.root, (*COMPILED_WITH_LIBS).clone(), env, opts) .unwrap() } From 243d0ddea5292444aa20146eeda4292e501eb23f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 10 Jun 2022 15:40:43 +0200 Subject: [PATCH 051/102] chore: cleanup some code --- evm/src/executor/builder.rs | 55 ++------------------------------ evm/src/executor/fork/backend.rs | 19 ++++++----- evm/src/executor/mod.rs | 2 +- forge/src/multi_runner.rs | 2 +- 4 files changed, 13 insertions(+), 65 deletions(-) diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index 68535aa96b20..283e4bafc245 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -1,19 +1,16 @@ use super::{ - fork::SharedBackend, inspector::{Cheatcodes, InspectorStackConfig}, Executor, }; use crate::executor::{ backend::Backend, - fork::{BlockchainDb, BlockchainDbMeta}, }; use ethers::{ - providers::{Http, Provider, RetryClient}, types::U256, }; use foundry_config::cache::StorageCachingConfig; use revm::{Env, SpecId}; -use std::{path::PathBuf, sync::Arc}; + #[derive(Default, Debug)] pub struct ExecutorBuilder { @@ -89,52 +86,4 @@ impl ExecutorBuilder { let gas_limit = self.gas_limit.unwrap_or(self.env.block.gas_limit); Executor::new(db, self.env, self.inspector_config, gas_limit) } -} - -/// Represents a _fork_ of a live chain whose data is available only via the `url` endpoint. -/// -/// *Note:* this type intentionally does not implement `Clone` to prevent [Fork::spawn_backend()] -/// from being called multiple times. 
-#[derive(Debug)] -pub struct Fork { - /// Where to read the cached storage from - pub cache_path: Option, - /// The URL to a node for fetching remote state - pub url: String, - /// The block to fork against - pub pin_block: Option, - /// chain id retrieved from the endpoint - pub chain_id: u64, - /// The initial retry backoff - pub initial_backoff: u64, -} - -impl Fork { - /// Initialises and spawns the Storage Backend, the [revm::Database] - /// - /// If configured, then this will initialise the backend with the storage cache. - /// - /// The `SharedBackend` returned is connected to a background thread that communicates with the - /// endpoint via channels and is intended to be cloned when multiple [revm::Database] are - /// required. See also [crate::executor::fork::SharedBackend] - pub async fn spawn_backend(self, env: &Env) -> SharedBackend { - let Fork { cache_path, url, pin_block, chain_id, initial_backoff } = self; - - let provider = Arc::new( - Provider::>::new_client(url.clone().as_str(), 10, initial_backoff) - .expect("Failed to establish provider"), - ); - - let mut meta = BlockchainDbMeta::new(env.clone(), url); - - // update the meta to match the forked config - meta.cfg_env.chain_id = chain_id.into(); - if let Some(pin) = pin_block { - meta.block_env.number = pin.into(); - } - - let db = BlockchainDb::new(meta, cache_path); - - SharedBackend::spawn_backend(provider, db, pin_block.map(Into::into)).await - } -} +} \ No newline at end of file diff --git a/evm/src/executor/fork/backend.rs b/evm/src/executor/fork/backend.rs index 00f7ff639690..301f99741d9c 100644 --- a/evm/src/executor/fork/backend.rs +++ b/evm/src/executor/fork/backend.rs @@ -510,10 +510,7 @@ impl DatabaseRef for SharedBackend { #[cfg(test)] mod tests { - use crate::executor::{ - fork::{BlockchainDbMeta, JsonBlockCacheDB}, - Fork, - }; + use crate::executor::{Backend, fork::{BlockchainDbMeta, JsonBlockCacheDB}}; use ethers::{ providers::{Http, Provider}, solc::utils::RuntimeOrHandle, @@ -521,6 +518,8 @@ mod tests { }; use std::{collections::BTreeSet, convert::TryFrom, path::PathBuf, sync::Arc}; + use ethers::types::BlockNumber; + use crate::executor::fork::CreateFork; use super::*; const ENDPOINT: &str = "https://mainnet.infura.io/v3/c60b0bb42f8a4c6481ecd229eddaca27"; @@ -587,15 +586,15 @@ mod tests { let block_num = runtime.block_on(provider.get_block_number()).unwrap().as_u64(); let env = revm::Env::default(); - let fork = Fork { - cache_path: Some(cache_path.clone()), + let fork = CreateFork { + enable_caching: true, url: ENDPOINT.to_string(), - pin_block: Some(block_num), - chain_id: 1, - initial_backoff: 50, + block: BlockNumber::Number(block_num.into()), + chain_id: Some(1), + env }; - let backend = runtime.block_on(fork.spawn_backend(&env)); + let backend = Backend::spawn(Some(fork)); // some rng contract from etherscan let address: Address = "63091244180ae240c87d1f528f5f269134cb07b3".parse().unwrap(); diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index e36ae75b3084..6442c50967b4 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -38,7 +38,7 @@ pub mod opts; pub mod snapshot; use crate::executor::inspector::{InspectorStack, DEFAULT_CREATE2_DEPLOYER}; -pub use builder::{ExecutorBuilder, Fork}; +pub use builder::{ExecutorBuilder}; /// A mapping of addresses to their changed state. 
pub type StateChangeset = HashMap; diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 75012dace259..36276a301c7c 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -267,7 +267,7 @@ impl MultiContractRunnerBuilder { }; let abi = contract.abi.expect("We should have an abi by now"); - // if its a test, add it to deployable contracts + // if it's a test, add it to deployable contracts if abi.constructor.as_ref().map(|c| c.inputs.is_empty()).unwrap_or(true) && abi.functions().any(|func| func.name.starts_with("test")) { From e1e65d82bb9f04ce731c0a802c2a34bf491cbdcf Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 Jun 2022 14:33:09 +0200 Subject: [PATCH 052/102] chore: make it compile again --- cli/src/cmd/forge/script/executor.rs | 8 ++++---- cli/src/cmd/forge/script/mod.rs | 1 + cli/src/cmd/forge/script/receipts.rs | 5 ++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/cli/src/cmd/forge/script/executor.rs b/cli/src/cmd/forge/script/executor.rs index 69a9c6eb9aaa..692c0ce2e752 100644 --- a/cli/src/cmd/forge/script/executor.rs +++ b/cli/src/cmd/forge/script/executor.rs @@ -1,7 +1,9 @@ use super::*; -use crate::{cmd::needs_setup, utils}; +use crate::{ + cmd::{forge::script::sequence::TransactionWithMetadata, needs_setup}, + utils, +}; use cast::executor::inspector::DEFAULT_CREATE2_DEPLOYER; - use ethers::{ solc::artifacts::CompactContractBytecode, types::{transaction::eip2718::TypedTransaction, Address, U256}, @@ -12,8 +14,6 @@ use forge::{ }; use std::collections::VecDeque; -use crate::cmd::forge::script::{sequence::TransactionWithMetadata, *}; - impl ScriptArgs { /// Locally deploys and executes the contract method that will collect all broadcastable /// transactions. diff --git a/cli/src/cmd/forge/script/mod.rs b/cli/src/cmd/forge/script/mod.rs index 4bc7734981a3..d3ab73707c27 100644 --- a/cli/src/cmd/forge/script/mod.rs +++ b/cli/src/cmd/forge/script/mod.rs @@ -41,6 +41,7 @@ use ui::{TUIExitReason, Tui, Ui}; mod cmd; mod executor; mod receipts; +mod sequence; use super::build::ProjectPathsArgs; diff --git a/cli/src/cmd/forge/script/receipts.rs b/cli/src/cmd/forge/script/receipts.rs index 171861763370..82d87a92a783 100644 --- a/cli/src/cmd/forge/script/receipts.rs +++ b/cli/src/cmd/forge/script/receipts.rs @@ -1,4 +1,7 @@ -use crate::{cmd::ScriptSequence, init_progress, update_progress, utils::print_receipt}; +use crate::{ + cmd::forge::script::sequence::ScriptSequence, init_progress, update_progress, + utils::print_receipt, +}; use ethers::prelude::{Http, PendingTransaction, Provider, RetryClient, TxHash}; use futures::StreamExt; use indicatif::{ProgressBar, ProgressStyle}; From 2edda131d0adb3b7fff1de92aca7bd4c4c988377 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 Jun 2022 14:40:42 +0200 Subject: [PATCH 053/102] test: update failing tests --- evm/src/executor/fork/backend.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/evm/src/executor/fork/backend.rs b/evm/src/executor/fork/backend.rs index 6bedcddab9cc..71ebe5c1d49d 100644 --- a/evm/src/executor/fork/backend.rs +++ b/evm/src/executor/fork/backend.rs @@ -521,7 +521,8 @@ mod tests { }; use crate::executor::fork::CreateFork; - use ethers::types::BlockNumber; + use ethers::types::{BlockNumber, Chain}; + use foundry_config::Config; use std::{collections::BTreeSet, convert::TryFrom, path::PathBuf, sync::Arc}; use super::*; @@ -579,14 +580,11 @@ mod tests { assert!(!json.db().accounts.read().is_empty()); } - #[test] - fn 
can_read_write_cache() { + #[tokio::test(flavor = "multi_thread")] + async fn can_read_write_cache() { let provider = Provider::::try_from(ENDPOINT).unwrap(); - let tmpdir = tempfile::tempdir().unwrap(); - let cache_path = tmpdir.path().join("storage.json"); - let runtime = RuntimeOrHandle::new(); - let block_num = runtime.block_on(provider.get_block_number()).unwrap().as_u64(); + let block_num = provider.get_block_number().await.unwrap().as_u64(); let env = revm::Env::default(); let fork = CreateFork { @@ -612,7 +610,6 @@ mod tests { let _ = backend.storage(address, idx.into()); } drop(backend); - drop(runtime); let meta = BlockchainDbMeta { cfg_env: Default::default(), @@ -620,7 +617,10 @@ mod tests { hosts: Default::default(), }; - let db = BlockchainDb::new(meta, Some(cache_path)); + let db = BlockchainDb::new( + meta, + Some(Config::foundry_block_cache_dir(Chain::Mainnet, block_num).unwrap()), + ); assert!(db.accounts().read().contains_key(&address)); assert!(db.storage().read().contains_key(&address)); assert_eq!(db.storage().read().get(&address).unwrap().len(), num_slots as usize); From 61e563a1d8983f5066d9c9006082b2617ebbfb06 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 Jun 2022 16:04:52 +0200 Subject: [PATCH 054/102] chore(clippy): make clippy happy --- cli/src/cmd/forge/script/executor.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/cli/src/cmd/forge/script/executor.rs b/cli/src/cmd/forge/script/executor.rs index 692c0ce2e752..9be0838698d7 100644 --- a/cli/src/cmd/forge/script/executor.rs +++ b/cli/src/cmd/forge/script/executor.rs @@ -3,7 +3,6 @@ use crate::{ cmd::{forge::script::sequence::TransactionWithMetadata, needs_setup}, utils, }; -use cast::executor::inspector::DEFAULT_CREATE2_DEPLOYER; use ethers::{ solc::artifacts::CompactContractBytecode, types::{transaction::eip2718::TypedTransaction, Address, U256}, From c14473de5ff8f9d6820a1dcc74883012b8bebca6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 Jun 2022 18:25:33 +0200 Subject: [PATCH 055/102] test: add simple fork cheatcode tests --- forge/src/multi_runner.rs | 34 ++++++++++++++++-- testdata/cheats/Fork.t.sol | 72 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+), 2 deletions(-) create mode 100644 testdata/cheats/Fork.t.sol diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 36276a301c7c..9c882f4732ce 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -1053,7 +1053,7 @@ mod tests { } #[test] - fn test_cheats() { + fn test_envs() { let mut runner = runner(); // test `setEnv` first, and confirm that it can correctly set environment variables, @@ -1069,9 +1069,39 @@ Reason: `setEnv` failed to set an environment variable `{}={}`", env_var_key, env_var_val ); + } + + /// Executes all fork cheatcodes + #[test] + fn test_cheats_fork() { + let mut runner = runner(); + let suite_result = + runner.test(&Filter::new(".*", ".*", ".*cheats/Fork"), None, true).unwrap(); + assert!(!suite_result.is_empty()); + + for (_, SuiteResult { test_results, .. 
}) in suite_result { + for (test_name, result) in test_results { + dbg!(test_name.clone()); + let logs = decode_console_logs(&result.logs); + assert!( + result.success, + "Test {} did not pass as expected.\nReason: {:?}\nLogs:\n{}", + test_name, + result.reason, + logs.join("\n") + ); + } + } + } - let suite_result = runner.test(&Filter::new(".*", ".*", ".*cheats"), None, true).unwrap(); + /// Executes all cheat code tests but not fork cheat codes + #[test] + fn test_cheats_local() { + let mut runner = runner(); + let suite_result = + runner.test(&Filter::new(".*", ".*", ".*cheats/[^Fork]"), None, true).unwrap(); assert!(!suite_result.is_empty()); + for (_, SuiteResult { test_results, .. }) in suite_result { for (test_name, result) in test_results { let logs = decode_console_logs(&result.logs); diff --git a/testdata/cheats/Fork.t.sol b/testdata/cheats/Fork.t.sol new file mode 100644 index 000000000000..9a2c75261a8a --- /dev/null +++ b/testdata/cheats/Fork.t.sol @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Unlicense +pragma solidity >=0.8.0; + +import "ds-test/test.sol"; +import "./Cheats.sol"; + +interface IWETH { + function deposit() external payable; + function balanceOf(address) external view returns (uint); +} + +contract ForkTest is DSTest { + address constant WETH_TOKEN_ADDR = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2; + uint256 constant mainblock = 14_608_400; + + Cheats constant cheats = Cheats(HEVM_ADDRESS); + IWETH WETH = IWETH(WETH_TOKEN_ADDR); + + + string forkA; + string forkB; + + // this will create two _different_ forks during setup + function setUp() public { + forkA = cheats.createFork("https://eth-mainnet.alchemyapi.io/v2/Lc7oIGYeL_QvInzI0Wiu_pOZZDEKBrdf", mainblock); + forkB = cheats.createFork("https://eth-mainnet.alchemyapi.io/v2/9VWGraLx0tMiSWx05WH-ywgSVmMxs66W", mainblock - 1); + } + + // ensures forks use different ids + function testForkIdDiffer() public { + assert(keccak256(bytes(forkA)) != keccak256(bytes(forkB))); + } + + // ensures forks use different ids + function testCanSwitchForks() public { + cheats.selectFork(forkA); + cheats.selectFork(forkB); + cheats.selectFork(forkB); + cheats.selectFork(forkA); + } + + function testLocalStatePersistent() public { + cheats.selectFork(forkA); + // read state from forkA + assert( + WETH.balanceOf(0x0000000000000000000000000000000000000000) != 1 + ); + + cheats.selectFork(forkB); + // read state from forkB + assert( + WETH.balanceOf(0x0000000000000000000000000000000000000000) != 1 + ); + + cheats.selectFork(forkA); + + // modify state + bytes32 value = bytes32(uint(1)); + // "0x3617319a054d772f909f7c479a2cebe5066e836a939412e32403c99029b92eff" is the slot storing the balance of zero address for the weth contract + // `cast index address uint 0x0000000000000000000000000000000000000000 3` + bytes32 zero_address_balance_slot = 0x3617319a054d772f909f7c479a2cebe5066e836a939412e32403c99029b92eff; + cheats.store(WETH_TOKEN_ADDR, zero_address_balance_slot, value); + assertEq(WETH.balanceOf(0x0000000000000000000000000000000000000000), 1, "Cheatcode did not change value at the storage slot."); + + // switch forks and ensure local modified state is persistent + cheats.selectFork(forkB); + assertEq(WETH.balanceOf(0x0000000000000000000000000000000000000000), 1, "Cheatcode did not change value at the storage slot."); + } + + + +} \ No newline at end of file From 032931f6acf890bf38f26ac41ae982f58cf171d8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 Jun 2022 20:27:21 +0200 Subject: [PATCH 056/102] refactor: use execute 
function --- evm/src/executor/backend/mod.rs | 6 ++++- evm/src/executor/mod.rs | 4 +++- forge/src/multi_runner.rs | 4 +++- forge/src/runner.rs | 13 ++++++----- testdata/cheats/Fork.t.sol | 3 --- testdata/cheats/Snapshots.t.sol | 40 +++++++++++++++++++++++++++++++++ 6 files changed, 59 insertions(+), 11 deletions(-) create mode 100644 testdata/cheats/Snapshots.t.sol diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index a0ffc5bf9f6e..16f95073e9d0 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -166,6 +166,7 @@ impl DatabaseExt for Backend { fn revert(&mut self, id: U256) -> bool { if let Some(snapshot) = self.snapshots.remove(id) { self.db = snapshot; + dbg!("reverted snapshot"); trace!(target: "backend", "Reverted snapshot {}", id); true } else { @@ -202,7 +203,9 @@ impl DatabaseRef for Backend { } fn storage(&self, address: H160, index: U256) -> U256 { - DatabaseRef::storage(&self.db, address, index) + let val = DatabaseRef::storage(&self.db, address, index); + dbg!(val); + val } fn block_hash(&self, number: U256) -> H256 { @@ -278,6 +281,7 @@ impl DatabaseRef for BackendDatabase { } fn storage(&self, address: H160, index: U256) -> U256 { + dbg!("get storage DBREF"); match self { BackendDatabase::InMemory(inner) => inner.storage(address, index), BackendDatabase::Forked(inner, _) => inner.storage(address, index), diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 09ef0616a123..ab54094bc002 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -293,7 +293,7 @@ impl Executor { /// Executes the test function call pub fn execute( - mut self, + &mut self, from: Address, to: Address, func: F, @@ -346,6 +346,7 @@ impl Executor { calldata: Bytes, value: U256, ) -> eyre::Result { + dbg!("call raw"); // execute the call let mut inspector = self.inspector_config.stack(); let stipend = calc_stipend(&calldata, self.env.cfg.spec_id); @@ -444,6 +445,7 @@ impl Executor { state_changeset: StateChangeset, should_fail: bool, ) -> bool { + dbg!("is success"); // Construct a new VM with the state changeset let mut backend = self.backend().clone_empty(); backend.insert_cache(address, self.backend().basic(address)); diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 9c882f4732ce..804239fc23d2 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -1098,8 +1098,10 @@ Reason: `setEnv` failed to set an environment variable `{}={}`", #[test] fn test_cheats_local() { let mut runner = runner(); + // let suite_result = + // runner.test(&Filter::new(".*", ".*", ".*cheats/[^Fork]"), None, true).unwrap(); let suite_result = - runner.test(&Filter::new(".*", ".*", ".*cheats/[^Fork]"), None, true).unwrap(); + runner.test(&Filter::new(".*", ".*", ".*cheats/Snapsh"), None, true).unwrap(); assert!(!suite_result.is_empty()); for (_, SuiteResult { test_results, .. }) in suite_result { diff --git a/forge/src/runner.rs b/forge/src/runner.rs index beddb034dee2..3cc8adcf58fe 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -18,7 +18,7 @@ use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::{collections::BTreeMap, time::Instant}; /// A type that executes all tests of a contract -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ContractRunner<'a> { /// The executor used by the runner. 
pub executor: Executor, @@ -163,7 +163,7 @@ impl<'a> ContractRunner<'a> { /// Runs all tests for a contract whose names match the provided regular expression pub fn run_tests( - &mut self, + mut self, filter: &impl TestFilter, fuzzer: Option, include_fuzz_tests: bool, @@ -251,9 +251,10 @@ impl<'a> ContractRunner<'a> { .par_iter() .filter_map(|(func, should_fail)| { let result = if func.inputs.is_empty() { - Some(self.run_test(func, *should_fail, setup.clone())) + Some(self.clone().run_test(func, *should_fail, setup.clone())) } else { fuzzer.as_ref().map(|fuzzer| { + // TODO(mattsse) use fuzz wrapper backend self.run_fuzz_test(func, *should_fail, fuzzer.clone(), setup.clone()) }) }; @@ -283,7 +284,7 @@ impl<'a> ContractRunner<'a> { /// similar to `eth_call`. #[tracing::instrument(name = "test", skip_all, fields(name = %func.signature(), %should_fail))] pub fn run_test( - &self, + mut self, func: &Function, should_fail: bool, setup: TestSetup, @@ -294,7 +295,7 @@ impl<'a> ContractRunner<'a> { let start = Instant::now(); let (reverted, reason, gas, stipend, execution_traces, state_changeset) = match self .executor - .call::<(), _, _>(self.sender, address, func.clone(), (), 0.into(), self.errors) + .execute::<(), _, _>(self.sender, address, func.clone(), (), 0.into(), self.errors) { Ok(CallResult { reverted, @@ -332,6 +333,8 @@ impl<'a> ContractRunner<'a> { }; traces.extend(execution_traces.map(|traces| (TraceKind::Execution, traces)).into_iter()); + dbg!("END"); + let success = self.executor.is_success( setup.address, reverted, diff --git a/testdata/cheats/Fork.t.sol b/testdata/cheats/Fork.t.sol index 9a2c75261a8a..cac5700bce46 100644 --- a/testdata/cheats/Fork.t.sol +++ b/testdata/cheats/Fork.t.sol @@ -66,7 +66,4 @@ contract ForkTest is DSTest { cheats.selectFork(forkB); assertEq(WETH.balanceOf(0x0000000000000000000000000000000000000000), 1, "Cheatcode did not change value at the storage slot."); } - - - } \ No newline at end of file diff --git a/testdata/cheats/Snapshots.t.sol b/testdata/cheats/Snapshots.t.sol new file mode 100644 index 000000000000..a8efced08332 --- /dev/null +++ b/testdata/cheats/Snapshots.t.sol @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: Unlicense +pragma solidity >=0.8.0; + +import "ds-test/test.sol"; +import "./Cheats.sol"; + +struct Storage { + uint slot0; + uint slot1; +} + +contract SnapshotTest is DSTest { + Cheats constant cheats = Cheats(HEVM_ADDRESS); + + Storage store; + + function setUp() public { + store.slot0 = 10; + store.slot1 = 20; + } + +// function testStore() public { +// assertEq(store.slot0, 10, "initial value for slot 0 is incorrect"); +// assertEq(store.slot1, 20, "initial value for slot 1 is incorrect"); +// } + + function testSnapshot() public { + uint256 snapshot = cheats.snapshot(); + store.slot0 = 300; + store.slot1 = 400; + +// assertEq(store.slot0, 300); +// assertEq(store.slot1, 400); + + cheats.revertTo(snapshot); + assertEq(store.slot0, 10, "snapshot revert for slot 0 unsuccessful"); + assertEq(store.slot1, 20, "snapshot revert for slot 1 unsuccessful"); + } + +} \ No newline at end of file From 727a0ea392e2a06c859cc0cd43a8d7a7e3982005 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 Jun 2022 20:31:43 +0200 Subject: [PATCH 057/102] refactor: move snapshot cheatcode impl to separate mod --- evm/src/executor/backend/mod.rs | 1 - evm/src/executor/inspector/cheatcodes/fork.rs | 3 --- evm/src/executor/inspector/cheatcodes/mod.rs | 3 +++ .../executor/inspector/cheatcodes/snapshot.rs | 21 +++++++++++++++++++ 4 files changed, 24 
insertions(+), 4 deletions(-) create mode 100644 evm/src/executor/inspector/cheatcodes/snapshot.rs diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 16f95073e9d0..972636ef23c1 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -281,7 +281,6 @@ impl DatabaseRef for BackendDatabase { } fn storage(&self, address: H160, index: U256) -> U256 { - dbg!("get storage DBREF"); match self { BackendDatabase::InMemory(inner) => inner.storage(address, index), BackendDatabase::Forked(inner, _) => inner.storage(address, index), diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index 51c7590a5021..29f12323c281 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -5,7 +5,6 @@ use crate::{ }; use bytes::Bytes; use ethers::{abi::AbiEncode, types::BlockNumber}; - use revm::EVMData; /// Handles fork related cheatcodes @@ -15,8 +14,6 @@ pub fn apply( call: &HEVMCalls, ) -> Option> { Some(match call { - HEVMCalls::Snapshot(_) => Ok(data.db.snapshot().encode().into()), - HEVMCalls::RevertTo(snapshot) => Ok(data.db.revert(snapshot.0).encode().into()), HEVMCalls::CreateFork0(fork) => { create_fork(state, data, fork.0.clone(), BlockNumber::Latest) } diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs b/evm/src/executor/inspector/cheatcodes/mod.rs index 2788088d3eae..244c6ad8ee9a 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -32,6 +32,8 @@ use foundry_config::cache::StorageCachingConfig; mod ext; /// Fork related cheatcodes mod fork; +/// Snapshot related cheatcodes +mod snapshot; /// Cheatcodes that configure the fuzzer mod fuzz; /// Utility cheatcodes (`sign` etc.) @@ -125,6 +127,7 @@ impl Cheatcodes { .or_else(|| expect::apply(self, data, &decoded)) .or_else(|| fuzz::apply(data, &decoded)) .or_else(|| ext::apply(self.ffi, &decoded)) + .or_else(|| snapshot::apply(self, data, &decoded)) .or_else(|| fork::apply(self, data, &decoded)) .ok_or_else(|| "Cheatcode was unhandled. This is a bug.".to_string().encode())? 
} diff --git a/evm/src/executor/inspector/cheatcodes/snapshot.rs b/evm/src/executor/inspector/cheatcodes/snapshot.rs new file mode 100644 index 000000000000..fc186e5e147e --- /dev/null +++ b/evm/src/executor/inspector/cheatcodes/snapshot.rs @@ -0,0 +1,21 @@ +use super::Cheatcodes; +use crate::{ + abi::HEVMCalls, + executor::{backend::DatabaseExt}, +}; +use bytes::Bytes; +use ethers::{abi::AbiEncode}; +use revm::EVMData; + +/// Handles fork related cheatcodes +pub fn apply( + _state: &mut Cheatcodes, + data: &mut EVMData<'_, DB>, + call: &HEVMCalls, +) -> Option> { + Some(match call { + HEVMCalls::Snapshot(_) => Ok(data.db.snapshot().encode().into()), + HEVMCalls::RevertTo(snapshot) => Ok(data.db.revert(snapshot.0).encode().into()), + _ => return None, + }) +} \ No newline at end of file From c7c394059ef5f8cb2799fe6e8bd2bb1c15641636 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 Jun 2022 21:22:40 +0200 Subject: [PATCH 058/102] feat: store subroutine with snapshot --- Cargo.lock | 4 --- Cargo.toml | 3 ++ evm/src/executor/backend/fuzz.rs | 22 +++++++------- evm/src/executor/backend/mod.rs | 30 +++++++++---------- evm/src/executor/backend/snapshot.rs | 17 +++++++++++ evm/src/executor/inspector/cheatcodes/mod.rs | 4 +-- .../executor/inspector/cheatcodes/snapshot.rs | 22 +++++++++----- evm/src/executor/mod.rs | 2 -- forge/src/multi_runner.rs | 2 +- forge/src/runner.rs | 2 -- testdata/cheats/Snapshots.t.sol | 4 +-- 11 files changed, 66 insertions(+), 46 deletions(-) create mode 100644 evm/src/executor/backend/snapshot.rs diff --git a/Cargo.lock b/Cargo.lock index 4f34ab4063fb..c7f08e85c630 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4036,8 +4036,6 @@ dependencies = [ [[package]] name = "revm" version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1affcd38e2d1a4570699f8cf35f0668c886febdba1b857771b20cf81025ada1d" dependencies = [ "arrayref", "auto_impl 1.0.1", @@ -4055,8 +4053,6 @@ dependencies = [ [[package]] name = "revm_precompiles" version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6aae8f44783ef6ff39fc22c9c999dfa0e17b79d663b752730c02a025935185" dependencies = [ "bytes", "k256", diff --git a/Cargo.toml b/Cargo.toml index 72b2bfcadce5..0bdf4dda1eb2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,3 +57,6 @@ debug = 0 #ethers-signers = { path = "../ethers-rs/ethers-signers" } #ethers-etherscan = { path = "../ethers-rs/ethers-etherscan" } #ethers-solc = { path = "../ethers-rs/ethers-solc" } + +[patch.crates-io] +revm = { path = "../revm/crates/revm" , default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } \ No newline at end of file diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 661c744465b3..95175c08fa29 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -1,6 +1,6 @@ use crate::{ executor::{ - backend::{Backend, BackendDatabase, DatabaseExt}, + backend::{snapshot::BackendSnapshot, Backend, BackendDatabase, DatabaseExt}, fork::{CreateFork, ForkId, SharedBackend}, snapshot::Snapshots, }, @@ -11,7 +11,7 @@ use ethers::prelude::{H160, H256, U256}; use hashbrown::HashMap as Map; use revm::{ db::{CacheDB, DatabaseRef}, - Account, AccountInfo, Database, Env, Inspector, Log, Return, TransactOut, + Account, AccountInfo, Database, Env, Inspector, Log, Return, SubRoutine, TransactOut, }; use std::collections::HashMap; use tracing::{trace, warn}; @@ -38,7 +38,7 @@ pub struct FuzzBackendWrapper<'a> { /// 
tracks all created forks created_forks: HashMap, /// Contains snapshots made at a certain point - snapshots: Snapshots>, + snapshots: Snapshots>>, } // === impl FuzzBackendWrapper === @@ -77,22 +77,24 @@ impl<'a> FuzzBackendWrapper<'a> { } impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { - fn snapshot(&mut self) -> U256 { - let id = self.snapshots.insert(self.active_db().clone()); + fn snapshot(&mut self, subroutine: &SubRoutine) -> U256 { + let id = self + .snapshots + .insert(BackendSnapshot::new(self.active_db().clone(), subroutine.clone())); trace!(target: "backend", "Created new snapshot {}", id); id } - fn revert(&mut self, id: U256) -> bool { - if let Some(snapshot) = + fn revert(&mut self, id: U256) -> Option { + if let Some(BackendSnapshot { db, subroutine }) = self.snapshots.remove(id).or_else(|| self.inner.snapshots.get(id).cloned()) { - self.set_active(snapshot); + self.set_active(db); trace!(target: "backend", "Reverted snapshot {}", id); - true + Some(subroutine) } else { warn!(target: "backend", "No snapshot to revert for {}", id); - false + None } } diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 972636ef23c1..d107f0bd856e 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -10,25 +10,28 @@ use ethers::{ use hashbrown::HashMap as Map; use revm::{ db::{CacheDB, DatabaseRef, EmptyDB}, - Account, AccountInfo, Database, DatabaseCommit, Env, Inspector, Log, Return, TransactOut, + Account, AccountInfo, Database, DatabaseCommit, Env, Inspector, Log, Return, SubRoutine, + TransactOut, }; use std::collections::HashMap; use tracing::{trace, warn}; mod fuzz; +mod snapshot; pub use fuzz::FuzzBackendWrapper; mod in_memory_db; +use crate::executor::backend::snapshot::BackendSnapshot; pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities #[auto_impl::auto_impl(&mut, Box)] pub trait DatabaseExt: Database { /// Creates a new snapshot - fn snapshot(&mut self) -> U256; + fn snapshot(&mut self, subroutine: &SubRoutine) -> U256; /// Reverts the snapshot if it exists /// /// Returns `true` if the snapshot was successfully reverted, `false` if no snapshot for that id /// exists. 
- fn revert(&mut self, id: U256) -> bool; + fn revert(&mut self, id: U256) -> Option; /// Creates a new fork but does _not_ select it fn create_fork(&mut self, fork: CreateFork) -> eyre::Result; @@ -99,7 +102,7 @@ pub struct Backend { /// state pub db: CacheDB, /// Contains snapshots made at a certain point - snapshots: Snapshots>, + snapshots: Snapshots>>, } // === impl Backend === @@ -157,21 +160,20 @@ impl Backend { // === impl a bunch of `revm::Database` adjacent implementations === impl DatabaseExt for Backend { - fn snapshot(&mut self) -> U256 { - let id = self.snapshots.insert(self.db.clone()); + fn snapshot(&mut self, subroutine: &SubRoutine) -> U256 { + let id = self.snapshots.insert(BackendSnapshot::new(self.db.clone(), subroutine.clone())); trace!(target: "backend", "Created new snapshot {}", id); id } - fn revert(&mut self, id: U256) -> bool { - if let Some(snapshot) = self.snapshots.remove(id) { - self.db = snapshot; - dbg!("reverted snapshot"); + fn revert(&mut self, id: U256) -> Option { + if let Some(BackendSnapshot { db, subroutine }) = self.snapshots.remove(id) { + self.db = db; trace!(target: "backend", "Reverted snapshot {}", id); - true + Some(subroutine) } else { warn!(target: "backend", "No snapshot to revert for {}", id); - false + None } } @@ -203,9 +205,7 @@ impl DatabaseRef for Backend { } fn storage(&self, address: H160, index: U256) -> U256 { - let val = DatabaseRef::storage(&self.db, address, index); - dbg!(val); - val + DatabaseRef::storage(&self.db, address, index) } fn block_hash(&self, number: U256) -> H256 { diff --git a/evm/src/executor/backend/snapshot.rs b/evm/src/executor/backend/snapshot.rs new file mode 100644 index 000000000000..e450030c9112 --- /dev/null +++ b/evm/src/executor/backend/snapshot.rs @@ -0,0 +1,17 @@ +use revm::SubRoutine; + +/// Represents a snapshot taken during evm execution +#[derive(Clone, Debug)] +pub struct BackendSnapshot { + pub db: T, + /// The subroutine state at a specific point + pub subroutine: SubRoutine, +} + +// === impl BackendSnapshot === + +impl BackendSnapshot { + pub fn new(db: T, subroutine: SubRoutine) -> Self { + Self { db, subroutine } + } +} diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs b/evm/src/executor/inspector/cheatcodes/mod.rs index 244c6ad8ee9a..6ca28ab0129a 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -32,10 +32,10 @@ use foundry_config::cache::StorageCachingConfig; mod ext; /// Fork related cheatcodes mod fork; -/// Snapshot related cheatcodes -mod snapshot; /// Cheatcodes that configure the fuzzer mod fuzz; +/// Snapshot related cheatcodes +mod snapshot; /// Utility cheatcodes (`sign` etc.) 
mod util; pub use util::{DEFAULT_CREATE2_DEPLOYER, MISSING_CREATE2_DEPLOYER}; diff --git a/evm/src/executor/inspector/cheatcodes/snapshot.rs b/evm/src/executor/inspector/cheatcodes/snapshot.rs index fc186e5e147e..332806e29403 100644 --- a/evm/src/executor/inspector/cheatcodes/snapshot.rs +++ b/evm/src/executor/inspector/cheatcodes/snapshot.rs @@ -1,10 +1,7 @@ use super::Cheatcodes; -use crate::{ - abi::HEVMCalls, - executor::{backend::DatabaseExt}, -}; +use crate::{abi::HEVMCalls, executor::backend::DatabaseExt}; use bytes::Bytes; -use ethers::{abi::AbiEncode}; +use ethers::abi::AbiEncode; use revm::EVMData; /// Handles fork related cheatcodes @@ -14,8 +11,17 @@ pub fn apply( call: &HEVMCalls, ) -> Option> { Some(match call { - HEVMCalls::Snapshot(_) => Ok(data.db.snapshot().encode().into()), - HEVMCalls::RevertTo(snapshot) => Ok(data.db.revert(snapshot.0).encode().into()), + HEVMCalls::Snapshot(_) => Ok(data.db.snapshot(&data.subroutine).encode().into()), + HEVMCalls::RevertTo(snapshot) => { + let res = if let Some(subroutine) = data.db.revert(snapshot.0) { + // we reset the evm's subroutine to the state of the snapshot previous state + data.subroutine = subroutine; + true + } else { + false + }; + Ok(res.encode().into()) + } _ => return None, }) -} \ No newline at end of file +} diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index ab54094bc002..b5b2c57c5cc5 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -346,7 +346,6 @@ impl Executor { calldata: Bytes, value: U256, ) -> eyre::Result { - dbg!("call raw"); // execute the call let mut inspector = self.inspector_config.stack(); let stipend = calc_stipend(&calldata, self.env.cfg.spec_id); @@ -445,7 +444,6 @@ impl Executor { state_changeset: StateChangeset, should_fail: bool, ) -> bool { - dbg!("is success"); // Construct a new VM with the state changeset let mut backend = self.backend().clone_empty(); backend.insert_cache(address, self.backend().basic(address)); diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 804239fc23d2..cb8276fb8c19 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -180,7 +180,7 @@ impl MultiContractRunner { libs: &[Bytes], (filter, include_fuzz_tests): (&impl TestFilter, bool), ) -> Result { - let mut runner = ContractRunner::new( + let runner = ContractRunner::new( executor, contract, deploy_code, diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 3cc8adcf58fe..5fbc3a72e33f 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -333,8 +333,6 @@ impl<'a> ContractRunner<'a> { }; traces.extend(execution_traces.map(|traces| (TraceKind::Execution, traces)).into_iter()); - dbg!("END"); - let success = self.executor.is_success( setup.address, reverted, diff --git a/testdata/cheats/Snapshots.t.sol b/testdata/cheats/Snapshots.t.sol index a8efced08332..c66c729187de 100644 --- a/testdata/cheats/Snapshots.t.sol +++ b/testdata/cheats/Snapshots.t.sol @@ -29,8 +29,8 @@ contract SnapshotTest is DSTest { store.slot0 = 300; store.slot1 = 400; -// assertEq(store.slot0, 300); -// assertEq(store.slot1, 400); + assertEq(store.slot0, 300); + assertEq(store.slot1, 400); cheats.revertTo(snapshot); assertEq(store.slot0, 10, "snapshot revert for slot 0 unsuccessful"); From c4d2097de51363bf867f59f7879595362a7a7380 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 13 Jun 2022 21:33:08 +0200 Subject: [PATCH 059/102] feat: add subroutine to revert call --- evm/src/executor/backend/fuzz.rs | 2 +- evm/src/executor/backend/mod.rs | 6 ++++-- 
evm/src/executor/inspector/cheatcodes/snapshot.rs | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 95175c08fa29..a38dbbc7bc0a 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -85,7 +85,7 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { id } - fn revert(&mut self, id: U256) -> Option { + fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { if let Some(BackendSnapshot { db, subroutine }) = self.snapshots.remove(id).or_else(|| self.inner.snapshots.get(id).cloned()) { diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index d107f0bd856e..570e96f54bd1 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -31,7 +31,7 @@ pub trait DatabaseExt: Database { /// /// Returns `true` if the snapshot was successfully reverted, `false` if no snapshot for that id /// exists. - fn revert(&mut self, id: U256) -> Option; + fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option; /// Creates a new fork but does _not_ select it fn create_fork(&mut self, fork: CreateFork) -> eyre::Result; @@ -166,8 +166,10 @@ impl DatabaseExt for Backend { id } - fn revert(&mut self, id: U256) -> Option { + fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { if let Some(BackendSnapshot { db, subroutine }) = self.snapshots.remove(id) { + // TODO needs to store additioanl logs and whether there was a failure by looking at the + // subroutine self.db = db; trace!(target: "backend", "Reverted snapshot {}", id); Some(subroutine) diff --git a/evm/src/executor/inspector/cheatcodes/snapshot.rs b/evm/src/executor/inspector/cheatcodes/snapshot.rs index 332806e29403..e30251925124 100644 --- a/evm/src/executor/inspector/cheatcodes/snapshot.rs +++ b/evm/src/executor/inspector/cheatcodes/snapshot.rs @@ -13,7 +13,7 @@ pub fn apply( Some(match call { HEVMCalls::Snapshot(_) => Ok(data.db.snapshot(&data.subroutine).encode().into()), HEVMCalls::RevertTo(snapshot) => { - let res = if let Some(subroutine) = data.db.revert(snapshot.0) { + let res = if let Some(subroutine) = data.db.revert(snapshot.0, &data.subroutine) { // we reset the evm's subroutine to the state of the snapshot previous state data.subroutine = subroutine; true From c0563f30a88e0dff9713af5c3e07d086691412d7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Jun 2022 18:01:36 +0200 Subject: [PATCH 060/102] feat: add Cheats config type --- Cargo.lock | 2 +- cli/src/cmd/forge/script/executor.rs | 6 ++-- cli/src/cmd/forge/script/runner.rs | 2 -- cli/src/cmd/forge/test.rs | 4 ++- evm/src/executor/backend/fuzz.rs | 2 +- evm/src/executor/backend/mod.rs | 2 +- evm/src/executor/builder.rs | 14 ++++----- .../executor/inspector/cheatcodes/config.rs | 30 +++++++++++++++++++ evm/src/executor/inspector/cheatcodes/fork.rs | 2 +- evm/src/executor/inspector/cheatcodes/mod.rs | 20 +++++-------- evm/src/executor/inspector/mod.rs | 2 +- forge/src/multi_runner.rs | 22 +++++++++----- 12 files changed, 67 insertions(+), 41 deletions(-) create mode 100644 evm/src/executor/inspector/cheatcodes/config.rs diff --git a/Cargo.lock b/Cargo.lock index 9e226b3ef435..8b1b49521469 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4037,7 +4037,7 @@ dependencies = [ [[package]] name = "revm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "arrayref", "auto_impl 1.0.1", diff --git a/cli/src/cmd/forge/script/executor.rs b/cli/src/cmd/forge/script/executor.rs index 
86de41cb4f21..0d427d9d57d0 100644 --- a/cli/src/cmd/forge/script/executor.rs +++ b/cli/src/cmd/forge/script/executor.rs @@ -3,6 +3,7 @@ use crate::{ cmd::{forge::script::sequence::TransactionWithMetadata, needs_setup}, utils, }; +use cast::executor::inspector::CheatsConfig; use ethers::{ solc::artifacts::CompactContractBytecode, types::{transaction::eip2718::TypedTransaction, Address, U256}, @@ -150,10 +151,7 @@ impl ScriptArgs { let db = Backend::spawn(script_config.evm_opts.get_fork(env.clone())); let mut builder = ExecutorBuilder::default() - .with_cheatcodes( - script_config.evm_opts.ffi, - script_config.config.rpc_storage_caching.clone(), - ) + .with_cheatcodes(CheatsConfig::new(&script_config.config, &script_config.evm_opts)) .with_config(env) .with_spec(utils::evm_spec(&script_config.config.evm_version)) .set_tracing(script_config.evm_opts.verbosity >= 3) diff --git a/cli/src/cmd/forge/script/runner.rs b/cli/src/cmd/forge/script/runner.rs index a1cbc2a6eb1d..5728891b2791 100644 --- a/cli/src/cmd/forge/script/runner.rs +++ b/cli/src/cmd/forge/script/runner.rs @@ -6,8 +6,6 @@ use forge::{ CALLER, }; -use super::*; - /// Drives script execution pub struct ScriptRunner { pub executor: Executor, diff --git a/cli/src/cmd/forge/test.rs b/cli/src/cmd/forge/test.rs index 0ece69cbcdb2..ea1f8c85c327 100644 --- a/cli/src/cmd/forge/test.rs +++ b/cli/src/cmd/forge/test.rs @@ -13,7 +13,7 @@ use clap::{AppSettings, Parser}; use ethers::solc::{utils::RuntimeOrHandle, FileFilter}; use forge::{ decode::decode_console_logs, - executor::opts::EvmOpts, + executor::{inspector::CheatsConfig, opts::EvmOpts}, gas_report::GasReport, result::{SuiteResult, TestKind, TestResult}, trace::{ @@ -494,12 +494,14 @@ pub fn custom_run(args: TestArgs, include_fuzz_tests: bool) -> eyre::Result DatabaseExt for FuzzBackendWrapper<'a> { id } - fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { + fn revert(&mut self, id: U256, _subroutine: &SubRoutine) -> Option { if let Some(BackendSnapshot { db, subroutine }) = self.snapshots.remove(id).or_else(|| self.inner.snapshots.get(id).cloned()) { diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 570e96f54bd1..4dc24f6dd743 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -166,7 +166,7 @@ impl DatabaseExt for Backend { id } - fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { + fn revert(&mut self, id: U256, _subroutine: &SubRoutine) -> Option { if let Some(BackendSnapshot { db, subroutine }) = self.snapshots.remove(id) { // TODO needs to store additioanl logs and whether there was a failure by looking at the // subroutine diff --git a/evm/src/executor/builder.rs b/evm/src/executor/builder.rs index 090bbff0d11a..8d3f6bf5bc53 100644 --- a/evm/src/executor/builder.rs +++ b/evm/src/executor/builder.rs @@ -2,9 +2,9 @@ use super::{ inspector::{Cheatcodes, InspectorStackConfig}, Executor, }; -use crate::executor::backend::Backend; +use crate::executor::{backend::Backend, inspector::CheatsConfig}; use ethers::types::U256; -use foundry_config::cache::StorageCachingConfig; + use revm::{Env, SpecId}; #[derive(Default, Debug)] @@ -21,13 +21,9 @@ pub struct ExecutorBuilder { impl ExecutorBuilder { /// Enables cheatcodes on the executor. 
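A minimal usage sketch of the reworked entry point (illustrative, not taken from the patch; `config`, `evm_opts` and `env` are assumed to be in scope, and the setter chain mirrors the script executor call site above):

    let cheats = CheatsConfig::new(&config, &evm_opts);
    let builder = ExecutorBuilder::default()
        .with_cheatcodes(cheats)
        .with_config(env)
        .with_spec(SpecId::LONDON)
        .set_tracing(true);
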
#[must_use] - pub fn with_cheatcodes(mut self, ffi: bool, rpc_storage_caching: StorageCachingConfig) -> Self { - self.inspector_config.cheatcodes = Some(Cheatcodes::new( - ffi, - self.env.block.clone(), - self.env.tx.gas_price, - rpc_storage_caching, - )); + pub fn with_cheatcodes(mut self, config: CheatsConfig) -> Self { + self.inspector_config.cheatcodes = + Some(Cheatcodes::new(self.env.block.clone(), self.env.tx.gas_price, config)); self } diff --git a/evm/src/executor/inspector/cheatcodes/config.rs b/evm/src/executor/inspector/cheatcodes/config.rs new file mode 100644 index 000000000000..e7baa920bf9b --- /dev/null +++ b/evm/src/executor/inspector/cheatcodes/config.rs @@ -0,0 +1,30 @@ +use crate::executor::opts::EvmOpts; +use foundry_config::{cache::StorageCachingConfig, Config, RpcEndpoints}; +use std::sync::Arc; + +/// Additional, configurable context the `Cheatcodes` inspector has access to +/// +/// This is essentially a subset of various `Config` settings `Cheatcodes` needs to know. +/// Since each test gets its own cheatcode inspector, but these values here are expected to be +/// constant for all test runs, everything is `Arc'ed` here to avoid unnecessary, expensive clones. +#[derive(Debug, Clone, Default)] +pub struct CheatsConfig { + pub ffi: bool, + /// RPC storage caching settings determines what chains and endpoints to cache + pub rpc_storage_caching: Arc, + /// All known endpoints and their aliases + pub rpc_endpoints: Arc, +} + +// === impl CheatsConfig === + +impl CheatsConfig { + /// Extracts the necessary settings from the Config + pub fn new(config: &Config, evm_opts: &EvmOpts) -> Self { + Self { + ffi: evm_opts.ffi, + rpc_storage_caching: Arc::new(config.rpc_storage_caching.clone()), + rpc_endpoints: Arc::new(config.rpc_endpoints.clone()), + } + } +} diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index 29f12323c281..5e195684beff 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -36,7 +36,7 @@ fn create_fork( block: BlockNumber, ) -> Result { let fork = CreateFork { - enable_caching: state.rpc_storage_caching.enable_for_endpoint(&url), + enable_caching: state.config.rpc_storage_caching.enable_for_endpoint(&url), url, block, chain_id: None, diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs b/evm/src/executor/inspector/cheatcodes/mod.rs index 6ca28ab0129a..dfacdf30b41d 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -26,7 +26,6 @@ pub use env::{Prank, RecordAccess}; /// Assertion helpers (such as `expectEmit`) mod expect; pub use expect::{ExpectedCallData, ExpectedEmit, ExpectedRevert, MockCallDataContext}; -use foundry_config::cache::StorageCachingConfig; /// Cheatcodes that interact with the external environment (FFI etc.) mod ext; @@ -40,6 +39,9 @@ mod snapshot; mod util; pub use util::{DEFAULT_CREATE2_DEPLOYER, MISSING_CREATE2_DEPLOYER}; +mod config; +pub use config::CheatsConfig; + /// An inspector that handles calls to various cheatcodes, each with their own behavior. 
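Because the heavier settings are wrapped in `Arc`s, every test executor can receive its own copy of the config cheaply; a small sketch (default `Config` and `EvmOpts` values assumed here):

    let cheats = CheatsConfig::new(&Config::default(), &EvmOpts::default());
    // each per-test Cheatcodes inspector takes its own copy; cloning only bumps
    // the Arc reference counts instead of duplicating the cached settings
    let per_test = cheats.clone();
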
/// /// Cheatcodes can be called by contracts during execution to modify the VM environment, such as @@ -91,23 +93,17 @@ pub struct Cheatcodes { /// Scripting based transactions pub broadcastable_transactions: VecDeque, - /// RPC storage caching settings determines what chains and endpoints to cache - pub rpc_storage_caching: StorageCachingConfig, + /// Additional, user configurable context this Inspector has access to when inspecting a call + pub config: CheatsConfig, } impl Cheatcodes { - pub fn new( - ffi: bool, - block: BlockEnv, - gas_price: U256, - rpc_storage_caching: StorageCachingConfig, - ) -> Self { + pub fn new(block: BlockEnv, gas_price: U256, config: CheatsConfig) -> Self { Self { - ffi, corrected_nonce: false, block: Some(block), gas_price: Some(gas_price), - rpc_storage_caching, + config, ..Default::default() } } @@ -126,7 +122,7 @@ impl Cheatcodes { .or_else(|| util::apply(self, data, &decoded)) .or_else(|| expect::apply(self, data, &decoded)) .or_else(|| fuzz::apply(data, &decoded)) - .or_else(|| ext::apply(self.ffi, &decoded)) + .or_else(|| ext::apply(self.config.ffi, &decoded)) .or_else(|| snapshot::apply(self, data, &decoded)) .or_else(|| fork::apply(self, data, &decoded)) .ok_or_else(|| "Cheatcode was unhandled. This is a bug.".to_string().encode())? diff --git a/evm/src/executor/inspector/mod.rs b/evm/src/executor/inspector/mod.rs index 4ea3d7eeb8ba..dd0125e260b3 100644 --- a/evm/src/executor/inspector/mod.rs +++ b/evm/src/executor/inspector/mod.rs @@ -14,7 +14,7 @@ mod stack; pub use stack::{InspectorData, InspectorStack}; mod cheatcodes; -pub use cheatcodes::{Cheatcodes, DEFAULT_CREATE2_DEPLOYER}; +pub use cheatcodes::{Cheatcodes, CheatsConfig, DEFAULT_CREATE2_DEPLOYER}; use ethers::types::U256; diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index cb8276fb8c19..16d433dae5dc 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -6,10 +6,10 @@ use ethers::{ types::{Address, Bytes, U256}, }; use eyre::Result; -use foundry_config::cache::StorageCachingConfig; use foundry_evm::{ executor::{ - backend::Backend, fork::CreateFork, opts::EvmOpts, Executor, ExecutorBuilder, SpecId, + backend::Backend, fork::CreateFork, inspector::CheatsConfig, opts::EvmOpts, Executor, + ExecutorBuilder, SpecId, }, revm, }; @@ -44,8 +44,8 @@ pub struct MultiContractRunner { pub source_paths: BTreeMap, /// The fork to use at launch pub fork: Option, - /// RPC storage caching settings determines what chains and endpoints to cache - pub rpc_storage_caching: StorageCachingConfig, + /// Additional cheatcode inspector related settings derived from the `Config` + pub cheats_config: CheatsConfig, } impl MultiContractRunner { @@ -133,7 +133,7 @@ impl MultiContractRunner { }) .map(|(id, (abi, deploy_code, libs))| { let executor = ExecutorBuilder::default() - .with_cheatcodes(self.evm_opts.ffi, self.rpc_storage_caching.clone()) + .with_cheatcodes(self.cheats_config.clone()) .with_config(self.env.clone()) .with_spec(self.evm_spec) .with_gas_limit(self.evm_opts.gas_limit()) @@ -207,8 +207,8 @@ pub struct MultiContractRunnerBuilder { pub evm_spec: Option, /// The fork to use at launch pub fork: Option, - /// RPC storage caching settings determines what chains and endpoints to cache - pub rpc_storage_caching: StorageCachingConfig, + /// Additional cheatcode inspector related settings derived from the `Config` + pub cheats_config: Option, } impl MultiContractRunnerBuilder { @@ -305,7 +305,7 @@ impl MultiContractRunnerBuilder { errors: Some(execution_info.2), 
source_paths, fork: self.fork, - rpc_storage_caching: self.rpc_storage_caching, + cheats_config: self.cheats_config.unwrap_or_default(), }) } @@ -338,6 +338,12 @@ impl MultiContractRunnerBuilder { self.fork = fork; self } + + #[must_use] + pub fn with_cheats_config(mut self, cheats_config: CheatsConfig) -> Self { + self.cheats_config = Some(cheats_config); + self + } } #[cfg(test)] From 5f0d59878ac8641f0b5679550ac3f14c61793363 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Jun 2022 19:44:30 +0200 Subject: [PATCH 061/102] work on snapshots --- evm/src/executor/backend/mod.rs | 16 +++++++++++++--- evm/src/executor/backend/snapshot.rs | 6 ++++++ evm/src/executor/snapshot.rs | 16 ++++++++++++++-- 3 files changed, 33 insertions(+), 5 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 4dc24f6dd743..c41a1a0cf7ce 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -25,12 +25,20 @@ pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities #[auto_impl::auto_impl(&mut, Box)] pub trait DatabaseExt: Database { - /// Creates a new snapshot + /// Creates a new snapshot at the current point of execution. + /// + /// A snapshot is associated with a new unique id that's created for the snapshot. + /// Snapshots can be reverted: [DatabaseExt::revert], however a snapshot can only be reverted + /// once. After a successful revert, the same snapshot id cannot be used again. fn snapshot(&mut self, subroutine: &SubRoutine) -> U256; /// Reverts the snapshot if it exists /// /// Returns `true` if the snapshot was successfully reverted, `false` if no snapshot for that id /// exists. + /// + /// **N.B.** While this reverts the state of the evm to the snapshot, it keeps new logs made + /// since the snapshots was created. This way we can show logs that were emitted between + /// snapshot and its revert. fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option; /// Creates a new fork but does _not_ select it @@ -166,9 +174,11 @@ impl DatabaseExt for Backend { id } - fn revert(&mut self, id: U256, _subroutine: &SubRoutine) -> Option { - if let Some(BackendSnapshot { db, subroutine }) = self.snapshots.remove(id) { + fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { + if let Some(mut snapshot) = self.snapshots.remove(id) { // TODO needs to store additioanl logs and whether there was a failure by looking at the + + let BackendSnapshot { db, subroutine } = snapshot; // subroutine self.db = db; trace!(target: "backend", "Reverted snapshot {}", id); diff --git a/evm/src/executor/backend/snapshot.rs b/evm/src/executor/backend/snapshot.rs index e450030c9112..2f859c80cc6d 100644 --- a/evm/src/executor/backend/snapshot.rs +++ b/evm/src/executor/backend/snapshot.rs @@ -11,7 +11,13 @@ pub struct BackendSnapshot { // === impl BackendSnapshot === impl BackendSnapshot { + /// Takes a new snapshot pub fn new(db: T, subroutine: SubRoutine) -> Self { Self { db, subroutine } } + + /// + pub fn revert(&mut self, current: &SubRoutine) { + + } } diff --git a/evm/src/executor/snapshot.rs b/evm/src/executor/snapshot.rs index 7e00e88d5c69..c5b39a02dcc1 100644 --- a/evm/src/executor/snapshot.rs +++ b/evm/src/executor/snapshot.rs @@ -24,9 +24,21 @@ impl Snapshots { self.snapshots.get(&id) } - /// Removes the snapshot with the given `id` + /// Removes the snapshot with the given `id`. 
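The one-shot contract documented on [DatabaseExt::snapshot] and [DatabaseExt::revert] can be sketched as follows (illustrative only; `backend` implements `DatabaseExt` and `subroutine` is the current `revm::SubRoutine`):

    let snap = backend.snapshot(&subroutine);
    assert!(backend.revert(snap, &subroutine).is_some()); // rolls the state back and consumes the id
    assert!(backend.revert(snap, &subroutine).is_none()); // the same snapshot id cannot be reverted twice
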
+ /// + /// This will also remove any snapshots taken after the snapshot with the `id`. e.g.: reverting + /// to id 1 will delete snapshots with ids 1, 2, 3, etc.) pub fn remove(&mut self, id: U256) -> Option { - self.snapshots.remove(&id) + let snapshot = self.snapshots.remove(&id); + + // revert all snapshots taken after the snapshot + let mut to_revert = id + 1; + while to_revert < self.id { + self.snapshots.remove(&to_revert); + to_revert = to_revert + 1; + } + + snapshot } /// Inserts the new snapshot and returns the id From a7e5d5ae969a92cb19be7def54012a0b377fd107 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Jun 2022 20:25:34 +0200 Subject: [PATCH 062/102] docs: write additional revert docs --- evm/src/executor/backend/mod.rs | 17 ++++++++++++++++- evm/src/executor/backend/snapshot.rs | 9 +++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index c41a1a0cf7ce..2b4d5f090d10 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -111,6 +111,15 @@ pub struct Backend { pub db: CacheDB, /// Contains snapshots made at a certain point snapshots: Snapshots>>, + /// Tracks whether there was a failure in a snapshot that was reverted + /// + /// The Test contract contains a bool variable that is set to true when an `assert` function + /// failed. When a snapshot is reverted, it reverts the state of the evm, but we still want + /// to know if there was an assert that failed after the snapshot was taken so that we can + /// check if the test function passed all asserts even across snapshots. When a snapshot is + /// reverted we get the _current_ `revm::Subroutine` which contains the state that we can check + /// if the `failed` variable is set + has_failure_in_reverted_snapshot: bool, } // === impl Backend === @@ -133,7 +142,13 @@ impl Backend { CacheDB::new(BackendDatabase::InMemory(EmptyDB())) }; - Self { forks, db, created_forks: Default::default(), snapshots: Default::default() } + Self { + forks, + db, + created_forks: Default::default(), + snapshots: Default::default(), + has_failure_in_reverted_snapshot: false, + } } /// Creates a new instance with a `BackendDatabase::InMemory` cache layer for the `CacheDB` diff --git a/evm/src/executor/backend/snapshot.rs b/evm/src/executor/backend/snapshot.rs index 2f859c80cc6d..24e84e234ad4 100644 --- a/evm/src/executor/backend/snapshot.rs +++ b/evm/src/executor/backend/snapshot.rs @@ -16,8 +16,13 @@ impl BackendSnapshot { Self { db, subroutine } } + /// Called when this snapshot is reverted. /// - pub fn revert(&mut self, current: &SubRoutine) { - + /// Since we want to keep all additional logs that were emitted since the snapshot was taken + /// we'll merge additional logs into the snapshot's `revm::Subroutine`. 
Additional logs are + /// those logs that are missing in the snapshot's subroutine, since the current subroutine + /// includes the same logs, we can simply replace use that See also `DatabaseExt::revert` + pub fn merge(&mut self, current: &SubRoutine) { + self.subroutine.logs = current.logs.clone(); } } From 0b9b6f78fb110ae32bc7c023562a5456d650c7f4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 Jun 2022 17:27:29 +0200 Subject: [PATCH 063/102] feat: check for failures --- evm/src/executor/backend/mod.rs | 74 ++++++++++++++++++++++++++++----- evm/src/executor/mod.rs | 6 +-- forge/src/runner.rs | 2 +- 3 files changed, 68 insertions(+), 14 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 4f7b6bd71696..302d241d007b 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -11,7 +11,7 @@ use hashbrown::HashMap as Map; use revm::{ db::{CacheDB, DatabaseRef, EmptyDB}, Account, AccountInfo, Database, DatabaseCommit, Env, Inspector, Log, Return, SubRoutine, - TransactOut, + TransactOut, TransactTo, }; use std::collections::HashMap; use tracing::{trace, warn}; @@ -115,11 +115,17 @@ pub struct Backend { /// /// The Test contract contains a bool variable that is set to true when an `assert` function /// failed. When a snapshot is reverted, it reverts the state of the evm, but we still want - /// to know if there was an assert that failed after the snapshot was taken so that we can + /// to know if there was an `assert` that failed after the snapshot was taken so that we can /// check if the test function passed all asserts even across snapshots. When a snapshot is /// reverted we get the _current_ `revm::Subroutine` which contains the state that we can check - /// if the `failed` variable is set - has_failure_in_reverted_snapshot: bool, + /// if the `_failed` variable is set, + /// additionally + has_failure__snapshot: bool, + /// Tracks the address of a Test contract + /// + /// This address can be used to inspect the state of the contract when a test is being + /// executed. E.g. the `_failed` variable of `DSTest` + test_contract: Option
, } // === impl Backend === @@ -147,7 +153,9 @@ impl Backend { db, created_forks: Default::default(), snapshots: Default::default(), - has_failure_in_reverted_snapshot: false, + has_failure__snapshot: false, + // not yet known + test_contract: None, } } @@ -160,7 +168,8 @@ impl Backend { created_forks: Default::default(), db, snapshots: Default::default(), - has_failure_in_reverted_snapshot: false, + has_failure__snapshot: false, + test_contract: None, } } @@ -168,7 +177,43 @@ impl Backend { self.db.insert_cache(address, account) } - /// Executes the configured transaction of the `env` without commiting state changes + /// Sets the address of the `DSTest` contract that is being executed + pub fn set_test_contract(&mut self, addr: Address) -> &mut Self { + self.test_contract = Some(addr); + self + } + + /// Returns the address of the set `DSTest` contract + pub fn test_contract_address(&self) -> Option
{ + self.test_contract + } + + /// Checks if a test function failed + /// + /// DSTest will not revert inside its `assertEq`-like functions which allows + /// to test multiple assertions in 1 test function while also preserving logs. + /// Instead, it stores whether an `assert` failed in a boolean variable that we can read + pub fn is_failed(&self) -> bool { + self.has_failure__snapshot || + self.test_contract_address() + .map(|addr| self.is_failed_test_contract(addr)) + .unwrap_or_default() + } + + pub fn is_failed_test_contract(&self, address: Address) -> bool { + /** + contract DSTest { + bool public IS_TEST = true; + // slot 0 offset 1 => second byte of slot0 + bool private _failed; + } + */ + let value = self.storage(address, U256::zero()); + let failed = value != 0; + failed + } + + /// Executes the configured test call of the `env` without commiting state changes pub fn inspect_ref( &mut self, mut env: Env, @@ -177,6 +222,9 @@ impl Backend { where INSP: Inspector, { + if let TransactTo::Call(to) = env.tx.transact_to { + self.test_contract = Some(to); + } revm::evm_inner::(&mut env, self, &mut inspector).transact() } } @@ -192,11 +240,17 @@ impl DatabaseExt for Backend { fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { if let Some(mut snapshot) = self.snapshots.remove(id) { - // TODO needs to store additioanl logs and whether there was a failure by looking at the - + // need to check whether DSTest's `failed` variable is set to `true` which means an + // error occurred either during the snapshot or even before + if self.is_failed() { + self.has_failure__snapshot = true; + } + + // merge additional logs + snapshot.merge(subroutine); let BackendSnapshot { db, subroutine } = snapshot; - // subroutine self.db = db; + trace!(target: "backend", "Reverted snapshot {}", id); Some(subroutine) } else { diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 6c9d9f5c9916..ebe81d52a81e 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -298,10 +298,10 @@ impl Executor { } /// Executes the test function call - pub fn execute( + pub fn execute_test( &mut self, from: Address, - to: Address, + test_contract: Address, func: F, args: T, value: U256, @@ -313,7 +313,7 @@ impl Executor { // execute the call let mut inspector = self.inspector_config.stack(); let stipend = calc_stipend(&calldata, self.env.cfg.spec_id); - let env = self.build_env(from, TransactTo::Call(to), calldata, value); + let env = self.build_env(from, TransactTo::Call(test_contract), calldata, value); let (status, out, gas, state_changeset, logs) = self.backend_mut().inspect_ref(env, &mut inspector); diff --git a/forge/src/runner.rs b/forge/src/runner.rs index ac801779cdc8..fe18cfe026e3 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -296,7 +296,7 @@ impl<'a> ContractRunner<'a> { // Run unit test let start = Instant::now(); let (reverted, reason, gas, stipend, execution_traces, coverage, state_changeset) = - match self.executor.execute::<(), _, _>( + match self.executor.execute_test::<(), _, _>( self.sender, address, func.clone(), From 0ef157af0a3910e71c1840bc09ca0739b11b9b64 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 23 Jun 2022 17:42:14 +0200 Subject: [PATCH 064/102] fix: make compile again --- evm/src/executor/backend/mod.rs | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 302d241d007b..9606232c5d2d 100644 --- 
a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -120,7 +120,7 @@ pub struct Backend { /// reverted we get the _current_ `revm::Subroutine` which contains the state that we can check /// if the `_failed` variable is set, /// additionally - has_failure__snapshot: bool, + has_failure_snapshot: bool, /// Tracks the address of a Test contract /// /// This address can be used to inspect the state of the contract when a test is being @@ -153,7 +153,7 @@ impl Backend { db, created_forks: Default::default(), snapshots: Default::default(), - has_failure__snapshot: false, + has_failure_snapshot: false, // not yet known test_contract: None, } @@ -168,7 +168,7 @@ impl Backend { created_forks: Default::default(), db, snapshots: Default::default(), - has_failure__snapshot: false, + has_failure_snapshot: false, test_contract: None, } } @@ -188,20 +188,21 @@ impl Backend { self.test_contract } - /// Checks if a test function failed - /// - /// DSTest will not revert inside its `assertEq`-like functions which allows - /// to test multiple assertions in 1 test function while also preserving logs. - /// Instead, it stores whether an `assert` failed in a boolean variable that we can read + /// Checks if the test contract associated with this backend failed, See [Self::is_failed_test_contract] pub fn is_failed(&self) -> bool { - self.has_failure__snapshot || + self.has_failure_snapshot || self.test_contract_address() .map(|addr| self.is_failed_test_contract(addr)) .unwrap_or_default() } + /// Checks if the given test function failed + /// + /// DSTest will not revert inside its `assertEq`-like functions which allows + /// to test multiple assertions in 1 test function while also preserving logs. + /// Instead, it stores whether an `assert` failed in a boolean variable that we can read pub fn is_failed_test_contract(&self, address: Address) -> bool { - /** + /* contract DSTest { bool public IS_TEST = true; // slot 0 offset 1 => second byte of slot0 @@ -209,8 +210,8 @@ impl Backend { } */ let value = self.storage(address, U256::zero()); - let failed = value != 0; - failed + + value.byte(1) != 0 } /// Executes the configured test call of the `env` without commiting state changes @@ -243,7 +244,7 @@ impl DatabaseExt for Backend { // need to check whether DSTest's `failed` variable is set to `true` which means an // error occurred either during the snapshot or even before if self.is_failed() { - self.has_failure__snapshot = true; + self.has_failure_snapshot = true; } // merge additional logs From 5dd540c3f9ce6390dcbfefce37eebabf8a79195b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 24 Jun 2022 14:37:11 +0200 Subject: [PATCH 065/102] refactor: rename --- evm/src/executor/abi.rs | 1 + evm/src/executor/backend/mod.rs | 28 +++++++++++++++++++--------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index 91ced5b62240..d5ae7cd8345d 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; /// The cheatcode handler address (0x7109709ECfa91a80626fF3989D68f67F5b1DD12D). /// /// This is the same address as the one used in DappTools's HEVM. 
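The constant can be sanity-checked by re-deriving it from its well-known preimage; a small sketch (assuming ethers' `keccak256` helper):

    let hash = ethers::utils::keccak256(b"hevm cheat code");
    // an EVM address is the low 20 bytes of the 32-byte keccak hash
    assert_eq!(Address::from_slice(&hash[12..]), CHEATCODE_ADDRESS);
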
+/// `address(bytes20(uint160(uint256(keccak256('hevm cheat code')))))` pub static CHEATCODE_ADDRESS: Address = H160([ 0x71, 0x09, 0x70, 0x9E, 0xcf, 0xa9, 0x1a, 0x80, 0x62, 0x6f, 0xf3, 0x98, 0x9d, 0x68, 0xf6, 0x7f, 0x5b, 0x1d, 0xd1, 0x2d, diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 9606232c5d2d..b10cd94bd7f5 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -19,7 +19,7 @@ mod fuzz; mod snapshot; pub use fuzz::FuzzBackendWrapper; mod in_memory_db; -use crate::executor::backend::snapshot::BackendSnapshot; +use crate::{abi::CHEATCODE_ADDRESS, executor::backend::snapshot::BackendSnapshot}; pub use in_memory_db::MemDb; /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities @@ -125,7 +125,7 @@ pub struct Backend { /// /// This address can be used to inspect the state of the contract when a test is being /// executed. E.g. the `_failed` variable of `DSTest` - test_contract: Option
, + test_contract_context: Option
, } // === impl Backend === @@ -155,7 +155,7 @@ impl Backend { snapshots: Default::default(), has_failure_snapshot: false, // not yet known - test_contract: None, + test_contract_context: None, } } @@ -169,7 +169,7 @@ impl Backend { db, snapshots: Default::default(), has_failure_snapshot: false, - test_contract: None, + test_contract_context: None, } } @@ -179,16 +179,17 @@ impl Backend { /// Sets the address of the `DSTest` contract that is being executed pub fn set_test_contract(&mut self, addr: Address) -> &mut Self { - self.test_contract = Some(addr); + self.test_contract_context = Some(addr); self } /// Returns the address of the set `DSTest` contract pub fn test_contract_address(&self) -> Option
{ - self.test_contract + self.test_contract_context } - /// Checks if the test contract associated with this backend failed, See [Self::is_failed_test_contract] + /// Checks if the test contract associated with this backend failed, See + /// [Self::is_failed_test_contract] pub fn is_failed(&self) -> bool { self.has_failure_snapshot || self.test_contract_address() @@ -210,10 +211,19 @@ impl Backend { } */ let value = self.storage(address, U256::zero()); - + value.byte(1) != 0 } + /// In addition to the `_failed` variable, `DSTest::fail()` stores a failure + /// in "failed" + /// See + pub fn is_global_failure(&self) -> bool { + let index = U256::from(&b"failed"[..]); + let value = self.storage(CHEATCODE_ADDRESS, index); + value == U256::one() + } + /// Executes the configured test call of the `env` without commiting state changes pub fn inspect_ref( &mut self, @@ -224,7 +234,7 @@ impl Backend { INSP: Inspector, { if let TransactTo::Call(to) = env.tx.transact_to { - self.test_contract = Some(to); + self.test_contract_context = Some(to); } revm::evm_inner::(&mut env, self, &mut inspector).transact() } From 0ed37081fa284205a0b8974ca987f862081c4a1a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Jul 2022 14:47:19 +0200 Subject: [PATCH 066/102] refactor: unify backend code --- evm/src/executor/backend/fuzz.rs | 172 ++++++++++++++++++++++--------- evm/src/executor/backend/mod.rs | 110 +++++++++++--------- evm/src/executor/mod.rs | 7 +- forge/src/multi_runner.rs | 2 +- forge/src/runner.rs | 7 +- 5 files changed, 194 insertions(+), 104 deletions(-) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index b30aa7d5a8ee..1530a3497cf5 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -1,8 +1,8 @@ use crate::{ + abi::CHEATCODE_ADDRESS, executor::{ - backend::{snapshot::BackendSnapshot, Backend, BackendDatabase, DatabaseExt}, - fork::{CreateFork, ForkId, SharedBackend}, - snapshot::Snapshots, + backend::{snapshot::BackendSnapshot, Backend, BackendDatabase, BackendInner, DatabaseExt}, + fork::{CreateFork, ForkId}, }, Address, }; @@ -12,17 +12,18 @@ use hashbrown::HashMap as Map; use revm::{ db::{CacheDB, DatabaseRef}, Account, AccountInfo, Database, Env, Inspector, Log, Return, SubRoutine, TransactOut, + TransactTo, }; -use std::collections::HashMap; use tracing::{trace, warn}; /// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called. /// /// Any changes made during its existence that affect the caching layer of the underlying Database -/// will result in a clone of the initial Database. +/// will result in a clone of the initial Database. Therefor, this backend type is something akin to +/// a clone-on-write `Backend` type. /// -/// Main purpose for this type is for fuzzing. A test function fuzzer will repeatedly call the -/// function via immutable raw (no state changes). +/// Main purpose for this type is for fuzzing. A test function fuzzer will repeatedly execute the +/// function via immutable raw (no state changes) calls. /// /// **N.B.**: we're assuming cheatcodes that alter the state (like multi fork swapping) are niche. /// If they executed during fuzzing, it will require a clone of the initial input database. This way @@ -30,27 +31,82 @@ use tracing::{trace, warn}; /// don't make use of them. Alternatively each test case would require its own `Backend` clone, /// which would add significant overhead for large fuzz sets even if the Database is not big after /// setup. 
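In other words, the wrapper behaves like a copy-on-write view over the shared backend; a rough usage sketch (the fork-less `Backend::spawn(None)` call is an assumption based on call sites elsewhere in this series):

    let backend = Backend::spawn(None);          // shared, stays read-only during fuzzing
    let cow = FuzzBackendWrapper::new(&backend);
    // reads forward to the shared `backend`; only a state-altering cheatcode
    // (snapshot revert, fork select) clones the CacheDB into the override slot
    let _nonce = cow.basic(Address::zero()).nonce;
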
+#[derive(Debug, Clone)] pub struct FuzzBackendWrapper<'a> { - pub inner: &'a Backend, + /// The underlying immutable `Backend` + /// + /// No calls on the `FuzzBackendWrapper` will ever persistently modify the `backend`'s state. + pub backend: &'a Backend, /// active database clone that holds the currently active db, like reverted snapshots, selected /// fork, etc. db_override: Option>, - /// tracks all created forks - created_forks: HashMap, - /// Contains snapshots made at a certain point - snapshots: Snapshots>>, + /// holds additional Backend data + inner: BackendInner, } // === impl FuzzBackendWrapper === impl<'a> FuzzBackendWrapper<'a> { pub fn new(inner: &'a Backend) -> Self { - Self { - inner, - db_override: None, - created_forks: Default::default(), - snapshots: Default::default(), - } + Self { backend: inner, db_override: None, inner: Default::default() } + } + + /// Returns the currently active database + fn active_db(&self) -> &CacheDB { + self.db_override.as_ref().unwrap_or(&self.backend.db) + } + + /// Sets the database override + fn set_active(&mut self, db: CacheDB) { + self.db_override = Some(db) + } + + /// Sets the address of the `DSTest` contract that is being executed + pub fn set_test_contract(&mut self, addr: Address) -> &mut Self { + self.inner.test_contract_context = Some(addr); + self + } + + /// Returns the address of the set `DSTest` contract + pub fn test_contract_address(&self) -> Option
{ + self.inner.test_contract_context + } + + /// Checks if the test contract associated with this backend failed, See + /// [Self::is_failed_test_contract] + pub fn is_failed(&self) -> bool { + self.backend.is_failed() || + self.inner.has_failure_snapshot || + self.test_contract_address() + .map(|addr| self.is_failed_test_contract(addr)) + .unwrap_or_default() + } + + /// Checks if the given test function failed + /// + /// DSTest will not revert inside its `assertEq`-like functions which allows + /// to test multiple assertions in 1 test function while also preserving logs. + /// Instead, it stores whether an `assert` failed in a boolean variable that we can read + pub fn is_failed_test_contract(&self, address: Address) -> bool { + /* + contract DSTest { + bool public IS_TEST = true; + // slot 0 offset 1 => second byte of slot0 + bool private _failed; + } + */ + let value = self.storage(address, U256::zero()); + + value.byte(1) != 0 + } + + /// In addition to the `_failed` variable, `DSTest::fail()` stores a failure + /// in "failed" + /// See + pub fn is_global_failure(&self) -> bool { + let index = U256::from(&b"failed"[..]); + let value = self.storage(CHEATCODE_ADDRESS, index); + value == U256::one() } /// Executes the configured transaction of the `env` without commiting state changes @@ -62,60 +118,64 @@ impl<'a> FuzzBackendWrapper<'a> { where INSP: Inspector, { + if let TransactTo::Call(to) = env.tx.transact_to { + self.inner.test_contract_context = Some(to); + } revm::evm_inner::(&mut env, self, &mut inspector).transact() } - - /// Returns the currently active database - fn active_db(&self) -> &CacheDB { - self.db_override.as_ref().unwrap_or(&self.inner.db) - } - - /// Sets the database override - fn set_active(&mut self, db: CacheDB) { - self.db_override = Some(db) - } } impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { fn snapshot(&mut self, subroutine: &SubRoutine) -> U256 { let id = self + .inner .snapshots .insert(BackendSnapshot::new(self.active_db().clone(), subroutine.clone())); - trace!(target: "backend", "Created new snapshot {}", id); + trace!(target: "backend::fuzz", "Created new snapshot {}", id); id } - fn revert(&mut self, id: U256, _subroutine: &SubRoutine) -> Option { - if let Some(BackendSnapshot { db, subroutine }) = - self.snapshots.remove(id).or_else(|| self.inner.snapshots.get(id).cloned()) + fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { + if let Some(mut snapshot) = + self.inner.snapshots.remove(id).or_else(|| self.backend.snapshots().get(id).cloned()) { + // need to check whether DSTest's `failed` variable is set to `true` which means an + // error occurred either during the snapshot or even before + if self.is_failed() { + self.inner.has_failure_snapshot = true; + } + // merge additional logs + snapshot.merge(subroutine); + let BackendSnapshot { db, subroutine } = snapshot; + self.set_active(db); - trace!(target: "backend", "Reverted snapshot {}", id); + trace!(target: "backend::fuzz", "Reverted snapshot {}", id); Some(subroutine) } else { - warn!(target: "backend", "No snapshot to revert for {}", id); + warn!(target: "backend::fuzz", "No snapshot to revert for {}", id); None } } fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - let (id, fork) = self.inner.forks.create_fork(fork)?; - self.created_forks.insert(id.clone(), fork); + let (id, fork) = self.backend.forks.create_fork(fork)?; + self.inner.created_forks.insert(id.clone(), fork); Ok(id) } fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { let id = 
id.into(); let fork = self + .inner .created_forks .get(&id) - .or_else(|| self.inner.created_forks.get(&id)) + .or_else(|| self.backend.created_forks().get(&id)) .cloned() .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; if let Some(ref mut db) = self.db_override { *db.db_mut() = BackendDatabase::Forked(fork, id); } else { - let mut db = self.inner.db.clone(); + let mut db = self.backend.db.clone(); *db.db_mut() = BackendDatabase::Forked(fork, id); self.set_active(db); } @@ -123,34 +183,52 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { } } -impl<'a> Database for FuzzBackendWrapper<'a> { - fn basic(&mut self, address: H160) -> AccountInfo { +impl<'a> DatabaseRef for FuzzBackendWrapper<'a> { + fn basic(&self, address: H160) -> AccountInfo { if let Some(ref db) = self.db_override { DatabaseRef::basic(db, address) } else { - DatabaseRef::basic(self.inner, address) + DatabaseRef::basic(self.backend, address) } } - fn code_by_hash(&mut self, code_hash: H256) -> Bytes { + + fn code_by_hash(&self, code_hash: H256) -> Bytes { if let Some(ref db) = self.db_override { DatabaseRef::code_by_hash(db, code_hash) } else { - DatabaseRef::code_by_hash(self.inner, code_hash) + DatabaseRef::code_by_hash(self.backend, code_hash) } } - fn storage(&mut self, address: H160, index: U256) -> U256 { + + fn storage(&self, address: H160, index: U256) -> U256 { if let Some(ref db) = self.db_override { DatabaseRef::storage(db, address, index) } else { - DatabaseRef::storage(self.inner, address, index) + DatabaseRef::storage(self.backend, address, index) } } - fn block_hash(&mut self, number: U256) -> H256 { + fn block_hash(&self, number: U256) -> H256 { if let Some(ref db) = self.db_override { DatabaseRef::block_hash(db, number) } else { - DatabaseRef::block_hash(self.inner, number) + DatabaseRef::block_hash(self.backend, number) } } } + +impl<'a> Database for FuzzBackendWrapper<'a> { + fn basic(&mut self, address: H160) -> AccountInfo { + DatabaseRef::basic(self, address) + } + fn code_by_hash(&mut self, code_hash: H256) -> Bytes { + DatabaseRef::code_by_hash(self, code_hash) + } + fn storage(&mut self, address: H160, index: U256) -> U256 { + DatabaseRef::storage(self, address, index) + } + + fn block_hash(&mut self, number: U256) -> H256 { + DatabaseRef::block_hash(self, number) + } +} diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index b10cd94bd7f5..47c50254d62b 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -104,28 +104,11 @@ pub trait DatabaseExt: Database { pub struct Backend { /// The access point for managing forks forks: MultiFork, - /// tracks all created forks - created_forks: HashMap, /// The database that holds the entire state, uses an internal database depending on current /// state pub db: CacheDB, - /// Contains snapshots made at a certain point - snapshots: Snapshots>>, - /// Tracks whether there was a failure in a snapshot that was reverted - /// - /// The Test contract contains a bool variable that is set to true when an `assert` function - /// failed. When a snapshot is reverted, it reverts the state of the evm, but we still want - /// to know if there was an `assert` that failed after the snapshot was taken so that we can - /// check if the test function passed all asserts even across snapshots. 
When a snapshot is - /// reverted we get the _current_ `revm::Subroutine` which contains the state that we can check - /// if the `_failed` variable is set, - /// additionally - has_failure_snapshot: bool, - /// Tracks the address of a Test contract - /// - /// This address can be used to inspect the state of the contract when a test is being - /// executed. E.g. the `_failed` variable of `DSTest` - test_contract_context: Option
, + /// holds additional Backend data + inner: BackendInner, } // === impl Backend === @@ -148,50 +131,45 @@ impl Backend { CacheDB::new(BackendDatabase::InMemory(EmptyDB())) }; - Self { - forks, - db, - created_forks: Default::default(), - snapshots: Default::default(), - has_failure_snapshot: false, - // not yet known - test_contract_context: None, - } + Self { forks, db, inner: Default::default() } } /// Creates a new instance with a `BackendDatabase::InMemory` cache layer for the `CacheDB` pub fn clone_empty(&self) -> Self { let mut db = self.db.clone(); *db.db_mut() = BackendDatabase::InMemory(EmptyDB()); - Self { - forks: self.forks.clone(), - created_forks: Default::default(), - db, - snapshots: Default::default(), - has_failure_snapshot: false, - test_contract_context: None, - } + Self { forks: self.forks.clone(), db, inner: Default::default() } } pub fn insert_cache(&mut self, address: H160, account: AccountInfo) { self.db.insert_cache(address, account) } + /// Returns all forks created by this backend + pub fn created_forks(&self) -> &HashMap { + &self.inner.created_forks + } + + /// Returns all snapshots created in this backend + pub fn snapshots(&self) -> &Snapshots>> { + &self.inner.snapshots + } + /// Sets the address of the `DSTest` contract that is being executed pub fn set_test_contract(&mut self, addr: Address) -> &mut Self { - self.test_contract_context = Some(addr); + self.inner.test_contract_context = Some(addr); self } /// Returns the address of the set `DSTest` contract pub fn test_contract_address(&self) -> Option
{ - self.test_contract_context + self.inner.test_contract_context } /// Checks if the test contract associated with this backend failed, See /// [Self::is_failed_test_contract] pub fn is_failed(&self) -> bool { - self.has_failure_snapshot || + self.inner.has_failure_snapshot || self.test_contract_address() .map(|addr| self.is_failed_test_contract(addr)) .unwrap_or_default() @@ -234,7 +212,7 @@ impl Backend { INSP: Inspector, { if let TransactTo::Call(to) = env.tx.transact_to { - self.test_contract_context = Some(to); + self.inner.test_contract_context = Some(to); } revm::evm_inner::(&mut env, self, &mut inspector).transact() } @@ -244,17 +222,18 @@ impl Backend { impl DatabaseExt for Backend { fn snapshot(&mut self, subroutine: &SubRoutine) -> U256 { - let id = self.snapshots.insert(BackendSnapshot::new(self.db.clone(), subroutine.clone())); + let id = + self.inner.snapshots.insert(BackendSnapshot::new(self.db.clone(), subroutine.clone())); trace!(target: "backend", "Created new snapshot {}", id); id } fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { - if let Some(mut snapshot) = self.snapshots.remove(id) { + if let Some(mut snapshot) = self.inner.snapshots.remove(id) { // need to check whether DSTest's `failed` variable is set to `true` which means an // error occurred either during the snapshot or even before if self.is_failed() { - self.has_failure_snapshot = true; + self.inner.has_failure_snapshot = true; } // merge additional logs @@ -272,13 +251,14 @@ impl DatabaseExt for Backend { fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { let (id, fork) = self.forks.create_fork(fork)?; - self.created_forks.insert(id.clone(), fork); + self.inner.created_forks.insert(id.clone(), fork); Ok(id) } fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { let id = id.into(); let fork = self + .inner .created_forks .get(&id) .cloned() @@ -293,7 +273,7 @@ impl DatabaseRef for Backend { self.db.basic(address) } - fn code_by_hash(&self, code_hash: H256) -> bytes::Bytes { + fn code_by_hash(&self, code_hash: H256) -> Bytes { self.db.code_by_hash(code_hash) } @@ -311,7 +291,7 @@ impl<'a> DatabaseRef for &'a mut Backend { DatabaseRef::basic(&self.db, address) } - fn code_by_hash(&self, code_hash: H256) -> bytes::Bytes { + fn code_by_hash(&self, code_hash: H256) -> Bytes { DatabaseRef::code_by_hash(&self.db, code_hash) } @@ -366,7 +346,7 @@ impl DatabaseRef for BackendDatabase { } } - fn code_by_hash(&self, address: H256) -> bytes::Bytes { + fn code_by_hash(&self, address: H256) -> Bytes { match self { BackendDatabase::InMemory(inner) => inner.code_by_hash(address), BackendDatabase::Forked(inner, _) => inner.code_by_hash(address), @@ -387,3 +367,39 @@ impl DatabaseRef for BackendDatabase { } } } + +/// Container type for various Backend related data +#[derive(Debug, Clone)] +pub struct BackendInner { + /// tracks all created forks + pub created_forks: HashMap, + /// Contains snapshots made at a certain point + pub snapshots: Snapshots>>, + /// Tracks whether there was a failure in a snapshot that was reverted + /// + /// The Test contract contains a bool variable that is set to true when an `assert` function + /// failed. When a snapshot is reverted, it reverts the state of the evm, but we still want + /// to know if there was an `assert` that failed after the snapshot was taken so that we can + /// check if the test function passed all asserts even across snapshots. 
When a snapshot is + /// reverted we get the _current_ `revm::Subroutine` which contains the state that we can check + /// if the `_failed` variable is set, + /// additionally + pub has_failure_snapshot: bool, + /// Tracks the address of a Test contract + /// + /// This address can be used to inspect the state of the contract when a test is being + /// executed. E.g. the `_failed` variable of `DSTest` + pub test_contract_context: Option
, +} + +impl Default for BackendInner { + fn default() -> Self { + Self { + created_forks: Default::default(), + snapshots: Default::default(), + has_failure_snapshot: false, + // not yet known + test_contract_context: None, + } + } +} diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 7d34781c0ff0..bd5f1c8676ed 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -307,7 +307,7 @@ impl Executor { args: T, value: U256, abi: Option<&Abi>, - ) -> std::result::Result, EvmError> { + ) -> Result, EvmError> { let func = func.into(); let calldata = Bytes::from(encode_function_data(&func, args)?.to_vec()); @@ -335,7 +335,7 @@ impl Executor { args: T, value: U256, abi: Option<&Abi>, - ) -> std::result::Result, EvmError> { + ) -> Result, EvmError> { let func = func.into(); let calldata = Bytes::from(encode_function_data(&func, args)?.to_vec()); let call_result = self.call_raw(from, to, calldata, value)?; @@ -374,7 +374,7 @@ impl Executor { code: Bytes, value: U256, abi: Option<&Abi>, - ) -> std::result::Result { + ) -> Result { trace!(sender=?from, "deploying contract"); let mut evm = EVM::new(); evm.env = self.build_env(from, TransactTo::Create(CreateScheme::Create), code, value); @@ -447,6 +447,7 @@ impl Executor { /// to test multiple assertions in 1 test function while also preserving logs. /// /// Instead, it sets `failed` to `true` which we must check. + // TODO(mattsse): check if safe to replace with `Backend::is_failed()` pub fn is_success( &self, address: Address, diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 559bbbe895b2..6751c9e487e8 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -46,7 +46,7 @@ pub struct MultiContractRunner { pub fork: Option, /// Additional cheatcode inspector related settings derived from the `Config` pub cheats_config: CheatsConfig, - /// Whether or not to collect coverage info + /// Whether to collect coverage info pub coverage: bool, } diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 79daa58b1061..19e3d2182c7e 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -251,10 +251,6 @@ impl<'a> ContractRunner<'a> { .map(|func| (func, func.name.starts_with("testFail"))) .collect(); - // TODO(mattsse): while tests don't modify the state we have cheatcodes that affect the - // state (fork cheat codes, snapshots), so in order to execute all tests in parallel they - // need their own copy of the `Executor`, - let test_results = tests .par_iter() .filter_map(|(func, should_fail)| { @@ -262,7 +258,6 @@ impl<'a> ContractRunner<'a> { Some(self.clone().run_test(func, *should_fail, setup.clone())) } else { fuzzer.as_ref().map(|fuzzer| { - // TODO(mattsse) use fuzz wrapper backend self.run_fuzz_test(func, *should_fail, fuzzer.clone(), setup.clone()) }) }; @@ -341,7 +336,7 @@ impl<'a> ContractRunner<'a> { (reverted, Some(reason), gas, stipend, execution_trace, None, state_changeset) } Err(err) => { - tracing::error!(?err); + error!(?err); return Err(err.into()) } }; From e629551e47156c6c56cc6af9db03e4c7e22b24a4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Jul 2022 18:37:15 +0200 Subject: [PATCH 067/102] feat: resolve rpc aliases --- config/src/lib.rs | 2 +- config/src/rpc.rs | 67 ++++++++++++++++--- .../executor/inspector/cheatcodes/config.rs | 34 +++++++++- evm/src/executor/inspector/cheatcodes/fork.rs | 3 +- 4 files changed, 90 insertions(+), 16 deletions(-) diff --git a/config/src/lib.rs b/config/src/lib.rs index 04fbeb6bdc8b..29bff6b49550 100644 --- 
a/config/src/lib.rs +++ b/config/src/lib.rs @@ -41,7 +41,7 @@ pub mod utils; pub use crate::utils::*; mod rpc; -pub use rpc::RpcEndpoints; +pub use rpc::{ResolvedRpcEndpoints, RpcEndpoint, RpcEndpoints, UnresolvedEnvVarError}; pub mod cache; use cache::{Cache, ChainCache}; diff --git a/config/src/rpc.rs b/config/src/rpc.rs index 08ca489befc2..af2e868bb8b7 100644 --- a/config/src/rpc.rs +++ b/config/src/rpc.rs @@ -1,7 +1,7 @@ //! Support for multiple RPC-endpoints use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::{collections::BTreeMap, env, env::VarError, fmt}; +use std::{collections::BTreeMap, env, env::VarError, fmt, ops::Deref}; /// Container type for rpc endpoints #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] @@ -24,12 +24,18 @@ impl RpcEndpoints { } /// Returns all (alias -> url) pairs - /// - /// # Errors - /// - /// returns an error if it contains a reference to an env var that is not set - pub fn resolve_all(self) -> Result, VarError> { - self.endpoints.into_iter().map(|(name, e)| (e.resolve().map(|url| (name, url)))).collect() + pub fn resolved(self) -> ResolvedRpcEndpoints { + ResolvedRpcEndpoints { + endpoints: self.endpoints.into_iter().map(|(name, e)| (name, e.resolve())).collect(), + } + } +} + +impl Deref for RpcEndpoints { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.endpoints } } @@ -43,7 +49,7 @@ impl RpcEndpoints { pub enum RpcEndpoint { /// A raw Url (ws, http) Url(String), - // Reference to an env var in the form of `${ENV_VAR}` + /// Reference to an env var in the form of `${ENV_VAR}` Env(String), } @@ -71,10 +77,12 @@ impl RpcEndpoint { /// # Error /// /// Returns an error if the type holds a reference to an env var and the env var is not set - pub fn resolve(self) -> Result { + pub fn resolve(self) -> Result { match self { RpcEndpoint::Url(url) => Ok(url), - RpcEndpoint::Env(v) => env::var(v), + RpcEndpoint::Env(var) => { + env::var(&var).map_err(|source| UnresolvedEnvVarError { var, source }) + } } } } @@ -91,7 +99,7 @@ impl fmt::Display for RpcEndpoint { } impl TryFrom for String { - type Error = VarError; + type Error = UnresolvedEnvVarError; fn try_from(value: RpcEndpoint) -> Result { value.resolve() @@ -123,6 +131,43 @@ impl<'de> Deserialize<'de> for RpcEndpoint { } } +/// Container type for _resolved_ RPC endpoints, see [RpcEndpoints::resolve_all()] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct ResolvedRpcEndpoints { + /// contains all named endpoints and their URL or an error if we failed to resolve the env var + /// alias + endpoints: BTreeMap>, +} + +impl Deref for ResolvedRpcEndpoints { + type Target = BTreeMap>; + + fn deref(&self) -> &Self::Target { + &self.endpoints + } +} + +/// Error when we failed to resolve an env var +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct UnresolvedEnvVarError { + /// Var that couldn't be resolved + pub var: String, + /// the `env::var` error + pub source: VarError, +} + +impl fmt::Display for UnresolvedEnvVarError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Failed to resolve env var `{}`: {}", self.var, self.source) + } +} + +impl std::error::Error for UnresolvedEnvVarError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.source) + } +} + /// Extracts the value surrounded by `${}` /// /// TODO(mattsse): make this a bit more sophisticated diff --git a/evm/src/executor/inspector/cheatcodes/config.rs b/evm/src/executor/inspector/cheatcodes/config.rs index 
3fa71ca9e56d..71f0b8fe3623 100644 --- a/evm/src/executor/inspector/cheatcodes/config.rs +++ b/evm/src/executor/inspector/cheatcodes/config.rs @@ -1,5 +1,7 @@ use crate::executor::opts::EvmOpts; -use foundry_config::{cache::StorageCachingConfig, Config, RpcEndpoints}; +use bytes::Bytes; +use ethers::abi::AbiEncode; +use foundry_config::{cache::StorageCachingConfig, Config, ResolvedRpcEndpoints}; use std::path::{Path, PathBuf}; /// Additional, configurable context the `Cheatcodes` inspector has access to @@ -13,7 +15,7 @@ pub struct CheatsConfig { /// RPC storage caching settings determines what chains and endpoints to cache pub rpc_storage_caching: StorageCachingConfig, /// All known endpoints and their aliases - pub rpc_endpoints: RpcEndpoints, + pub rpc_endpoints: ResolvedRpcEndpoints, /// Project root pub root: PathBuf, @@ -34,7 +36,7 @@ impl CheatsConfig { Self { ffi: evm_opts.ffi, rpc_storage_caching: config.rpc_storage_caching.clone(), - rpc_endpoints: config.rpc_endpoints.clone(), + rpc_endpoints: config.rpc_endpoints.clone().resolved(), root: config.__root.0.clone(), allowed_paths, } @@ -51,4 +53,30 @@ impl CheatsConfig { Ok(()) } + + /// Returns the RPC to use + /// + /// If `url_or_alias` is a known alias in the `ResolvedRpcEndpoints` then it returns the + /// corresponding URL of that alias. otherwise this assumes `url_or_alias` is itself a URL + /// if it starts with a `http` or `ws` scheme + /// + /// # Errors + /// + /// - Returns an error if `url_or_alias` is a known alias but references an unresolved env var. + /// - Returns an error if `url_or_alias` is not an alias but does not start with a `http` or + /// `scheme` + pub fn get_rpc_url(&self, url_or_alias: impl Into) -> Result { + let url_or_alias = url_or_alias.into(); + match self.rpc_endpoints.get(&url_or_alias) { + Some(Ok(url)) => Ok(url.clone()), + Some(Err(err)) => Err(err.to_string().encode().into()), + None => { + if !url_or_alias.starts_with("http") && !url_or_alias.starts_with("ws") { + Err(format!("invalid rpc url {}", url_or_alias).encode().into()) + } else { + Ok(url_or_alias) + } + } + } + } } diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index 5e195684beff..42aab268a5c7 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -32,9 +32,10 @@ pub fn apply( fn create_fork( state: &mut Cheatcodes, data: &mut EVMData<'_, DB>, - url: String, + url_or_alias: String, block: BlockNumber, ) -> Result { + let url = state.config.get_rpc_url(url_or_alias)?; let fork = CreateFork { enable_caching: state.config.rpc_storage_caching.enable_for_endpoint(&url), url, From 2a105ed63b6cc8c312f5a87dd24f33fd8b603fa6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Jul 2022 18:55:29 +0200 Subject: [PATCH 068/102] feat: periodically flush rpc cache --- evm/src/executor/fork/backend.rs | 11 ++++++++--- evm/src/executor/fork/multi.rs | 20 ++++++++++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/evm/src/executor/fork/backend.rs b/evm/src/executor/fork/backend.rs index 71ebe5c1d49d..12114cafcece 100644 --- a/evm/src/executor/fork/backend.rs +++ b/evm/src/executor/fork/backend.rs @@ -370,7 +370,7 @@ pub struct SharedBackend { /// /// There is only one instance of the type, so as soon as the last `SharedBackend` is deleted, /// `FlushJsonBlockCacheDB` is also deleted and the cache is flushed. 
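Looking back at `CheatsConfig::get_rpc_url` from the previous patch, the resolution rules amount to the following (a sketch; the `mainnet` alias and URLs are made up):

    // with a foundry.toml entry like `mainnet = "${RPC_MAINNET}"` and the env var set
    assert!(cheats_config.get_rpc_url("mainnet").is_ok());               // alias resolves to the configured URL
    assert!(cheats_config.get_rpc_url("http://localhost:8545").is_ok()); // raw http/ws URLs pass through unchanged
    assert!(cheats_config.get_rpc_url("not-an-alias").is_err());         // neither an alias nor a URL -> encoded error
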
- _cache: Arc, + cache: Arc, } impl SharedBackend { @@ -432,9 +432,9 @@ impl SharedBackend { M: Middleware + Unpin + 'static + Clone, { let (backend, backend_rx) = channel(1); - let _cache = Arc::new(FlushJsonBlockCacheDB(Arc::clone(db.cache()))); + let cache = Arc::new(FlushJsonBlockCacheDB(Arc::clone(db.cache()))); let handler = BackendHandler::new(provider, db, backend_rx, pin_block); - (Self { backend, _cache }, handler) + (Self { backend, cache }, handler) } /// Updates the pinned block to fetch data from @@ -471,6 +471,11 @@ impl SharedBackend { Ok(rx.recv()?) }) } + + /// Flushes the DB to disk if caching is enabled + pub(crate) fn flush_cache(&self) { + self.cache.0.flush(); + } } impl DatabaseRef for SharedBackend { diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 6d71d5d7a9b3..8b068fd0a570 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -166,12 +166,18 @@ pub struct MultiForkHandler { /// Initial backoff delay for requests backoff: u64, + + /// Periodic interval to flush rpc cache + flush_cache_interval: tokio::time::Interval, } // === impl MultiForkHandler === impl MultiForkHandler { fn new(incoming: Receiver) -> Self { + // flush cache every 60s, this ensures that long-running fork tests get their cache flushed + let flush_interval = std::time::Duration::from_secs(60); + Self { incoming: incoming.fuse(), handlers: Default::default(), @@ -180,6 +186,11 @@ impl MultiForkHandler { retries: 8, // 800ms backoff: 800, + // when to periodically flush caches + flush_cache_interval: tokio::time::interval_at( + tokio::time::Instant::now() + flush_interval, + flush_interval, + ), } } @@ -276,6 +287,15 @@ impl Future for MultiForkHandler { return Poll::Ready(()) } + if pin.flush_cache_interval.poll_tick(cx).is_ready() && !pin.forks.is_empty() { + trace!(target: "fork::multi", "tick flushing caches"); + let forks = pin.forks.values().cloned().collect::>(); + // flush this on new thread to not block here + std::thread::spawn(move || { + forks.into_iter().for_each(|fork| fork.flush_cache()); + }); + } + Poll::Pending } } From 5ba6f8aaf49c428f5bda9d6e15c8e20091d097a2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 1 Jul 2022 18:56:48 +0200 Subject: [PATCH 069/102] chore: derive default --- evm/src/executor/backend/mod.rs | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 47c50254d62b..71a2676f8ebc 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -369,7 +369,7 @@ impl DatabaseRef for BackendDatabase { } /// Container type for various Backend related data -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct BackendInner { /// tracks all created forks pub created_forks: HashMap, @@ -391,15 +391,3 @@ pub struct BackendInner { /// executed. E.g. the `_failed` variable of `DSTest` pub test_contract_context: Option
, } - -impl Default for BackendInner { - fn default() -> Self { - Self { - created_forks: Default::default(), - snapshots: Default::default(), - has_failure_snapshot: false, - // not yet known - test_contract_context: None, - } - } -} From 0470107713c19159b332d1537fb6215e6ad13598 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Jun 2022 19:42:28 +0200 Subject: [PATCH 070/102] chore: use revm head --- Cargo.lock | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ece221ff8c21..4f3981dfd306 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4106,9 +4106,8 @@ dependencies = [ [[package]] name = "revm" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1affcd38e2d1a4570699f8cf35f0668c886febdba1b857771b20cf81025ada1d" +version = "1.6.0" +source = "git+https://github.com/bluealloy/revm?rev=a95390f991942323f90725087b7b513c68af4df5#a95390f991942323f90725087b7b513c68af4df5" dependencies = [ "arrayref", "auto_impl 1.0.1", @@ -4126,8 +4125,7 @@ dependencies = [ [[package]] name = "revm_precompiles" version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6aae8f44783ef6ff39fc22c9c999dfa0e17b79d663b752730c02a025935185" +source = "git+https://github.com/bluealloy/revm?rev=a95390f991942323f90725087b7b513c68af4df5#a95390f991942323f90725087b7b513c68af4df5" dependencies = [ "bytes", "k256", From cc48ce8dba621fb5885957319c2f460166a5a04e Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Fri, 1 Jul 2022 09:59:25 -0700 Subject: [PATCH 071/102] chore: bump revm --- Cargo.lock | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f3981dfd306..ece221ff8c21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4106,8 +4106,9 @@ dependencies = [ [[package]] name = "revm" -version = "1.6.0" -source = "git+https://github.com/bluealloy/revm?rev=a95390f991942323f90725087b7b513c68af4df5#a95390f991942323f90725087b7b513c68af4df5" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1affcd38e2d1a4570699f8cf35f0668c886febdba1b857771b20cf81025ada1d" dependencies = [ "arrayref", "auto_impl 1.0.1", @@ -4125,7 +4126,8 @@ dependencies = [ [[package]] name = "revm_precompiles" version = "1.0.0" -source = "git+https://github.com/bluealloy/revm?rev=a95390f991942323f90725087b7b513c68af4df5#a95390f991942323f90725087b7b513c68af4df5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd6aae8f44783ef6ff39fc22c9c999dfa0e17b79d663b752730c02a025935185" dependencies = [ "bytes", "k256", From 987bbd910c13ee8de7d46faf4d6f1928ac1eb627 Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Sun, 3 Jul 2022 14:54:52 +0200 Subject: [PATCH 072/102] build: use revm 1.6.0 --- Cargo.lock | 4 ++-- evm/Cargo.toml | 2 +- ui/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ece221ff8c21..09b46278169e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4106,9 +4106,9 @@ dependencies = [ [[package]] name = "revm" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1affcd38e2d1a4570699f8cf35f0668c886febdba1b857771b20cf81025ada1d" +checksum = "b60030444003ac25474f5281e7e91f15e8475c173b729aac1c10aced56b3adac" dependencies = [ "arrayref", "auto_impl 1.0.1", diff --git a/evm/Cargo.toml b/evm/Cargo.toml index 00f9dcb0965b..96a76e5e57c7 100644 --- a/evm/Cargo.toml +++ 
b/evm/Cargo.toml @@ -37,7 +37,7 @@ once_cell = "1.9.0" # EVM bytes = "1.1.0" hashbrown = "0.12" -revm = { version="1.5", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } +revm = { version = "1.6", default-features = false, features = ["std", "k256", "with-serde", "memory_limit"] } # Fuzzer proptest = "1.0.0" diff --git a/ui/Cargo.toml b/ui/Cargo.toml index 5df90688764c..34ae9a71bac6 100644 --- a/ui/Cargo.toml +++ b/ui/Cargo.toml @@ -14,4 +14,4 @@ eyre = "0.6.5" hex = "0.4.3" ethers = { git = "https://github.com/gakonst/ethers-rs" } forge = { path = "../forge" } -revm = { version="1.5", default-features = false, features = ["std", "k256", "with-serde"] } +revm = { version = "1.6", default-features = false, features = ["std", "k256", "with-serde"] } From 6a50138baa19efa320c54a4a980616f90a86e19d Mon Sep 17 00:00:00 2001 From: Oliver Nordbjerg Date: Sun, 3 Jul 2022 15:09:53 +0200 Subject: [PATCH 073/102] fix: use new revm data structures --- anvil/src/eth/backend/mem/in_memory_db.rs | 2 +- anvil/src/eth/backend/mem/state.rs | 24 ++++++++++------------- evm/src/executor/fork/database.rs | 10 +++++----- evm/src/fuzz/strategies/state.rs | 4 ++-- 4 files changed, 18 insertions(+), 22 deletions(-) diff --git a/anvil/src/eth/backend/mem/in_memory_db.rs b/anvil/src/eth/backend/mem/in_memory_db.rs index da8e733c59cb..58982e878277 100644 --- a/anvil/src/eth/backend/mem/in_memory_db.rs +++ b/anvil/src/eth/backend/mem/in_memory_db.rs @@ -39,7 +39,7 @@ impl Db for MemDb { } fn maybe_state_root(&self) -> Option { - Some(state_merkle_trie_root(self.inner.cache(), self.inner.storage())) + Some(state_merkle_trie_root(self.inner.accounts())) } fn current_state(&self) -> StateDb { diff --git a/anvil/src/eth/backend/mem/state.rs b/anvil/src/eth/backend/mem/state.rs index 54fdd8851aaa..85f8a9c518f7 100644 --- a/anvil/src/eth/backend/mem/state.rs +++ b/anvil/src/eth/backend/mem/state.rs @@ -1,14 +1,14 @@ //! Support for generating the state root for memdb storage +use std::collections::BTreeMap; + use anvil_core::eth::trie::{sec_trie_root, trie_root}; use bytes::Bytes; use ethers::{ types::{Address, H256, U256}, utils::{rlp, rlp::RlpStream}, }; -use foundry_evm::{ - revm::{AccountInfo, Log}, - HashMap as Map, -}; +use forge::revm::db::DbAccount; +use foundry_evm::revm::{AccountInfo, Log}; /// Returns the log hash for all `logs` /// @@ -29,15 +29,11 @@ pub fn log_rlp_hash(logs: Vec) -> H256 { H256::from_slice(out.as_slice()) } -pub fn state_merkle_trie_root( - accounts: &Map, - storage: &Map>, -) -> H256 { +pub fn state_merkle_trie_root(accounts: &BTreeMap) -> H256 { let vec = accounts .iter() - .map(|(address, info)| { - let storage = storage.get(address).cloned().unwrap_or_default(); - let storage_root = trie_account_rlp(info, storage); + .map(|(address, account)| { + let storage_root = trie_account_rlp(&account.info, &account.storage); (*address, storage_root) }) .collect::>(); @@ -46,15 +42,15 @@ pub fn state_merkle_trie_root( } /// Returns the RLP for this account. 
-pub fn trie_account_rlp(info: &AccountInfo, storage: Map) -> Bytes { +pub fn trie_account_rlp(info: &AccountInfo, storage: &BTreeMap) -> Bytes { let mut stream = RlpStream::new_list(4); stream.append(&info.nonce); stream.append(&info.balance); stream.append(&{ - sec_trie_root(storage.into_iter().filter(|(_k, v)| v != &U256::zero()).map(|(k, v)| { + sec_trie_root(storage.iter().filter(|(_k, v)| *v != &U256::zero()).map(|(k, v)| { let mut temp: [u8; 32] = [0; 32]; k.to_big_endian(&mut temp); - (H256::from(temp), rlp::encode(&v)) + (H256::from(temp), rlp::encode(v)) })) }); stream.append(&info.code_hash.as_bytes()); diff --git a/evm/src/executor/fork/database.rs b/evm/src/executor/fork/database.rs index a9ab6fbe4846..136f050386b5 100644 --- a/evm/src/executor/fork/database.rs +++ b/evm/src/executor/fork/database.rs @@ -195,7 +195,7 @@ pub struct ForkDbSnapshot { impl ForkDbSnapshot { fn get_storage(&self, address: Address, index: U256) -> Option { - self.local.storage().get(&address).and_then(|entry| entry.get(&index)).copied() + self.local.accounts().get(&address).and_then(|account| account.storage.get(&index)).copied() } } @@ -204,8 +204,8 @@ impl ForkDbSnapshot { // We prioritize stored changed accounts/storage impl DatabaseRef for ForkDbSnapshot { fn basic(&self, address: Address) -> AccountInfo { - match self.local.cache().get(&address) { - Some(info) => info.clone(), + match self.local.accounts().get(&address) { + Some(account) => account.info.clone(), None => { self.accounts.get(&address).cloned().unwrap_or_else(|| self.local.basic(address)) } @@ -217,8 +217,8 @@ impl DatabaseRef for ForkDbSnapshot { } fn storage(&self, address: Address, index: U256) -> U256 { - match self.local.storage().get(&address) { - Some(entry) => match entry.get(&index) { + match self.local.accounts().get(&address) { + Some(account) => match account.storage.get(&index) { Some(entry) => *entry, None => self .get_storage(address, index) diff --git a/evm/src/fuzz/strategies/state.rs b/evm/src/fuzz/strategies/state.rs index acd917e10f72..4d7aedd1334e 100644 --- a/evm/src/fuzz/strategies/state.rs +++ b/evm/src/fuzz/strategies/state.rs @@ -49,7 +49,7 @@ This is a bug, please open an issue: https://github.com/foundry-rs/foundry/issue /// Builds the initial [EvmFuzzState] from a database. 
pub fn build_initial_state(db: &CacheDB) -> EvmFuzzState { let mut state: HashSet<[u8; 32]> = HashSet::new(); - for (address, storage) in db.storage() { + for (address, account) in db.accounts() { let info = db.basic(*address); // Insert basic account information @@ -58,7 +58,7 @@ pub fn build_initial_state(db: &CacheDB) -> EvmFuzzState { state.insert(utils::u256_to_h256_le(U256::from(info.nonce)).into()); // Insert storage - for (slot, value) in storage { + for (slot, value) in &account.storage { state.insert(utils::u256_to_h256_le(*slot).into()); state.insert(utils::u256_to_h256_le(*value).into()); } From 03054d6dee7c0859de301154adeaa1bff2f2d65d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 4 Jul 2022 16:34:47 +0200 Subject: [PATCH 074/102] feat: add roll fork cheat codes --- evm/src/executor/abi.rs | 2 ++ evm/src/executor/backend/fuzz.rs | 4 ++++ evm/src/executor/backend/mod.rs | 11 +++++++++++ evm/src/executor/inspector/cheatcodes/fork.rs | 10 ++++++++++ testdata/cheats/Cheats.sol | 7 +++++++ 5 files changed, 34 insertions(+) diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index 1b89b873181f..6b8ab244906d 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -87,6 +87,8 @@ ethers::contract::abigen!( createFork(string,uint256)(string) createFork(string)(string) selectFork(string) + rollFork(uint256) + rollFork(string,uint256) ]"#, ); pub use hevm_mod::{HEVMCalls, HEVM_ABI}; diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 1530a3497cf5..dc853e569be9 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -181,6 +181,10 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { } Ok(()) } + + fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result { + todo!() + } } impl<'a> DatabaseRef for FuzzBackendWrapper<'a> { diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 71a2676f8ebc..ef4e4662cc58 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -52,6 +52,13 @@ pub trait DatabaseExt: Database { /// /// Returns an error if no fork with the given `id` exists fn select_fork(&mut self, id: impl Into) -> eyre::Result<()>; + + /// Updates the fork to given block number. + /// + /// This will essentially create a new fork at the given block height. + /// + /// Returns false if no matching fork was found. + fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result; } /// Provides the underlying `revm::Database` implementation. 
@@ -266,6 +273,10 @@ impl DatabaseExt for Backend { *self.db.db_mut() = BackendDatabase::Forked(fork, id); Ok(()) } + + fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result { + todo!() + } } impl DatabaseRef for Backend { diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index 42aab268a5c7..cfdb15c9f525 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -24,6 +24,16 @@ pub fn apply( Ok(_) => Ok(Bytes::new()), Err(err) => Err(err.to_string().encode().into()), }, + HEVMCalls::RollFork0(fork) => match data.db.roll_fork(fork.0, None) { + Ok(b) => Ok(b.encode().into()), + Err(err) => Err(err.to_string().encode().into()), + }, + HEVMCalls::RollFork1(fork) => { + match data.db.roll_fork(fork.1, Some(fork.0.clone().into())) { + Ok(b) => Ok(b.encode().into()), + Err(err) => Err(err.to_string().encode().into()), + } + } _ => return None, }) } diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 37bcd9a7391f..852426b5af42 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -144,4 +144,11 @@ interface Cheats { function selectFork(string calldata) external; // forks the `block` variable from the given endpoint function forkBlockVariable(string calldata, uint256) external; + // Updates the currently active fork to given block number + // This is similar to `roll` but for the fork + // Returns false if no fork is active + function rollFork(uint256) external returns(bool); + // Updates the given fork to given block number + // Returns false if no matching fork was found + function rollFork(string calldata, uint256) external returns(bool); } From 16a5f399134d01daec0b85c7893de8d7eaad3b5e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 4 Jul 2022 16:58:46 +0200 Subject: [PATCH 075/102] feat: add rpc helper functions --- evm/src/executor/abi.rs | 6 ++++-- evm/src/executor/inspector/cheatcodes/fork.rs | 13 +++++++++++++ testdata/cheats/Cheats.sol | 4 ++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index 6b8ab244906d..efb560bd3df8 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -87,8 +87,10 @@ ethers::contract::abigen!( createFork(string,uint256)(string) createFork(string)(string) selectFork(string) - rollFork(uint256) - rollFork(string,uint256) + rollFork(uint256)(bool) + rollFork(string,uint256)(bool) + rpcUrl(string)(string) + rpcUrls(string)(string[2][]) ]"#, ); pub use hevm_mod::{HEVMCalls, HEVM_ABI}; diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index cfdb15c9f525..ff40e2a4ac24 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -34,6 +34,19 @@ pub fn apply( Err(err) => Err(err.to_string().encode().into()), } } + HEVMCalls::RpcUrl(rpc) => state.config.get_rpc_url(&rpc.0).map(|url| url.encode().into()), + HEVMCalls::RpcUrls(_) => { + let mut urls = Vec::with_capacity(state.config.rpc_endpoints.len()); + for alias in state.config.rpc_endpoints.keys().cloned() { + match state.config.get_rpc_url(&alias) { + Ok(url) => { + urls.push([alias, url]); + } + Err(err) => return Some(Err(err)), + } + } + Ok(urls.encode().into()) + } _ => return None, }) } diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 852426b5af42..450de4c9802b 100644 --- a/testdata/cheats/Cheats.sol +++ 
b/testdata/cheats/Cheats.sol @@ -151,4 +151,8 @@ interface Cheats { // Updates the given fork to given block number // Returns false if no matching fork was found function rollFork(string calldata, uint256) external returns(bool); + /// Returns the RPC url for the given alias + function rpcUrl(string calldata) external returns(string memory); + /// Returns all rpc urls and their aliases `[alias, url][]` + function rpcUrls() external returns(string[2][] memory); } From 5a7c6416bbe3e47701434d28243f260571477d7f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 4 Jul 2022 18:10:52 +0200 Subject: [PATCH 076/102] docs: document rpc endpoints table --- config/README.md | 22 ++++++++++++++++++++++ config/src/lib.rs | 12 ++++++++++++ 2 files changed, 34 insertions(+) diff --git a/config/README.md b/config/README.md index 7bbc09d0350f..dee128523b3e 100644 --- a/config/README.md +++ b/config/README.md @@ -143,6 +143,28 @@ stackAllocation = true optimizerSteps = 'dhfoDgvulfnTUtnIf' ``` +##### RPC-Endpoints settings + +The `rpc_endpoints` value accepts a list of `alias = ""` pairs. + +The following example declares two pairs: +The alias `optimism` references the endpoint URL directly. +The alias `mainnet` references the environment variable `RPC_MAINNET` which holds the actual URL. + +Environment variables need to be wrapped in `${}` + +```toml +rpc_endpoints = { optimism = "https://optimism.alchemyapi.io/v2/...", mainnet = "${RPC_MAINNET}" } +``` + +Alternatively the following form is accepted, note the `profile` prefix: + +```toml +[default.rpc_endpoints] +optimism = "https://optimism.alchemyapi.io/v2/..." +mainnet = "${RPC_MAINNET}" +``` + ##### Additional Model Checker settings [Solidity's built-in model checker](https://docs.soliditylang.org/en/latest/smtchecker.html#tutorial) diff --git a/config/src/lib.rs b/config/src/lib.rs index 29bff6b49550..66800d74f0c2 100644 --- a/config/src/lib.rs +++ b/config/src/lib.rs @@ -2489,6 +2489,10 @@ mod tests { chains = 'all' endpoints = 'all' + [default.rpc_endpoints] + optimism = "https://example.com/" + mainnet = "${RPC_MAINNET}" + "#, )?; @@ -2498,6 +2502,14 @@ mod tests { vec![Remapping::from_str("nested/=lib/nested/").unwrap().into()] ); + assert_eq!( + config.rpc_endpoints, + RpcEndpoints::new([ + ("optimism", RpcEndpoint::Url("https://example.com/".to_string())), + ("mainnet", RpcEndpoint::Env("RPC_MAINNET".to_string())) + ]), + ); + Ok(()) }); } From 7745ecd74ac70dece3511c8e52efd8ed6e793b1f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 4 Jul 2022 19:11:24 +0200 Subject: [PATCH 077/102] test: add rpc endpoint tests --- Cargo.lock | 56 +++++++++++++++++++++------------- anvil/Cargo.toml | 6 ++-- anvil/server/Cargo.toml | 2 +- binder/Cargo.toml | 2 +- cast/Cargo.toml | 2 +- cli/Cargo.toml | 4 +-- evm/Cargo.toml | 6 ++-- evm/src/executor/fork/multi.rs | 41 +++++++++++++++++-------- forge/Cargo.toml | 8 ++--- forge/src/multi_runner.rs | 25 ++++++++++++--- testdata/cheats/RpcUrls.t.sol | 42 +++++++++++++++++++++++++ utils/Cargo.toml | 4 +-- 12 files changed, 142 insertions(+), 56 deletions(-) create mode 100644 testdata/cheats/RpcUrls.t.sol diff --git a/Cargo.lock b/Cargo.lock index ec3f1d283ebd..ace1ae93f786 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -816,9 +816,9 @@ dependencies = [ [[package]] name = "coins-ledger" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "498841803f751d49bb08fe4a88b514087c52e5df885575ed4757f14ab151e239" +checksum = 
"bbfa8a730d02735d8d53888a95d8f33aaa9dda9979862de113202421db939b2a" dependencies = [ "async-trait", "blake2b_simd", @@ -898,6 +898,18 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "comfy-table" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121d8a5b0346092c18a4b2fd6f620d7a06f0eb7ac0a45860939a0884bc579c56" +dependencies = [ + "crossterm 0.23.2", + "strum 0.24.0", + "strum_macros 0.24.0", + "unicode-width", +] + [[package]] name = "command-group" version = "1.0.8" @@ -1470,7 +1482,7 @@ dependencies = [ [[package]] name = "ethers" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "ethers-addressbook", "ethers-contract", @@ -1485,7 +1497,7 @@ dependencies = [ [[package]] name = "ethers-addressbook" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "ethers-core", "once_cell", @@ -1496,7 +1508,7 @@ dependencies = [ [[package]] name = "ethers-contract" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "ethers-contract-abigen", "ethers-contract-derive", @@ -1514,7 +1526,7 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "Inflector", "cfg-if 1.0.0", @@ -1536,7 +1548,7 @@ dependencies = [ [[package]] name = "ethers-contract-derive" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "ethers-contract-abigen", "ethers-core", @@ -1550,7 +1562,7 @@ dependencies = [ [[package]] name = "ethers-core" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "arrayvec 0.7.2", "bytes", @@ -1580,7 +1592,7 @@ dependencies = [ [[package]] name = "ethers-etherscan" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "ethers-core", "getrandom 0.2.6", @@ -1596,7 +1608,7 @@ dependencies = [ [[package]] name = "ethers-middleware" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "async-trait", "ethers-contract", @@ -1620,7 +1632,7 @@ dependencies = [ [[package]] name = "ethers-providers" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "async-trait", "auto_impl 
1.0.1", @@ -1656,7 +1668,7 @@ dependencies = [ [[package]] name = "ethers-signers" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "async-trait", "coins-bip32", @@ -1679,7 +1691,7 @@ dependencies = [ [[package]] name = "ethers-solc" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#5d4addb7151b9b26f8ffe8610344bd45957b82f2" +source = "git+https://github.com/gakonst/ethers-rs#fe267049c8deb218f8b9086eab29f55867e71dee" dependencies = [ "cfg-if 1.0.0", "colored", @@ -1866,7 +1878,7 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" name = "forge" version = "0.2.0" dependencies = [ - "comfy-table", + "comfy-table 6.0.0", "ethers", "eyre", "foundry-common", @@ -1936,7 +1948,7 @@ dependencies = [ "clap", "clap_complete", "color-eyre", - "comfy-table", + "comfy-table 5.0.1", "console 0.15.0", "dunce", "ethers", @@ -4395,9 +4407,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d92beeab217753479be2f74e54187a6aed4c125ff0703a866c3147a02f0c6dd" +checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" dependencies = [ "serde", ] @@ -5201,9 +5213,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" +checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ "once_cell", "valuable", @@ -5242,13 +5254,13 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.11" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "3a713421342a5a666b7577783721d3117f1b69a393df803ee17bb73b1e122a59" dependencies = [ "ansi_term", - "lazy_static", "matchers", + "once_cell", "regex", "sharded-slab", "smallvec", diff --git a/anvil/Cargo.toml b/anvil/Cargo.toml index f1d57cff4b32..bbc41c50aed6 100644 --- a/anvil/Cargo.toml +++ b/anvil/Cargo.toml @@ -43,11 +43,11 @@ tower = "0.4" tower-http = { version = "0.3", features = ["trace"] } # tracing -tracing = "0.1.32" +tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } # async -tokio = { version = "1.10", features = ["time"] } +tokio = { version = "1", features = ["time"] } parking_lot = "0.12" futures = "0.3" @@ -73,7 +73,7 @@ fdlimit = { version = "0.2.1", optional = true } ethers = { git = "https://github.com/gakonst/ethers-rs", features = ["abigen"] } ethers-solc = { git = "https://github.com/gakonst/ethers-rs", features = ["project-util", "full"] } pretty_assertions = "1.2.1" -tokio = { version = "1.10", features = ["full"] } +tokio = { version = "1", features = ["full"] } [features] default = ["cli"] diff --git a/anvil/server/Cargo.toml b/anvil/server/Cargo.toml index 1ba713389459..aed9dc490302 100644 --- a/anvil/server/Cargo.toml +++ b/anvil/server/Cargo.toml @@ -16,7 +16,7 @@ hyper = "0.14" tower-http = { version = "0.3", features = ["trace", "cors"] } # tracing -tracing = "0.1.32" +tracing = "0.1" # async parking_lot = "0.12" diff --git a/binder/Cargo.toml b/binder/Cargo.toml index 6c589ca9eb2c..1f0d7b8d22a2 100644 
--- a/binder/Cargo.toml +++ b/binder/Cargo.toml @@ -19,5 +19,5 @@ curl = { version = "0.4", default-features = false, features = ["http2"] } eyre = "0.6" git2 = { version = "0.13", default-features = false } url = "2.2" -tracing = "0.1.33" +tracing = "0.1" tempfile = "3.3.0" diff --git a/cast/Cargo.toml b/cast/Cargo.toml index d0472f3a5979..111dcb52a16c 100644 --- a/cast/Cargo.toml +++ b/cast/Cargo.toml @@ -28,7 +28,7 @@ hex = "0.4.3" [dev-dependencies] async-trait = "0.1.53" -tokio = "1.17.0" +tokio = "1" thiserror = "1.0.30" [features] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 1a761a6c62a0..29641b06b4d0 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -39,14 +39,14 @@ clap_complete = "3.0.4" yansi = "0.5.1" tracing-error = "0.2.0" tracing-subscriber = { version = "0.3", features = ["registry", "env-filter", "fmt"] } -tracing = "0.1.26" +tracing = "0.1" console = "0.15.0" watchexec = "2.0.0" atty = "0.2.14" comfy-table = "5.0.0" # async / parallel -tokio = { version = "1.11.0", features = ["macros"] } +tokio = { version = "1", features = ["macros"] } futures = "0.3.17" rayon = "1.5.1" diff --git a/evm/Cargo.toml b/evm/Cargo.toml index 4a973fc66948..670a2bf39a71 100644 --- a/evm/Cargo.toml +++ b/evm/Cargo.toml @@ -23,12 +23,12 @@ eyre = "0.6.5" thiserror = "1.0.29" # Logging -tracing = "0.1.26" -tracing-subscriber = "0.3.11" +tracing = "0.1" +tracing-subscriber = "0.3" tracing-error = "0.2.0" # Threading/futures -tokio = { version = "1.10.1", features = ["time"] } +tokio = { version = "1", features = ["time"] } parking_lot = "0.12.0" futures = "0.3.21" once_cell = "1.9.0" diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 8b068fd0a570..31c18fbc04eb 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -27,6 +27,7 @@ use std::{ mpsc::{channel as oneshot_channel, Sender as OneshotSender}, Arc, }, + time::Duration, }; use tracing::trace; @@ -83,7 +84,7 @@ impl MultiFork { /// /// Also returns the `JoinHandle` of the spawned thread. 
pub fn spawn() -> Self { - let (fork, handler) = Self::new(); + let (fork, mut handler) = Self::new(); // spawn a light-weight thread with a thread-local async runtime just for // sending and receiving data from the remote client(s) let _ = std::thread::Builder::new() @@ -94,7 +95,14 @@ impl MultiFork { .build() .expect("failed to create multi-fork-backend-thread tokio runtime"); - rt.block_on(async move { handler.await }); + rt.block_on(async move { + // flush cache every 60s, this ensures that long-running fork tests get their + // cache flushed from time to time + // NOTE: we install the interval here because the `tokio::timer::Interval` + // requires a rt + handler.set_flush_cache_interval(Duration::from_secs(60)); + handler.await + }); }) .expect("failed to spawn multi fork handler thread"); trace!(target: "fork::multi", "spawned MultiForkHandler thread"); @@ -167,17 +175,14 @@ pub struct MultiForkHandler { /// Initial backoff delay for requests backoff: u64, - /// Periodic interval to flush rpc cache - flush_cache_interval: tokio::time::Interval, + /// Optional periodic interval to flush rpc cache + flush_cache_interval: Option, } // === impl MultiForkHandler === impl MultiForkHandler { fn new(incoming: Receiver) -> Self { - // flush cache every 60s, this ensures that long-running fork tests get their cache flushed - let flush_interval = std::time::Duration::from_secs(60); - Self { incoming: incoming.fuse(), handlers: Default::default(), @@ -186,14 +191,17 @@ impl MultiForkHandler { retries: 8, // 800ms backoff: 800, - // when to periodically flush caches - flush_cache_interval: tokio::time::interval_at( - tokio::time::Instant::now() + flush_interval, - flush_interval, - ), + flush_cache_interval: None, } } + /// Sets the interval after which all rpc caches should be flushed periodically + pub fn set_flush_cache_interval(&mut self, period: Duration) -> &mut Self { + self.flush_cache_interval = + Some(tokio::time::interval_at(tokio::time::Instant::now() + period, period)); + self + } + fn on_request(&mut self, req: Request) { match req { Request::CreateFork(fork, sender) => { @@ -287,7 +295,14 @@ impl Future for MultiForkHandler { return Poll::Ready(()) } - if pin.flush_cache_interval.poll_tick(cx).is_ready() && !pin.forks.is_empty() { + // periodically flush cached RPC state + if pin + .flush_cache_interval + .as_mut() + .map(|interval| interval.poll_tick(cx).is_ready()) + .unwrap_or_default() && + !pin.forks.is_empty() + { trace!(target: "fork::multi", "tick flushing caches"); let forks = pin.forks.values().cloned().collect::>(); // flush this on new thread to not block here diff --git a/forge/Cargo.toml b/forge/Cargo.toml index dfdcf37b3629..fd4a7080483b 100644 --- a/forge/Cargo.toml +++ b/forge/Cargo.toml @@ -19,14 +19,14 @@ regex = { version = "1.5.4", default-features = false } hex = "0.4.3" glob = "0.3.0" # TODO: Trim down -tokio = { version = "1.10.1" } -tracing = "0.1.26" -tracing-subscriber = "0.3.11" +tokio = { version = "1", features = ["time"] } +tracing = "0.1" +tracing-subscriber = "0.3" proptest = "1.0.0" rayon = "1.5" rlp = "0.5.1" once_cell = "1.9.0" -comfy-table = "5.0.0" +comfy-table = "6.0.0" [dev-dependencies] ethers = { git = "https://github.com/gakonst/ethers-rs", default-features = false, features = ["solc-full", "solc-tests"] } diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 6751c9e487e8..fb973c25669e 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -373,7 +373,7 @@ mod tests { filter::Filter, COMPILED, 
COMPILED_WITH_LIBS, EVM_OPTS, LIBS_PROJECT, PROJECT, }, }; - use foundry_config::Config; + use foundry_config::{Config, RpcEndpoint, RpcEndpoints}; use foundry_evm::trace::TraceKind; use foundry_utils::init_tracing_subscriber; use std::env; @@ -385,8 +385,11 @@ mod tests { /// Builds a non-tracing runner fn runner() -> MultiContractRunner { + let mut config = Config::with_root(PROJECT.root()); + config.rpc_endpoints = rpc_endpoints(); + base_runner() - .with_cheats_config(CheatsConfig::new(&Config::with_root(PROJECT.root()), &*EVM_OPTS)) + .with_cheats_config(CheatsConfig::new(&config, &*EVM_OPTS)) .build( &(*PROJECT).paths.root, (*COMPILED).clone(), @@ -421,6 +424,20 @@ mod tests { .unwrap() } + /// the RPC endpoints used during tests + fn rpc_endpoints() -> RpcEndpoints { + RpcEndpoints::new([ + ( + "rpcAlias", + RpcEndpoint::Url( + "https://eth-mainnet.alchemyapi.io/v2/Lc7oIGYeL_QvInzI0Wiu_pOZZDEKBrdf" + .to_string(), + ), + ), + ("rpcEnvAlias", RpcEndpoint::Env("RPC_ENV_ALIAS".to_string())), + ]) + } + /// A helper to assert the outcome of multiple tests with helpful assert messages fn assert_multiple( actuals: &BTreeMap, @@ -1079,7 +1096,7 @@ mod tests { } #[test] - fn test_cheats() { + fn test_env_vars() { let mut runner = runner(); // test `setEnv` first, and confirm that it can correctly set environment variables, @@ -1127,7 +1144,7 @@ Reason: `setEnv` failed to set an environment variable `{}={}`", // let suite_result = // runner.test(&Filter::new(".*", ".*", ".*cheats/[^Fork]"), None, true).unwrap(); let suite_result = - runner.test(&Filter::new(".*", ".*", ".*cheats/Snapsh"), None, true).unwrap(); + runner.test(&Filter::new(".*", ".*", ".*cheats/RpcUrl"), None, true).unwrap(); assert!(!suite_result.is_empty()); for (_, SuiteResult { test_results, .. 
}) in suite_result { diff --git a/testdata/cheats/RpcUrls.t.sol b/testdata/cheats/RpcUrls.t.sol new file mode 100644 index 000000000000..faea5795096d --- /dev/null +++ b/testdata/cheats/RpcUrls.t.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: Unlicense +pragma solidity >=0.8.0; + +import "ds-test/test.sol"; +import "./Cheats.sol"; + +contract RpcUrlTest is DSTest { + Cheats constant cheats = Cheats(HEVM_ADDRESS); + + // returns the correct url + function testCanGetRpcUrl() public { + string memory url = cheats.rpcUrl("rpcAlias"); + assertEq(url, "https://eth-mainnet.alchemyapi.io/v2/Lc7oIGYeL_QvInzI0Wiu_pOZZDEKBrdf"); + } + + // returns an error if env alias does not exists + function testRevertsOnMissingEnv() public { + cheats.expectRevert(); + string memory url = cheats.rpcUrl("rpcEnvAlias"); + } + + // can set env and return correct url + function testCanSetAndGetURLAndAllUrls() public { + // this will fail because alias is not set + cheats.expectRevert(); + string[2][] memory _urls = cheats.rpcUrls(); + + string memory url = cheats.rpcUrl("rpcAlias"); + cheats.setEnv("RPC_ENV_ALIAS", url); + string memory envUrl = cheats.rpcUrl("rpcEnvAlias"); + assertEq(url, envUrl); + + string[2][] memory allUrls = cheats.rpcUrls(); + assertEq(allUrls.length, 2); + + string[2] memory val = allUrls[0]; + assertEq(val[0], "rpcAlias"); + + string[2] memory env = allUrls[1]; + assertEq(env[0], "rpcEnvAlias"); + } +} \ No newline at end of file diff --git a/utils/Cargo.toml b/utils/Cargo.toml index 48d3799b6745..b65760fc9cb9 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -21,10 +21,10 @@ reqwest = { version = "0.11.8", default-features = false, features = ["json", "r rustc-hex = { version = "2.1.0", default-features = false } serde = "1.0.132" serde_json = { version = "1.0.67", default-features = false } -tokio = { version = "1.12.0", features = ["rt-multi-thread", "macros"] } +tokio = { version = "1", features = ["rt-multi-thread", "macros"] } rlp = "0.5.1" futures = "0.3.17" -tracing = "0.1.26" +tracing = "0.1" [dev-dependencies] ethers = { git = "https://github.com/gakonst/ethers-rs", default-features = false, features = ["solc-full"] } From 0c30cd1f7f7c1d3359026ba6f51590510c30e99c Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Mon, 4 Jul 2022 12:31:20 -0700 Subject: [PATCH 078/102] Delete run.rs --- cli/src/cmd/forge/run.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 cli/src/cmd/forge/run.rs diff --git a/cli/src/cmd/forge/run.rs b/cli/src/cmd/forge/run.rs deleted file mode 100644 index e69de29bb2d1..000000000000 From e9b707d5abfe9ae141be323c3fd6ddf813d74046 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 6 Jul 2022 18:44:04 +0200 Subject: [PATCH 079/102] work on roll fork --- evm/src/executor/backend/mod.rs | 70 ++++++++++++++++++++++++++++++-- evm/src/executor/fork/mod.rs | 2 +- evm/src/executor/fork/multi.rs | 72 +++++++++++++++++++++++---------- 3 files changed, 118 insertions(+), 26 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index ef4e4662cc58..c8de3f698ccc 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -131,14 +131,14 @@ impl Backend { /// if `fork` is `Some` this will launch with a `fork` database, otherwise with an in-memory /// database pub fn new(forks: MultiFork, fork: Option) -> Self { - let db = if let Some(f) = fork { + let (db, launched_with) = if let Some(f) = fork { let (id, fork) = forks.create_fork(f).expect("Unable to fork"); - 
CacheDB::new(BackendDatabase::Forked(fork, id)) + (CacheDB::new(BackendDatabase::Forked(fork.clone(), id.clone())), Some((id, fork))) } else { - CacheDB::new(BackendDatabase::InMemory(EmptyDB())) + (CacheDB::new(BackendDatabase::InMemory(EmptyDB())), None) }; - Self { forks, db, inner: Default::default() } + Self { forks, db, inner: BackendInner::new(launched_with) } } /// Creates a new instance with a `BackendDatabase::InMemory` cache layer for the `CacheDB` @@ -209,6 +209,35 @@ impl Backend { value == U256::one() } + /// Returns the `ForkId` that's currently used in the database, if fork mode is on + pub fn active_fork(&self) -> Option<&ForkId> { + self.db.db().as_fork() + } + + /// Ensures that an appropriate fork exits + /// + /// If `id` contains a requested `Fork` this will ensure it exits. + /// Otherwise this returns the currently active fork. + /// + /// # Errors + /// + /// Returns an error if the given `id` does not match any forks + /// + /// Returns an error if no fork exits + pub fn ensure_fork(&self, id: Option) -> eyre::Result { + if let Some(id) = id { + if self.inner.created_forks.contains_key(&id) { + return Ok(id) + } + eyre::bail!("Requested fork `{}` does not exit", id) + } + if let Some(id) = self.active_fork().cloned() { + Ok(id) + } else { + eyre::bail!("No fork active") + } + } + /// Executes the configured test call of the `env` without commiting state changes pub fn inspect_ref( &mut self, @@ -275,6 +304,8 @@ impl DatabaseExt for Backend { } fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result { + let id = self.ensure_fork(id)?; + todo!() } } @@ -349,6 +380,18 @@ pub enum BackendDatabase { Forked(SharedBackend, ForkId), } +// === impl BackendDatabase === + +impl BackendDatabase { + /// Returns the `ForkId` if in fork mode + pub fn as_fork(&self) -> Option<&ForkId> { + match self { + BackendDatabase::InMemory(_) => None, + BackendDatabase::Forked(_, id) => Some(id), + } + } +} + impl DatabaseRef for BackendDatabase { fn basic(&self, address: H160) -> AccountInfo { match self { @@ -382,6 +425,11 @@ impl DatabaseRef for BackendDatabase { /// Container type for various Backend related data #[derive(Debug, Clone, Default)] pub struct BackendInner { + /// Stores the `ForkId` of the fork the `Backend` launched with from the start. + /// + /// In other words if [`Backend::spawn()`] was called with a `CreateFork` command, to launch + /// directly in fork mode, this holds the corresponding `ForkId` of this fork. + pub launched_with_fork: Option, /// tracks all created forks pub created_forks: HashMap, /// Contains snapshots made at a certain point @@ -402,3 +450,17 @@ pub struct BackendInner { /// executed. E.g. the `_failed` variable of `DSTest` pub test_contract_context: Option
, } + +// === impl BackendInner === + +impl BackendInner { + /// Creates a new instance that tracks the fork used at launch + pub fn new(launched_with: Option<(ForkId, SharedBackend)>) -> Self { + let (launched_with_fork, created_forks) = if let Some((id, fork)) = launched_with { + (Some(id.clone()), HashMap::from([(id, fork)])) + } else { + (None, Default::default()) + }; + Self { launched_with_fork, created_forks, ..Default::default() } + } +} diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index 2db7c40cf9cb..df891a48b853 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -16,7 +16,7 @@ mod multi; pub use multi::{ForkId, MultiFork, MultiForkHandler}; /// Represents a _fork_ of a remote chain whose data is available only via the `url` endpoint. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct CreateFork { /// Whether to enable rpc storage caching for this fork pub enable_caching: bool, diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index 31c18fbc04eb..cf9d0b848e39 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -132,7 +132,7 @@ impl MultiFork { type Handler = BackendHandler>>>; -type CreateFuture = Pin> + Send>>; +type CreateFuture = Pin> + Send>>; type CreateSender = OneshotSender>; @@ -143,6 +143,8 @@ enum Request { CreateFork(Box, CreateSender), /// Returns the Fork backend for the `ForkId` if it exists GetFork(ForkId, OneshotSender>), + /// Adjusts the block that's being forked + RollFork(ForkId, U256, CreateSender), /// Shutdowns the entire `MultiForkHandler`, see `ShutDownMultiFork` ShutDown(OneshotSender<()>), } @@ -167,7 +169,7 @@ pub struct MultiForkHandler { pending_tasks: Vec, /// All created Forks in order to reuse them - forks: HashMap, + forks: HashMap, /// The retries to allow for new providers retries: u32, @@ -202,24 +204,29 @@ impl MultiForkHandler { self } + fn create_fork(&mut self, fork: CreateFork, sender: CreateSender) { + let fork_id = create_fork_id(&fork.url, fork.block); + if let Some(fork) = self.forks.get(&fork_id) { + let _ = sender.send(Ok((fork_id, fork.backend.clone()))); + } else { + let retries = self.retries; + let backoff = self.backoff; + // need to create a new fork + let task = Box::pin(async move { create_fork(fork, retries, backoff).await }); + self.pending_tasks.push(ForkTask::Create(task, fork_id, sender)); + } + } + fn on_request(&mut self, req: Request) { match req { - Request::CreateFork(fork, sender) => { - let fork_id = create_fork_id(&fork.url, fork.block); - if let Some(fork) = self.forks.get(&fork_id).cloned() { - let _ = sender.send(Ok((fork_id, fork))); - } else { - let retries = self.retries; - let backoff = self.backoff; - // need to create a new fork - let task = Box::pin(async move { create_fork(*fork, retries, backoff).await }); - self.pending_tasks.push(ForkTask::Create(task, fork_id, sender)); - } - } + Request::CreateFork(fork, sender) => self.create_fork(*fork, sender), Request::GetFork(fork_id, sender) => { - let fork = self.forks.get(&fork_id).cloned(); + let fork = self.forks.get(&fork_id).map(|f| f.backend.clone()); let _ = sender.send(fork); } + Request::RollFork(fork_id, block, sender) => { + trace!(target: "fork::multi", "rolling {} to {}", fork_id, block); + } Request::ShutDown(sender) => { trace!(target: "fork::multi", "received shutdown signal"); // we're emptying all fork backends, this way we ensure all caches get flushed @@ -263,8 +270,9 @@ impl Future for MultiForkHandler { match resp { 
Ok((fork, handler)) => { pin.handlers.push((id.clone(), handler)); - pin.forks.insert(id.clone(), fork.clone()); - let _ = sender.send(Ok((id, fork))); + let backend = fork.backend.clone(); + pin.forks.insert(id.clone(), fork); + let _ = sender.send(Ok((id, backend))); } Err(err) => { let _ = sender.send(Err(err)); @@ -304,7 +312,7 @@ impl Future for MultiForkHandler { !pin.forks.is_empty() { trace!(target: "fork::multi", "tick flushing caches"); - let forks = pin.forks.values().cloned().collect::>(); + let forks = pin.forks.values().map(|f| f.backend.clone()).collect::>(); // flush this on new thread to not block here std::thread::spawn(move || { forks.into_iter().for_each(|fork| fork.flush_cache()); @@ -315,6 +323,25 @@ impl Future for MultiForkHandler { } } +/// Tracks the created Fork +struct CreatedFork { + /// How the fork was initially created + opts: CreateFork, + /// Copy of the sender + backend: SharedBackend, + /// How many different consumers there are, since a `SharedBacked` can be used by multiple + /// consumers + num_senders: usize, +} + +// === impl CreatedFork === + +impl CreatedFork { + pub fn new(opts: CreateFork, backend: SharedBackend) -> Self { + Self { opts, backend, num_senders: 1 } + } +} + /// A type that's used to signaling the `MultiForkHandler` when it's time to shutdown. /// /// This is essentially a sync on drop, so that the `MultiForkHandler` can flush all rpc cashes @@ -352,8 +379,8 @@ async fn create_fork( fork: CreateFork, retries: u32, backoff: u64, -) -> eyre::Result<(SharedBackend, Handler)> { - let CreateFork { enable_caching, url, block: block_number, env, chain_id } = fork; +) -> eyre::Result<(CreatedFork, Handler)> { + let CreateFork { enable_caching, url, block: block_number, env, chain_id } = fork.clone(); let provider = Arc::new(Provider::>::new_client( url.clone().as_str(), retries, @@ -384,5 +411,8 @@ async fn create_fork( }; let db = BlockchainDb::new(meta, cache_path); - Ok(SharedBackend::new(provider, db, Some(BlockId::Number(BlockNumber::Number(number.into()))))) + let (backend, handler) = + SharedBackend::new(provider, db, Some(BlockId::Number(BlockNumber::Number(number.into())))); + let fork = CreatedFork::new(fork, backend); + Ok((fork, handler)) } From 16a54352a6905cf881c69280d61ab45728c8d470 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 6 Jul 2022 19:43:29 +0200 Subject: [PATCH 080/102] refactor: use local fork ids as ints --- evm/src/executor/abi.rs | 8 +- evm/src/executor/backend/fuzz.rs | 17 +-- evm/src/executor/backend/mod.rs | 106 +++++++++++++----- evm/src/executor/fork/multi.rs | 24 +++- evm/src/executor/inspector/cheatcodes/fork.rs | 4 +- 5 files changed, 107 insertions(+), 52 deletions(-) diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index efb560bd3df8..1c9581360087 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -84,11 +84,11 @@ ethers::contract::abigen!( toString(bool) snapshot()(uint256) revertTo(uint256)(bool) - createFork(string,uint256)(string) - createFork(string)(string) - selectFork(string) + createFork(string,uint256)(uint256) + createFork(string)(uint256) + selectFork(uint256) rollFork(uint256)(bool) - rollFork(string,uint256)(bool) + rollFork(uint256,uint256)(bool) rpcUrl(string)(string) rpcUrls(string)(string[2][]) ]"#, diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index dc853e569be9..43bae45ffc1e 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -157,21 +157,14 @@ impl<'a> DatabaseExt for 
FuzzBackendWrapper<'a> { } } - fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { + fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { let (id, fork) = self.backend.forks.create_fork(fork)?; - self.inner.created_forks.insert(id.clone(), fork); + let id = self.inner.insert_new_fork(id, fork); Ok(id) } - fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { - let id = id.into(); - let fork = self - .inner - .created_forks - .get(&id) - .or_else(|| self.backend.created_forks().get(&id)) - .cloned() - .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; + fn select_fork(&mut self, id: U256) -> eyre::Result<()> { + let fork = self.inner.ensure_backend(id).cloned()?; if let Some(ref mut db) = self.db_override { *db.db_mut() = BackendDatabase::Forked(fork, id); } else { @@ -182,7 +175,7 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { Ok(()) } - fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result { + fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result<()> { todo!() } } diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index c8de3f698ccc..18d9f91d636c 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -42,7 +42,7 @@ pub trait DatabaseExt: Database { fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option; /// Creates a new fork but does _not_ select it - fn create_fork(&mut self, fork: CreateFork) -> eyre::Result; + fn create_fork(&mut self, fork: CreateFork) -> eyre::Result; /// Selects the fork's state /// @@ -51,14 +51,16 @@ pub trait DatabaseExt: Database { /// # Errors /// /// Returns an error if no fork with the given `id` exists - fn select_fork(&mut self, id: impl Into) -> eyre::Result<()>; + fn select_fork(&mut self, id: U256) -> eyre::Result<()>; /// Updates the fork to given block number. /// /// This will essentially create a new fork at the given block height. /// - /// Returns false if no matching fork was found. - fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result; + /// # Errors + /// + /// Returns an error if not matching fork was found. + fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result<()>; } /// Provides the underlying `revm::Database` implementation. 
@@ -133,7 +135,7 @@ impl Backend { pub fn new(forks: MultiFork, fork: Option) -> Self { let (db, launched_with) = if let Some(f) = fork { let (id, fork) = forks.create_fork(f).expect("Unable to fork"); - (CacheDB::new(BackendDatabase::Forked(fork.clone(), id.clone())), Some((id, fork))) + (CacheDB::new(BackendDatabase::Forked(fork.clone(), U256::zero())), Some((id, fork))) } else { (CacheDB::new(BackendDatabase::InMemory(EmptyDB())), None) }; @@ -210,7 +212,7 @@ impl Backend { } /// Returns the `ForkId` that's currently used in the database, if fork mode is on - pub fn active_fork(&self) -> Option<&ForkId> { + pub fn active_fork(&self) -> Option { self.db.db().as_fork() } @@ -224,14 +226,14 @@ impl Backend { /// Returns an error if the given `id` does not match any forks /// /// Returns an error if no fork exits - pub fn ensure_fork(&self, id: Option) -> eyre::Result { + pub fn ensure_fork(&self, id: Option) -> eyre::Result { if let Some(id) = id { - if self.inner.created_forks.contains_key(&id) { + if self.inner.issued_local_fork_ids.contains_key(&id) { return Ok(id) } eyre::bail!("Requested fork `{}` does not exit", id) } - if let Some(id) = self.active_fork().cloned() { + if let Some(id) = self.active_fork() { Ok(id) } else { eyre::bail!("No fork active") @@ -285,28 +287,24 @@ impl DatabaseExt for Backend { } } - fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { + fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { let (id, fork) = self.forks.create_fork(fork)?; - self.inner.created_forks.insert(id.clone(), fork); + let id = self.inner.insert_new_fork(id, fork); Ok(id) } - fn select_fork(&mut self, id: impl Into) -> eyre::Result<()> { - let id = id.into(); - let fork = self - .inner - .created_forks - .get(&id) - .cloned() - .ok_or_else(|| eyre::eyre!("Fork Id {} does not exist", id))?; + fn select_fork(&mut self, id: U256) -> eyre::Result<()> { + let fork = self.inner.ensure_backend(id).cloned()?; *self.db.db_mut() = BackendDatabase::Forked(fork, id); Ok(()) } - fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result { + fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result<()> { let id = self.ensure_fork(id)?; - - todo!() + let (id, fork) = + self.forks.roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; + self.inner.created_forks.insert(id.clone(), fork); + Ok(()) } } @@ -377,17 +375,17 @@ pub enum BackendDatabase { InMemory(EmptyDB), /// A [revm::Database] that forks of a remote location and can have multiple consumers of the /// same data - Forked(SharedBackend, ForkId), + Forked(SharedBackend, U256), } // === impl BackendDatabase === impl BackendDatabase { /// Returns the `ForkId` if in fork mode - pub fn as_fork(&self) -> Option<&ForkId> { + pub fn as_fork(&self) -> Option { match self { BackendDatabase::InMemory(_) => None, - BackendDatabase::Forked(_, id) => Some(id), + BackendDatabase::Forked(_, id) => Some(*id), } } } @@ -430,6 +428,18 @@ pub struct BackendInner { /// In other words if [`Backend::spawn()`] was called with a `CreateFork` command, to launch /// directly in fork mode, this holds the corresponding `ForkId` of this fork. pub launched_with_fork: Option, + /// This tracks numeric fork ids and the `ForkId` used by the handler. + /// + /// This is neceessary, because there can be multiple `Backends` associated with a single + /// `ForkId` which is only a pair of endpoint + block. Since an existing fork can be + /// modified (e.g. 
`roll_fork`), but this should only affect the fork that's unique for the + /// test and not the `ForkId` + /// + /// This ensures we can treat forks as unique from the context of a test, so rolling to another + /// is basically creating(or reusing) another `ForkId` that's then mapped to the previous + /// issued _local_ numeric identifier, that remains constant, even if the underlying fork + /// backend changes. + pub issued_local_fork_ids: HashMap, /// tracks all created forks pub created_forks: HashMap, /// Contains snapshots made at a certain point @@ -449,6 +459,8 @@ pub struct BackendInner { /// This address can be used to inspect the state of the contract when a test is being /// executed. E.g. the `_failed` variable of `DSTest` pub test_contract_context: Option
, + /// Tracks numeric identifiers for forks + pub next_fork_id: U256, } // === impl BackendInner === @@ -456,11 +468,43 @@ pub struct BackendInner { impl BackendInner { /// Creates a new instance that tracks the fork used at launch pub fn new(launched_with: Option<(ForkId, SharedBackend)>) -> Self { - let (launched_with_fork, created_forks) = if let Some((id, fork)) = launched_with { - (Some(id.clone()), HashMap::from([(id, fork)])) - } else { - (None, Default::default()) - }; - Self { launched_with_fork, created_forks, ..Default::default() } + launched_with.map(|(id, backend)| Self::forked(id, backend)).unwrap_or_default() + } + + pub fn forked(id: ForkId, fork: SharedBackend) -> Self { + let launched_with_fork = Some(id.clone()); + let mut backend = Self { launched_with_fork, ..Default::default() }; + backend.insert_new_fork(id, fork); + backend + } + + pub fn ensure_fork_id(&self, id: U256) -> eyre::Result<&ForkId> { + self.issued_local_fork_ids + .get(&id) + .ok_or_else(|| eyre::eyre!("No matching fork found for {}", id)) + } + + /// Ensures that the `SharedBackend` for the given `id` exits + pub fn ensure_backend(&self, id: U256) -> eyre::Result<&SharedBackend> { + self.get_backend(id).ok_or_else(|| eyre::eyre!("No matching fork found for {}", id)) + } + + /// Returns the matching `SharedBackend` for the givne id + fn get_backend(&self, id: U256) -> Option<&SharedBackend> { + self.issued_local_fork_ids.get(&id).and_then(|id| self.created_forks.get(id)) + } + + /// Issues a new local fork identifier + pub fn insert_new_fork(&mut self, fork: ForkId, backend: SharedBackend) -> U256 { + self.created_forks.insert(fork.clone(), backend); + let id = self.next_id(); + self.issued_local_fork_ids.insert(id, fork); + id + } + + fn next_id(&mut self) -> U256 { + let id = self.next_fork_id; + self.next_fork_id += U256::one(); + id } } diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index cf9d0b848e39..da5c9e9f41f8 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -119,6 +119,16 @@ impl MultiFork { rx.recv()? } + /// Rolls the block of the fork + /// + /// If no matching fork backend exists it will be created + pub fn roll_fork(&self, fork: ForkId, block: u64) -> eyre::Result<(ForkId, SharedBackend)> { + let (sender, rx) = oneshot_channel(); + let req = Request::RollFork(fork, block, sender); + self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; + rx.recv()? + } + /// Returns the corresponding fork if it exist /// /// Returns `None` if no matching fork backend is available. 
@@ -144,7 +154,7 @@ enum Request { /// Returns the Fork backend for the `ForkId` if it exists GetFork(ForkId, OneshotSender>), /// Adjusts the block that's being forked - RollFork(ForkId, U256, CreateSender), + RollFork(ForkId, u64, CreateSender), /// Shutdowns the entire `MultiForkHandler`, see `ShutDownMultiFork` ShutDown(OneshotSender<()>), } @@ -206,7 +216,8 @@ impl MultiForkHandler { fn create_fork(&mut self, fork: CreateFork, sender: CreateSender) { let fork_id = create_fork_id(&fork.url, fork.block); - if let Some(fork) = self.forks.get(&fork_id) { + if let Some(fork) = self.forks.get_mut(&fork_id) { + fork.num_senders += 1; let _ = sender.send(Ok((fork_id, fork.backend.clone()))); } else { let retries = self.retries; @@ -225,7 +236,14 @@ impl MultiForkHandler { let _ = sender.send(fork); } Request::RollFork(fork_id, block, sender) => { - trace!(target: "fork::multi", "rolling {} to {}", fork_id, block); + if let Some(fork) = self.forks.get(&fork_id) { + trace!(target: "fork::multi", "rolling {} to {}", fork_id, block); + let mut opts = fork.opts.clone(); + opts.block = block.into(); + self.create_fork(opts, sender) + } else { + let _ = sender.send(Err(eyre::eyre!("No matching fork exits for {}", fork_id))); + } } Request::ShutDown(sender) => { trace!(target: "fork::multi", "received shutdown signal"); diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index ff40e2a4ac24..927135b5ca60 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -25,12 +25,12 @@ pub fn apply( Err(err) => Err(err.to_string().encode().into()), }, HEVMCalls::RollFork0(fork) => match data.db.roll_fork(fork.0, None) { - Ok(b) => Ok(b.encode().into()), + Ok(_) => Ok(Default::default()), Err(err) => Err(err.to_string().encode().into()), }, HEVMCalls::RollFork1(fork) => { match data.db.roll_fork(fork.1, Some(fork.0.clone().into())) { - Ok(b) => Ok(b.encode().into()), + Ok(_) => Ok(Default::default()), Err(err) => Err(err.to_string().encode().into()), } } From 8479b7d0b1ee0ce006a9d8c6201324f338c6996f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 16:02:49 +0200 Subject: [PATCH 081/102] test: update fork test --- testdata/cheats/Fork.t.sol | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testdata/cheats/Fork.t.sol b/testdata/cheats/Fork.t.sol index cac5700bce46..533c80086236 100644 --- a/testdata/cheats/Fork.t.sol +++ b/testdata/cheats/Fork.t.sol @@ -17,8 +17,8 @@ contract ForkTest is DSTest { IWETH WETH = IWETH(WETH_TOKEN_ADDR); - string forkA; - string forkB; + uint256 forkA; + uint256 forkB; // this will create two _different_ forks during setup function setUp() public { @@ -28,7 +28,7 @@ contract ForkTest is DSTest { // ensures forks use different ids function testForkIdDiffer() public { - assert(keccak256(bytes(forkA)) != keccak256(bytes(forkB))); + assert(forkA != forkB); } // ensures forks use different ids From c19f0c8780c0ad6d07da024258215a15d47f82a8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 16:36:26 +0200 Subject: [PATCH 082/102] extend trait --- evm/src/executor/backend/fuzz.rs | 39 +++++++++++++++++- evm/src/executor/backend/mod.rs | 70 +++++++++++++++++++------------- 2 files changed, 78 insertions(+), 31 deletions(-) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 43bae45ffc1e..d8896d7a3bc8 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ 
-164,7 +164,11 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { } fn select_fork(&mut self, id: U256) -> eyre::Result<()> { - let fork = self.inner.ensure_backend(id).cloned()?; + let fork = self + .inner + .ensure_backend(id) + .or_else(|_| self.backend.inner.ensure_backend(id)) + .cloned()?; if let Some(ref mut db) = self.db_override { *db.db_mut() = BackendDatabase::Forked(fork, id); } else { @@ -176,7 +180,38 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { } fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result<()> { - todo!() + let id = self.ensure_fork(id)?; + let (id, fork) = self + .backend + .forks + .roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; + // this will update the local mapping + self.inner.created_forks.insert(id.clone(), fork); + Ok(()) + } + + fn active_fork(&self) -> Option { + self.active_db().db().as_fork() + } + + fn ensure_fork(&self, id: Option) -> eyre::Result { + if let Some(id) = id { + if self.inner.issued_local_fork_ids.contains_key(&id) || + self.backend.inner.issued_local_fork_ids.contains_key(&id) + { + return Ok(id) + } + eyre::bail!("Requested fork `{}` does not exist", id) + } + if let Some(id) = self.active_fork() { + Ok(id) + } else { + eyre::bail!("No fork active") + } + } + + fn ensure_fork_id(&self, id: U256) -> eyre::Result<&ForkId> { + self.inner.ensure_fork_id(id).or_else(|_| self.backend.ensure_fork_id(id)) + } } diff --git a/evm/src/executor/backend/mod.rs index 18d9f91d636c..1c5d9e442f82 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -61,6 +61,24 @@ pub trait DatabaseExt: Database { /// /// Returns an error if not matching fork was found. fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result<()>; + + /// Returns the `ForkId` that's currently used in the database, if fork mode is on + fn active_fork(&self) -> Option; + + /// Ensures that an appropriate fork exists + /// + /// If `id` contains a requested `Fork` this will ensure it exists. + /// Otherwise this returns the currently active fork. + /// + /// # Errors + /// + /// Returns an error if the given `id` does not match any forks + /// + /// Returns an error if no fork exists + fn ensure_fork(&self, id: Option) -> eyre::Result; + + /// Ensures that a corresponding `ForkId` exists for the given local `id` + fn ensure_fork_id(&self, id: U256) -> eyre::Result<&ForkId>; } /// Provides the underlying `revm::Database` implementation. @@ -211,35 +229,6 @@ impl Backend { value == U256::one() } - /// Returns the `ForkId` that's currently used in the database, if fork mode is on - pub fn active_fork(&self) -> Option { - self.db.db().as_fork() - } - - /// Ensures that an appropriate fork exits - /// - /// If `id` contains a requested `Fork` this will ensure it exits. - /// Otherwise this returns the currently active fork.
- /// - /// # Errors - /// - /// Returns an error if the given `id` does not match any forks - /// - /// Returns an error if no fork exits - pub fn ensure_fork(&self, id: Option) -> eyre::Result { - if let Some(id) = id { - if self.inner.issued_local_fork_ids.contains_key(&id) { - return Ok(id) - } - eyre::bail!("Requested fork `{}` does not exit", id) - } - if let Some(id) = self.active_fork() { - Ok(id) - } else { - eyre::bail!("No fork active") - } - } - /// Executes the configured test call of the `env` without commiting state changes pub fn inspect_ref( &mut self, @@ -303,9 +292,32 @@ impl DatabaseExt for Backend { let id = self.ensure_fork(id)?; let (id, fork) = self.forks.roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; + // this will update the local mapping self.inner.created_forks.insert(id.clone(), fork); Ok(()) } + + fn active_fork(&self) -> Option { + self.db.db().as_fork() + } + + fn ensure_fork(&self, id: Option) -> eyre::Result { + if let Some(id) = id { + if self.inner.issued_local_fork_ids.contains_key(&id) { + return Ok(id) + } + eyre::bail!("Requested fork `{}` does not exit", id) + } + if let Some(id) = self.active_fork() { + Ok(id) + } else { + eyre::bail!("No fork active") + } + } + + fn ensure_fork_id(&self, id: U256) -> eyre::Result<&ForkId> { + self.inner.ensure_fork_id(id) + } } impl DatabaseRef for Backend { From 8d8a8db3ac8135eeeb6133568f6be20f8213ed5a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 17:07:15 +0200 Subject: [PATCH 083/102] fix: migrate new revm api --- Cargo.lock | 264 ++++++++-------------- Cargo.toml | 5 +- anvil/src/eth/backend/db.rs | 4 +- anvil/src/eth/backend/mem/in_memory_db.rs | 6 +- evm/src/executor/fork/database.rs | 6 +- evm/src/executor/mod.rs | 8 +- evm/src/fuzz/strategies/state.rs | 2 +- 7 files changed, 114 insertions(+), 181 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 03f124627415..4fb0f972451c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,13 +439,11 @@ dependencies = [ [[package]] name = "blake2" -version = "0.9.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" +checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest 0.10.3", ] [[package]] @@ -465,7 +463,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -477,7 +475,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", "generic-array 0.14.5", ] @@ -499,12 +496,6 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "bs58" version = "0.4.0" @@ -759,66 +750,66 @@ dependencies = [ [[package]] name = "coins-bip32" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "471b39eadc9323de375dce5eff149a5a1ebd21c67f1da34a56f87ee62191d4ea" +checksum = 
"634c509653de24b439672164bbf56f5f582a2ab0e313d3b0f6af0b7345cf2560" dependencies = [ "bincode", "bs58", "coins-core", - "digest 0.9.0", + "digest 0.10.3", "getrandom 0.2.6", - "hmac 0.11.0", + "hmac", "k256", "lazy_static", "serde", - "sha2 0.9.9", + "sha2 0.10.2", "thiserror", ] [[package]] name = "coins-bip39" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f473ea37dfc9d2cb94fdde50c3d41f28c3f384b367573d66386fea38d76d466" +checksum = "2a11892bcac83b4c6e95ab84b5b06c76d9d70ad73548dd07418269c5c7977171" dependencies = [ "bitvec 0.17.4", "coins-bip32", "getrandom 0.2.6", "hex", - "hmac 0.11.0", - "pbkdf2 0.8.0", + "hmac", + "pbkdf2 0.11.0", "rand 0.8.5", - "sha2 0.9.9", + "sha2 0.10.2", "thiserror", ] [[package]] name = "coins-core" -version = "0.2.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d257d975731955ee86fa7f348000c3fea09c262e84c70c11e994a85aa4f467a7" +checksum = "c94090a6663f224feae66ab01e41a2555a8296ee07b5f20dab8888bdefc9f617" dependencies = [ "base58check", "base64 0.12.3", "bech32", "blake2", - "digest 0.9.0", + "digest 0.10.3", "generic-array 0.14.5", "hex", - "ripemd160", + "ripemd", "serde", "serde_derive", - "sha2 0.9.9", - "sha3 0.9.1", + "sha2 0.10.2", + "sha3", "thiserror", ] [[package]] name = "coins-ledger" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfa8a730d02735d8d53888a95d8f33aaa9dda9979862de113202421db939b2a" +checksum = "d9766e413812861a04ceb82c8008e7fea9fe75845b68ed41241c34274702ed9d" dependencies = [ "async-trait", "blake2b_simd", @@ -942,9 +933,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" [[package]] name = "constant_time_eq" @@ -1111,9 +1102,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.3.2" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" dependencies = [ "generic-array 0.14.5", "rand_core 0.6.3", @@ -1131,26 +1122,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array 0.14.5", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.5", - "subtle", -] - [[package]] name = "ctor" version = "0.1.22" @@ -1210,11 +1181,12 @@ dependencies = [ [[package]] name = "der" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" dependencies = [ "const-oid", + "zeroize", ] [[package]] @@ -1323,9 +1295,9 @@ checksum = 
"453440c271cf5577fd2a40e4942540cb7d0d2f85e27c8d07dd0023c925a67541" [[package]] name = "ecdsa" -version = "0.13.4" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +checksum = "3bd46e0c364655e5baf2f5e99b603e7a09905da9966d7928d7470af393b28670" dependencies = [ "der", "elliptic-curve", @@ -1341,16 +1313,18 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "elliptic-curve" -version = "0.11.12" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +checksum = "c47abd0a791d2ac0c7aa1118715f85b83689e4522c4e3a244e159d4fc9848a8d" dependencies = [ "base16ct", "crypto-bigint", "der", + "digest 0.10.3", "ff", "generic-array 0.14.5", "group", + "pkcs8", "rand_core 0.6.3", "sec1", "subtle", @@ -1411,14 +1385,14 @@ dependencies = [ "ctr", "digest 0.10.3", "hex", - "hmac 0.12.1", + "hmac", "pbkdf2 0.10.1", "rand 0.8.5", "scrypt", "serde", "serde_json", "sha2 0.10.2", - "sha3 0.10.1", + "sha3", "thiserror", "uuid", ] @@ -1435,7 +1409,7 @@ dependencies = [ "regex", "serde", "serde_json", - "sha3 0.10.1", + "sha3", "thiserror", "uint", ] @@ -1470,7 +1444,7 @@ dependencies = [ [[package]] name = "ethers" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "ethers-addressbook", "ethers-contract", @@ -1485,7 +1459,7 @@ dependencies = [ [[package]] name = "ethers-addressbook" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "ethers-core", "once_cell", @@ -1496,7 +1470,7 @@ dependencies = [ [[package]] name = "ethers-contract" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "ethers-contract-abigen", "ethers-contract-derive", @@ -1514,7 +1488,7 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "Inflector", "cfg-if 1.0.0", @@ -1536,7 +1510,7 @@ dependencies = [ [[package]] name = "ethers-contract-derive" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "ethers-contract-abigen", "ethers-core", @@ -1550,7 +1524,7 @@ dependencies = [ [[package]] name = "ethers-core" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "arrayvec 0.7.2", "bytes", @@ -1581,7 +1555,7 @@ dependencies = [ [[package]] name = "ethers-etherscan" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = 
"git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "ethers-core", "getrandom 0.2.6", @@ -1597,7 +1571,7 @@ dependencies = [ [[package]] name = "ethers-middleware" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "async-trait", "ethers-contract", @@ -1621,7 +1595,7 @@ dependencies = [ [[package]] name = "ethers-providers" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "async-trait", "auto_impl 1.0.1", @@ -1657,7 +1631,7 @@ dependencies = [ [[package]] name = "ethers-signers" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "async-trait", "coins-bip32", @@ -1672,7 +1646,7 @@ dependencies = [ "home", "rand 0.8.5", "semver", - "sha2 0.9.9", + "sha2 0.10.2", "thiserror", "trezor-client", ] @@ -1680,7 +1654,7 @@ dependencies = [ [[package]] name = "ethers-solc" version = "0.13.0" -source = "git+https://github.com/gakonst/ethers-rs#57f6d59b39f1263970fcf327a45498208f30398d" +source = "git+https://github.com/gakonst/ethers-rs#fb3fb161c060b2bc5e3b8eab0a24c72f399cde97" dependencies = [ "cfg-if 1.0.0", "colored", @@ -1695,14 +1669,14 @@ dependencies = [ "md-5", "num_cpus", "once_cell", - "path-slash 0.1.5", + "path-slash", "rand 0.8.5", "rayon", "regex", "semver", "serde", "serde_json", - "sha2 0.9.9", + "sha2 0.10.2", "solang-parser", "svm-rs", "svm-rs-builds", @@ -1803,9 +1777,9 @@ dependencies = [ [[package]] name = "ff" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" +checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" dependencies = [ "rand_core 0.6.3", "subtle", @@ -2048,7 +2022,7 @@ dependencies = [ "figment", "globset", "number_prefix", - "path-slash 0.2.0", + "path-slash", "pretty_assertions", "regex", "semver", @@ -2391,9 +2365,9 @@ dependencies = [ [[package]] name = "group" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" +checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" dependencies = [ "ff", "rand_core 0.6.3", @@ -2503,16 +2477,6 @@ dependencies = [ "rusb", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -2823,16 +2787,15 @@ dependencies = [ [[package]] name = "k256" -version = "0.10.4" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +checksum = "2c8a5a96d92d849c4499d99461da81c9cdc1467418a8ed2aaeb407e8d85940ed" dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sec1", - "sha2 0.9.9", - "sha3 0.9.1", + "sha2 0.10.2", + 
"sha3", ] [[package]] @@ -3496,9 +3459,9 @@ dependencies = [ [[package]] name = "password-hash" -version = "0.2.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e0b28ace46c5a396546bcf443bf422b57049617433d8854227352a4a9b24e7" +checksum = "1d791538a6dcc1e7cb7fe6f6b58aca40e7f79403c45b2bc274008b5e647af1d8" dependencies = [ "base64ct", "rand_core 0.6.3", @@ -3507,21 +3470,15 @@ dependencies = [ [[package]] name = "password-hash" -version = "0.3.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d791538a6dcc1e7cb7fe6f6b58aca40e7f79403c45b2bc274008b5e647af1d8" +checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", "rand_core 0.6.3", "subtle", ] -[[package]] -name = "path-slash" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "498a099351efa4becc6a19c72aa9270598e8fd274ca47052e37455241c88b696" - [[package]] name = "path-slash" version = "0.2.0" @@ -3530,26 +3487,25 @@ checksum = "c54014ba3c1880122928735226f78b6f5bf5bd1fed15e41e92cf7aa20278ce28" [[package]] name = "pbkdf2" -version = "0.8.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" +checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" dependencies = [ - "base64ct", - "crypto-mac 0.11.1", - "hmac 0.11.0", - "password-hash 0.2.3", - "sha2 0.9.9", + "digest 0.10.3", + "hmac", + "password-hash 0.3.2", + "sha2 0.10.2", ] [[package]] name = "pbkdf2" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest 0.10.3", - "hmac 0.12.1", - "password-hash 0.3.2", + "hmac", + "password-hash 0.4.2", "sha2 0.10.2", ] @@ -3692,18 +3648,18 @@ checksum = "db8bcd96cb740d03149cbad5518db9fd87126a10ab519c011893b1754134c468" [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" dependencies = [ "proc-macro2", "quote", @@ -3724,13 +3680,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ "der", "spki", - "zeroize", ] [[package]] @@ -4132,8 +4087,6 @@ dependencies = [ [[package]] name = "revm" version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b60030444003ac25474f5281e7e91f15e8475c173b729aac1c10aced56b3adac" dependencies = [ "arrayref", "auto_impl 
1.0.1", @@ -4145,14 +4098,12 @@ dependencies = [ "revm_precompiles", "rlp", "serde", - "sha3 0.10.1", + "sha3", ] [[package]] name = "revm_precompiles" version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6aae8f44783ef6ff39fc22c9c999dfa0e17b79d663b752730c02a025935185" dependencies = [ "bytes", "k256", @@ -4160,18 +4111,18 @@ dependencies = [ "primitive-types", "ripemd", "sha2 0.10.2", - "sha3 0.10.1", + "sha3", "substrate-bn", ] [[package]] name = "rfc6979" -version = "0.1.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32" dependencies = [ "crypto-bigint", - "hmac 0.11.0", + "hmac", "zeroize", ] @@ -4199,17 +4150,6 @@ dependencies = [ "digest 0.10.3", ] -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - [[package]] name = "rlp" version = "0.5.1" @@ -4368,7 +4308,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e73d6d7c6311ebdbd9184ad6c4447b2f36337e327bda107d3ba9e3c374f9d325" dependencies = [ - "hmac 0.12.1", + "hmac", "password-hash 0.3.2", "pbkdf2 0.10.1", "salsa20", @@ -4387,10 +4327,11 @@ dependencies = [ [[package]] name = "sec1" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ + "base16ct", "der", "generic-array 0.14.5", "pkcs8", @@ -4581,6 +4522,7 @@ dependencies = [ "cfg-if 1.0.0", "cpufeatures", "digest 0.10.3", + "sha2-asm", ] [[package]] @@ -4592,18 +4534,6 @@ dependencies = [ "cc", ] -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - [[package]] name = "sha3" version = "0.10.1" @@ -4656,11 +4586,11 @@ dependencies = [ [[package]] name = "signature" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" dependencies = [ - "digest 0.9.0", + "digest 0.10.3", "rand_core 0.6.3", ] @@ -4719,9 +4649,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", "der", @@ -5849,7 +5779,7 @@ dependencies = [ "crc32fast", "crossbeam-utils", "flate2", - "hmac 0.12.1", + "hmac", "pbkdf2 0.10.1", "sha1", "time 0.3.9", diff --git a/Cargo.toml b/Cargo.toml index 83cc47c88cd7..fbac673fa41d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,4 +61,7 @@ debug = 0 #ethers-providers = { path = 
"../ethers-rs/ethers-providers" } #ethers-signers = { path = "../ethers-rs/ethers-signers" } #ethers-etherscan = { path = "../ethers-rs/ethers-etherscan" } -#ethers-solc = { path = "../ethers-rs/ethers-solc" } \ No newline at end of file +#ethers-solc = { path = "../ethers-rs/ethers-solc" } + +[patch.crates-io] +revm = { path = "../revm/crates/revm" } \ No newline at end of file diff --git a/anvil/src/eth/backend/db.rs b/anvil/src/eth/backend/db.rs index 0d1c9fe36d91..edec91a61c36 100644 --- a/anvil/src/eth/backend/db.rs +++ b/anvil/src/eth/backend/db.rs @@ -70,11 +70,11 @@ pub trait Db: DatabaseRef + Database + DatabaseCommit + Send + Sync { /// [Backend::pending_block()](crate::eth::backend::mem::Backend::pending_block()) impl Db for CacheDB { fn insert_account(&mut self, address: Address, account: AccountInfo) { - self.insert_cache(address, account) + self.insert_account_info(address, account) } fn set_storage_at(&mut self, address: Address, slot: U256, val: U256) { - self.insert_cache_storage(address, slot, val) + self.insert_account_storage(address, slot, val) } fn snapshot(&mut self) -> U256 { diff --git a/anvil/src/eth/backend/mem/in_memory_db.rs b/anvil/src/eth/backend/mem/in_memory_db.rs index 58982e878277..328abad7899d 100644 --- a/anvil/src/eth/backend/mem/in_memory_db.rs +++ b/anvil/src/eth/backend/mem/in_memory_db.rs @@ -13,11 +13,11 @@ pub use foundry_evm::executor::backend::MemDb; impl Db for MemDb { fn insert_account(&mut self, address: Address, account: AccountInfo) { - self.inner.insert_cache(address, account) + self.inner.insert_account_info(address, account) } fn set_storage_at(&mut self, address: Address, slot: U256, val: U256) { - self.inner.insert_cache_storage(address, slot, val) + self.inner.insert_account_storage(address, slot, val) } /// Creates a new snapshot @@ -39,7 +39,7 @@ impl Db for MemDb { } fn maybe_state_root(&self) -> Option { - Some(state_merkle_trie_root(self.inner.accounts())) + Some(state_merkle_trie_root(&self.inner.accounts)) } fn current_state(&self) -> StateDb { diff --git a/evm/src/executor/fork/database.rs b/evm/src/executor/fork/database.rs index 136f050386b5..bc40de245a4d 100644 --- a/evm/src/executor/fork/database.rs +++ b/evm/src/executor/fork/database.rs @@ -195,7 +195,7 @@ pub struct ForkDbSnapshot { impl ForkDbSnapshot { fn get_storage(&self, address: Address, index: U256) -> Option { - self.local.accounts().get(&address).and_then(|account| account.storage.get(&index)).copied() + self.local.accounts.get(&address).and_then(|account| account.storage.get(&index)).copied() } } @@ -204,7 +204,7 @@ impl ForkDbSnapshot { // We prioritize stored changed accounts/storage impl DatabaseRef for ForkDbSnapshot { fn basic(&self, address: Address) -> AccountInfo { - match self.local.accounts().get(&address) { + match self.local.accounts.get(&address) { Some(account) => account.info.clone(), None => { self.accounts.get(&address).cloned().unwrap_or_else(|| self.local.basic(address)) @@ -217,7 +217,7 @@ impl DatabaseRef for ForkDbSnapshot { } fn storage(&self, address: Address, index: U256) -> U256 { - match self.local.accounts().get(&address) { + match self.local.accounts.get(&address) { Some(account) => match account.storage.get(&index) { Some(entry) => *entry, None => self diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 2218ff6794b6..a4146f4e2940 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -206,7 +206,7 @@ where // Need to create a non-empty contract on the cheatcodes address so `extcodesize` checks // 
does not fail - db.insert_cache( + db.insert_account_info( CHEATCODE_ADDRESS, revm::AccountInfo { code: Some(Bytes::from_static(&[1])), ..Default::default() }, ); @@ -238,7 +238,7 @@ where let mut account = self.db.basic(address); account.balance = amount; - self.db.insert_cache(address, account); + self.db.insert_account_info(address, account); self } @@ -252,7 +252,7 @@ where let mut account = self.db.basic(address); account.nonce = nonce; - self.db.insert_cache(address, account); + self.db.insert_account_info(address, account); self } @@ -618,7 +618,7 @@ where ) -> bool { // Construct a new VM with the state changeset let mut db = CacheDB::new(EmptyDB()); - db.insert_cache(address, self.db.basic(address)); + db.insert_account_info(address, self.db.basic(address)); db.commit(state_changeset); let executor = Executor::new(db, self.env.clone(), self.inspector_config.clone(), self.gas_limit); diff --git a/evm/src/fuzz/strategies/state.rs b/evm/src/fuzz/strategies/state.rs index 4d7aedd1334e..fd6163502996 100644 --- a/evm/src/fuzz/strategies/state.rs +++ b/evm/src/fuzz/strategies/state.rs @@ -49,7 +49,7 @@ This is a bug, please open an issue: https://github.com/foundry-rs/foundry/issue /// Builds the initial [EvmFuzzState] from a database. pub fn build_initial_state(db: &CacheDB) -> EvmFuzzState { let mut state: HashSet<[u8; 32]> = HashSet::new(); - for (address, account) in db.accounts() { + for (address, account) in db.accounts.iter() { let info = db.basic(*address); // Insert basic account information From cf058362391e01b7027d552921ca1ad6d1ff75ae Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 17:17:05 +0200 Subject: [PATCH 084/102] patch revm git --- Cargo.lock | 2 ++ Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 4fb0f972451c..b73d7ef2c251 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4087,6 +4087,7 @@ dependencies = [ [[package]] name = "revm" version = "1.6.0" +source = "git+https://github.com/bluealloy/revm#51f3f8feb5625ca39a16492d569f6985e69121a7" dependencies = [ "arrayref", "auto_impl 1.0.1", @@ -4104,6 +4105,7 @@ dependencies = [ [[package]] name = "revm_precompiles" version = "1.0.0" +source = "git+https://github.com/bluealloy/revm#51f3f8feb5625ca39a16492d569f6985e69121a7" dependencies = [ "bytes", "k256", diff --git a/Cargo.toml b/Cargo.toml index fbac673fa41d..146d09f1fe01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,4 +64,4 @@ debug = 0 #ethers-solc = { path = "../ethers-rs/ethers-solc" } [patch.crates-io] -revm = { path = "../revm/crates/revm" } \ No newline at end of file +revm = { git = "https://github.com/bluealloy/revm", package = "revm" } \ No newline at end of file From fd65aa9f43a6744508364ae5027ee825bd9ebd5c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 17:26:32 +0200 Subject: [PATCH 085/102] use revm naming --- evm/src/executor/backend/mod.rs | 2 +- evm/src/executor/mod.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 6dde82caa3b2..dd2744c1b187 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -168,7 +168,7 @@ impl Backend { Self { forks: self.forks.clone(), db, inner: Default::default() } } - pub fn insert_cache(&mut self, address: H160, account: AccountInfo) { + pub fn insert_account_info(&mut self, address: H160, account: AccountInfo) { self.db.insert_account_info(address, account) } diff --git a/evm/src/executor/mod.rs 
b/evm/src/executor/mod.rs index 384c1c94e111..b3d79d69c8c6 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -123,7 +123,7 @@ impl Executor { let mut account = self.backend_mut().basic(address); account.balance = amount; - self.backend_mut().insert_cache(address, account); + self.backend_mut().insert_account_info(address, account); self } @@ -137,7 +137,7 @@ impl Executor { let mut account = self.backend_mut().basic(address); account.nonce = nonce; - self.backend_mut().insert_cache(address, account); + self.backend_mut().insert_account_info(address, account); self } @@ -457,7 +457,7 @@ impl Executor { ) -> bool { // Construct a new VM with the state changeset let mut backend = self.backend().clone_empty(); - backend.insert_cache(address, self.backend().basic(address)); + backend.insert_account_info(address, self.backend().basic(address)); backend.commit(state_changeset); let executor = Executor::new(backend, self.env.clone(), self.inspector_config.clone(), self.gas_limit); From 49920d85dd3e42f481e10b55570f8e8e1be49e61 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 19:34:09 +0200 Subject: [PATCH 086/102] fix: rpc urls api --- evm/src/executor/abi.rs | 4 ++-- testdata/cheats/Cheats.sol | 11 +++++------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index 7163f76aefc5..00caafc514ac 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -91,9 +91,9 @@ ethers::contract::abigen!( createFork(string)(uint256) selectFork(uint256) rollFork(uint256)(bool) - rollFork(uint256,uint256)(bool) + rollFork(uint256,uint256) rpcUrl(string)(string) - rpcUrls(string)(string[2][]) + rpcUrls()(string[2][]) ]"#, ); pub use hevm_mod::{HEVMCalls, HEVM_ABI}; diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 502270c2b92b..1d4a5db6227d 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -143,20 +143,19 @@ interface Cheats { // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. 
function revertTo(uint256) external returns(bool); // Creates a new fork with the given endpoint and block and returns the identifier of the fork - function createFork(string calldata,uint256) external returns(string memory); + function createFork(string calldata,uint256) external returns(uint256); // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork - function createFork(string calldata) external returns(string memory); + function createFork(string calldata) external returns(uint256); // takes a fork identifier created by `createFork` and changes the state - function selectFork(string calldata) external; + function selectFork(uint256) external; // forks the `block` variable from the given endpoint function forkBlockVariable(string calldata, uint256) external; // Updates the currently active fork to given block number // This is similar to `roll` but for the fork - // Returns false if no fork is active - function rollFork(uint256) external returns(bool); + function rollFork(uint256) external; // Updates the given fork to given block number // Returns false if no matching fork was found - function rollFork(string calldata, uint256) external returns(bool); + function rollFork(uint256, uint256) external returns(bool); /// Returns the RPC url for the given alias function rpcUrl(string calldata) external returns(string memory); /// Returns all rpc urls and their aliases `[alias, url][]` From 611f50ce662ab6a5f1930c0ce5e091f076b517ba Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 20:14:43 +0200 Subject: [PATCH 087/102] fix: return encoded errors --- evm/src/executor/backend/fuzz.rs | 2 +- evm/src/executor/backend/mod.rs | 2 +- .../executor/inspector/cheatcodes/config.rs | 7 ++-- evm/src/executor/inspector/cheatcodes/fork.rs | 32 ++++++++----------- forge/src/multi_runner.rs | 11 +++---- testdata/cheats/RpcUrls.t.sol | 4 +-- 6 files changed, 27 insertions(+), 31 deletions(-) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 6612e0b4b549..9753b6d01f44 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -186,7 +186,7 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { .forks .roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; // this will update the local mapping - self.inner.created_forks.insert(id.clone(), fork); + self.inner.created_forks.insert(id, fork); Ok(()) } diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index dd2744c1b187..19aa0dfa0bea 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -293,7 +293,7 @@ impl DatabaseExt for Backend { let (id, fork) = self.forks.roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; // this will update the local mapping - self.inner.created_forks.insert(id.clone(), fork); + self.inner.created_forks.insert(id, fork); Ok(()) } diff --git a/evm/src/executor/inspector/cheatcodes/config.rs b/evm/src/executor/inspector/cheatcodes/config.rs index 71f0b8fe3623..c6ef0fcdd935 100644 --- a/evm/src/executor/inspector/cheatcodes/config.rs +++ b/evm/src/executor/inspector/cheatcodes/config.rs @@ -1,9 +1,10 @@ use crate::executor::opts::EvmOpts; use bytes::Bytes; -use ethers::abi::AbiEncode; use foundry_config::{cache::StorageCachingConfig, Config, ResolvedRpcEndpoints}; use std::path::{Path, PathBuf}; +use super::util; + /// Additional, configurable context the `Cheatcodes` inspector has access to /// /// This is 
essentially a subset of various `Config` settings `Cheatcodes` needs to know. @@ -69,10 +70,10 @@ impl CheatsConfig { let url_or_alias = url_or_alias.into(); match self.rpc_endpoints.get(&url_or_alias) { Some(Ok(url)) => Ok(url.clone()), - Some(Err(err)) => Err(err.to_string().encode().into()), + Some(Err(err)) => Err(util::encode_error(err)), None => { if !url_or_alias.starts_with("http") && !url_or_alias.starts_with("ws") { - Err(format!("invalid rpc url {}", url_or_alias).encode().into()) + Err(util::encode_error(format!("invalid rpc url {}", url_or_alias))) } else { Ok(url_or_alias) } diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index 927135b5ca60..fabc4e6604f1 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -1,4 +1,4 @@ -use super::Cheatcodes; +use super::{util, Cheatcodes}; use crate::{ abi::HEVMCalls, executor::{backend::DatabaseExt, fork::CreateFork}, @@ -20,20 +20,19 @@ pub fn apply( HEVMCalls::CreateFork1(fork) => { create_fork(state, data, fork.0.clone(), fork.1.as_u64().into()) } - HEVMCalls::SelectFork(fork_id) => match data.db.select_fork(fork_id.0.clone()) { - Ok(_) => Ok(Bytes::new()), - Err(err) => Err(err.to_string().encode().into()), - }, - HEVMCalls::RollFork0(fork) => match data.db.roll_fork(fork.0, None) { - Ok(_) => Ok(Default::default()), - Err(err) => Err(err.to_string().encode().into()), - }, - HEVMCalls::RollFork1(fork) => { - match data.db.roll_fork(fork.1, Some(fork.0.clone().into())) { - Ok(_) => Ok(Default::default()), - Err(err) => Err(err.to_string().encode().into()), - } + HEVMCalls::SelectFork(fork_id) => data + .db + .select_fork(fork_id.0) + .map(|_| Default::default()) + .map_err(util::encode_error), + HEVMCalls::RollFork0(fork) => { + data.db.roll_fork(fork.0, None).map(|_| Default::default()).map_err(util::encode_error) } + HEVMCalls::RollFork1(fork) => data + .db + .roll_fork(fork.1, Some(fork.0)) + .map(|_| Default::default()) + .map_err(util::encode_error), HEVMCalls::RpcUrl(rpc) => state.config.get_rpc_url(&rpc.0).map(|url| url.encode().into()), HEVMCalls::RpcUrls(_) => { let mut urls = Vec::with_capacity(state.config.rpc_endpoints.len()); @@ -66,8 +65,5 @@ fn create_fork( chain_id: None, env: data.env.clone(), }; - match data.db.create_fork(fork) { - Ok(id) => Ok(id.encode().into()), - Err(err) => Err(err.to_string().encode().into()), - } + data.db.create_fork(fork).map_err(util::encode_error).map(|id| id.encode().into()) } diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 5bd2567484a9..89f3771968c0 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -389,9 +389,9 @@ mod tests { config.rpc_endpoints = rpc_endpoints(); base_runner() - .with_cheats_config(CheatsConfig::new(&config, &*EVM_OPTS)) + .with_cheats_config(CheatsConfig::new(&config, &EVM_OPTS)) .build( - &(*PROJECT).paths.root, + &PROJECT.paths.root, (*COMPILED).clone(), EVM_OPTS.evm_env_blocking(), EVM_OPTS.clone(), @@ -404,7 +404,7 @@ mod tests { let mut opts = EVM_OPTS.clone(); opts.verbosity = 5; base_runner() - .build(&(*PROJECT).paths.root, (*COMPILED).clone(), EVM_OPTS.evm_env_blocking(), opts) + .build(&PROJECT.paths.root, (*COMPILED).clone(), EVM_OPTS.evm_env_blocking(), opts) .unwrap() } @@ -420,7 +420,7 @@ mod tests { base_runner() .with_fork(fork) - .build(&(*LIBS_PROJECT).paths.root, (*COMPILED_WITH_LIBS).clone(), env, opts) + .build(&LIBS_PROJECT.paths.root, (*COMPILED_WITH_LIBS).clone(), env, opts) 
.unwrap() } @@ -1150,7 +1150,6 @@ Reason: `setEnv` failed to set an environment variable `{}={}`", for (_, SuiteResult { test_results, .. }) in suite_result { for (test_name, result) in test_results { - dbg!(test_name.clone()); let logs = decode_console_logs(&result.logs); assert!( result.success, @@ -1278,7 +1277,7 @@ Reason: `setEnv` failed to set an environment variable `{}={}`", fn test_doesnt_run_abstract_contract() { let mut runner = runner(); let results = runner - .test(&Filter::new(".*", ".*", format!(".*Abstract.t.sol").as_str()), None, true) + .test(&Filter::new(".*", ".*", ".*Abstract.t.sol".to_string().as_str()), None, true) .unwrap(); println!("{:?}", results.keys()); assert!(results diff --git a/testdata/cheats/RpcUrls.t.sol b/testdata/cheats/RpcUrls.t.sol index faea5795096d..2b0cef656dff 100644 --- a/testdata/cheats/RpcUrls.t.sol +++ b/testdata/cheats/RpcUrls.t.sol @@ -15,14 +15,14 @@ contract RpcUrlTest is DSTest { // returns an error if env alias does not exists function testRevertsOnMissingEnv() public { - cheats.expectRevert(); + cheats.expectRevert("Failed to resolve env var `RPC_ENV_ALIAS`: environment variable not found"); string memory url = cheats.rpcUrl("rpcEnvAlias"); } // can set env and return correct url function testCanSetAndGetURLAndAllUrls() public { // this will fail because alias is not set - cheats.expectRevert(); + cheats.expectRevert("Failed to resolve env var `RPC_ENV_ALIAS`: environment variable not found"); string[2][] memory _urls = cheats.rpcUrls(); string memory url = cheats.rpcUrl("rpcAlias"); From 1efefe2c47cfb258d7e55b5465546f75d04a3ad1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 20:26:10 +0200 Subject: [PATCH 088/102] chore: rustfmt --- evm/src/executor/inspector/cheatcodes/fork.rs | 8 +++----- forge/src/multi_runner.rs | 6 +----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index fabc4e6604f1..4a04f5f08a30 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -20,11 +20,9 @@ pub fn apply( HEVMCalls::CreateFork1(fork) => { create_fork(state, data, fork.0.clone(), fork.1.as_u64().into()) } - HEVMCalls::SelectFork(fork_id) => data - .db - .select_fork(fork_id.0) - .map(|_| Default::default()) - .map_err(util::encode_error), + HEVMCalls::SelectFork(fork_id) => { + data.db.select_fork(fork_id.0).map(|_| Default::default()).map_err(util::encode_error) + } HEVMCalls::RollFork0(fork) => { data.db.roll_fork(fork.0, None).map(|_| Default::default()).map_err(util::encode_error) } diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 89f3771968c0..8cb211626d8e 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -375,7 +375,6 @@ mod tests { }; use foundry_config::{Config, RpcEndpoint, RpcEndpoints}; use foundry_evm::trace::TraceKind; - use foundry_utils::init_tracing_subscriber; use std::env; /// Builds a base runner @@ -604,7 +603,6 @@ mod tests { #[test] fn test_logs() { - init_tracing_subscriber(); let mut runner = runner(); let results = runner.test(&Filter::new(".*", ".*", ".*logs"), None, true).unwrap(); @@ -1166,10 +1164,8 @@ Reason: `setEnv` failed to set an environment variable `{}={}`", #[test] fn test_cheats_local() { let mut runner = runner(); - // let suite_result = - // runner.test(&Filter::new(".*", ".*", ".*cheats/[^Fork]"), None, true).unwrap(); let suite_result = - runner.test(&Filter::new(".*", ".*", 
".*cheats/RpcUrl"), None, true).unwrap(); + runner.test(&Filter::new(".*", ".*", ".*cheats/[^Fork]"), None, true).unwrap(); assert!(!suite_result.is_empty()); for (_, SuiteResult { test_results, .. }) in suite_result { From c5059d13d51a5b0e03dd0077f08b10e5e77f16e7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 20:31:22 +0200 Subject: [PATCH 089/102] chore: rustfmt --- evm/src/executor/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index b3d79d69c8c6..b4c0b59d1255 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -1,6 +1,3 @@ -/// ABIs used internally in the executor -pub mod abi; - use self::inspector::{InspectorData, InspectorStackConfig}; use crate::{debug::DebugArena, trace::CallTraceArena, CALLER}; pub use abi::{ @@ -25,6 +22,8 @@ pub use revm::{db::DatabaseRef, Env, SpecId}; use std::collections::{BTreeMap, VecDeque}; use tracing::trace; +/// ABIs used internally in the executor +pub mod abi; /// custom revm database implementations pub mod backend; pub use backend::Backend; From c7e377c2be4acedd7612434725d3984741823abb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 7 Jul 2022 22:44:17 +0200 Subject: [PATCH 090/102] feat: update env when selecting fork --- evm/src/executor/backend/fuzz.rs | 9 +++- evm/src/executor/backend/mod.rs | 16 ++++++- evm/src/executor/fork/backend.rs | 5 +++ evm/src/executor/fork/mod.rs | 3 ++ evm/src/executor/fork/multi.rs | 45 ++++++++++--------- .../executor/inspector/cheatcodes/config.rs | 4 ++ evm/src/executor/inspector/cheatcodes/fork.rs | 36 ++++++++++----- evm/src/executor/opts.rs | 13 +++--- 8 files changed, 90 insertions(+), 41 deletions(-) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 9753b6d01f44..703b59134ad3 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -16,6 +16,8 @@ use revm::{ }; use tracing::{trace, warn}; +use super::update_current_env_with_fork_env; + /// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called. /// /// Any changes made during its existence that affect the caching layer of the underlying Database @@ -163,12 +165,15 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { Ok(id) } - fn select_fork(&mut self, id: U256) -> eyre::Result<()> { + fn select_fork(&mut self, id: U256, env: &mut Env) -> eyre::Result<()> { + let fork_id = self.ensure_fork_id(id).cloned()?; + let fork_env = self.backend.forks.get_env(fork_id)?.ok_or_else(|| eyre::eyre!("Requested fork `{}` does not exit", id))?; let fork = self .inner .ensure_backend(id) .or_else(|_| self.backend.inner.ensure_backend(id)) .cloned()?; + if let Some(ref mut db) = self.db_override { db.db = BackendDatabase::Forked(fork, id); } else { @@ -176,6 +181,8 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { db.db = BackendDatabase::Forked(fork, id); self.set_active(db); } + + update_current_env_with_fork_env(env, fork_env); Ok(()) } diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 19aa0dfa0bea..d85fc18764d5 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -46,12 +46,14 @@ pub trait DatabaseExt: Database { /// Selects the fork's state /// + /// This will also modify the current `Env`. 
+ /// /// **Note**: this does not change the local state, but swaps the remote state /// /// # Errors /// /// Returns an error if no fork with the given `id` exists - fn select_fork(&mut self, id: U256) -> eyre::Result<()>; + fn select_fork(&mut self, id: U256, env: &mut Env) -> eyre::Result<()>; /// Updates the fork to given block number. /// @@ -282,9 +284,12 @@ impl DatabaseExt for Backend { Ok(id) } - fn select_fork(&mut self, id: U256) -> eyre::Result<()> { + fn select_fork(&mut self, id: U256, env: &mut Env) -> eyre::Result<()> { + let fork_id = self.ensure_fork_id(id).cloned()?; + let fork_env = self.forks.get_env(fork_id)?.ok_or_else(|| eyre::eyre!("Requested fork `{}` does not exit", id))?; let fork = self.inner.ensure_backend(id).cloned()?; self.db.db = BackendDatabase::Forked(fork, id); + update_current_env_with_fork_env(env, fork_env); Ok(()) } @@ -520,3 +525,10 @@ impl BackendInner { id } } + + +/// This updates the currently used env with the fork's environment +pub(crate) fn update_current_env_with_fork_env(current: &mut Env, fork: Env) { + current.block = fork.block; + current.cfg = fork.cfg; +} \ No newline at end of file diff --git a/evm/src/executor/fork/backend.rs b/evm/src/executor/fork/backend.rs index 12114cafcece..8111effe6453 100644 --- a/evm/src/executor/fork/backend.rs +++ b/evm/src/executor/fork/backend.rs @@ -517,6 +517,7 @@ impl DatabaseRef for SharedBackend { mod tests { use crate::executor::{ fork::{BlockchainDbMeta, JsonBlockCacheDB}, + opts::EvmOpts, Backend, }; use ethers::{ @@ -592,12 +593,16 @@ mod tests { let block_num = provider.get_block_number().await.unwrap().as_u64(); let env = revm::Env::default(); + let config = Config::figment(); + let evm_opts = config.extract::().unwrap(); + let fork = CreateFork { enable_caching: true, url: ENDPOINT.to_string(), block: BlockNumber::Number(block_num.into()), chain_id: Some(1), env, + evm_opts, }; let backend = Backend::spawn(Some(fork)); diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index df891a48b853..fa684eafdd54 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -1,5 +1,6 @@ mod backend; +use super::opts::EvmOpts; pub use backend::{BackendHandler, SharedBackend}; use ethers::types::BlockNumber; use revm::Env; @@ -28,4 +29,6 @@ pub struct CreateFork { pub chain_id: Option, /// The env to create this fork, main purpose is to provide some metadata for the fork pub env: Env, + /// All env settings as configured by the user + pub evm_opts: EvmOpts, } diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index da5c9e9f41f8..b579d821c345 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -8,9 +8,8 @@ use crate::executor::fork::{ }; use ethers::{ abi::{AbiDecode, AbiEncode, AbiError}, - prelude::Middleware, providers::{Http, Provider, RetryClient}, - types::{BlockId, BlockNumber, U256}, + types::{BlockId, BlockNumber}, }; use foundry_config::Config; use futures::{ @@ -19,6 +18,7 @@ use futures::{ task::{Context, Poll}, Future, FutureExt, StreamExt, }; +use revm::Env; use std::{ collections::HashMap, fmt, @@ -129,6 +129,14 @@ impl MultiFork { rx.recv()? } + /// Returns the `Env` of the given fork, if any + pub fn get_env(&self, fork: ForkId) -> eyre::Result> { + let (sender, rx) = oneshot_channel(); + let req = Request::GetEnv(fork, sender); + self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; + Ok(rx.recv()?) 
+ } + /// Returns the corresponding fork if it exist /// /// Returns `None` if no matching fork backend is available. @@ -143,8 +151,8 @@ impl MultiFork { type Handler = BackendHandler>>>; type CreateFuture = Pin> + Send>>; - type CreateSender = OneshotSender>; +type GetEnvSender = OneshotSender>; /// Request that's send to the handler #[derive(Debug)] @@ -155,6 +163,8 @@ enum Request { GetFork(ForkId, OneshotSender>), /// Adjusts the block that's being forked RollFork(ForkId, u64, CreateSender), + /// Returns the environment of the fork + GetEnv(ForkId, GetEnvSender), /// Shutdowns the entire `MultiForkHandler`, see `ShutDownMultiFork` ShutDown(OneshotSender<()>), } @@ -245,6 +255,9 @@ impl MultiForkHandler { let _ = sender.send(Err(eyre::eyre!("No matching fork exits for {}", fork_id))); } } + Request::GetEnv(fork_id, sender) => { + let _ = sender.send(self.forks.get(&fork_id).map(|fork| fork.opts.env.clone())); + } Request::ShutDown(sender) => { trace!(target: "fork::multi", "received shutdown signal"); // we're emptying all fork backends, this way we ensure all caches get flushed @@ -394,35 +407,23 @@ fn create_fork_id(url: &str, num: BlockNumber) -> ForkId { /// /// This will establish a new `Provider` to the endpoint and return the Fork Backend async fn create_fork( - fork: CreateFork, + mut fork: CreateFork, retries: u32, backoff: u64, ) -> eyre::Result<(CreatedFork, Handler)> { - let CreateFork { enable_caching, url, block: block_number, env, chain_id } = fork.clone(); let provider = Arc::new(Provider::>::new_client( - url.clone().as_str(), + fork.url.clone().as_str(), retries, backoff, )?); - let mut meta = BlockchainDbMeta::new(env, url); - // update the meta to match the forked config - let chain_id = if let Some(chain_id) = chain_id { - U256::from(chain_id) - } else { - provider.get_chainid().await? 
- }; - meta.cfg_env.chain_id = chain_id; - - let number = match block_number { - BlockNumber::Pending | BlockNumber::Latest => provider.get_block_number().await?.as_u64(), - BlockNumber::Earliest => 0, - BlockNumber::Number(num) => num.as_u64(), - }; - meta.block_env.number = number.into(); + // initialise the fork environment + fork.env = fork.evm_opts.fork_evm_env(&fork.url).await?; + let meta = BlockchainDbMeta::new(fork.env.clone(), fork.url.clone()); + let number = meta.block_env.number.as_u64(); // determine the cache path if caching is enabled - let cache_path = if enable_caching { + let cache_path = if fork.enable_caching { Config::foundry_block_cache_dir(meta.cfg_env.chain_id.as_u64(), number) } else { None diff --git a/evm/src/executor/inspector/cheatcodes/config.rs b/evm/src/executor/inspector/cheatcodes/config.rs index c6ef0fcdd935..5d1023fa03e6 100644 --- a/evm/src/executor/inspector/cheatcodes/config.rs +++ b/evm/src/executor/inspector/cheatcodes/config.rs @@ -23,6 +23,9 @@ pub struct CheatsConfig { /// Paths (directories) where file reading/writing is allowed pub allowed_paths: Vec, + + /// How the evm was configured by the user + pub evm_opts: EvmOpts, } // === impl CheatsConfig === @@ -40,6 +43,7 @@ impl CheatsConfig { rpc_endpoints: config.rpc_endpoints.clone().resolved(), root: config.__root.0.clone(), allowed_paths, + evm_opts: evm_opts.clone(), } } diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index 4a04f5f08a30..6e7d1e0277df 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -13,24 +13,37 @@ pub fn apply( data: &mut EVMData<'_, DB>, call: &HEVMCalls, ) -> Option> { - Some(match call { + let resp = match call { HEVMCalls::CreateFork0(fork) => { create_fork(state, data, fork.0.clone(), BlockNumber::Latest) } HEVMCalls::CreateFork1(fork) => { create_fork(state, data, fork.0.clone(), fork.1.as_u64().into()) } - HEVMCalls::SelectFork(fork_id) => { - data.db.select_fork(fork_id.0).map(|_| Default::default()).map_err(util::encode_error) - } - HEVMCalls::RollFork0(fork) => { - data.db.roll_fork(fork.0, None).map(|_| Default::default()).map_err(util::encode_error) - } - HEVMCalls::RollFork1(fork) => data + HEVMCalls::SelectFork(fork_id) => data .db - .roll_fork(fork.1, Some(fork.0)) + .select_fork(fork_id.0, data.env) .map(|_| Default::default()) .map_err(util::encode_error), + HEVMCalls::RollFork0(fork) => { + let block_number = fork.0; + let resp = data.db.roll_fork(block_number, None).map(|_| Default::default()).map_err(util::encode_error); + if resp.is_ok() { + data.env.block.number = block_number; + } + resp + } + HEVMCalls::RollFork1(fork) => { + let block_number = fork.1; + let resp = data.db + .roll_fork(block_number, Some(fork.0)) + .map(|_| Default::default()) + .map_err(util::encode_error); + if resp.is_ok() { + data.env.block.number = block_number; + } + resp + }, HEVMCalls::RpcUrl(rpc) => state.config.get_rpc_url(&rpc.0).map(|url| url.encode().into()), HEVMCalls::RpcUrls(_) => { let mut urls = Vec::with_capacity(state.config.rpc_endpoints.len()); @@ -45,7 +58,9 @@ pub fn apply( Ok(urls.encode().into()) } _ => return None, - }) + }; + + Some(resp) } /// Creates a new fork @@ -62,6 +77,7 @@ fn create_fork( block, chain_id: None, env: data.env.clone(), + evm_opts: state.config.evm_opts.clone(), }; data.db.create_fork(fork).map_err(util::encode_error).map(|id| id.encode().into()) } diff --git a/evm/src/executor/opts.rs b/evm/src/executor/opts.rs 
index 4733cbe24ebc..275089692228 100644 --- a/evm/src/executor/opts.rs +++ b/evm/src/executor/opts.rs @@ -53,7 +53,7 @@ impl EvmOpts { /// id, ) pub async fn evm_env(&self) -> revm::Env { if let Some(ref fork_url) = self.fork_url { - self.fork_evm_env(fork_url).await + self.fork_evm_env(fork_url).await.expect("could not instantiate forked environment") } else { self.local_evm_env() } @@ -64,16 +64,17 @@ impl EvmOpts { /// This only attaches are creates a temporary tokio runtime if `fork_url` is set pub fn evm_env_blocking(&self) -> revm::Env { if let Some(ref fork_url) = self.fork_url { - RuntimeOrHandle::new().block_on(async { self.fork_evm_env(fork_url).await }) + RuntimeOrHandle::new().block_on(async { + self.fork_evm_env(fork_url).await.expect("could not instantiate forked environment") + }) } else { self.local_evm_env() } } /// Returns the `revm::Env` configured with settings retrieved from the endpoints - async fn fork_evm_env(&self, fork_url: impl AsRef) -> revm::Env { - let provider = - Provider::try_from(fork_url.as_ref()).expect("could not instantiated provider"); + pub async fn fork_evm_env(&self, fork_url: impl AsRef) -> eyre::Result { + let provider = Provider::try_from(fork_url.as_ref())?; environment( &provider, self.memory_limit, @@ -83,7 +84,6 @@ impl EvmOpts { self.sender, ) .await - .expect("could not instantiate forked environment") } /// Returns the `revm::Env` configured with only local settings @@ -135,6 +135,7 @@ impl EvmOpts { .unwrap_or(BlockNumber::Latest), chain_id: self.env.chain_id, env, + evm_opts: self.clone(), }) } From 36389a886a54f85bc866b5d3a70ed2de5c46c4b1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Jul 2022 00:11:53 +0200 Subject: [PATCH 091/102] fix: fix a ton of bugs --- evm/src/executor/abi.rs | 2 +- evm/src/executor/backend/fuzz.rs | 23 +++- evm/src/executor/backend/mod.rs | 38 ++++-- evm/src/executor/fork/backend.rs | 16 +-- evm/src/executor/fork/mod.rs | 6 +- evm/src/executor/fork/multi.rs | 8 +- evm/src/executor/inspector/cheatcodes/fork.rs | 39 +++---- evm/src/executor/opts.rs | 6 - testdata/cheats/Cheats.sol | 2 +- testdata/cheats/Fork2.t.sol | 110 ++++++++++++++++++ testdata/cheats/Snapshots.t.sol | 5 - 11 files changed, 187 insertions(+), 68 deletions(-) create mode 100644 testdata/cheats/Fork2.t.sol diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index 00caafc514ac..2a9f5be67fb1 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -90,7 +90,7 @@ ethers::contract::abigen!( createFork(string,uint256)(uint256) createFork(string)(uint256) selectFork(uint256) - rollFork(uint256)(bool) + rollFork(uint256) rollFork(uint256,uint256) rpcUrl(string)(string) rpcUrls()(string[2][]) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 703b59134ad3..1396f4d97ee9 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -167,7 +167,11 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { fn select_fork(&mut self, id: U256, env: &mut Env) -> eyre::Result<()> { let fork_id = self.ensure_fork_id(id).cloned()?; - let fork_env = self.backend.forks.get_env(fork_id)?.ok_or_else(|| eyre::eyre!("Requested fork `{}` does not exit", id))?; + let fork_env = self + .backend + .forks + .get_env(fork_id)? 
+            .ok_or_else(|| eyre::eyre!("Requested fork `{}` does not exist", id))?; let fork = self .inner .ensure_backend(id) @@ -181,19 +185,28 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { db.db = BackendDatabase::Forked(fork, id); self.set_active(db); } - + update_current_env_with_fork_env(env, fork_env); Ok(()) } - fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result<()> { + fn roll_fork( + &mut self, + env: &mut Env, + block_number: U256, + id: Option, + ) -> eyre::Result<()> { let id = self.ensure_fork(id)?; - let (id, fork) = self + let (fork_id, fork) = self .backend .forks .roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; // this will update the local mapping - self.inner.created_forks.insert(id, fork); + self.inner.update_fork_mapping(id, fork_id, fork); + if self.active_fork() == Some(id) { + // need to update the block number right away + env.block.number = block_number; + } Ok(()) } diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index d85fc18764d5..88a1fc2b271e 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -62,7 +62,12 @@ pub trait DatabaseExt: Database { /// # Errors /// /// Returns an error if not matching fork was found. - fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result<()>; + fn roll_fork( + &mut self, + env: &mut Env, + block_number: U256, + id: Option, + ) -> eyre::Result<()>; /// Returns the `ForkId` that's currently used in the database, if fork mode is on fn active_fork(&self) -> Option; @@ -286,19 +291,31 @@ impl DatabaseExt for Backend { fn select_fork(&mut self, id: U256, env: &mut Env) -> eyre::Result<()> { let fork_id = self.ensure_fork_id(id).cloned()?; - let fork_env = self.forks.get_env(fork_id)?.ok_or_else(|| eyre::eyre!("Requested fork `{}` does not exit", id))?; + let fork_env = self + .forks + .get_env(fork_id)? 
+            .ok_or_else(|| eyre::eyre!("Requested fork `{}` does not exist", id))?; let fork = self.inner.ensure_backend(id).cloned()?; self.db.db = BackendDatabase::Forked(fork, id); update_current_env_with_fork_env(env, fork_env); Ok(()) } - fn roll_fork(&mut self, block_number: U256, id: Option) -> eyre::Result<()> { + fn roll_fork( + &mut self, + env: &mut Env, + block_number: U256, + id: Option, + ) -> eyre::Result<()> { let id = self.ensure_fork(id)?; - let (id, fork) = + let (fork_id, fork) = self.forks.roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; // this will update the local mapping - self.inner.created_forks.insert(id, fork); + self.inner.update_fork_mapping(id, fork_id, fork); + if self.active_fork() == Some(id) { + // need to update the block number right away + env.block.number = block_number; + } Ok(()) } @@ -511,6 +528,12 @@ impl BackendInner { self.issued_local_fork_ids.get(&id).and_then(|id| self.created_forks.get(id)) } + /// Updates the fork and the local mapping + pub fn update_fork_mapping(&mut self, id: U256, fork: ForkId, backend: SharedBackend) { + self.created_forks.insert(fork.clone(), backend); + self.issued_local_fork_ids.insert(id, fork); + } + /// Issues a new local fork identifier pub fn insert_new_fork(&mut self, fork: ForkId, backend: SharedBackend) -> U256 { self.created_forks.insert(fork.clone(), backend); @@ -526,9 +549,8 @@ impl BackendInner { } } - /// This updates the currently used env with the fork's environment -pub(crate) fn update_current_env_with_fork_env(current: &mut Env, fork: Env) { +pub(crate) fn update_current_env_with_fork_env(current: &mut Env, fork: Env) { current.block = fork.block; current.cfg = fork.cfg; -} \ No newline at end of file +} diff --git a/evm/src/executor/fork/backend.rs b/evm/src/executor/fork/backend.rs index 8111effe6453..18e8917f3bb4 100644 --- a/evm/src/executor/fork/backend.rs +++ b/evm/src/executor/fork/backend.rs @@ -527,7 +527,7 @@ mod tests { }; use crate::executor::fork::CreateFork; - use ethers::types::{BlockNumber, Chain}; + use ethers::types::Chain; use foundry_config::Config; use std::{collections::BTreeSet, convert::TryFrom, path::PathBuf, sync::Arc}; @@ -594,16 +594,10 @@ mod tests { let env = revm::Env::default(); let config = Config::figment(); - let evm_opts = config.extract::().unwrap(); - - let fork = CreateFork { - enable_caching: true, - url: ENDPOINT.to_string(), - block: BlockNumber::Number(block_num.into()), - chain_id: Some(1), - env, - evm_opts, - }; + let mut evm_opts = config.extract::().unwrap(); + evm_opts.fork_block_number = Some(block_num); + + let fork = CreateFork { enable_caching: true, url: ENDPOINT.to_string(), env, evm_opts }; + let backend = Backend::spawn(Some(fork)); diff --git a/evm/src/executor/fork/mod.rs b/evm/src/executor/fork/mod.rs index fa684eafdd54..6985b883017c 100644 --- a/evm/src/executor/fork/mod.rs +++ b/evm/src/executor/fork/mod.rs @@ -2,7 +2,7 @@ mod backend; use super::opts::EvmOpts; pub use backend::{BackendHandler, SharedBackend}; -use ethers::types::BlockNumber; + use revm::Env; mod init; @@ -23,10 +23,6 @@ pub struct CreateFork { pub enable_caching: bool, /// The URL to a node for fetching remote state pub url: String, - /// The block to fork against - pub block: BlockNumber, - /// chain id to use, if `None` then the chain_id will be fetched from the endpoint - pub chain_id: Option, /// The env to create this fork, main purpose is to provide some metadata for the fork pub env: Env, /// All env settings as configured by the user diff --git 
a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index b579d821c345..f2a8288171c1 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -225,7 +225,7 @@ impl MultiForkHandler { } fn create_fork(&mut self, fork: CreateFork, sender: CreateSender) { - let fork_id = create_fork_id(&fork.url, fork.block); + let fork_id = create_fork_id(&fork.url, fork.evm_opts.fork_block_number); if let Some(fork) = self.forks.get_mut(&fork_id) { fork.num_senders += 1; let _ = sender.send(Ok((fork_id, fork.backend.clone()))); @@ -249,7 +249,7 @@ impl MultiForkHandler { if let Some(fork) = self.forks.get(&fork_id) { trace!(target: "fork::multi", "rolling {} to {}", fork_id, block); let mut opts = fork.opts.clone(); - opts.block = block.into(); + opts.evm_opts.fork_block_number = Some(block); self.create_fork(opts, sender) } else { let _ = sender.send(Err(eyre::eyre!("No matching fork exits for {}", fork_id))); @@ -399,7 +399,8 @@ impl Drop for ShutDownMultiFork { } /// Returns the identifier for a Fork which consists of the url and the block number -fn create_fork_id(url: &str, num: BlockNumber) -> ForkId { +fn create_fork_id(url: &str, num: Option) -> ForkId { + let num = num.map(|num| BlockNumber::Number(num.into())).unwrap_or(BlockNumber::Latest); ForkId(format!("{url}@{num}")) } @@ -419,6 +420,7 @@ async fn create_fork( // initialise the fork environment fork.env = fork.evm_opts.fork_evm_env(&fork.url).await?; + let meta = BlockchainDbMeta::new(fork.env.clone(), fork.url.clone()); let number = meta.block_env.number.as_u64(); diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index 6e7d1e0277df..c21f369bb3ab 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -4,7 +4,7 @@ use crate::{ executor::{backend::DatabaseExt, fork::CreateFork}, }; use bytes::Bytes; -use ethers::{abi::AbiEncode, types::BlockNumber}; +use ethers::abi::AbiEncode; use revm::EVMData; /// Handles fork related cheatcodes @@ -14,11 +14,9 @@ pub fn apply( call: &HEVMCalls, ) -> Option> { let resp = match call { - HEVMCalls::CreateFork0(fork) => { - create_fork(state, data, fork.0.clone(), BlockNumber::Latest) - } + HEVMCalls::CreateFork0(fork) => create_fork(state, data, fork.0.clone(), None), HEVMCalls::CreateFork1(fork) => { - create_fork(state, data, fork.0.clone(), fork.1.as_u64().into()) + create_fork(state, data, fork.0.clone(), Some(fork.1.as_u64())) } HEVMCalls::SelectFork(fork_id) => data .db @@ -27,23 +25,18 @@ pub fn apply( .map_err(util::encode_error), HEVMCalls::RollFork0(fork) => { let block_number = fork.0; - let resp = data.db.roll_fork(block_number, None).map(|_| Default::default()).map_err(util::encode_error); - if resp.is_ok() { - data.env.block.number = block_number; - } - resp + data.db + .roll_fork(data.env, block_number, None) + .map(|_| Default::default()) + .map_err(util::encode_error) } HEVMCalls::RollFork1(fork) => { let block_number = fork.1; - let resp = data.db - .roll_fork(block_number, Some(fork.0)) - .map(|_| Default::default()) - .map_err(util::encode_error); - if resp.is_ok() { - data.env.block.number = block_number; - } - resp - }, + data.db + .roll_fork(data.env, block_number, Some(fork.0)) + .map(|_| Default::default()) + .map_err(util::encode_error) + } HEVMCalls::RpcUrl(rpc) => state.config.get_rpc_url(&rpc.0).map(|url| url.encode().into()), HEVMCalls::RpcUrls(_) => { let mut urls = Vec::with_capacity(state.config.rpc_endpoints.len()); @@ 
-68,16 +61,16 @@ fn create_fork( state: &mut Cheatcodes, data: &mut EVMData<'_, DB>, url_or_alias: String, - block: BlockNumber, + block: Option, ) -> Result { let url = state.config.get_rpc_url(url_or_alias)?; + let mut evm_opts = state.config.evm_opts.clone(); + evm_opts.fork_block_number = block; let fork = CreateFork { enable_caching: state.config.rpc_storage_caching.enable_for_endpoint(&url), url, - block, - chain_id: None, env: data.env.clone(), - evm_opts: state.config.evm_opts.clone(), + evm_opts, }; data.db.create_fork(fork).map_err(util::encode_error).map(|id| id.encode().into()) } diff --git a/evm/src/executor/opts.rs b/evm/src/executor/opts.rs index 275089692228..781c030bb805 100644 --- a/evm/src/executor/opts.rs +++ b/evm/src/executor/opts.rs @@ -1,5 +1,4 @@ use ethers::{ - prelude::BlockNumber, providers::{Middleware, Provider}, solc::utils::RuntimeOrHandle, types::{Address, Chain, U256}, @@ -129,11 +128,6 @@ impl EvmOpts { Some(CreateFork { url: self.fork_url.clone()?, enable_caching: self.no_storage_caching, - block: self - .fork_block_number - .map(|num| BlockNumber::Number(num.into())) - .unwrap_or(BlockNumber::Latest), - chain_id: self.env.chain_id, env, evm_opts: self.clone(), }) diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 1d4a5db6227d..c127dbd6e2d2 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -155,7 +155,7 @@ interface Cheats { function rollFork(uint256) external; // Updates the given fork to given block number // Returns false if no matching fork was found - function rollFork(uint256, uint256) external returns(bool); + function rollFork(uint256, uint256) external; /// Returns the RPC url for the given alias function rpcUrl(string calldata) external returns(string memory); /// Returns all rpc urls and their aliases `[alias, url][]` diff --git a/testdata/cheats/Fork2.t.sol b/testdata/cheats/Fork2.t.sol new file mode 100644 index 000000000000..e3690db1f372 --- /dev/null +++ b/testdata/cheats/Fork2.t.sol @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: Unlicense +pragma solidity >=0.8.0; + +import "ds-test/test.sol"; +import "./Cheats.sol"; + +struct MyStruct { + uint256 value; +} + +contract MyContract { + uint256 forkId; + bytes32 blockHash; + + constructor(uint256 _forkId) public { + forkId = _forkId; + blockHash = blockhash(block.number - 1); + } + + function ensureForkId(uint256 _forkId) public view { + require(forkId == _forkId, "ForkId does not match"); + } + + function ensureBlockHash() public view { + require( + blockhash(block.number - 1) == blockHash, + "Block Hash does not match" + ); + } +} + +contract ForkTest is DSTest { + Cheats constant cheats = Cheats(HEVM_ADDRESS); + + uint256 mainnetFork; + uint256 optimismFork; + + // this will create two _different_ forks during setup + function setUp() public { + mainnetFork = cheats.createFork("rpcAlias"); + optimismFork = cheats.createFork( + "https://opt-mainnet.g.alchemy.com/v2/UVatYU2Ax0rX6bDiqddeTRDdcCxzdpoE" + ); + } + + // ensures forks use different ids + function testForkIdDiffer() public { + assert(mainnetFork != optimismFork); + } + + // ensures forks use different ids + function testCanSwitchForks() public { + cheats.selectFork(mainnetFork); + cheats.selectFork(optimismFork); + cheats.selectFork(optimismFork); + cheats.selectFork(mainnetFork); + } + + // ensures forks have different block hashes + function testBlockNumbersMimatch() public { + cheats.selectFork(mainnetFork); + uint256 num = block.number; + bytes32 mainHash = 
blockhash(block.number - 1); + cheats.selectFork(optimismFork); + uint256 num2 = block.number; + bytes32 optimismHash = blockhash(block.number - 1); + assert(mainHash != optimismHash); + } + + function testCanSwitchContracts() public { + cheats.selectFork(mainnetFork); + MyContract contract1 = new MyContract(mainnetFork); + + contract1.ensureForkId(mainnetFork); // Valid + contract1.ensureBlockHash(); // Valid + + cheats.selectFork(optimismFork); + + cheats.expectRevert("ForkId does not match"); + contract1.ensureForkId(optimismFork); + + contract1.ensureForkId(mainnetFork); // Valid + + cheats.expectRevert("Block Hash does not match"); + contract1.ensureBlockHash(); + } + + // test that we can switch between forks, and "roll" blocks + function testCanRollFork() public { + cheats.selectFork(mainnetFork); + uint256 otherMain = cheats.createFork("rpcAlias", block.number - 1); + cheats.selectFork(otherMain); + uint256 mainBlock = block.number; + + uint256 forkedBlock = 14_608_400; + uint256 otherFork = cheats.createFork("rpcAlias", forkedBlock); + cheats.selectFork(otherFork); + assertEq(block.number, forkedBlock); + + cheats.rollFork(forkedBlock + 1); + assertEq(block.number, forkedBlock + 1); + + // can also roll by id + cheats.rollFork(otherMain, mainBlock + 1); + assertEq(block.number, forkedBlock + 1); + + cheats.selectFork(otherMain); + assertEq(block.number, mainBlock + 1); + } +} diff --git a/testdata/cheats/Snapshots.t.sol b/testdata/cheats/Snapshots.t.sol index c66c729187de..c58897e3cf58 100644 --- a/testdata/cheats/Snapshots.t.sol +++ b/testdata/cheats/Snapshots.t.sol @@ -19,11 +19,6 @@ contract SnapshotTest is DSTest { store.slot1 = 20; } -// function testStore() public { -// assertEq(store.slot0, 10, "initial value for slot 0 is incorrect"); -// assertEq(store.slot1, 20, "initial value for slot 1 is incorrect"); -// } - function testSnapshot() public { uint256 snapshot = cheats.snapshot(); store.slot0 = 300; From 86ddd0ae4e8acb3d186db3508b2549a76280c4e5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Jul 2022 00:28:20 +0200 Subject: [PATCH 092/102] fix: failing tests --- evm/src/executor/fork/backend.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/evm/src/executor/fork/backend.rs b/evm/src/executor/fork/backend.rs index 18e8917f3bb4..60434e0eb594 100644 --- a/evm/src/executor/fork/backend.rs +++ b/evm/src/executor/fork/backend.rs @@ -591,13 +591,19 @@ mod tests { let provider = Provider::::try_from(ENDPOINT).unwrap(); let block_num = provider.get_block_number().await.unwrap().as_u64(); - let env = revm::Env::default(); let config = Config::figment(); let mut evm_opts = config.extract::().unwrap(); evm_opts.fork_block_number = Some(block_num); - let fork = CreateFork { enable_caching: true, url: ENDPOINT.to_string(), env, evm_opts }; + let env = evm_opts.fork_evm_env(ENDPOINT).await.unwrap(); + + let fork = CreateFork { + enable_caching: true, + url: ENDPOINT.to_string(), + env: env.clone(), + evm_opts, + }; let backend = Backend::spawn(Some(fork)); @@ -615,11 +621,8 @@ mod tests { } drop(backend); - let meta = BlockchainDbMeta { - cfg_env: Default::default(), - block_env: revm::BlockEnv { number: block_num.into(), ..Default::default() }, - hosts: Default::default(), - }; + let meta = + BlockchainDbMeta { cfg_env: env.cfg, block_env: env.block, hosts: Default::default() }; let db = BlockchainDb::new( meta, From ce7c6018a1375561e1410b5a1c716750dd5b76c8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Jul 2022 14:15:24 
+0200 Subject: [PATCH 093/102] chore: rm unused cheat --- testdata/cheats/Cheats.sol | 2 -- 1 file changed, 2 deletions(-) diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index c127dbd6e2d2..485188b8873f 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -148,8 +148,6 @@ interface Cheats { function createFork(string calldata) external returns(uint256); // takes a fork identifier created by `createFork` and changes the state function selectFork(uint256) external; - // forks the `block` variable from the given endpoint - function forkBlockVariable(string calldata, uint256) external; // Updates the currently active fork to given block number // This is similar to `roll` but for the fork function rollFork(uint256) external; From 42d5485962345db9583dab43cf12a20c3931d928 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 8 Jul 2022 14:27:26 +0200 Subject: [PATCH 094/102] chore: rm unused types --- forge/src/lib.rs | 2 -- forge/src/types.rs | 37 ------------------------------------- 2 files changed, 39 deletions(-) delete mode 100644 forge/src/types.rs diff --git a/forge/src/lib.rs b/forge/src/lib.rs index d2099fd8713f..335af24b6b3e 100644 --- a/forge/src/lib.rs +++ b/forge/src/lib.rs @@ -15,8 +15,6 @@ pub use multi_runner::{MultiContractRunner, MultiContractRunnerBuilder}; mod traits; pub use traits::*; -mod types; - pub mod result; #[cfg(test)] diff --git a/forge/src/types.rs b/forge/src/types.rs deleted file mode 100644 index 3ea374153f60..000000000000 --- a/forge/src/types.rs +++ /dev/null @@ -1,37 +0,0 @@ -use ethers::{ - abi::{Event, Function}, - solc::artifacts::CompactContractBytecode, - types::H256, -}; -use std::{collections::BTreeMap, path::PathBuf}; - -/// Represents a solidity Contract that's a test target -#[derive(Debug, Clone)] -pub struct TestContract { - /// All functions keyed by their short signature - pub functions: BTreeMap<[u8; 4], TestFunction>, - - /// contract's bytecode objects - pub bytecode: CompactContractBytecode, - - /// location of the contract - pub source: PathBuf, - - /// all events of the contract - pub events: BTreeMap, - - /// all errors of the contract - pub errors: BTreeMap>, -} - -/// A solidity function that can be tested -#[derive(Debug, Clone)] -pub struct TestFunction { - pub function: Function, - /// the function's signature - pub signature: String, -} - -// === impl TestFunction === - -impl TestFunction {} From 60bce64755d7623c0adb1c5ad6b4c98224cb04e2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Jul 2022 14:28:08 +0200 Subject: [PATCH 095/102] feat: add more util cheat codes --- evm/src/executor/abi.rs | 3 ++ evm/src/executor/backend/fuzz.rs | 3 +- evm/src/executor/inspector/cheatcodes/fork.rs | 44 +++++++++++++++---- testdata/cheats/Cheats.sol | 7 +++ 4 files changed, 47 insertions(+), 10 deletions(-) diff --git a/evm/src/executor/abi.rs b/evm/src/executor/abi.rs index 2a9f5be67fb1..63d098cfc65a 100644 --- a/evm/src/executor/abi.rs +++ b/evm/src/executor/abi.rs @@ -89,7 +89,10 @@ ethers::contract::abigen!( revertTo(uint256)(bool) createFork(string,uint256)(uint256) createFork(string)(uint256) + createSelectFork(string,uint256)(uint256) + createSelectFork(string)(uint256) selectFork(uint256) + activeFork()(uint256) rollFork(uint256) rollFork(uint256,uint256) rpcUrl(string)(string) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 1396f4d97ee9..d7fb1472a27f 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -1,3 
+1,4 @@ +use super::update_current_env_with_fork_env; use crate::{ abi::CHEATCODE_ADDRESS, executor::{ @@ -16,8 +17,6 @@ use revm::{ }; use tracing::{trace, warn}; -use super::update_current_env_with_fork_env; - /// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called. /// /// Any changes made during its existence that affect the caching layer of the underlying Database diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index c21f369bb3ab..cebe72066cf9 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -4,7 +4,7 @@ use crate::{ executor::{backend::DatabaseExt, fork::CreateFork}, }; use bytes::Bytes; -use ethers::abi::AbiEncode; +use ethers::{abi::AbiEncode, prelude::U256}; use revm::EVMData; /// Handles fork related cheatcodes @@ -14,15 +14,26 @@ pub fn apply( call: &HEVMCalls, ) -> Option> { let resp = match call { - HEVMCalls::CreateFork0(fork) => create_fork(state, data, fork.0.clone(), None), + HEVMCalls::CreateFork0(fork) => { + create_fork(state, data, fork.0.clone(), None).map(|id| id.encode().into()) + } HEVMCalls::CreateFork1(fork) => { create_fork(state, data, fork.0.clone(), Some(fork.1.as_u64())) + .map(|id| id.encode().into()) + } + HEVMCalls::CreateSelectFork0(fork) => { + create_select_fork(state, data, fork.0.clone(), None).map(|id| id.encode().into()) + } + HEVMCalls::CreateSelectFork1(fork) => { + create_select_fork(state, data, fork.0.clone(), Some(fork.1.as_u64())) + .map(|id| id.encode().into()) } - HEVMCalls::SelectFork(fork_id) => data + HEVMCalls::SelectFork(fork_id) => select_fork(data, fork_id.0), + HEVMCalls::ActiveFork(_) => data .db - .select_fork(fork_id.0, data.env) - .map(|_| Default::default()) - .map_err(util::encode_error), + .active_fork() + .map(|id| id.encode().into()) + .ok_or_else(|| util::encode_error("No active fork")), HEVMCalls::RollFork0(fork) => { let block_number = fork.0; data.db @@ -56,13 +67,30 @@ pub fn apply( Some(resp) } +/// Selects the given fork id +fn select_fork(data: &mut EVMData, fork_id: U256) -> Result { + data.db.select_fork(fork_id, data.env).map(|_| Default::default()).map_err(util::encode_error) +} + +/// Creates and then also selects the new fork +fn create_select_fork( + state: &mut Cheatcodes, + data: &mut EVMData<'_, DB>, + url_or_alias: String, + block: Option, +) -> Result { + let fork_id = create_fork(state, data, url_or_alias, block)?; + select_fork(data, fork_id)?; + Ok(fork_id) +} + /// Creates a new fork fn create_fork( state: &mut Cheatcodes, data: &mut EVMData<'_, DB>, url_or_alias: String, block: Option, -) -> Result { +) -> Result { let url = state.config.get_rpc_url(url_or_alias)?; let mut evm_opts = state.config.evm_opts.clone(); evm_opts.fork_block_number = block; @@ -72,5 +100,5 @@ fn create_fork( env: data.env.clone(), evm_opts, }; - data.db.create_fork(fork).map_err(util::encode_error).map(|id| id.encode().into()) + data.db.create_fork(fork).map_err(util::encode_error) } diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 485188b8873f..5495532a9c0c 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -146,8 +146,15 @@ interface Cheats { function createFork(string calldata,uint256) external returns(uint256); // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork function createFork(string calldata) external returns(uint256); + // Creates _and_ selects a new 
fork with the given endpoint and block and returns the identifier of the fork + function createSelectFork(string calldata,uint256) external returns(uint256); + // Creates _and_ also selects a new fork with the given endpoint and the latest block and returns the identifier of the fork + function createSelectFork(string calldata) external returns(uint256); // takes a fork identifier created by `createFork` and changes the state function selectFork(uint256) external; + /// Returns the currently active fork + /// Reverts if no fork is currently active + function activeFork() external returns(uint256); // Updates the currently active fork to given block number // This is similar to `roll` but for the fork function rollFork(uint256) external; From 1201afc827f4d997d312b724b62472377bd65d6c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Jul 2022 14:43:44 +0200 Subject: [PATCH 096/102] style: simplify create select --- evm/src/executor/backend/mod.rs | 9 +++++++++ evm/src/executor/inspector/cheatcodes/fork.rs | 18 ++++++++++++++---- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 88a1fc2b271e..66d423066eb0 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -41,6 +41,15 @@ pub trait DatabaseExt: Database { /// snapshot and its revert. fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option; + /// Creates and also selects a new fork + /// + /// This is basically `create_fork` + `select_fork` + fn create_select_fork(&mut self, fork: CreateFork, env: &mut Env) -> eyre::Result { + let id = self.create_fork(fork)?; + self.select_fork(id, env)?; + Ok(id) + } + /// Creates a new fork but does _not_ select it fn create_fork(&mut self, fork: CreateFork) -> eyre::Result; diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index cebe72066cf9..78b832dabbff 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -79,9 +79,8 @@ fn create_select_fork( url_or_alias: String, block: Option, ) -> Result { - let fork_id = create_fork(state, data, url_or_alias, block)?; - select_fork(data, fork_id)?; - Ok(fork_id) + let fork = create_fork_request(state, url_or_alias, block, data)?; + data.db.create_select_fork(fork, data.env).map_err(util::encode_error) } /// Creates a new fork @@ -91,6 +90,17 @@ fn create_fork( url_or_alias: String, block: Option, ) -> Result { + let fork = create_fork_request(state, url_or_alias, block, data)?; + data.db.create_fork(fork).map_err(util::encode_error) +} + +/// Creates the request object for a new fork request +fn create_fork_request( + state: &Cheatcodes, + url_or_alias: String, + block: Option, + data: &EVMData, +) -> Result { let url = state.config.get_rpc_url(url_or_alias)?; let mut evm_opts = state.config.evm_opts.clone(); evm_opts.fork_block_number = block; @@ -100,5 +110,5 @@ fn create_fork( env: data.env.clone(), evm_opts, }; - data.db.create_fork(fork).map_err(util::encode_error) + Ok(fork) } From a0a7b8d00395b752915990580737a57ac9085f4f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Jul 2022 14:48:09 +0200 Subject: [PATCH 097/102] docs: update docs --- evm/src/executor/backend/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 66d423066eb0..22f34405f045 100644 --- a/evm/src/executor/backend/mod.rs +++ 
b/evm/src/executor/backend/mod.rs @@ -127,6 +127,8 @@ pub trait DatabaseExt: Database { /// Multiple "forks" can be created `Backend::create_fork()`, however only 1 can be used by the /// `db`. However, their state can be hot-swapped by swapping the read half of `db` from one fork to /// another. +/// When swapping forks (`Backend::select_fork()`) we also update the current `Env` of the `EVM` +/// accordingly, so that all `block.*` config values match /// /// **Note:** this only affects the readonly half of the `db`, local changes are persistent across /// fork-state swaps. @@ -173,7 +175,7 @@ impl Backend { } else { (CacheDB::new(BackendDatabase::InMemory(EmptyDB())), None) }; - + // Note: this will take of registering the `fork` Self { forks, db, inner: BackendInner::new(launched_with) } } From fbdf0e24538b254d49862c1f156e54748395c934 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Jul 2022 14:52:16 +0200 Subject: [PATCH 098/102] test: more fork tests --- testdata/cheats/Fork2.t.sol | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/testdata/cheats/Fork2.t.sol b/testdata/cheats/Fork2.t.sol index e3690db1f372..cffc1d445381 100644 --- a/testdata/cheats/Fork2.t.sol +++ b/testdata/cheats/Fork2.t.sol @@ -51,9 +51,18 @@ contract ForkTest is DSTest { // ensures forks use different ids function testCanSwitchForks() public { cheats.selectFork(mainnetFork); + assertEq(mainnetFork, cheats.activeFork()); cheats.selectFork(optimismFork); + assertEq(optimismFork, cheats.activeFork()); cheats.selectFork(optimismFork); + assertEq(optimismFork, cheats.activeFork()); cheats.selectFork(mainnetFork); + assertEq(mainnetFork, cheats.activeFork()); + } + + function testCanCreateSelect() public { + uint256 anotherFork = cheats.createSelectFork("rpcAlias"); + assertEq(anotherFork, cheats.activeFork()); } // ensures forks have different block hashes From b5c6cbd3fea1fe57efd056a7c409acb93fd8237c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Jul 2022 14:55:44 +0200 Subject: [PATCH 099/102] add active fork test --- testdata/fork/Fork.t.sol | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/testdata/fork/Fork.t.sol b/testdata/fork/Fork.t.sol index 2ade480e9bc1..559e7e108703 100644 --- a/testdata/fork/Fork.t.sol +++ b/testdata/fork/Fork.t.sol @@ -6,6 +6,7 @@ import "./DssExecLib.sol"; interface Cheats { function store(address account, bytes32 slot, bytes32 value) external; + function activeFork() external returns(uint256); } @@ -27,6 +28,14 @@ contract ForkTest is DSTest { address constant DAI_TOKEN_ADDR = 0x6B175474E89094C44Da98b954EedeAC495271d0F; address constant WETH_TOKEN_ADDR = 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2; + // checks that we can retrieve the fork we launched with + function testActiveFork() public { + Cheats cheatvm = Cheats(HEVM_ADDRESS); + uint256 activeFork = cheatvm.activeFork(); + // launch fork has id `0` + assertEq(activeFork, 0); + } + function testReadState() public { ERC20 DAI = ERC20(DAI_TOKEN_ADDR); assertEq(uint(DAI.decimals()), uint(18), "Failed to read DAI token decimals."); From 1a5b7ad9693c2b1bc834cf62baf1513727909154 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 11 Jul 2022 15:06:40 +0200 Subject: [PATCH 100/102] docs: update cheatcode docs --- testdata/cheats/Cheats.sol | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index 5495532a9c0c..1b0ce1c952e0 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -140,27 
+140,27 @@ interface Cheats { // To revert a snapshot use `revertTo` function snapshot() external returns(uint256); // Revert the state of the evm to a previous snapshot - // takes the snapshot id to revert to. This deletes the snapshot and all snapshots taken after the given snapshot id. + // Takes the snapshot id to revert to. + // This deletes the snapshot and all snapshots taken after the given snapshot id. function revertTo(uint256) external returns(bool); // Creates a new fork with the given endpoint and block and returns the identifier of the fork function createFork(string calldata,uint256) external returns(uint256); - // Creates a new fork with the given endpoint and the latest block and returns the identifier of the fork + // Creates a new fork with the given endpoint and the _latest_ block and returns the identifier of the fork function createFork(string calldata) external returns(uint256); - // Creates _and_ selects a new fork with the given endpoint and block and returns the identifier of the fork + // Creates _and_ also selects a new fork with the given endpoint and block and returns the identifier of the fork function createSelectFork(string calldata,uint256) external returns(uint256); // Creates _and_ also selects a new fork with the given endpoint and the latest block and returns the identifier of the fork function createSelectFork(string calldata) external returns(uint256); - // takes a fork identifier created by `createFork` and changes the state + // Takes a fork identifier created by `createFork` and sets the corresponding forked state as active. function selectFork(uint256) external; /// Returns the currently active fork /// Reverts if no fork is currently active function activeFork() external returns(uint256); // Updates the currently active fork to given block number - // This is similar to `roll` but for the fork + // This is similar to `roll` but for the currently active fork function rollFork(uint256) external; // Updates the given fork to given block number - // Returns false if no matching fork was found - function rollFork(uint256, uint256) external; + function rollFork(uint256 forkId, uint256 blockNumber) external; /// Returns the RPC url for the given alias function rpcUrl(string calldata) external returns(string memory); /// Returns all rpc urls and their aliases `[alias, url][]` From 5fe1c2bb7db4abf51c48449ae7eb9c5e82e6880f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Jul 2022 11:48:03 +0200 Subject: [PATCH 101/102] fix: capture env in snapshot --- evm/src/executor/backend/fuzz.rs | 11 ++++++----- evm/src/executor/backend/mod.rs | 14 ++++++++------ evm/src/executor/backend/snapshot.rs | 8 +++++--- evm/src/executor/inspector/cheatcodes/snapshot.rs | 4 ++-- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index d7fb1472a27f..71f6bc3fa420 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -127,16 +127,16 @@ impl<'a> FuzzBackendWrapper<'a> { } impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { - fn snapshot(&mut self, subroutine: &SubRoutine) -> U256 { + fn snapshot(&mut self, subroutine: &SubRoutine, env: &Env) -> U256 { let id = self .inner .snapshots - .insert(BackendSnapshot::new(self.active_db().clone(), subroutine.clone())); + .insert(BackendSnapshot::new(self.active_db().clone(), subroutine.clone(), env.clone())); trace!(target: "backend::fuzz", "Created new snapshot {}", id); id } - fn revert(&mut self, id: U256, 
subroutine: &SubRoutine) -> Option { + fn revert(&mut self, id: U256, subroutine: &SubRoutine, current: &mut Env) -> Option { if let Some(mut snapshot) = self.inner.snapshots.remove(id).or_else(|| self.backend.snapshots().get(id).cloned()) { @@ -147,9 +147,10 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { } // merge additional logs snapshot.merge(subroutine); - let BackendSnapshot { db, subroutine } = snapshot; - + let BackendSnapshot { db, subroutine, env } = snapshot; self.set_active(db); + update_current_env_with_fork_env(current, env); + trace!(target: "backend::fuzz", "Reverted snapshot {}", id); Some(subroutine) } else { diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index 22f34405f045..f82def23793c 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -30,7 +30,7 @@ pub trait DatabaseExt: Database { /// A snapshot is associated with a new unique id that's created for the snapshot. /// Snapshots can be reverted: [DatabaseExt::revert], however a snapshot can only be reverted /// once. After a successful revert, the same snapshot id cannot be used again. - fn snapshot(&mut self, subroutine: &SubRoutine) -> U256; + fn snapshot(&mut self, subroutine: &SubRoutine, env: &Env) -> U256; /// Reverts the snapshot if it exists /// /// Returns `true` if the snapshot was successfully reverted, `false` if no snapshot for that id @@ -39,7 +39,8 @@ pub trait DatabaseExt: Database { /// **N.B.** While this reverts the state of the evm to the snapshot, it keeps new logs made /// since the snapshots was created. This way we can show logs that were emitted between /// snapshot and its revert. - fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option; + /// This will also revert any changes in the `Env` and replace it with the caputured `Env` of `Self::snapshot` + fn revert(&mut self, id: U256, subroutine: &SubRoutine, env: &mut Env) -> Option; /// Creates and also selects a new fork /// @@ -266,14 +267,14 @@ impl Backend { // === impl a bunch of `revm::Database` adjacent implementations === impl DatabaseExt for Backend { - fn snapshot(&mut self, subroutine: &SubRoutine) -> U256 { + fn snapshot(&mut self, subroutine: &SubRoutine, env: &Env) -> U256 { let id = - self.inner.snapshots.insert(BackendSnapshot::new(self.db.clone(), subroutine.clone())); + self.inner.snapshots.insert(BackendSnapshot::new(self.db.clone(), subroutine.clone(), env.clone())); trace!(target: "backend", "Created new snapshot {}", id); id } - fn revert(&mut self, id: U256, subroutine: &SubRoutine) -> Option { + fn revert(&mut self, id: U256, subroutine: &SubRoutine, current: &mut Env) -> Option { if let Some(mut snapshot) = self.inner.snapshots.remove(id) { // need to check whether DSTest's `failed` variable is set to `true` which means an // error occurred either during the snapshot or even before @@ -283,8 +284,9 @@ impl DatabaseExt for Backend { // merge additional logs snapshot.merge(subroutine); - let BackendSnapshot { db, subroutine } = snapshot; + let BackendSnapshot { db, subroutine,env } = snapshot; self.db = db; + update_current_env_with_fork_env(current, env); trace!(target: "backend", "Reverted snapshot {}", id); Some(subroutine) diff --git a/evm/src/executor/backend/snapshot.rs b/evm/src/executor/backend/snapshot.rs index 24e84e234ad4..37d8883cb700 100644 --- a/evm/src/executor/backend/snapshot.rs +++ b/evm/src/executor/backend/snapshot.rs @@ -1,4 +1,4 @@ -use revm::SubRoutine; +use revm::{SubRoutine, Env}; /// Represents a snapshot taken 
during evm execution #[derive(Clone, Debug)] @@ -6,14 +6,16 @@ pub struct BackendSnapshot { pub db: T, /// The subroutine state at a specific point pub subroutine: SubRoutine, + /// Contains the env at the time of the snapshot + pub env: Env } // === impl BackendSnapshot === impl BackendSnapshot { /// Takes a new snapshot - pub fn new(db: T, subroutine: SubRoutine) -> Self { - Self { db, subroutine } + pub fn new(db: T, subroutine: SubRoutine, env: Env) -> Self { + Self { db, subroutine, env } } /// Called when this snapshot is reverted. diff --git a/evm/src/executor/inspector/cheatcodes/snapshot.rs b/evm/src/executor/inspector/cheatcodes/snapshot.rs index e30251925124..2e92b6727234 100644 --- a/evm/src/executor/inspector/cheatcodes/snapshot.rs +++ b/evm/src/executor/inspector/cheatcodes/snapshot.rs @@ -11,9 +11,9 @@ pub fn apply( call: &HEVMCalls, ) -> Option> { Some(match call { - HEVMCalls::Snapshot(_) => Ok(data.db.snapshot(&data.subroutine).encode().into()), + HEVMCalls::Snapshot(_) => Ok(data.db.snapshot(&data.subroutine, data.env).encode().into()), HEVMCalls::RevertTo(snapshot) => { - let res = if let Some(subroutine) = data.db.revert(snapshot.0, &data.subroutine) { + let res = if let Some(subroutine) = data.db.revert(snapshot.0, &data.subroutine, data.env) { // we reset the evm's subroutine to the state of the snapshot previous state data.subroutine = subroutine; true From b9b91b57ecdd538dbc93864ecfd4bf16e94e9745 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 12 Jul 2022 11:56:07 +0200 Subject: [PATCH 102/102] test: add snapshot tests --- evm/src/executor/backend/fuzz.rs | 16 +++++++++++----- evm/src/executor/backend/mod.rs | 19 ++++++++++++++----- evm/src/executor/backend/snapshot.rs | 4 ++-- .../executor/inspector/cheatcodes/snapshot.rs | 15 ++++++++------- forge/src/multi_runner.rs | 1 + testdata/cheats/RpcUrls.t.sol | 4 ++-- testdata/cheats/Snapshots.t.sol | 19 +++++++++++++++++++ 7 files changed, 57 insertions(+), 21 deletions(-) diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index 71f6bc3fa420..c71f447d268c 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -128,15 +128,21 @@ impl<'a> FuzzBackendWrapper<'a> { impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { fn snapshot(&mut self, subroutine: &SubRoutine, env: &Env) -> U256 { - let id = self - .inner - .snapshots - .insert(BackendSnapshot::new(self.active_db().clone(), subroutine.clone(), env.clone())); + let id = self.inner.snapshots.insert(BackendSnapshot::new( + self.active_db().clone(), + subroutine.clone(), + env.clone(), + )); trace!(target: "backend::fuzz", "Created new snapshot {}", id); id } - fn revert(&mut self, id: U256, subroutine: &SubRoutine, current: &mut Env) -> Option { + fn revert( + &mut self, + id: U256, + subroutine: &SubRoutine, + current: &mut Env, + ) -> Option { if let Some(mut snapshot) = self.inner.snapshots.remove(id).or_else(|| self.backend.snapshots().get(id).cloned()) { diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index f82def23793c..dfcbb34fc495 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -39,7 +39,8 @@ pub trait DatabaseExt: Database { /// **N.B.** While this reverts the state of the evm to the snapshot, it keeps new logs made /// since the snapshots was created. This way we can show logs that were emitted between /// snapshot and its revert. 
- /// This will also revert any changes in the `Env` and replace it with the caputured `Env` of `Self::snapshot` + /// This will also revert any changes in the `Env` and replace it with the caputured `Env` of + /// `Self::snapshot` fn revert(&mut self, id: U256, subroutine: &SubRoutine, env: &mut Env) -> Option; /// Creates and also selects a new fork @@ -268,13 +269,21 @@ impl Backend { impl DatabaseExt for Backend { fn snapshot(&mut self, subroutine: &SubRoutine, env: &Env) -> U256 { - let id = - self.inner.snapshots.insert(BackendSnapshot::new(self.db.clone(), subroutine.clone(), env.clone())); + let id = self.inner.snapshots.insert(BackendSnapshot::new( + self.db.clone(), + subroutine.clone(), + env.clone(), + )); trace!(target: "backend", "Created new snapshot {}", id); id } - fn revert(&mut self, id: U256, subroutine: &SubRoutine, current: &mut Env) -> Option { + fn revert( + &mut self, + id: U256, + subroutine: &SubRoutine, + current: &mut Env, + ) -> Option { if let Some(mut snapshot) = self.inner.snapshots.remove(id) { // need to check whether DSTest's `failed` variable is set to `true` which means an // error occurred either during the snapshot or even before @@ -284,7 +293,7 @@ impl DatabaseExt for Backend { // merge additional logs snapshot.merge(subroutine); - let BackendSnapshot { db, subroutine,env } = snapshot; + let BackendSnapshot { db, subroutine, env } = snapshot; self.db = db; update_current_env_with_fork_env(current, env); diff --git a/evm/src/executor/backend/snapshot.rs b/evm/src/executor/backend/snapshot.rs index 37d8883cb700..e3ab113c849b 100644 --- a/evm/src/executor/backend/snapshot.rs +++ b/evm/src/executor/backend/snapshot.rs @@ -1,4 +1,4 @@ -use revm::{SubRoutine, Env}; +use revm::{Env, SubRoutine}; /// Represents a snapshot taken during evm execution #[derive(Clone, Debug)] @@ -7,7 +7,7 @@ pub struct BackendSnapshot { /// The subroutine state at a specific point pub subroutine: SubRoutine, /// Contains the env at the time of the snapshot - pub env: Env + pub env: Env, } // === impl BackendSnapshot === diff --git a/evm/src/executor/inspector/cheatcodes/snapshot.rs b/evm/src/executor/inspector/cheatcodes/snapshot.rs index 2e92b6727234..8315f51d76a9 100644 --- a/evm/src/executor/inspector/cheatcodes/snapshot.rs +++ b/evm/src/executor/inspector/cheatcodes/snapshot.rs @@ -13,13 +13,14 @@ pub fn apply( Some(match call { HEVMCalls::Snapshot(_) => Ok(data.db.snapshot(&data.subroutine, data.env).encode().into()), HEVMCalls::RevertTo(snapshot) => { - let res = if let Some(subroutine) = data.db.revert(snapshot.0, &data.subroutine, data.env) { - // we reset the evm's subroutine to the state of the snapshot previous state - data.subroutine = subroutine; - true - } else { - false - }; + let res = + if let Some(subroutine) = data.db.revert(snapshot.0, &data.subroutine, data.env) { + // we reset the evm's subroutine to the state of the snapshot previous state + data.subroutine = subroutine; + true + } else { + false + }; Ok(res.encode().into()) } _ => return None, diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index 8cb211626d8e..f1c9dd3a183b 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -77,6 +77,7 @@ impl MultiContractRunner { .collect() } + /// Returns all matching tests grouped by contract grouped by file (file -> (contract -> tests)) pub fn list( &self, filter: &impl TestFilter, diff --git a/testdata/cheats/RpcUrls.t.sol b/testdata/cheats/RpcUrls.t.sol index 2b0cef656dff..89e38f9c05a7 100644 --- 
a/testdata/cheats/RpcUrls.t.sol +++ b/testdata/cheats/RpcUrls.t.sol @@ -9,7 +9,7 @@ contract RpcUrlTest is DSTest { // returns the correct url function testCanGetRpcUrl() public { - string memory url = cheats.rpcUrl("rpcAlias"); + string memory url = cheats.rpcUrl("rpcAlias"); // note: this alias is pre-configured in the test runner assertEq(url, "https://eth-mainnet.alchemyapi.io/v2/Lc7oIGYeL_QvInzI0Wiu_pOZZDEKBrdf"); } @@ -22,7 +22,7 @@ contract RpcUrlTest is DSTest { // can set env and return correct url function testCanSetAndGetURLAndAllUrls() public { // this will fail because alias is not set - cheats.expectRevert("Failed to resolve env var `RPC_ENV_ALIAS`: environment variable not found"); + cheats.expectRevert("Failed to resolve env var `RPC_ENV_ALIAS`: environment variable not found"); string[2][] memory _urls = cheats.rpcUrls(); string memory url = cheats.rpcUrl("rpcAlias"); diff --git a/testdata/cheats/Snapshots.t.sol b/testdata/cheats/Snapshots.t.sol index c58897e3cf58..817b7a1388c6 100644 --- a/testdata/cheats/Snapshots.t.sol +++ b/testdata/cheats/Snapshots.t.sol @@ -32,4 +32,23 @@ contract SnapshotTest is DSTest { assertEq(store.slot1, 20, "snapshot revert for slot 1 unsuccessful"); } + // tests that snapshots can also revert changes to `block` + function testBlockValues() public { + uint256 num = block.number; + uint256 time = block.timestamp; + + uint256 snapshot = cheats.snapshot(); + + cheats.warp(1337); + assertEq(block.timestamp, 1337); + + cheats.roll(99); + assertEq(block.number, 99); + + assert(cheats.revertTo(snapshot)); + + assertEq(block.number, num, "snapshot revert for block.number unsuccessful"); + assertEq(block.timestamp, time, "snapshot revert for block.timestamp unsuccessful"); + } + } \ No newline at end of file
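
The patches above introduce createFork, createSelectFork, selectFork, activeFork and rollFork, and make snapshots capture the EVM Env, but the testdata diffs exercise these pieces separately. Below is a minimal sketch of how they compose in a single test, assuming the pre-configured "rpcAlias" endpoint and the Cheats interface from testdata/cheats/Cheats.sol; the contract name and block numbers are illustrative, and the final assertion assumes that reverting a snapshot restores block.* values, as implied by patches 101 and 102.

// SPDX-License-Identifier: Unlicense
pragma solidity >=0.8.0;

import "ds-test/test.sol";
import "./Cheats.sol";

contract ForkWorkflowSketch is DSTest {
    Cheats constant cheats = Cheats(HEVM_ADDRESS);

    function testForkSnapshotWorkflow() public {
        // create a fork pinned to a block and select it in one call
        uint256 fork = cheats.createSelectFork("rpcAlias", 14_608_400);
        assertEq(cheats.activeFork(), fork);
        assertEq(block.number, 14_608_400);

        // snapshots capture the Env as well as the database state
        uint256 snapshot = cheats.snapshot();

        // rolling the active fork updates block.number right away
        cheats.rollFork(14_608_401);
        assertEq(block.number, 14_608_401);

        // reverting the snapshot is expected to roll block.number back too,
        // since the captured Env is restored on revert (assumption based on patches 101/102)
        assert(cheats.revertTo(snapshot));
        assertEq(block.number, 14_608_400);
    }
}

Capturing the Env in BackendSnapshot is what makes the final assertion plausible: revertTo restores block.* values even after rollFork has advanced them, mirroring what testBlockValues checks for warp and roll.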