diff --git a/Cargo.lock b/Cargo.lock
index b0713d6f..491ce9a8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8632,4 +8632,4 @@ checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa"
 dependencies = [
  "cc",
  "pkg-config",
-]
+]
\ No newline at end of file
diff --git a/crates/cli/src/cli.rs b/crates/cli/src/cli.rs
index 10fb392a..4159068c 100644
--- a/crates/cli/src/cli.rs
+++ b/crates/cli/src/cli.rs
@@ -220,6 +220,21 @@ pub struct Cli {
     #[arg(long, value_name = "PATH", value_parser= parse_genesis_file)]
     pub init: Option<Genesis>,
 
+    /// This is an alias for both --load-state and --dump-state.
+    ///
+    /// It initializes the chain with the state and block environment stored at the file, if it
+    /// exists, and dumps the chain's state on exit.
+    #[arg(
+        long,
+        value_name = "PATH",
+        conflicts_with_all = &[
+            "init",
+            "dump_state",
+            "load_state"
+        ]
+    )]
+    pub state: Option<PathBuf>,
+
     /// Interval in seconds at which the state and block environment is to be dumped to disk.
     ///
     /// See --state and --dump-state
@@ -241,6 +256,10 @@ pub struct Cli {
     #[arg(long, conflicts_with = "init", default_value = "false")]
     pub preserve_historical_states: bool,
 
+    /// Initialize the chain from a previously saved state snapshot.
+    #[arg(long, value_name = "PATH", conflicts_with = "init")]
+    pub load_state: Option<PathBuf>,
+
     /// BIP39 mnemonic phrase used for generating accounts.
     /// Cannot be used if `mnemonic_random` or `mnemonic_seed` are used.
     #[arg(long, short, conflicts_with_all = &["mnemonic_seed", "mnemonic_random"], help_heading = "Account Configuration")]
@@ -436,9 +455,11 @@ impl Cli {
             .with_allow_origin(self.allow_origin)
             .with_no_cors(self.no_cors)
             .with_transaction_order(self.order)
+            .with_state(self.state)
             .with_state_interval(self.state_interval)
             .with_dump_state(self.dump_state)
-            .with_preserve_historical_states(self.preserve_historical_states);
+            .with_preserve_historical_states(self.preserve_historical_states)
+            .with_load_state(self.load_state);
 
         if self.emulate_evm && self.dev_system_contracts != Some(SystemContractsOptions::Local) {
             return Err(eyre::eyre!(
@@ -623,6 +644,7 @@ mod tests {
         net::{IpAddr, Ipv4Addr},
     };
     use tempdir::TempDir;
+    use zksync_types::{H160, U256};
 
     #[test]
     fn can_parse_host() {
@@ -689,8 +711,6 @@ mod tests {
             TxPool::new(ImpersonationManager::default(), config.transaction_order),
             BlockSealer::new(BlockSealerMode::noop()),
         );
-        let test_address = zksync_types::H160::random();
-        node.set_rich_account(test_address, 1000000u64.into());
 
         let mut state_dumper = PeriodicStateDumper::new(
             node.clone(),
@@ -718,4 +738,70 @@ mod tests {
 
         Ok(())
     }
+
+    #[tokio::test]
+    async fn test_load_state() -> anyhow::Result<()> {
+        let temp_dir = TempDir::new("state-load-test").expect("failed creating temporary dir");
+        let state_path = temp_dir.path().join("state.json");
+
+        let config = anvil_zksync_config::TestNodeConfig {
+            dump_state: Some(state_path.clone()),
+            state_interval: Some(1),
+            preserve_historical_states: true,
+            ..Default::default()
+        };
+
+        let node = InMemoryNode::new(
+            None,
+            None,
+            &config,
+            TimestampManager::default(),
+            ImpersonationManager::default(),
+            TxPool::new(ImpersonationManager::default(), config.transaction_order),
+            BlockSealer::new(BlockSealerMode::noop()),
+        );
+        let test_address = H160::from_low_u64_be(12345);
+        node.set_rich_account(test_address, U256::from(1000000u64));
+
+        let mut state_dumper = PeriodicStateDumper::new(
+            node.clone(),
+            config.dump_state.clone(),
+            std::time::Duration::from_secs(1),
+            config.preserve_historical_states,
+        );
+
+        let dumper_handle = tokio::spawn(async move {
+            tokio::select! {
+                _ = &mut state_dumper => {}
+            }
+            state_dumper.dump().await;
+        });
+
+        tokio::time::sleep(std::time::Duration::from_secs(2)).await;
+
+        dumper_handle.abort();
+        let _ = dumper_handle.await;
+
+        // assert the state json file was created
+        std::fs::read_to_string(&state_path).expect("Expected state file to be created");
+
+        let new_config = anvil_zksync_config::TestNodeConfig::default();
+        let new_node = InMemoryNode::new(
+            None,
+            None,
+            &new_config,
+            TimestampManager::default(),
+            ImpersonationManager::default(),
+            TxPool::new(ImpersonationManager::default(), config.transaction_order),
+            BlockSealer::new(BlockSealerMode::noop()),
+        );
+
+        new_node.load_state(zksync_types::web3::Bytes(std::fs::read(&state_path)?))?;
+
+        // assert the balance from the loaded state is correctly applied
+        let balance = new_node.get_balance_impl(test_address, None).await.unwrap();
+        assert_eq!(balance, U256::from(1000000u64));
+
+        Ok(())
+    }
 }
diff --git a/crates/cli/src/main.rs b/crates/cli/src/main.rs
index 37c3e519..3bf7f327 100644
--- a/crates/cli/src/main.rs
+++ b/crates/cli/src/main.rs
@@ -278,28 +278,33 @@ async fn main() -> anyhow::Result<()> {
     let any_server_stopped =
         futures::future::select_all(server_handles.into_iter().map(|h| Box::pin(h.stopped())));
 
-    let dump_state = config.dump_state.clone();
+    // Load state from `--load-state` if provided
+    if let Some(ref load_state_path) = config.load_state {
+        let bytes = std::fs::read(load_state_path).expect("Failed to read load state file");
+        node.load_state(zksync_types::web3::Bytes(bytes))?;
+    }
+    if let Some(ref state_path) = config.state {
+        let bytes = std::fs::read(state_path).expect("Failed to read state file");
+        node.load_state(zksync_types::web3::Bytes(bytes))?;
+    }
+
+    let state_path = config.dump_state.clone().or_else(|| config.state.clone());
     let dump_interval = config
         .state_interval
         .map(Duration::from_secs)
         .unwrap_or(Duration::from_secs(60)); // Default to 60 seconds
     let preserve_historical_states = config.preserve_historical_states;
     let node_for_dumper = node.clone();
-    let state_dumper = tokio::task::spawn(PeriodicStateDumper::new(
+    let state_dumper = PeriodicStateDumper::new(
         node_for_dumper,
-        dump_state,
+        state_path,
         dump_interval,
         preserve_historical_states,
-    ));
+    );
 
     let system_contracts =
         SystemContracts::from_options(&config.system_contracts_options, config.use_evm_emulator);
-    let block_producer_handle = tokio::task::spawn(BlockProducer::new(
-        node,
-        pool,
-        block_sealer,
-        system_contracts,
-    ));
+    let block_producer_handle = BlockProducer::new(node, pool, block_sealer, system_contracts);
 
     config.print(fork_print_info.as_ref());
 
diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs
index 88acf7c3..0cc553da 100644
--- a/crates/config/src/config.rs
+++ b/crates/config/src/config.rs
@@ -121,13 +121,16 @@ pub struct TestNodeConfig {
     pub no_cors: bool,
     /// How transactions are sorted in the mempool
    pub transaction_order: TransactionOrder,
-    /// State configuration
+    /// Path to load/dump the state from
+    pub state: Option<PathBuf>,
     /// Path to dump the state to
     pub dump_state: Option<PathBuf>,
     /// Interval to dump the state
     pub state_interval: Option<u64>,
     /// Preserve historical states
     pub preserve_historical_states: bool,
+    /// State to load
+    pub load_state: Option<PathBuf>,
 }
 
 impl Default for TestNodeConfig {
@@ -195,9 +198,11 @@ impl Default for TestNodeConfig {
             no_cors: false,
 
             // state configuration
+            state: None,
             dump_state: None,
             state_interval: None,
             preserve_historical_states: false,
+            load_state: None,
         }
     }
 }
@@ -918,6 +923,13 @@ impl TestNodeConfig {
         self
     }
 
+    /// Set the state
+    #[must_use]
+    pub fn with_state(mut self, state: Option<PathBuf>) -> Self {
+        self.state = state;
+        self
+    }
+
     /// Set the state dump path
     #[must_use]
     pub fn with_dump_state(mut self, dump_state: Option<PathBuf>) -> Self {
@@ -938,4 +950,11 @@ impl TestNodeConfig {
         self.preserve_historical_states = preserve_historical_states;
         self
     }
+
+    /// Set the state to load
+    #[must_use]
+    pub fn with_load_state(mut self, load_state: Option<PathBuf>) -> Self {
+        self.load_state = load_state;
+        self
+    }
 }
diff --git a/crates/core/src/fork.rs b/crates/core/src/fork.rs
index 79368e8d..bbbdda41 100644
--- a/crates/core/src/fork.rs
+++ b/crates/core/src/fork.rs
@@ -539,7 +539,7 @@ impl ForkDetails {
             .get_block_by_hash(root_hash, true)
             .await
             .map_err(|error| eyre!(error))?;
-        let block = opt_block.ok_or_else(|| {
+        let mut block = opt_block.ok_or_else(|| {
             eyre!(
                 "Could not find block #{:?} ({:#x}) in {:?}",
                 miniblock,
@@ -548,6 +548,7 @@ impl ForkDetails {
             )
         })?;
         let l1_batch_number = block_details.l1_batch_number;
+        block.l1_batch_number = Some(l1_batch_number.0.into());
 
         if !block_details
             .protocol_version
diff --git a/e2e-tests-rust/Cargo.lock b/e2e-tests-rust/Cargo.lock
index f9953dba..aa9071b1 100644
--- a/e2e-tests-rust/Cargo.lock
+++ b/e2e-tests-rust/Cargo.lock
@@ -944,8 +944,10 @@ dependencies = [
  "anvil_zksync_core",
  "anyhow",
  "async-trait",
+ "flate2",
  "fs2",
  "futures 0.3.31",
+ "hex",
  "http 1.1.0",
  "itertools 0.13.0",
  "reqwest 0.12.9",
diff --git a/e2e-tests-rust/Cargo.toml b/e2e-tests-rust/Cargo.toml
index fa85b2f2..a7f3030b 100644
--- a/e2e-tests-rust/Cargo.toml
+++ b/e2e-tests-rust/Cargo.toml
@@ -25,6 +25,8 @@ tower = "0.5"
 http = "1.1.0"
 anvil_zksync_core = { path = "../crates/core" }
 tempdir = "0.3.7"
+flate2 = "1.0"
+hex = "0.4"
 
 [dev-dependencies]
 
diff --git a/e2e-tests-rust/tests/lib.rs b/e2e-tests-rust/tests/lib.rs
index 9ecca27c..23fc97f5 100644
--- a/e2e-tests-rust/tests/lib.rs
+++ b/e2e-tests-rust/tests/lib.rs
@@ -4,23 +4,26 @@ use alloy::providers::Provider;
 use alloy::{
     network::primitives::BlockTransactionsKind, primitives::U256, signers::local::PrivateKeySigner,
 };
+use anvil_zksync_core::node::VersionedState;
+use anvil_zksync_core::utils::write_json_file;
 use anvil_zksync_e2e_tests::{
     get_node_binary_path, init_testing_provider, init_testing_provider_with_client, AnvilZKsyncApi,
     ReceiptExt, ZksyncWalletProviderExt, DEFAULT_TX_VALUE,
 };
+use anyhow::Context;
+use flate2::read::GzDecoder;
 use http::header::{
     HeaderMap, HeaderValue, ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS,
     ACCESS_CONTROL_ALLOW_ORIGIN, ORIGIN,
 };
+use std::io::Read;
+use std::{convert::identity, fs, thread::sleep, time::Duration};
+use tempdir::TempDir;
 
 const SOME_ORIGIN: HeaderValue = HeaderValue::from_static("http://some.origin");
 const OTHER_ORIGIN: HeaderValue = HeaderValue::from_static("http://other.origin");
 const ANY_ORIGIN: HeaderValue = HeaderValue::from_static("*");
 
-use anvil_zksync_core::node::VersionedState;
-use std::{convert::identity, fs, thread::sleep, time::Duration};
-use tempdir::TempDir;
-
 #[tokio::test]
 async fn interval_sealing_finalization() -> anyhow::Result<()> {
     // Test that we can submit a transaction and wait for it to finalize when anvil-zksync is
@@ -564,9 +567,9 @@ async fn dump_state_on_run() -> anyhow::Result<()> {
     })
     .await?;
 
-    provider.tx().finalize().await?;
+    let receipt = provider.tx().finalize().await?;
+    let tx_hash = receipt.transaction_hash().to_string();
 
-    // Allow some time for the state to be dumped
     sleep(Duration::from_secs(2));
 
     drop(provider);
@@ -578,8 +581,8 @@ async fn dump_state_on_run() -> anyhow::Result<()> {
     );
 
     let dumped_data = fs::read_to_string(&dump_path)?;
-    let state: VersionedState = serde_json::from_str(&dumped_data)
-        .map_err(|e| anyhow::anyhow!("Failed to deserialize state: {}", e))?;
+    let state: VersionedState =
+        serde_json::from_str(&dumped_data).context("Failed to deserialize state")?;
 
     match state {
         VersionedState::V1 { version: _, state } => {
@@ -591,6 +594,17 @@ async fn dump_state_on_run() -> anyhow::Result<()> {
                 !state.transactions.is_empty(),
                 "state_dump.json should contain at least one transaction"
             );
+            let tx_exists = state.transactions.iter().any(|tx| {
+                let tx_hash_full =
+                    format!("0x{}", hex::encode(tx.receipt.transaction_hash.as_bytes()));
+                tx_hash_full == tx_hash
+            });
+
+            assert!(
+                tx_exists,
+                "The state dump should contain the transaction with hash: {:?}",
+                tx_hash
+            );
         }
         VersionedState::Unknown { version } => {
             panic!("Encountered unknown state version: {}", version);
@@ -601,6 +615,8 @@ async fn dump_state_on_run() -> anyhow::Result<()> {
 }
 
 #[tokio::test]
+#[ignore]
+// TODO: Investigate a better way to test against fork to avoid flakiness. See: https://github.com/matter-labs/anvil-zksync/issues/508
 async fn dump_state_on_fork() -> anyhow::Result<()> {
     let temp_dir = TempDir::new("state-fork-test").expect("failed creating temporary dir");
     let dump_path = temp_dir.path().join("state_dump_fork.json");
@@ -612,13 +628,13 @@ async fn dump_state_on_fork() -> anyhow::Result<()> {
             .arg("1")
             .arg("--dump-state")
             .arg(dump_path_clone.to_str().unwrap())
-            .fork("mainnet")
+            .fork("sepolia-testnet")
     })
     .await?;
 
-    provider.tx().finalize().await?;
+    let receipt = provider.tx().finalize().await?;
+    let tx_hash = receipt.transaction_hash().to_string();
 
-    // Allow some time for the state to be dumped
     sleep(Duration::from_secs(2));
 
     drop(provider);
@@ -630,8 +646,8 @@ async fn dump_state_on_fork() -> anyhow::Result<()> {
     );
 
     let dumped_data = fs::read_to_string(&dump_path)?;
-    let state: VersionedState = serde_json::from_str(&dumped_data)
-        .map_err(|e| anyhow::anyhow!("Failed to deserialize state: {}", e))?;
+    let state: VersionedState =
+        serde_json::from_str(&dumped_data).context("Failed to deserialize state")?;
 
     match state {
         VersionedState::V1 { version: _, state } => {
@@ -643,6 +659,16 @@ async fn dump_state_on_fork() -> anyhow::Result<()> {
                 !state.transactions.is_empty(),
                 "state_dump_fork.json should contain at least one transaction"
             );
+            let tx_exists = state.transactions.iter().any(|tx| {
+                let tx_hash_full =
+                    format!("0x{}", hex::encode(tx.receipt.transaction_hash.as_bytes()));
+                tx_hash_full == tx_hash
+            });
+            assert!(
+                tx_exists,
+                "The state dump should contain the transaction with hash: {:?}",
+                tx_hash
+            );
         }
         VersionedState::Unknown { version } => {
             panic!("Encountered unknown state version: {}", version);
@@ -651,3 +677,104 @@ async fn dump_state_on_fork() -> anyhow::Result<()> {
 
     Ok(())
 }
+
+#[tokio::test]
+async fn load_state_on_run() -> anyhow::Result<()> {
+    let temp_dir = TempDir::new("load-state-test").expect("failed creating temporary dir");
+    let dump_path = temp_dir.path().join("load_state_run.json");
+    let provider = init_testing_provider(identity).await?;
+    let receipts = [
+        provider.tx().finalize().await?,
+        provider.tx().finalize().await?,
+    ];
+    let blocks = provider.get_blocks_by_receipts(&receipts).await?;
+    let state_bytes = provider.anvil_dump_state().await?;
+    drop(provider);
+
+    let mut decoder = GzDecoder::new(&state_bytes.0[..]);
+    let mut json_str = String::new();
+    decoder.read_to_string(&mut json_str).unwrap();
+    let state: VersionedState = serde_json::from_str(&json_str).unwrap();
+    write_json_file(&dump_path, &state)?;
+
+    let dump_path_clone = dump_path.clone();
+    let new_provider = init_testing_provider(move |node| {
+        node.path(get_node_binary_path())
+            .arg("--state-interval")
+            .arg("1")
+            .arg("--load-state")
+            .arg(dump_path_clone.to_str().unwrap())
+    })
+    .await?;
+
+    new_provider.assert_has_receipts(&receipts).await?;
+    new_provider.assert_has_blocks(&blocks).await?;
+    new_provider
+        .assert_balance(receipts[0].sender()?, DEFAULT_TX_VALUE)
+        .await?;
+    new_provider
+        .assert_balance(receipts[1].sender()?, DEFAULT_TX_VALUE)
+        .await?;
+
+    drop(new_provider);
+
+    assert!(
+        dump_path.exists(),
+        "State dump file should still exist at {:?}",
+        dump_path
+    );
+
+    Ok(())
+}
+
+#[tokio::test]
+#[ignore]
+// TODO: Investigate a better way to test against fork to avoid flakiness. See: https://github.com/matter-labs/anvil-zksync/issues/508
+async fn load_state_on_fork() -> anyhow::Result<()> {
+    let temp_dir = TempDir::new("load-state-fork-test").expect("failed creating temporary dir");
+    let dump_path = temp_dir.path().join("load_state_fork.json");
+    let provider = init_testing_provider(identity).await?;
+    let receipts = [
+        provider.tx().finalize().await?,
+        provider.tx().finalize().await?,
+    ];
+    let blocks = provider.get_blocks_by_receipts(&receipts).await?;
+    let state_bytes = provider.anvil_dump_state().await?;
+    drop(provider);
+
+    let mut decoder = GzDecoder::new(&state_bytes.0[..]);
+    let mut json_str = String::new();
+    decoder.read_to_string(&mut json_str).unwrap();
+    let state: VersionedState = serde_json::from_str(&json_str).unwrap();
+    write_json_file(&dump_path, &state)?;
+
+    let dump_path_clone = dump_path.clone();
+    let new_provider = init_testing_provider(move |node| {
+        node.path(get_node_binary_path())
+            .arg("--state-interval")
+            .arg("1")
+            .arg("--load-state")
+            .arg(dump_path_clone.to_str().unwrap())
+            .fork("sepolia-testnet")
+    })
+    .await?;
+
+    new_provider.assert_has_receipts(&receipts).await?;
+    new_provider.assert_has_blocks(&blocks).await?;
+    new_provider
+        .assert_balance(receipts[0].sender()?, DEFAULT_TX_VALUE)
+        .await?;
+    new_provider
+        .assert_balance(receipts[1].sender()?, DEFAULT_TX_VALUE)
+        .await?;
+
+    drop(new_provider);
+
+    assert!(
+        dump_path.exists(),
+        "State dump file should still exist at {:?}",
+        dump_path
+    );
+
+    Ok(())
+}
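Not part of the patch: a minimal usage sketch of how the new state options are meant to compose, based only on the builder calls and config fields referenced in the diff (`with_state`, `with_state_interval`, `with_preserve_historical_states` on `anvil_zksync_config::TestNodeConfig`); the `PathBuf`/`u64` parameter types are assumptions inferred from the PATH-valued flags and the seconds-based interval.

// Sketch only: `--state <PATH>` is the CLI equivalent of pointing both
// --load-state and --dump-state at the same file.
use std::path::PathBuf;
use anvil_zksync_config::TestNodeConfig;

fn state_config() -> TestNodeConfig {
    TestNodeConfig::default()
        // Read the snapshot on startup (if it exists) and dump back to it on exit.
        .with_state(Some(PathBuf::from("state.json")))
        // Periodic dump cadence in seconds; main.rs falls back to 60 when unset.
        .with_state_interval(Some(1))
        // Keep historical states instead of pruning them.
        .with_preserve_historical_states(true)
}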