diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 98b7d7ea1a02..191c69180631 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -29,6 +29,7 @@ jobs: - name: Init run: | ci_run zk + ci_run run_retried rustup show ci_run zk db setup # This does both linting and "building". We're using `zk lint prover` as it's common practice within our repo diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 9ee11016f95a..4b67a8ab5cd2 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -28,6 +28,7 @@ jobs: - name: Setup db run: | ci_run zk + ci_run run_retried rustup show ci_run zk db migrate - name: Lints diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 72e75e085b16..b15bc0c41997 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -53,6 +53,7 @@ jobs: - name: Init run: | ci_run zk + ci_run run_retried rustup show ci_run zk run yarn ci_run zk db setup ci_run zk compiler all @@ -192,6 +193,7 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run zk + ci_run run_retried rustup show if [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then ci_run zk env dev_validium_docker ci_run zk config compile dev_validium_docker @@ -333,6 +335,7 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run zk + ci_run run_retried rustup show if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then ci_run zk config compile elif [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index b2afa7a6f60a..6a8813a0a343 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -60,6 +60,7 @@ jobs: - name: Init run: | ci_run zk + ci_run run_retried rustup show ci_run zk db setup - name: Prover unit tests diff --git a/Cargo.lock b/Cargo.lock index cfe47a2a4b1e..be0ffd1566b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8282,23 +8282,39 @@ name = "zksync_contract_verifier" version = "0.1.0" dependencies = [ "anyhow", - "chrono", "ctrlc", - "ethabi", "futures 0.3.28", + "prometheus_exporter", + "structopt", + "tokio", + "tracing", + "vlog", + "zksync_config", + "zksync_contract_verifier_lib", + "zksync_dal", + "zksync_env_config", + "zksync_queued_job_processor", + "zksync_utils", +] + +[[package]] +name = "zksync_contract_verifier_lib" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "ethabi", "hex", "lazy_static", - "prometheus_exporter", "regex", + "semver", "serde", "serde_json", - "structopt", "tempfile", "thiserror", "tokio", "tracing", "vise", - "vlog", "zksync_config", "zksync_contracts", "zksync_dal", @@ -8464,6 +8480,7 @@ dependencies = [ "tokio", "tracing", "vise", + "zksync_concurrency", "zksync_consensus_roles", "zksync_consensus_storage", "zksync_contracts", @@ -8471,6 +8488,7 @@ dependencies = [ "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", + "zksync_test_account", "zksync_types", "zksync_utils", ] @@ -8625,6 +8643,7 @@ dependencies = [ "zksync_node_consensus", 
"zksync_node_db_pruner", "zksync_node_fee_model", + "zksync_node_framework", "zksync_node_genesis", "zksync_node_sync", "zksync_object_store", @@ -8841,6 +8860,7 @@ dependencies = [ "zksync_consensus_roles", "zksync_consensus_storage", "zksync_consensus_utils", + "zksync_contracts", "zksync_dal", "zksync_l1_contract_interface", "zksync_merkle_tree", @@ -8935,6 +8955,7 @@ dependencies = [ "zksync_metadata_calculator", "zksync_node_api_server", "zksync_node_consensus", + "zksync_node_db_pruner", "zksync_node_fee_model", "zksync_node_sync", "zksync_object_store", @@ -9371,6 +9392,7 @@ version = "0.1.0" dependencies = [ "ethabi", "hex", + "rand 0.8.5", "zksync_contracts", "zksync_eth_signer", "zksync_system_constants", diff --git a/Cargo.toml b/Cargo.toml index de664288e150..5d9f6adf37ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,6 +38,7 @@ members = [ "core/lib/basic_types", "core/lib/config", "core/lib/constants", + "core/lib/contract_verifier", "core/lib/contracts", "core/lib/crypto", "core/lib/circuit_breaker", @@ -212,6 +213,7 @@ zksync = { path = "sdk/zksync-rs" } zksync_basic_types = { path = "core/lib/basic_types" } zksync_circuit_breaker = { path = "core/lib/circuit_breaker" } zksync_config = { path = "core/lib/config" } +zksync_contract_verifier_lib = { path = "core/lib/contract_verifier" } zksync_contracts = { path = "core/lib/contracts" } zksync_core_leftovers = { path = "core/lib/zksync_core_leftovers" } zksync_crypto = { path = "core/lib/crypto" } diff --git a/checks-config/era.dic b/checks-config/era.dic index 0c84ce8c2552..3f4c8fc8fa4f 100644 --- a/checks-config/era.dic +++ b/checks-config/era.dic @@ -962,6 +962,7 @@ zksync_merkle_tree TreeMetadata delegator decrement +whitelisted Bbellman Sbellman DCMAKE diff --git a/core/bin/contract-verifier/Cargo.toml b/core/bin/contract-verifier/Cargo.toml index 49e5469998c8..3e9832f995f9 100644 --- a/core/bin/contract-verifier/Cargo.toml +++ b/core/bin/contract-verifier/Cargo.toml @@ -11,11 +11,10 @@ categories.workspace = true publish = false [dependencies] -zksync_types.workspace = true zksync_dal.workspace = true zksync_env_config.workspace = true zksync_config.workspace = true -zksync_contracts.workspace = true +zksync_contract_verifier_lib.workspace = true zksync_queued_job_processor.workspace = true zksync_utils.workspace = true prometheus_exporter.workspace = true @@ -25,15 +24,5 @@ anyhow.workspace = true tokio = { workspace = true, features = ["full"] } futures.workspace = true ctrlc.workspace = true -thiserror.workspace = true -chrono.workspace = true -serde_json.workspace = true -ethabi.workspace = true -vise.workspace = true -hex.workspace = true -serde = { workspace = true, features = ["derive"] } structopt.workspace = true -lazy_static.workspace = true -tempfile.workspace = true -regex.workspace = true tracing.workspace = true diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index f1a7c5f226cf..118e7f41be97 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -9,19 +9,12 @@ use zksync_config::{ configs::{ObservabilityConfig, PrometheusConfig}, ApiConfig, ContractVerifierConfig, }; +use zksync_contract_verifier_lib::ContractVerifier; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_env_config::FromEnv; use zksync_queued_job_processor::JobProcessor; use zksync_utils::{wait_for_tasks::ManagedTasks, workspace_dir_or_current_dir}; -use crate::verifier::ContractVerifier; - -pub mod error; -mod metrics; -pub mod verifier; -pub 
mod zksolc_utils; -pub mod zkvyper_utils; - async fn update_compiler_versions(connection_pool: &ConnectionPool) { let mut storage = connection_pool.connection().await.unwrap(); let mut transaction = storage.start_transaction().await.unwrap(); diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index d4a883b190f4..ee6aa08be9da 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -42,6 +42,7 @@ zksync_metadata_calculator.workspace = true zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true +zksync_node_framework.workspace = true vlog.workspace = true zksync_concurrency.workspace = true diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index f4580af6d63e..0adf3ddf8cb5 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -3,6 +3,7 @@ use std::{collections::HashSet, net::Ipv4Addr, str::FromStr, sync::Arc, time::Du use anyhow::Context as _; use clap::Parser; use metrics::EN_METRICS; +use node_builder::ExternalNodeBuilder; use tokio::{ sync::{oneshot, watch, RwLock}, task::{self, JoinHandle}, @@ -63,6 +64,7 @@ mod config; mod init; mod metadata; mod metrics; +mod node_builder; #[cfg(test)] mod tests; @@ -426,10 +428,11 @@ async fn run_api( .build() .await .context("failed to build a proxy_cache_updater_pool")?; - task_handles.push(tokio::spawn(tx_proxy.run_account_nonce_sweeper( - proxy_cache_updater_pool.clone(), - stop_receiver.clone(), - ))); + task_handles.push(tokio::spawn( + tx_proxy + .account_nonce_sweeper_task(proxy_cache_updater_pool.clone()) + .run(stop_receiver.clone()), + )); let fee_params_fetcher_handle = tokio::spawn(fee_params_fetcher.clone().run(stop_receiver.clone())); @@ -701,6 +704,10 @@ struct Cli { /// Comma-separated list of components to launch. #[arg(long, default_value = "all")] components: ComponentsToRun, + + /// Run the node using the node framework. + #[arg(long)] + use_node_framework: bool, } #[derive(Debug, Clone, Copy, PartialEq, Hash, Eq)] @@ -784,6 +791,22 @@ async fn main() -> anyhow::Result<()> { .fetch_remote(main_node_client.as_ref()) .await .context("failed fetching remote part of node config from main node")?; + + // If the node framework is used, run the node. + if opt.use_node_framework { + // We run the node from a different thread, since the current thread is in tokio context. + std::thread::spawn(move || { + let node = + ExternalNodeBuilder::new(config).build(opt.components.0.into_iter().collect())?; + node.run()?; + anyhow::Ok(()) + }) + .join() + .expect("Failed to run the node")?; + + return Ok(()); + } + if let Some(threshold) = config.optional.slow_query_threshold() { ConnectionPool::::global_config().set_slow_query_threshold(threshold)?; } diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs new file mode 100644 index 000000000000..5eaff63d20a0 --- /dev/null +++ b/core/bin/external_node/src/node_builder.rs @@ -0,0 +1,508 @@ +//! This module provides a "builder" for the external node, +//! as well as an interface to run the node with the specified components. 
+ +use anyhow::Context as _; +use zksync_config::{ + configs::{ + api::{HealthCheckConfig, MerkleTreeApiConfig}, + database::MerkleTreeMode, + DatabaseSecrets, + }, + PostgresConfig, +}; +use zksync_metadata_calculator::{MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig}; +use zksync_node_api_server::{tx_sender::ApiContracts, web3::Namespace}; +use zksync_node_framework::{ + implementations::layers::{ + batch_status_updater::BatchStatusUpdaterLayer, + commitment_generator::CommitmentGeneratorLayer, + consensus::{ConsensusLayer, Mode}, + consistency_checker::ConsistencyCheckerLayer, + healtcheck_server::HealthCheckLayer, + l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, + main_node_client::MainNodeClientLayer, + main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, + metadata_calculator::MetadataCalculatorLayer, + pools_layer::PoolsLayerBuilder, + postgres_metrics::PostgresMetricsLayer, + prometheus_exporter::PrometheusExporterLayer, + pruning::PruningLayer, + query_eth_client::QueryEthClientLayer, + sigint::SigintHandlerLayer, + state_keeper::{ + external_io::ExternalIOLayer, main_batch_executor::MainBatchExecutorLayer, + output_handler::OutputHandlerLayer, StateKeeperLayer, + }, + sync_state_updater::SyncStateUpdaterLayer, + tree_data_fetcher::TreeDataFetcherLayer, + validate_chain_ids::ValidateChainIdsLayer, + web3_api::{ + caches::MempoolCacheLayer, + server::{Web3ServerLayer, Web3ServerOptionalConfig}, + tree_api_client::TreeApiClientLayer, + tx_sender::{PostgresStorageCachesConfig, TxSenderLayer}, + tx_sink::TxSinkLayer, + }, + }, + service::{ZkStackService, ZkStackServiceBuilder}, +}; +use zksync_state::RocksdbStorageOptions; + +use crate::{ + config::{self, ExternalNodeConfig}, + Component, +}; + +/// Builder for the external node. +#[derive(Debug)] +pub(crate) struct ExternalNodeBuilder { + node: ZkStackServiceBuilder, + config: ExternalNodeConfig, +} + +impl ExternalNodeBuilder { + pub fn new(config: ExternalNodeConfig) -> Self { + Self { + node: ZkStackServiceBuilder::new(), + config, + } + } + + fn add_sigint_handler_layer(mut self) -> anyhow::Result { + self.node.add_layer(SigintHandlerLayer); + Ok(self) + } + + fn add_pools_layer(mut self) -> anyhow::Result { + // Note: the EN config doesn't currently support specifying configuration for replicas, + // so we reuse the master configuration for that purpose. + // Settings unconditionally set to `None` are either not supported by the EN configuration layer + // or are not used in the context of the external node. 
+ let config = PostgresConfig { + max_connections: Some(self.config.postgres.max_connections), + max_connections_master: Some(self.config.postgres.max_connections), + acquire_timeout_sec: None, + statement_timeout_sec: None, + long_connection_threshold_ms: None, + slow_query_threshold_ms: self + .config + .optional + .slow_query_threshold() + .map(|d| d.as_millis() as u64), + test_server_url: None, + test_prover_url: None, + }; + let secrets = DatabaseSecrets { + server_url: Some(self.config.postgres.database_url()), + server_replica_url: Some(self.config.postgres.database_url()), + prover_url: None, + }; + let pools_layer = PoolsLayerBuilder::empty(config, secrets) + .with_master(true) + .with_replica(true) + .build(); + self.node.add_layer(pools_layer); + Ok(self) + } + + fn add_postgres_metrics_layer(mut self) -> anyhow::Result { + self.node.add_layer(PostgresMetricsLayer); + Ok(self) + } + + fn add_main_node_client_layer(mut self) -> anyhow::Result { + let layer = MainNodeClientLayer::new( + self.config.required.main_node_url.clone(), + self.config.optional.main_node_rate_limit_rps, + self.config.required.l2_chain_id, + ); + self.node.add_layer(layer); + Ok(self) + } + + fn add_healthcheck_layer(mut self) -> anyhow::Result { + let healthcheck_config = HealthCheckConfig { + port: self.config.required.healthcheck_port, + slow_time_limit_ms: self + .config + .optional + .healthcheck_slow_time_limit() + .map(|d| d.as_millis() as u64), + hard_time_limit_ms: self + .config + .optional + .healthcheck_hard_time_limit() + .map(|d| d.as_millis() as u64), + }; + self.node.add_layer(HealthCheckLayer(healthcheck_config)); + Ok(self) + } + + fn add_prometheus_exporter_layer(mut self) -> anyhow::Result { + if let Some(prom_config) = self.config.observability.prometheus() { + self.node.add_layer(PrometheusExporterLayer(prom_config)); + } else { + tracing::info!("No configuration for prometheus exporter, skipping"); + } + Ok(self) + } + + fn add_query_eth_client_layer(mut self) -> anyhow::Result { + let query_eth_client_layer = QueryEthClientLayer::new( + self.config.required.l1_chain_id, + self.config.required.eth_client_url.clone(), + ); + self.node.add_layer(query_eth_client_layer); + Ok(self) + } + + fn add_state_keeper_layer(mut self) -> anyhow::Result { + // While optional bytecode compression may be disabled on the main node, there are batches where + // optional bytecode compression was enabled. To process these batches (and also for the case where + // compression will become optional on the sequencer again), EN has to allow txs without bytecode + // compression. + const OPTIONAL_BYTECODE_COMPRESSION: bool = true; + + let persistence_layer = OutputHandlerLayer::new( + self.config + .remote + .l2_shared_bridge_addr + .expect("L2 shared bridge address is not set"), + self.config.optional.l2_block_seal_queue_capacity, + ) + .with_pre_insert_txs(true) // EN requires txs to be pre-inserted. + .with_protective_reads_persistence_enabled( + self.config.optional.protective_reads_persistence_enabled, + ); + + let io_layer = ExternalIOLayer::new(self.config.required.l2_chain_id); + + // We only need call traces on the external node if the `debug_` namespace is enabled. 
+ let save_call_traces = self + .config + .optional + .api_namespaces() + .contains(&Namespace::Debug); + let main_node_batch_executor_builder_layer = + MainBatchExecutorLayer::new(save_call_traces, OPTIONAL_BYTECODE_COMPRESSION); + + let rocksdb_options = RocksdbStorageOptions { + block_cache_capacity: self + .config + .experimental + .state_keeper_db_block_cache_capacity(), + max_open_files: self.config.experimental.state_keeper_db_max_open_files, + }; + let state_keeper_layer = StateKeeperLayer::new( + self.config.required.state_cache_path.clone(), + rocksdb_options, + ); + self.node + .add_layer(persistence_layer) + .add_layer(io_layer) + .add_layer(main_node_batch_executor_builder_layer) + .add_layer(state_keeper_layer); + Ok(self) + } + + fn add_consensus_layer(mut self) -> anyhow::Result { + let config = self.config.consensus.clone(); + let secrets = + config::read_consensus_secrets().context("config::read_consensus_secrets()")?; + let layer = ConsensusLayer { + mode: Mode::External, + config, + secrets, + }; + self.node.add_layer(layer); + Ok(self) + } + + fn add_pruning_layer(mut self) -> anyhow::Result { + if self.config.optional.pruning_enabled { + let layer = PruningLayer::new( + self.config.optional.pruning_removal_delay(), + self.config.optional.pruning_chunk_size, + self.config.optional.pruning_data_retention(), + ); + self.node.add_layer(layer); + } else { + tracing::info!("Pruning is disabled"); + } + Ok(self) + } + + fn add_l1_batch_commitment_mode_validation_layer(mut self) -> anyhow::Result { + let layer = L1BatchCommitmentModeValidationLayer::new( + self.config.remote.diamond_proxy_addr, + self.config.optional.l1_batch_commit_data_generator_mode, + ); + self.node.add_layer(layer); + Ok(self) + } + + fn add_validate_chain_ids_layer(mut self) -> anyhow::Result { + let layer = ValidateChainIdsLayer::new( + self.config.required.l1_chain_id, + self.config.required.l2_chain_id, + ); + self.node.add_layer(layer); + Ok(self) + } + + fn add_consistency_checker_layer(mut self) -> anyhow::Result { + let max_batches_to_recheck = 10; // TODO (BFT-97): Make it a part of a proper EN config + let layer = ConsistencyCheckerLayer::new( + self.config.remote.diamond_proxy_addr, + max_batches_to_recheck, + self.config.optional.l1_batch_commit_data_generator_mode, + ); + self.node.add_layer(layer); + Ok(self) + } + + fn add_commitment_generator_layer(mut self) -> anyhow::Result { + let layer = + CommitmentGeneratorLayer::new(self.config.optional.l1_batch_commit_data_generator_mode) + .with_max_parallelism( + self.config + .experimental + .commitment_generator_max_parallelism, + ); + self.node.add_layer(layer); + Ok(self) + } + + fn add_batch_status_updater_layer(mut self) -> anyhow::Result { + let layer = BatchStatusUpdaterLayer; + self.node.add_layer(layer); + Ok(self) + } + + fn add_tree_data_fetcher_layer(mut self) -> anyhow::Result { + let layer = TreeDataFetcherLayer::new(self.config.remote.diamond_proxy_addr); + self.node.add_layer(layer); + Ok(self) + } + + fn add_sync_state_updater_layer(mut self) -> anyhow::Result { + // This layer may be used as a fallback for EN API if API server runs without the core component. 
+ self.node.add_layer(SyncStateUpdaterLayer); + Ok(self) + } + + fn add_metadata_calculator_layer(mut self, with_tree_api: bool) -> anyhow::Result { + let metadata_calculator_config = MetadataCalculatorConfig { + db_path: self.config.required.merkle_tree_path.clone(), + max_open_files: self.config.optional.merkle_tree_max_open_files, + mode: MerkleTreeMode::Lightweight, + delay_interval: self.config.optional.merkle_tree_processing_delay(), + max_l1_batches_per_iter: self.config.optional.merkle_tree_max_l1_batches_per_iter, + multi_get_chunk_size: self.config.optional.merkle_tree_multi_get_chunk_size, + block_cache_capacity: self.config.optional.merkle_tree_block_cache_size(), + include_indices_and_filters_in_block_cache: self + .config + .optional + .merkle_tree_include_indices_and_filters_in_block_cache, + memtable_capacity: self.config.optional.merkle_tree_memtable_capacity(), + stalled_writes_timeout: self.config.optional.merkle_tree_stalled_writes_timeout(), + recovery: MetadataCalculatorRecoveryConfig { + desired_chunk_size: self.config.experimental.snapshots_recovery_tree_chunk_size, + parallel_persistence_buffer: self + .config + .experimental + .snapshots_recovery_tree_parallel_persistence_buffer, + }, + }; + + // Configure basic tree layer. + let mut layer = MetadataCalculatorLayer::new(metadata_calculator_config); + + // Add tree API if needed. + if with_tree_api { + let merkle_tree_api_config = MerkleTreeApiConfig { + port: self + .config + .tree_component + .api_port + .context("should contain tree api port")?, + }; + layer = layer.with_tree_api_config(merkle_tree_api_config); + } + + // Add tree pruning if needed. + if self.config.optional.pruning_enabled { + layer = layer.with_pruning_config(self.config.optional.pruning_removal_delay()); + } + + self.node.add_layer(layer); + Ok(self) + } + + fn add_tx_sender_layer(mut self) -> anyhow::Result { + let postgres_storage_config = PostgresStorageCachesConfig { + factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64, + initial_writes_cache_size: self.config.optional.initial_writes_cache_size() as u64, + latest_values_cache_size: self.config.optional.latest_values_cache_size() as u64, + }; + let max_vm_concurrency = self.config.optional.vm_concurrency_limit; + let api_contracts = ApiContracts::load_from_disk_blocking(); // TODO (BFT-138): Allow to dynamically reload API contracts; + let tx_sender_layer = TxSenderLayer::new( + (&self.config).into(), + postgres_storage_config, + max_vm_concurrency, + api_contracts, + ) + .with_whitelisted_tokens_for_aa_cache(true); + + self.node.add_layer(TxSinkLayer::ProxySink); + self.node.add_layer(tx_sender_layer); + Ok(self) + } + + fn add_mempool_cache_layer(mut self) -> anyhow::Result { + self.node.add_layer(MempoolCacheLayer::new( + self.config.optional.mempool_cache_size, + self.config.optional.mempool_cache_update_interval(), + )); + Ok(self) + } + + fn add_tree_api_client_layer(mut self) -> anyhow::Result { + self.node.add_layer(TreeApiClientLayer::http( + self.config.api_component.tree_api_remote_url.clone(), + )); + Ok(self) + } + + fn add_main_node_fee_params_fetcher_layer(mut self) -> anyhow::Result { + self.node.add_layer(MainNodeFeeParamsFetcherLayer); + Ok(self) + } + + fn web3_api_optional_config(&self) -> Web3ServerOptionalConfig { + // The refresh interval should be several times lower than the pruning removal delay, so that + // soft-pruning will timely propagate to the API server. 
+ let pruning_info_refresh_interval = self.config.optional.pruning_removal_delay() / 5; + + Web3ServerOptionalConfig { + namespaces: Some(self.config.optional.api_namespaces()), + filters_limit: Some(self.config.optional.filters_limit), + subscriptions_limit: Some(self.config.optional.filters_limit), + batch_request_size_limit: Some(self.config.optional.max_batch_request_size), + response_body_size_limit: Some(self.config.optional.max_response_body_size()), + with_extended_tracing: self.config.optional.extended_rpc_tracing, + pruning_info_refresh_interval: Some(pruning_info_refresh_interval), + websocket_requests_per_minute_limit: None, // To be set by WS server layer method if required. + replication_lag_limit: None, // TODO: Support replication lag limit + } + } + + fn add_http_web3_api_layer(mut self) -> anyhow::Result { + let optional_config = self.web3_api_optional_config(); + self.node.add_layer(Web3ServerLayer::http( + self.config.required.http_port, + (&self.config).into(), + optional_config, + )); + + Ok(self) + } + + fn add_ws_web3_api_layer(mut self) -> anyhow::Result { + // TODO: Support websocket requests per minute limit + let optional_config = self.web3_api_optional_config(); + self.node.add_layer(Web3ServerLayer::ws( + self.config.required.ws_port, + (&self.config).into(), + optional_config, + )); + + Ok(self) + } + + pub fn build(mut self, mut components: Vec) -> anyhow::Result { + // Add "base" layers + self = self + .add_sigint_handler_layer()? + .add_healthcheck_layer()? + .add_prometheus_exporter_layer()? + .add_pools_layer()? + .add_main_node_client_layer()? + .add_query_eth_client_layer()?; + + // Add preconditions for all the components. + self = self + .add_l1_batch_commitment_mode_validation_layer()? + .add_validate_chain_ids_layer()?; + + // Sort the components, so that the components they may depend on each other are added in the correct order. + components.sort_unstable_by_key(|component| match component { + // API consumes the resources provided by other layers (multiple ones), so it has to come the last. + Component::HttpApi | Component::WsApi => 1, + // Default priority. + _ => 0, + }); + + for component in &components { + match component { + Component::HttpApi => { + self = self + .add_sync_state_updater_layer()? + .add_mempool_cache_layer()? + .add_tree_api_client_layer()? + .add_main_node_fee_params_fetcher_layer()? + .add_tx_sender_layer()? + .add_http_web3_api_layer()?; + } + Component::WsApi => { + self = self + .add_sync_state_updater_layer()? + .add_mempool_cache_layer()? + .add_tree_api_client_layer()? + .add_main_node_fee_params_fetcher_layer()? + .add_tx_sender_layer()? + .add_ws_web3_api_layer()?; + } + Component::Tree => { + // Right now, distributed mode for EN is not fully supported, e.g. there are some + // issues with reorg detection and snapshot recovery. + // So we require the core component to be present, e.g. forcing the EN to run in a monolithic mode. + anyhow::ensure!( + components.contains(&Component::Core), + "Tree must run on the same machine as Core" + ); + let with_tree_api = components.contains(&Component::TreeApi); + self = self.add_metadata_calculator_layer(with_tree_api)?; + } + Component::TreeApi => { + anyhow::ensure!( + components.contains(&Component::Tree), + "Merkle tree API cannot be started without a tree component" + ); + // Do nothing, will be handled by the `Tree` component. 
+ } + Component::TreeFetcher => { + self = self.add_tree_data_fetcher_layer()?; + } + Component::Core => { + // Core is a singleton & mandatory component, + // so until we have a dedicated component for "auxiliary" tasks, + // it's responsible for things like metrics. + self = self.add_postgres_metrics_layer()?; + + // Main tasks + self = self + .add_state_keeper_layer()? + .add_consensus_layer()? + .add_pruning_layer()? + .add_consistency_checker_layer()? + .add_commitment_generator_layer()? + .add_batch_status_updater_layer()?; + } + } + } + + Ok(self.node.build()?) + } +} diff --git a/core/bin/external_node/src/tests.rs b/core/bin/external_node/src/tests.rs index 6611ce145c4c..8966a7ac3f3b 100644 --- a/core/bin/external_node/src/tests.rs +++ b/core/bin/external_node/src/tests.rs @@ -34,6 +34,7 @@ fn block_details_base(hash: H256) -> api::BlockDetailsBase { executed_at: None, l1_gas_price: 0, l2_fair_gas_price: 0, + fair_pubdata_price: None, base_system_contracts_hashes: Default::default(), } } @@ -156,6 +157,7 @@ async fn external_node_basics(components_str: &'static str) { let opt = Cli { enable_consensus: false, components, + use_node_framework: false, }; let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool); if opt.components.0.contains(&Component::TreeApi) { @@ -264,6 +266,7 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { let opt = Cli { enable_consensus: false, components: "core".parse().unwrap(), + use_node_framework: false, }; let mut config = ExternalNodeConfig::mock(&temp_dir, &connection_pool); if opt.components.0.contains(&Component::TreeApi) { diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 551683605479..096d5e783551 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -23,18 +23,20 @@ use zksync_node_framework::{ eth_watch::EthWatchLayer, healtcheck_server::HealthCheckLayer, house_keeper::HouseKeeperLayer, + l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, l1_gas::SequencerL1GasLayer, metadata_calculator::MetadataCalculatorLayer, object_store::ObjectStoreLayer, pk_signing_eth_client::PKSigningEthClientLayer, pools_layer::PoolsLayerBuilder, + postgres_metrics::PostgresMetricsLayer, prometheus_exporter::PrometheusExporterLayer, proof_data_handler::ProofDataHandlerLayer, query_eth_client::QueryEthClientLayer, sigint::SigintHandlerLayer, state_keeper::{ main_batch_executor::MainBatchExecutorLayer, mempool_io::MempoolIOLayer, - StateKeeperLayer, + output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::protective_reads::ProtectiveReadsWriterLayer, @@ -111,6 +113,11 @@ impl MainNodeBuilder { Ok(self) } + fn add_postgres_metrics_layer(mut self) -> anyhow::Result { + self.node.add_layer(PostgresMetricsLayer); + Ok(self) + } + fn add_pk_signing_client_layer(mut self) -> anyhow::Result { let eth_config = try_load_config!(self.configs.eth); let wallets = try_load_config!(self.wallets.eth_sender); @@ -155,6 +162,15 @@ impl MainNodeBuilder { Ok(self) } + fn add_l1_batch_commitment_mode_validation_layer(mut self) -> anyhow::Result { + let layer = L1BatchCommitmentModeValidationLayer::new( + self.contracts_config.diamond_proxy_addr, + self.genesis_config.l1_batch_commit_data_generator_mode, + ); + self.node.add_layer(layer); + Ok(self) + } + fn add_metadata_calculator_layer(mut self, with_tree_api: bool) -> 
anyhow::Result { let merkle_tree_env_config = try_load_config!(self.configs.db_config).merkle_tree; let operations_manager_env_config = @@ -173,19 +189,37 @@ impl MainNodeBuilder { } fn add_state_keeper_layer(mut self) -> anyhow::Result { + // Bytecode compression is currently mandatory for the transactions processed by the sequencer. + const OPTIONAL_BYTECODE_COMPRESSION: bool = false; + let wallets = self.wallets.clone(); let sk_config = try_load_config!(self.configs.state_keeper_config); + let persistence_layer = OutputHandlerLayer::new( + self.contracts_config + .l2_shared_bridge_addr + .context("L2 shared bridge address")?, + sk_config.l2_block_seal_queue_capacity, + ); let mempool_io_layer = MempoolIOLayer::new( self.genesis_config.l2_chain_id, - self.contracts_config.clone(), sk_config.clone(), try_load_config!(self.configs.mempool_config), try_load_config!(wallets.state_keeper), ); let db_config = try_load_config!(self.configs.db_config); - let main_node_batch_executor_builder_layer = MainBatchExecutorLayer::new(sk_config); - let state_keeper_layer = StateKeeperLayer::new(db_config); + let main_node_batch_executor_builder_layer = + MainBatchExecutorLayer::new(sk_config.save_call_traces, OPTIONAL_BYTECODE_COMPRESSION); + + let rocksdb_options = RocksdbStorageOptions { + block_cache_capacity: db_config + .experimental + .state_keeper_db_block_cache_capacity(), + max_open_files: db_config.experimental.state_keeper_db_max_open_files, + }; + let state_keeper_layer = + StateKeeperLayer::new(db_config.state_keeper_db_path, rocksdb_options); self.node + .add_layer(persistence_layer) .add_layer(mempool_io_layer) .add_layer(main_node_batch_executor_builder_layer) .add_layer(state_keeper_layer); @@ -308,6 +342,7 @@ impl MainNodeBuilder { rpc_config.websocket_requests_per_minute_limit(), ), replication_lag_limit: circuit_breaker_config.replication_lag_limit(), + ..Default::default() }; self.node.add_layer(Web3ServerLayer::ws( rpc_config.ws_port, @@ -419,7 +454,8 @@ impl MainNodeBuilder { .add_healthcheck_layer()? .add_prometheus_exporter_layer()? .add_query_eth_client_layer()? - .add_sequencer_l1_gas_layer()?; + .add_sequencer_l1_gas_layer()? + .add_l1_batch_commitment_mode_validation_layer()?; // Sort the components, so that the components they may depend on each other are added in the correct order. components.sort_unstable_by_key(|component| match component { @@ -479,7 +515,9 @@ impl MainNodeBuilder { self = self.add_tee_verifier_input_producer_layer()?; } Component::Housekeeper => { - self = self.add_house_keeper_layer()?; + self = self + .add_house_keeper_layer()? 
+ .add_postgres_metrics_layer()?; } Component::ProofDataHandler => { self = self.add_proof_data_handler_layer()?; diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index d8083c0f6a31..f0d12436e3b8 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -71,11 +71,11 @@ pub enum ProtocolVersionId { } impl ProtocolVersionId { - pub fn latest() -> Self { + pub const fn latest() -> Self { Self::Version24 } - pub fn next() -> Self { + pub const fn next() -> Self { Self::Version25 } diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index 1d741fac508c..5eb00dc63a4f 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -382,15 +382,3 @@ pub struct ProofCompressionJobInfo { pub time_taken: Option, pub picked_by: Option, } - -// This function corrects circuit IDs for the node witness generator. -// -// - Circuit IDs in the node witness generator are 2 higher than in other rounds. -// - The `EIP4844Repack` circuit (ID 255) is an exception and is set to 18. -pub fn correct_circuit_id(circuit_id: i16, aggregation_round: AggregationRound) -> u32 { - match (circuit_id, aggregation_round) { - (18, AggregationRound::NodeAggregation) => 255, - (circuit_id, AggregationRound::NodeAggregation) => (circuit_id as u32) - 2, - _ => circuit_id as u32, - } -} diff --git a/core/lib/contract_verifier/Cargo.toml b/core/lib/contract_verifier/Cargo.toml new file mode 100644 index 000000000000..ea84024cba98 --- /dev/null +++ b/core/lib/contract_verifier/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "zksync_contract_verifier_lib" +version = "0.1.0" +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_types.workspace = true +zksync_dal.workspace = true +zksync_env_config.workspace = true +zksync_config.workspace = true +zksync_contracts.workspace = true +zksync_queued_job_processor.workspace = true +zksync_utils.workspace = true + +anyhow.workspace = true +tokio = { workspace = true, features = ["full"] } +thiserror.workspace = true +chrono.workspace = true +serde_json.workspace = true +ethabi.workspace = true +vise.workspace = true +hex.workspace = true +serde = { workspace = true, features = ["derive"] } +lazy_static.workspace = true +tempfile.workspace = true +regex.workspace = true +tracing.workspace = true +semver.workspace = true diff --git a/core/bin/contract-verifier/src/error.rs b/core/lib/contract_verifier/src/error.rs similarity index 100% rename from core/bin/contract-verifier/src/error.rs rename to core/lib/contract_verifier/src/error.rs diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/lib/contract_verifier/src/lib.rs similarity index 98% rename from core/bin/contract-verifier/src/verifier.rs rename to core/lib/contract_verifier/src/lib.rs index 8d5ba9fccfe2..224d4b292347 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/lib/contract_verifier/src/lib.rs @@ -30,6 +30,11 @@ use crate::{ zkvyper_utils::{ZkVyper, ZkVyperInput}, }; +pub mod error; +mod metrics; +mod zksolc_utils; +mod zkvyper_utils; + lazy_static! 
{ static ref DEPLOYER_CONTRACT: Contract = zksync_contracts::deployer_contract(); } @@ -148,7 +153,11 @@ impl ContractVerifier { )); } - let zksolc = ZkSolc::new(zksolc_path, solc_path); + let zksolc = ZkSolc::new( + zksolc_path, + solc_path, + request.req.compiler_versions.zk_compiler_version(), + ); let output = time::timeout(config.compilation_timeout(), zksolc.async_compile(input)) .await @@ -274,7 +283,7 @@ impl ContractVerifier { Err(ContractVerifierError::MissingContract(contract_name)) } - async fn compile( + pub async fn compile( request: VerificationRequest, config: ContractVerifierConfig, ) -> Result { diff --git a/core/bin/contract-verifier/src/metrics.rs b/core/lib/contract_verifier/src/metrics.rs similarity index 100% rename from core/bin/contract-verifier/src/metrics.rs rename to core/lib/contract_verifier/src/metrics.rs diff --git a/core/bin/contract-verifier/src/zksolc_utils.rs b/core/lib/contract_verifier/src/zksolc_utils.rs similarity index 67% rename from core/bin/contract-verifier/src/zksolc_utils.rs rename to core/lib/contract_verifier/src/zksolc_utils.rs index 791d5ee5b6cc..08004632bcec 100644 --- a/core/bin/contract-verifier/src/zksolc_utils.rs +++ b/core/lib/contract_verifier/src/zksolc_utils.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, io::Write, path::PathBuf, process::Stdio}; +use semver::Version; use serde::{Deserialize, Serialize}; use crate::error::ContractVerifierError; @@ -74,28 +75,22 @@ impl Default for Optimizer { } } -impl Optimizer { - /// - /// A shortcut constructor. - /// - pub fn new(enabled: bool) -> Self { - Self { - enabled, - mode: None, - } - } -} - pub struct ZkSolc { zksolc_path: PathBuf, solc_path: PathBuf, + zksolc_version: String, } impl ZkSolc { - pub fn new(zksolc_path: impl Into, solc_path: impl Into) -> Self { + pub fn new( + zksolc_path: impl Into, + solc_path: impl Into, + zksolc_version: String, + ) -> Self { ZkSolc { zksolc_path: zksolc_path.into(), solc_path: solc_path.into(), + zksolc_version, } } @@ -105,26 +100,36 @@ impl ZkSolc { ) -> Result { use tokio::io::AsyncWriteExt; let mut command = tokio::process::Command::new(&self.zksolc_path); + command.stdout(Stdio::piped()).stderr(Stdio::piped()); + match &input { ZkSolcInput::StandardJson(input) => { - if input.settings.is_system { - command.arg("--system-mode"); - } - if input.settings.force_evmla { - command.arg("--force-evmla"); + if !self.is_post_1_5_0() { + if input.settings.is_system { + command.arg("--system-mode"); + } + if input.settings.force_evmla { + command.arg("--force-evmla"); + } } + + command.arg("--solc").arg(self.solc_path.to_str().unwrap()); } ZkSolcInput::YulSingleFile { is_system, .. 
} => { - if *is_system { - command.arg("--system-mode"); + if self.is_post_1_5_0() { + if *is_system { + command.arg("--enable-eravm-extensions"); + } else { + command.arg("--solc").arg(self.solc_path.to_str().unwrap()); + } + } else { + if *is_system { + command.arg("--system-mode"); + } + command.arg("--solc").arg(self.solc_path.to_str().unwrap()); } } } - command - .arg("--solc") - .arg(self.solc_path.to_str().unwrap()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); match input { ZkSolcInput::StandardJson(input) => { let mut child = command @@ -193,4 +198,53 @@ impl ZkSolc { } } } + + pub fn is_post_1_5_0(&self) -> bool { + // Special case + if &self.zksolc_version == "vm-1.5.0-a167aa3" { + false + } else if let Some(version) = self.zksolc_version.strip_prefix("v") { + if let Ok(semver) = Version::parse(version) { + let target = Version::new(1, 5, 0); + semver >= target + } else { + true + } + } else { + true + } + } +} + +#[cfg(test)] +mod tests { + use crate::zksolc_utils::ZkSolc; + + #[test] + fn check_is_post_1_5_0() { + // Special case. + let mut zksolc = ZkSolc::new(".", ".", "vm-1.5.0-a167aa3".to_string()); + assert!(!zksolc.is_post_1_5_0(), "vm-1.5.0-a167aa3"); + + zksolc.zksolc_version = "v1.5.0".to_string(); + assert!(zksolc.is_post_1_5_0(), "v1.5.0"); + + zksolc.zksolc_version = "v1.5.1".to_string(); + assert!(zksolc.is_post_1_5_0(), "v1.5.1"); + + zksolc.zksolc_version = "v1.10.1".to_string(); + assert!(zksolc.is_post_1_5_0(), "v1.10.1"); + + zksolc.zksolc_version = "v2.0.0".to_string(); + assert!(zksolc.is_post_1_5_0(), "v2.0.0"); + + zksolc.zksolc_version = "v1.4.15".to_string(); + assert!(!zksolc.is_post_1_5_0(), "v1.4.15"); + + zksolc.zksolc_version = "v1.3.21".to_string(); + assert!(!zksolc.is_post_1_5_0(), "v1.3.21"); + + zksolc.zksolc_version = "v0.5.1".to_string(); + assert!(!zksolc.is_post_1_5_0(), "v0.5.1"); + } } diff --git a/core/bin/contract-verifier/src/zkvyper_utils.rs b/core/lib/contract_verifier/src/zkvyper_utils.rs similarity index 100% rename from core/bin/contract-verifier/src/zkvyper_utils.rs rename to core/lib/contract_verifier/src/zkvyper_utils.rs diff --git a/core/lib/dal/.sqlx/query-1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76.json b/core/lib/dal/.sqlx/query-1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76.json new file mode 100644 index 000000000000..13e4cdb9431d --- /dev/null +++ b/core/lib/dal/.sqlx/query-1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76.json @@ -0,0 +1,112 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND 
prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "root_hash?", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "commit_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "committed_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 7, + "name": "prove_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "proven_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 9, + "name": "execute_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "executed_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 11, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 12, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 13, + "name": "fair_pubdata_price", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "default_aa_code_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true, + true + ] + }, + "hash": "1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76" +} diff --git a/core/lib/dal/.sqlx/query-327974ef6d0c7edf56339d310ec60cd2f3d5223add676591cb0577e0a77403cb.json b/core/lib/dal/.sqlx/query-327974ef6d0c7edf56339d310ec60cd2f3d5223add676591cb0577e0a77403cb.json new file mode 100644 index 000000000000..7ecce5be1f35 --- /dev/null +++ b/core/lib/dal/.sqlx/query-327974ef6d0c7edf56339d310ec60cd2f3d5223add676591cb0577e0a77403cb.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM storage_logs USING UNNEST($1::bytea[], $2::BIGINT[], $3::INT[]) AS new_logs (hashed_key, miniblock_number, operation_number)\n WHERE\n storage_logs.hashed_key = new_logs.hashed_key\n AND (storage_logs.miniblock_number, storage_logs.operation_number) < (new_logs.miniblock_number, new_logs.operation_number)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int8Array", + "Int4Array" + ] + }, + "nullable": [] + }, + "hash": "327974ef6d0c7edf56339d310ec60cd2f3d5223add676591cb0577e0a77403cb" +} diff --git a/core/lib/dal/.sqlx/query-362e20c4c2527f1585132ca85316ba34fd131682ee5414a9d0ae2cab349b2395.json b/core/lib/dal/.sqlx/query-362e20c4c2527f1585132ca85316ba34fd131682ee5414a9d0ae2cab349b2395.json deleted file mode 100644 index ef84a26a6e84..000000000000 --- a/core/lib/dal/.sqlx/query-362e20c4c2527f1585132ca85316ba34fd131682ee5414a9d0ae2cab349b2395.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM storage_logs\n WHERE\n storage_logs.miniblock_number < $1\n AND hashed_key IN (\n SELECT\n hashed_key\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": 
"362e20c4c2527f1585132ca85316ba34fd131682ee5414a9d0ae2cab349b2395" -} diff --git a/core/lib/dal/.sqlx/query-44490ad52b8dbcd978a96677ffac5437752a4cf3ac92ec09b334089a8dc5b4ca.json b/core/lib/dal/.sqlx/query-44490ad52b8dbcd978a96677ffac5437752a4cf3ac92ec09b334089a8dc5b4ca.json deleted file mode 100644 index cb2d1b149ecf..000000000000 --- a/core/lib/dal/.sqlx/query-44490ad52b8dbcd978a96677ffac5437752a4cf3ac92ec09b334089a8dc5b4ca.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "root_hash?", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "commit_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 6, - "name": "committed_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 7, - "name": "prove_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 8, - "name": "proven_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 9, - "name": "execute_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 10, - "name": "executed_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 11, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 12, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 14, - "name": "default_aa_code_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - true, - false, - true, - false, - true, - false, - true, - false, - false, - true, - true - ] - }, - "hash": "44490ad52b8dbcd978a96677ffac5437752a4cf3ac92ec09b334089a8dc5b4ca" -} diff --git a/core/lib/dal/.sqlx/query-fe06e06c04466429bb85709e6fe8dd6c2ad2793c06071f4a067dcc31306adebc.json b/core/lib/dal/.sqlx/query-45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a.json similarity index 60% rename from core/lib/dal/.sqlx/query-fe06e06c04466429bb85709e6fe8dd6c2ad2793c06071f4a067dcc31306adebc.json rename to 
core/lib/dal/.sqlx/query-45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a.json index 8f0065433012..36da129b5b77 100644 --- a/core/lib/dal/.sqlx/query-fe06e06c04466429bb85709e6fe8dd6c2ad2793c06071f4a067dcc31306adebc.json +++ b/core/lib/dal/.sqlx/query-45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n eth_txs_history (\n eth_tx_id,\n base_fee_per_gas,\n priority_fee_per_gas,\n tx_hash,\n signed_raw_tx,\n created_at,\n updated_at,\n blob_base_fee_per_gas\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW(), $6)\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING\n id\n ", + "query": "\n INSERT INTO\n eth_txs_history (\n eth_tx_id,\n base_fee_per_gas,\n priority_fee_per_gas,\n tx_hash,\n signed_raw_tx,\n created_at,\n updated_at,\n blob_base_fee_per_gas,\n sent_at_block,\n sent_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, NOW())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING\n id\n ", "describe": { "columns": [ { @@ -16,12 +16,13 @@ "Int8", "Text", "Bytea", - "Int8" + "Int8", + "Int4" ] }, "nullable": [ false ] }, - "hash": "fe06e06c04466429bb85709e6fe8dd6c2ad2793c06071f4a067dcc31306adebc" + "hash": "45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a" } diff --git a/core/lib/dal/.sqlx/query-4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c.json b/core/lib/dal/.sqlx/query-4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c.json deleted file mode 100644 index 2c4d795f2f45..000000000000 --- a/core/lib/dal/.sqlx/query-4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM storage_logs USING (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n GROUP BY\n hashed_key\n ) AS last_storage_logs\n WHERE\n storage_logs.miniblock_number BETWEEN $1 AND $2\n AND last_storage_logs.hashed_key = storage_logs.hashed_key\n AND (\n storage_logs.miniblock_number != last_storage_logs.op[1]\n OR storage_logs.operation_number != last_storage_logs.op[2]\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "4cff62fad4a7044a824a60656050e8a100140875f95cd8cf5de3c6202d59a19c" -} diff --git a/core/lib/dal/.sqlx/query-8c2f1f7bccc6af93714a74f732f94d8d631d56c5753f4e944f1cdf3e05b04a8c.json b/core/lib/dal/.sqlx/query-8c2f1f7bccc6af93714a74f732f94d8d631d56c5753f4e944f1cdf3e05b04a8c.json new file mode 100644 index 000000000000..ffb51e0dd865 --- /dev/null +++ b/core/lib/dal/.sqlx/query-8c2f1f7bccc6af93714a74f732f94d8d631d56c5753f4e944f1cdf3e05b04a8c.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT DISTINCT\n ON (hashed_key) hashed_key,\n miniblock_number,\n operation_number\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ORDER BY\n hashed_key,\n miniblock_number DESC,\n operation_number DESC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hashed_key", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "miniblock_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "operation_number", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "8c2f1f7bccc6af93714a74f732f94d8d631d56c5753f4e944f1cdf3e05b04a8c" +} diff --git 
a/core/lib/dal/.sqlx/query-6874b501c82e6062ab22622095070d67840b2484ea3a03ac49eb3d50ea153163.json b/core/lib/dal/.sqlx/query-ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225.json similarity index 72% rename from core/lib/dal/.sqlx/query-6874b501c82e6062ab22622095070d67840b2484ea3a03ac49eb3d50ea153163.json rename to core/lib/dal/.sqlx/query-ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225.json index 5ccda40f56fc..cf102b828aa8 100644 --- a/core/lib/dal/.sqlx/query-6874b501c82e6062ab22622095070d67840b2484ea3a03ac49eb3d50ea153163.json +++ b/core/lib/dal/.sqlx/query-ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -75,21 +75,26 @@ }, { "ordinal": 14, + "name": "fair_pubdata_price", + "type_info": "Int8" + }, + { + "ordinal": 15, "name": "bootloader_code_hash", "type_info": 
"Bytea" }, { - "ordinal": 15, + "ordinal": 16, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 16, + "ordinal": 17, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 17, + "ordinal": 18, "name": "fee_account_address", "type_info": "Bytea" } @@ -117,8 +122,9 @@ true, true, true, + true, false ] }, - "hash": "6874b501c82e6062ab22622095070d67840b2484ea3a03ac49eb3d50ea153163" + "hash": "ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 034f252f7e50..aa1d7097b9ba 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -49,5 +49,9 @@ strum = { workspace = true, features = ["derive"] } tracing.workspace = true chrono = { workspace = true, features = ["serde"] } +[dev-dependencies] +zksync_test_account.workspace = true +zksync_concurrency.workspace = true + [build-dependencies] zksync_protobuf_build.workspace = true diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index f7b88f94a673..1c7f912728cc 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -629,6 +629,7 @@ impl BlocksWeb3Dal<'_, '_> { execute_tx.confirmed_at AS "executed_at?", miniblocks.l1_gas_price, miniblocks.l2_fair_gas_price, + miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, miniblocks.protocol_version, @@ -673,7 +674,8 @@ impl BlocksWeb3Dal<'_, '_> { mb AS ( SELECT l1_gas_price, - l2_fair_gas_price + l2_fair_gas_price, + fair_pubdata_price FROM miniblocks WHERE @@ -695,6 +697,7 @@ impl BlocksWeb3Dal<'_, '_> { execute_tx.confirmed_at AS "executed_at?", mb.l1_gas_price, mb.l2_fair_gas_price, + mb.fair_pubdata_price, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash FROM diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 8e1f246b657c..fac045ce2224 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -7,6 +7,7 @@ use anyhow::{anyhow, Context as _}; use zksync_consensus_roles::validator; use zksync_protobuf::{required, ProtoFmt, ProtoRepr}; use zksync_types::{ + abi, ethabi, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::TransactionType, @@ -38,38 +39,59 @@ pub struct Payload { impl ProtoFmt for Payload { type Proto = proto::Payload; - fn read(message: &Self::Proto) -> anyhow::Result { - let mut transactions = Vec::with_capacity(message.transactions.len()); - for (i, tx) in message.transactions.iter().enumerate() { - transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) + fn read(r: &Self::Proto) -> anyhow::Result { + let protocol_version = required(&r.protocol_version) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("protocol_version")?; + let mut transactions = vec![]; + + match protocol_version { + v if v >= ProtocolVersionId::Version25 => { + anyhow::ensure!( + r.transactions.is_empty(), + "transactions should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions_v25.iter().enumerate() { + transactions.push( + tx.read() + .with_context(|| format!("transactions_v25[{i}]"))?, + ); + } + } + v => { + anyhow::ensure!( + r.transactions_v25.is_empty(), + "transactions_v25 should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions.iter().enumerate() { + transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) 
+ } + } } Ok(Self { - protocol_version: required(&message.protocol_version) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("protocol_version")?, - hash: required(&message.hash) + protocol_version, + hash: required(&r.hash) .and_then(|h| parse_h256(h)) .context("hash")?, l1_batch_number: L1BatchNumber( - *required(&message.l1_batch_number).context("l1_batch_number")?, + *required(&r.l1_batch_number).context("l1_batch_number")?, ), - timestamp: *required(&message.timestamp).context("timestamp")?, - l1_gas_price: *required(&message.l1_gas_price).context("l1_gas_price")?, - l2_fair_gas_price: *required(&message.l2_fair_gas_price) - .context("l2_fair_gas_price")?, - fair_pubdata_price: message.fair_pubdata_price, - virtual_blocks: *required(&message.virtual_blocks).context("virtual_blocks")?, - operator_address: required(&message.operator_address) + timestamp: *required(&r.timestamp).context("timestamp")?, + l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, + l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, + fair_pubdata_price: r.fair_pubdata_price, + virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, + operator_address: required(&r.operator_address) .and_then(|a| parse_h160(a)) .context("operator_address")?, transactions, - last_in_batch: *required(&message.last_in_batch).context("last_in_batch")?, + last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, }) } fn build(&self) -> Self::Proto { - Self::Proto { + let mut x = Self::Proto { protocol_version: Some((self.protocol_version as u16).into()), hash: Some(self.hash.as_bytes().into()), l1_batch_number: Some(self.l1_batch_number.0), @@ -80,13 +102,19 @@ impl ProtoFmt for Payload { virtual_blocks: Some(self.virtual_blocks), operator_address: Some(self.operator_address.as_bytes().into()), // Transactions are stored in execution order, therefore order is deterministic. - transactions: self - .transactions - .iter() - .map(proto::Transaction::build) - .collect(), + transactions: vec![], + transactions_v25: vec![], last_in_batch: Some(self.last_in_batch), + }; + match self.protocol_version { + v if v >= ProtocolVersionId::Version25 => { + x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); + } + _ => { + x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); + } } + x } } @@ -100,6 +128,50 @@ impl Payload { } } +impl ProtoRepr for proto::TransactionV25 { + type Type = Transaction; + + fn read(&self) -> anyhow::Result { + use proto::transaction_v25::T; + let tx = match required(&self.t)? { + T::L1(l1) => abi::Transaction::L1 { + tx: required(&l1.rlp) + .and_then(|x| { + let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) + .context("ethabi::decode()")?; + // Unwrap is safe because `ethabi::decode` does the verification. + let tx = + abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) + .context("L2CanonicalTransaction::decode()")?; + Ok(tx) + }) + .context("rlp")? + .into(), + factory_deps: l1.factory_deps.clone(), + eth_block: 0, + }, + T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), + }; + tx.try_into() + } + + fn build(tx: &Self::Type) -> Self { + let tx = abi::Transaction::try_from(tx.clone()).unwrap(); + use proto::transaction_v25::T; + Self { + t: Some(match tx { + abi::Transaction::L1 { + tx, factory_deps, .. 
+ } => T::L1(proto::L1Transaction { + rlp: Some(ethabi::encode(&[tx.encode()])), + factory_deps, + }), + abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), + }), + } + } +} + impl ProtoRepr for proto::Transaction { type Type = Transaction; diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index a53647611836..a7b5ea344152 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -13,10 +13,30 @@ message Payload { optional uint64 fair_pubdata_price = 11; // required since 1.4.1; gwei optional uint32 virtual_blocks = 6; // required optional bytes operator_address = 7; // required; H160 + // Set for protocol_version < 25. repeated Transaction transactions = 8; + // Set for protocol_version >= 25. + repeated TransactionV25 transactions_v25 = 12; optional bool last_in_batch = 10; // required } +message L1Transaction { + optional bytes rlp = 1; // required; RLP encoded L2CanonicalTransaction + repeated bytes factory_deps = 2; +} + +message L2Transaction { + optional bytes rlp = 1; // required; RLP encoded TransactionRequest +} + +message TransactionV25 { + // required + oneof t { + L1Transaction l1 = 1; + L2Transaction l2 = 2; + } +} + message Transaction { reserved 5; reserved "received_timestamp_ms"; diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index 694634f11a8c..4a69bebdc362 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -1,21 +1,75 @@ use std::fmt::Debug; +use rand::Rng; +use zksync_concurrency::ctx; use zksync_protobuf::{ repr::{decode, encode}, + testonly::test_encode, ProtoRepr, }; -use zksync_types::{web3::Bytes, Execute, ExecuteTransactionCommon, Transaction}; +use zksync_test_account::Account; +use zksync_types::{ + web3::Bytes, Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, +}; + +use super::{proto, Payload}; +use crate::tests::mock_protocol_upgrade_transaction; + +fn execute(rng: &mut impl Rng) -> Execute { + Execute { + contract_address: rng.gen(), + value: rng.gen::().into(), + calldata: (0..10 * 32).map(|_| rng.gen()).collect(), + // TODO: find a way to generate valid random bytecode. + factory_deps: vec![], + } +} -use crate::tests::{mock_l1_execute, mock_l2_transaction, mock_protocol_upgrade_transaction}; +fn l1_transaction(rng: &mut impl Rng) -> Transaction { + Account::random_using(rng).get_l1_tx(execute(rng), rng.gen()) +} + +fn l2_transaction(rng: &mut impl Rng) -> Transaction { + Account::random_using(rng).get_l2_tx_for_execute(execute(rng), None) +} + +fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload { + Payload { + protocol_version, + hash: rng.gen(), + l1_batch_number: L1BatchNumber(rng.gen()), + timestamp: rng.gen(), + l1_gas_price: rng.gen(), + l2_fair_gas_price: rng.gen(), + fair_pubdata_price: Some(rng.gen()), + virtual_blocks: rng.gen(), + operator_address: rng.gen(), + transactions: (0..10) + .map(|_| match rng.gen() { + true => l1_transaction(rng), + false => l2_transaction(rng), + }) + .collect(), + last_in_batch: rng.gen(), + } +} /// Tests struct <-> proto struct conversions. 
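The `Payload` codec above is version-gated: payloads for protocol versions below 25 keep their transactions in the legacy `transactions` field, while version 25 and later must use the ABI-based `transactions_v25` field, and `read` rejects a payload that populates the wrong one. A minimal sketch of that rule, assuming only `ProtocolVersionId` from `zksync_types` (the helper name is illustrative and not part of the diff):

use zksync_types::ProtocolVersionId;

/// Illustrative helper mirroring the gate in `Payload::{read, build}`: returns `true`
/// when transactions must be carried in the ABI-based `transactions_v25` field.
fn uses_abi_encoded_transactions(version: ProtocolVersionId) -> bool {
    version >= ProtocolVersionId::Version25
}

fn main() {
    // Older payloads keep using the legacy `transactions` field...
    assert!(!uses_abi_encoded_transactions(ProtocolVersionId::Version24));
    // ...while v25+ payloads put everything into `transactions_v25`.
    assert!(uses_abi_encoded_transactions(ProtocolVersionId::Version25));
}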
#[test] fn test_encoding() { - encode_decode::(mock_l1_execute().into()); - encode_decode::(mock_l2_transaction().into()); - encode_decode::( + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + encode_decode::(l1_transaction(rng)); + encode_decode::(l2_transaction(rng)); + encode_decode::(l1_transaction(rng)); + encode_decode::(l2_transaction(rng)); + encode_decode::( mock_protocol_upgrade_transaction().into(), ); + let p = payload(rng, ProtocolVersionId::Version24); + test_encode(rng, &p); + let p = payload(rng, ProtocolVersionId::Version25); + test_encode(rng, &p); } fn encode_decode(msg: P::Type) diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index ad1e910af12e..d32ed082131e 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -221,6 +221,7 @@ impl EthSenderDal<'_, '_> { Ok(eth_tx.into()) } + #[allow(clippy::too_many_arguments)] pub async fn insert_tx_history( &mut self, eth_tx_id: u32, @@ -229,6 +230,7 @@ impl EthSenderDal<'_, '_> { blob_base_fee_per_gas: Option, tx_hash: H256, raw_signed_tx: &[u8], + sent_at_block: u32, ) -> anyhow::Result> { let priority_fee_per_gas = i64::try_from(priority_fee_per_gas).context("Can't convert u64 to i64")?; @@ -247,10 +249,12 @@ impl EthSenderDal<'_, '_> { signed_raw_tx, created_at, updated_at, - blob_base_fee_per_gas + blob_base_fee_per_gas, + sent_at_block, + sent_at ) VALUES - ($1, $2, $3, $4, $5, NOW(), NOW(), $6) + ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, NOW()) ON CONFLICT (tx_hash) DO NOTHING RETURNING id @@ -261,6 +265,7 @@ impl EthSenderDal<'_, '_> { tx_hash, raw_signed_tx, blob_base_fee_per_gas.map(|v| v as i64), + sent_at_block as i32 ) .fetch_optional(self.storage.conn()) .await? diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index de6d1d9f06cd..95780e667784 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -269,6 +269,8 @@ pub(crate) struct StorageBlockDetails { pub l1_gas_price: i64, // L2 gas price assumed in the corresponding batch pub l2_fair_gas_price: i64, + // Cost of publishing 1 byte (in wei). 
+ pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, pub fee_account_address: Vec, @@ -312,6 +314,7 @@ impl From for api::BlockDetails { .map(|executed_at| DateTime::::from_naive_utc_and_offset(executed_at, Utc)), l1_gas_price: details.l1_gas_price as u64, l2_fair_gas_price: details.l2_fair_gas_price as u64, + fair_pubdata_price: details.fair_pubdata_price.map(|x| x as u64), base_system_contracts_hashes: convert_base_system_contracts_hashes( details.bootloader_code_hash, details.default_aa_code_hash, @@ -344,6 +347,7 @@ pub(crate) struct StorageL1BatchDetails { pub executed_at: Option, pub l1_gas_price: i64, pub l2_fair_gas_price: i64, + pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, } @@ -385,6 +389,7 @@ impl From for api::L1BatchDetails { .map(|executed_at| DateTime::::from_naive_utc_and_offset(executed_at, Utc)), l1_gas_price: details.l1_gas_price as u64, l2_fair_gas_price: details.l2_fair_gas_price as u64, + fair_pubdata_price: details.fair_pubdata_price.map(|x| x as u64), base_system_contracts_hashes: convert_base_system_contracts_hashes( details.bootloader_code_hash, details.default_aa_code_hash, diff --git a/core/lib/dal/src/pruning_dal/mod.rs b/core/lib/dal/src/pruning_dal/mod.rs index 9a5356202aee..16f85f2e0fad 100644 --- a/core/lib/dal/src/pruning_dal/mod.rs +++ b/core/lib/dal/src/pruning_dal/mod.rs @@ -1,5 +1,6 @@ use std::ops; +use itertools::Itertools; use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::{L1BatchNumber, L2BlockNumber}; @@ -27,8 +28,8 @@ pub struct PruningInfo { pub struct HardPruningStats { pub deleted_l1_batches: u64, pub deleted_l2_blocks: u64, - pub deleted_storage_logs_from_past_batches: u64, - pub deleted_storage_logs_from_pruned_batches: u64, + pub overwriting_logs: u64, + pub deleted_storage_logs: u64, pub deleted_events: u64, pub deleted_call_traces: u64, pub deleted_l2_to_l1_logs: u64, @@ -41,6 +42,14 @@ enum PruneType { Hard, } +/// Raw database presentation of a primary key in the `miniblocks` table. +#[derive(Debug)] +struct StorageLogPrimaryKey { + hashed_key: Vec, + miniblock_number: i64, + operation_number: i32, +} + impl PruningDal<'_, '_> { pub async fn get_pruning_info(&mut self) -> DalResult { let pruning_info = sqlx::query!( @@ -174,17 +183,18 @@ impl PruningDal<'_, '_> { self.clear_transaction_fields(first_l2_block_to_prune..=last_l2_block_to_prune) .await?; - // The deleting of logs is split into two queries to make it faster, - // only the first query has to go through all previous logs - // and the query optimizer should be happy with it - let deleted_storage_logs_from_past_batches = self - .prune_storage_logs_from_past_l2_blocks( - first_l2_block_to_prune..=last_l2_block_to_prune, - ) - .await?; - let deleted_storage_logs_from_pruned_batches = self - .prune_storage_logs_in_range(first_l2_block_to_prune..=last_l2_block_to_prune) + // Storage log pruning is designed to use deterministic indexes and thus have predictable performance. + // + // - `get_pks_for_latest_logs` is guaranteed to use the block number index (that's the only WHERE condition), + // and the supplied range of blocks should be reasonably small. + // - `prune_storage_logs` is virtually guaranteed to use the primary key index since the query removes ranges w.r.t. this index. 
+ // + // Combining these two queries or using more sophisticated queries leads to fluctuating performance due to + // unpredictable indexes being used. + let new_logs = self + .get_pks_for_latest_logs(first_l2_block_to_prune..=last_l2_block_to_prune) .await?; + let deleted_storage_logs = self.prune_storage_logs(&new_logs).await?; let deleted_l1_batches = self.delete_l1_batches(last_l1_batch_to_prune).await?; let deleted_l2_blocks = self.delete_l2_blocks(last_l2_block_to_prune).await?; @@ -194,8 +204,8 @@ impl PruningDal<'_, '_> { deleted_events, deleted_l2_to_l1_logs, deleted_call_traces, - deleted_storage_logs_from_past_batches, - deleted_storage_logs_from_pruned_batches, + overwriting_logs: new_logs.len() as u64, + deleted_storage_logs, } } else { HardPruningStats::default() @@ -314,65 +324,62 @@ impl PruningDal<'_, '_> { Ok(execution_result.rows_affected()) } - async fn prune_storage_logs_from_past_l2_blocks( + /// Gets primary keys for all latest logs in the specified L2 block range. + async fn get_pks_for_latest_logs( &mut self, l2_blocks_to_prune: ops::RangeInclusive, - ) -> DalResult { - let execution_result = sqlx::query!( + ) -> DalResult> { + sqlx::query_as!( + StorageLogPrimaryKey, r#" - DELETE FROM storage_logs + SELECT DISTINCT + ON (hashed_key) hashed_key, + miniblock_number, + operation_number + FROM + storage_logs WHERE - storage_logs.miniblock_number < $1 - AND hashed_key IN ( - SELECT - hashed_key - FROM - storage_logs - WHERE - miniblock_number BETWEEN $1 AND $2 - ) + miniblock_number BETWEEN $1 AND $2 + ORDER BY + hashed_key, + miniblock_number DESC, + operation_number DESC "#, i64::from(l2_blocks_to_prune.start().0), i64::from(l2_blocks_to_prune.end().0) ) - .instrument("hard_prune_batches_range#prune_storage_logs_from_past_l2_blocks") + .instrument("hard_prune_batches_range#get_latest_logs") .with_arg("l2_blocks_to_prune", &l2_blocks_to_prune) .report_latency() - .execute(self.storage) - .await?; - Ok(execution_result.rows_affected()) + .fetch_all(self.storage) + .await } - async fn prune_storage_logs_in_range( - &mut self, - l2_blocks_to_prune: ops::RangeInclusive, - ) -> DalResult { + /// Removes storage logs overwritten by the specified new logs. 
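For the pruning rework above: `get_pks_for_latest_logs` selects, for every `hashed_key` touched in the pruned block range, the primary key of its latest log (`DISTINCT ON ... ORDER BY ... DESC`), and the `DELETE ... USING UNNEST(...)` query that follows drops every log for the same key whose `(miniblock_number, operation_number)` row value is strictly smaller. PostgreSQL compares such row values lexicographically; a minimal Rust sketch of the same predicate, with the key parts simplified to plain integers (not part of the diff):

/// An old log is pruned iff its (miniblock_number, operation_number) pair sorts
/// strictly before the pair of the latest log with the same hashed key.
fn is_overwritten(old: (i64, i32), latest: (i64, i32)) -> bool {
    old < latest // Rust tuples compare lexicographically, like SQL row values
}

fn main() {
    assert!(is_overwritten((87, 3), (87, 4))); // same block, earlier operation
    assert!(is_overwritten((86, 9), (87, 0))); // earlier block, any operation number
    assert!(!is_overwritten((87, 4), (87, 4))); // the latest log itself is kept
}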
+ async fn prune_storage_logs(&mut self, new_logs: &[StorageLogPrimaryKey]) -> DalResult { + let (hashed_keys, block_numbers, operation_numbers): (Vec<_>, Vec<_>, Vec<_>) = new_logs + .iter() + .map(|log| { + ( + log.hashed_key.as_slice(), + log.miniblock_number, + log.operation_number, + ) + }) + .multiunzip(); let execution_result = sqlx::query!( r#" - DELETE FROM storage_logs USING ( - SELECT - hashed_key, - MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op - FROM - storage_logs - WHERE - miniblock_number BETWEEN $1 AND $2 - GROUP BY - hashed_key - ) AS last_storage_logs + DELETE FROM storage_logs USING UNNEST($1::bytea[], $2::BIGINT[], $3::INT[]) AS new_logs (hashed_key, miniblock_number, operation_number) WHERE - storage_logs.miniblock_number BETWEEN $1 AND $2 - AND last_storage_logs.hashed_key = storage_logs.hashed_key - AND ( - storage_logs.miniblock_number != last_storage_logs.op[1] - OR storage_logs.operation_number != last_storage_logs.op[2] - ) + storage_logs.hashed_key = new_logs.hashed_key + AND (storage_logs.miniblock_number, storage_logs.operation_number) < (new_logs.miniblock_number, new_logs.operation_number) "#, - i64::from(l2_blocks_to_prune.start().0), - i64::from(l2_blocks_to_prune.end().0) + &hashed_keys as &[&[u8]], + &block_numbers, + &operation_numbers ) - .instrument("hard_prune_batches_range#prune_storage_logs_in_range") - .with_arg("l2_blocks_to_prune", &l2_blocks_to_prune) + .instrument("hard_prune_batches_range#prune_storage_logs") + .with_arg("new_logs.len", &new_logs.len()) .report_latency() .execute(self.storage) .await?; diff --git a/core/lib/dal/src/pruning_dal/tests.rs b/core/lib/dal/src/pruning_dal/tests.rs index 4b2c6befcfaa..2670fe550c56 100644 --- a/core/lib/dal/src/pruning_dal/tests.rs +++ b/core/lib/dal/src/pruning_dal/tests.rs @@ -377,8 +377,7 @@ async fn storage_logs_pruning_works_correctly() { &[random_storage_log(2, 3), random_storage_log(3, 4)], ); assert_l2_block_storage_logs_equal(L2BlockNumber(1), &actual_logs, &[random_storage_log(1, 1)]); - assert_eq!(stats.deleted_storage_logs_from_past_batches, 0); - assert_eq!(stats.deleted_storage_logs_from_pruned_batches, 1); + assert_eq!(stats.deleted_storage_logs, 1); let stats = transaction .pruning_dal() @@ -402,8 +401,7 @@ async fn storage_logs_pruning_works_correctly() { &actual_logs, &[random_storage_log(5, 7)], ); - assert_eq!(stats.deleted_storage_logs_from_past_batches, 1); - assert_eq!(stats.deleted_storage_logs_from_pruned_batches, 1); + assert_eq!(stats.deleted_storage_logs, 2); } #[tokio::test] diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index 85b18e203ce2..7c7dc6995d14 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -30,7 +30,7 @@ pub struct DecommitterOracle { /// Stores pages of memory where certain code hashes have already been decommitted. /// It is expected that they all are present in the DB. // `decommitted_code_hashes` history is necessary - pub decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, + pub decommitted_code_hashes: HistoryRecorder>, HistoryEnabled>, /// Stores history of decommitment requests. 
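The decommitter change above turns `decommitted_code_hashes` into a map from code hash to `Option<u32>`: `prepare_to_decommit` records the hash with `None` before any bytecode is copied (so the hash already counts as a used contract), the actual decommit later overwrites it with `Some(memory_page)`, and the circuits tracer below only charges `CodeDecommitter` cycles once the value becomes `Some`. A simplified sketch of that state machine with a plain `HashMap` (type and method names here are illustrative, not part of the diff):

use std::collections::HashMap;

/// Stand-in for `decommitted_code_hashes`: `None` = prepared only,
/// `Some(page)` = bytecode actually copied to a memory page.
#[derive(Default)]
struct DecommitTracker {
    pages: HashMap<u64, Option<u32>>, // keyed by a simplified code hash
}

impl DecommitTracker {
    fn prepare_to_decommit(&mut self, hash: u64) {
        self.pages.entry(hash).or_insert(None);
    }

    fn decommit(&mut self, hash: u64, page: u32) {
        self.pages.insert(hash, Some(page));
    }

    /// Every recorded hash is "used", even if it was only prepared.
    fn used_contracts(&self) -> Vec<u64> {
        self.pages.keys().copied().collect()
    }
}

fn main() {
    let mut tracker = DecommitTracker::default();
    tracker.prepare_to_decommit(0x42);
    assert_eq!(tracker.used_contracts(), vec![0x42]); // visible before the bytecode is copied
    tracker.decommit(0x42, 7);
    assert_eq!(tracker.pages[&0x42], Some(7));
}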
decommitment_requests: HistoryRecorder, H>, } @@ -89,7 +89,7 @@ impl DecommitterOracle { pub fn get_decommitted_code_hashes_with_history( &self, - ) -> &HistoryRecorder, HistoryEnabled> { + ) -> &HistoryRecorder>, HistoryEnabled> { &self.decommitted_code_hashes } @@ -108,7 +108,7 @@ impl DecommitterOracle { .map(|(_, value)| value.len() * std::mem::size_of::()) .sum::(); let decommitted_code_hashes_size = - self.decommitted_code_hashes.inner().len() * std::mem::size_of::<(U256, u32)>(); + self.decommitted_code_hashes.inner().len() * std::mem::size_of::<(U256, Option)>(); known_bytecodes_size + decommitted_code_hashes_size } @@ -132,7 +132,7 @@ impl DecommitterOracle { ); let decommitted_code_hashes_size = self.decommitted_code_hashes.borrow_history(|h| h.len(), 0) - * std::mem::size_of::< as WithHistory>::HistoryRecord>(); + * std::mem::size_of::<> as WithHistory>::HistoryRecord>(); known_bytecodes_stack_size + known_bytecodes_heap_size + decommitted_code_hashes_size } @@ -172,6 +172,7 @@ impl DecommittmentProcess .inner() .get(&stored_hash) .copied() + .flatten() { partial_query.is_fresh = false; partial_query.memory_page = MemoryPage(memory_page); @@ -179,6 +180,8 @@ impl DecommittmentProcess Ok(partial_query) } else { partial_query.is_fresh = true; + self.decommitted_code_hashes + .insert(stored_hash, None, partial_query.timestamp); Ok(partial_query) } @@ -216,7 +219,7 @@ impl DecommittmentProcess rw_flag: true, }; self.decommitted_code_hashes - .insert(stored_hash, page_to_use.0, timestamp); + .insert(stored_hash, Some(page_to_use.0), timestamp); // Copy the bytecode (that is stored in 'values' Vec) into the memory page. if B { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index 7bc08b6fb495..1798c700ea2d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -1,6 +1,14 @@ -use std::collections::{HashMap, HashSet}; +use std::{ + collections::{HashMap, HashSet}, + str::FromStr, +}; use itertools::Itertools; +use zk_evm_1_5_0::{ + abstractions::DecommittmentProcessor, + aux_structures::{DecommittmentQuery, MemoryPage, Timestamp}, + zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, +}; use zksync_state::WriteStorage; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_test_account::Account; @@ -91,6 +99,47 @@ fn test_get_used_contracts() { } } +#[test] +fn test_contract_is_used_right_after_prepare_to_decommit() { + let mut vm = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + assert!(vm.vm.get_used_contracts().is_empty()); + + let bytecode_hash = + U256::from_str("0x100067ff3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185") + .unwrap(); + vm.vm + .state + .decommittment_processor + .populate(vec![(bytecode_hash, vec![])], Timestamp(0)); + + let header = hex::decode("0100067f").unwrap(); + let normalized_preimage = + hex::decode("f3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185").unwrap(); + vm.vm + .state + .decommittment_processor + .prepare_to_decommit( + 0, + DecommittmentQuery { + header: VersionedHashHeader(header.try_into().unwrap()), + normalized_preimage: VersionedHashNormalizedPreimage( + normalized_preimage.try_into().unwrap(), + ), + timestamp: Timestamp(0), + memory_page: MemoryPage(0), + decommitted_length: 0, + is_fresh: 
false, + }, + ) + .unwrap(); + + assert_eq!(vm.vm.get_used_contracts(), vec![bytecode_hash]); +} + fn known_bytecodes_without_aa_code( vm: &Vm, ) -> HashMap> { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs index 102822351366..2a6fead8cf9c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs @@ -43,7 +43,7 @@ pub(crate) struct DecommitterTestInnerState { /// so we just compare the modified keys. This is reasonable enough. pub(crate) modified_storage_keys: ModifiedKeysMap, pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, + pub(crate) decommitted_code_hashes: HistoryRecorder>, HistoryEnabled>, } #[derive(Clone, PartialEq, Debug)] diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs index 7c3012d03f11..4d5dc0b13273 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs @@ -177,20 +177,21 @@ impl CircuitsTracer { .decommitted_code_hashes .history(); for (_, history_event) in &history[last_decommitment_history_entry_checked..] { - // We assume that only insertions may happen during a single VM inspection. - assert!(history_event.value.is_none()); - let bytecode_len = state - .decommittment_processor - .known_bytecodes - .inner() - .get(&history_event.key) - .expect("Bytecode must be known at this point") - .len(); + // We update cycles once per bytecode when it is actually decommitted. + if history_event.value.is_some() { + let bytecode_len = state + .decommittment_processor + .known_bytecodes + .inner() + .get(&history_event.key) + .expect("Bytecode must be known at this point") + .len(); - // Each cycle of `CodeDecommitter` processes 2 words. - // If the number of words in bytecode is odd, then number of cycles must be rounded up. - let decommitter_cycles_used = (bytecode_len + 1) / 2; - self.statistics.code_decommitter_cycles += decommitter_cycles_used as u32; + // Each cycle of `CodeDecommitter` processes 2 words. + // If the number of words in bytecode is odd, then number of cycles must be rounded up. + let decommitter_cycles_used = (bytecode_len + 1) / 2; + self.statistics.code_decommitter_cycles += decommitter_cycles_used as u32; + } } self.last_decommitment_history_entry_checked = Some(history.len()); } diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index b48277a88e52..e683e0cae00f 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -156,6 +156,7 @@ fn block_details_base(hash: H256) -> api::BlockDetailsBase { executed_at: None, l1_gas_price: 0, l2_fair_gas_price: 0, + fair_pubdata_price: None, base_system_contracts_hashes: Default::default(), } } diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index ff7006f1c6cc..ce21a754c7aa 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -761,6 +761,8 @@ pub struct BlockDetailsBase { pub executed_at: Option>, pub l1_gas_price: u64, pub l2_fair_gas_price: u64, + // Cost of publishing one byte (in wei). 
+ pub fair_pubdata_price: Option, pub base_system_contracts_hashes: BaseSystemContractsHashes, } diff --git a/core/lib/types/src/contract_verification_api.rs b/core/lib/types/src/contract_verification_api.rs index 033bb9dc9f36..588de3cb675e 100644 --- a/core/lib/types/src/contract_verification_api.rs +++ b/core/lib/types/src/contract_verification_api.rs @@ -140,7 +140,7 @@ pub struct VerificationIncomingRequest { pub optimizer_mode: Option, #[serde(default)] pub constructor_arguments: Bytes, - #[serde(default)] + #[serde(default, alias = "enableEraVMExtensions")] pub is_system: bool, #[serde(default)] pub force_evmla: bool, diff --git a/core/node/api_server/src/tx_sender/proxy.rs b/core/node/api_server/src/tx_sender/proxy.rs index a1fa77d2f1b2..52fcc8a1a8b0 100644 --- a/core/node/api_server/src/tx_sender/proxy.rs +++ b/core/node/api_server/src/tx_sender/proxy.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeSet, HashMap, HashSet}, - future::Future, sync::Arc, time::Duration, }; @@ -282,13 +281,24 @@ impl TxProxy { pending_nonce } - pub fn run_account_nonce_sweeper( + pub fn account_nonce_sweeper_task( &self, pool: ConnectionPool, - stop_receiver: watch::Receiver, - ) -> impl Future> { - let tx_cache = self.tx_cache.clone(); - tx_cache.run_updates(pool, stop_receiver) + ) -> AccountNonceSweeperTask { + let cache = self.tx_cache.clone(); + AccountNonceSweeperTask { cache, pool } + } +} + +#[derive(Debug)] +pub struct AccountNonceSweeperTask { + cache: TxCache, + pool: ConnectionPool, +} + +impl AccountNonceSweeperTask { + pub async fn run(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { + self.cache.run_updates(self.pool, stop_receiver).await } } diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index b22fde34e7c6..5fc95b6c91f3 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -43,6 +43,7 @@ zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_node_api_server.workspace = true zksync_test_account.workspace = true +zksync_contracts.workspace= true tokio.workspace = true test-casing.workspace = true diff --git a/core/node/consensus/src/storage/mod.rs b/core/node/consensus/src/storage/mod.rs index cf45f89ad11e..bc8a0b8b8409 100644 --- a/core/node/consensus/src/storage/mod.rs +++ b/core/node/consensus/src/storage/mod.rs @@ -18,7 +18,7 @@ use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber, L2BlockNumber use super::config; #[cfg(test)] -mod testonly; +pub(crate) mod testonly; /// Context-aware `zksync_dal::ConnectionPool` wrapper. 
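On the `contract_verification_api` change above: the added `alias = "enableEraVMExtensions"` lets the verifier accept an alternative JSON key, `enableEraVMExtensions`, for the `is_system` flag in addition to its existing serialized name, while `default` keeps the field optional. A self-contained sketch of the behavior, assuming `serde` (with the derive feature) and `serde_json`; the `Request` struct is illustrative, not the real `VerificationIncomingRequest`:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Request {
    #[serde(default, alias = "enableEraVMExtensions")]
    is_system: bool,
}

fn main() {
    let old: Request = serde_json::from_str(r#"{"isSystem": true}"#).unwrap();
    let new: Request = serde_json::from_str(r#"{"enableEraVMExtensions": true}"#).unwrap();
    let neither: Request = serde_json::from_str("{}").unwrap(); // `default` applies
    assert!(old.is_system && new.is_system && !neither.is_system);
}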
#[derive(Debug, Clone)] diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index ccac1f7e45a9..f5f30021b7c4 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -3,13 +3,49 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_roles::validator; -use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_contracts::BaseSystemContracts; +use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{recover, snapshot, Snapshot}; -use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber}; +use zksync_types::{ + commitment::L1BatchWithMetadata, protocol_version::ProtocolSemanticVersion, + system_contracts::get_system_smart_contracts, L1BatchNumber, L2BlockNumber, ProtocolVersionId, +}; use super::ConnectionPool; +pub(crate) fn mock_genesis_params(protocol_version: ProtocolVersionId) -> GenesisParams { + let mut cfg = mock_genesis_config(); + cfg.protocol_version = Some(ProtocolSemanticVersion { + minor: protocol_version, + patch: 0.into(), + }); + GenesisParams::from_genesis_config( + cfg, + BaseSystemContracts::load_from_disk(), + get_system_smart_contracts(), + ) + .unwrap() +} + impl ConnectionPool { + pub(crate) async fn test( + from_snapshot: bool, + protocol_version: ProtocolVersionId, + ) -> ConnectionPool { + match from_snapshot { + true => { + ConnectionPool::from_snapshot(Snapshot::make( + L1BatchNumber(23), + L2BlockNumber(87), + &[], + mock_genesis_params(protocol_version), + )) + .await + } + false => ConnectionPool::from_genesis(protocol_version).await, + } + } + /// Waits for the `number` L2 block to have a certificate. pub async fn wait_for_certificate( &self, @@ -60,11 +96,11 @@ impl ConnectionPool { } /// Constructs a new db initialized with genesis state. - pub(crate) async fn from_genesis() -> Self { + pub(crate) async fn from_genesis(protocol_version: ProtocolVersionId) -> Self { let pool = zksync_dal::ConnectionPool::test_pool().await; { let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) + insert_genesis_batch(&mut storage, &mock_genesis_params(protocol_version)) .await .unwrap(); } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 5baa1c7b1eed..ce16efed2225 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -54,6 +54,7 @@ use crate::{ /// Fake StateKeeper for tests. pub(super) struct StateKeeper { + protocol_version: ProtocolVersionId, // Batch of the `last_block`. last_batch: L1BatchNumber, last_block: L2BlockNumber, @@ -130,6 +131,16 @@ impl StateKeeper { pool: ConnectionPool, ) -> ctx::Result<(Self, StateKeeperRunner)> { let mut conn = pool.connection(ctx).await.wrap("connection()")?; + // We fetch the last protocol version from storage. + // `protocol_version_id_by_timestamp` does a wrapping conversion to `i64`. + let protocol_version = ctx + .wait( + conn.0 + .protocol_versions_dal() + .protocol_version_id_by_timestamp(i64::MAX.try_into().unwrap()), + ) + .await? + .context("protocol_version_id_by_timestamp()")?; let cursor = ctx .wait(IoCursor::for_fetcher(&mut conn.0)) .await? 
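The `ConnectionPool::test(from_snapshot, protocol_version)` helper introduced above lets consensus tests choose both the storage origin (snapshot recovery vs. genesis) and the protocol version of the chain under test, which `StateKeeper::new` then reads back from storage. A hedged usage sketch, assuming the crate-local `ConnectionPool` test wrapper from `storage::testonly` is in scope (setup only, no assertions):

use zksync_types::ProtocolVersionId;

async fn setup_pools_example() {
    // Genesis-based storage at the latest released protocol version...
    let _genesis_pool = ConnectionPool::test(false, ProtocolVersionId::latest()).await;
    // ...or storage recovered from a mock snapshot at the next (unreleased) version.
    let _snapshot_pool = ConnectionPool::test(true, ProtocolVersionId::next()).await;
}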
@@ -164,6 +175,7 @@ impl StateKeeper { let account = Account::random(); Ok(( Self { + protocol_version, last_batch: cursor.l1_batch, last_block: cursor.next_l2_block - 1, last_timestamp: cursor.prev_l2_block_timestamp, @@ -196,7 +208,7 @@ impl StateKeeper { self.batch_sealed = false; SyncAction::OpenBatch { params: L1BatchParams { - protocol_version: ProtocolVersionId::latest(), + protocol_version: self.protocol_version, validation_computational_gas_limit: u32::MAX, operator_address: GenesisParams::mock().config().fee_account, fee_input: BatchFeeInput::L1Pegged(L1PeggedBatchFeeModelInput { diff --git a/core/node/consensus/src/tests.rs b/core/node/consensus/src/tests.rs index 79784f0fbb51..b16c66e478bb 100644 --- a/core/node/consensus/src/tests.rs +++ b/core/node/consensus/src/tests.rs @@ -1,6 +1,6 @@ #![allow(unused)] use anyhow::Context as _; -use test_casing::test_casing; +use test_casing::{test_casing, Product}; use tracing::Instrument as _; use zksync_concurrency::{ctx, scope}; use zksync_config::configs::consensus::{ValidatorPublicKey, WeightedValidator}; @@ -12,26 +12,20 @@ use zksync_consensus_roles::{ }; use zksync_dal::CoreDal; use zksync_node_test_utils::Snapshot; -use zksync_types::{L1BatchNumber, L2BlockNumber}; +use zksync_types::{L1BatchNumber, L2BlockNumber, ProtocolVersionId}; use super::*; -async fn new_pool(from_snapshot: bool) -> ConnectionPool { - match from_snapshot { - true => { - ConnectionPool::from_snapshot(Snapshot::make(L1BatchNumber(23), L2BlockNumber(87), &[])) - .await - } - false => ConnectionPool::from_genesis().await, - } -} +const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; +const FROM_SNAPSHOT: [bool; 2] = [true, false]; +#[test_casing(2, VERSIONS)] #[tokio::test(flavor = "multi_thread")] -async fn test_validator_block_store() { +async fn test_validator_block_store(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let pool = new_pool(false).await; + let pool = ConnectionPool::test(false, version).await; // Fill storage with unsigned L2 blocks. // Fetch a suffix of blocks that we will generate (fake) certs for. @@ -91,9 +85,9 @@ async fn test_validator_block_store() { // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. -#[test_casing(2, [false, true])] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test(flavor = "multi_thread")] -async fn test_validator(from_snapshot: bool) { +async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); @@ -102,7 +96,7 @@ async fn test_validator(from_snapshot: bool) { scope::run!(ctx, |ctx, s| async { tracing::info!("Start state keeper."); - let pool = new_pool(from_snapshot).await; + let pool = ConnectionPool::test(from_snapshot,version).await; let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx)); @@ -155,8 +149,9 @@ async fn test_validator(from_snapshot: bool) { } // Test running a validator node and 2 full nodes recovered from different snapshots. 
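On the test matrix above: `test_casing` with `Product` generates one test per element of the cartesian product of the listed case arrays, so `#[test_casing(4, Product((FROM_SNAPSHOT, VERSIONS)))]` turns the two 2-element arrays into four cases. A minimal sketch of the pattern outside this diff, assuming the `test_casing` crate as a dev-dependency (constants and the test body are illustrative):

use test_casing::{test_casing, Product};

const FLAGS: [bool; 2] = [false, true];
const SIZES: [usize; 3] = [1, 2, 3];

// 2 flags x 3 sizes = 6 generated cases; the first macro argument must match that count.
#[test_casing(6, Product((FLAGS, SIZES)))]
#[test]
fn works_for_all_combinations(flag: bool, size: usize) {
    let filled = vec![flag; size];
    assert_eq!(filled.len(), size);
}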
+#[test_casing(2, VERSIONS)] #[tokio::test(flavor = "multi_thread")] -async fn test_nodes_from_various_snapshots() { +async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); @@ -165,7 +160,7 @@ async fn test_nodes_from_various_snapshots() { scope::run!(ctx, |ctx, s| async { tracing::info!("spawn validator"); - let validator_pool = ConnectionPool::from_genesis().await; + let validator_pool = ConnectionPool::from_genesis(version).await; let (mut validator, runner) = testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); @@ -233,9 +228,9 @@ async fn test_nodes_from_various_snapshots() { // Test running a validator node and a couple of full nodes. // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. -#[test_casing(2, [false, true])] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test(flavor = "multi_thread")] -async fn test_full_nodes(from_snapshot: bool) { +async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { const NODES: usize = 2; zksync_concurrency::testonly::abort_on_panic(); @@ -256,7 +251,7 @@ async fn test_full_nodes(from_snapshot: bool) { // Run validator and fetchers in parallel. scope::run!(ctx, |ctx, s| async { - let validator_pool = new_pool(from_snapshot).await; + let validator_pool = ConnectionPool::test(from_snapshot, version).await; let (mut validator, runner) = testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(async { @@ -272,8 +267,7 @@ async fn test_full_nodes(from_snapshot: bool) { validator.seal_batch().await; validator_pool .wait_for_payload(ctx, validator.last_block()) - .await - .unwrap(); + .await?; tracing::info!("Run validator."); let (cfg, secrets) = testonly::config(&validator_cfgs[0]); @@ -283,7 +277,7 @@ async fn test_full_nodes(from_snapshot: bool) { let mut node_pools = vec![]; for (i, cfg) in node_cfgs.iter().enumerate() { let i = ctx::NoCopy(i); - let pool = new_pool(from_snapshot).await; + let pool = ConnectionPool::test(from_snapshot, version).await; let (node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; node_pools.push(pool.clone()); s.spawn_bg(async { @@ -318,9 +312,9 @@ async fn test_full_nodes(from_snapshot: bool) { } // Test running external node (non-leader) validators. -#[test_casing(2, [false, true])] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test(flavor = "multi_thread")] -async fn test_en_validators(from_snapshot: bool) { +async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { const NODES: usize = 3; zksync_concurrency::testonly::abort_on_panic(); @@ -331,7 +325,7 @@ async fn test_en_validators(from_snapshot: bool) { // Run all nodes in parallel. 
scope::run!(ctx, |ctx, s| async { - let main_node_pool = new_pool(from_snapshot).await; + let main_node_pool = ConnectionPool::test(from_snapshot, version).await; let (mut main_node, runner) = testonly::StateKeeper::new(ctx, main_node_pool.clone()).await?; s.spawn_bg(async { @@ -370,7 +364,7 @@ async fn test_en_validators(from_snapshot: bool) { let mut ext_node_pools = vec![]; for (i, cfg) in cfgs[1..].iter().enumerate() { let i = ctx::NoCopy(i); - let pool = new_pool(from_snapshot).await; + let pool = ConnectionPool::test(from_snapshot, version).await; let (ext_node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; ext_node_pools.push(pool.clone()); s.spawn_bg(async { @@ -404,9 +398,9 @@ async fn test_en_validators(from_snapshot: bool) { } // Test fetcher back filling missing certs. -#[test_casing(2, [false, true])] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test(flavor = "multi_thread")] -async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool) { +async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); @@ -416,7 +410,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool) { scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn validator."); - let validator_pool = new_pool(from_snapshot).await; + let validator_pool = ConnectionPool::test(from_snapshot, version).await; let (mut validator, runner) = testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); @@ -426,7 +420,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool) { validator.seal_batch().await; let client = validator.connect(ctx).await?; - let node_pool = new_pool(from_snapshot).await; + let node_pool = ConnectionPool::test(from_snapshot, version).await; tracing::info!("Run p2p fetcher."); scope::run!(ctx, |ctx, s| async { @@ -479,16 +473,16 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool) { .unwrap(); } -#[test_casing(2, [false, true])] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_centralized_fetcher(from_snapshot: bool) { +async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn a validator."); - let validator_pool = new_pool(from_snapshot).await; + let validator_pool = ConnectionPool::test(from_snapshot, version).await; let (mut validator, runner) = testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); @@ -498,7 +492,7 @@ async fn test_centralized_fetcher(from_snapshot: bool) { validator.seal_batch().await; tracing::info!("Spawn a node."); - let node_pool = new_pool(from_snapshot).await; + let node_pool = ConnectionPool::test(from_snapshot, version).await; let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("fetcher"))); s.spawn_bg(node.run_fetcher(ctx, validator.connect(ctx).await?)); @@ -520,14 +514,15 @@ async fn test_centralized_fetcher(from_snapshot: bool) { /// Tests that generated L1 batch witnesses can be verified successfully. /// TODO: add tests for verification failures. 
+#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_batch_witness() { +async fn test_batch_witness(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); scope::run!(ctx, |ctx, s| async { - let pool = ConnectionPool::from_genesis().await; + let pool = ConnectionPool::from_genesis(version).await; let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run_real(ctx)); diff --git a/core/node/db_pruner/src/metrics.rs b/core/node/db_pruner/src/metrics.rs index 73bcefd041dd..1070ad842703 100644 --- a/core/node/db_pruner/src/metrics.rs +++ b/core/node/db_pruner/src/metrics.rs @@ -15,8 +15,8 @@ pub(super) enum MetricPruneType { enum PrunedEntityType { L1Batch, L2Block, - StorageLogFromPrunedBatch, - StorageLogFromPastBatch, + StorageLog, + OverwritingLog, // not really removed; just used to measure query complexity Event, L2ToL1Log, CallTrace, @@ -44,27 +44,22 @@ impl DbPrunerMetrics { let HardPruningStats { deleted_l1_batches, deleted_l2_blocks, - deleted_storage_logs_from_past_batches, - deleted_storage_logs_from_pruned_batches, + overwriting_logs, + deleted_storage_logs, deleted_events, deleted_call_traces, deleted_l2_to_l1_logs, } = stats; - let deleted_storage_logs = - deleted_storage_logs_from_past_batches + deleted_storage_logs_from_pruned_batches; tracing::info!( "Performed pruning of database, deleted {deleted_l1_batches} L1 batches, {deleted_l2_blocks} L2 blocks, \ - {deleted_storage_logs} storage logs ({deleted_storage_logs_from_pruned_batches} from pruned batches + \ - {deleted_storage_logs_from_past_batches} from past batches), \ + {deleted_storage_logs} storage logs ({overwriting_logs} overwriting logs), \ {deleted_events} events, {deleted_call_traces} call traces, {deleted_l2_to_l1_logs} L2-to-L1 logs" ); self.deleted_entities[&PrunedEntityType::L1Batch].observe(deleted_l1_batches); self.deleted_entities[&PrunedEntityType::L2Block].observe(deleted_l2_blocks); - self.deleted_entities[&PrunedEntityType::StorageLogFromPastBatch] - .observe(deleted_storage_logs_from_past_batches); - self.deleted_entities[&PrunedEntityType::StorageLogFromPrunedBatch] - .observe(deleted_storage_logs_from_pruned_batches); + self.deleted_entities[&PrunedEntityType::OverwritingLog].observe(overwriting_logs); + self.deleted_entities[&PrunedEntityType::StorageLog].observe(deleted_storage_logs); self.deleted_entities[&PrunedEntityType::Event].observe(deleted_events); self.deleted_entities[&PrunedEntityType::L2ToL1Log].observe(deleted_l2_to_l1_logs); self.deleted_entities[&PrunedEntityType::CallTrace].observe(deleted_call_traces); diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index ea07248aa813..f635d12bae13 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -190,12 +190,13 @@ impl EthTxManager { blob_base_fee_per_gas, signed_tx.hash, signed_tx.raw_tx.as_ref(), + current_block.0, ) .await .unwrap() { if let Err(error) = self - .send_raw_transaction(storage, tx_history_id, signed_tx.raw_tx, current_block) + .send_raw_transaction(storage, tx_history_id, signed_tx.raw_tx) .await { tracing::warn!( @@ -216,17 +217,9 @@ impl EthTxManager { storage: &mut Connection<'_, Core>, tx_history_id: u32, raw_tx: RawTransactionBytes, - current_block: L1BlockNumber, ) -> Result<(), EthSenderError> { match self.l1_interface.send_raw_tx(raw_tx).await { - Ok(_) => { - storage - 
.eth_sender_dal() - .set_sent_at_block(tx_history_id, current_block.0) - .await - .unwrap(); - Ok(()) - } + Ok(_) => Ok(()), Err(error) => { // In transient errors, server may have received the transaction // we don't want to loose record about it in case that happens @@ -401,16 +394,22 @@ impl EthTxManager { self.apply_tx_status(storage, ð_tx, tx_status, l1_block_numbers.finalized) .await; - } else if let Err(error) = self - .send_raw_transaction( - storage, - tx.id, - RawTransactionBytes::new_unchecked(tx.signed_raw_tx.clone()), - l1_block_numbers.latest, - ) - .await - { - tracing::warn!("Error sending transaction {tx:?}: {error}"); + } else { + storage + .eth_sender_dal() + .set_sent_at_block(tx.id, l1_block_numbers.latest.0) + .await + .unwrap(); + if let Err(error) = self + .send_raw_transaction( + storage, + tx.id, + RawTransactionBytes::new_unchecked(tx.signed_raw_tx.clone()), + ) + .await + { + tracing::warn!("Error sending transaction {tx:?}: {error}"); + } } } } diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 8e2c915d5749..d48522fb8116 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -45,6 +45,7 @@ zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true zksync_reorg_detector.workspace = true zksync_vm_runner.workspace = true +zksync_node_db_pruner.workspace = true tracing.workspace = true thiserror.workspace = true diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index a62f04af0334..f0cb8417ff97 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -43,7 +43,7 @@ use zksync_node_framework::{ sigint::SigintHandlerLayer, state_keeper::{ main_batch_executor::MainBatchExecutorLayer, mempool_io::MempoolIOLayer, - StateKeeperLayer, + output_handler::OutputHandlerLayer, StateKeeperLayer, }, web3_api::{ caches::MempoolCacheLayer, @@ -55,6 +55,7 @@ use zksync_node_framework::{ }, service::{ZkStackService, ZkStackServiceBuilder, ZkStackServiceError}, }; +use zksync_state::RocksdbStorageOptions; struct MainNodeBuilder { node: ZkStackServiceBuilder, @@ -145,17 +146,32 @@ impl MainNodeBuilder { fn add_state_keeper_layer(mut self) -> anyhow::Result { let wallets = Wallets::from_env()?; + let contracts_config = ContractsConfig::from_env()?; + let sk_config = StateKeeperConfig::from_env()?; + let persisence_layer = OutputHandlerLayer::new( + contracts_config.l2_shared_bridge_addr.unwrap(), + sk_config.l2_block_seal_queue_capacity, + ); let mempool_io_layer = MempoolIOLayer::new( NetworkConfig::from_env()?.zksync_network_id, - ContractsConfig::from_env()?, - StateKeeperConfig::from_env()?, + sk_config, MempoolConfig::from_env()?, wallets.state_keeper.context("State keeper wallets")?, ); let main_node_batch_executor_builder_layer = - MainBatchExecutorLayer::new(StateKeeperConfig::from_env()?); - let state_keeper_layer = StateKeeperLayer::new(DBConfig::from_env()?); + MainBatchExecutorLayer::new(StateKeeperConfig::from_env()?.save_call_traces, true); + let db_config = DBConfig::from_env()?; + + let rocksdb_options = RocksdbStorageOptions { + block_cache_capacity: db_config + .experimental + .state_keeper_db_block_cache_capacity(), + max_open_files: db_config.experimental.state_keeper_db_max_open_files, + }; + let state_keeper_layer = + StateKeeperLayer::new(db_config.state_keeper_db_path, rocksdb_options); self.node + .add_layer(persisence_layer) 
.add_layer(mempool_io_layer) .add_layer(main_node_batch_executor_builder_layer) .add_layer(state_keeper_layer); @@ -286,6 +302,7 @@ impl MainNodeBuilder { rpc_config.websocket_requests_per_minute_limit(), ), replication_lag_limit: circuit_breaker_config.replication_lag_limit(), + ..Default::default() }; self.node.add_layer(Web3ServerLayer::ws( rpc_config.ws_port, diff --git a/core/node/node_framework/src/implementations/layers/batch_status_updater.rs b/core/node/node_framework/src/implementations/layers/batch_status_updater.rs new file mode 100644 index 000000000000..ba328facc8a3 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/batch_status_updater.rs @@ -0,0 +1,52 @@ +use zksync_node_sync::batch_status_updater::BatchStatusUpdater; + +use crate::{ + implementations::resources::{ + healthcheck::AppHealthCheckResource, + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + }, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct BatchStatusUpdaterLayer; + +#[async_trait::async_trait] +impl WiringLayer for BatchStatusUpdaterLayer { + fn layer_name(&self) -> &'static str { + "batch_status_updater_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let pool = context.get_resource::>().await?; + let MainNodeClientResource(client) = context.get_resource().await?; + + let updater = BatchStatusUpdater::new(client, pool.get().await?); + + // Insert healthcheck + let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; + app_health + .insert_component(updater.health_check()) + .map_err(WiringError::internal)?; + + // Insert task + context.add_task(Box::new(updater)); + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Task for BatchStatusUpdater { + fn id(&self) -> TaskId { + "batch_status_updater".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await?; + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/commitment_generator.rs b/core/node/node_framework/src/implementations/layers/commitment_generator.rs index 5d2f63931295..cc57599759eb 100644 --- a/core/node/node_framework/src/implementations/layers/commitment_generator.rs +++ b/core/node/node_framework/src/implementations/layers/commitment_generator.rs @@ -1,3 +1,5 @@ +use std::num::NonZero; + use zksync_commitment_generator::CommitmentGenerator; use zksync_types::commitment::L1BatchCommitmentMode; @@ -14,11 +16,20 @@ use crate::{ #[derive(Debug)] pub struct CommitmentGeneratorLayer { mode: L1BatchCommitmentMode, + max_parallelism: Option>, } impl CommitmentGeneratorLayer { pub fn new(mode: L1BatchCommitmentMode) -> Self { - Self { mode } + Self { + mode, + max_parallelism: None, + } + } + + pub fn with_max_parallelism(mut self, max_parallelism: Option>) -> Self { + self.max_parallelism = max_parallelism; + self } } @@ -30,10 +41,17 @@ impl WiringLayer for CommitmentGeneratorLayer { async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { let pool_resource = context.get_resource::>().await?; - let pool_size = CommitmentGenerator::default_parallelism().get(); + + let pool_size = self + .max_parallelism + .unwrap_or(CommitmentGenerator::default_parallelism()) + .get(); let main_pool = pool_resource.get_custom(pool_size).await?; - let commitment_generator = CommitmentGenerator::new(main_pool, 
self.mode); + let mut commitment_generator = CommitmentGenerator::new(main_pool, self.mode); + if let Some(max_parallelism) = self.max_parallelism { + commitment_generator.set_max_parallelism(max_parallelism); + } let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; app_health diff --git a/core/node/node_framework/src/implementations/layers/consensus.rs b/core/node/node_framework/src/implementations/layers/consensus.rs index 06bca1bba3ae..8cc7ea4098de 100644 --- a/core/node/node_framework/src/implementations/layers/consensus.rs +++ b/core/node/node_framework/src/implementations/layers/consensus.rs @@ -161,14 +161,14 @@ impl Task for FetcherTask { let root_ctx = ctx::root(); scope::run!(&root_ctx, |ctx, s| async { s.spawn_bg(consensus::era::run_en( - &root_ctx, + ctx, self.config, self.pool, self.sync_state, self.main_node_client, self.action_queue_sender, )); - ctx.wait(stop_receiver.0.wait_for(|stop| *stop)).await??; + let _ = stop_receiver.0.wait_for(|stop| *stop).await?; Ok(()) }) .await diff --git a/core/node/node_framework/src/implementations/layers/consistency_checker.rs b/core/node/node_framework/src/implementations/layers/consistency_checker.rs index a387fc19ead1..fb4b6d8f5eed 100644 --- a/core/node/node_framework/src/implementations/layers/consistency_checker.rs +++ b/core/node/node_framework/src/implementations/layers/consistency_checker.rs @@ -61,25 +61,19 @@ impl WiringLayer for ConsistencyCheckerLayer { .map_err(WiringError::internal)?; // Create and add tasks. - context.add_task(Box::new(ConsistencyCheckerTask { - consistency_checker, - })); + context.add_task(Box::new(consistency_checker)); Ok(()) } } -pub struct ConsistencyCheckerTask { - consistency_checker: ConsistencyChecker, -} - #[async_trait::async_trait] -impl Task for ConsistencyCheckerTask { +impl Task for ConsistencyChecker { fn id(&self) -> TaskId { "consistency_checker".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.consistency_checker.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index 7b3e52c7ed5d..416d80691a31 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -1,10 +1,7 @@ -use std::time::Duration; - use zksync_config::configs::{ fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, }; -use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core}; use zksync_house_keeper::{ blocks_state_reporter::L1BatchMetricsReporter, periodic_job::PeriodicJob, @@ -23,8 +20,6 @@ use crate::{ wiring_layer::{WiringError, WiringLayer}, }; -const SCRAPE_INTERVAL: Duration = Duration::from_secs(60); - #[derive(Debug)] pub struct HouseKeeperLayer { house_keeper_config: HouseKeeperConfig, @@ -67,9 +62,6 @@ impl WiringLayer for HouseKeeperLayer { let prover_pool = prover_pool_resource.get().await?; // initialize and add tasks - let pool_for_metrics = replica_pool_resource.get_singleton().await?; - context.add_task(Box::new(PostgresMetricsScrapingTask { pool_for_metrics })); - let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( self.house_keeper_config .l1_batch_metrics_reporting_interval_ms, @@ -172,30 +164,6 @@ impl WiringLayer for HouseKeeperLayer { } } -#[derive(Debug)] -struct 
PostgresMetricsScrapingTask { - pool_for_metrics: ConnectionPool, -} - -#[async_trait::async_trait] -impl Task for PostgresMetricsScrapingTask { - fn id(&self) -> TaskId { - "postgres_metrics_scraping".into() - } - - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - tokio::select! { - () = PostgresMetrics::run_scraping(self.pool_for_metrics, SCRAPE_INTERVAL) => { - tracing::warn!("Postgres metrics scraping unexpectedly stopped"); - } - _ = stop_receiver.0.changed() => { - tracing::info!("Stop signal received, Postgres metrics scraping is shutting down"); - } - } - Ok(()) - } -} - #[derive(Debug)] struct L1BatchMetricsReporterTask { l1_batch_metrics_reporter: L1BatchMetricsReporter, diff --git a/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs b/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs new file mode 100644 index 000000000000..e333eda51192 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs @@ -0,0 +1,59 @@ +use zksync_commitment_generator::validation_task::L1BatchCommitmentModeValidationTask; +use zksync_types::{commitment::L1BatchCommitmentMode, Address}; + +use crate::{ + implementations::resources::eth_interface::EthInterfaceResource, + precondition::Precondition, + service::{ServiceContext, StopReceiver}, + task::TaskId, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct L1BatchCommitmentModeValidationLayer { + diamond_proxy_addr: Address, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, +} + +impl L1BatchCommitmentModeValidationLayer { + pub fn new( + diamond_proxy_addr: Address, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, + ) -> Self { + Self { + diamond_proxy_addr, + l1_batch_commit_data_generator_mode, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for L1BatchCommitmentModeValidationLayer { + fn layer_name(&self) -> &'static str { + "l1_batch_commitment_mode_validation_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let EthInterfaceResource(query_client) = context.get_resource().await?; + let task = L1BatchCommitmentModeValidationTask::new( + self.diamond_proxy_addr, + self.l1_batch_commit_data_generator_mode, + query_client, + ); + + context.add_precondition(Box::new(task)); + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Precondition for L1BatchCommitmentModeValidationTask { + fn id(&self) -> TaskId { + "l1_batch_commitment_mode_validation".into() + } + + async fn check(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).exit_on_success().run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index 8deafd4e2949..d465510eff5d 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -84,7 +84,17 @@ impl Task for GasAdjusterTask { "gas_adjuster".into() } - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // Gas adjuster layer is added to provide a resource for anyone to use, but it comes with + // a support task. If nobody has used the resource, we don't need to run the support task. 
+ if Arc::strong_count(&self.gas_adjuster) == 1 { + tracing::info!( + "Gas adjuster is not used by any other task, not running the support task" + ); + stop_receiver.0.changed().await?; + return Ok(()); + } + self.gas_adjuster.run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/main_node_client.rs b/core/node/node_framework/src/implementations/layers/main_node_client.rs index 80e5d44c350f..a694eb831330 100644 --- a/core/node/node_framework/src/implementations/layers/main_node_client.rs +++ b/core/node/node_framework/src/implementations/layers/main_node_client.rs @@ -1,11 +1,14 @@ -use std::num::NonZeroUsize; +use std::{num::NonZeroUsize, sync::Arc}; use anyhow::Context; +use zksync_node_sync::MainNodeHealthCheck; use zksync_types::{url::SensitiveUrl, L2ChainId}; use zksync_web3_decl::client::{Client, DynClient, L2}; use crate::{ - implementations::resources::main_node_client::MainNodeClientResource, + implementations::resources::{ + healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, + }, service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, }; @@ -40,9 +43,15 @@ impl WiringLayer for MainNodeClientLayer { .with_allowed_requests_per_second(self.rate_limit_rps) .build(); - context.insert_resource(MainNodeClientResource( - Box::new(main_node_client) as Box> - ))?; + let client = Box::new(main_node_client) as Box>; + context.insert_resource(MainNodeClientResource(client.clone()))?; + + // Insert healthcheck + let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; + app_health + .insert_custom_component(Arc::new(MainNodeHealthCheck::from(client))) + .map_err(WiringError::internal)?; + Ok(()) } } diff --git a/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs b/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs new file mode 100644 index 000000000000..11bfab18a4c6 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs @@ -0,0 +1,46 @@ +use std::sync::Arc; + +use zksync_node_fee_model::l1_gas_price::MainNodeFeeParamsFetcher; + +use crate::{ + implementations::resources::{ + fee_input::FeeInputResource, main_node_client::MainNodeClientResource, + }, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct MainNodeFeeParamsFetcherLayer; + +#[async_trait::async_trait] +impl WiringLayer for MainNodeFeeParamsFetcherLayer { + fn layer_name(&self) -> &'static str { + "main_node_fee_params_fetcher_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let MainNodeClientResource(main_node_client) = context.get_resource().await?; + let fetcher = Arc::new(MainNodeFeeParamsFetcher::new(main_node_client)); + context.insert_resource(FeeInputResource(fetcher.clone()))?; + context.add_task(Box::new(MainNodeFeeParamsFetcherTask { fetcher })); + Ok(()) + } +} + +#[derive(Debug)] +struct MainNodeFeeParamsFetcherTask { + fetcher: Arc, +} + +#[async_trait::async_trait] +impl Task for MainNodeFeeParamsFetcherTask { + fn id(&self) -> TaskId { + "main_node_fee_params_fetcher".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + self.fetcher.run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs 
b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 935bb283fe81..bc1244410bf2 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -1,12 +1,13 @@ use std::{ net::{Ipv4Addr, SocketAddr}, sync::Arc, + time::Duration, }; use anyhow::Context as _; use zksync_config::configs::{api::MerkleTreeApiConfig, database::MerkleTreeMode}; use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MetadataCalculator, MetadataCalculatorConfig, + LazyAsyncTreeReader, MerkleTreePruningTask, MetadataCalculator, MetadataCalculatorConfig, }; use zksync_storage::RocksDB; @@ -35,6 +36,7 @@ use crate::{ pub struct MetadataCalculatorLayer { config: MetadataCalculatorConfig, tree_api_config: Option, + pruning_config: Option, } impl MetadataCalculatorLayer { @@ -42,6 +44,7 @@ impl MetadataCalculatorLayer { Self { config, tree_api_config: None, + pruning_config: None, } } @@ -49,6 +52,11 @@ impl MetadataCalculatorLayer { self.tree_api_config = Some(tree_api_config); self } + + pub fn with_pruning_config(mut self, pruning_config: Duration) -> Self { + self.pruning_config = Some(pruning_config); + self + } } #[async_trait::async_trait] @@ -76,7 +84,7 @@ impl WiringLayer for MetadataCalculatorLayer { } }; - let metadata_calculator = MetadataCalculator::new( + let mut metadata_calculator = MetadataCalculator::new( self.config, object_store.map(|store_resource| store_resource.0), main_pool, @@ -98,6 +106,14 @@ impl WiringLayer for MetadataCalculatorLayer { })); } + if let Some(pruning_removal_delay) = self.pruning_config { + let pruning_task = Box::new(metadata_calculator.pruning_task(pruning_removal_delay)); + app_health + .insert_component(pruning_task.health_check()) + .map_err(|err| WiringError::Internal(err.into()))?; + context.add_task(pruning_task); + } + context.insert_resource(TreeApiClientResource(Arc::new( metadata_calculator.tree_reader(), )))?; @@ -154,3 +170,14 @@ impl Task for TreeApiTask { .await } } + +#[async_trait::async_trait] +impl Task for MerkleTreePruningTask { + fn id(&self) -> TaskId { + "merkle_tree_pruning_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 1c171e84b5ba..8637f15459d5 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -1,3 +1,4 @@ +pub mod batch_status_updater; pub mod circuit_breaker_checker; pub mod commitment_generator; pub mod consensus; @@ -7,19 +8,26 @@ pub mod eth_sender; pub mod eth_watch; pub mod healtcheck_server; pub mod house_keeper; +pub mod l1_batch_commitment_mode_validation; pub mod l1_gas; pub mod main_node_client; +pub mod main_node_fee_params_fetcher; pub mod metadata_calculator; pub mod object_store; pub mod pk_signing_eth_client; pub mod pools_layer; +pub mod postgres_metrics; pub mod prometheus_exporter; pub mod proof_data_handler; +pub mod pruning; pub mod query_eth_client; pub mod reorg_detector_checker; pub mod reorg_detector_runner; pub mod sigint; pub mod state_keeper; +pub mod sync_state_updater; pub mod tee_verifier_input_producer; +pub mod tree_data_fetcher; +pub mod validate_chain_ids; pub mod vm_runner; pub mod web3_api; diff --git 
a/core/node/node_framework/src/implementations/layers/postgres_metrics.rs b/core/node/node_framework/src/implementations/layers/postgres_metrics.rs new file mode 100644 index 000000000000..09d81844dd5a --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/postgres_metrics.rs @@ -0,0 +1,57 @@ +use std::time::Duration; + +use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core}; + +use crate::{ + implementations::resources::pools::{PoolResource, ReplicaPool}, + service::{ServiceContext, StopReceiver}, + task::{TaskId, UnconstrainedTask}, + wiring_layer::{WiringError, WiringLayer}, +}; + +const SCRAPE_INTERVAL: Duration = Duration::from_secs(60); + +#[derive(Debug)] +pub struct PostgresMetricsLayer; + +#[async_trait::async_trait] +impl WiringLayer for PostgresMetricsLayer { + fn layer_name(&self) -> &'static str { + "postgres_metrics_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let replica_pool_resource = context.get_resource::>().await?; + let pool_for_metrics = replica_pool_resource.get_singleton().await?; + context.add_unconstrained_task(Box::new(PostgresMetricsScrapingTask { pool_for_metrics })); + + Ok(()) + } +} + +#[derive(Debug)] +struct PostgresMetricsScrapingTask { + pool_for_metrics: ConnectionPool, +} + +#[async_trait::async_trait] +impl UnconstrainedTask for PostgresMetricsScrapingTask { + fn id(&self) -> TaskId { + "postgres_metrics_scraping".into() + } + + async fn run_unconstrained( + self: Box, + mut stop_receiver: StopReceiver, + ) -> anyhow::Result<()> { + tokio::select! { + () = PostgresMetrics::run_scraping(self.pool_for_metrics, SCRAPE_INTERVAL) => { + tracing::warn!("Postgres metrics scraping unexpectedly stopped"); + } + _ = stop_receiver.0.changed() => { + tracing::info!("Stop signal received, Postgres metrics scraping is shutting down"); + } + } + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs index 6c7d4f915df4..4b7451348235 100644 --- a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs +++ b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs @@ -4,7 +4,7 @@ use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck}; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, service::{ServiceContext, StopReceiver}, - task::{Task, TaskId}, + task::{TaskId, UnconstrainedTask}, wiring_layer::{WiringError, WiringLayer}, }; @@ -43,18 +43,18 @@ impl WiringLayer for PrometheusExporterLayer { prometheus_health_updater, }); - node.add_task(task); + node.add_unconstrained_task(task); Ok(()) } } #[async_trait::async_trait] -impl Task for PrometheusExporterTask { +impl UnconstrainedTask for PrometheusExporterTask { fn id(&self) -> TaskId { "prometheus_exporter".into() } - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + async fn run_unconstrained(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { let prometheus_task = self.config.run(stop_receiver.0); self.prometheus_health_updater .update(HealthStatus::Ready.into()); diff --git a/core/node/node_framework/src/implementations/layers/pruning.rs b/core/node/node_framework/src/implementations/layers/pruning.rs new file mode 100644 index 000000000000..3ad52606083b --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/pruning.rs @@ -0,0 +1,75 @@ +use std::time::Duration; + +use 
zksync_node_db_pruner::{DbPruner, DbPrunerConfig}; + +use crate::{ + implementations::resources::{ + healthcheck::AppHealthCheckResource, + pools::{MasterPool, PoolResource}, + }, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct PruningLayer { + pruning_removal_delay: Duration, + pruning_chunk_size: u32, + minimum_l1_batch_age: Duration, +} + +impl PruningLayer { + pub fn new( + pruning_removal_delay: Duration, + pruning_chunk_size: u32, + minimum_l1_batch_age: Duration, + ) -> Self { + Self { + pruning_removal_delay, + pruning_chunk_size, + minimum_l1_batch_age, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for PruningLayer { + fn layer_name(&self) -> &'static str { + "pruning_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let pool_resource = context.get_resource::>().await?; + let main_pool = pool_resource.get().await?; + + let db_pruner = DbPruner::new( + DbPrunerConfig { + removal_delay: self.pruning_removal_delay, + pruned_batch_chunk_size: self.pruning_chunk_size, + minimum_l1_batch_age: self.minimum_l1_batch_age, + }, + main_pool, + ); + + let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; + app_health + .insert_component(db_pruner.health_check()) + .map_err(WiringError::internal)?; + + context.add_task(Box::new(db_pruner)); + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Task for DbPruner { + fn id(&self) -> TaskId { + "db_pruner".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs index 64454b63998b..eee63e6763b1 100644 --- a/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs +++ b/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs @@ -1,6 +1,7 @@ use std::time::Duration; use anyhow::Context; +use zksync_dal::{ConnectionPool, Core}; use zksync_reorg_detector::{self, ReorgDetector}; use crate::{ @@ -36,6 +37,7 @@ impl WiringLayer for ReorgDetectorCheckerLayer { // Create and insert precondition. context.add_precondition(Box::new(CheckerPrecondition { + pool: pool.clone(), reorg_detector: ReorgDetector::new(main_node_client, pool), })); @@ -44,6 +46,7 @@ impl WiringLayer for ReorgDetectorCheckerLayer { } pub struct CheckerPrecondition { + pool: ConnectionPool, reorg_detector: ReorgDetector, } @@ -53,7 +56,21 @@ impl Precondition for CheckerPrecondition { "reorg_detector_checker".into() } - async fn check(mut self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + async fn check(mut self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // Given that this is a precondition -- i.e. something that starts before some invariants are met, + // we need to first ensure that there is at least one batch in the database (there may be none if + // either genesis or snapshot recovery has not been performed yet). + let earliest_batch = zksync_dal::helpers::wait_for_l1_batch( + &self.pool, + REORG_DETECTED_SLEEP_INTERVAL, + &mut stop_receiver.0, + ) + .await?; + if earliest_batch.is_none() { + // Stop signal received. 
+ return Ok(()); + } + loop { match self.reorg_detector.run_once(stop_receiver.0.clone()).await { Ok(()) => return Ok(()), diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs new file mode 100644 index 000000000000..1ec80fef4272 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs @@ -0,0 +1,68 @@ +use std::sync::Arc; + +use anyhow::Context as _; +use zksync_node_sync::{ActionQueue, ExternalIO, SyncState}; +use zksync_state_keeper::seal_criteria::NoopSealer; +use zksync_types::L2ChainId; + +use crate::{ + implementations::resources::{ + action_queue::ActionQueueSenderResource, + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, + sync_state::SyncStateResource, + }, + resource::Unique, + service::ServiceContext, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct ExternalIOLayer { + chain_id: L2ChainId, +} + +impl ExternalIOLayer { + pub fn new(chain_id: L2ChainId) -> Self { + Self { chain_id } + } +} + +#[async_trait::async_trait] +impl WiringLayer for ExternalIOLayer { + fn layer_name(&self) -> &'static str { + "external_io_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + // Fetch required resources. + let master_pool = context.get_resource::>().await?; + let MainNodeClientResource(main_node_client) = context.get_resource().await?; + + // Create `SyncState` resource. + let sync_state = SyncState::default(); + context.insert_resource(SyncStateResource(sync_state))?; + + // Create `ActionQueueSender` resource. + let (action_queue_sender, action_queue) = ActionQueue::new(); + context.insert_resource(ActionQueueSenderResource(Unique::new(action_queue_sender)))?; + + // Create external IO resource. + let io_pool = master_pool.get().await.context("Get master pool")?; + let io = ExternalIO::new( + io_pool, + action_queue, + Box::new(main_node_client.for_component("external_io")), + self.chain_id, + ) + .await + .context("Failed initializing I/O for external node state keeper")?; + context.insert_resource(StateKeeperIOResource(Unique::new(Box::new(io))))?; + + // Create sealer. 
+ context.insert_resource(ConditionalSealerResource(Arc::new(NoopSealer)))?; + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs index 2fb35fb201ab..82e6e52274aa 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs @@ -1,4 +1,3 @@ -use zksync_config::configs::chain::StateKeeperConfig; use zksync_state_keeper::MainBatchExecutor; use crate::{ @@ -10,13 +9,15 @@ use crate::{ #[derive(Debug)] pub struct MainBatchExecutorLayer { - state_keeper_config: StateKeeperConfig, + save_call_traces: bool, + optional_bytecode_compression: bool, } impl MainBatchExecutorLayer { - pub fn new(state_keeper_config: StateKeeperConfig) -> Self { + pub fn new(save_call_traces: bool, optional_bytecode_compression: bool) -> Self { Self { - state_keeper_config, + save_call_traces, + optional_bytecode_compression, } } } @@ -28,7 +29,8 @@ impl WiringLayer for MainBatchExecutorLayer { } async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let builder = MainBatchExecutor::new(self.state_keeper_config.save_call_traces, false); + let builder = + MainBatchExecutor::new(self.save_call_traces, self.optional_bytecode_compression); context.insert_resource(BatchExecutorResource(Unique::new(Box::new(builder))))?; Ok(()) diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index 65e86bef5204..1a913fd990bf 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -1,24 +1,18 @@ use std::sync::Arc; use anyhow::Context as _; -use zksync_config::{ - configs::{ - chain::{MempoolConfig, StateKeeperConfig}, - wallets, - }, - ContractsConfig, -}; -use zksync_state_keeper::{ - io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, MempoolFetcher, MempoolGuard, - MempoolIO, OutputHandler, SequencerSealer, StateKeeperPersistence, TreeWritesPersistence, +use zksync_config::configs::{ + chain::{MempoolConfig, StateKeeperConfig}, + wallets, }; +use zksync_state_keeper::{MempoolFetcher, MempoolGuard, MempoolIO, SequencerSealer}; use zksync_types::L2ChainId; use crate::{ implementations::resources::{ fee_input::FeeInputResource, pools::{MasterPool, PoolResource}, - state_keeper::{ConditionalSealerResource, OutputHandlerResource, StateKeeperIOResource}, + state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, }, resource::Unique, service::{ServiceContext, StopReceiver}, @@ -29,7 +23,6 @@ use crate::{ #[derive(Debug)] pub struct MempoolIOLayer { zksync_network_id: L2ChainId, - contracts_config: ContractsConfig, state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, wallets: wallets::StateKeeper, @@ -38,14 +31,12 @@ pub struct MempoolIOLayer { impl MempoolIOLayer { pub fn new( zksync_network_id: L2ChainId, - contracts_config: ContractsConfig, state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, wallets: wallets::StateKeeper, ) -> Self { Self { zksync_network_id, - contracts_config, state_keeper_config, mempool_config, wallets, @@ -81,23 +72,6 @@ impl WiringLayer for MempoolIOLayer { let batch_fee_input_provider = 
context.get_resource::().await?.0; let master_pool = context.get_resource::>().await?; - // Create L2 block sealer task and output handler. - // L2 Block sealing process is parallelized, so we have to provide enough pooled connections. - let persistence_pool = master_pool - .get_custom(L2BlockSealProcess::subtasks_len()) - .await - .context("Get master pool")?; - let (persistence, l2_block_sealer) = StateKeeperPersistence::new( - persistence_pool.clone(), - self.contracts_config.l2_shared_bridge_addr.unwrap(), - self.state_keeper_config.l2_block_seal_queue_capacity, - ); - let tree_writes_persistence = TreeWritesPersistence::new(persistence_pool); - let output_handler = OutputHandler::new(Box::new(persistence)) - .with_handler(Box::new(tree_writes_persistence)); - context.insert_resource(OutputHandlerResource(Unique::new(output_handler)))?; - context.add_task(Box::new(L2BlockSealerTask(l2_block_sealer))); - // Create mempool fetcher task. let mempool_guard = self.build_mempool_guard(&master_pool).await?; let mempool_fetcher_pool = master_pool @@ -137,21 +111,6 @@ impl WiringLayer for MempoolIOLayer { } } -#[derive(Debug)] -struct L2BlockSealerTask(zksync_state_keeper::L2BlockSealerTask); - -#[async_trait::async_trait] -impl Task for L2BlockSealerTask { - fn id(&self) -> TaskId { - "state_keeper/l2_block_sealer".into() - } - - async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { - // Miniblock sealer will exit itself once sender is dropped. - self.0.run().await - } -} - #[derive(Debug)] struct MempoolFetcherTask(MempoolFetcher); diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs index edbe1d6e12f7..97364f6388cd 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs @@ -1,16 +1,20 @@ use std::sync::Arc; use anyhow::Context; -use zksync_config::DBConfig; -use zksync_state::{AsyncCatchupTask, ReadStorageFactory, RocksdbStorageOptions}; +use zksync_state::{AsyncCatchupTask, ReadStorageFactory}; use zksync_state_keeper::{ seal_criteria::ConditionalSealer, AsyncRocksdbCache, BatchExecutor, OutputHandler, StateKeeperIO, ZkSyncStateKeeper, }; use zksync_storage::RocksDB; +pub mod external_io; pub mod main_batch_executor; pub mod mempool_io; +pub mod output_handler; + +// Public re-export to not require the user to directly depend on `zksync_state`. 
+pub use zksync_state::RocksdbStorageOptions; use crate::{ implementations::resources::{ @@ -32,12 +36,16 @@ use crate::{ /// #[derive(Debug)] pub struct StateKeeperLayer { - db_config: DBConfig, + state_keeper_db_path: String, + rocksdb_options: RocksdbStorageOptions, } impl StateKeeperLayer { - pub fn new(db_config: DBConfig) -> Self { - Self { db_config } + pub fn new(state_keeper_db_path: String, rocksdb_options: RocksdbStorageOptions) -> Self { + Self { + state_keeper_db_path, + rocksdb_options, + } } } @@ -69,17 +77,10 @@ impl WiringLayer for StateKeeperLayer { let sealer = context.get_resource::().await?.0; let master_pool = context.get_resource::>().await?; - let cache_options = RocksdbStorageOptions { - block_cache_capacity: self - .db_config - .experimental - .state_keeper_db_block_cache_capacity(), - max_open_files: self.db_config.experimental.state_keeper_db_max_open_files, - }; let (storage_factory, task) = AsyncRocksdbCache::new( master_pool.get_custom(2).await?, - self.db_config.state_keeper_db_path, - cache_options, + self.state_keeper_db_path, + self.rocksdb_options, ); context.add_task(Box::new(RocksdbCatchupTask(task))); diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs new file mode 100644 index 000000000000..d0e94f637e08 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -0,0 +1,121 @@ +use anyhow::Context as _; +use zksync_state_keeper::{ + io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, OutputHandler, + StateKeeperPersistence, TreeWritesPersistence, +}; +use zksync_types::Address; + +use crate::{ + implementations::resources::{ + pools::{MasterPool, PoolResource}, + state_keeper::OutputHandlerResource, + sync_state::SyncStateResource, + }, + resource::Unique, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct OutputHandlerLayer { + l2_shared_bridge_addr: Address, + l2_block_seal_queue_capacity: usize, + /// Whether transactions should be pre-inserted to DB. + /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB + /// before they are included into L2 blocks. + pre_insert_txs: bool, + /// Whether protective reads persistence is enabled. + /// Must be `true` for any node that maintains a full Merkle Tree (e.g. any instance of main node). + /// May be set to `false` for nodes that do not participate in the sequencing process (e.g. external nodes). 
+ protective_reads_persistence_enabled: bool, +} + +impl OutputHandlerLayer { + pub fn new(l2_shared_bridge_addr: Address, l2_block_seal_queue_capacity: usize) -> Self { + Self { + l2_shared_bridge_addr, + l2_block_seal_queue_capacity, + pre_insert_txs: false, + protective_reads_persistence_enabled: true, + } + } + + pub fn with_pre_insert_txs(mut self, pre_insert_txs: bool) -> Self { + self.pre_insert_txs = pre_insert_txs; + self + } + + pub fn with_protective_reads_persistence_enabled( + mut self, + protective_reads_persistence_enabled: bool, + ) -> Self { + self.protective_reads_persistence_enabled = protective_reads_persistence_enabled; + self + } +} + +#[async_trait::async_trait] +impl WiringLayer for OutputHandlerLayer { + fn layer_name(&self) -> &'static str { + "state_keeper_output_handler_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + // Fetch required resources. + let master_pool = context.get_resource::>().await?; + // Use `SyncState` if provided. + let sync_state = match context.get_resource::().await { + Ok(sync_state) => Some(sync_state.0), + Err(WiringError::ResourceLacking { .. }) => None, + Err(err) => return Err(err), + }; + + // Create L2 block sealer task and output handler. + // L2 Block sealing process is parallelized, so we have to provide enough pooled connections. + let persistence_pool = master_pool + .get_custom(L2BlockSealProcess::subtasks_len()) + .await + .context("Get master pool")?; + let (mut persistence, l2_block_sealer) = StateKeeperPersistence::new( + persistence_pool.clone(), + self.l2_shared_bridge_addr, + self.l2_block_seal_queue_capacity, + ); + if self.pre_insert_txs { + persistence = persistence.with_tx_insertion(); + } + if !self.protective_reads_persistence_enabled { + // **Important:** Disabling protective reads persistence is only sound if the node will never + // run a full Merkle tree. + tracing::warn!("Disabling persisting protective reads; this should be safe, but is considered an experimental option at the moment"); + persistence = persistence.without_protective_reads(); + } + + let tree_writes_persistence = TreeWritesPersistence::new(persistence_pool); + let mut output_handler = OutputHandler::new(Box::new(persistence)) + .with_handler(Box::new(tree_writes_persistence)); + if let Some(sync_state) = sync_state { + output_handler = output_handler.with_handler(Box::new(sync_state)); + } + context.insert_resource(OutputHandlerResource(Unique::new(output_handler)))?; + context.add_task(Box::new(L2BlockSealerTask(l2_block_sealer))); + + Ok(()) + } +} + +#[derive(Debug)] +struct L2BlockSealerTask(zksync_state_keeper::L2BlockSealerTask); + +#[async_trait::async_trait] +impl Task for L2BlockSealerTask { + fn id(&self) -> TaskId { + "state_keeper/l2_block_sealer".into() + } + + async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { + // Miniblock sealer will exit itself once sender is dropped. 
+ self.0.run().await + } +} diff --git a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs new file mode 100644 index 000000000000..fcbe51f581e1 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs @@ -0,0 +1,75 @@ +use zksync_dal::{ConnectionPool, Core}; +use zksync_node_sync::SyncState; +use zksync_web3_decl::client::{DynClient, L2}; + +use crate::{ + implementations::resources::{ + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + sync_state::SyncStateResource, + }, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; + +/// Runs the dynamic sync state updater for `SyncState` if no `SyncState` was provided before. +/// This layer may be used as a fallback for EN API if API server runs without the core component. +#[derive(Debug)] +pub struct SyncStateUpdaterLayer; + +#[async_trait::async_trait] +impl WiringLayer for SyncStateUpdaterLayer { + fn layer_name(&self) -> &'static str { + "sync_state_updater_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + if context.get_resource::().await.is_ok() { + // `SyncState` was provided by some other layer -- we assume that the layer that added this resource + // will be responsible for its maintenance. + tracing::info!( + "SyncState was provided by another layer, skipping SyncStateUpdaterLayer" + ); + return Ok(()); + } + + let pool = context.get_resource::>().await?; + let MainNodeClientResource(main_node_client) = context.get_resource().await?; + + let sync_state = SyncState::default(); + + // Insert resource. + context.insert_resource(SyncStateResource(sync_state.clone()))?; + + // Insert task + context.add_task(Box::new(SyncStateUpdater { + sync_state, + connection_pool: pool.get().await?, + main_node_client, + })); + + Ok(()) + } +} + +#[derive(Debug)] +struct SyncStateUpdater { + sync_state: SyncState, + connection_pool: ConnectionPool, + main_node_client: Box>, +} + +#[async_trait::async_trait] +impl Task for SyncStateUpdater { + fn id(&self) -> TaskId { + "sync_state_updater".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + self.sync_state + .run_updater(self.connection_pool, self.main_node_client, stop_receiver.0) + .await?; + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs new file mode 100644 index 000000000000..c45071ce418b --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs @@ -0,0 +1,67 @@ +use zksync_node_sync::tree_data_fetcher::TreeDataFetcher; +use zksync_types::Address; + +use crate::{ + implementations::resources::{ + eth_interface::EthInterfaceResource, + healthcheck::AppHealthCheckResource, + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + }, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct TreeDataFetcherLayer { + diamond_proxy_addr: Address, +} + +impl TreeDataFetcherLayer { + pub fn new(diamond_proxy_addr: Address) -> Self { + Self { diamond_proxy_addr } + } +} + +#[async_trait::async_trait] +impl WiringLayer for TreeDataFetcherLayer { + fn layer_name(&self) -> &'static str { + "tree_data_fetcher_layer" + 
} + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let pool = context.get_resource::>().await?; + let MainNodeClientResource(client) = context.get_resource().await?; + let EthInterfaceResource(eth_client) = context.get_resource().await?; + + tracing::warn!( + "Running tree data fetcher (allows a node to operate w/o a Merkle tree or w/o waiting the tree to catch up). \ + This is an experimental feature; do not use unless you know what you're doing" + ); + let fetcher = TreeDataFetcher::new(client, pool.get().await?) + .with_l1_data(eth_client, self.diamond_proxy_addr)?; + + // Insert healthcheck + let AppHealthCheckResource(app_health) = context.get_resource_or_default().await; + app_health + .insert_component(fetcher.health_check()) + .map_err(WiringError::internal)?; + + // Insert task + context.add_task(Box::new(fetcher)); + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Task for TreeDataFetcher { + fn id(&self) -> TaskId { + "tree_data_fetcher".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs b/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs new file mode 100644 index 000000000000..0f04a35d484a --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs @@ -0,0 +1,61 @@ +use zksync_node_sync::validate_chain_ids_task::ValidateChainIdsTask; +use zksync_types::{L1ChainId, L2ChainId}; + +use crate::{ + implementations::resources::{ + eth_interface::EthInterfaceResource, main_node_client::MainNodeClientResource, + }, + precondition::Precondition, + service::{ServiceContext, StopReceiver}, + task::TaskId, + wiring_layer::{WiringError, WiringLayer}, +}; + +#[derive(Debug)] +pub struct ValidateChainIdsLayer { + l1_chain_id: L1ChainId, + l2_chain_id: L2ChainId, +} + +impl ValidateChainIdsLayer { + pub fn new(l1_chain_id: L1ChainId, l2_chain_id: L2ChainId) -> Self { + Self { + l1_chain_id, + l2_chain_id, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for ValidateChainIdsLayer { + fn layer_name(&self) -> &'static str { + "validate_chain_ids_layer" + } + + async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + let EthInterfaceResource(query_client) = context.get_resource().await?; + let MainNodeClientResource(main_node_client) = context.get_resource().await?; + + let task = ValidateChainIdsTask::new( + self.l1_chain_id, + self.l2_chain_id, + query_client, + main_node_client, + ); + + context.add_precondition(Box::new(task)); + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Precondition for ValidateChainIdsTask { + fn id(&self) -> TaskId { + "validate_chain_ids".into() + } + + async fn check(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run_once(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs index c81b475c3ec4..da0d9d3cc33a 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs @@ -27,8 +27,11 @@ pub struct Web3ServerOptionalConfig { pub batch_request_size_limit: Option, pub response_body_size_limit: Option, pub websocket_requests_per_minute_limit: Option, - // used by circuit breaker. 
+ pub with_extended_tracing: bool, + // Used by circuit breaker. pub replication_lag_limit: Option, + // Used by the external node. + pub pruning_info_refresh_interval: Option, } impl Web3ServerOptionalConfig { @@ -132,7 +135,8 @@ impl WiringLayer for Web3ServerLayer { ApiBuilder::jsonrpsee_backend(self.internal_api_config, replica_pool.clone()) .with_updaters_pool(updaters_pool) .with_tx_sender(tx_sender) - .with_mempool_cache(mempool_cache); + .with_mempool_cache(mempool_cache) + .with_extended_tracing(self.optional_config.with_extended_tracing); if let Some(client) = tree_api_client { api_builder = api_builder.with_tree_api(client); } @@ -147,6 +151,12 @@ impl WiringLayer for Web3ServerLayer { if let Some(sync_state) = sync_state { api_builder = api_builder.with_sync_state(sync_state); } + if let Some(pruning_info_refresh_interval) = + self.optional_config.pruning_info_refresh_interval + { + api_builder = + api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); + } let replication_lag_limit = self.optional_config.replication_lag_limit; api_builder = self.optional_config.apply(api_builder); let server = api_builder.build()?; diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index 8a717258cb46..010778315e58 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -1,14 +1,22 @@ -use std::{fmt, sync::Arc}; +use std::{fmt, sync::Arc, time::Duration}; +use tokio::sync::RwLock; use zksync_node_api_server::{ execution_sandbox::{VmConcurrencyBarrier, VmConcurrencyLimiter}, tx_sender::{ApiContracts, TxSenderBuilder, TxSenderConfig}, }; use zksync_state::PostgresStorageCaches; +use zksync_types::Address; +use zksync_web3_decl::{ + client::{DynClient, L2}, + jsonrpsee, + namespaces::EnNamespaceClient as _, +}; use crate::{ implementations::resources::{ fee_input::FeeInputResource, + main_node_client::MainNodeClientResource, pools::{PoolResource, ReplicaPool}, state_keeper::ConditionalSealerResource, web3_api::{TxSenderResource, TxSinkResource}, @@ -31,6 +39,7 @@ pub struct TxSenderLayer { postgres_storage_caches_config: PostgresStorageCachesConfig, max_vm_concurrency: usize, api_contracts: ApiContracts, + whitelisted_tokens_for_aa_cache: bool, } impl TxSenderLayer { @@ -45,8 +54,18 @@ impl TxSenderLayer { postgres_storage_caches_config, max_vm_concurrency, api_contracts, + whitelisted_tokens_for_aa_cache: false, } } + + /// Enables the task for fetching the whitelisted tokens for the AA cache from the main node. + /// Disabled by default. + /// + /// Requires `MainNodeClientResource` to be present. + pub fn with_whitelisted_tokens_for_aa_cache(mut self, value: bool) -> Self { + self.whitelisted_tokens_for_aa_cache = value; + self + } } #[async_trait::async_trait] @@ -96,6 +115,18 @@ impl WiringLayer for TxSenderLayer { if let Some(sealer) = sealer { tx_sender = tx_sender.with_sealer(sealer); } + + // Add the task for updating the whitelisted tokens for the AA cache. 
+ if self.whitelisted_tokens_for_aa_cache { + let MainNodeClientResource(main_node_client) = context.get_resource().await?; + let whitelisted_tokens = Arc::new(RwLock::new(Default::default())); + context.add_task(Box::new(WhitelistedTokensForAaUpdateTask { + whitelisted_tokens: whitelisted_tokens.clone(), + main_node_client, + })); + tx_sender = tx_sender.with_whitelisted_tokens_for_aa(whitelisted_tokens); + } + let tx_sender = tx_sender.build( fee_input, Arc::new(vm_concurrency_limiter), @@ -153,3 +184,40 @@ impl Task for VmConcurrencyBarrierTask { Ok(()) } } + +#[derive(Debug)] +struct WhitelistedTokensForAaUpdateTask { + whitelisted_tokens: Arc>>, + main_node_client: Box>, +} + +#[async_trait::async_trait] +impl Task for WhitelistedTokensForAaUpdateTask { + fn id(&self) -> TaskId { + "whitelisted_tokens_for_aa_update_task".into() + } + + async fn run(mut self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + while !*stop_receiver.0.borrow_and_update() { + match self.main_node_client.whitelisted_tokens_for_aa().await { + Ok(tokens) => { + *self.whitelisted_tokens.write().await = tokens; + } + Err(jsonrpsee::core::client::Error::Call(error)) + if error.code() == jsonrpsee::types::error::METHOD_NOT_FOUND_CODE => + { + // Method is not supported by the main node, do nothing. + } + Err(err) => { + tracing::error!("Failed to query `whitelisted_tokens_for_aa`, error: {err:?}"); + } + } + + // Error here corresponds to a timeout w/o `stop_receiver` changed; we're OK with this. + tokio::time::timeout(Duration::from_secs(30), stop_receiver.0.changed()) + .await + .ok(); + } + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs index df4812b3c098..98ed50ba9e45 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs @@ -1,6 +1,9 @@ use std::sync::Arc; -use zksync_node_api_server::tx_sender::{master_pool_sink::MasterPoolSink, proxy::TxProxy}; +use zksync_node_api_server::tx_sender::{ + master_pool_sink::MasterPoolSink, + proxy::{AccountNonceSweeperTask, TxProxy}, +}; use crate::{ implementations::resources::{ @@ -8,7 +11,8 @@ use crate::{ pools::{MasterPool, PoolResource}, web3_api::TxSinkResource, }, - service::ServiceContext, + service::{ServiceContext, StopReceiver}, + task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, }; @@ -37,10 +41,31 @@ impl WiringLayer for TxSinkLayer { } TxSinkLayer::ProxySink => { let MainNodeClientResource(client) = context.get_resource().await?; - TxSinkResource(Arc::new(TxProxy::new(client))) + let proxy = TxProxy::new(client); + + let pool = context + .get_resource::>() + .await? 
+ .get_singleton() + .await?; + let task = proxy.account_nonce_sweeper_task(pool); + context.add_task(Box::new(task)); + + TxSinkResource(Arc::new(proxy)) } }; context.insert_resource(tx_sink)?; Ok(()) } } + +#[async_trait::async_trait] +impl Task for AccountNonceSweeperTask { + fn id(&self) -> TaskId { + "account_nonce_sweeper_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_sync/src/batch_status_updater/tests.rs b/core/node/node_sync/src/batch_status_updater/tests.rs index e1386f985a09..28b89f86f6a7 100644 --- a/core/node/node_sync/src/batch_status_updater/tests.rs +++ b/core/node/node_sync/src/batch_status_updater/tests.rs @@ -158,6 +158,7 @@ fn mock_block_details(number: u32, stage: L1BatchStage) -> api::BlockDetails { .then(|| Utc.timestamp_opt(300, 0).unwrap()), l1_gas_price: 1, l2_fair_gas_price: 2, + fair_pubdata_price: None, base_system_contracts_hashes: BaseSystemContractsHashes::default(), }, operator_address: Address::zero(), diff --git a/core/node/node_sync/src/tree_data_fetcher/metrics.rs b/core/node/node_sync/src/tree_data_fetcher/metrics.rs index 37c81cd2d40a..aad5f090e1fc 100644 --- a/core/node/node_sync/src/tree_data_fetcher/metrics.rs +++ b/core/node/node_sync/src/tree_data_fetcher/metrics.rs @@ -7,7 +7,7 @@ use vise::{ Info, Metrics, Unit, }; -use super::{provider::TreeDataProviderSource, StepOutcome, TreeDataFetcher, TreeDataFetcherError}; +use super::{StepOutcome, TreeDataFetcher, TreeDataFetcherError}; #[derive(Debug, EncodeLabelSet)] struct TreeDataFetcherInfo { @@ -30,6 +30,9 @@ impl From<&TreeDataFetcher> for TreeDataFetcherInfo { #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum ProcessingStage { + FetchL1CommitEvent, + FetchBatchDetailsRpc, + /// Total latency for all clients. Fetch, Persistence, } @@ -44,6 +47,13 @@ pub(super) enum StepOutcomeLabel { TransientError, } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "source", rename_all = "snake_case")] +pub(super) enum TreeDataProviderSource { + L1CommitEvent, + BatchDetailsRpc, +} + const BLOCK_DIFF_BUCKETS: Buckets = Buckets::values(&[ 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1_000.0, 2_000.0, 5_000.0, 10_000.0, 20_000.0, 50_000.0, ]); diff --git a/core/node/node_sync/src/tree_data_fetcher/mod.rs b/core/node/node_sync/src/tree_data_fetcher/mod.rs index d155e03b5563..c871ec16b9de 100644 --- a/core/node/node_sync/src/tree_data_fetcher/mod.rs +++ b/core/node/node_sync/src/tree_data_fetcher/mod.rs @@ -22,6 +22,7 @@ use self::{ metrics::{ProcessingStage, TreeDataFetcherMetrics, METRICS}, provider::{L1DataProvider, MissingData, TreeDataProvider}, }; +use crate::tree_data_fetcher::provider::CombinedDataProvider; mod metrics; mod provider; @@ -30,7 +31,7 @@ mod tests; #[derive(Debug, thiserror::Error)] pub(crate) enum TreeDataFetcherError { - #[error("error fetching data from main node")] + #[error("error fetching data")] Rpc(#[from] EnrichedClientError), #[error("internal error")] Internal(#[from] anyhow::Error), @@ -95,7 +96,7 @@ enum StepOutcome { /// by Consistency checker. #[derive(Debug)] pub struct TreeDataFetcher { - data_provider: Box, + data_provider: CombinedDataProvider, // Used in the Info metric diamond_proxy_address: Option
, pool: ConnectionPool, @@ -112,7 +113,7 @@ impl TreeDataFetcher { /// Creates a new fetcher connected to the main node. pub fn new(client: Box>, pool: ConnectionPool) -> Self { Self { - data_provider: Box::new(client.for_component("tree_data_fetcher")), + data_provider: CombinedDataProvider::new(client.for_component("tree_data_fetcher")), diamond_proxy_address: None, pool, metrics: &METRICS, @@ -140,7 +141,7 @@ impl TreeDataFetcher { eth_client.for_component("tree_data_fetcher"), diamond_proxy_address, )?; - self.data_provider = Box::new(l1_provider.with_fallback(self.data_provider)); + self.data_provider.set_l1(l1_provider); self.diamond_proxy_address = Some(diamond_proxy_address); Ok(self) } @@ -212,14 +213,11 @@ impl TreeDataFetcher { .await?; stage_latency.observe(); let root_hash = match root_hash_result { - Ok(output) => { + Ok(root_hash) => { tracing::debug!( - "Received root hash for L1 batch #{l1_batch_to_fetch} from {source:?}: {root_hash:?}", - source = output.source, - root_hash = output.root_hash + "Received root hash for L1 batch #{l1_batch_to_fetch}: {root_hash:?}" ); - self.metrics.root_hash_sources[&output.source].inc(); - output.root_hash + root_hash } Err(MissingData::Batch) => { let err = anyhow::anyhow!( diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs index 0c9362369fe6..867ea2427541 100644 --- a/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs +++ b/core/node/node_sync/src/tree_data_fetcher/provider/mod.rs @@ -2,7 +2,6 @@ use std::fmt; use anyhow::Context; use async_trait::async_trait; -use vise::{EncodeLabelSet, EncodeLabelValue}; use zksync_eth_client::EthInterface; use zksync_types::{block::L2BlockHeader, web3, Address, L1BatchNumber, H256, U256, U64}; use zksync_web3_decl::{ @@ -12,7 +11,10 @@ use zksync_web3_decl::{ namespaces::ZksNamespaceClient, }; -use super::{metrics::METRICS, TreeDataFetcherResult}; +use super::{ + metrics::{ProcessingStage, TreeDataProviderSource, METRICS}, + TreeDataFetcherResult, +}; #[cfg(test)] mod tests; @@ -29,21 +31,7 @@ pub(super) enum MissingData { PossibleReorg, } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "source", rename_all = "snake_case")] -pub(super) enum TreeDataProviderSource { - L1CommitEvent, - BatchDetailsRpc, -} - -#[derive(Debug)] -pub(super) struct TreeDataProviderOutput { - pub root_hash: H256, - pub source: TreeDataProviderSource, -} - -pub(super) type TreeDataProviderResult = - TreeDataFetcherResult>; +pub(super) type TreeDataProviderResult = TreeDataFetcherResult>; /// External provider of tree data, such as main node (via JSON-RPC). 
#[async_trait] @@ -92,14 +80,7 @@ impl TreeDataProvider for Box> { return Ok(Err(MissingData::PossibleReorg)); } - Ok(batch_details - .base - .root_hash - .ok_or(MissingData::RootHash) - .map(|root_hash| TreeDataProviderOutput { - root_hash, - source: TreeDataProviderSource::BatchDetailsRpc, - })) + Ok(batch_details.base.root_hash.ok_or(MissingData::RootHash)) } } @@ -205,13 +186,6 @@ impl L1DataProvider { })?; Ok((number, block.timestamp)) } - - pub fn with_fallback(self, fallback: Box) -> CombinedDataProvider { - CombinedDataProvider { - l1: Some(self), - fallback, - } - } } #[async_trait] @@ -305,10 +279,7 @@ impl TreeDataProvider for L1DataProvider { l1_commit_block_number, l1_commit_block_timestamp: l1_commit_block.timestamp, }); - Ok(Ok(TreeDataProviderOutput { - root_hash, - source: TreeDataProviderSource::L1CommitEvent, - })) + Ok(Ok(root_hash)) } _ => { tracing::warn!( @@ -325,44 +296,69 @@ impl TreeDataProvider for L1DataProvider { #[derive(Debug)] pub(super) struct CombinedDataProvider { l1: Option, - fallback: Box, + // Generic to allow for tests. + rpc: Box, +} + +impl CombinedDataProvider { + pub fn new(fallback: impl TreeDataProvider) -> Self { + Self { + l1: None, + rpc: Box::new(fallback), + } + } + + pub fn set_l1(&mut self, l1: L1DataProvider) { + self.l1 = Some(l1); + } } #[async_trait] impl TreeDataProvider for CombinedDataProvider { + #[tracing::instrument(skip(self, last_l2_block))] async fn batch_details( &mut self, number: L1BatchNumber, last_l2_block: &L2BlockHeader, ) -> TreeDataProviderResult { if let Some(l1) = &mut self.l1 { - match l1.batch_details(number, last_l2_block).await { + let stage_latency = METRICS.stage_latency[&ProcessingStage::FetchL1CommitEvent].start(); + let l1_result = l1.batch_details(number, last_l2_block).await; + stage_latency.observe(); + + match l1_result { Err(err) => { if err.is_transient() { tracing::info!( - number = number.0, - "Transient error calling L1 data provider: {err}" + "Transient error calling L1 data provider: {:#}", + anyhow::Error::from(err) ); } else { tracing::warn!( - number = number.0, - "Fatal error calling L1 data provider: {err}" + "Fatal error calling L1 data provider: {:#}", + anyhow::Error::from(err) ); self.l1 = None; } } - Ok(Ok(root_hash)) => return Ok(Ok(root_hash)), + Ok(Ok(root_hash)) => { + METRICS.root_hash_sources[&TreeDataProviderSource::L1CommitEvent].inc(); + return Ok(Ok(root_hash)); + } Ok(Err(missing_data)) => { - tracing::debug!( - number = number.0, - "L1 data provider misses batch data: {missing_data}" - ); + tracing::info!("L1 data provider misses batch data: {missing_data}"); // No sense of calling the L1 provider in the future; the L2 provider will very likely get information // about batches significantly faster. 
self.l1 = None; } } } - self.fallback.batch_details(number, last_l2_block).await + let stage_latency = METRICS.stage_latency[&ProcessingStage::FetchBatchDetailsRpc].start(); + let rpc_result = self.rpc.batch_details(number, last_l2_block).await; + stage_latency.observe(); + if matches!(rpc_result, Ok(Ok(_))) { + METRICS.root_hash_sources[&TreeDataProviderSource::BatchDetailsRpc].inc(); + } + rpc_result } } diff --git a/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs index bb252e09caad..09fa16f16077 100644 --- a/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs +++ b/core/node/node_sync/src/tree_data_fetcher/provider/tests.rs @@ -39,6 +39,7 @@ fn mock_block_details_base(number: u32, hash: Option) -> api::BlockDetails executed_at: None, l1_gas_price: 10, l2_fair_gas_price: 100, + fair_pubdata_price: None, base_system_contracts_hashes: Default::default(), } } @@ -85,13 +86,12 @@ async fn rpc_data_provider_basics() { }; let mut client: Box> = Box::new(l2_parameters.mock_client()); - let output = client + let root_hash = client .batch_details(L1BatchNumber(1), &last_l2_block) .await .unwrap() .expect("missing block"); - assert_eq!(output.root_hash, H256::from_low_u64_be(1)); - assert_matches!(output.source, TreeDataProviderSource::BatchDetailsRpc); + assert_eq!(root_hash, H256::from_low_u64_be(1)); // Query a future L1 batch. let output = client @@ -269,13 +269,12 @@ async fn test_using_l1_data_provider(l1_batch_timestamps: &[u64]) { L1DataProvider::new(Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS).unwrap(); for i in 0..l1_batch_timestamps.len() { let number = L1BatchNumber(i as u32 + 1); - let output = provider + let root_hash = provider .batch_details(number, &get_last_l2_block(&mut storage, number).await) .await .unwrap() .expect("no root hash"); - assert_eq!(output.root_hash, H256::repeat_byte(number.0 as u8)); - assert_matches!(output.source, TreeDataProviderSource::L1CommitEvent); + assert_eq!(root_hash, H256::repeat_byte(number.0 as u8)); let past_l1_batch = provider.past_l1_batch.unwrap(); assert_eq!(past_l1_batch.number, number); @@ -351,12 +350,13 @@ async fn combined_data_provider_errors() { let mut main_node_client = MockMainNodeClient::default(); main_node_client.insert_batch(L1BatchNumber(2), H256::repeat_byte(2)); - let mut provider = L1DataProvider::new(Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS) - .unwrap() - .with_fallback(Box::new(main_node_client)); + let mut provider = CombinedDataProvider::new(main_node_client); + let l1_provider = + L1DataProvider::new(Box::new(eth_params.client()), DIAMOND_PROXY_ADDRESS).unwrap(); + provider.set_l1(l1_provider); // L1 batch #1 should be obtained from L1 - let output = provider + let root_hash = provider .batch_details( L1BatchNumber(1), &get_last_l2_block(&mut storage, L1BatchNumber(1)).await, @@ -364,12 +364,11 @@ async fn combined_data_provider_errors() { .await .unwrap() .expect("no root hash"); - assert_eq!(output.root_hash, H256::repeat_byte(1)); - assert_matches!(output.source, TreeDataProviderSource::L1CommitEvent); + assert_eq!(root_hash, H256::repeat_byte(1)); assert!(provider.l1.is_some()); // L1 batch #2 should be obtained from L2 - let output = provider + let root_hash = provider .batch_details( L1BatchNumber(2), &get_last_l2_block(&mut storage, L1BatchNumber(2)).await, @@ -377,7 +376,6 @@ async fn combined_data_provider_errors() { .await .unwrap() .expect("no root hash"); - assert_eq!(output.root_hash, H256::repeat_byte(2)); 
- assert_matches!(output.source, TreeDataProviderSource::BatchDetailsRpc); + assert_eq!(root_hash, H256::repeat_byte(2)); assert!(provider.l1.is_none()); } diff --git a/core/node/node_sync/src/tree_data_fetcher/tests.rs b/core/node/node_sync/src/tree_data_fetcher/tests.rs index 3ffbb91d474a..5d94ddf658d6 100644 --- a/core/node/node_sync/src/tree_data_fetcher/tests.rs +++ b/core/node/node_sync/src/tree_data_fetcher/tests.rs @@ -16,11 +16,7 @@ use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_ use zksync_types::{AccountTreeId, Address, L2BlockNumber, StorageKey, StorageLog, H256}; use zksync_web3_decl::jsonrpsee::core::ClientError; -use super::{ - metrics::StepOutcomeLabel, - provider::{TreeDataProviderOutput, TreeDataProviderResult, TreeDataProviderSource}, - *, -}; +use super::{metrics::StepOutcomeLabel, provider::TreeDataProviderResult, *}; #[derive(Debug, Default)] pub(super) struct MockMainNodeClient { @@ -48,10 +44,7 @@ impl TreeDataProvider for MockMainNodeClient { Ok(self .batch_details_responses .get(&number) - .map(|&root_hash| TreeDataProviderOutput { - root_hash, - source: TreeDataProviderSource::BatchDetailsRpc, - }) + .copied() .ok_or(MissingData::Batch)) } } @@ -122,7 +115,7 @@ impl FetcherHarness { let (updates_sender, updates_receiver) = mpsc::unbounded_channel(); let metrics = &*Box::leak(Box::::default()); let fetcher = TreeDataFetcher { - data_provider: Box::new(client), + data_provider: CombinedDataProvider::new(client), diamond_proxy_address: None, pool: pool.clone(), metrics, @@ -324,10 +317,7 @@ impl TreeDataProvider for SlowMainNode { } let request_count = self.request_count.fetch_add(1, Ordering::Relaxed); Ok(if request_count >= self.compute_root_hash_after { - Ok(TreeDataProviderOutput { - root_hash: H256::repeat_byte(1), - source: TreeDataProviderSource::BatchDetailsRpc, - }) + Ok(H256::repeat_byte(1)) } else { Err(MissingData::RootHash) }) diff --git a/core/node/node_sync/src/validate_chain_ids_task.rs b/core/node/node_sync/src/validate_chain_ids_task.rs index 5a75cb384aec..1414b5ab6014 100644 --- a/core/node/node_sync/src/validate_chain_ids_task.rs +++ b/core/node/node_sync/src/validate_chain_ids_task.rs @@ -138,6 +138,23 @@ impl ValidateChainIdsTask { } } + /// Runs the task once, exiting either when all the checks are performed or when the stop signal is received. + pub async fn run_once(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let eth_client_check = Self::check_eth_client(self.eth_client, self.l1_chain_id); + let main_node_l1_check = + Self::check_l1_chain_using_main_node(self.main_node_client.clone(), self.l1_chain_id); + let main_node_l2_check = + Self::check_l2_chain_using_main_node(self.main_node_client, self.l2_chain_id); + let joined_futures = + futures::future::try_join3(eth_client_check, main_node_l1_check, main_node_l2_check) + .fuse(); + tokio::select! { + res = joined_futures => res.map(drop), + _ = stop_receiver.changed() => Ok(()), + } + } + + /// Runs the task until the stop signal is received. pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { // Since check futures are fused, they are safe to poll after getting resolved; they will never resolve again, // so we'll just wait for another check or a stop signal. 
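// Illustrative sketch (not part of this diff): the new `run_once` above races the joined
// chain-id checks against the stop signal, so the precondition never blocks shutdown.
// The snippet below reproduces that select/watch pattern with placeholder checks; only the
// tokio/futures/anyhow APIs are assumed, every other name here is hypothetical.
use tokio::sync::watch;

async fn run_once_pattern(mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
    // Stand-ins for the real checks, which query L1 and the main node for their chain IDs.
    let check_l1 = async { Ok::<(), anyhow::Error>(()) };
    let check_l2 = async { Ok::<(), anyhow::Error>(()) };
    let joined_checks = futures::future::try_join(check_l1, check_l2);
    tokio::select! {
        // All checks finished (or one failed) -- propagate the result and drop the outputs.
        res = joined_checks => res.map(drop),
        // Stop signal received -- exit without waiting for the checks to complete.
        _ = stop_receiver.changed() => Ok(()),
    }
}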
diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs index a16b9920dd6e..f3f947d0d1e6 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/node/state_keeper/src/batch_executor/main_executor.rs @@ -32,6 +32,12 @@ use crate::{ #[derive(Debug, Clone)] pub struct MainBatchExecutor { save_call_traces: bool, + /// Whether batch executor would allow transactions with bytecode that cannot be compressed. + /// For new blocks, bytecode compression is mandatory -- if bytecode compression is not supported, + /// the transaction will be rejected. + /// Note that this flag, if set to `true`, is strictly more permissive than if set to `false`. It means + /// that in cases where the node is expected to process any transactions processed by the sequencer + /// regardless of its configuration, this flag should be set to `true`. optional_bytecode_compression: bool, } @@ -218,6 +224,8 @@ impl CommandReceiver { result } + /// Attempts to execute transaction with or without bytecode compression. + /// If compression fails, the transaction will be re-executed without compression. fn execute_tx_in_vm_with_optional_compression( &self, tx: &Transaction, @@ -283,10 +291,8 @@ impl CommandReceiver { (result.1, compressed_bytecodes, trace) } - // Err when transaction is rejected. - // `Ok(TxExecutionStatus::Success)` when the transaction succeeded - // `Ok(TxExecutionStatus::Failure)` when the transaction failed. - // Note that failed transactions are considered properly processed and are included in blocks + /// Attempts to execute transaction with mandatory bytecode compression. + /// If bytecode compression fails, the transaction will be rejected. fn execute_tx_in_vm( &self, tx: &Transaction, diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 566eab9c3d21..d0dfe367c21d 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -17,6 +17,7 @@ use zksync_types::{ fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, + protocol_version::ProtocolSemanticVersion, snapshots::SnapshotRecoveryStatus, transaction_request::PaymasterParams, tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, @@ -163,8 +164,8 @@ impl Snapshot { l1_batch: L1BatchNumber, l2_block: L2BlockNumber, storage_logs: &[StorageLog], + genesis_params: GenesisParams, ) -> Self { - let genesis_params = GenesisParams::mock(); let contracts = genesis_params.base_system_contracts(); let l1_batch = L1BatchHeader::new( l1_batch, @@ -208,7 +209,11 @@ pub async fn prepare_recovery_snapshot( l2_block: L2BlockNumber, storage_logs: &[StorageLog], ) -> SnapshotRecoveryStatus { - recover(storage, Snapshot::make(l1_batch, l2_block, storage_logs)).await + recover( + storage, + Snapshot::make(l1_batch, l2_block, storage_logs, GenesisParams::mock()), + ) + .await } /// Takes a storage snapshot at the last sealed L1 batch. 
@@ -290,6 +295,10 @@ pub async fn recover( .protocol_versions_dal() .save_protocol_version_with_tx(&ProtocolVersion { base_system_contracts_hashes: snapshot.l1_batch.base_system_contracts_hashes, + version: ProtocolSemanticVersion { + minor: snapshot.l1_batch.protocol_version.unwrap(), + patch: 0.into(), + }, ..ProtocolVersion::default() }) .await diff --git a/core/tests/test_account/Cargo.toml b/core/tests/test_account/Cargo.toml index 0b2e7aa9340f..6df10edd7dca 100644 --- a/core/tests/test_account/Cargo.toml +++ b/core/tests/test_account/Cargo.toml @@ -19,3 +19,4 @@ zksync_contracts.workspace = true hex.workspace = true ethabi.workspace = true +rand.workspace = true diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 619caeb1ebd5..e259ce209c63 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -50,6 +50,10 @@ impl Account { Self::new(K256PrivateKey::random()) } + pub fn random_using(rng: &mut impl rand::Rng) -> Self { + Self::new(K256PrivateKey::random_using(rng)) + } + pub fn get_l2_tx_for_execute(&mut self, execute: Execute, fee: Option<Fee>) -> Transaction { let tx = self.get_l2_tx_for_execute_with_nonce(execute, fee, self.nonce); self.nonce += 1; diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index 862dfa2f3566..4e005fc2795f 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -258,8 +258,7 @@ Install `nix`. Enable the nix command and flakes. Install docker, rustup and use rust to install SQLx CLI like described above. If you are on NixOS, you also need to enable nix-ld. -Go to the zksync folder and run `nix develop --impure`. After it finishes, you are in a shell that has all the -dependencies. +Go to the zksync folder and run `nix develop`. After it finishes, you are in a shell that has all the dependencies. ## Foundry diff --git a/etc/env/configs/ext-node.toml b/etc/env/configs/ext-node.toml index eb07aa387542..145b1455ab93 100644 --- a/etc/env/configs/ext-node.toml +++ b/etc/env/configs/ext-node.toml @@ -55,6 +55,8 @@ url = "http://127.0.0.1:3050" # Here we use TOML multiline strings: newlines will be trimmed.
log = """\ warn,\ +zksync_node_framework=info,\ +zksync_node_consensus=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ diff --git a/flake.nix b/flake.nix index 26c297d98c34..0287d4cf09d1 100644 --- a/flake.nix +++ b/flake.nix @@ -13,7 +13,7 @@ # $ nix build .#zksync_server.block_reverter # # To enter the development shell, run: -# $ nix develop --impure +# $ nix develop # # To vendor the dependencies manually, run: # $ nix shell .#cargo-vendor -c cargo vendor --no-merge-sources @@ -212,7 +212,7 @@ export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="clang" if [ "x$NIX_LD" = "x" ]; then - export NIX_LD="$ZK_NIX_LD" + export NIX_LD="$(<${clangStdenv.cc}/nix-support/dynamic-linker)" fi if [ "x$NIX_LD_LIBRARY_PATH" = "x" ]; then export NIX_LD_LIBRARY_PATH="$ZK_NIX_LD_LIBRARY_PATH" @@ -222,7 +222,6 @@ ''; ZK_NIX_LD_LIBRARY_PATH = lib.makeLibraryPath [ ]; - ZK_NIX_LD = builtins.readFile "${clangStdenv.cc}/nix-support/dynamic-linker"; }; }; }); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 44c2a8b8395f..7b30b67c2650 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -9536,6 +9536,7 @@ version = "0.1.0" dependencies = [ "ethabi", "hex", + "rand 0.8.5", "zksync_contracts", "zksync_eth_signer", "zksync_system_constants", diff --git a/prover/prover_dal/src/fri_prover_dal.rs b/prover/prover_dal/src/fri_prover_dal.rs index f6c0379ee8a0..419cb635ac53 100644 --- a/prover/prover_dal/src/fri_prover_dal.rs +++ b/prover/prover_dal/src/fri_prover_dal.rs @@ -5,8 +5,7 @@ use zksync_basic_types::{ basic_fri_types::{AggregationRound, CircuitIdRoundTuple, JobIdentifiers}, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId}, prover_dal::{ - correct_circuit_id, FriProverJobMetadata, JobCountStatistics, ProverJobFriInfo, - ProverJobStatus, StuckJobs, + FriProverJobMetadata, JobCountStatistics, ProverJobFriInfo, ProverJobStatus, StuckJobs, }, L1BatchNumber, }; @@ -659,8 +658,7 @@ impl FriProverDal<'_, '_> { .map(|row| ProverJobFriInfo { id: row.id as u32, l1_batch_number, - // It is necessary to correct the circuit IDs due to the discrepancy between different aggregation rounds. 
- circuit_id: correct_circuit_id(row.circuit_id, aggregation_round), + circuit_id: row.circuit_id as u32, circuit_blob_url: row.circuit_blob_url.clone(), aggregation_round, sequence_number: row.sequence_number as u32, diff --git a/prover/prover_dal/src/fri_witness_generator_dal.rs b/prover/prover_dal/src/fri_witness_generator_dal.rs index 14d47beed1a0..8db30e5a7f11 100644 --- a/prover/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/prover_dal/src/fri_witness_generator_dal.rs @@ -6,10 +6,10 @@ use zksync_basic_types::{ basic_fri_types::{AggregationRound, Eip4844Blobs}, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, prover_dal::{ - correct_circuit_id, BasicWitnessGeneratorJobInfo, JobCountStatistics, - LeafAggregationJobMetadata, LeafWitnessGeneratorJobInfo, NodeAggregationJobMetadata, - NodeWitnessGeneratorJobInfo, RecursionTipWitnessGeneratorJobInfo, - SchedulerWitnessGeneratorJobInfo, StuckJobs, WitnessJobStatus, + BasicWitnessGeneratorJobInfo, JobCountStatistics, LeafAggregationJobMetadata, + LeafWitnessGeneratorJobInfo, NodeAggregationJobMetadata, NodeWitnessGeneratorJobInfo, + RecursionTipWitnessGeneratorJobInfo, SchedulerWitnessGeneratorJobInfo, StuckJobs, + WitnessJobStatus, }, L1BatchNumber, }; @@ -1553,8 +1553,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .map(|row| NodeWitnessGeneratorJobInfo { id: row.id as u32, l1_batch_number, - // It is necessary to correct the circuit IDs due to the discrepancy between different aggregation rounds. - circuit_id: correct_circuit_id(row.circuit_id, AggregationRound::NodeAggregation), + circuit_id: row.circuit_id as u32, depth: row.depth as u32, status: WitnessJobStatus::from_str(&row.status).unwrap(), attempts: row.attempts as u32, diff --git a/zk_toolbox/crates/common/src/docker.rs b/zk_toolbox/crates/common/src/docker.rs index f52e3214fa23..db8a63e9f5d0 100644 --- a/zk_toolbox/crates/common/src/docker.rs +++ b/zk_toolbox/crates/common/src/docker.rs @@ -3,8 +3,8 @@ use xshell::{cmd, Shell}; use crate::cmd::Cmd; pub fn up(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Cmd::new(cmd!(shell, "docker-compose -f {docker_compose_file} up -d")).run() + Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} up -d")).run() } pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Cmd::new(cmd!(shell, "docker-compose -f {docker_compose_file} down")).run() + Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run() } diff --git a/zk_toolbox/crates/common/src/forge.rs b/zk_toolbox/crates/common/src/forge.rs index 3ae46a8034a8..565c7aa52d96 100644 --- a/zk_toolbox/crates/common/src/forge.rs +++ b/zk_toolbox/crates/common/src/forge.rs @@ -130,16 +130,16 @@ impl ForgeScript { }) } - pub async fn check_the_balance(&self, minimum_value: U256) -> anyhow::Result<bool> { + pub async fn get_the_balance(&self) -> anyhow::Result<Option<U256>> { let Some(rpc_url) = self.rpc_url() else { - return Ok(true); + return Ok(None); }; let Some(private_key) = self.private_key() else { - return Ok(true); + return Ok(None); }; let client = create_ethers_client(private_key, rpc_url, None)?; let balance = client.get_balance(client.address(), None).await?; - Ok(balance > minimum_value) + Ok(Some(balance)) } } diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zk_toolbox/crates/common/src/prerequisites.rs index 237af5b40483..ae21ba68b3c1 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zk_toolbox/crates/common/src/prerequisites.rs @@ -2,7 +2,7 @@ use xshell::{cmd,
Shell}; use crate::{cmd::Cmd, logger}; -const PREREQUISITES: [Prerequisite; 6] = [ +const PREREQUISITES: [Prerequisite; 5] = [ Prerequisite { name: "git", download_link: "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git", @@ -11,10 +11,6 @@ const PREREQUISITES: [Prerequisite; 6] = [ name: "docker", download_link: "https://docs.docker.com/get-docker/", }, - Prerequisite { - name: "docker-compose", - download_link: "https://docs.docker.com/compose/install/", - }, Prerequisite { name: "forge", download_link: "https://book.getfoundry.sh/getting-started/installation", @@ -29,6 +25,11 @@ const PREREQUISITES: [Prerequisite; 6] = [ }, ]; +const DOCKER_COMPOSE_PREREQUISITE: Prerequisite = Prerequisite { + name: "docker compose", + download_link: "https://docs.docker.com/compose/install/", +}; + struct Prerequisite { name: &'static str, download_link: &'static str, @@ -43,6 +44,10 @@ pub fn check_prerequisites(shell: &Shell) { } } + if !check_docker_compose_prerequisite(shell) { + missing_prerequisites.push(&DOCKER_COMPOSE_PREREQUISITE); + } + if !missing_prerequisites.is_empty() { logger::error("Prerequisite check has failed"); logger::error_note( @@ -63,3 +68,9 @@ pub fn check_prerequisites(shell: &Shell) { fn check_prerequisite(shell: &Shell, name: &str) -> bool { Cmd::new(cmd!(shell, "which {name}")).run().is_ok() } + +fn check_docker_compose_prerequisite(shell: &Shell) -> bool { + Cmd::new(cmd!(shell, "docker compose version")) + .run() + .is_ok() +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/mod.rs index 8ed7a82b8334..ccdf5b082caa 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/mod.rs @@ -2,4 +2,5 @@ pub mod args; pub mod chain; pub mod containers; pub mod ecosystem; +pub mod prover; pub mod server; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs new file mode 100644 index 000000000000..a14dd6fb87e5 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs @@ -0,0 +1,27 @@ +use anyhow::Ok; +use common::{cmd::Cmd, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::utils::get_link_to_prover; +use crate::messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED}; + +pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let link_to_prover = get_link_to_prover(&ecosystem_config); + shell.change_dir(&link_to_prover); + + let spinner = Spinner::new(MSG_GENERATING_SK_SPINNER); + let mut cmd = Cmd::new(cmd!( + shell, + "cargo run --features gpu --release --bin key_generator -- + generate-sk all --recompute-if-missing + --setup-path=vk_setup_data_generator_server_fri/data + --path={link_to_prover}/vk_setup_data_generator_server_fri/data" + )); + cmd.run()?; + spinner.finish(); + logger::outro(MSG_SK_GENERATED); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs new file mode 100644 index 000000000000..c617b915a52c --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs @@ -0,0 +1,16 @@ +use clap::Subcommand; +use xshell::Shell; +mod generate_sk; +mod utils; + +#[derive(Subcommand, Debug)] +pub enum ProverCommands { + /// Initialize prover + GenerateSK, +} + +pub(crate) async fn run(shell: &Shell, args: 
ProverCommands) -> anyhow::Result<()> { + match args { + ProverCommands::GenerateSK => generate_sk::run(shell).await, + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs new file mode 100644 index 000000000000..4dae70863dc9 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs @@ -0,0 +1,10 @@ +use std::path::PathBuf; + +use config::EcosystemConfig; + +pub(crate) fn get_link_to_prover(config: &EcosystemConfig) -> PathBuf { + let link_to_code = config.link_to_code.clone(); + let mut link_to_prover = link_to_code.into_os_string(); + link_to_prover.push("/prover"); + link_to_prover.into() +} diff --git a/zk_toolbox/crates/zk_inception/src/config_manipulations.rs b/zk_toolbox/crates/zk_inception/src/config_manipulations.rs index 3c350fa8d894..a300a15e76c6 100644 --- a/zk_toolbox/crates/zk_inception/src/config_manipulations.rs +++ b/zk_toolbox/crates/zk_inception/src/config_manipulations.rs @@ -79,6 +79,8 @@ pub fn update_l2_shared_bridge( let mut contracts_config = ContractsConfig::read_with_base_path(shell, &config.configs)?; contracts_config.bridges.shared.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); + contracts_config.bridges.erc20.l2_address = + Some(initialize_bridges_output.l2_shared_bridge_proxy); contracts_config.save_with_base_path(shell, &config.configs)?; Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/forge_utils.rs b/zk_toolbox/crates/zk_inception/src/forge_utils.rs index 581d1ec892d4..cabc8ff7566b 100644 --- a/zk_toolbox/crates/zk_inception/src/forge_utils.rs +++ b/zk_toolbox/crates/zk_inception/src/forge_utils.rs @@ -22,11 +22,17 @@ pub async fn check_the_balance(forge: &ForgeScript) -> anyhow::Result<()> { return Ok(()); }; - while !forge - .check_the_balance(U256::from(MINIMUM_BALANCE_FOR_WALLET)) - .await? - { - if !common::PromptConfirm::new(msg_address_doesnt_have_enough_money_prompt(&address)).ask() + let expected_balance = U256::from(MINIMUM_BALANCE_FOR_WALLET); + while let Some(balance) = forge.get_the_balance().await? 
{ + if balance >= expected_balance { + return Ok(()); + } + if !common::PromptConfirm::new(msg_address_doesnt_have_enough_money_prompt( + &address, + balance, + expected_balance, + )) + .ask() { break; } diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index b0e8e8f4fd69..dff9e479e01f 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -7,7 +7,9 @@ use common::{ use config::EcosystemConfig; use xshell::Shell; -use crate::commands::{args::RunServerArgs, chain::ChainCommands, ecosystem::EcosystemCommands}; +use crate::commands::{ + args::RunServerArgs, chain::ChainCommands, ecosystem::EcosystemCommands, prover::ProverCommands, +}; pub mod accept_ownership; mod commands; @@ -35,6 +37,9 @@ pub enum InceptionSubcommands { /// Chain related commands #[command(subcommand)] Chain(ChainCommands), + /// Prover related commands + #[command(subcommand)] + Prover(ProverCommands), /// Run server Server(RunServerArgs), /// Run containers for local development @@ -101,6 +106,7 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Res match inception_args.command { InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, args).await?, InceptionSubcommands::Chain(args) => commands::chain::run(shell, args).await?, + InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?, InceptionSubcommands::Server(args) => commands::server::run(shell, args)?, InceptionSubcommands::Containers => commands::containers::run(shell)?, } diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 21f051470555..1b3c05258753 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -1,4 +1,7 @@ -use ethers::types::H160; +use ethers::{ + types::{H160, U256}, + utils::format_ether, +}; /// Common messages pub(super) const MSG_SELECTED_CONFIG: &str = "Selected config"; @@ -129,12 +132,15 @@ pub(super) const MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR: &str = "Failed to drop pub(super) fn msg_server_db_url_prompt(chain_name: &str) -> String { format!("Please provide server database url for chain {chain_name}") } + pub(super) fn msg_prover_db_url_prompt(chain_name: &str) -> String { format!("Please provide prover database url for chain {chain_name}") } + pub(super) fn msg_prover_db_name_prompt(chain_name: &str) -> String { format!("Please provide prover database name for chain {chain_name}") } + pub(super) fn msg_server_db_name_prompt(chain_name: &str) -> String { format!("Please provide server database name for chain {chain_name}") } @@ -170,8 +176,19 @@ pub(super) const MSG_BUILDING_L1_CONTRACTS: &str = "Building L1 contracts..."; /// Forge utils related messages pub(super) const MSG_DEPLOYER_PK_NOT_SET_ERR: &str = "Deployer private key is not set"; -pub(super) fn msg_address_doesnt_have_enough_money_prompt(address: &H160) -> String { + +pub(super) fn msg_address_doesnt_have_enough_money_prompt( + address: &H160, + actual: U256, + expected: U256, +) -> String { + let actual = format_ether(actual); + let expected = format_ether(expected); format!( - "Address {address:?} doesn't have enough money to deploy contracts do you want to try again?" + "Address {address:?} doesn't have enough money to deploy contracts: only {actual} ETH is available, but {expected} ETH is required. Do you want to try again?"
) } + +/// Prover related messages +pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; +pub(super) const MSG_SK_GENERATED: &str = "Setup keys generated successfully";
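To make the new insufficient-funds prompt concrete, the sketch below shows how wei-denominated balances are rendered via `format_ether`, the helper imported in `messages.rs` above. It is illustrative only (not part of the patch); the amounts are arbitrary example values, and only the `ethers` crate is assumed.

```rust
// Illustrative only: format wei-denominated balances the way the prompt above does.
use ethers::{types::U256, utils::format_ether};

fn main() {
    // Arbitrary example values, expressed in wei: 3 ETH available vs. 5 ETH required.
    let actual = U256::from(3_000_000_000_000_000_000u64);
    let expected = U256::from(5_000_000_000_000_000_000u64);
    println!(
        "only {} ETH is available, but {} ETH is required",
        format_ether(actual),
        format_ether(expected)
    );
}
```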