diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index cfe8f1ea7c01..e58ece5fdf66 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -16,7 +16,7 @@ use zksync_node_framework::{ implementations::layers::{ batch_status_updater::BatchStatusUpdaterLayer, commitment_generator::CommitmentGeneratorLayer, - consensus::{ConsensusLayer, Mode}, + consensus::ExternalNodeConsensusLayer, consistency_checker::ConsistencyCheckerLayer, healtcheck_server::HealthCheckLayer, l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, @@ -41,7 +41,7 @@ use zksync_node_framework::{ server::{Web3ServerLayer, Web3ServerOptionalConfig}, tree_api_client::TreeApiClientLayer, tx_sender::{PostgresStorageCachesConfig, TxSenderLayer}, - tx_sink::TxSinkLayer, + tx_sink::ProxySinkLayer, }, }, service::{ZkStackService, ZkStackServiceBuilder}, @@ -209,11 +209,7 @@ impl ExternalNodeBuilder { let config = self.config.consensus.clone(); let secrets = config::read_consensus_secrets().context("config::read_consensus_secrets()")?; - let layer = ConsensusLayer { - mode: Mode::External, - config, - secrets, - }; + let layer = ExternalNodeConsensusLayer { config, secrets }; self.node.add_layer(layer); Ok(self) } @@ -359,7 +355,7 @@ impl ExternalNodeBuilder { ) .with_whitelisted_tokens_for_aa_cache(true); - self.node.add_layer(TxSinkLayer::ProxySink); + self.node.add_layer(ProxySinkLayer); self.node.add_layer(tx_sender_layer); Ok(self) } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 4a80898ca8dc..2144e9598a6f 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -25,7 +25,7 @@ use zksync_node_framework::{ base_token_ratio_provider::BaseTokenRatioProviderLayer, circuit_breaker_checker::CircuitBreakerCheckerLayer, commitment_generator::CommitmentGeneratorLayer, - consensus::{ConsensusLayer, Mode as ConsensusMode}, + consensus::MainNodeConsensusLayer, contract_verification_api::ContractVerificationApiLayer, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, @@ -56,7 +56,7 @@ use zksync_node_framework::{ server::{Web3ServerLayer, Web3ServerOptionalConfig}, tree_api_client::TreeApiClientLayer, tx_sender::{PostgresStorageCachesConfig, TxSenderLayer}, - tx_sink::TxSinkLayer, + tx_sink::MasterPoolSinkLayer, }, }, service::{ZkStackService, ZkStackServiceBuilder}, @@ -280,7 +280,7 @@ impl MainNodeBuilder { }; // On main node we always use master pool sink. 
- self.node.add_layer(TxSinkLayer::MasterPoolSink); + self.node.add_layer(MasterPoolSinkLayer); self.node.add_layer(TxSenderLayer::new( TxSenderConfig::new( &sk_config, @@ -445,10 +445,16 @@ impl MainNodeBuilder { } fn add_consensus_layer(mut self) -> anyhow::Result { - self.node.add_layer(ConsensusLayer { - mode: ConsensusMode::Main, - config: self.consensus_config.clone(), - secrets: self.secrets.consensus.clone(), + self.node.add_layer(MainNodeConsensusLayer { + config: self + .consensus_config + .clone() + .context("Consensus config has to be provided")?, + secrets: self + .secrets + .consensus + .clone() + .context("Consensus secrets have to be provided")?, }); Ok(self) diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 9d692e84f10e..b14d07b72db6 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -1,12 +1,13 @@ -use std::time::Duration; +use std::{fmt, time::Duration}; use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1, SecretKey}; use url::Url; use zksync_basic_types::H256; use zksync_node_framework::{ - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; use zksync_prover_interface::inputs::TeeVerifierInput; use zksync_tee_verifier::Verify; @@ -15,16 +16,8 @@ use zksync_types::{tee_types::TeeType, L1BatchNumber}; use crate::{api_client::TeeApiClient, error::TeeProverError, metrics::METRICS}; /// Wiring layer for `TeeProver` -/// -/// ## Requests resources -/// -/// no resources requested -/// -/// ## Adds tasks -/// -/// - `TeeProver` #[derive(Debug)] -pub struct TeeProverLayer { +pub(crate) struct TeeProverLayer { api_url: Url, signing_key: SecretKey, attestation_quote_bytes: Vec, @@ -47,14 +40,23 @@ impl TeeProverLayer { } } +#[derive(Debug, IntoContext)] +pub(crate) struct LayerOutput { + #[context(task)] + pub tee_prover: TeeProver, +} + #[async_trait::async_trait] impl WiringLayer for TeeProverLayer { + type Input = (); + type Output = LayerOutput; + fn layer_name(&self) -> &'static str { "tee_prover_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let tee_prover_task = TeeProver { + async fn wire(self, _input: Self::Input) -> Result { + let tee_prover = TeeProver { config: Default::default(), signing_key: self.signing_key, public_key: self.signing_key.public_key(&Secp256k1::new()), @@ -62,12 +64,11 @@ impl WiringLayer for TeeProverLayer { tee_type: self.tee_type, api_client: TeeApiClient::new(self.api_url), }; - context.add_task(tee_prover_task); - Ok(()) + Ok(LayerOutput { tee_prover }) } } -struct TeeProver { +pub(crate) struct TeeProver { config: TeeProverConfig, signing_key: SecretKey, public_key: PublicKey, @@ -76,6 +77,17 @@ struct TeeProver { api_client: TeeApiClient, } +impl fmt::Debug for TeeProver { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TeeProver") + .field("config", &self.config) + .field("public_key", &self.public_key) + .field("attestation_quote_bytes", &self.attestation_quote_bytes) + .field("tee_type", &self.tee_type) + .finish() + } +} + impl TeeProver { fn verify( &self, @@ -169,7 +181,7 @@ impl Task for TeeProver { return Ok(()); } let result = self.step().await; - match result { + let need_to_sleep = match result { Ok(batch_number) => { retries = 1; backoff = self.config.initial_retry_backoff; @@ -179,6 +191,9 @@ impl Task for TeeProver { 
METRICS .last_batch_number_processed .set(batch_number.0 as u64); + false + } else { + true } } Err(err) => { @@ -188,14 +203,17 @@ impl Task for TeeProver { } retries += 1; tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", self.config.max_retries, backoff.as_millis()); - tokio::time::timeout(backoff, stop_receiver.0.changed()) - .await - .ok(); backoff = std::cmp::min( backoff.mul_f32(self.config.retry_backoff_multiplier), self.config.max_backoff, ); + true } + }; + if need_to_sleep { + tokio::time::timeout(backoff, stop_receiver.0.changed()) + .await + .ok(); } } } diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index cfeeaa533b36..75bcfac62f24 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -327,6 +327,9 @@ pub struct Log { pub log_type: Option, /// Removed pub removed: Option, + /// L2 block timestamp + #[serde(rename = "blockTimestamp")] + pub block_timestamp: Option, } impl Log { diff --git a/core/lib/dal/.sqlx/query-3ba9bc85e3e286aadef8aad27eb38fc90b18155e3435f58d9888fa50d92042f7.json b/core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json similarity index 80% rename from core/lib/dal/.sqlx/query-3ba9bc85e3e286aadef8aad27eb38fc90b18155e3435f58d9888fa50d92042f7.json rename to core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json index 221e04e0c717..dbdec4ac5d65 100644 --- a/core/lib/dal/.sqlx/query-3ba9bc85e3e286aadef8aad27eb38fc90b18155e3435f58d9888fa50d92042f7.json +++ b/core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", + "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx,\n NULL::BIGINT AS \"block_timestamp?\"\n FROM\n events\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", "describe": { "columns": [ { @@ -67,6 +67,11 @@ "ordinal": 12, "name": "event_index_in_tx", "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "block_timestamp?", + "type_info": "Int8" } ], "parameters": { @@ -87,8 +92,9 @@ false, false, false, - false + false, + null ] }, - "hash": "3ba9bc85e3e286aadef8aad27eb38fc90b18155e3435f58d9888fa50d92042f7" + "hash": "526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3" } diff --git a/core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json b/core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json similarity index 84% rename from core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json rename to core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json index 0ee5b247c330..1c15bde02fdf 100644 --- a/core/lib/dal/.sqlx/query-dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b.json +++ 
b/core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n events_select AS (\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n miniblock_number > $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n )\n SELECT\n miniblocks.hash AS \"block_hash?\",\n address AS \"address!\",\n topic1 AS \"topic1!\",\n topic2 AS \"topic2!\",\n topic3 AS \"topic3!\",\n topic4 AS \"topic4!\",\n value AS \"value!\",\n miniblock_number AS \"miniblock_number!\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n tx_hash AS \"tx_hash!\",\n tx_index_in_block AS \"tx_index_in_block!\",\n event_index_in_block AS \"event_index_in_block!\",\n event_index_in_tx AS \"event_index_in_tx!\"\n FROM\n events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", + "query": "\n WITH\n events_select AS (\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n miniblock_number > $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n )\n SELECT\n miniblocks.hash AS \"block_hash?\",\n address AS \"address!\",\n topic1 AS \"topic1!\",\n topic2 AS \"topic2!\",\n topic3 AS \"topic3!\",\n topic4 AS \"topic4!\",\n value AS \"value!\",\n miniblock_number AS \"miniblock_number!\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n tx_hash AS \"tx_hash!\",\n tx_index_in_block AS \"tx_index_in_block!\",\n event_index_in_block AS \"event_index_in_block!\",\n event_index_in_tx AS \"event_index_in_tx!\",\n miniblocks.timestamp AS \"block_timestamp\"\n FROM\n events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", "describe": { "columns": [ { @@ -67,6 +67,11 @@ "ordinal": 12, "name": "event_index_in_tx!", "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "block_timestamp", + "type_info": "Int8" } ], "parameters": { @@ -87,8 +92,9 @@ false, false, false, + false, false ] }, - "hash": "dcb51063c12341785e57f221e2d5ede2be9770b3799a9ab64fe9690b6eb0a48b" + "hash": "c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479" } diff --git a/core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json b/core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json similarity index 84% rename from core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json rename to core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json index 93934a3a0bed..de9937ef7b95 100644 --- a/core/lib/dal/.sqlx/query-d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338.json +++ b/core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY ($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n 
transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", + "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY ($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\",\n miniblocks.timestamp AS \"block_timestamp?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", "describe": { "columns": [ { @@ -77,6 +77,11 @@ "ordinal": 14, "name": "contract_address?", "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "block_timestamp?", + "type_info": "Int8" } ], "parameters": { @@ -101,8 +106,9 @@ true, false, true, - true + true, + false ] }, - "hash": "d43d5c96ae92f52b12b320d5c6c43335d23bec1370e520186739d7075e9e3338" + "hash": "e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767" } diff --git a/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.down.sql b/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.down.sql new file mode 100644 index 000000000000..9e957f700f43 --- /dev/null +++ b/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE vm_runner_protective_reads ALTER COLUMN processing_started_at TYPE TIME USING (null); +ALTER TABLE vm_runner_bwip ALTER COLUMN processing_started_at TYPE TIME USING (null); diff --git a/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.up.sql b/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.up.sql new file mode 100644 index 000000000000..0afcdfe5aecf --- /dev/null +++ b/core/lib/dal/migrations/20240708194915_vm_runner_processing_started_at_timestamp.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE 
vm_runner_protective_reads ALTER COLUMN processing_started_at TYPE TIMESTAMP USING (null); +ALTER TABLE vm_runner_bwip ALTER COLUMN processing_started_at TYPE TIMESTAMP USING (null); diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 7bbffb23e320..c2b296fc085b 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -222,7 +222,8 @@ impl EventsDal<'_, '_> { tx_hash, tx_index_in_block, event_index_in_block, - event_index_in_tx + event_index_in_tx, + NULL::BIGINT AS "block_timestamp?" FROM events WHERE diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index 1a182f6052d5..fc21cc36460c 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -79,7 +79,8 @@ impl EventsWeb3Dal<'_, '_> { ORDER BY miniblock_number ASC, event_index_in_block ASC LIMIT ${} ) - SELECT miniblocks.hash as "block_hash", miniblocks.l1_batch_number as "l1_batch_number", events_select.* + SELECT miniblocks.hash as "block_hash", miniblocks.l1_batch_number as "l1_batch_number", + miniblocks.timestamp as block_timestamp, events_select.* FROM events_select INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number ORDER BY miniblock_number ASC, event_index_in_block ASC @@ -222,7 +223,8 @@ impl EventsWeb3Dal<'_, '_> { tx_hash AS "tx_hash!", tx_index_in_block AS "tx_index_in_block!", event_index_in_block AS "event_index_in_block!", - event_index_in_tx AS "event_index_in_tx!" + event_index_in_tx AS "event_index_in_tx!", + miniblocks.timestamp AS "block_timestamp" FROM events_select INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number diff --git a/core/lib/dal/src/models/storage_event.rs b/core/lib/dal/src/models/storage_event.rs index f741e2aa1202..415c39001ea0 100644 --- a/core/lib/dal/src/models/storage_event.rs +++ b/core/lib/dal/src/models/storage_event.rs @@ -20,6 +20,7 @@ pub struct StorageWeb3Log { pub tx_index_in_block: i32, pub event_index_in_block: i32, pub event_index_in_tx: i32, + pub block_timestamp: Option, } impl From for api::Log { @@ -47,6 +48,7 @@ impl From for api::Log { transaction_log_index: Some(U256::from(log.event_index_in_tx as u32)), log_type: None, removed: Some(false), + block_timestamp: log.block_timestamp, } } } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 01bbf4b4ff45..bce5e554f383 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -337,6 +337,7 @@ pub(crate) struct StorageTransactionReceipt { pub effective_gas_price: Option, pub contract_address: Option>, pub initiator_address: Vec, + pub block_timestamp: Option, } impl From for TransactionReceipt { diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index a73a383ff640..f207468d374c 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -43,7 +43,7 @@ impl TransactionsWeb3Dal<'_, '_> { // Clarification for first part of the query(`WITH` clause): // Looking for `ContractDeployed` event in the events table // to find the address of deployed contract - let mut receipts: Vec = sqlx::query_as!( + let st_receipts: Vec = sqlx::query_as!( StorageTransactionReceipt, r#" WITH @@ -75,7 +75,8 @@ impl TransactionsWeb3Dal<'_, '_> { transactions.gas_limit AS gas_limit, miniblocks.hash AS "block_hash", miniblocks.l1_batch_number AS "l1_batch_number?", - events.topic4 
AS "contract_address?" + events.topic4 AS "contract_address?", + miniblocks.timestamp AS "block_timestamp?" FROM transactions JOIN miniblocks ON miniblocks.number = transactions.miniblock_number @@ -93,10 +94,13 @@ impl TransactionsWeb3Dal<'_, '_> { .instrument("get_transaction_receipts") .with_arg("hashes.len", &hashes.len()) .fetch_all(self.storage) - .await? - .into_iter() - .map(Into::into) - .collect(); + .await?; + + let block_timestamps: Vec> = + st_receipts.iter().map(|x| x.block_timestamp).collect(); + + let mut receipts: Vec = + st_receipts.into_iter().map(Into::into).collect(); let mut logs = self .storage @@ -110,7 +114,7 @@ impl TransactionsWeb3Dal<'_, '_> { .get_l2_to_l1_logs_by_hashes(hashes) .await?; - for receipt in &mut receipts { + for (receipt, block_timestamp) in receipts.iter_mut().zip(block_timestamps.into_iter()) { let logs_for_tx = logs.remove(&receipt.transaction_hash); if let Some(logs) = logs_for_tx { @@ -119,6 +123,7 @@ impl TransactionsWeb3Dal<'_, '_> { .map(|mut log| { log.block_hash = Some(receipt.block_hash); log.l1_batch_number = receipt.l1_batch_number; + log.block_timestamp = block_timestamp; log }) .collect(); diff --git a/core/lib/default_da_clients/src/no_da/wiring_layer.rs b/core/lib/default_da_clients/src/no_da/wiring_layer.rs index c1332da9a97e..71a2ee7ce582 100644 --- a/core/lib/default_da_clients/src/no_da/wiring_layer.rs +++ b/core/lib/default_da_clients/src/no_da/wiring_layer.rs @@ -3,8 +3,8 @@ use std::fmt::Debug; use zksync_da_client::DataAvailabilityClient; use zksync_node_framework::{ implementations::resources::da_client::DAClientResource, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; use crate::no_da::client::NoDAClient; @@ -12,17 +12,25 @@ use crate::no_da::client::NoDAClient; #[derive(Debug, Default)] pub struct NoDAClientWiringLayer; +#[derive(Debug, IntoContext)] +pub struct Output { + pub client: DAClientResource, +} + #[async_trait::async_trait] impl WiringLayer for NoDAClientWiringLayer { + type Input = (); + type Output = Output; + fn layer_name(&self) -> &'static str { "no_da_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { let client: Box = Box::new(NoDAClient); - context.insert_resource(DAClientResource(client))?; - - Ok(()) + Ok(Output { + client: DAClientResource(client), + }) } } diff --git a/core/lib/default_da_clients/src/object_store/wiring_layer.rs b/core/lib/default_da_clients/src/object_store/wiring_layer.rs index 7af7e4d04fa6..6fc84fb707b7 100644 --- a/core/lib/default_da_clients/src/object_store/wiring_layer.rs +++ b/core/lib/default_da_clients/src/object_store/wiring_layer.rs @@ -2,8 +2,8 @@ use zksync_config::ObjectStoreConfig; use zksync_da_client::DataAvailabilityClient; use zksync_node_framework::{ implementations::resources::da_client::DAClientResource, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; use crate::object_store::client::ObjectStoreDAClient; @@ -19,18 +19,26 @@ impl ObjectStorageClientWiringLayer { } } +#[derive(Debug, IntoContext)] +pub struct Output { + pub client: DAClientResource, +} + #[async_trait::async_trait] impl WiringLayer for ObjectStorageClientWiringLayer { + type Input = (); + type Output = Output; + fn layer_name(&self) -> &'static str { "object_store_da_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { 
let client: Box = Box::new(ObjectStoreDAClient::new(self.config).await?); - context.insert_resource(DAClientResource(client))?; - - Ok(()) + Ok(Output { + client: DAClientResource(client), + }) } } diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index abf8288a8327..9c433a4afb85 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -443,6 +443,9 @@ pub struct Log { pub log_type: Option, /// Removed pub removed: Option, + /// L2 block timestamp + #[serde(rename = "blockTimestamp")] + pub block_timestamp: Option, } impl Log { diff --git a/core/lib/types/src/event/mod.rs b/core/lib/types/src/event/mod.rs index 055b41d77c7d..81e796097249 100644 --- a/core/lib/types/src/event/mod.rs +++ b/core/lib/types/src/event/mod.rs @@ -58,6 +58,7 @@ impl From<&VmEvent> for Log { transaction_log_index: None, log_type: None, removed: Some(false), + block_timestamp: None, } } } diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index c0d7267ebfae..2d7aa5c4b756 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -486,6 +486,7 @@ mod tests { transaction_log_index: Default::default(), log_type: Default::default(), removed: Default::default(), + block_timestamp: Default::default(), }; let decoded_op: GovernanceOperation = correct_log.clone().try_into().unwrap(); assert_eq!(decoded_op.calls.len(), 1); diff --git a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs index 39a96556f8de..83a135e7148e 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs @@ -1,4 +1,9 @@ -use std::{fmt::Debug, num::NonZeroU64, time::Duration}; +use std::{ + fmt::Debug, + num::NonZeroU64, + sync::{Arc, RwLock}, + time::Duration, +}; use anyhow::Context; use async_trait::async_trait; @@ -9,23 +14,23 @@ use zksync_types::fee_model::BaseTokenConversionRatio; const CACHE_UPDATE_INTERVAL: Duration = Duration::from_millis(500); #[async_trait] -pub trait BaseTokenRatioProvider: Debug + Send + Sync { +pub trait BaseTokenRatioProvider: Debug + Send + Sync + 'static { fn get_conversion_ratio(&self) -> BaseTokenConversionRatio; } #[derive(Debug, Clone)] pub struct DBBaseTokenRatioProvider { pub pool: ConnectionPool, - pub latest_ratio: BaseTokenConversionRatio, + pub latest_ratio: Arc>, } impl DBBaseTokenRatioProvider { pub async fn new(pool: ConnectionPool) -> anyhow::Result { - let mut fetcher = Self { + let fetcher = Self { pool, - latest_ratio: BaseTokenConversionRatio::default(), + latest_ratio: Arc::default(), }; - fetcher.latest_ratio = fetcher.get_latest_price().await?; + fetcher.update_latest_price().await?; // TODO(PE-129): Implement latest ratio usability logic. @@ -36,7 +41,11 @@ impl DBBaseTokenRatioProvider { Ok(fetcher) } - pub async fn run(&mut self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + fn get_latest_ratio(&self) -> BaseTokenConversionRatio { + *self.latest_ratio.read().unwrap() + } + + pub async fn run(&self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { let mut timer = tokio::time::interval(CACHE_UPDATE_INTERVAL); while !*stop_receiver.borrow_and_update() { @@ -45,20 +54,15 @@ impl DBBaseTokenRatioProvider { _ = stop_receiver.changed() => break, } - let latest_storage_ratio = self.get_latest_price().await?; - // TODO(PE-129): Implement latest ratio usability logic. 
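The `blockTimestamp` serde rename added to `web3::Log` and `api::Log` above is what ultimately surfaces in RPC responses. A small self-contained sketch of how the renamed field serializes; the one-field struct and the `u64` value are hypothetical stand-ins for the real `Log` type and timestamp:

use serde::Serialize;

// Hypothetical stand-in for the real `Log`: only the new field is kept, and
// `u64` replaces the actual timestamp type for brevity.
#[derive(Serialize)]
struct LogTimestampOnly {
    #[serde(rename = "blockTimestamp")]
    block_timestamp: Option<u64>,
}

fn main() {
    let log = LogTimestampOnly {
        block_timestamp: Some(1_717_000_000),
    };
    // Prints `{"blockTimestamp":1717000000}`; a `None` would serialize as null.
    println!("{}", serde_json::to_string(&log).unwrap());
}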
- self.latest_ratio = BaseTokenConversionRatio { - numerator: latest_storage_ratio.numerator, - denominator: latest_storage_ratio.denominator, - }; + self.update_latest_price().await?; } tracing::info!("Stop signal received, base_token_ratio_provider is shutting down"); Ok(()) } - async fn get_latest_price(&self) -> anyhow::Result { + async fn update_latest_price(&self) -> anyhow::Result<()> { let latest_storage_ratio = self .pool .connection_tagged("db_base_token_ratio_provider") @@ -68,28 +72,31 @@ impl DBBaseTokenRatioProvider { .get_latest_ratio() .await; - match latest_storage_ratio { - Ok(Some(latest_storage_price)) => Ok(BaseTokenConversionRatio { + let ratio = match latest_storage_ratio { + Ok(Some(latest_storage_price)) => BaseTokenConversionRatio { numerator: latest_storage_price.numerator, denominator: latest_storage_price.denominator, - }), + }, Ok(None) => { // TODO(PE-136): Insert initial ratio from genesis. // Though the DB should be populated very soon after the server starts, it is possible // to have no ratios in the DB right after genesis. Having initial ratios in the DB // from the genesis stage will eliminate this possibility. tracing::error!("No latest price found in the database. Using default ratio."); - Ok(BaseTokenConversionRatio::default()) + BaseTokenConversionRatio::default() } Err(err) => anyhow::bail!("Failed to get latest base token ratio: {:?}", err), - } + }; + + *self.latest_ratio.write().unwrap() = ratio; + Ok(()) } } #[async_trait] impl BaseTokenRatioProvider for DBBaseTokenRatioProvider { fn get_conversion_ratio(&self) -> BaseTokenConversionRatio { - self.latest_ratio + self.get_latest_ratio() } } diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index 853090b1907d..13c1caec381a 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -382,6 +382,7 @@ fn l1_batch_commit_log(l1_batch: &L1BatchWithMetadata) -> Log { transaction_log_index: None, log_type: Some("mined".into()), removed: None, + block_timestamp: None, } } diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index 6b15c71bd140..773b7f62030a 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -505,6 +505,7 @@ fn tx_into_log(tx: L1Tx) -> Log { transaction_log_index: Some(0u64.into()), log_type: None, removed: None, + block_timestamp: None, } } @@ -549,6 +550,7 @@ fn upgrade_into_governor_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { transaction_log_index: Some(0u64.into()), log_type: None, removed: None, + block_timestamp: None, } } diff --git a/core/node/node_framework/examples/main_node.rs b/core/node/node_framework/examples/main_node.rs index 9fb81aa4069b..38f989bda85f 100644 --- a/core/node/node_framework/examples/main_node.rs +++ b/core/node/node_framework/examples/main_node.rs @@ -50,7 +50,7 @@ use zksync_node_framework::{ server::{Web3ServerLayer, Web3ServerOptionalConfig}, tree_api_client::TreeApiClientLayer, tx_sender::{PostgresStorageCachesConfig, TxSenderLayer}, - tx_sink::TxSinkLayer, + tx_sink::MasterPoolSinkLayer, }, }, service::{ZkStackService, ZkStackServiceBuilder, ZkStackServiceError}, @@ -215,7 +215,7 @@ impl MainNodeBuilder { let wallets = Wallets::from_env()?; // On main node we always use master pool sink. 
- self.node.add_layer(TxSinkLayer::MasterPoolSink); + self.node.add_layer(MasterPoolSinkLayer); self.node.add_layer(TxSenderLayer::new( TxSenderConfig::new( &state_keeper_config, diff --git a/core/node/node_framework/examples/showcase.rs b/core/node/node_framework/examples/showcase.rs index 5684e53162a9..3dbb576c1935 100644 --- a/core/node/node_framework/examples/showcase.rs +++ b/core/node/node_framework/examples/showcase.rs @@ -9,9 +9,10 @@ use std::{ use zksync_node_framework::{ resource::Resource, - service::{ServiceContext, StopReceiver, ZkStackServiceBuilder}, + service::{StopReceiver, ZkStackServiceBuilder}, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// This will be an example of a shared resource. Basically, something that can be used by multiple @@ -160,45 +161,92 @@ impl Task for CheckTask { /// and another layer to fetch it. The benefit here is that if you want to swap the database /// implementation, you only have to inject a different wiring layer for database, and the /// wiring layers for the tasks will remain unchanged. +/// +/// Each wiring layer has to implement the `WiringLayer` trait. +/// It will receive its inputs and has to produce outputs, which will be stored in the node. +/// Added resources will be available for the layers that are added after this one, +/// and added tasks will be launched once the wiring completes. +/// +/// Inputs and outputs for the layers are defined by the [`FromContext`] and [`IntoContext`] +/// traits correspondingly. These traits have a few ready implementations, for example: +/// +/// - `()` can be used if you don't need inputs or don't produce outputs +/// - Any type `T` or `Option` that implements `Resource` also implements both [`FromContext`] +/// and [`IntoContext`]. This can be handy if you work with a single resource. +/// - Otherwise, the most convenient way is to define a struct that will hold all the inputs/ouptuts +/// and derive [`FromContext`] and [`IntoContext`] for it. +/// +/// See the trait documentation for more detail. struct DatabaseLayer; +/// Here we use a derive macro to define outputs for our layer. +#[derive(IntoContext)] +struct DatabaseLayerOutput { + pub db: DatabaseResource, +} + #[async_trait::async_trait] impl WiringLayer for DatabaseLayer { + // We don't need any input for this layer. + type Input = (); + // We will produce a database resource. + type Output = DatabaseLayerOutput; + fn layer_name(&self) -> &'static str { "database_layer" } /// `wire` method will be invoked by the service before the tasks are started. - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { let database = Arc::new(MemoryDatabase { data: Arc::new(Mutex::new(HashMap::new())), }); // We add the resource to the service context. This way it will be available for the tasks. - context.insert_resource(DatabaseResource(database))?; - Ok(()) + Ok(DatabaseLayerOutput { + db: DatabaseResource(database), + }) } } /// Layer where we add tasks. struct TasksLayer; +#[derive(FromContext)] +struct TasksLayerInput { + pub db: DatabaseResource, +} + +#[derive(IntoContext)] +struct TasksLayerOutput { + // Note that when using derive macros, all the fields are assumed to be resources by default. + // If you want to add a task, you need to apply a special attribute on the field. 
+ #[context(task)] + pub put_task: PutTask, + #[context(task)] + pub check_task: CheckTask, +} + #[async_trait::async_trait] impl WiringLayer for TasksLayer { + // Here we both receive input and produce output. + type Input = TasksLayerInput; + type Output = TasksLayerOutput; + fn layer_name(&self) -> &'static str { "tasks_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // We fetch the database resource from the context. + async fn wire(self, input: Self::Input) -> Result { + // We received the database resource from the context as `input`. // Note that we don't really care where it comes from or what's the actual implementation is. - // We only care whether it's available and bail out if not. - let db = context.get_resource::()?.0; + let db = input.db.0; let put_task = PutTask { db: db.clone() }; let check_task = CheckTask { db }; // These tasks will be launched by the service once the wiring process is complete. - context.add_task(put_task); - context.add_task(check_task); - Ok(()) + Ok(TasksLayerOutput { + put_task, + check_task, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/base_token_ratio_persister.rs b/core/node/node_framework/src/implementations/layers/base_token_ratio_persister.rs index c9a6ef8d8b66..9bf1786f6bbc 100644 --- a/core/node/node_framework/src/implementations/layers/base_token_ratio_persister.rs +++ b/core/node/node_framework/src/implementations/layers/base_token_ratio_persister.rs @@ -3,28 +3,34 @@ use zksync_config::configs::base_token_adjuster::BaseTokenAdjusterConfig; use crate::{ implementations::resources::pools::{MasterPool, PoolResource}, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for `BaseTokenRatioPersister` /// /// Responsible for orchestrating communications with external API feeds to get ETH<->BaseToken /// conversion ratios and persisting them both in the DB and in the L1. 
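A minimal sketch of the single-resource case from the bullet list in the showcase above: the layer needs no input and returns one resource directly, relying on the blanket `FromContext`/`IntoContext` implementations for resource types that the doc comment describes. `DatabaseResource` and `MemoryDatabase` are the types defined in this showcase file; the exact `wire` return type is assumed from the impls in this diff.

use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
};

use zksync_node_framework::wiring_layer::{WiringError, WiringLayer};

// A layer with no inputs: the unit type works as `Input`, and a single
// resource can serve as `Output` without a dedicated output struct.
struct StaticDatabaseLayer;

#[async_trait::async_trait]
impl WiringLayer for StaticDatabaseLayer {
    type Input = ();
    type Output = DatabaseResource;

    fn layer_name(&self) -> &'static str {
        "static_database_layer"
    }

    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
        let database = Arc::new(MemoryDatabase {
            data: Arc::new(Mutex::new(HashMap::new())),
        });
        // Returned directly: the resource itself acts as the layer's output context.
        Ok(DatabaseResource(database))
    }
}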
-/// -/// ## Requests resources -/// -/// - `PoolResource` -/// -/// ## Adds tasks -/// -/// - `BaseTokenRatioPersister` #[derive(Debug)] pub struct BaseTokenRatioPersisterLayer { config: BaseTokenAdjusterConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub persister: BaseTokenRatioPersister, +} + impl BaseTokenRatioPersisterLayer { pub fn new(config: BaseTokenAdjusterConfig) -> Self { Self { config } @@ -33,19 +39,17 @@ impl BaseTokenRatioPersisterLayer { #[async_trait::async_trait] impl WiringLayer for BaseTokenRatioPersisterLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "base_token_ratio_persister" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool_resource = context.get_resource::>()?; - let master_pool = master_pool_resource.get().await?; - + async fn wire(self, input: Self::Input) -> Result { + let master_pool = input.master_pool.get().await?; let persister = BaseTokenRatioPersister::new(master_pool, self.config); - - context.add_task(persister); - - Ok(()) + Ok(Output { persister }) } } diff --git a/core/node/node_framework/src/implementations/layers/base_token_ratio_provider.rs b/core/node/node_framework/src/implementations/layers/base_token_ratio_provider.rs index d213ac68c79b..465b61cdd1e6 100644 --- a/core/node/node_framework/src/implementations/layers/base_token_ratio_provider.rs +++ b/core/node/node_framework/src/implementations/layers/base_token_ratio_provider.rs @@ -7,9 +7,10 @@ use crate::{ base_token_ratio_provider::BaseTokenRatioProviderResource, pools::{PoolResource, ReplicaPool}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for `BaseTokenRatioProvider` @@ -20,35 +21,41 @@ use crate::{ /// /// If the base token is ETH, a default, no-op impl of the BaseTokenRatioProviderResource is used by other /// layers to always return a conversion ratio of 1. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// -/// ## Adds tasks -/// -/// - `BaseTokenRatioProvider` #[derive(Debug)] pub struct BaseTokenRatioProviderLayer; +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub replica_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub ratio_provider: BaseTokenRatioProviderResource, + #[context(task)] + pub ratio_provider_task: DBBaseTokenRatioProvider, +} + #[async_trait::async_trait] impl WiringLayer for BaseTokenRatioProviderLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "base_token_ratio_provider" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let replica_pool_resource = context.get_resource::>()?; - let replica_pool = replica_pool_resource.get().await.unwrap(); + async fn wire(self, input: Self::Input) -> Result { + let replica_pool = input.replica_pool.get().await.unwrap(); let ratio_provider = DBBaseTokenRatioProvider::new(replica_pool).await?; - - context.insert_resource(BaseTokenRatioProviderResource(Arc::new( - ratio_provider.clone(), - )))?; - context.add_task(ratio_provider); - - Ok(()) + // Cloning the provided preserves the internal state. 
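The comment above relies on `latest_ratio` now being an `Arc<RwLock<_>>` (see the `base_token_adjuster` hunk earlier in this diff). A self-contained sketch, with a hypothetical `Provider`/`Ratio` pair standing in for the real types, of why a cloned provider keeps observing updates made through the original:

use std::sync::{Arc, RwLock};

#[derive(Clone, Copy, Debug, Default, PartialEq)]
struct Ratio {
    numerator: u64,
    denominator: u64,
}

#[derive(Clone, Debug)]
struct Provider {
    // Shared, mutable state: clones point at the same lock.
    latest_ratio: Arc<RwLock<Ratio>>,
}

fn main() {
    let provider = Provider { latest_ratio: Arc::default() };
    // Mirrors `ratio_provider.clone()` being handed out as a resource
    // while the original keeps running as the updater task.
    let resource_view = provider.clone();

    *provider.latest_ratio.write().unwrap() = Ratio { numerator: 3, denominator: 2 };

    // The clone observes the update because both share the same allocation.
    assert_eq!(
        *resource_view.latest_ratio.read().unwrap(),
        Ratio { numerator: 3, denominator: 2 }
    );
}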
+ Ok(Output { + ratio_provider: Arc::new(ratio_provider.clone()).into(), + ratio_provider_task: ratio_provider, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/batch_status_updater.rs b/core/node/node_framework/src/implementations/layers/batch_status_updater.rs index d2b522ad026c..f9b18a6bf0bf 100644 --- a/core/node/node_framework/src/implementations/layers/batch_status_updater.rs +++ b/core/node/node_framework/src/implementations/layers/batch_status_updater.rs @@ -6,7 +6,7 @@ use crate::{ main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, FromContext, IntoContext, @@ -14,46 +14,39 @@ use crate::{ #[derive(Debug, FromContext)] #[context(crate = crate)] -struct LayerInput { - pool: PoolResource, - client: MainNodeClientResource, +pub struct Input { + pub pool: PoolResource, + pub client: MainNodeClientResource, #[context(default)] - app_health: AppHealthCheckResource, + pub app_health: AppHealthCheckResource, } #[derive(Debug, IntoContext)] #[context(crate = crate)] -struct LayerOutput { +pub struct Output { #[context(task)] - updater: BatchStatusUpdater, + pub updater: BatchStatusUpdater, } /// Wiring layer for `BatchStatusUpdater`, part of the external node. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `MainNodeClientResource` -/// - `AppHealthCheckResource` (adds a health check) -/// -/// ## Adds tasks -/// -/// - `BatchStatusUpdater` #[derive(Debug)] pub struct BatchStatusUpdaterLayer; #[async_trait::async_trait] impl WiringLayer for BatchStatusUpdaterLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "batch_status_updater_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let LayerInput { + async fn wire(self, input: Self::Input) -> Result { + let Input { pool, client, app_health, - } = LayerInput::from_context(&mut context)?; + } = input; let updater = BatchStatusUpdater::new(client.0, pool.get().await?); @@ -63,11 +56,7 @@ impl WiringLayer for BatchStatusUpdaterLayer { .insert_component(updater.health_check()) .map_err(WiringError::internal)?; - // Insert task - let layer_output = LayerOutput { updater }; - layer_output.into_context(&mut context)?; - - Ok(()) + Ok(Output { updater }) } } diff --git a/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs b/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs index 14ac5591840c..b3d31e34c354 100644 --- a/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs +++ b/core/node/node_framework/src/implementations/layers/circuit_breaker_checker.rs @@ -3,9 +3,10 @@ use zksync_config::configs::chain::CircuitBreakerConfig; use crate::{ implementations::resources::circuit_breakers::CircuitBreakersResource, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for circuit breaker checker @@ -13,47 +14,44 @@ use crate::{ /// Expects other layers to insert different components' circuit breakers into /// [`zksync_circuit_breaker::CircuitBreakers`] collection using [`CircuitBreakersResource`]. /// The added task periodically runs checks for all inserted circuit breakers. 
-/// -/// ## Requests resources -/// -/// - `CircuitBreakersResource` -/// -/// ## Adds tasks -/// -/// - `CircuitBreakerCheckerTask` #[derive(Debug)] pub struct CircuitBreakerCheckerLayer(pub CircuitBreakerConfig); +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + #[context(default)] + pub circuit_breakers: CircuitBreakersResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub circuit_breaker_checker: CircuitBreakerChecker, +} + #[async_trait::async_trait] impl WiringLayer for CircuitBreakerCheckerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "circuit_breaker_checker_layer" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. - let circuit_breaker_resource = node.get_resource_or_default::(); - + async fn wire(self, input: Self::Input) -> Result { let circuit_breaker_checker = - CircuitBreakerChecker::new(circuit_breaker_resource.breakers, self.0.sync_interval()); + CircuitBreakerChecker::new(input.circuit_breakers.breakers, self.0.sync_interval()); - // Create and insert task. - let task = CircuitBreakerCheckerTask { + Ok(Output { circuit_breaker_checker, - }; - - node.add_task(task); - Ok(()) + }) } } -#[derive(Debug)] -struct CircuitBreakerCheckerTask { - circuit_breaker_checker: CircuitBreakerChecker, -} - #[async_trait::async_trait] -impl Task for CircuitBreakerCheckerTask { +impl Task for CircuitBreakerChecker { fn kind(&self) -> TaskKind { TaskKind::UnconstrainedTask } @@ -63,6 +61,6 @@ impl Task for CircuitBreakerCheckerTask { } async fn run(mut self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.circuit_breaker_checker.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/commitment_generator.rs b/core/node/node_framework/src/implementations/layers/commitment_generator.rs index b2f8cd2d30c2..6d68559d4aec 100644 --- a/core/node/node_framework/src/implementations/layers/commitment_generator.rs +++ b/core/node/node_framework/src/implementations/layers/commitment_generator.rs @@ -8,29 +8,36 @@ use crate::{ healthcheck::AppHealthCheckResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for l1 batches commitment generation /// /// Responsible for initialization and running [`CommitmentGenerator`]. 
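The circuit breaker checker hunk above (and the commitment generator hunk that follows) switch from a wrapper task struct to implementing `Task` directly on the component. A sketch of that pattern with a hypothetical `Worker` component; the framework paths match the imports used elsewhere in this diff, and the `watch::Receiver<bool>` inside `StopReceiver` is an assumption based on how it is used here:

use tokio::sync::watch;
use zksync_node_framework::{
    service::StopReceiver,
    task::{Task, TaskId},
};

#[derive(Debug)]
struct Worker;

impl Worker {
    // The component's own entry point consumes `self`, like
    // `CircuitBreakerChecker::run` or `CommitmentGenerator::run`.
    async fn run(self, _stop: watch::Receiver<bool>) -> anyhow::Result<()> {
        Ok(())
    }
}

#[async_trait::async_trait]
impl Task for Worker {
    fn id(&self) -> TaskId {
        "worker".into()
    }

    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // Unbox so the inherent `run(self, ..)` can take the value; this is the
        // `(*self).run(stop_receiver.0)` idiom used in the layers above.
        (*self).run(stop_receiver.0).await
    }
}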
-/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `AppHealthCheckResource` (adds a health check) -/// -/// ## Adds tasks -/// -/// - `CommitmentGeneratorTask` #[derive(Debug)] pub struct CommitmentGeneratorLayer { mode: L1BatchCommitmentMode, max_parallelism: Option>, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub commitment_generator: CommitmentGenerator, +} + impl CommitmentGeneratorLayer { pub fn new(mode: L1BatchCommitmentMode) -> Self { Self { @@ -47,49 +54,44 @@ impl CommitmentGeneratorLayer { #[async_trait::async_trait] impl WiringLayer for CommitmentGeneratorLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "commitment_generator_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool_resource = context.get_resource::>()?; - + async fn wire(self, input: Self::Input) -> Result { let pool_size = self .max_parallelism .unwrap_or(CommitmentGenerator::default_parallelism()) .get(); - let main_pool = pool_resource.get_custom(pool_size).await?; + let main_pool = input.master_pool.get_custom(pool_size).await?; let mut commitment_generator = CommitmentGenerator::new(main_pool, self.mode); if let Some(max_parallelism) = self.max_parallelism { commitment_generator.set_max_parallelism(max_parallelism); } - let AppHealthCheckResource(app_health) = context.get_resource_or_default(); - app_health + input + .app_health + .0 .insert_component(commitment_generator.health_check()) .map_err(WiringError::internal)?; - context.add_task(CommitmentGeneratorTask { + Ok(Output { commitment_generator, - }); - - Ok(()) + }) } } -#[derive(Debug)] -struct CommitmentGeneratorTask { - commitment_generator: CommitmentGenerator, -} - #[async_trait::async_trait] -impl Task for CommitmentGeneratorTask { +impl Task for CommitmentGenerator { fn id(&self) -> TaskId { "commitment_generator".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.commitment_generator.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/consensus.rs b/core/node/node_framework/src/implementations/layers/consensus.rs deleted file mode 100644 index d1d7fa3b7de1..000000000000 --- a/core/node/node_framework/src/implementations/layers/consensus.rs +++ /dev/null @@ -1,189 +0,0 @@ -use anyhow::Context as _; -use zksync_concurrency::{ctx, scope}; -use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; -use zksync_dal::{ConnectionPool, Core}; -use zksync_node_consensus as consensus; -use zksync_node_sync::{ActionQueueSender, SyncState}; -use zksync_web3_decl::client::{DynClient, L2}; - -use crate::{ - implementations::resources::{ - action_queue::ActionQueueSenderResource, - main_node_client::MainNodeClientResource, - pools::{MasterPool, PoolResource}, - sync_state::SyncStateResource, - }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, -}; - -#[derive(Debug, Copy, Clone)] -pub enum Mode { - Main, - External, -} - -/// Wiring layer for consensus component. -/// Can work in either "main" or "external" mode. 
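Splitting the mode flag into two layers changes the call sites shown at the top of this diff. A sketch of that difference, with a hypothetical `wire_consensus_main` helper standing in for the node builders:

use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets};
use zksync_node_framework::{
    implementations::layers::consensus::MainNodeConsensusLayer,
    service::ZkStackServiceBuilder,
};

fn wire_consensus_main(
    builder: &mut ZkStackServiceBuilder,
    config: ConsensusConfig,
    secrets: ConsensusSecrets,
) {
    // Previously a single layer with a runtime mode flag and optional config:
    //     builder.add_layer(ConsensusLayer {
    //         mode: Mode::Main,
    //         config: Some(config),
    //         secrets: Some(secrets),
    //     });
    // Now each binary wires the variant it actually needs, and the main node
    // variant takes the config and secrets as required values.
    builder.add_layer(MainNodeConsensusLayer { config, secrets });
}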
-/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `MainNodeClientResource` (if `Mode::External`) -/// - `SyncStateResource` (if `Mode::External`) -/// - `ActionQueueSenderResource` (if `Mode::External`) -/// -/// ## Adds tasks -/// -/// - `MainNodeConsensusTask` (if `Mode::Main`) -/// - `ExternalNodeTask` (if `Mode::External`) -#[derive(Debug)] -pub struct ConsensusLayer { - pub mode: Mode, - pub config: Option, - pub secrets: Option, -} - -#[async_trait::async_trait] -impl WiringLayer for ConsensusLayer { - fn layer_name(&self) -> &'static str { - "consensus_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool = context - .get_resource::>()? - .get() - .await?; - - match self.mode { - Mode::Main => { - let config = self.config.ok_or_else(|| { - WiringError::Configuration("Missing public consensus config".to_string()) - })?; - let secrets = self.secrets.ok_or_else(|| { - WiringError::Configuration("Missing private consensus config".to_string()) - })?; - let task = MainNodeConsensusTask { - config, - secrets, - pool, - }; - context.add_task(task); - } - Mode::External => { - let main_node_client = context.get_resource::()?.0; - let sync_state = context.get_resource::()?.0; - let action_queue_sender = context - .get_resource::()? - .0 - .take() - .ok_or_else(|| { - WiringError::Configuration( - "Action queue sender is taken by another resource".to_string(), - ) - })?; - - let config = match (self.config, self.secrets) { - (Some(cfg), Some(secrets)) => Some((cfg, secrets)), - (Some(_), None) => { - return Err(WiringError::Configuration( - "Consensus config is specified, but secrets are missing".to_string(), - )); - } - (None, _) => { - // Secrets may be unconditionally embedded in some environments, but they are unused - // unless a consensus config is provided. - None - } - }; - - let task = ExternalNodeTask { - config, - pool, - main_node_client, - sync_state, - action_queue_sender, - }; - context.add_task(task); - } - } - Ok(()) - } -} - -#[derive(Debug)] -pub struct MainNodeConsensusTask { - config: ConsensusConfig, - secrets: ConsensusSecrets, - pool: ConnectionPool, -} - -#[async_trait::async_trait] -impl Task for MainNodeConsensusTask { - fn id(&self) -> TaskId { - "consensus".into() - } - - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - // We instantiate the root context here, since the consensus task is the only user of the - // structured concurrency framework (`MainNodeConsensusTask` and `ExternalNodeTask` are considered mutually - // exclusive). - // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, - // not the consensus task itself. There may have been any number of tasks running in the root context, - // but we only need to wait for stop signal once, and it will be propagated to all child contexts. 
- let root_ctx = ctx::root(); - scope::run!(&root_ctx, |ctx, s| async move { - s.spawn_bg(consensus::era::run_main_node( - ctx, - self.config, - self.secrets, - self.pool, - )); - let _ = stop_receiver.0.wait_for(|stop| *stop).await?; - Ok(()) - }) - .await - } -} - -#[derive(Debug)] -pub struct ExternalNodeTask { - config: Option<(ConsensusConfig, ConsensusSecrets)>, - pool: ConnectionPool, - main_node_client: Box>, - sync_state: SyncState, - action_queue_sender: ActionQueueSender, -} - -#[async_trait::async_trait] -impl Task for ExternalNodeTask { - fn id(&self) -> TaskId { - "consensus_fetcher".into() - } - - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - // We instantiate the root context here, since the consensus task is the only user of the - // structured concurrency framework (`MainNodeConsensusTask` and `ExternalNodeTask` are considered mutually - // exclusive). - // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, - // not the consensus task itself. There may have been any number of tasks running in the root context, - // but we only need to wait for stop signal once, and it will be propagated to all child contexts. - let root_ctx = ctx::root(); - scope::run!(&root_ctx, |ctx, s| async { - s.spawn_bg(consensus::era::run_external_node( - ctx, - self.config, - self.pool, - self.sync_state, - self.main_node_client, - self.action_queue_sender, - )); - let _ = stop_receiver.0.wait_for(|stop| *stop).await?; - Ok(()) - }) - .await - .context("consensus actor") - } -} diff --git a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs new file mode 100644 index 000000000000..bdb0eae70eea --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs @@ -0,0 +1,129 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, scope}; +use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +use zksync_dal::{ConnectionPool, Core}; +use zksync_node_consensus as consensus; +use zksync_node_framework_derive::IntoContext; +use zksync_node_sync::{ActionQueueSender, SyncState}; +use zksync_web3_decl::client::{DynClient, L2}; + +use crate::{ + implementations::resources::{ + action_queue::ActionQueueSenderResource, + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + sync_state::SyncStateResource, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, +}; + +/// Wiring layer for external node consensus component. 
+#[derive(Debug)] +pub struct ExternalNodeConsensusLayer { + pub config: Option, + pub secrets: Option, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub main_node_client: MainNodeClientResource, + pub sync_state: SyncStateResource, + pub action_queue_sender: ActionQueueSenderResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub consensus_task: ExternalNodeTask, +} + +#[async_trait::async_trait] +impl WiringLayer for ExternalNodeConsensusLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "external_node_consensus_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + + let main_node_client = input.main_node_client.0; + let sync_state = input.sync_state.0; + let action_queue_sender = input.action_queue_sender.0.take().ok_or_else(|| { + WiringError::Configuration( + "Action queue sender is taken by another resource".to_string(), + ) + })?; + + let config = match (self.config, self.secrets) { + (Some(cfg), Some(secrets)) => Some((cfg, secrets)), + (Some(_), None) => { + return Err(WiringError::Configuration( + "Consensus config is specified, but secrets are missing".to_string(), + )); + } + (None, _) => { + // Secrets may be unconditionally embedded in some environments, but they are unused + // unless a consensus config is provided. + None + } + }; + + let consensus_task = ExternalNodeTask { + config, + pool, + main_node_client, + sync_state, + action_queue_sender, + }; + Ok(Output { consensus_task }) + } +} + +#[derive(Debug)] +pub struct ExternalNodeTask { + config: Option<(ConsensusConfig, ConsensusSecrets)>, + pool: ConnectionPool, + main_node_client: Box>, + sync_state: SyncState, + action_queue_sender: ActionQueueSender, +} + +#[async_trait::async_trait] +impl Task for ExternalNodeTask { + fn id(&self) -> TaskId { + "consensus_fetcher".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // We instantiate the root context here, since the consensus task is the only user of the + // structured concurrency framework (`MainNodeConsensusTask` and `ExternalNodeTask` are considered mutually + // exclusive). + // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, + // not the consensus task itself. There may have been any number of tasks running in the root context, + // but we only need to wait for stop signal once, and it will be propagated to all child contexts. 
+ let root_ctx = ctx::root(); + scope::run!(&root_ctx, |ctx, s| async { + s.spawn_bg(consensus::era::run_external_node( + ctx, + self.config, + self.pool, + self.sync_state, + self.main_node_client, + self.action_queue_sender, + )); + let _ = stop_receiver.0.wait_for(|stop| *stop).await?; + Ok(()) + }) + .await + .context("consensus actor") + } +} diff --git a/core/node/node_framework/src/implementations/layers/consensus/main_node.rs b/core/node/node_framework/src/implementations/layers/consensus/main_node.rs new file mode 100644 index 000000000000..1ecd5f33c5ab --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/consensus/main_node.rs @@ -0,0 +1,90 @@ +use zksync_concurrency::{ctx, scope}; +use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +use zksync_dal::{ConnectionPool, Core}; +use zksync_node_consensus as consensus; +use zksync_node_framework_derive::FromContext; + +use crate::{ + implementations::resources::pools::{MasterPool, PoolResource}, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +/// Wiring layer for main node consensus component. +#[derive(Debug)] +pub struct MainNodeConsensusLayer { + pub config: ConsensusConfig, + pub secrets: ConsensusSecrets, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub consensus_task: MainNodeConsensusTask, +} + +#[async_trait::async_trait] +impl WiringLayer for MainNodeConsensusLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "main_node_consensus_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + + let consensus_task = MainNodeConsensusTask { + config: self.config, + secrets: self.secrets, + pool, + }; + + Ok(Output { consensus_task }) + } +} + +#[derive(Debug)] +pub struct MainNodeConsensusTask { + config: ConsensusConfig, + secrets: ConsensusSecrets, + pool: ConnectionPool, +} + +#[async_trait::async_trait] +impl Task for MainNodeConsensusTask { + fn id(&self) -> TaskId { + "consensus".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // We instantiate the root context here, since the consensus task is the only user of the + // structured concurrency framework (`MainNodeConsensusTask` and `ExternalNodeTask` are considered mutually + // exclusive). + // Note, however, that awaiting for the `stop_receiver` is related to the root context behavior, + // not the consensus task itself. There may have been any number of tasks running in the root context, + // but we only need to wait for stop signal once, and it will be propagated to all child contexts. 
+ let root_ctx = ctx::root(); + scope::run!(&root_ctx, |ctx, s| async move { + s.spawn_bg(consensus::era::run_main_node( + ctx, + self.config, + self.secrets, + self.pool, + )); + let _ = stop_receiver.0.wait_for(|stop| *stop).await?; + Ok(()) + }) + .await + } +} diff --git a/core/node/node_framework/src/implementations/layers/consensus/mod.rs b/core/node/node_framework/src/implementations/layers/consensus/mod.rs new file mode 100644 index 000000000000..59465d21d70d --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/consensus/mod.rs @@ -0,0 +1,4 @@ +pub use self::{external_node::ExternalNodeConsensusLayer, main_node::MainNodeConsensusLayer}; + +pub mod external_node; +pub mod main_node; diff --git a/core/node/node_framework/src/implementations/layers/consistency_checker.rs b/core/node/node_framework/src/implementations/layers/consistency_checker.rs index d9b5582f76b7..a9e99eb89ac4 100644 --- a/core/node/node_framework/src/implementations/layers/consistency_checker.rs +++ b/core/node/node_framework/src/implementations/layers/consistency_checker.rs @@ -7,22 +7,13 @@ use crate::{ healthcheck::AppHealthCheckResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for the `ConsistencyChecker` (used by the external node). -/// -/// ## Requests resources -/// -/// - `EthInterfaceResource` -/// - `PoolResource` -/// - `AppHealthCheckResource` (adds a health check) -/// -/// ## Adds tasks -/// -/// - `ConsistencyChecker` #[derive(Debug)] pub struct ConsistencyCheckerLayer { diamond_proxy_addr: Address, @@ -30,6 +21,22 @@ pub struct ConsistencyCheckerLayer { commitment_mode: L1BatchCommitmentMode, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub l1_client: EthInterfaceResource, + pub master_pool: PoolResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub consistency_checker: ConsistencyChecker, +} + impl ConsistencyCheckerLayer { pub fn new( diamond_proxy_addr: Address, @@ -46,16 +53,18 @@ impl ConsistencyCheckerLayer { #[async_trait::async_trait] impl WiringLayer for ConsistencyCheckerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "consistency_checker_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { // Get resources. - let l1_client = context.get_resource::()?.0; + let l1_client = input.l1_client.0; - let pool_resource = context.get_resource::>()?; - let singleton_pool = pool_resource.get_singleton().await?; + let singleton_pool = input.master_pool.get_singleton().await?; let consistency_checker = ConsistencyChecker::new( l1_client, @@ -66,15 +75,15 @@ impl WiringLayer for ConsistencyCheckerLayer { .map_err(WiringError::Internal)? .with_diamond_proxy_addr(self.diamond_proxy_addr); - let AppHealthCheckResource(app_health) = context.get_resource_or_default(); - app_health + input + .app_health + .0 .insert_component(consistency_checker.health_check().clone()) .map_err(WiringError::internal)?; - // Create and add tasks. 
- context.add_task(consistency_checker); - - Ok(()) + Ok(Output { + consistency_checker, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs index 94264fc27411..3f1f76cc1c12 100644 --- a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs +++ b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs @@ -3,47 +3,52 @@ use zksync_dal::{ConnectionPool, Core}; use crate::{ implementations::resources::pools::{MasterPool, PoolResource, ReplicaPool}, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for contract verification /// /// Responsible for initialization of the contract verification server. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `PoolResource` -/// -/// ## Adds tasks -/// -/// - `ContractVerificationApiTask` #[derive(Debug)] pub struct ContractVerificationApiLayer(pub ContractVerifierConfig); +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub replica_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub contract_verification_api_task: ContractVerificationApiTask, +} + #[async_trait::async_trait] impl WiringLayer for ContractVerificationApiLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "contract_verification_api_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool = context - .get_resource::>()? - .get() - .await?; - let replica_pool = context - .get_resource::>()? - .get() - .await?; - context.add_task(ContractVerificationApiTask { + async fn wire(self, input: Self::Input) -> Result { + let master_pool = input.master_pool.get().await?; + let replica_pool = input.replica_pool.get().await?; + let contract_verification_api_task = ContractVerificationApiTask { master_pool, replica_pool, config: self.0, - }); - Ok(()) + }; + Ok(Output { + contract_verification_api_task, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs index d1ba66b6ddd3..7759da314cc0 100644 --- a/core/node/node_framework/src/implementations/layers/da_dispatcher.rs +++ b/core/node/node_framework/src/implementations/layers/da_dispatcher.rs @@ -6,9 +6,10 @@ use crate::{ da_client::DAClientResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// A layer that wires the data availability dispatcher task. 
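The layers converted above (consistency checker, contract verification API, and the data availability dispatcher whose conversion continues below) all share the same typed wiring shape introduced by this PR: the layer declares `Input`/`Output` structs deriving `FromContext`/`IntoContext`, marks produced tasks with `#[context(task)]`, requests defaultable resources such as `AppHealthCheckResource` via `#[context(default)]`, and implements `wire(self, input)` by value instead of `wire(self: Box<Self>, ServiceContext<'_>)`. A minimal sketch of that shape, assuming the trait and derive attributes exactly as they appear in this diff, is given here; `ExampleLayer`, `ExampleTask`, and the task body are illustrative placeholders, not code from this change.

use zksync_dal::{ConnectionPool, Core};

use crate::{
    implementations::resources::{
        healthcheck::AppHealthCheckResource,
        pools::{MasterPool, PoolResource},
    },
    service::StopReceiver,
    task::{Task, TaskId},
    wiring_layer::{WiringError, WiringLayer},
    FromContext, IntoContext,
};

/// Hypothetical wiring layer illustrating the typed wiring pattern used throughout this PR.
#[derive(Debug)]
pub struct ExampleLayer;

#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct Input {
    /// Required resource: wiring fails if no other layer provided it.
    pub master_pool: PoolResource<MasterPool>,
    /// Defaultable resource: constructed on demand when absent.
    #[context(default)]
    pub app_health: AppHealthCheckResource,
}

#[derive(Debug, IntoContext)]
#[context(crate = crate)]
pub struct Output {
    /// Marked as a task so the framework spawns it after wiring completes.
    #[context(task)]
    pub example_task: ExampleTask,
}

#[async_trait::async_trait]
impl WiringLayer for ExampleLayer {
    type Input = Input;
    type Output = Output;

    fn layer_name(&self) -> &'static str {
        "example_layer"
    }

    async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
        // Resources arrive pre-resolved in `input`; no `context.get_resource()` calls needed.
        let pool = input.master_pool.get().await?;
        Ok(Output {
            example_task: ExampleTask { pool },
        })
    }
}

#[derive(Debug)]
pub struct ExampleTask {
    // Held for whatever work the real task would do with the database.
    pool: ConnectionPool<Core>,
}

#[async_trait::async_trait]
impl Task for ExampleTask {
    fn id(&self) -> TaskId {
        "example_task".into()
    }

    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // Illustrative body: wait for the stop signal and exit cleanly.
        let _ = stop_receiver.0.wait_for(|stop| *stop).await?;
        Ok(())
    }
}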
@@ -18,6 +19,20 @@ pub struct DataAvailabilityDispatcherLayer { da_config: DADispatcherConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub da_client: DAClientResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub da_dispatcher_task: DataAvailabilityDispatcher, +} + impl DataAvailabilityDispatcherLayer { pub fn new(state_keeper_config: StateKeeperConfig, da_config: DADispatcherConfig) -> Self { Self { @@ -29,15 +44,17 @@ impl DataAvailabilityDispatcherLayer { #[async_trait::async_trait] impl WiringLayer for DataAvailabilityDispatcherLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "da_dispatcher_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool_resource = context.get_resource::>()?; + async fn wire(self, input: Self::Input) -> Result { // A pool with size 2 is used here because there are 2 functions within a task that execute in parallel - let master_pool = master_pool_resource.get_custom(2).await?; - let da_client = context.get_resource::()?.0; + let master_pool = input.master_pool.get_custom(2).await?; + let da_client = input.da_client.0; if let Some(limit) = da_client.blob_size_limit() { if self.state_keeper_config.max_pubdata_per_batch > limit as u64 { @@ -48,13 +65,10 @@ impl WiringLayer for DataAvailabilityDispatcherLayer { } } - context.add_task(DataAvailabilityDispatcher::new( - master_pool, - self.da_config, - da_client, - )); + let da_dispatcher_task = + DataAvailabilityDispatcher::new(master_pool, self.da_config, da_client); - Ok(()) + Ok(Output { da_dispatcher_task }) } } diff --git a/core/node/node_framework/src/implementations/layers/eth_sender.rs b/core/node/node_framework/src/implementations/layers/eth_sender.rs deleted file mode 100644 index 6a9c0894b432..000000000000 --- a/core/node/node_framework/src/implementations/layers/eth_sender.rs +++ /dev/null @@ -1,214 +0,0 @@ -use anyhow::Context; -use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; -use zksync_config::configs::{eth_sender::EthConfig, ContractsConfig}; -use zksync_eth_client::BoundEthInterface; -use zksync_eth_sender::{Aggregator, EthTxAggregator, EthTxManager}; -use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; - -use crate::{ - implementations::resources::{ - circuit_breakers::CircuitBreakersResource, - eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, - l1_tx_params::L1TxParamsResource, - object_store::ObjectStoreResource, - pools::{MasterPool, PoolResource, ReplicaPool}, - }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, -}; - -/// Wiring layer for `eth_txs` managing -/// -/// Responsible for initialization and running [`EthTxManager`] component, that manages sending -/// of `eth_txs`(such as `CommitBlocks`, `PublishProofBlocksOnchain` or `ExecuteBlock` ) to L1. 
-/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `PoolResource` -/// - `BoundEthInterfaceResource` -/// - `BoundEthInterfaceForBlobsResource` (optional) -/// - `L1TxParamsResource` -/// - `CircuitBreakersResource` (adds a circuit breaker) -/// -/// ## Adds tasks -/// -/// - `EthTxManager` -#[derive(Debug)] -pub struct EthTxManagerLayer { - eth_sender_config: EthConfig, -} - -impl EthTxManagerLayer { - pub fn new(eth_sender_config: EthConfig) -> Self { - Self { eth_sender_config } - } -} - -#[async_trait::async_trait] -impl WiringLayer for EthTxManagerLayer { - fn layer_name(&self) -> &'static str { - "eth_tx_manager_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. - let master_pool_resource = context.get_resource::>()?; - let master_pool = master_pool_resource.get().await.unwrap(); - let replica_pool_resource = context.get_resource::>()?; - let replica_pool = replica_pool_resource.get().await.unwrap(); - - let eth_client = context.get_resource::()?.0; - let eth_client_blobs = match context.get_resource::() { - Ok(BoundEthInterfaceForBlobsResource(client)) => Some(client), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - - let config = self.eth_sender_config.sender.context("sender")?; - - let gas_adjuster = context.get_resource::()?.0; - - let eth_tx_manager_actor = EthTxManager::new( - master_pool, - config, - gas_adjuster, - eth_client, - eth_client_blobs, - ); - - context.add_task(eth_tx_manager_actor); - - // Insert circuit breaker. - let CircuitBreakersResource { breakers } = context.get_resource_or_default(); - breakers - .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool })) - .await; - - Ok(()) - } -} - -/// Wiring layer for aggregating l1 batches into `eth_txs` -/// -/// Responsible for initialization and running of [`EthTxAggregator`], that aggregates L1 batches -/// into `eth_txs`(such as `CommitBlocks`, `PublishProofBlocksOnchain` or `ExecuteBlock`). -/// These `eth_txs` will be used as a queue for generating signed txs and will be sent later on L1. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `PoolResource` -/// - `BoundEthInterfaceResource` -/// - `BoundEthInterfaceForBlobsResource` (optional) -/// - `ObjectStoreResource` -/// - `CircuitBreakersResource` (adds a circuit breaker) -/// -/// ## Adds tasks -/// -/// - `EthTxAggregator` -#[derive(Debug)] -pub struct EthTxAggregatorLayer { - eth_sender_config: EthConfig, - contracts_config: ContractsConfig, - zksync_network_id: L2ChainId, - l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, -} - -impl EthTxAggregatorLayer { - pub fn new( - eth_sender_config: EthConfig, - contracts_config: ContractsConfig, - zksync_network_id: L2ChainId, - l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, - ) -> Self { - Self { - eth_sender_config, - contracts_config, - zksync_network_id, - l1_batch_commit_data_generator_mode, - } - } -} - -#[async_trait::async_trait] -impl WiringLayer for EthTxAggregatorLayer { - fn layer_name(&self) -> &'static str { - "eth_tx_aggregator_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. 
- let master_pool_resource = context.get_resource::>()?; - let master_pool = master_pool_resource.get().await.unwrap(); - let replica_pool_resource = context.get_resource::>()?; - let replica_pool = replica_pool_resource.get().await.unwrap(); - - let eth_client = context.get_resource::()?.0; - let eth_client_blobs = match context.get_resource::() { - Ok(BoundEthInterfaceForBlobsResource(client)) => Some(client), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - let object_store = context.get_resource::()?.0; - - // Create and add tasks. - let eth_client_blobs_addr = eth_client_blobs - .as_deref() - .map(BoundEthInterface::sender_account); - - let config = self.eth_sender_config.sender.context("sender")?; - let aggregator = Aggregator::new( - config.clone(), - object_store, - eth_client_blobs_addr.is_some(), - self.l1_batch_commit_data_generator_mode, - ); - - let eth_tx_aggregator_actor = EthTxAggregator::new( - master_pool.clone(), - config.clone(), - aggregator, - eth_client.clone(), - self.contracts_config.validator_timelock_addr, - self.contracts_config.l1_multicall3_addr, - self.contracts_config.diamond_proxy_addr, - self.zksync_network_id, - eth_client_blobs_addr, - ) - .await; - - context.add_task(eth_tx_aggregator_actor); - - // Insert circuit breaker. - let CircuitBreakersResource { breakers } = context.get_resource_or_default(); - breakers - .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool })) - .await; - - Ok(()) - } -} - -#[async_trait::async_trait] -impl Task for EthTxAggregator { - fn id(&self) -> TaskId { - "eth_tx_aggregator".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for EthTxManager { - fn id(&self) -> TaskId { - "eth_tx_manager".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs new file mode 100644 index 000000000000..96fffcaf6a84 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs @@ -0,0 +1,146 @@ +use anyhow::Context; +use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; +use zksync_config::configs::{eth_sender::EthConfig, ContractsConfig}; +use zksync_eth_client::BoundEthInterface; +use zksync_eth_sender::{Aggregator, EthTxAggregator}; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; + +use crate::{ + implementations::resources::{ + circuit_breakers::CircuitBreakersResource, + eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, + object_store::ObjectStoreResource, + pools::{MasterPool, PoolResource, ReplicaPool}, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for aggregating l1 batches into `eth_txs` +/// +/// Responsible for initialization and running of [`EthTxAggregator`], that aggregates L1 batches +/// into `eth_txs`(such as `CommitBlocks`, `PublishProofBlocksOnchain` or `ExecuteBlock`). +/// These `eth_txs` will be used as a queue for generating signed txs and will be sent later on L1. 
+/// +/// ## Requests resources +/// +/// - `PoolResource` +/// - `PoolResource` +/// - `BoundEthInterfaceResource` +/// - `BoundEthInterfaceForBlobsResource` (optional) +/// - `ObjectStoreResource` +/// - `CircuitBreakersResource` (adds a circuit breaker) +/// +/// ## Adds tasks +/// +/// - `EthTxAggregator` +#[derive(Debug)] +pub struct EthTxAggregatorLayer { + eth_sender_config: EthConfig, + contracts_config: ContractsConfig, + zksync_network_id: L2ChainId, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub replica_pool: PoolResource, + pub eth_client: BoundEthInterfaceResource, + pub eth_client_blobs: Option, + pub object_store: ObjectStoreResource, + #[context(default)] + pub circuit_breakers: CircuitBreakersResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub eth_tx_aggregator: EthTxAggregator, +} + +impl EthTxAggregatorLayer { + pub fn new( + eth_sender_config: EthConfig, + contracts_config: ContractsConfig, + zksync_network_id: L2ChainId, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, + ) -> Self { + Self { + eth_sender_config, + contracts_config, + zksync_network_id, + l1_batch_commit_data_generator_mode, + } + } +} + +#[async_trait::async_trait] +impl WiringLayer for EthTxAggregatorLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "eth_tx_aggregator_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + // Get resources. + let master_pool = input.master_pool.get().await.unwrap(); + let replica_pool = input.replica_pool.get().await.unwrap(); + + let eth_client = input.eth_client.0; + let eth_client_blobs = input.eth_client_blobs.map(|c| c.0); + let object_store = input.object_store.0; + + // Create and add tasks. + let eth_client_blobs_addr = eth_client_blobs + .as_deref() + .map(BoundEthInterface::sender_account); + + let config = self.eth_sender_config.sender.context("sender")?; + let aggregator = Aggregator::new( + config.clone(), + object_store, + eth_client_blobs_addr.is_some(), + self.l1_batch_commit_data_generator_mode, + ); + + let eth_tx_aggregator = EthTxAggregator::new( + master_pool.clone(), + config.clone(), + aggregator, + eth_client.clone(), + self.contracts_config.validator_timelock_addr, + self.contracts_config.l1_multicall3_addr, + self.contracts_config.diamond_proxy_addr, + self.zksync_network_id, + eth_client_blobs_addr, + ) + .await; + + // Insert circuit breaker. 
+ input + .circuit_breakers + .breakers + .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool })) + .await; + + Ok(Output { eth_tx_aggregator }) + } +} + +#[async_trait::async_trait] +impl Task for EthTxAggregator { + fn id(&self) -> TaskId { + "eth_tx_aggregator".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs new file mode 100644 index 000000000000..e979c372d8e8 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs @@ -0,0 +1,115 @@ +use anyhow::Context; +use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; +use zksync_config::configs::eth_sender::EthConfig; +use zksync_eth_sender::EthTxManager; + +use crate::{ + implementations::resources::{ + circuit_breakers::CircuitBreakersResource, + eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, + l1_tx_params::L1TxParamsResource, + pools::{MasterPool, PoolResource, ReplicaPool}, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for `eth_txs` managing +/// +/// Responsible for initialization and running [`EthTxManager`] component, that manages sending +/// of `eth_txs`(such as `CommitBlocks`, `PublishProofBlocksOnchain` or `ExecuteBlock` ) to L1. +/// +/// ## Requests resources +/// +/// - `PoolResource` +/// - `PoolResource` +/// - `BoundEthInterfaceResource` +/// - `BoundEthInterfaceForBlobsResource` (optional) +/// - `L1TxParamsResource` +/// - `CircuitBreakersResource` (adds a circuit breaker) +/// +/// ## Adds tasks +/// +/// - `EthTxManager` +#[derive(Debug)] +pub struct EthTxManagerLayer { + eth_sender_config: EthConfig, +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub replica_pool: PoolResource, + pub eth_client: BoundEthInterfaceResource, + pub eth_client_blobs: Option, + pub l1_tx_params: L1TxParamsResource, + #[context(default)] + pub circuit_breakers: CircuitBreakersResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub eth_tx_manager: EthTxManager, +} + +impl EthTxManagerLayer { + pub fn new(eth_sender_config: EthConfig) -> Self { + Self { eth_sender_config } + } +} + +#[async_trait::async_trait] +impl WiringLayer for EthTxManagerLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "eth_tx_manager_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + // Get resources. + let master_pool = input.master_pool.get().await.unwrap(); + let replica_pool = input.replica_pool.get().await.unwrap(); + + let eth_client = input.eth_client.0; + let eth_client_blobs = input.eth_client_blobs.map(|c| c.0); + + let config = self.eth_sender_config.sender.context("sender")?; + + let gas_adjuster = input.l1_tx_params.0; + + let eth_tx_manager = EthTxManager::new( + master_pool, + config, + gas_adjuster, + eth_client, + eth_client_blobs, + ); + + // Insert circuit breaker. 
+ input + .circuit_breakers + .breakers + .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool })) + .await; + + Ok(Output { eth_tx_manager }) + } +} + +#[async_trait::async_trait] +impl Task for EthTxManager { + fn id(&self) -> TaskId { + "eth_tx_manager".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/mod.rs b/core/node/node_framework/src/implementations/layers/eth_sender/mod.rs new file mode 100644 index 000000000000..e072f5c6a11a --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/eth_sender/mod.rs @@ -0,0 +1,4 @@ +pub mod aggregator; +pub mod manager; + +pub use self::{aggregator::EthTxAggregatorLayer, manager::EthTxManagerLayer}; diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 8c7fe4269586..406d523e2d59 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -1,40 +1,42 @@ -use std::time::Duration; - use zksync_config::{ContractsConfig, EthWatchConfig}; use zksync_contracts::governance_contract; -use zksync_dal::{ConnectionPool, Core}; use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; -use zksync_types::{ethabi::Contract, Address}; use crate::{ implementations::resources::{ eth_interface::EthInterfaceResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for ethereum watcher /// /// Responsible for initializing and running of [`EthWatch`] component, that polls the Ethereum node for the relevant events, /// such as priority operations (aka L1 transactions), protocol upgrades etc. 
-/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `EthInterfaceResource` -/// -/// ## Adds tasks -/// -/// - `EthWatchTask` #[derive(Debug)] pub struct EthWatchLayer { eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub eth_client: EthInterfaceResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub eth_watch: EthWatch, +} + impl EthWatchLayer { pub fn new(eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig) -> Self { Self { @@ -46,15 +48,16 @@ impl EthWatchLayer { #[async_trait::async_trait] impl WiringLayer for EthWatchLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "eth_watch_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool_resource = context.get_resource::>()?; - let main_pool = pool_resource.get().await.unwrap(); - - let client = context.get_resource::()?.0; + async fn wire(self, input: Self::Input) -> Result { + let main_pool = input.master_pool.get().await.unwrap(); + let client = input.eth_client.0; let eth_client = EthHttpQueryClient::new( client, @@ -65,43 +68,27 @@ impl WiringLayer for EthWatchLayer { self.contracts_config.governance_addr, self.eth_watch_config.confirmations_for_eth_event, ); - context.add_task(EthWatchTask { + + let eth_watch = EthWatch::new( + self.contracts_config.diamond_proxy_addr, + &governance_contract(), + Box::new(eth_client), main_pool, - client: eth_client, - governance_contract: governance_contract(), - diamond_proxy_address: self.contracts_config.diamond_proxy_addr, - poll_interval: self.eth_watch_config.poll_interval(), - }); + self.eth_watch_config.poll_interval(), + ) + .await?; - Ok(()) + Ok(Output { eth_watch }) } } -#[derive(Debug)] -struct EthWatchTask { - main_pool: ConnectionPool, - client: EthHttpQueryClient, - governance_contract: Contract, - diamond_proxy_address: Address, - poll_interval: Duration, -} - #[async_trait::async_trait] -impl Task for EthWatchTask { +impl Task for EthWatch { fn id(&self) -> TaskId { "eth_watch".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - let eth_watch = EthWatch::new( - self.diamond_proxy_address, - &self.governance_contract, - Box::new(self.client), - self.main_pool, - self.poll_interval, - ) - .await?; - - eth_watch.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs index 126b7c0a2d4d..227048c0f545 100644 --- a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs +++ b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs @@ -6,9 +6,10 @@ use zksync_node_api_server::healthcheck::HealthCheckHandle; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for health check server @@ -16,38 +17,45 @@ use crate::{ /// Expects other layers to insert different components' health checks /// into [`AppHealthCheck`] aggregating heath using [`AppHealthCheckResource`]. 
/// The added task spawns a health check server that only exposes the state provided by other tasks. -/// -/// ## Requests resources -/// -/// - `AppHealthCheckResource` -/// -/// ## Adds tasks -/// -/// - `HealthCheckTask` #[derive(Debug)] pub struct HealthCheckLayer(pub HealthCheckConfig); +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + #[context(default)] + pub app_health_check: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub health_check_task: HealthCheckTask, +} + #[async_trait::async_trait] impl WiringLayer for HealthCheckLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "healthcheck_layer" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { - let AppHealthCheckResource(app_health_check) = node.get_resource_or_default(); - - let task = HealthCheckTask { + async fn wire(self, input: Self::Input) -> Result { + let AppHealthCheckResource(app_health_check) = input.app_health_check; + let health_check_task = HealthCheckTask { config: self.0, app_health_check, }; - node.add_task(task); - Ok(()) + Ok(Output { health_check_task }) } } #[derive(Debug)] -struct HealthCheckTask { +pub struct HealthCheckTask { config: HealthCheckConfig, app_health_check: Arc, } diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index f14a01587f71..74314320d815 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -15,31 +15,14 @@ use zksync_house_keeper::{ use crate::{ implementations::resources::pools::{PoolResource, ProverPool, ReplicaPool}, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for `HouseKeeper` - a component responsible for managing prover jobs /// and auxiliary server activities. 
-/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `PoolResource` -/// -/// ## Adds tasks -/// -/// - `L1BatchMetricsReporterTask` -/// - `FriProverJobRetryManagerTask` -/// - `FriWitnessGeneratorJobRetryManagerTask` -/// - `WaitingToQueuedFriWitnessJobMoverTask` -/// - `FriProverJobArchiverTask` -/// - `FriProverGpuArchiverTask` -/// - `FriWitnessGeneratorStatsReporterTask` -/// - `FriProverStatsReporterTask` -/// - `FriProofCompressorStatsReporterTask` -/// - `FriProofCompressorJobRetryManagerTask` #[derive(Debug)] pub struct HouseKeeperLayer { house_keeper_config: HouseKeeperConfig, @@ -49,6 +32,38 @@ pub struct HouseKeeperLayer { fri_proof_compressor_config: FriProofCompressorConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub replica_pool: PoolResource, + pub prover_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub l1_batch_metrics_reporter: L1BatchMetricsReporter, + #[context(task)] + pub fri_prover_job_retry_manager: FriProverJobRetryManager, + #[context(task)] + pub fri_witness_generator_job_retry_manager: FriWitnessGeneratorJobRetryManager, + #[context(task)] + pub waiting_to_queued_fri_witness_job_mover: WaitingToQueuedFriWitnessJobMover, + #[context(task)] + pub fri_prover_job_archiver: Option, + #[context(task)] + pub fri_prover_gpu_archiver: Option, + #[context(task)] + pub fri_witness_generator_stats_reporter: FriWitnessGeneratorQueueReporter, + #[context(task)] + pub fri_prover_stats_reporter: FriProverQueueReporter, + #[context(task)] + pub fri_proof_compressor_stats_reporter: FriProofCompressorQueueReporter, + #[context(task)] + pub fri_proof_compressor_job_retry_manager: FriProofCompressorJobRetryManager, +} + impl HouseKeeperLayer { pub fn new( house_keeper_config: HouseKeeperConfig, @@ -69,17 +84,17 @@ impl HouseKeeperLayer { #[async_trait::async_trait] impl WiringLayer for HouseKeeperLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "house_keeper_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { // Initialize resources - let replica_pool_resource = context.get_resource::>()?; - let replica_pool = replica_pool_resource.get().await?; - - let prover_pool_resource = context.get_resource::>()?; - let prover_pool = prover_pool_resource.get().await?; + let replica_pool = input.replica_pool.get().await?; + let prover_pool = input.prover_pool.get().await?; // Initialize and add tasks let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( @@ -87,9 +102,6 @@ impl WiringLayer for HouseKeeperLayer { .l1_batch_metrics_reporting_interval_ms, replica_pool.clone(), ); - context.add_task(L1BatchMetricsReporterTask { - l1_batch_metrics_reporter, - }); let fri_prover_job_retry_manager = FriProverJobRetryManager::new( self.fri_prover_config.max_attempts, @@ -97,9 +109,6 @@ impl WiringLayer for HouseKeeperLayer { self.house_keeper_config.prover_job_retrying_interval_ms, prover_pool.clone(), ); - context.add_task(FriProverJobRetryManagerTask { - fri_prover_job_retry_manager, - }); let fri_witness_gen_job_retry_manager = FriWitnessGeneratorJobRetryManager::new( self.fri_witness_generator_config.max_attempts, @@ -109,46 +118,30 @@ impl WiringLayer for HouseKeeperLayer { .witness_generator_job_retrying_interval_ms, prover_pool.clone(), ); - context.add_task(FriWitnessGeneratorJobRetryManagerTask { - 
fri_witness_gen_job_retry_manager, - }); let waiting_to_queued_fri_witness_job_mover = WaitingToQueuedFriWitnessJobMover::new( self.house_keeper_config.witness_job_moving_interval_ms, prover_pool.clone(), ); - context.add_task(WaitingToQueuedFriWitnessJobMoverTask { - waiting_to_queued_fri_witness_job_mover, - }); - - if let Some((archiving_interval, archive_after)) = - self.house_keeper_config.prover_job_archiver_params() - { - let fri_prover_job_archiver = - FriProverJobsArchiver::new(prover_pool.clone(), archiving_interval, archive_after); - context.add_task(FriProverJobArchiverTask { - fri_prover_job_archiver, - }); - } - if let Some((archiving_interval, archive_after)) = - self.house_keeper_config.fri_gpu_prover_archiver_params() - { - let fri_prover_gpu_archiver = - FriGpuProverArchiver::new(prover_pool.clone(), archiving_interval, archive_after); - context.add_task(FriProverGpuArchiverTask { - fri_prover_gpu_archiver, + let fri_prover_job_archiver = self.house_keeper_config.prover_job_archiver_params().map( + |(archiving_interval, archive_after)| { + FriProverJobsArchiver::new(prover_pool.clone(), archiving_interval, archive_after) + }, + ); + + let fri_prover_gpu_archiver = self + .house_keeper_config + .fri_gpu_prover_archiver_params() + .map(|(archiving_interval, archive_after)| { + FriGpuProverArchiver::new(prover_pool.clone(), archiving_interval, archive_after) }); - } let fri_witness_generator_stats_reporter = FriWitnessGeneratorQueueReporter::new( prover_pool.clone(), self.house_keeper_config .witness_generator_stats_reporting_interval_ms, ); - context.add_task(FriWitnessGeneratorStatsReporterTask { - fri_witness_generator_stats_reporter, - }); let fri_prover_stats_reporter = FriProverQueueReporter::new( self.house_keeper_config.prover_stats_reporting_interval_ms, @@ -156,18 +149,12 @@ impl WiringLayer for HouseKeeperLayer { replica_pool.clone(), self.fri_prover_group_config, ); - context.add_task(FriProverStatsReporterTask { - fri_prover_stats_reporter, - }); let fri_proof_compressor_stats_reporter = FriProofCompressorQueueReporter::new( self.house_keeper_config .proof_compressor_stats_reporting_interval_ms, prover_pool.clone(), ); - context.add_task(FriProofCompressorStatsReporterTask { - fri_proof_compressor_stats_reporter, - }); let fri_proof_compressor_retry_manager = FriProofCompressorJobRetryManager::new( self.fri_proof_compressor_config.max_attempts, @@ -176,179 +163,128 @@ impl WiringLayer for HouseKeeperLayer { .proof_compressor_job_retrying_interval_ms, prover_pool.clone(), ); - context.add_task(FriProofCompressorJobRetryManagerTask { - fri_proof_compressor_retry_manager, - }); - Ok(()) + Ok(Output { + l1_batch_metrics_reporter, + fri_prover_job_retry_manager, + fri_witness_generator_job_retry_manager: fri_witness_gen_job_retry_manager, + waiting_to_queued_fri_witness_job_mover, + fri_prover_job_archiver, + fri_prover_gpu_archiver, + fri_witness_generator_stats_reporter, + fri_prover_stats_reporter, + fri_proof_compressor_stats_reporter, + fri_proof_compressor_job_retry_manager: fri_proof_compressor_retry_manager, + }) } } -#[derive(Debug)] -struct L1BatchMetricsReporterTask { - l1_batch_metrics_reporter: L1BatchMetricsReporter, -} - #[async_trait::async_trait] -impl Task for L1BatchMetricsReporterTask { +impl Task for L1BatchMetricsReporter { fn id(&self) -> TaskId { "l1_batch_metrics_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.l1_batch_metrics_reporter.run(stop_receiver.0).await + 
(*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProverJobRetryManagerTask { - fri_prover_job_retry_manager: FriProverJobRetryManager, -} - #[async_trait::async_trait] -impl Task for FriProverJobRetryManagerTask { +impl Task for FriProverJobRetryManager { fn id(&self) -> TaskId { "fri_prover_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_prover_job_retry_manager.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriWitnessGeneratorJobRetryManagerTask { - fri_witness_gen_job_retry_manager: FriWitnessGeneratorJobRetryManager, -} - #[async_trait::async_trait] -impl Task for FriWitnessGeneratorJobRetryManagerTask { +impl Task for FriWitnessGeneratorJobRetryManager { fn id(&self) -> TaskId { "fri_witness_generator_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_witness_gen_job_retry_manager - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct WaitingToQueuedFriWitnessJobMoverTask { - waiting_to_queued_fri_witness_job_mover: WaitingToQueuedFriWitnessJobMover, -} - #[async_trait::async_trait] -impl Task for WaitingToQueuedFriWitnessJobMoverTask { +impl Task for WaitingToQueuedFriWitnessJobMover { fn id(&self) -> TaskId { "waiting_to_queued_fri_witness_job_mover".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.waiting_to_queued_fri_witness_job_mover - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriWitnessGeneratorStatsReporterTask { - fri_witness_generator_stats_reporter: FriWitnessGeneratorQueueReporter, -} - #[async_trait::async_trait] -impl Task for FriWitnessGeneratorStatsReporterTask { +impl Task for FriWitnessGeneratorQueueReporter { fn id(&self) -> TaskId { - "fri_witness_generator_stats_reporter".into() + "fri_witness_generator_queue_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_witness_generator_stats_reporter - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProverStatsReporterTask { - fri_prover_stats_reporter: FriProverQueueReporter, -} - #[async_trait::async_trait] -impl Task for FriProverStatsReporterTask { +impl Task for FriProverQueueReporter { fn id(&self) -> TaskId { - "fri_prover_stats_reporter".into() + "fri_prover_queue_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_prover_stats_reporter.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProofCompressorStatsReporterTask { - fri_proof_compressor_stats_reporter: FriProofCompressorQueueReporter, -} - #[async_trait::async_trait] -impl Task for FriProofCompressorStatsReporterTask { +impl Task for FriProofCompressorQueueReporter { fn id(&self) -> TaskId { - "fri_proof_compressor_stats_reporter".into() + "fri_proof_compressor_queue_reporter".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_proof_compressor_stats_reporter - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProofCompressorJobRetryManagerTask { - fri_proof_compressor_retry_manager: FriProofCompressorJobRetryManager, -} - #[async_trait::async_trait] -impl Task for FriProofCompressorJobRetryManagerTask { +impl Task for 
FriProofCompressorJobRetryManager { fn id(&self) -> TaskId { "fri_proof_compressor_job_retry_manager".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_proof_compressor_retry_manager - .run(stop_receiver.0) - .await + (*self).run(stop_receiver.0).await } } -#[derive(Debug)] -struct FriProverJobArchiverTask { - fri_prover_job_archiver: FriProverJobsArchiver, -} - #[async_trait::async_trait] -impl Task for FriProverJobArchiverTask { +impl Task for FriProverJobsArchiver { fn id(&self) -> TaskId { - "fri_prover_job_archiver".into() + "fri_prover_jobs_archiver".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_prover_job_archiver.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -struct FriProverGpuArchiverTask { - fri_prover_gpu_archiver: FriGpuProverArchiver, -} - #[async_trait::async_trait] -impl Task for FriProverGpuArchiverTask { +impl Task for FriGpuProverArchiver { fn id(&self) -> TaskId { - "fri_prover_gpu_archiver".into() + "fri_gpu_prover_archiver".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.fri_prover_gpu_archiver.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs b/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs index 893c8d361164..1ef340e08aa7 100644 --- a/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs +++ b/core/node/node_framework/src/implementations/layers/l1_batch_commitment_mode_validation.rs @@ -3,27 +3,33 @@ use zksync_types::{commitment::L1BatchCommitmentMode, Address}; use crate::{ implementations::resources::eth_interface::EthInterfaceResource, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for a prerequisite that checks if the L1 batch commitment mode is valid /// against L1. 
-/// -/// ## Requests resources -/// -/// - `EthInterfaceResource` -/// -/// ## Adds preconditions -/// -/// - `L1BatchCommitmentModeValidationTask` #[derive(Debug)] pub struct L1BatchCommitmentModeValidationLayer { diamond_proxy_addr: Address, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub eth_client: EthInterfaceResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: L1BatchCommitmentModeValidationTask, +} + impl L1BatchCommitmentModeValidationLayer { pub fn new( diamond_proxy_addr: Address, @@ -38,21 +44,22 @@ impl L1BatchCommitmentModeValidationLayer { #[async_trait::async_trait] impl WiringLayer for L1BatchCommitmentModeValidationLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "l1_batch_commitment_mode_validation_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let EthInterfaceResource(query_client) = context.get_resource()?; + async fn wire(self, input: Self::Input) -> Result { + let EthInterfaceResource(query_client) = input.eth_client; let task = L1BatchCommitmentModeValidationTask::new( self.diamond_proxy_addr, self.l1_batch_commit_data_generator_mode, query_client, ); - context.add_task(task); - - Ok(()) + Ok(Output { task }) } } diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index 2276e73e857f..85e0422cdcb1 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -14,26 +14,14 @@ use crate::{ eth_interface::EthInterfaceResource, fee_input::FeeInputResource, l1_tx_params::L1TxParamsResource, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for sequencer L1 gas interfaces. /// Adds several resources that depend on L1 gas price. -/// -/// ## Requests resources -/// -/// - `EthInterfaceResource` -/// -/// ## Adds resources -/// -/// - `FeeInputResource` -/// - `L1TxParamsResource` -/// -/// ## Adds tasks -/// -/// - `GasAdjusterTask` (only runs if someone uses the resourced listed above). #[derive(Debug)] pub struct SequencerL1GasLayer { gas_adjuster_config: GasAdjusterConfig, @@ -42,6 +30,25 @@ pub struct SequencerL1GasLayer { state_keeper_config: StateKeeperConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub eth_client: EthInterfaceResource, + /// If not provided, the base token assumed to be ETH, and the ratio will be constant. + #[context(default)] + pub base_token_ratio_provider: BaseTokenRatioProviderResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub fee_input: FeeInputResource, + pub l1_tx_params: L1TxParamsResource, + /// Only runs if someone uses the resources listed above. 
+ #[context(task)] + pub gas_adjuster_task: GasAdjusterTask, +} + impl SequencerL1GasLayer { pub fn new( gas_adjuster_config: GasAdjusterConfig, @@ -60,12 +67,15 @@ impl SequencerL1GasLayer { #[async_trait::async_trait] impl WiringLayer for SequencerL1GasLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "sequencer_l1_gas_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let client = context.get_resource::()?.0; + async fn wire(self, input: Self::Input) -> Result { + let client = input.eth_client.0; let adjuster = GasAdjuster::new( client, self.gas_adjuster_config, @@ -76,24 +86,23 @@ impl WiringLayer for SequencerL1GasLayer { .context("GasAdjuster::new()")?; let gas_adjuster = Arc::new(adjuster); - let ratio_provider = context.get_resource_or_default::(); + let ratio_provider = input.base_token_ratio_provider; let batch_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( gas_adjuster.clone(), ratio_provider.0.clone(), FeeModelConfig::from_state_keeper_config(&self.state_keeper_config), )); - context.insert_resource(FeeInputResource(batch_fee_input_provider))?; - - context.insert_resource(L1TxParamsResource(gas_adjuster.clone()))?; - - context.add_task(GasAdjusterTask { gas_adjuster }); - Ok(()) + Ok(Output { + fee_input: batch_fee_input_provider.into(), + l1_tx_params: gas_adjuster.clone().into(), + gas_adjuster_task: GasAdjusterTask { gas_adjuster }, + }) } } #[derive(Debug)] -struct GasAdjusterTask { +pub struct GasAdjusterTask { gas_adjuster: Arc, } diff --git a/core/node/node_framework/src/implementations/layers/main_node_client.rs b/core/node/node_framework/src/implementations/layers/main_node_client.rs index d875a2bc07f8..2f61bf897e5b 100644 --- a/core/node/node_framework/src/implementations/layers/main_node_client.rs +++ b/core/node/node_framework/src/implementations/layers/main_node_client.rs @@ -9,19 +9,11 @@ use crate::{ implementations::resources::{ healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, }, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for main node client. -/// -/// ## Requests resources -/// -/// - `AppHealthCheckResource` (adds a health check) -/// -/// ## Adds resources -/// -/// - `MainNodeClientResource` #[derive(Debug)] pub struct MainNodeClientLayer { url: SensitiveUrl, @@ -29,6 +21,19 @@ pub struct MainNodeClientLayer { l2_chain_id: L2ChainId, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub main_node_client: MainNodeClientResource, +} + impl MainNodeClientLayer { pub fn new(url: SensitiveUrl, rate_limit_rps: NonZeroUsize, l2_chain_id: L2ChainId) -> Self { Self { @@ -41,11 +46,14 @@ impl MainNodeClientLayer { #[async_trait::async_trait] impl WiringLayer for MainNodeClientLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "main_node_client_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { let main_node_client = Client::http(self.url) .context("failed creating JSON-RPC client for main node")? 
.for_network(self.l2_chain_id.into()) @@ -53,14 +61,16 @@ impl WiringLayer for MainNodeClientLayer { .build(); let client = Box::new(main_node_client) as Box>; - context.insert_resource(MainNodeClientResource(client.clone()))?; // Insert healthcheck - let AppHealthCheckResource(app_health) = context.get_resource_or_default(); - app_health - .insert_custom_component(Arc::new(MainNodeHealthCheck::from(client))) + input + .app_health + .0 + .insert_custom_component(Arc::new(MainNodeHealthCheck::from(client.clone()))) .map_err(WiringError::internal)?; - Ok(()) + Ok(Output { + main_node_client: client.into(), + }) } } diff --git a/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs b/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs index 06db8e69f194..848dd4464387 100644 --- a/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs +++ b/core/node/node_framework/src/implementations/layers/main_node_fee_params_fetcher.rs @@ -6,45 +6,52 @@ use crate::{ implementations::resources::{ fee_input::FeeInputResource, main_node_client::MainNodeClientResource, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for main node fee params fetcher -- a fee input resource used on /// the external node. -/// -/// ## Requests resources -/// -/// - `MainNodeClientResource` -/// -/// ## Adds resources -/// -/// - `FeeInputResource` -/// -/// ## Adds tasks -/// -/// - `MainNodeFeeParamsFetcherTask` #[derive(Debug)] pub struct MainNodeFeeParamsFetcherLayer; +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub main_node_client: MainNodeClientResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub fee_input: FeeInputResource, + #[context(task)] + pub fetcher: MainNodeFeeParamsFetcherTask, +} + #[async_trait::async_trait] impl WiringLayer for MainNodeFeeParamsFetcherLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "main_node_fee_params_fetcher_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let MainNodeClientResource(main_node_client) = context.get_resource()?; + async fn wire(self, input: Self::Input) -> Result { + let MainNodeClientResource(main_node_client) = input.main_node_client; let fetcher = Arc::new(MainNodeFeeParamsFetcher::new(main_node_client)); - context.insert_resource(FeeInputResource(fetcher.clone()))?; - context.add_task(MainNodeFeeParamsFetcherTask { fetcher }); - Ok(()) + Ok(Output { + fee_input: fetcher.clone().into(), + fetcher: MainNodeFeeParamsFetcherTask { fetcher }, + }) } } #[derive(Debug)] -struct MainNodeFeeParamsFetcherTask { +pub struct MainNodeFeeParamsFetcherTask { fetcher: Arc, } diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 41e7561b70f3..827ec69d9427 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -18,28 +18,13 @@ use crate::{ pools::{MasterPool, PoolResource, ReplicaPool}, web3_api::TreeApiClientResource, }, - service::{ServiceContext, StopReceiver}, + service::{ShutdownHook, StopReceiver}, task::{Task, TaskId}, wiring_layer::{WiringError, 
WiringLayer}, + FromContext, IntoContext, }; -/// Wiring layer for -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `PoolResource` -/// - `ObjectStoreResource` (only for `MerkleTreeMode::Full`) -/// - `AppHealthCheckResource` (adds several health checks) -/// -/// ## Adds resources -/// -/// - `TreeApiClientResource` -/// -/// ## Adds tasks -/// -/// - `MetadataCalculatorTask` -/// - `TreeApiTask` (if requested) +/// Wiring layer for Metadata calculator and Tree API. #[derive(Debug)] pub struct MetadataCalculatorLayer { config: MetadataCalculatorConfig, @@ -47,6 +32,32 @@ pub struct MetadataCalculatorLayer { pruning_config: Option, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub replica_pool: PoolResource, + /// Only needed for `MerkleTreeMode::Full` + pub object_store: Option, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub metadata_calculator: MetadataCalculator, + pub tree_api_client: TreeApiClientResource, + /// Only provided if configuration is provided. + #[context(task)] + pub tree_api_task: Option, + /// Only provided if configuration is provided. + #[context(task)] + pub pruning_task: Option, + pub rocksdb_shutdown_hook: ShutdownHook, +} + impl MetadataCalculatorLayer { pub fn new(config: MetadataCalculatorConfig) -> Self { Self { @@ -69,24 +80,28 @@ impl MetadataCalculatorLayer { #[async_trait::async_trait] impl WiringLayer for MetadataCalculatorLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "metadata_calculator_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool = context.get_resource::>()?; - let main_pool = pool.get().await?; + async fn wire(self, input: Self::Input) -> Result { + let main_pool = input.master_pool.get().await?; // The number of connections in a recovery pool is based on the mainnet recovery runs. It doesn't need // to be particularly accurate at this point, since the main node isn't expected to recover from a snapshot. - let recovery_pool = context - .get_resource::>()? - .get_custom(10) - .await?; + let recovery_pool = input.replica_pool.get_custom(10).await?; + let app_health = input.app_health.0; let object_store = match self.config.mode { MerkleTreeMode::Lightweight => None, MerkleTreeMode::Full => { - let store = context.get_resource::()?; + let store = input.object_store.ok_or_else(|| { + WiringError::Configuration( + "Object store is required for full Merkle tree mode".into(), + ) + })?; Some(store) } }; @@ -99,42 +114,48 @@ impl WiringLayer for MetadataCalculatorLayer { .await? 
.with_recovery_pool(recovery_pool); - let AppHealthCheckResource(app_health) = context.get_resource_or_default(); app_health .insert_custom_component(Arc::new(metadata_calculator.tree_health_check())) .map_err(WiringError::internal)?; - if let Some(tree_api_config) = self.tree_api_config { + let tree_api_task = self.tree_api_config.map(|tree_api_config| { let bind_addr = (Ipv4Addr::UNSPECIFIED, tree_api_config.port).into(); let tree_reader = metadata_calculator.tree_reader(); - context.add_task(TreeApiTask { + TreeApiTask { bind_addr, tree_reader, - }); - } - - if let Some(pruning_removal_delay) = self.pruning_config { - let pruning_task = metadata_calculator.pruning_task(pruning_removal_delay); - app_health - .insert_component(pruning_task.health_check()) - .map_err(|err| WiringError::Internal(err.into()))?; - context.add_task(pruning_task); - } - - context.insert_resource(TreeApiClientResource(Arc::new( - metadata_calculator.tree_reader(), - )))?; - - context.add_task(metadata_calculator); + } + }); - context.add_shutdown_hook("rocksdb_terminaton", async { + let pruning_task = self + .pruning_config + .map( + |pruning_removal_delay| -> Result { + let pruning_task = metadata_calculator.pruning_task(pruning_removal_delay); + app_health + .insert_component(pruning_task.health_check()) + .map_err(|err| WiringError::Internal(err.into()))?; + Ok(pruning_task) + }, + ) + .transpose()?; + + let tree_api_client = TreeApiClientResource(Arc::new(metadata_calculator.tree_reader())); + + let rocksdb_shutdown_hook = ShutdownHook::new("rocksdb_terminaton", async { // Wait for all the instances of RocksDB to be destroyed. tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination) .await .context("failed terminating RocksDB instances") }); - Ok(()) + Ok(Output { + metadata_calculator, + tree_api_client, + tree_api_task, + pruning_task, + rocksdb_shutdown_hook, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index f9d2b94bad22..7cf05f1aa06c 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -24,8 +24,6 @@ pub mod prometheus_exporter; pub mod proof_data_handler; pub mod pruning; pub mod query_eth_client; -pub mod reorg_detector_checker; -pub mod reorg_detector_runner; pub mod sigint; pub mod state_keeper; pub mod sync_state_updater; diff --git a/core/node/node_framework/src/implementations/layers/object_store.rs b/core/node/node_framework/src/implementations/layers/object_store.rs index 6803ccfb55b7..55840caf1f9c 100644 --- a/core/node/node_framework/src/implementations/layers/object_store.rs +++ b/core/node/node_framework/src/implementations/layers/object_store.rs @@ -3,15 +3,10 @@ use zksync_object_store::ObjectStoreFactory; use crate::{ implementations::resources::object_store::ObjectStoreResource, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, }; /// Wiring layer for object store. 
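The object store layer (whose hunk continues just below) and the query Ethereum client layer later in this diff show the smallest shape the new interface allows: no input and a single resource as the output, with no derive structs at all. Roughly, such a layer reduces to the following sketch; the `FooLayer`/`FooConfig`/`Foo`/`FooResource` names are illustrative only, and the sketch assumes, as these hunks do, that a bare resource type can serve directly as `Output`:

// Hypothetical minimal layer; not code from this PR.
#[derive(Debug)]
pub struct FooLayer {
    config: FooConfig,
}

#[async_trait::async_trait]
impl WiringLayer for FooLayer {
    // No dependencies on other layers, so the unit type is enough.
    type Input = ();
    // A single resource can be returned directly; no `Output` struct is needed.
    type Output = FooResource;

    fn layer_name(&self) -> &'static str {
        "foo_layer"
    }

    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
        let foo = Foo::new(self.config);
        Ok(FooResource(foo))
    }
}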
-/// -/// ## Adds resources -/// -/// - `ObjectStoreResource` #[derive(Debug)] pub struct ObjectStoreLayer { config: ObjectStoreConfig, @@ -25,13 +20,16 @@ impl ObjectStoreLayer { #[async_trait::async_trait] impl WiringLayer for ObjectStoreLayer { + type Input = (); + type Output = ObjectStoreResource; + fn layer_name(&self) -> &'static str { "object_store_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { let object_store = ObjectStoreFactory::new(self.config).create_store().await?; - context.insert_resource(ObjectStoreResource(object_store))?; - Ok(()) + let resource = ObjectStoreResource(object_store); + Ok(resource) } } diff --git a/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs b/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs index 74eb5e3bae35..de570105a471 100644 --- a/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs +++ b/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs @@ -10,20 +10,11 @@ use crate::{ implementations::resources::eth_interface::{ BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource, EthInterfaceResource, }, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for [`PKSigningClient`]. -/// -/// ## Requests resources -/// -/// - `EthInterfaceResource` -/// -/// ## Adds resources -/// -/// - `BoundEthInterfaceResource` -/// - `BoundEthInterfaceForBlobsResource` (if key for blob operator is provided) #[derive(Debug)] pub struct PKSigningEthClientLayer { eth_sender_config: EthConfig, @@ -32,6 +23,20 @@ pub struct PKSigningEthClientLayer { wallets: wallets::EthSender, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub eth_client: EthInterfaceResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub signing_client: BoundEthInterfaceResource, + /// Only provided if the blob operator key is provided to the layer. 
+ pub signing_client_for_blobs: Option, +} + impl PKSigningEthClientLayer { pub fn new( eth_sender_config: EthConfig, @@ -50,18 +55,21 @@ impl PKSigningEthClientLayer { #[async_trait::async_trait] impl WiringLayer for PKSigningEthClientLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "pk_signing_eth_client_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { let private_key = self.wallets.operator.private_key(); let gas_adjuster_config = self .eth_sender_config .gas_adjuster .as_ref() .context("gas_adjuster config is missing")?; - let EthInterfaceResource(query_client) = context.get_resource()?; + let EthInterfaceResource(query_client) = input.eth_client; let signing_client = PKSigningClient::new_raw( private_key.clone(), @@ -70,9 +78,9 @@ impl WiringLayer for PKSigningEthClientLayer { self.l1_chain_id, query_client.clone(), ); - context.insert_resource(BoundEthInterfaceResource(Box::new(signing_client)))?; + let signing_client = BoundEthInterfaceResource(Box::new(signing_client)); - if let Some(blob_operator) = &self.wallets.blob_operator { + let signing_client_for_blobs = self.wallets.blob_operator.map(|blob_operator| { let private_key = blob_operator.private_key(); let signing_client_for_blobs = PKSigningClient::new_raw( private_key.clone(), @@ -81,11 +89,12 @@ impl WiringLayer for PKSigningEthClientLayer { self.l1_chain_id, query_client, ); - context.insert_resource(BoundEthInterfaceForBlobsResource(Box::new( - signing_client_for_blobs, - )))?; - } + BoundEthInterfaceForBlobsResource(Box::new(signing_client_for_blobs)) + }); - Ok(()) + Ok(Output { + signing_client, + signing_client_for_blobs, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/pools_layer.rs b/core/node/node_framework/src/implementations/layers/pools_layer.rs index 880b793115b7..54ebdcb2fa9c 100644 --- a/core/node/node_framework/src/implementations/layers/pools_layer.rs +++ b/core/node/node_framework/src/implementations/layers/pools_layer.rs @@ -9,8 +9,8 @@ use crate::{ healthcheck::AppHealthCheckResource, pools::{MasterPool, PoolResource, ProverPool, ReplicaPool}, }, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Builder for the [`PoolsLayer`]. 
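A recurring convention in this migration is how conditionally created resources are expressed: instead of calling `insert_resource` inside an `if`, the `Output` struct declares the field as `Option<_>` (as `signing_client_for_blobs` does above, and as the pool resources do in the `PoolsLayer` hunk that follows), while a consumer that can operate without a resource declares `Option<_>` in its `Input`. A sketch of both sides, using an illustrative `BarResource`:

// Producer side: the resource exists only when the optional key/config was supplied.
// `#[context(crate = crate)]` appears here presumably because these derives are
// invoked from inside the framework crate itself, matching the hunks in this file.
#[derive(Debug, IntoContext)]
#[context(crate = crate)]
pub struct Output {
    pub client: BarResource,
    /// `None` unless the optional key was provided to the layer.
    pub client_for_blobs: Option<BarResource>,
}

// Consumer side: a required field fails wiring when the resource is absent,
// while an `Option` field simply resolves to `None`.
#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct Input {
    pub client: BarResource,
    pub client_for_blobs: Option<BarResource>,
}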
@@ -87,13 +87,31 @@ pub struct PoolsLayer { with_prover: bool, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub master_pool: Option>, + pub replica_pool: Option>, + pub prover_pool: Option>, +} + #[async_trait::async_trait] impl WiringLayer for PoolsLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "pools_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { if !self.with_master && !self.with_replica && !self.with_prover { return Err(WiringError::Configuration( "At least one pool should be enabled".to_string(), @@ -109,56 +127,63 @@ impl WiringLayer for PoolsLayer { } } - if self.with_master { + let master_pool = if self.with_master { let pool_size = self.config.max_connections()?; let pool_size_master = self.config.max_connections_master().unwrap_or(pool_size); - context.insert_resource(PoolResource::::new( + Some(PoolResource::::new( self.secrets.master_url()?, pool_size_master, None, None, - ))?; - } + )) + } else { + None + }; - if self.with_replica { + let replica_pool = if self.with_replica { // We're most interested in setting acquire / statement timeouts for the API server, which puts the most load // on Postgres. - context.insert_resource(PoolResource::::new( + Some(PoolResource::::new( self.secrets.replica_url()?, self.config.max_connections()?, self.config.statement_timeout(), self.config.acquire_timeout(), - ))?; - } + )) + } else { + None + }; - if self.with_prover { - context.insert_resource(PoolResource::::new( + let prover_pool = if self.with_prover { + Some(PoolResource::::new( self.secrets.prover_url()?, self.config.max_connections()?, None, None, - ))?; - } + )) + } else { + None + }; // Insert health checks for the core pool. - let connection_pool = if self.with_replica { - context - .get_resource::>()? - .get() - .await? - } else { - context - .get_resource::>()? - .get() - .await? + // Replica pool is preferred here. 
+ let healthcheck_pool = match (&replica_pool, &master_pool) { + (Some(replica), _) => Some(replica.get().await?), + (_, Some(master)) => Some(master.get().await?), + _ => None, }; - let db_health_check = ConnectionPoolHealthCheck::new(connection_pool); - let AppHealthCheckResource(app_health) = context.get_resource_or_default(); - app_health - .insert_custom_component(Arc::new(db_health_check)) - .map_err(WiringError::internal)?; + if let Some(pool) = healthcheck_pool { + let db_health_check = ConnectionPoolHealthCheck::new(pool); + let AppHealthCheckResource(app_health) = input.app_health; + app_health + .insert_custom_component(Arc::new(db_health_check)) + .map_err(WiringError::internal)?; + } - Ok(()) + Ok(Output { + master_pool, + replica_pool, + prover_pool, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/postgres_metrics.rs b/core/node/node_framework/src/implementations/layers/postgres_metrics.rs index 9b290b76cad0..238bee578678 100644 --- a/core/node/node_framework/src/implementations/layers/postgres_metrics.rs +++ b/core/node/node_framework/src/implementations/layers/postgres_metrics.rs @@ -4,42 +4,50 @@ use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Core}; use crate::{ implementations::resources::pools::{PoolResource, ReplicaPool}, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; const SCRAPE_INTERVAL: Duration = Duration::from_secs(60); /// Wiring layer for the Postgres metrics exporter. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// -/// ## Adds tasks -/// -/// - `PostgresMetricsScrapingTask` #[derive(Debug)] pub struct PostgresMetricsLayer; +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub replica_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: PostgresMetricsScrapingTask, +} + #[async_trait::async_trait] impl WiringLayer for PostgresMetricsLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "postgres_metrics_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let replica_pool_resource = context.get_resource::>()?; - let pool_for_metrics = replica_pool_resource.get_singleton().await?; - context.add_task(PostgresMetricsScrapingTask { pool_for_metrics }); + async fn wire(self, input: Self::Input) -> Result { + let pool_for_metrics = input.replica_pool.get_singleton().await?; + let task = PostgresMetricsScrapingTask { pool_for_metrics }; - Ok(()) + Ok(Output { task }) } } #[derive(Debug)] -struct PostgresMetricsScrapingTask { +pub struct PostgresMetricsScrapingTask { pool_for_metrics: ConnectionPool, } diff --git a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs index 3a5b0f2dd93b..8ce53c8bfdb2 100644 --- a/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs +++ b/core/node/node_framework/src/implementations/layers/prometheus_exporter.rs @@ -3,20 +3,13 @@ use zksync_vlog::prometheus::PrometheusExporterConfig; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer 
for Prometheus exporter server. -/// -/// ## Requests resources -/// -/// - `AppHealthCheckResource` (adds a health check) -/// -/// ## Adds tasks -/// -/// - `PrometheusExporterTask` #[derive(Debug)] pub struct PrometheusExporterLayer(pub PrometheusExporterConfig); @@ -26,18 +19,36 @@ pub struct PrometheusExporterTask { prometheus_health_updater: HealthUpdater, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: PrometheusExporterTask, +} + #[async_trait::async_trait] impl WiringLayer for PrometheusExporterLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "prometheus_exporter" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { let (prometheus_health_check, prometheus_health_updater) = ReactiveHealthCheck::new("prometheus_exporter"); - let AppHealthCheckResource(app_health) = node.get_resource_or_default(); - app_health + input + .app_health + .0 .insert_component(prometheus_health_check) .map_err(WiringError::internal)?; @@ -46,8 +57,7 @@ impl WiringLayer for PrometheusExporterLayer { prometheus_health_updater, }; - node.add_task(task); - Ok(()) + Ok(Output { task }) } } diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index b7c543f3d4ab..bcb3cedc6e7e 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -10,27 +10,33 @@ use crate::{ object_store::ObjectStoreResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for proof data handler server. 
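Two derive attributes do most of the work in these `Input`/`Output` structs: `#[context(task)]` on an output field registers it with the service as a task instead of inserting it as a resource, and `#[context(default)]` on an input field falls back to the resource's `Default` when no earlier layer provided it, which is how `AppHealthCheckResource` is obtained throughout this diff. A combined sketch with placeholder `SomeResource`/`SomeTask` names:

#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct Input {
    // Required: wiring fails if no layer has inserted this resource.
    pub master_pool: PoolResource<MasterPool>,
    // Optional with a fallback: defaults to `AppHealthCheckResource::default()`.
    #[context(default)]
    pub app_health: AppHealthCheckResource,
}

#[derive(Debug, IntoContext)]
#[context(crate = crate)]
pub struct Output {
    // Inserted into the context as a resource for later layers to consume.
    pub some_resource: SomeResource,
    // Registered with the service as a runnable task.
    #[context(task)]
    pub task: SomeTask,
}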
-/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `ObjectStoreResource` -/// -/// ## Adds tasks -/// -/// - `ProofDataHandlerTask` #[derive(Debug)] pub struct ProofDataHandlerLayer { proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub object_store: ObjectStoreResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: ProofDataHandlerTask, +} + impl ProofDataHandlerLayer { pub fn new( proof_data_handler_config: ProofDataHandlerConfig, @@ -45,29 +51,30 @@ impl ProofDataHandlerLayer { #[async_trait::async_trait] impl WiringLayer for ProofDataHandlerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "proof_data_handler_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool_resource = context.get_resource::>()?; - let main_pool = pool_resource.get().await.unwrap(); - - let object_store = context.get_resource::()?; + async fn wire(self, input: Self::Input) -> Result { + let main_pool = input.master_pool.get().await.unwrap(); + let blob_store = input.object_store.0; - context.add_task(ProofDataHandlerTask { + let task = ProofDataHandlerTask { proof_data_handler_config: self.proof_data_handler_config, - blob_store: object_store.0, + blob_store, main_pool, commitment_mode: self.commitment_mode, - }); + }; - Ok(()) + Ok(Output { task }) } } #[derive(Debug)] -struct ProofDataHandlerTask { +pub struct ProofDataHandlerTask { proof_data_handler_config: ProofDataHandlerConfig, blob_store: Arc, main_pool: ConnectionPool, diff --git a/core/node/node_framework/src/implementations/layers/pruning.rs b/core/node/node_framework/src/implementations/layers/pruning.rs index c5acefcbebdd..216e214026b1 100644 --- a/core/node/node_framework/src/implementations/layers/pruning.rs +++ b/core/node/node_framework/src/implementations/layers/pruning.rs @@ -7,21 +7,13 @@ use crate::{ healthcheck::AppHealthCheckResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for node pruning layer. 
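Error handling inside `wire()` follows a consistent split across these hunks: a required-but-absent resource or missing configuration becomes `WiringError::Configuration(..)`, while failures from infrastructure calls are wrapped via `WiringError::internal` (or `WiringError::Internal(err.into())`). A condensed sketch of a `wire()` body using that convention; the `Input`/`Output` shapes are assumed to match the surrounding layers, and the component names are placeholders:

async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
    // A required-but-absent resource surfaces as a configuration error.
    let object_store = input.object_store.ok_or_else(|| {
        WiringError::Configuration("Object store is required for this mode".into())
    })?;

    // Failures from infrastructure calls are wrapped as internal wiring errors.
    let (health_check, _health_updater) = ReactiveHealthCheck::new("example_component");
    input
        .app_health
        .0
        .insert_component(health_check)
        .map_err(WiringError::internal)?;

    Ok(Output { object_store })
}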
-/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `AppHealthCheckResource` (adds a health check) -/// -/// ## Adds tasks -/// -/// - `DbPruner` #[derive(Debug)] pub struct PruningLayer { pruning_removal_delay: Duration, @@ -29,6 +21,21 @@ pub struct PruningLayer { minimum_l1_batch_age: Duration, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub db_pruner: DbPruner, +} + impl PruningLayer { pub fn new( pruning_removal_delay: Duration, @@ -45,13 +52,15 @@ impl PruningLayer { #[async_trait::async_trait] impl WiringLayer for PruningLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "pruning_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool_resource = context.get_resource::>()?; - let main_pool = pool_resource.get().await?; + async fn wire(self, input: Self::Input) -> Result { + let main_pool = input.master_pool.get().await?; let db_pruner = DbPruner::new( DbPrunerConfig { @@ -62,14 +71,12 @@ impl WiringLayer for PruningLayer { main_pool, ); - let AppHealthCheckResource(app_health) = context.get_resource_or_default(); - app_health + input + .app_health + .0 .insert_component(db_pruner.health_check()) .map_err(WiringError::internal)?; - - context.add_task(db_pruner); - - Ok(()) + Ok(Output { db_pruner }) } } diff --git a/core/node/node_framework/src/implementations/layers/query_eth_client.rs b/core/node/node_framework/src/implementations/layers/query_eth_client.rs index 36f0c8176609..d48312d7d5b5 100644 --- a/core/node/node_framework/src/implementations/layers/query_eth_client.rs +++ b/core/node/node_framework/src/implementations/layers/query_eth_client.rs @@ -4,15 +4,10 @@ use zksync_web3_decl::client::Client; use crate::{ implementations::resources::eth_interface::EthInterfaceResource, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, }; /// Wiring layer for Ethereum client. -/// -/// ## Adds resources -/// -/// - `EthInterfaceResource` #[derive(Debug)] pub struct QueryEthClientLayer { chain_id: L1ChainId, @@ -27,16 +22,18 @@ impl QueryEthClientLayer { #[async_trait::async_trait] impl WiringLayer for QueryEthClientLayer { + type Input = (); + type Output = EthInterfaceResource; + fn layer_name(&self) -> &'static str { "query_eth_client_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { let query_client = Client::http(self.web3_url.clone()) .context("Client::new()")? 
.for_network(self.chain_id.into()) .build(); - context.insert_resource(EthInterfaceResource(Box::new(query_client)))?; - Ok(()) + Ok(EthInterfaceResource(Box::new(query_client))) } } diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs deleted file mode 100644 index 0d846501a568..000000000000 --- a/core/node/node_framework/src/implementations/layers/reorg_detector_checker.rs +++ /dev/null @@ -1,100 +0,0 @@ -use std::time::Duration; - -use anyhow::Context; -use zksync_dal::{ConnectionPool, Core}; -use zksync_reorg_detector::{self, ReorgDetector}; - -use crate::{ - implementations::resources::{ - main_node_client::MainNodeClientResource, - pools::{MasterPool, PoolResource}, - }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId, TaskKind}, - wiring_layer::{WiringError, WiringLayer}, -}; - -const REORG_DETECTED_SLEEP_INTERVAL: Duration = Duration::from_secs(1); - -/// Wiring layer for [`ReorgDetector`] checker. -/// This layer is responsible for detecting reorgs and preventing the node from starting if it occurs. -/// -/// ## Requests resources -/// -/// - `MainNodeClientResource` -/// - `PoolResource` -/// -/// ## Adds preconditions -/// -/// - `CheckerPrecondition` -#[derive(Debug)] -pub struct ReorgDetectorCheckerLayer; - -#[async_trait::async_trait] -impl WiringLayer for ReorgDetectorCheckerLayer { - fn layer_name(&self) -> &'static str { - "reorg_detector_checker_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. - let main_node_client = context.get_resource::()?.0; - - let pool_resource = context.get_resource::>()?; - let pool = pool_resource.get().await?; - - // Create and insert precondition. - context.add_task(CheckerPrecondition { - pool: pool.clone(), - reorg_detector: ReorgDetector::new(main_node_client, pool), - }); - - Ok(()) - } -} - -pub struct CheckerPrecondition { - pool: ConnectionPool, - reorg_detector: ReorgDetector, -} - -#[async_trait::async_trait] -impl Task for CheckerPrecondition { - fn kind(&self) -> TaskKind { - TaskKind::Precondition - } - - fn id(&self) -> TaskId { - "reorg_detector_checker".into() - } - - async fn run(mut self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - // Given that this is a precondition -- i.e. something that starts before some invariants are met, - // we need to first ensure that there is at least one batch in the database (there may be none if - // either genesis or snapshot recovery has not been performed yet). - let earliest_batch = zksync_dal::helpers::wait_for_l1_batch( - &self.pool, - REORG_DETECTED_SLEEP_INTERVAL, - &mut stop_receiver.0, - ) - .await?; - if earliest_batch.is_none() { - // Stop signal received. - return Ok(()); - } - - loop { - match self.reorg_detector.run_once(stop_receiver.0.clone()).await { - Ok(()) => return Ok(()), - Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => { - tracing::warn!( - "Reorg detected, last correct L1 batch #{}. Waiting till it will be resolved. 
Sleep for {} seconds and retry", - last_correct_l1_batch, REORG_DETECTED_SLEEP_INTERVAL.as_secs() - ); - tokio::time::sleep(REORG_DETECTED_SLEEP_INTERVAL).await; - } - Err(err) => return Err(err).context("reorg_detector.check_consistency()"), - } - } - } -} diff --git a/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs b/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs deleted file mode 100644 index 04ebb9ec3c14..000000000000 --- a/core/node/node_framework/src/implementations/layers/reorg_detector_runner.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::sync::Arc; - -use anyhow::Context; -use zksync_block_reverter::BlockReverter; -use zksync_reorg_detector::{self, ReorgDetector}; - -use crate::{ - implementations::resources::{ - main_node_client::MainNodeClientResource, - pools::{MasterPool, PoolResource}, - reverter::BlockReverterResource, - }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId, TaskKind}, - wiring_layer::{WiringError, WiringLayer}, -}; - -/// Wiring layer for [`ReorgDetector`] runner. -/// Layer responsible for detecting reorg and reverting blocks in case it was found. -/// -/// ## Requests resources -/// -/// - `MainNodeClientResource` -/// - `PoolResource` -/// - `BlockReverterResource` -/// -/// ## Adds oneshot tasks -/// -/// - `RunnerUnconstrainedOneshotTask` -#[derive(Debug)] -pub struct ReorgDetectorRunnerLayer; - -#[async_trait::async_trait] -impl WiringLayer for ReorgDetectorRunnerLayer { - fn layer_name(&self) -> &'static str { - "reorg_detector_runner_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. - let main_node_client = context.get_resource::()?.0; - - let pool_resource = context.get_resource::>()?; - let pool = pool_resource.get().await?; - - let reverter = context.get_resource::()?.0; - - // Create and insert task. - context.add_task(RunnerUnconstrainedOneshotTask { - reorg_detector: ReorgDetector::new(main_node_client, pool), - reverter, - }); - - Ok(()) - } -} - -pub struct RunnerUnconstrainedOneshotTask { - reorg_detector: ReorgDetector, - reverter: Arc, -} - -#[async_trait::async_trait] -impl Task for RunnerUnconstrainedOneshotTask { - fn kind(&self) -> TaskKind { - TaskKind::UnconstrainedOneshotTask - } - - fn id(&self) -> TaskId { - "reorg_detector_runner".into() - } - - async fn run(mut self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - match self.reorg_detector.run_once(stop_receiver.0.clone()).await { - Ok(()) => {} - Err(zksync_reorg_detector::Error::ReorgDetected(last_correct_l1_batch)) => { - tracing::info!("Reverting to l1 batch number {last_correct_l1_batch}"); - self.reverter.roll_back(last_correct_l1_batch).await?; - tracing::info!("Revert successfully completed"); - } - Err(err) => return Err(err).context("reorg_detector.check_consistency()"), - } - Ok(()) - } -} diff --git a/core/node/node_framework/src/implementations/layers/sigint.rs b/core/node/node_framework/src/implementations/layers/sigint.rs index 9df13285b3a1..014bfdbdde14 100644 --- a/core/node/node_framework/src/implementations/layers/sigint.rs +++ b/core/node/node_framework/src/implementations/layers/sigint.rs @@ -1,39 +1,47 @@ use tokio::sync::oneshot; use crate::{ - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; /// Wiring layer that changes the handling of SIGINT signal, preventing an immediate shutdown. 
/// Instead, it would propagate the signal to the rest of the node, allowing it to shut down gracefully. -/// -/// ## Adds tasks -/// -/// - `SigintHandlerTask` #[derive(Debug)] pub struct SigintHandlerLayer; +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: SigintHandlerTask, +} + #[async_trait::async_trait] impl WiringLayer for SigintHandlerLayer { + type Input = (); + type Output = Output; + fn layer_name(&self) -> &'static str { "sigint_handler_layer" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { - // SIGINT may happen at any time, so we must handle it as soon as it happens. - node.add_task(SigintHandlerTask); - Ok(()) + async fn wire(self, _input: Self::Input) -> Result { + Ok(Output { + task: SigintHandlerTask, + }) } } #[derive(Debug)] -struct SigintHandlerTask; +pub struct SigintHandlerTask; #[async_trait::async_trait] impl Task for SigintHandlerTask { fn kind(&self) -> TaskKind { + // SIGINT may happen at any time, so we must handle it as soon as it happens. TaskKind::UnconstrainedTask } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs index e923bc9f567a..ba7e87dcca74 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs @@ -13,29 +13,32 @@ use crate::{ state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, sync_state::SyncStateResource, }, - resource::Unique, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for `ExternalIO`, an IO part of state keeper used by the external node. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `MainNodeClientResource` -/// -/// ## Adds resources -/// -/// - `SyncStateResource` -/// - `ActionQueueSenderResource` -/// - `StateKeeperIOResource` -/// - `ConditionalSealerResource` #[derive(Debug)] pub struct ExternalIOLayer { chain_id: L2ChainId, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub pool: PoolResource, + pub main_node_client: MainNodeClientResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub sync_state: SyncStateResource, + pub action_queue_sender: ActionQueueSenderResource, + pub io: StateKeeperIOResource, + pub sealer: ConditionalSealerResource, +} + impl ExternalIOLayer { pub fn new(chain_id: L2ChainId) -> Self { Self { chain_id } @@ -44,38 +47,39 @@ impl ExternalIOLayer { #[async_trait::async_trait] impl WiringLayer for ExternalIOLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "external_io_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Fetch required resources. - let master_pool = context.get_resource::>()?; - let MainNodeClientResource(main_node_client) = context.get_resource()?; - + async fn wire(self, input: Self::Input) -> Result { // Create `SyncState` resource. let sync_state = SyncState::default(); - context.insert_resource(SyncStateResource(sync_state))?; // Create `ActionQueueSender` resource. let (action_queue_sender, action_queue) = ActionQueue::new(); - context.insert_resource(ActionQueueSenderResource(Unique::new(action_queue_sender)))?; // Create external IO resource. 
- let io_pool = master_pool.get().await.context("Get master pool")?; + let io_pool = input.pool.get().await.context("Get master pool")?; let io = ExternalIO::new( io_pool, action_queue, - Box::new(main_node_client.for_component("external_io")), + Box::new(input.main_node_client.0.for_component("external_io")), self.chain_id, ) .await .context("Failed initializing I/O for external node state keeper")?; - context.insert_resource(StateKeeperIOResource(Unique::new(Box::new(io))))?; // Create sealer. - context.insert_resource(ConditionalSealerResource(Arc::new(NoopSealer)))?; + let sealer = ConditionalSealerResource(Arc::new(NoopSealer)); - Ok(()) + Ok(Output { + sync_state: sync_state.into(), + action_queue_sender: action_queue_sender.into(), + io: io.into(), + sealer, + }) } } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs index 796b147d1c60..33d3b5676aac 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs @@ -2,22 +2,23 @@ use zksync_state_keeper::MainBatchExecutor; use crate::{ implementations::resources::state_keeper::BatchExecutorResource, - resource::Unique, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; /// Wiring layer for `MainBatchExecutor`, part of the state keeper responsible for running the VM. -/// -/// ## Adds resources -/// -/// - `MainBatchExecutor` #[derive(Debug)] pub struct MainBatchExecutorLayer { save_call_traces: bool, optional_bytecode_compression: bool, } +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub batch_executor: BatchExecutorResource, +} + impl MainBatchExecutorLayer { pub fn new(save_call_traces: bool, optional_bytecode_compression: bool) -> Self { Self { @@ -29,15 +30,19 @@ impl MainBatchExecutorLayer { #[async_trait::async_trait] impl WiringLayer for MainBatchExecutorLayer { + type Input = (); + type Output = Output; + fn layer_name(&self) -> &'static str { "main_batch_executor_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { let builder = MainBatchExecutor::new(self.save_call_traces, self.optional_bytecode_compression); - context.insert_resource(BatchExecutorResource(Unique::new(Box::new(builder))))?; - Ok(()) + Ok(Output { + batch_executor: builder.into(), + }) } } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index 05eff33303a5..cfab1f186438 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use anyhow::Context as _; use zksync_config::configs::{ chain::{MempoolConfig, StateKeeperConfig}, @@ -14,10 +12,10 @@ use crate::{ pools::{MasterPool, PoolResource}, state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, }, - resource::Unique, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for `MempoolIO`, an IO part of state keeper used by the main node. 
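The hunk below (and several later ones, e.g. for `AsyncCatchupTask`, `L2BlockSealerTask`, and the VM runner components) applies the same simplification to tasks: rather than keeping a private newtype wrapper such as `MempoolFetcherTask(MempoolFetcher)`, `Task` is implemented directly on the component type, unboxing `self` with `(*self)` to reach its inherent run loop. A sketch of the shape, assuming a hypothetical `Fetcher` component with an inherent `async fn run(self, stop: watch::Receiver<bool>) -> anyhow::Result<()>`:

#[async_trait::async_trait]
impl Task for Fetcher {
    fn id(&self) -> TaskId {
        "example/fetcher".into()
    }

    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // Unbox the task and delegate to the component's own run loop,
        // handing over the raw `watch::Receiver<bool>` it expects.
        (*self).run(stop_receiver.0).await
    }
}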
@@ -43,6 +41,22 @@ pub struct MempoolIOLayer { wallets: wallets::StateKeeper, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub fee_input: FeeInputResource, + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub state_keeper_io: StateKeeperIOResource, + pub conditional_sealer: ConditionalSealerResource, + #[context(task)] + pub mempool_fetcher: MempoolFetcher, +} + impl MempoolIOLayer { pub fn new( zksync_network_id: L2ChainId, @@ -78,14 +92,16 @@ impl MempoolIOLayer { #[async_trait::async_trait] impl WiringLayer for MempoolIOLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "mempool_io_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Fetch required resources. - let batch_fee_input_provider = context.get_resource::()?.0; - let master_pool = context.get_resource::>()?; + async fn wire(self, input: Self::Input) -> Result { + let batch_fee_input_provider = input.fee_input.0; + let master_pool = input.master_pool; // Create mempool fetcher task. let mempool_guard = self.build_mempool_guard(&master_pool).await?; @@ -99,7 +115,6 @@ impl WiringLayer for MempoolIOLayer { &self.mempool_config, mempool_fetcher_pool, ); - context.add_task(MempoolFetcherTask(mempool_fetcher)); // Create mempool IO resource. let mempool_db_pool = master_pool @@ -116,26 +131,25 @@ impl WiringLayer for MempoolIOLayer { self.zksync_network_id, ) .await?; - context.insert_resource(StateKeeperIOResource(Unique::new(Box::new(io))))?; // Create sealer. let sealer = SequencerSealer::new(self.state_keeper_config); - context.insert_resource(ConditionalSealerResource(Arc::new(sealer)))?; - Ok(()) + Ok(Output { + state_keeper_io: io.into(), + conditional_sealer: sealer.into(), + mempool_fetcher, + }) } } -#[derive(Debug)] -struct MempoolFetcherTask(MempoolFetcher); - #[async_trait::async_trait] -impl Task for MempoolFetcherTask { +impl Task for MempoolFetcher { fn id(&self) -> TaskId { "state_keeper/mempool_fetcher".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.0.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs index 15237a5b3bd5..b0dfe0f1600c 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs @@ -24,31 +24,39 @@ use crate::{ StateKeeperIOResource, }, }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId}, + service::{ShutdownHook, StopReceiver}, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for the state keeper. 
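Shutdown hooks move the same way as resources and tasks: instead of `context.add_shutdown_hook(..)`, the hook is built with `ShutdownHook::new(name, future)` inside `wire()` and returned as a plain `Output` field with no `#[context(task)]` marker, as both the metadata calculator hunk above and the state keeper hunk below do for RocksDB termination. A trimmed sketch mirroring the state keeper output:

#[derive(Debug, IntoContext)]
#[context(crate = crate)]
pub struct Output {
    #[context(task)]
    pub state_keeper: StateKeeperTask,
    // Shutdown hooks are returned as ordinary fields; no attribute is needed.
    pub rocksdb_termination_hook: ShutdownHook,
}

// Inside `wire()` (name of the hook is illustrative):
let rocksdb_termination_hook = ShutdownHook::new("rocksdb_termination", async {
    // Wait for all RocksDB instances to be dropped before letting the node exit.
    tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination)
        .await
        .context("failed terminating RocksDB instances")
});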
-/// -/// ## Requests resources -/// -/// - `StateKeeperIOResource` -/// - `BatchExecutorResource` -/// - `OutputHandlerResource` -/// - `ConditionalSealerResource` -/// - `PoolResource` -/// -/// ## Adds tasks -/// -/// - `RocksdbCatchupTask` -/// - `StateKeeperTask` #[derive(Debug)] pub struct StateKeeperLayer { state_keeper_db_path: String, rocksdb_options: RocksdbStorageOptions, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub state_keeper_io: StateKeeperIOResource, + pub batch_executor: BatchExecutorResource, + pub output_handler: OutputHandlerResource, + pub conditional_sealer: ConditionalSealerResource, + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub state_keeper: StateKeeperTask, + #[context(task)] + pub rocksdb_catchup: AsyncCatchupTask, + pub rocksdb_termination_hook: ShutdownHook, +} + impl StateKeeperLayer { pub fn new(state_keeper_db_path: String, rocksdb_options: RocksdbStorageOptions) -> Self { Self { @@ -60,56 +68,62 @@ impl StateKeeperLayer { #[async_trait::async_trait] impl WiringLayer for StateKeeperLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "state_keeper_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let io = context - .get_resource::()? + async fn wire(self, input: Self::Input) -> Result { + let io = input + .state_keeper_io .0 .take() .context("StateKeeperIO was provided but taken by some other task")?; - let batch_executor_base = context - .get_resource::()? + let batch_executor_base = input + .batch_executor .0 .take() .context("L1BatchExecutorBuilder was provided but taken by some other task")?; - let output_handler = context - .get_resource::()? + let output_handler = input + .output_handler .0 .take() .context("HandleStateKeeperOutput was provided but taken by another task")?; - let sealer = context.get_resource::()?.0; - let master_pool = context.get_resource::>()?; + let sealer = input.conditional_sealer.0; + let master_pool = input.master_pool; - let (storage_factory, task) = AsyncRocksdbCache::new( + let (storage_factory, rocksdb_catchup) = AsyncRocksdbCache::new( master_pool.get_custom(2).await?, self.state_keeper_db_path, self.rocksdb_options, ); - context.add_task(RocksdbCatchupTask(task)); - context.add_task(StateKeeperTask { + let state_keeper = StateKeeperTask { io, batch_executor_base, output_handler, sealer, storage_factory: Arc::new(storage_factory), - }); + }; - context.add_shutdown_hook("rocksdb_terminaton", async { + let rocksdb_termination_hook = ShutdownHook::new("rocksdb_terminaton", async { // Wait for all the instances of RocksDB to be destroyed. 
tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination) .await .context("failed terminating RocksDB instances") }); - Ok(()) + Ok(Output { + state_keeper, + rocksdb_catchup, + rocksdb_termination_hook, + }) } } #[derive(Debug)] -struct StateKeeperTask { +pub struct StateKeeperTask { io: Box, batch_executor_base: Box, output_handler: OutputHandler, @@ -136,18 +150,17 @@ impl Task for StateKeeperTask { } } -#[derive(Debug)] -struct RocksdbCatchupTask(AsyncCatchupTask); - #[async_trait::async_trait] -impl Task for RocksdbCatchupTask { +impl Task for AsyncCatchupTask { + fn kind(&self) -> TaskKind { + TaskKind::OneshotTask + } + fn id(&self) -> TaskId { "state_keeper/rocksdb_catchup_task".into() } - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.0.run(stop_receiver.0.clone()).await?; - stop_receiver.0.changed().await?; - Ok(()) + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs index d79ce9a5846f..f639d72fe40a 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; +use zksync_node_framework_derive::FromContext; use zksync_state_keeper::{ - io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, OutputHandler, + io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, L2BlockSealerTask, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, }; use zksync_types::Address; @@ -12,9 +13,10 @@ use crate::{ sync_state::SyncStateResource, }, resource::Unique, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; /// Wiring layer for the state keeper output handler. @@ -23,7 +25,6 @@ use crate::{ /// /// - `PoolResource` /// - `SyncStateResource` (optional) -/// - `AppHealthCheckResource` (adds a health check) /// /// ## Adds resources /// @@ -46,6 +47,21 @@ pub struct OutputHandlerLayer { protective_reads_persistence_enabled: bool, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub sync_state: Option, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub output_handler: OutputHandlerResource, + #[context(task)] + pub l2_block_sealer: L2BlockSealerTask, +} + impl OutputHandlerLayer { pub fn new(l2_shared_bridge_addr: Address, l2_block_seal_queue_capacity: usize) -> Self { Self { @@ -72,23 +88,18 @@ impl OutputHandlerLayer { #[async_trait::async_trait] impl WiringLayer for OutputHandlerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "state_keeper_output_handler_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Fetch required resources. - let master_pool = context.get_resource::>()?; - // Use `SyncState` if provided. - let sync_state = match context.get_resource::() { - Ok(sync_state) => Some(sync_state.0), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - + async fn wire(self, input: Self::Input) -> Result { // Create L2 block sealer task and output handler. 
// L2 Block sealing process is parallelized, so we have to provide enough pooled connections. - let persistence_pool = master_pool + let persistence_pool = input + .master_pool .get_custom(L2BlockSealProcess::subtasks_len()) .await .context("Get master pool")?; @@ -110,19 +121,18 @@ impl WiringLayer for OutputHandlerLayer { let tree_writes_persistence = TreeWritesPersistence::new(persistence_pool); let mut output_handler = OutputHandler::new(Box::new(persistence)) .with_handler(Box::new(tree_writes_persistence)); - if let Some(sync_state) = sync_state { - output_handler = output_handler.with_handler(Box::new(sync_state)); + if let Some(sync_state) = input.sync_state { + output_handler = output_handler.with_handler(Box::new(sync_state.0)); } - context.insert_resource(OutputHandlerResource(Unique::new(output_handler)))?; - context.add_task(L2BlockSealerTask(l2_block_sealer)); + let output_handler = OutputHandlerResource(Unique::new(output_handler)); - Ok(()) + Ok(Output { + output_handler, + l2_block_sealer, + }) } } -#[derive(Debug)] -struct L2BlockSealerTask(zksync_state_keeper::L2BlockSealerTask); - #[async_trait::async_trait] impl Task for L2BlockSealerTask { fn id(&self) -> TaskId { @@ -131,6 +141,6 @@ impl Task for L2BlockSealerTask { async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { // Miniblock sealer will exit itself once sender is dropped. - self.0.run().await + (*self).run().await } } diff --git a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs index cca96f9ee079..1f86b43f7a5b 100644 --- a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs +++ b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs @@ -8,66 +8,74 @@ use crate::{ pools::{MasterPool, PoolResource}, sync_state::SyncStateResource, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for [`SyncState`] maintenance. /// If [`SyncStateResource`] is already provided by another layer, this layer does nothing. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `MainNodeClientResource` -/// -/// ## Adds resources -/// -/// - `SyncStateResource` -/// -/// ## Adds tasks -/// -/// - `SyncStateUpdater` #[derive(Debug)] pub struct SyncStateUpdaterLayer; +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + /// Fetched to check whether the `SyncState` was already provided by another layer. + pub sync_state: Option, + pub master_pool: PoolResource, + pub main_node_client: MainNodeClientResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub sync_state: Option, + #[context(task)] + pub sync_state_updater: Option, +} + #[async_trait::async_trait] impl WiringLayer for SyncStateUpdaterLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "sync_state_updater_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - if context.get_resource::().is_ok() { + async fn wire(self, input: Self::Input) -> Result { + if input.sync_state.is_some() { // `SyncState` was provided by some other layer -- we assume that the layer that added this resource // will be responsible for its maintenance. 
tracing::info!( "SyncState was provided by another layer, skipping SyncStateUpdaterLayer" ); - return Ok(()); + return Ok(Output { + sync_state: None, + sync_state_updater: None, + }); } - let pool = context.get_resource::>()?; - let MainNodeClientResource(main_node_client) = context.get_resource()?; + let connection_pool = input.master_pool.get().await?; + let MainNodeClientResource(main_node_client) = input.main_node_client; let sync_state = SyncState::default(); - // Insert resource. - context.insert_resource(SyncStateResource(sync_state.clone()))?; - - // Insert task - context.add_task(SyncStateUpdater { - sync_state, - connection_pool: pool.get().await?, - main_node_client, - }); - - Ok(()) + Ok(Output { + sync_state: Some(sync_state.clone().into()), + sync_state_updater: Some(SyncStateUpdater { + sync_state, + connection_pool, + main_node_client, + }), + }) } } #[derive(Debug)] -struct SyncStateUpdater { +pub struct SyncStateUpdater { sync_state: SyncState, connection_pool: ConnectionPool, main_node_client: Box>, diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs index dc03a0563709..68789082a226 100644 --- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs +++ b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs @@ -7,20 +7,13 @@ use crate::{ object_store::ObjectStoreResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for [`TeeVerifierInputProducer`]. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// -/// ## Adds tasks -/// -/// - `TeeVerifierInputProducer` #[derive(Debug)] pub struct TeeVerifierInputProducerLayer { l2_chain_id: L2ChainId, @@ -32,25 +25,35 @@ impl TeeVerifierInputProducerLayer { } } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub object_store: ObjectStoreResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: TeeVerifierInputProducer, +} + #[async_trait::async_trait] impl WiringLayer for TeeVerifierInputProducerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "tee_verifier_input_producer_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - // Get resources. - let pool_resource = context - .get_resource::>()? 
- .get() - .await?; - let object_store = context.get_resource::()?; - let tee = - TeeVerifierInputProducer::new(pool_resource, object_store.0, self.l2_chain_id).await?; - - context.add_task(tee); + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + let ObjectStoreResource(object_store) = input.object_store; + let task = TeeVerifierInputProducer::new(pool, object_store, self.l2_chain_id).await?; - Ok(()) + Ok(Output { task }) } } diff --git a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs index 76db94f1ac20..ca2e80142401 100644 --- a/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs +++ b/core/node/node_framework/src/implementations/layers/tree_data_fetcher.rs @@ -8,28 +8,35 @@ use crate::{ main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for [`TreeDataFetcher`]. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// - `MainNodeClientResource` -/// - `EthInterfaceResource` -/// - `AppHealthCheckResource` (adds a health check) -/// -/// ## Adds tasks -/// -/// - `TreeDataFetcher` #[derive(Debug)] pub struct TreeDataFetcherLayer { diamond_proxy_addr: Address, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub main_node_client: MainNodeClientResource, + pub eth_client: EthInterfaceResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: TreeDataFetcher, +} + impl TreeDataFetcherLayer { pub fn new(diamond_proxy_addr: Address) -> Self { Self { diamond_proxy_addr } @@ -38,32 +45,33 @@ impl TreeDataFetcherLayer { #[async_trait::async_trait] impl WiringLayer for TreeDataFetcherLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "tree_data_fetcher_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool = context.get_resource::>()?; - let MainNodeClientResource(client) = context.get_resource()?; - let EthInterfaceResource(eth_client) = context.get_resource()?; + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + let MainNodeClientResource(client) = input.main_node_client; + let EthInterfaceResource(eth_client) = input.eth_client; tracing::warn!( "Running tree data fetcher (allows a node to operate w/o a Merkle tree or w/o waiting the tree to catch up). \ This is an experimental feature; do not use unless you know what you're doing" ); - let fetcher = TreeDataFetcher::new(client, pool.get().await?) 
- .with_l1_data(eth_client, self.diamond_proxy_addr)?; + let task = + TreeDataFetcher::new(client, pool).with_l1_data(eth_client, self.diamond_proxy_addr)?; // Insert healthcheck - let AppHealthCheckResource(app_health) = context.get_resource_or_default(); - app_health - .insert_component(fetcher.health_check()) + input + .app_health + .0 + .insert_component(task.health_check()) .map_err(WiringError::internal)?; - // Insert task - context.add_task(fetcher); - - Ok(()) + Ok(Output { task }) } } diff --git a/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs b/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs index e3323a01b778..1e23bdfbd622 100644 --- a/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs +++ b/core/node/node_framework/src/implementations/layers/validate_chain_ids.rs @@ -5,9 +5,10 @@ use crate::{ implementations::resources::{ eth_interface::EthInterfaceResource, main_node_client::MainNodeClientResource, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer for chain ID validation precondition for external node. @@ -27,6 +28,20 @@ pub struct ValidateChainIdsLayer { l2_chain_id: L2ChainId, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub eth_client: EthInterfaceResource, + pub main_node_client: MainNodeClientResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub task: ValidateChainIdsTask, +} + impl ValidateChainIdsLayer { pub fn new(l1_chain_id: L1ChainId, l2_chain_id: L2ChainId) -> Self { Self { @@ -38,13 +53,16 @@ impl ValidateChainIdsLayer { #[async_trait::async_trait] impl WiringLayer for ValidateChainIdsLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "validate_chain_ids_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let EthInterfaceResource(query_client) = context.get_resource()?; - let MainNodeClientResource(main_node_client) = context.get_resource()?; + async fn wire(self, input: Self::Input) -> Result { + let EthInterfaceResource(query_client) = input.eth_client; + let MainNodeClientResource(main_node_client) = input.main_node_client; let task = ValidateChainIdsTask::new( self.l1_chain_id, @@ -53,9 +71,7 @@ impl WiringLayer for ValidateChainIdsLayer { main_node_client, ); - context.add_task(task); - - Ok(()) + Ok(Output { task }) } } diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs index 36ad14b8db5a..74b4b5e32072 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -1,15 +1,19 @@ use zksync_config::configs::vm_runner::BasicWitnessInputProducerConfig; use zksync_types::L2ChainId; -use zksync_vm_runner::BasicWitnessInputProducer; +use zksync_vm_runner::{ + BasicWitnessInputProducer, BasicWitnessInputProducerIo, ConcurrentOutputHandlerFactoryTask, + StorageSyncTask, +}; use crate::{ implementations::resources::{ object_store::ObjectStoreResource, pools::{MasterPool, PoolResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; 
#[derive(Debug)] @@ -30,15 +34,39 @@ impl BasicWitnessInputProducerLayer { } } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + pub object_store: ObjectStoreResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub output_handler_factory_task: + ConcurrentOutputHandlerFactoryTask, + #[context(task)] + pub loader_task: StorageSyncTask, + #[context(task)] + pub basic_witness_input_producer: BasicWitnessInputProducer, +} + #[async_trait::async_trait] impl WiringLayer for BasicWitnessInputProducerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "vm_runner_bwip" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool = context.get_resource::>()?; - let object_store = context.get_resource::()?; + async fn wire(self, input: Self::Input) -> Result { + let Input { + master_pool, + object_store, + } = input; let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new( // One for `StorageSyncTask` which can hold a long-term connection in case it needs to @@ -62,29 +90,21 @@ impl WiringLayer for BasicWitnessInputProducerLayer { ) .await?; - context.add_task(tasks.loader_task); - context.add_task(tasks.output_handler_factory_task); - context.add_task(BasicWitnessInputProducerTask { + Ok(Output { + output_handler_factory_task: tasks.output_handler_factory_task, + loader_task: tasks.loader_task, basic_witness_input_producer, - }); - Ok(()) + }) } } -#[derive(Debug)] -struct BasicWitnessInputProducerTask { - basic_witness_input_producer: BasicWitnessInputProducer, -} - #[async_trait::async_trait] -impl Task for BasicWitnessInputProducerTask { +impl Task for BasicWitnessInputProducer { fn id(&self) -> TaskId { "vm_runner/bwip".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.basic_witness_input_producer - .run(&stop_receiver.0) - .await + (*self).run(&stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs index 0b3f611038b2..91e92ffcd1ba 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs @@ -14,10 +14,8 @@ impl Task for StorageSyncTask { format!("vm_runner/{}/storage_sync", self.io().name()).into() } - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - StorageSyncTask::run(*self, stop_receiver.0.clone()).await?; - stop_receiver.0.changed().await?; - Ok(()) + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await } } @@ -27,9 +25,7 @@ impl Task for ConcurrentOutputHandlerFactoryTask { format!("vm_runner/{}/output_handler", self.io().name()).into() } - async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { - ConcurrentOutputHandlerFactoryTask::run(*self, stop_receiver.0.clone()).await?; - stop_receiver.0.changed().await?; - Ok(()) + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs index 6e33cca538fd..3b07d0cea139 100644 --- 
a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs @@ -1,31 +1,42 @@ use zksync_config::configs::vm_runner::ProtectiveReadsWriterConfig; +use zksync_node_framework_derive::FromContext; use zksync_types::L2ChainId; -use zksync_vm_runner::ProtectiveReadsWriter; +use zksync_vm_runner::{ + ConcurrentOutputHandlerFactoryTask, ProtectiveReadsIo, ProtectiveReadsWriter, StorageSyncTask, +}; use crate::{ implementations::resources::pools::{MasterPool, PoolResource}, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; /// Wiring layer for protective reads writer. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// -/// ## Adds tasks -/// -/// - `StorageSyncTask` -/// - `ConcurrentOutputHandlerFactoryTask` -/// - `ProtectiveReadsWriterTask` #[derive(Debug)] pub struct ProtectiveReadsWriterLayer { protective_reads_writer_config: ProtectiveReadsWriterConfig, zksync_network_id: L2ChainId, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub protective_reads_writer: ProtectiveReadsWriter, + #[context(task)] + pub loader_task: StorageSyncTask, + #[context(task)] + pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, +} + impl ProtectiveReadsWriterLayer { pub fn new( protective_reads_writer_config: ProtectiveReadsWriterConfig, @@ -40,12 +51,15 @@ impl ProtectiveReadsWriterLayer { #[async_trait::async_trait] impl WiringLayer for ProtectiveReadsWriterLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "vm_runner_protective_reads" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let master_pool = context.get_resource::>()?; + async fn wire(self, input: Self::Input) -> Result { + let master_pool = input.master_pool; let (protective_reads_writer, tasks) = ProtectiveReadsWriter::new( // One for `StorageSyncTask` which can hold a long-term connection in case it needs to @@ -67,27 +81,21 @@ impl WiringLayer for ProtectiveReadsWriterLayer { ) .await?; - context.add_task(tasks.loader_task); - context.add_task(tasks.output_handler_factory_task); - context.add_task(ProtectiveReadsWriterTask { + Ok(Output { protective_reads_writer, - }); - Ok(()) + loader_task: tasks.loader_task, + output_handler_factory_task: tasks.output_handler_factory_task, + }) } } -#[derive(Debug)] -struct ProtectiveReadsWriterTask { - protective_reads_writer: ProtectiveReadsWriter, -} - #[async_trait::async_trait] -impl Task for ProtectiveReadsWriterTask { +impl Task for ProtectiveReadsWriter { fn id(&self) -> TaskId { "vm_runner/protective_reads_writer".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.protective_reads_writer.run(&stop_receiver.0).await + (*self).run(&stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs index 805e7c91eaeb..b7718a41fab0 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/caches.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/caches.rs @@ -1,36 +1,40 @@ use std::time::Duration; -use 
zksync_node_api_server::web3::mempool_cache::{self, MempoolCache}; +use zksync_node_api_server::web3::mempool_cache::{MempoolCache, MempoolCacheUpdateTask}; +use zksync_node_framework_derive::FromContext; use crate::{ implementations::resources::{ pools::{PoolResource, ReplicaPool}, web3_api::MempoolCacheResource, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + IntoContext, }; /// Wiring layer for API mempool cache. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// -/// ## Adds resources -/// -/// - `MempoolCacheResource` -/// -/// ## Adds tasks -/// -/// - `MempoolCacheUpdateTask` #[derive(Debug)] pub struct MempoolCacheLayer { capacity: usize, update_interval: Duration, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub replica_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub mempool_cache: MempoolCacheResource, + #[context(task)] + pub update_task: MempoolCacheUpdateTask, +} + impl MempoolCacheLayer { pub fn new(capacity: usize, update_interval: Duration) -> Self { Self { @@ -42,24 +46,24 @@ impl MempoolCacheLayer { #[async_trait::async_trait] impl WiringLayer for MempoolCacheLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "mempool_cache_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let pool_resource = context.get_resource::>()?; - let replica_pool = pool_resource.get().await?; + async fn wire(self, input: Self::Input) -> Result { + let replica_pool = input.replica_pool.get().await?; let mempool_cache = MempoolCache::new(self.capacity); let update_task = mempool_cache.update_task(replica_pool, self.update_interval); - context.add_task(MempoolCacheUpdateTask(update_task)); - context.insert_resource(MempoolCacheResource(mempool_cache))?; - Ok(()) + Ok(Output { + mempool_cache: mempool_cache.into(), + update_task, + }) } } -#[derive(Debug)] -pub struct MempoolCacheUpdateTask(mempool_cache::MempoolCacheUpdateTask); - #[async_trait::async_trait] impl Task for MempoolCacheUpdateTask { fn id(&self) -> TaskId { @@ -67,6 +71,6 @@ impl Task for MempoolCacheUpdateTask { } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.0.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server.rs index 365f49c1122c..8b35e13827be 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server.rs @@ -13,9 +13,10 @@ use crate::{ sync_state::SyncStateResource, web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Set of optional variables that can be altered to modify the behavior of API builder. 
@@ -92,6 +93,29 @@ pub struct Web3ServerLayer { optional_config: Web3ServerOptionalConfig, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub replica_pool: PoolResource, + pub tx_sender: TxSenderResource, + pub sync_state: Option, + pub tree_api_client: Option, + pub mempool_cache: MempoolCacheResource, + #[context(default)] + pub circuit_breakers: CircuitBreakersResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub web3_api_task: Web3ApiTask, + #[context(task)] + pub garbage_collector_task: ApiTaskGarbageCollector, +} + impl Web3ServerLayer { pub fn http( port: u16, @@ -122,6 +146,9 @@ impl Web3ServerLayer { #[async_trait::async_trait] impl WiringLayer for Web3ServerLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { match self.transport { Transport::Http => "web3_http_server_layer", @@ -129,23 +156,15 @@ impl WiringLayer for Web3ServerLayer { } } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { // Get required resources. - let replica_resource_pool = context.get_resource::>()?; + let replica_resource_pool = input.replica_pool; let updaters_pool = replica_resource_pool.get_custom(2).await?; let replica_pool = replica_resource_pool.get().await?; - let tx_sender = context.get_resource::()?.0; - let sync_state = match context.get_resource::() { - Ok(sync_state) => Some(sync_state.0), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - let tree_api_client = match context.get_resource::() { - Ok(client) => Some(client.0), - Err(WiringError::ResourceLacking { .. }) => None, - Err(err) => return Err(err), - }; - let MempoolCacheResource(mempool_cache) = context.get_resource()?; + let TxSenderResource(tx_sender) = input.tx_sender; + let MempoolCacheResource(mempool_cache) = input.mempool_cache; + let sync_state = input.sync_state.map(|state| state.0); + let tree_api_client = input.tree_api_client.map(|client| client.0); // Build server. let mut api_builder = @@ -180,14 +199,15 @@ impl WiringLayer for Web3ServerLayer { // Insert healthcheck. let api_health_check = server.health_check(); - let AppHealthCheckResource(app_health) = context.get_resource_or_default(); - app_health + input + .app_health + .0 .insert_component(api_health_check) .map_err(WiringError::internal)?; // Insert circuit breaker. - let circuit_breaker_resource = context.get_resource_or_default::(); - circuit_breaker_resource + input + .circuit_breakers .breakers .insert(Box::new(ReplicationLagChecker { pool: replica_pool, @@ -203,10 +223,10 @@ impl WiringLayer for Web3ServerLayer { task_sender, }; let garbage_collector_task = ApiTaskGarbageCollector { task_receiver }; - context.add_task(web3_api_task); - context.add_task(garbage_collector_task); - - Ok(()) + Ok(Output { + web3_api_task, + garbage_collector_task, + }) } } @@ -221,7 +241,7 @@ impl WiringLayer for Web3ServerLayer { // TODO (QIT-26): Once we switch the codebase to only use the framework, we need to properly refactor the API to only // use abstractions provided by this framework and not spawn any tasks on its own. 
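// Illustrative sketch, not taken from the diff: how optional and defaulted dependencies are
// expressed under the new model, mirroring the `Web3ServerLayer` input above. The behavior
// attributed to each field shape is inferred from the `get_resource` calls it replaces in
// these hunks.
use crate::{
    implementations::resources::{
        healthcheck::AppHealthCheckResource, sync_state::SyncStateResource,
        web3_api::TxSenderResource,
    },
    FromContext,
};

#[derive(Debug, FromContext)]
#[context(crate = crate)]
pub struct Input {
    // Required: wiring fails with `WiringError::ResourceLacking` if the resource is absent.
    pub tx_sender: TxSenderResource,
    // Optional: resolves to `None` instead of failing, replacing the old
    // `match context.get_resource() { Err(WiringError::ResourceLacking { .. }) => None, .. }`.
    pub sync_state: Option<SyncStateResource>,
    // Defaulted: falls back to `Default::default()`, replacing `get_resource_or_default()`.
    #[context(default)]
    pub app_health: AppHealthCheckResource,
}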
#[derive(Debug)] -struct Web3ApiTask { +pub struct Web3ApiTask { transport: Transport, server: ApiServer, task_sender: oneshot::Sender>, @@ -251,7 +271,7 @@ impl Task for Web3ApiTask { /// Helper task that waits for a list of task join handles and then awaits them all. /// For more details, see [`Web3ApiTask`]. #[derive(Debug)] -struct ApiTaskGarbageCollector { +pub struct ApiTaskGarbageCollector { task_receiver: oneshot::Receiver>, } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs b/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs index b481e1ea25d7..07371a65131e 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tree_api_client.rs @@ -6,27 +6,34 @@ use crate::{ implementations::resources::{ healthcheck::AppHealthCheckResource, web3_api::TreeApiClientResource, }, - service::ServiceContext, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; /// Wiring layer that provides the `TreeApiHttpClient` into the `ServiceContext` resources, if there is no /// other client already inserted. /// /// In case a client is already provided in the context, this layer does nothing. -/// -/// ## Requests resources -/// -/// - `AppHealthCheckResource` (adds a health check) -/// -/// ## Adds resources -/// -/// - `TreeApiClientResource` (if no such resource already exists) #[derive(Debug)] pub struct TreeApiClientLayer { url: Option, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + /// Fetched to check whether the `TreeApiClientResource` was already provided by another layer. + pub tree_api_client: Option, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub tree_api_client: Option, +} + impl TreeApiClientLayer { pub fn http(url: Option) -> Self { Self { url } @@ -35,33 +42,36 @@ impl TreeApiClientLayer { #[async_trait::async_trait] impl WiringLayer for TreeApiClientLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "tree_api_client_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - if let Some(url) = &self.url { - let client = Arc::new(TreeApiHttpClient::new(url)); - match context.insert_resource(TreeApiClientResource(client.clone())) { - Ok(()) => { - // There was no client added before, we added one. - } - Err(WiringError::ResourceAlreadyProvided { .. }) => { - // Some other client was already added. We don't want to replace it. - return Ok(()); - } - err @ Err(_) => { - // Propagate any other error. - return err; - } - } - - // Only provide the health check if necessary. 
- let AppHealthCheckResource(app_health) = context.get_resource_or_default(); - app_health - .insert_custom_component(client) - .map_err(WiringError::internal)?; + async fn wire(self, input: Self::Input) -> Result { + if input.tree_api_client.is_some() { + tracing::info!("Tree API client is already provided"); + return Ok(Output { + tree_api_client: None, + }); } - Ok(()) + + let Some(url) = &self.url else { + tracing::info!("No Tree API client URL provided, not adding a fallback client"); + return Ok(Output { + tree_api_client: None, + }); + }; + + let client = Arc::new(TreeApiHttpClient::new(url)); + input + .app_health + .0 + .insert_custom_component(client.clone()) + .map_err(WiringError::internal)?; + Ok(Output { + tree_api_client: Some(client.into()), + }) } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index 0b45b3279680..4ece9b024300 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -1,11 +1,11 @@ -use std::{fmt, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use tokio::sync::RwLock; use zksync_node_api_server::{ execution_sandbox::{VmConcurrencyBarrier, VmConcurrencyLimiter}, tx_sender::{ApiContracts, TxSenderBuilder, TxSenderConfig}, }; -use zksync_state::PostgresStorageCaches; +use zksync_state::{PostgresStorageCaches, PostgresStorageCachesTask}; use zksync_types::Address; use zksync_web3_decl::{ client::{DynClient, L2}, @@ -21,9 +21,10 @@ use crate::{ state_keeper::ConditionalSealerResource, web3_api::{TxSenderResource, TxSinkResource}, }, - service::{ServiceContext, StopReceiver}, + service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, }; #[derive(Debug)] @@ -61,6 +62,28 @@ pub struct TxSenderLayer { whitelisted_tokens_for_aa_cache: bool, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub tx_sink: TxSinkResource, + pub replica_pool: PoolResource, + pub fee_input: FeeInputResource, + pub main_node_client: Option, + pub sealer: Option, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub tx_sender: TxSenderResource, + #[context(task)] + pub vm_concurrency_barrier: VmConcurrencyBarrier, + #[context(task)] + pub postgres_storage_caches_task: Option, + #[context(task)] + pub whitelisted_tokens_for_aa_update_task: Option, +} + impl TxSenderLayer { pub fn new( tx_sender_config: TxSenderConfig, @@ -89,21 +112,19 @@ impl TxSenderLayer { #[async_trait::async_trait] impl WiringLayer for TxSenderLayer { + type Input = Input; + type Output = Output; + fn layer_name(&self) -> &'static str { "tx_sender_layer" } - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, input: Self::Input) -> Result { // Get required resources. - let tx_sink = context.get_resource::()?.0; - let pool_resource = context.get_resource::>()?; - let replica_pool = pool_resource.get().await?; - let sealer = match context.get_resource::() { - Ok(sealer) => Some(sealer.0), - Err(WiringError::ResourceLacking { .. 
}) => None, - Err(other) => return Err(other), - }; - let fee_input = context.get_resource::()?.0; + let tx_sink = input.tx_sink.0; + let replica_pool = input.replica_pool.get().await?; + let sealer = input.sealer.map(|s| s.0); + let fee_input = input.fee_input.0; // Initialize Postgres caches. let factory_deps_capacity = self.postgres_storage_caches_config.factory_deps_cache_size; @@ -114,20 +135,18 @@ impl WiringLayer for TxSenderLayer { let mut storage_caches = PostgresStorageCaches::new(factory_deps_capacity, initial_writes_capacity); - if values_capacity > 0 { - let values_cache_task = storage_caches - .configure_storage_values_cache(values_capacity, replica_pool.clone()); - context.add_task(PostgresStorageCachesTask { - task: values_cache_task, - }); - } + let postgres_storage_caches_task = if values_capacity > 0 { + Some( + storage_caches + .configure_storage_values_cache(values_capacity, replica_pool.clone()), + ) + } else { + None + }; // Initialize `VmConcurrencyLimiter`. let (vm_concurrency_limiter, vm_concurrency_barrier) = VmConcurrencyLimiter::new(self.max_vm_concurrency); - context.add_task(VmConcurrencyBarrierTask { - barrier: vm_concurrency_barrier, - }); // Build `TxSender`. let mut tx_sender = TxSenderBuilder::new(self.tx_sender_config, replica_pool, tx_sink); @@ -136,15 +155,23 @@ impl WiringLayer for TxSenderLayer { } // Add the task for updating the whitelisted tokens for the AA cache. - if self.whitelisted_tokens_for_aa_cache { - let MainNodeClientResource(main_node_client) = context.get_resource()?; + let whitelisted_tokens_for_aa_update_task = if self.whitelisted_tokens_for_aa_cache { + let MainNodeClientResource(main_node_client) = + input.main_node_client.ok_or_else(|| { + WiringError::Configuration( + "Main node client is required for the whitelisted tokens for AA cache" + .into(), + ) + })?; let whitelisted_tokens = Arc::new(RwLock::new(Default::default())); - context.add_task(WhitelistedTokensForAaUpdateTask { + tx_sender = tx_sender.with_whitelisted_tokens_for_aa(whitelisted_tokens.clone()); + Some(WhitelistedTokensForAaUpdateTask { whitelisted_tokens: whitelisted_tokens.clone(), main_node_client, - }); - tx_sender = tx_sender.with_whitelisted_tokens_for_aa(whitelisted_tokens); - } + }) + } else { + None + }; let tx_sender = tx_sender.build( fee_input, @@ -152,20 +179,13 @@ impl WiringLayer for TxSenderLayer { self.api_contracts, storage_caches, ); - context.insert_resource(TxSenderResource(tx_sender))?; - Ok(()) - } -} - -struct PostgresStorageCachesTask { - task: zksync_state::PostgresStorageCachesTask, -} - -impl fmt::Debug for PostgresStorageCachesTask { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PostgresStorageCachesTask") - .finish_non_exhaustive() + Ok(Output { + tx_sender: tx_sender.into(), + postgres_storage_caches_task, + vm_concurrency_barrier, + whitelisted_tokens_for_aa_update_task, + }) } } @@ -176,16 +196,12 @@ impl Task for PostgresStorageCachesTask { } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.task.run(stop_receiver.0).await + (*self).run(stop_receiver.0).await } } -struct VmConcurrencyBarrierTask { - barrier: VmConcurrencyBarrier, -} - #[async_trait::async_trait] -impl Task for VmConcurrencyBarrierTask { +impl Task for VmConcurrencyBarrier { fn id(&self) -> TaskId { "vm_concurrency_barrier_task".into() } @@ -194,18 +210,18 @@ impl Task for VmConcurrencyBarrierTask { // Wait for the stop signal. 
stop_receiver.0.changed().await?; // Stop signal was received: seal the barrier so that no new VM requests are accepted. - self.barrier.close(); + self.close(); // Wait until all the existing API requests are processed. // We don't have to synchronize this with API servers being stopped, as they can decide themselves how to handle // ongoing requests during the shutdown. // We don't have to implement a timeout here either, as it'll be handled by the framework itself. - self.barrier.wait_until_stopped().await; + self.wait_until_stopped().await; Ok(()) } } #[derive(Debug)] -struct WhitelistedTokensForAaUpdateTask { +pub struct WhitelistedTokensForAaUpdateTask { whitelisted_tokens: Arc>>, main_node_client: Box>, } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs deleted file mode 100644 index f7530f835765..000000000000 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink.rs +++ /dev/null @@ -1,82 +0,0 @@ -use std::sync::Arc; - -use zksync_node_api_server::tx_sender::{ - master_pool_sink::MasterPoolSink, - proxy::{AccountNonceSweeperTask, TxProxy}, -}; - -use crate::{ - implementations::resources::{ - main_node_client::MainNodeClientResource, - pools::{MasterPool, PoolResource}, - web3_api::TxSinkResource, - }, - service::{ServiceContext, StopReceiver}, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, -}; - -/// Wiring layer for `TxSink` -- an abstraction that handles outputs from `TxSender`. -/// -/// ## Requests resources -/// -/// - `PoolResource` -/// -/// ## Adds resources -/// -/// - `TxSinkResource` -/// -/// ## Adds tasks -/// -/// - `AccountNonceSweeperTask` (only for `ProxySink`) -#[derive(Debug)] -#[non_exhaustive] -pub enum TxSinkLayer { - MasterPoolSink, - ProxySink, -} - -#[async_trait::async_trait] -impl WiringLayer for TxSinkLayer { - fn layer_name(&self) -> &'static str { - "tx_sink_layer" - } - - async fn wire(self: Box, mut context: ServiceContext<'_>) -> Result<(), WiringError> { - let tx_sink = match self.as_ref() { - TxSinkLayer::MasterPoolSink => { - let pool = context - .get_resource::>()? - .get() - .await?; - TxSinkResource(Arc::new(MasterPoolSink::new(pool))) - } - TxSinkLayer::ProxySink => { - let MainNodeClientResource(client) = context.get_resource()?; - let proxy = TxProxy::new(client); - - let pool = context - .get_resource::>()? 
- .get_singleton() - .await?; - let task = proxy.account_nonce_sweeper_task(pool); - context.add_task(task); - - TxSinkResource(Arc::new(proxy)) - } - }; - context.insert_resource(tx_sink)?; - Ok(()) - } -} - -#[async_trait::async_trait] -impl Task for AccountNonceSweeperTask { - fn id(&self) -> TaskId { - "account_nonce_sweeper_task".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/master_pool_sink.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/master_pool_sink.rs new file mode 100644 index 000000000000..79951a95ab1b --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/master_pool_sink.rs @@ -0,0 +1,42 @@ +use zksync_node_api_server::tx_sender::master_pool_sink::MasterPoolSink; + +use crate::{ + implementations::resources::{ + pools::{MasterPool, PoolResource}, + web3_api::TxSinkResource, + }, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for [`MasterPoolSink`], [`TxSink`](zksync_node_api_server::tx_sender::tx_sink::TxSink) implementation. +pub struct MasterPoolSinkLayer; + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub tx_sink: TxSinkResource, +} + +#[async_trait::async_trait] +impl WiringLayer for MasterPoolSinkLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "master_pook_sink_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get().await?; + Ok(Output { + tx_sink: MasterPoolSink::new(pool).into(), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/mod.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/mod.rs new file mode 100644 index 000000000000..61b9fb1d9e9e --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/mod.rs @@ -0,0 +1,4 @@ +pub use self::{master_pool_sink::MasterPoolSinkLayer, proxy_sink::ProxySinkLayer}; + +pub mod master_pool_sink; +pub mod proxy_sink; diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/proxy_sink.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/proxy_sink.rs new file mode 100644 index 000000000000..4340dbdb3f43 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sink/proxy_sink.rs @@ -0,0 +1,66 @@ +use zksync_node_api_server::tx_sender::proxy::{AccountNonceSweeperTask, TxProxy}; + +use crate::{ + implementations::resources::{ + main_node_client::MainNodeClientResource, + pools::{MasterPool, PoolResource}, + web3_api::TxSinkResource, + }, + service::StopReceiver, + task::{Task, TaskId}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for [`TxProxy`], [`TxSink`](zksync_node_api_server::tx_sender::tx_sink::TxSink) implementation. 
+#[derive(Debug)] +pub struct ProxySinkLayer; + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub main_node_client: MainNodeClientResource, + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub tx_sink: TxSinkResource, + #[context(task)] + pub account_nonce_sweeper_task: AccountNonceSweeperTask, +} + +#[async_trait::async_trait] +impl WiringLayer for ProxySinkLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "proxy_sink_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let MainNodeClientResource(client) = input.main_node_client; + let proxy = TxProxy::new(client); + + let pool = input.master_pool.get_singleton().await?; + let task = proxy.account_nonce_sweeper_task(pool); + + Ok(Output { + tx_sink: proxy.into(), + account_nonce_sweeper_task: task, + }) + } +} + +#[async_trait::async_trait] +impl Task for AccountNonceSweeperTask { + fn id(&self) -> TaskId { + "account_nonce_sweeper_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/resources/action_queue.rs b/core/node/node_framework/src/implementations/resources/action_queue.rs index b0f70828018b..7edb8bad3111 100644 --- a/core/node/node_framework/src/implementations/resources/action_queue.rs +++ b/core/node/node_framework/src/implementations/resources/action_queue.rs @@ -12,3 +12,9 @@ impl Resource for ActionQueueSenderResource { "external_node/action_queue_sender".into() } } + +impl From for ActionQueueSenderResource { + fn from(sender: ActionQueueSender) -> Self { + Self(Unique::new(sender)) + } +} diff --git a/core/node/node_framework/src/implementations/resources/base_token_ratio_provider.rs b/core/node/node_framework/src/implementations/resources/base_token_ratio_provider.rs index 9cb43870f76c..6699d5dfc70b 100644 --- a/core/node/node_framework/src/implementations/resources/base_token_ratio_provider.rs +++ b/core/node/node_framework/src/implementations/resources/base_token_ratio_provider.rs @@ -5,7 +5,7 @@ use zksync_base_token_adjuster::{BaseTokenRatioProvider, NoOpRatioProvider}; use crate::resource::Resource; /// A resource that provides [`BaseTokenRatioProvider`] implementation to the service. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct BaseTokenRatioProviderResource(pub Arc); impl Default for BaseTokenRatioProviderResource { @@ -19,3 +19,9 @@ impl Resource for BaseTokenRatioProviderResource { "common/base_token_ratio_provider".into() } } + +impl From> for BaseTokenRatioProviderResource { + fn from(provider: Arc) -> Self { + Self(provider) + } +} diff --git a/core/node/node_framework/src/implementations/resources/da_client.rs b/core/node/node_framework/src/implementations/resources/da_client.rs index 525164cb9b10..51aba6d19d4e 100644 --- a/core/node/node_framework/src/implementations/resources/da_client.rs +++ b/core/node/node_framework/src/implementations/resources/da_client.rs @@ -3,7 +3,7 @@ use zksync_da_client::DataAvailabilityClient; use crate::resource::Resource; /// Represents a client of a certain DA solution. 
-#[derive(Clone)] +#[derive(Debug, Clone)] pub struct DAClientResource(pub Box); impl Resource for DAClientResource { diff --git a/core/node/node_framework/src/implementations/resources/fee_input.rs b/core/node/node_framework/src/implementations/resources/fee_input.rs index e3204510c58e..10271977bac7 100644 --- a/core/node/node_framework/src/implementations/resources/fee_input.rs +++ b/core/node/node_framework/src/implementations/resources/fee_input.rs @@ -13,3 +13,9 @@ impl Resource for FeeInputResource { "common/fee_input".into() } } + +impl From> for FeeInputResource { + fn from(provider: Arc) -> Self { + Self(provider) + } +} diff --git a/core/node/node_framework/src/implementations/resources/l1_tx_params.rs b/core/node/node_framework/src/implementations/resources/l1_tx_params.rs index 8fd962480b9d..676828c39885 100644 --- a/core/node/node_framework/src/implementations/resources/l1_tx_params.rs +++ b/core/node/node_framework/src/implementations/resources/l1_tx_params.rs @@ -13,3 +13,9 @@ impl Resource for L1TxParamsResource { "common/l1_tx_params".into() } } + +impl From> for L1TxParamsResource { + fn from(provider: Arc) -> Self { + Self(provider) + } +} diff --git a/core/node/node_framework/src/implementations/resources/main_node_client.rs b/core/node/node_framework/src/implementations/resources/main_node_client.rs index 64a0ac85bef6..491d39726ea9 100644 --- a/core/node/node_framework/src/implementations/resources/main_node_client.rs +++ b/core/node/node_framework/src/implementations/resources/main_node_client.rs @@ -11,3 +11,9 @@ impl Resource for MainNodeClientResource { "external_node/main_node_client".into() } } + +impl>>> From for MainNodeClientResource { + fn from(client: T) -> Self { + Self(client.into()) + } +} diff --git a/core/node/node_framework/src/implementations/resources/state_keeper.rs b/core/node/node_framework/src/implementations/resources/state_keeper.rs index 860332f26293..5db570d7989b 100644 --- a/core/node/node_framework/src/implementations/resources/state_keeper.rs +++ b/core/node/node_framework/src/implementations/resources/state_keeper.rs @@ -17,6 +17,12 @@ impl Resource for StateKeeperIOResource { } } +impl From for StateKeeperIOResource { + fn from(io: T) -> Self { + Self(Unique::new(Box::new(io))) + } +} + /// A resource that provides [`BatchExecutor`] implementation to the service. /// This resource is unique, e.g. it's expected to be consumed by a single service. #[derive(Debug, Clone)] @@ -28,6 +34,12 @@ impl Resource for BatchExecutorResource { } } +impl From for BatchExecutorResource { + fn from(executor: T) -> Self { + Self(Unique::new(Box::new(executor))) + } +} + /// A resource that provides [`OutputHandler`] implementation to the service. /// This resource is unique, e.g. it's expected to be consumed by a single service. #[derive(Debug, Clone)] @@ -39,6 +51,12 @@ impl Resource for OutputHandlerResource { } } +impl From for OutputHandlerResource { + fn from(handler: OutputHandler) -> Self { + Self(Unique::new(handler)) + } +} + /// A resource that provides [`ConditionalSealer`] implementation to the service. 
#[derive(Debug, Clone)] pub struct ConditionalSealerResource(pub Arc); @@ -48,3 +66,12 @@ impl Resource for ConditionalSealerResource { "state_keeper/conditional_sealer".into() } } + +impl From for ConditionalSealerResource +where + T: ConditionalSealer + 'static, +{ + fn from(sealer: T) -> Self { + Self(Arc::new(sealer)) + } +} diff --git a/core/node/node_framework/src/implementations/resources/sync_state.rs b/core/node/node_framework/src/implementations/resources/sync_state.rs index a65342dd38d6..d2854d187672 100644 --- a/core/node/node_framework/src/implementations/resources/sync_state.rs +++ b/core/node/node_framework/src/implementations/resources/sync_state.rs @@ -11,3 +11,9 @@ impl Resource for SyncStateResource { "common/sync_state".into() } } + +impl From for SyncStateResource { + fn from(sync_state: SyncState) -> Self { + Self(sync_state) + } +} diff --git a/core/node/node_framework/src/implementations/resources/web3_api.rs b/core/node/node_framework/src/implementations/resources/web3_api.rs index 9b3716721261..78340884a1b4 100644 --- a/core/node/node_framework/src/implementations/resources/web3_api.rs +++ b/core/node/node_framework/src/implementations/resources/web3_api.rs @@ -18,6 +18,12 @@ impl Resource for TxSenderResource { } } +impl From for TxSenderResource { + fn from(sender: TxSender) -> Self { + Self(sender) + } +} + /// A resource that provides [`TxSink`] implementation to the service. #[derive(Debug, Clone)] pub struct TxSinkResource(pub Arc); @@ -28,6 +34,12 @@ impl Resource for TxSinkResource { } } +impl From for TxSinkResource { + fn from(sink: T) -> Self { + Self(Arc::new(sink)) + } +} + /// A resource that provides [`TreeApiClient`] implementation to the service. #[derive(Debug, Clone)] pub struct TreeApiClientResource(pub Arc); @@ -38,6 +50,12 @@ impl Resource for TreeApiClientResource { } } +impl From> for TreeApiClientResource { + fn from(client: Arc) -> Self { + Self(client) + } +} + /// A resource that provides [`MempoolCache`] to the service. #[derive(Debug, Clone)] pub struct MempoolCacheResource(pub MempoolCache); @@ -47,3 +65,9 @@ impl Resource for MempoolCacheResource { "api/mempool_cache".into() } } + +impl From for MempoolCacheResource { + fn from(cache: MempoolCache) -> Self { + Self(cache) + } +} diff --git a/core/node/node_framework/src/service/context.rs b/core/node/node_framework/src/service/context.rs index 0280bb1c8927..8197fdfa9d7f 100644 --- a/core/node/node_framework/src/service/context.rs +++ b/core/node/node_framework/src/service/context.rs @@ -1,7 +1,6 @@ -use std::{any::type_name, future::Future}; - -use futures::FutureExt as _; +use std::any::type_name; +use super::shutdown_hook::ShutdownHook; use crate::{ resource::{Resource, ResourceId, StoredResource}, service::{named_future::NamedFuture, ZkStackService}, @@ -63,20 +62,16 @@ impl<'a> ServiceContext<'a> { /// /// The future is guaranteed to only be polled after all the node tasks are stopped or timed out. /// All the futures will be awaited sequentially. 
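// Illustrative sketch, not taken from the diff: what the `From` conversions added to the
// resource types above buy the layers. A layer's output can be assembled with `.into()`
// instead of spelling out the wrapper, as the sink and cache layers in this diff do.
// `make_sink` is a hypothetical helper; the conversions themselves are the ones introduced
// in the hunks above.
use zksync_dal::{ConnectionPool, Core};
use zksync_node_api_server::{
    tx_sender::master_pool_sink::MasterPoolSink, web3::mempool_cache::MempoolCache,
};

use crate::implementations::resources::web3_api::{MempoolCacheResource, TxSinkResource};

fn make_sink(
    pool: ConnectionPool<Core>,
    cache: MempoolCache,
) -> (TxSinkResource, MempoolCacheResource) {
    (
        // Via the `From` impl covering `TxSink` implementations.
        MasterPoolSink::new(pool).into(),
        // Via `impl From<MempoolCache> for MempoolCacheResource`.
        cache.into(),
    )
}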
- pub fn add_shutdown_hook( - &mut self, - name: &'static str, - hook: impl Future> + Send + 'static, - ) -> &mut Self { + pub fn add_shutdown_hook(&mut self, hook: ShutdownHook) -> &mut Self { tracing::info!( "Layer {} has added a new shutdown hook: {}", self.layer, - name + hook.id ); self.service .runnables .shutdown_hooks - .push(NamedFuture::new(hook.boxed(), name.into())); + .push(NamedFuture::new(hook.future, hook.id)); self } diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs index 2744c08ceba6..22102a60efb7 100644 --- a/core/node/node_framework/src/service/mod.rs +++ b/core/node/node_framework/src/service/mod.rs @@ -9,6 +9,7 @@ pub use self::{ context::ServiceContext, context_traits::{FromContext, IntoContext}, error::ZkStackServiceError, + shutdown_hook::ShutdownHook, stop_receiver::StopReceiver, }; use crate::{ @@ -18,7 +19,7 @@ use crate::{ runnables::{NamedBoxFuture, Runnables, TaskReprs}, }, task::TaskId, - wiring_layer::{WiringError, WiringLayer}, + wiring_layer::{WireFn, WiringError, WiringLayer, WiringLayerExt}, }; mod context; @@ -26,6 +27,7 @@ mod context_traits; mod error; mod named_future; mod runnables; +mod shutdown_hook; mod stop_receiver; #[cfg(test)] mod tests; @@ -37,7 +39,9 @@ const TASK_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(30); #[derive(Default, Debug)] pub struct ZkStackServiceBuilder { /// List of wiring layers. - layers: Vec>, + // Note: It has to be a `Vec` and not e.g. `HashMap` because the order in which we + // iterate through it matters. + layers: Vec<(&'static str, WireFn)>, } impl ZkStackServiceBuilder { @@ -55,12 +59,13 @@ impl ZkStackServiceBuilder { /// This may be useful if the same layer is a prerequisite for multiple other layers: it is safe /// to add it multiple times, and it will only be wired once. pub fn add_layer(&mut self, layer: T) -> &mut Self { + let name = layer.layer_name(); if !self .layers .iter() - .any(|existing_layer| existing_layer.layer_name() == layer.layer_name()) + .any(|(existing_name, _)| name == *existing_name) { - self.layers.push(Box::new(layer)); + self.layers.push((name, layer.into_wire_fn())); } self } @@ -98,7 +103,7 @@ pub struct ZkStackService { /// Cache of resources that have been requested at least by one task. resources: HashMap>, /// List of wiring layers. - layers: Vec>, + layers: Vec<(&'static str, WireFn)>, /// Different kinds of tasks for the service. runnables: Runnables, @@ -144,15 +149,15 @@ impl ZkStackService { let mut errors: Vec<(String, WiringError)> = Vec::new(); let runtime_handle = self.runtime.handle().clone(); - for layer in wiring_layers { - let name = layer.layer_name().to_string(); + for (name, WireFn(wire_fn)) in wiring_layers { // We must process wiring layers sequentially and in the same order as they were added. - let task_result = runtime_handle.block_on(layer.wire(ServiceContext::new(&name, self))); + let mut context = ServiceContext::new(name, self); + let task_result = wire_fn(&runtime_handle, &mut context); if let Err(err) = task_result { // We don't want to bail on the first error, since it'll provide worse DevEx: // People likely want to fix as much problems as they can in one go, rather than have // to fix them one by one. 
- errors.push((name, err)); + errors.push((name.to_string(), err)); continue; }; } diff --git a/core/node/node_framework/src/service/shutdown_hook.rs b/core/node/node_framework/src/service/shutdown_hook.rs new file mode 100644 index 000000000000..caeb26809bde --- /dev/null +++ b/core/node/node_framework/src/service/shutdown_hook.rs @@ -0,0 +1,47 @@ +use std::{fmt, future::Future}; + +use futures::{future::BoxFuture, FutureExt}; + +use crate::{IntoContext, TaskId}; + +/// A named future that will be invoked after all the tasks are stopped. +/// The future is expected to perform a cleanup or a shutdown of the service. +/// +/// All the shutdown hooks will be executed sequentially, so they may assume that +/// no other tasks are running at the moment of execution on the same node. However, +/// an unique access to the database is not guaranteed, since the node may run in a +/// distributed mode, so this should not be used for potentially destructive actions. +pub struct ShutdownHook { + pub(crate) id: TaskId, + pub(crate) future: BoxFuture<'static, anyhow::Result<()>>, +} + +impl fmt::Debug for ShutdownHook { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ShutdownHook") + .field("name", &self.id) + .finish() + } +} + +impl ShutdownHook { + pub fn new( + name: &'static str, + hook: impl Future> + Send + 'static, + ) -> Self { + Self { + id: name.into(), + future: hook.boxed(), + } + } +} + +impl IntoContext for ShutdownHook { + fn into_context( + self, + context: &mut super::ServiceContext<'_>, + ) -> Result<(), crate::WiringError> { + context.add_shutdown_hook(self); + Ok(()) + } +} diff --git a/core/node/node_framework/src/service/tests.rs b/core/node/node_framework/src/service/tests.rs index 994e41ef21cc..e801e97b7e96 100644 --- a/core/node/node_framework/src/service/tests.rs +++ b/core/node/node_framework/src/service/tests.rs @@ -5,11 +5,9 @@ use assert_matches::assert_matches; use tokio::{runtime::Runtime, sync::Barrier}; use crate::{ - service::{ - ServiceContext, StopReceiver, WiringError, WiringLayer, ZkStackServiceBuilder, - ZkStackServiceError, - }, + service::{StopReceiver, WiringError, WiringLayer, ZkStackServiceBuilder, ZkStackServiceError}, task::{Task, TaskId}, + IntoContext, }; // `ZkStack` Service's `new()` method has to have a check for nested runtime. 
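// Illustrative sketch, not taken from the diff: because `ShutdownHook` (defined above)
// implements `IntoContext`, a layer can expose a shutdown hook simply as a field of its
// output instead of calling `context.add_shutdown_hook(...)`. `ExampleCleanupLayer` and the
// hook body are hypothetical, and this assumes the `IntoContext` derive forwards plain
// fields to their own `IntoContext` impls, as the optional resource outputs above suggest.
use crate::{
    service::ShutdownHook,
    wiring_layer::{WiringError, WiringLayer},
    IntoContext,
};

#[derive(Debug)]
pub struct ExampleCleanupLayer;

#[derive(Debug, IntoContext)]
#[context(crate = crate)]
pub struct Output {
    pub shutdown_hook: ShutdownHook,
}

#[async_trait::async_trait]
impl WiringLayer for ExampleCleanupLayer {
    type Input = ();
    type Output = Output;

    fn layer_name(&self) -> &'static str {
        "example_cleanup_layer"
    }

    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
        Ok(Output {
            shutdown_hook: ShutdownHook::new("example_cleanup", async {
                // Cleanup that runs after all tasks have stopped (or timed out).
                anyhow::Ok(())
            }),
        })
    }
}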
@@ -30,11 +28,14 @@ struct DefaultLayer { #[async_trait::async_trait] impl WiringLayer for DefaultLayer { + type Input = (); + type Output = (); + fn layer_name(&self) -> &'static str { self.name } - async fn wire(self: Box, mut _node: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { Ok(()) } } @@ -87,11 +88,14 @@ struct WireErrorLayer; #[async_trait::async_trait] impl WiringLayer for WireErrorLayer { + type Input = (); + type Output = (); + fn layer_name(&self) -> &'static str { "wire_error_layer" } - async fn wire(self: Box, _node: ServiceContext<'_>) -> Result<(), WiringError> { + async fn wire(self, _input: Self::Input) -> Result { Err(WiringError::Internal(anyhow!("wiring error"))) } } @@ -110,15 +114,24 @@ fn test_run_with_error_tasks() { #[derive(Debug)] struct TaskErrorLayer; +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +struct TaskErrorLayerOutput { + #[context(task)] + task: ErrorTask, +} + #[async_trait::async_trait] impl WiringLayer for TaskErrorLayer { + type Input = (); + type Output = TaskErrorLayerOutput; + fn layer_name(&self) -> &'static str { "task_error_layer" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { - node.add_task(ErrorTask); - Ok(()) + async fn wire(self, _input: Self::Input) -> Result { + Ok(TaskErrorLayerOutput { task: ErrorTask }) } } @@ -150,25 +163,32 @@ struct TasksLayer { remaining_task_was_run: Arc>, } +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +struct TasksLayerOutput { + #[context(task)] + successful_task: SuccessfulTask, + #[context(task)] + remaining_task: RemainingTask, +} + #[async_trait::async_trait] impl WiringLayer for TasksLayer { + type Input = (); + type Output = TasksLayerOutput; + fn layer_name(&self) -> &'static str { "tasks_layer" } - async fn wire(self: Box, mut node: ServiceContext<'_>) -> Result<(), WiringError> { - // Barrier is needed to make sure that both tasks have started, otherwise the second task - // may exit even before it starts. + async fn wire(self, _input: Self::Input) -> Result { let barrier = Arc::new(Barrier::new(2)); - node.add_task(SuccessfulTask( - barrier.clone(), - self.successful_task_was_run.clone(), - )) - .add_task(RemainingTask( - barrier.clone(), - self.remaining_task_was_run.clone(), - )); - Ok(()) + let successful_task = SuccessfulTask(barrier.clone(), self.successful_task_was_run.clone()); + let remaining_task = RemainingTask(barrier, self.remaining_task_was_run.clone()); + Ok(TasksLayerOutput { + successful_task, + remaining_task, + }) } } diff --git a/core/node/node_framework/src/wiring_layer.rs b/core/node/node_framework/src/wiring_layer.rs index e37bb1c9d487..1cc133eea830 100644 --- a/core/node/node_framework/src/wiring_layer.rs +++ b/core/node/node_framework/src/wiring_layer.rs @@ -1,6 +1,24 @@ use std::fmt; -use crate::{resource::ResourceId, service::ServiceContext}; +use tokio::runtime; + +use crate::{resource::ResourceId, service::ServiceContext, FromContext, IntoContext}; + +/// An envelope for the wiring layer function. +/// Since `WiringLayer` has associated types, we cannot easily erase the types via `dyn WiringLayer`, +/// so instead we preserve the layer type within the closure, and represent the actual wiring logic +/// as a function of the service context instead. +/// See [`WiringLayerExt`] trait for more context. +#[allow(clippy::type_complexity)] // False positive, already a dedicated type. 
+pub(crate) struct WireFn( + pub Box) -> Result<(), WiringError>>, +); + +impl fmt::Debug for WireFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WireFn").finish() + } +} /// Wiring layer provides a way to customize the `ZkStackService` by /// adding new tasks or resources to it. @@ -9,22 +27,35 @@ use crate::{resource::ResourceId, service::ServiceContext}; /// which resources they use or add, and the list of tasks they add. #[async_trait::async_trait] pub trait WiringLayer: 'static + Send + Sync { + type Input: FromContext; + type Output: IntoContext; + /// Identifier of the wiring layer. fn layer_name(&self) -> &'static str; /// Performs the wiring process, e.g. adds tasks and resources to the node. /// This method will be called once during the node initialization. - async fn wire(self: Box, context: ServiceContext<'_>) -> Result<(), WiringError>; + async fn wire(self, input: Self::Input) -> Result; } -impl fmt::Debug for dyn WiringLayer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("WiringLayer") - .field("layer_name", &self.layer_name()) - .finish() +pub(crate) trait WiringLayerExt: WiringLayer { + /// Hires the actual type of the wiring layer into the closure, so that rest of application + /// doesn't have to know it. + fn into_wire_fn(self) -> WireFn + where + Self: Sized, + { + WireFn(Box::new(move |rt, ctx| { + let input = Self::Input::from_context(ctx)?; + let output = rt.block_on(self.wire(input))?; + output.into_context(ctx)?; + Ok(()) + })) } } +impl WiringLayerExt for T where T: WiringLayer {} + /// An error that can occur during the wiring phase. #[derive(thiserror::Error, Debug)] #[non_exhaustive] diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index c861273c964d..6cb0d6655e6f 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -86,6 +86,7 @@ pub struct BasicWitnessInputProducerTasks { ConcurrentOutputHandlerFactoryTask, } +/// IO implementation for the basic witness input producer. #[derive(Debug, Clone)] pub struct BasicWitnessInputProducerIo { first_processed_batch: L1BatchNumber, diff --git a/core/node/vm_runner/src/impls/mod.rs b/core/node/vm_runner/src/impls/mod.rs index 5bae7e03f568..2d982730498a 100644 --- a/core/node/vm_runner/src/impls/mod.rs +++ b/core/node/vm_runner/src/impls/mod.rs @@ -1,5 +1,7 @@ mod bwip; mod protective_reads; -pub use bwip::{BasicWitnessInputProducer, BasicWitnessInputProducerTasks}; -pub use protective_reads::{ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; +pub use bwip::{ + BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, +}; +pub use protective_reads::{ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; diff --git a/core/node/vm_runner/src/impls/protective_reads.rs b/core/node/vm_runner/src/impls/protective_reads.rs index 4748789ae6d9..3be37b77d114 100644 --- a/core/node/vm_runner/src/impls/protective_reads.rs +++ b/core/node/vm_runner/src/impls/protective_reads.rs @@ -75,6 +75,7 @@ pub struct ProtectiveReadsWriterTasks { pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, } +/// `VmRunnerIo` implementation for protective reads. 
#[derive(Debug, Clone)] pub struct ProtectiveReadsIo { first_processed_batch: L1BatchNumber, diff --git a/core/node/vm_runner/src/lib.rs b/core/node/vm_runner/src/lib.rs index d6c9a88185ee..b252eebcbb1f 100644 --- a/core/node/vm_runner/src/lib.rs +++ b/core/node/vm_runner/src/lib.rs @@ -14,8 +14,8 @@ mod metrics; mod tests; pub use impls::{ - BasicWitnessInputProducer, BasicWitnessInputProducerTasks, ProtectiveReadsWriter, - ProtectiveReadsWriterTasks, + BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, + ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks, }; pub use io::VmRunnerIo; pub use output_handler::{ diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index f306d3be43a4..a538eb3a6dff 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -744,6 +744,32 @@ describe('web3 API compatibility tests', () => { expect(logs[0].transactionHash).toEqual(tx.hash); }); + test('Should check getLogs returns block_timestamp', async () => { + // We're sending a transfer from the wallet, so we'll use a new account to make event unique. + let uniqueRecipient = testMaster.newEmptyAccount().address; + const tx = await alice.transfer({ + to: uniqueRecipient, + amount: 1, + token: l2Token + }); + const receipt = await tx.wait(); + const response = await alice.provider.send('eth_getLogs', [ + { + fromBlock: ethers.toBeHex(receipt.blockNumber), + toBlock: ethers.toBeHex(receipt.blockNumber), + address: l2Token, + topics: [ + '0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef', + ethers.zeroPadValue(alice.address, 32), + ethers.zeroPadValue(uniqueRecipient, 32) + ] + } + ]); + expect(response).toHaveLength(1); + // TODO: switch to provider.getLogs once blockTimestamp is added to zksync ethers.js + expect(response[0].blockTimestamp).toBeDefined(); + }); + test('Should check getLogs endpoint works properly with block tags', async () => { const earliestLogs = alice.provider.send('eth_getLogs', [ { diff --git a/prover/prover_fri/README.md b/prover/prover_fri/README.md index 5f0a26cfdd49..c5f434d84d0f 100644 --- a/prover/prover_fri/README.md +++ b/prover/prover_fri/README.md @@ -55,7 +55,7 @@ installation as a pre-requisite, alongside these machine specs: 2. Run the server. In the root of the repository: ```console - zk server --components=api,eth,tree,state_keeper,housekeeper,commitment_generator,proof_data_handler + zk server --components=api,eth,tree,state_keeper,housekeeper,commitment_generator,proof_data_handler,vm_runner_bwip ``` Note that it will produce a first l1 batch that can be proven (should be batch 0). diff --git a/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs b/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs index 58fd36ab4a59..471e76e1a680 100644 --- a/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs +++ b/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs @@ -3,7 +3,6 @@ use std::{str::FromStr, sync::Mutex}; use anyhow::Context as _; use hex::ToHex; use once_cell::sync::Lazy; -use structopt::lazy_static::lazy_static; use zkevm_test_harness::witness::recursive_aggregation::{ compute_leaf_vks_and_params_commitment, compute_node_vk_commitment, }; @@ -24,14 +23,6 @@ use crate::{ static KEYSTORE: Lazy>> = Lazy::new(|| Mutex::new(None)); -lazy_static! { - // TODO: do not initialize a static const with data read in runtime. 
-    static ref COMMITMENTS: Lazy<L1VerifierConfig> = Lazy::new(|| {
-        let keystore = KEYSTORE.lock().unwrap().clone().unwrap_or_default();
-        circuit_commitments(&keystore).unwrap()
-    });
-}
-
 fn circuit_commitments(keystore: &Keystore) -> anyhow::Result<L1VerifierConfig> {
     let commitments = generate_commitments(keystore).context("generate_commitments()")?;
     Ok(L1VerifierConfig {
@@ -108,8 +99,12 @@ pub fn get_cached_commitments(setup_data_path: Option<String>) -> L1VerifierConfig {
         let mut keystore_lock = KEYSTORE.lock().unwrap();
         *keystore_lock = Some(keystore);
     }
-    tracing::info!("Using cached commitments {:?}", **COMMITMENTS);
-    **COMMITMENTS
+
+    let keystore = KEYSTORE.lock().unwrap().clone().unwrap_or_default();
+    let commitments = circuit_commitments(&keystore).unwrap();
+
+    tracing::info!("Using cached commitments {:?}", commitments);
+    commitments
 }
 
 #[test]
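// Illustrative sketch, not taken from the diff: the recurring simplification in these hunks
// of dropping thin wrapper structs (`BasicWitnessInputProducerTask`,
// `ProtectiveReadsWriterTask`, `VmConcurrencyBarrierTask`, the old `MempoolCacheUpdateTask`
// wrapper) and implementing `Task` directly on the underlying type. `Worker` and its
// inherent `run` are hypothetical stand-ins for those components.
use crate::{
    service::StopReceiver,
    task::{Task, TaskId},
};

#[derive(Debug)]
pub struct Worker;

impl Worker {
    // Inherent, by-value `run`, like the ones the wrapped components expose.
    pub async fn run(self, mut stop: tokio::sync::watch::Receiver<bool>) -> anyhow::Result<()> {
        stop.changed().await?;
        Ok(())
    }
}

#[async_trait::async_trait]
impl Task for Worker {
    fn id(&self) -> TaskId {
        "example_worker".into()
    }

    async fn run(self: Box<Self>, stop_receiver: StopReceiver) -> anyhow::Result<()> {
        // `*self` unboxes the value so the inherent `run` (which takes `self`) is called;
        // inherent methods take precedence over the trait method here.
        (*self).run(stop_receiver.0).await
    }
}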