diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index c757ef75231f..a1e542be4e7f 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -244,7 +244,6 @@ jobs: - name: Prepare the server to be the synclayer run: | ci_run zk dev2 supply-rich-wallets - ci_run zk contract build --zkSync ci_run zk contract prepare-sync-layer ci_run zk contract register-sync-layer-counterpart @@ -258,10 +257,8 @@ jobs: ci_run zk config prepare-l1-hyperchain --env-name test-chain --chain-id 320 ci_run zk env test-chain ci_run zk config compile test-chain --diff 5 - ci_run zk init hyper - ci_run zk server --time-to-live 120 &>server2.log - sleep 120 - ci_run zk server --tx-aggregation-paused --time-to-live 60 &>server2.log + ci_run zk init hyper --skip-contract-compilation-override + ci_run zk contract migrate-to-sync-layer ci_run zk contract prepare-sync-layer-validators ci_run zk contract update-config-for-sync-layer diff --git a/contracts b/contracts index 8152af98817b..7206350a0ffd 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 8152af98817b7baa7205a81901810202c589a87e +Subproject commit 7206350a0ffd9f2de0ecd38e7f9e2cd711a5dd81 diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 7f0dd89efbca..142b4b81330e 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -111,6 +111,7 @@ pub(crate) struct RemoteENConfig { // a different name, with names adapted only for consistency. pub l1_shared_bridge_proxy_addr: Option
<Address>, pub l2_shared_bridge_addr: Option<Address>, + pub l2_legacy_shared_bridge_addr: Option<Address>, pub l1_erc20_bridge_proxy_addr: Option<Address>, pub l2_erc20_bridge_addr: Option<Address>, pub l1_weth_bridge_addr: Option<Address>
, @@ -138,6 +139,10 @@ impl RemoteENConfig { .get_native_token_vault_proxy_addr() .rpc_context("get_native_token_vault") .await?; + let l2_legacy_shared_bridge_addr = client + .get_legacy_shared_bridge() + .rpc_context("get_legacy_shared_bridge") + .await?; let genesis = client.genesis_config().rpc_context("genesis").await.ok(); let ecosystem_contracts = client .get_ecosystem_contracts() @@ -203,6 +208,7 @@ impl RemoteENConfig { l2_erc20_bridge_addr: l2_erc20_default_bridge, l1_shared_bridge_proxy_addr: bridges.l1_shared_default_bridge, l2_shared_bridge_addr: l2_erc20_shared_bridge, + l2_legacy_shared_bridge_addr, l1_weth_bridge_addr: bridges.l1_weth_bridge, l2_weth_bridge_addr: bridges.l2_weth_bridge, base_token_addr, @@ -234,6 +240,7 @@ impl RemoteENConfig { l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(5)), l1_weth_bridge_addr: None, l2_shared_bridge_addr: Some(Address::repeat_byte(6)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(7)), l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, dummy_verifier: true, l2_native_token_vault_proxy_addr: Some(Address::repeat_byte(7)), @@ -1421,6 +1428,7 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { dummy_verifier: config.remote.dummy_verifier, l1_batch_commit_data_generator_mode: config.remote.l1_batch_commit_data_generator_mode, l2_native_token_vault_proxy_addr: config.remote.l2_native_token_vault_proxy_addr, + l2_legacy_shared_bridge_addr: config.remote.l2_legacy_shared_bridge_addr, } } } diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 7c40c297e107..70912552d93d 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -200,6 +200,10 @@ impl ExternalNodeBuilder { .remote .l2_native_token_vault_proxy_addr .expect("L2 native token vault proxy address is not set"), + self.config + .remote + .l2_legacy_shared_bridge_addr + .expect("L2 legacy shared bridge address is not set"), self.config.optional.l2_block_seal_queue_capacity, ) .with_pre_insert_txs(true) // EN requires txs to be pre-inserted. 
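// --- Illustrative sketch (editor's note, not part of the patch) ------------------------
// The `RemoteENConfig` / node-builder changes above expect the main node to expose the
// legacy L2 shared bridge address via the new `zks_getLegacySharedBridge` method that
// this patch adds to `core/lib/web3_decl` further below. A minimal way an external
// client could query it, assuming the generated `ZksNamespaceClient` trait (the exact
// bounds and error handling here are assumptions, not code from the patch):
use zksync_types::Address;
use zksync_web3_decl::namespaces::ZksNamespaceClient;

async fn resolve_legacy_shared_bridge<C: ZksNamespaceClient + Sync>(
    client: &C,
) -> anyhow::Result<Option<Address>> {
    // `None` is the expected answer for chains that never deployed the legacy L2 bridge.
    Ok(client.get_legacy_shared_bridge().await?)
}
// ---------------------------------------------------------------------------------------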
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index f3f8a38c954c..15ba1f73cb93 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -246,6 +246,9 @@ impl MainNodeBuilder { self.contracts_config .l2_native_token_vault_proxy_addr .context("L2 native token vault proxy address")?, + self.contracts_config + .l2_legacy_shared_bridge_addr + .context("L2 legacy shared bridge address")?, sk_config.l2_block_seal_queue_capacity, ) .with_protective_reads_persistence_enabled(sk_config.protective_reads_persistence_enabled); diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index f7601c440ff3..ce9150145958 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -139,6 +139,10 @@ impl ProtocolVersionId { self <= &Self::Version22 } + pub fn is_pre_gateway(&self) -> bool { + self <= &Self::Version24 + } + pub fn is_1_4_0(&self) -> bool { self >= &ProtocolVersionId::Version18 && self < &ProtocolVersionId::Version20 } diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index ee61441539bd..bf88c100cce9 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -30,6 +30,7 @@ pub struct ContractsConfig { pub validator_timelock_addr: Address, pub l1_shared_bridge_proxy_addr: Option
<Address>, pub l2_shared_bridge_addr: Option<Address>, + pub l2_legacy_shared_bridge_addr: Option<Address>, pub l1_erc20_bridge_proxy_addr: Option<Address>, pub l2_erc20_bridge_addr: Option<Address>, pub l1_weth_bridge_proxy_addr: Option<Address>
, @@ -61,6 +62,7 @@ impl ContractsConfig { l2_erc20_bridge_addr: Some(Address::repeat_byte(0x0c)), l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(0x0e)), l2_shared_bridge_addr: Some(Address::repeat_byte(0x0f)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(0x10)), l1_weth_bridge_proxy_addr: Some(Address::repeat_byte(0x0b)), l2_weth_bridge_addr: Some(Address::repeat_byte(0x0c)), l2_testnet_paymaster_addr: Some(Address::repeat_byte(0x11)), diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index b095648e8bac..348c2a95848b 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -251,6 +251,7 @@ impl Distribution for EncodeDist { l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l2_legacy_shared_bridge_addr: self.sample_opt(|| rng.gen()), l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index efcc4625ef98..aaa890f5cddf 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -36,11 +36,11 @@ const FORGE_PATH_PREFIX: &str = "contracts/l1-contracts/out"; const BRIDGEHUB_CONTRACT_FILE: (&str, &str) = ("bridgehub", "IBridgehub.sol/IBridgehub.json"); const STATE_TRANSITION_CONTRACT_FILE: (&str, &str) = ( "state-transition", - "IStateTransitionManager.sol/IStateTransitionManager.json", + "IChainTypeManager.sol/IChainTypeManager.json", ); const ZKSYNC_HYPERCHAIN_CONTRACT_FILE: (&str, &str) = ( "state-transition/chain-interfaces", - "IZkSyncHyperchain.sol/IZkSyncHyperchain.json", + "IZKChain.sol/IZKChain.json", ); const DIAMOND_INIT_CONTRACT_FILE: (&str, &str) = ( "state-transition", diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 68d26114d373..d1c6a252b028 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -98,6 +98,7 @@ mod tests { l2_native_token_vault_proxy_addr: Some(addr( "0xfc073319977e314f251eae6ae6be76b0b3baeecf", )), + l2_legacy_shared_bridge_addr: Some(addr("0x8656770FA78c830456B00B4fFCeE6b1De0e1b888")), chain_admin_addr: Some(addr("0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), l2_da_validator_addr: Some(addr("0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), } @@ -130,6 +131,7 @@ CONTRACTS_USER_FACING_DIAMOND_PROXY_ADDR="0xF00B988a98Ca742e7958DeF9F7823b590871 CONTRACTS_L2_NATIVE_TOKEN_VAULT_PROXY_ADDR="0xfc073319977e314f251eae6ae6be76b0b3baeecf" CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff" +CONTRACTS_L2_LEGACY_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" "#; lock.set_env(config); diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs index 883804f0bd6f..e1bddf67ded2 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs @@ -1,14 +1,13 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, - ethabi::Token, + ethabi::{encode, Token}, pubdata_da::PubdataDA, }; use crate::{ - i_executor::structures::{CommitBatchInfo, StoredBatchInfo}, - 
Tokenizable, Tokenize, + i_executor::structures::{CommitBatchInfo, StoredBatchInfo, SUPPORTED_ENCODING_VERSION}, + Tokenizable, }; - /// Input required to encode `commitBatches` call for a contract #[derive(Debug)] pub struct CommitBatches<'a> { @@ -18,15 +17,33 @@ pub struct CommitBatches<'a> { pub mode: L1BatchCommitmentMode, } -impl Tokenize for CommitBatches<'_> { - fn into_tokens(self) -> Vec { +impl CommitBatches<'_> { + pub fn into_tokens(self, pre_gateway: bool) -> Vec { let stored_batch_info = StoredBatchInfo::from(self.last_committed_l1_batch).into_token(); - let l1_batches_to_commit = self + let l1_batches_to_commit: Vec = self .l1_batches .iter() .map(|batch| CommitBatchInfo::new(self.mode, batch, self.pubdata_da).into_token()) .collect(); - vec![stored_batch_info, Token::Array(l1_batches_to_commit)] + let encoded_data = encode(&[ + stored_batch_info.clone(), + Token::Array(l1_batches_to_commit.clone()), + ]); + let commit_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data] + .concat() + .to_vec(); + if pre_gateway { + vec![stored_batch_info, Token::Array(l1_batches_to_commit)] + } else { + vec![ + Token::Uint((self.last_committed_l1_batch.header.number.0 + 1).into()), + Token::Uint( + (self.last_committed_l1_batch.header.number.0 + self.l1_batches.len() as u32) + .into(), + ), + Token::Bytes(commit_data), + ] + } } } diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs index 631eacc3412b..bbf022011834 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/execute_batches.rs @@ -1,9 +1,12 @@ use zksync_types::{ commitment::{L1BatchWithMetadata, PriorityOpsMerkleProof}, - ethabi::Token, + ethabi::{encode, Token}, }; -use crate::{i_executor::structures::StoredBatchInfo, Tokenizable, Tokenize}; +use crate::{ + i_executor::structures::{StoredBatchInfo, SUPPORTED_ENCODING_VERSION}, + Tokenizable, Tokenize, +}; /// Input required to encode `executeBatches` call. 
#[derive(Debug, Clone)] @@ -14,7 +17,7 @@ pub struct ExecuteBatches { impl Tokenize for &ExecuteBatches { fn into_tokens(self) -> Vec { - vec![ + let encoded_data = encode(&[ Token::Array( self.l1_batches .iter() @@ -27,6 +30,15 @@ impl Tokenize for &ExecuteBatches { .map(|proof| proof.into_token()) .collect(), ), + ]); + let commit_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data] + .concat() + .to_vec(); + + vec![ + Token::Uint((self.l1_batches[0].header.number.0).into()), + Token::Uint((self.l1_batches[self.l1_batches.len() - 1].header.number.0).into()), + Token::Bytes(commit_data), ] } } diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs index 935d8a44e0b7..086ec84fb00b 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/prove_batches.rs @@ -1,8 +1,15 @@ use crypto_codegen::serialize_proof; use zksync_prover_interface::outputs::L1BatchProofForL1; -use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token, U256}; +use zksync_types::{ + commitment::L1BatchWithMetadata, + ethabi::{encode, Token}, + U256, +}; -use crate::{i_executor::structures::StoredBatchInfo, Tokenizable, Tokenize}; +use crate::{ + i_executor::structures::{StoredBatchInfo, SUPPORTED_ENCODING_VERSION}, + Tokenizable, Tokenize, +}; /// Input required to encode `proveBatches` call. #[derive(Debug, Clone)] @@ -15,7 +22,7 @@ pub struct ProveBatches { impl Tokenize for &ProveBatches { fn into_tokens(self) -> Vec { - let prev_l1_batch = StoredBatchInfo::from(&self.prev_l1_batch).into_token(); + let prev_l1_batch_info = StoredBatchInfo::from(&self.prev_l1_batch).into_token(); let batches_arg = self .l1_batches .iter() @@ -42,26 +49,46 @@ impl Tokenize for &ProveBatches { .unwrap() .is_pre_boojum() { - Token::Array( - aggregation_result_coords - .iter() - .map(|bytes| Token::Uint(U256::from_big_endian(bytes))) - .collect(), - ) + aggregation_result_coords + .iter() + .map(|bytes| Token::Uint(U256::from_big_endian(bytes))) + .collect() } else { - Token::Array(Vec::new()) + Vec::new() }; - let proof_input = Token::Tuple(vec![ - aggregation_result_coords, - Token::Array(proof.into_iter().map(Token::Uint).collect()), - ]); + let proof_input = Token::Array( + [ + aggregation_result_coords, + proof.into_iter().map(Token::Uint).collect(), + ] + .concat() + .to_vec(), + ); // todo this changed, might have to be debugged. 
+ + let encoded_data = encode(&[prev_l1_batch_info, batches_arg, proof_input]); + let commit_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data] + .concat() + .to_vec(); - vec![prev_l1_batch, batches_arg, proof_input] + vec![ + Token::Uint((self.prev_l1_batch.header.number.0 + 1).into()), + Token::Uint( + (self.prev_l1_batch.header.number.0 + self.l1_batches.len() as u32).into(), + ), + Token::Bytes(commit_data), + ] } else { + let encoded_data = encode(&[prev_l1_batch_info, batches_arg, Token::Array(vec![])]); + let commit_data = [[SUPPORTED_ENCODING_VERSION].to_vec(), encoded_data] + .concat() + .to_vec(); + vec![ - prev_l1_batch, - batches_arg, - Token::Tuple(vec![Token::Array(vec![]), Token::Array(vec![])]), + Token::Uint((self.prev_l1_batch.header.number.0 + 1).into()), + Token::Uint( + (self.prev_l1_batch.header.number.0 + self.l1_batches.len() as u32).into(), + ), + Token::Bytes(commit_data), ] } } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 58d6ca8363f3..f51f2244527e 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -5,7 +5,7 @@ use zksync_types::{ pre_boojum_serialize_commitments, serialize_commitments, L1BatchCommitmentMode, L1BatchWithMetadata, }, - ethabi::Token, + ethabi::{ParamType, Token}, pubdata_da::PubdataDA, web3::{contract::Error as ContractError, keccak256}, ProtocolVersionId, H256, U256, @@ -42,6 +42,21 @@ impl<'a> CommitBatchInfo<'a> { } } + pub fn schema() -> ParamType { + ParamType::Tuple(vec![ + ParamType::Uint(64), // `batch_number` + ParamType::Uint(64), // `timestamp` + ParamType::Uint(64), // `index_repeated_storage_changes` + ParamType::FixedBytes(32), // `new_state_root` + ParamType::Uint(256), // `numberOfLayer1Txs` + ParamType::FixedBytes(32), // `priorityOperationsHash` + ParamType::FixedBytes(32), // `bootloaderHeapInitialContentsHash` + ParamType::FixedBytes(32), // `eventsQueueStateHash` + ParamType::Bytes, // `systemLogs` + ParamType::Bytes, // `operatorDAInput` + ]) + } + fn base_tokens(&self) -> Vec { if self .l1_batch_with_metadata diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs index d1ed57e41f2e..b6d2eefac300 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -2,5 +2,6 @@ mod commit_batch_info; mod stored_batch_info; +pub const SUPPORTED_ENCODING_VERSION: u8 = 0; pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo}; diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs index 8373c46e36bb..18b28f34c29f 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs @@ -1,6 +1,6 @@ use zksync_types::{ commitment::L1BatchWithMetadata, - ethabi::{self, Token}, + ethabi::{self, ParamType, Token}, web3, web3::contract::Error as ContractError, H256, U256, @@ -28,6 +28,19 @@ impl StoredBatchInfo { .clone() .into_token()]))) } + + pub fn schema() -> ParamType { + ParamType::Tuple(vec![ + ParamType::Uint(64), // `batch_number` + 
ParamType::FixedBytes(32), // `batch_hash` + ParamType::Uint(64), // `index_repeated_storage_changes` + ParamType::Uint(256), // `number_of_layer1_txs` + ParamType::FixedBytes(32), // `priority_operations_hash` + ParamType::FixedBytes(32), // `l2_logs_tree_root` + ParamType::Uint(256), // `timestamp` + ParamType::FixedBytes(32), // `commitment` + ]) + } } impl From<&L1BatchWithMetadata> for StoredBatchInfo { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs index 3463ac1301bd..7f79f46dbe83 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs @@ -20,8 +20,8 @@ use crate::{ }, }; -pub(crate) const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 7; -pub(crate) const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 8; +pub(crate) const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; +pub(crate) const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; pub(crate) fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec { let mut result = vec![]; diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index 1f62fb166b0b..a022c9aa1a1a 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -76,6 +76,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("l2_shared_bridge_addr")?, + l2_legacy_shared_bridge_addr: l2 + .l2_legacy_shared_bridge_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_legacy_shared_bridge_addr")?, l1_weth_bridge_proxy_addr: weth_bridge .as_ref() .and_then(|bridge| bridge.l1_address.as_ref().map(|x| parse_h160(x))) @@ -170,6 +176,9 @@ impl ProtoRepr for proto::Contracts { .l2_native_token_vault_proxy_addr .map(|a| format!("{:?}", a)), l2_da_validator_addr: this.l2_da_validator_addr.map(|a| format!("{:?}", a)), + l2_legacy_shared_bridge_addr: this + .l2_legacy_shared_bridge_addr + .map(|a| format!("{:?}", a)), }), bridges: Some(proto::Bridges { shared: Some(proto::Bridge { diff --git a/core/lib/protobuf_config/src/proto/config/contracts.proto b/core/lib/protobuf_config/src/proto/config/contracts.proto index 7b478598bc64..63e5eb1a3e9b 100644 --- a/core/lib/protobuf_config/src/proto/config/contracts.proto +++ b/core/lib/protobuf_config/src/proto/config/contracts.proto @@ -23,6 +23,7 @@ message L2 { optional string testnet_paymaster_addr = 1; // optional; H160 optional string l2_native_token_vault_proxy_addr = 2; // optional; H160 optional string l2_da_validator_addr = 3; // optional; H160 + optional string l2_legacy_shared_bridge_addr = 4; // optional; H160 } message Bridge { diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index 65df071521b4..4caf81fd0cf4 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -189,13 +189,13 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 30] = [ ContractLanguage::Sol, ), ( - "../../../l2-contracts/artifacts-zk/contracts/bridge/", + "../../../l1-contracts/artifacts-zk/contracts/bridge/asset-router/", "L2AssetRouter", L2_ASSET_ROUTER_ADDRESS, ContractLanguage::Sol, ), ( - "../../../l2-contracts/artifacts-zk/contracts/bridge/", + "../../../l1-contracts/artifacts-zk/contracts/bridge/ntv/", "L2NativeTokenVault", L2_NATIVE_TOKEN_VAULT_ADDRESS, ContractLanguage::Sol, diff --git a/core/lib/web3_decl/src/namespaces/zks.rs 
b/core/lib/web3_decl/src/namespaces/zks.rs index edd11db290db..4ef3310368a8 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -55,6 +55,9 @@ pub trait ZksNamespace { #[method(name = "getNativeTokenVault")] async fn get_native_token_vault_proxy_addr(&self) -> RpcResult>; + #[method(name = "getLegacySharedBridge")] + async fn get_legacy_shared_bridge(&self) -> RpcResult>; + #[method(name = "getBridgeContracts")] async fn get_bridge_contracts(&self) -> RpcResult; diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index ae256a84cb15..00cd43930215 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -58,6 +58,10 @@ impl ZksNamespaceServer for ZksNamespace { Ok(self.get_native_token_vault_proxy_addr_impl()) } + async fn get_legacy_shared_bridge(&self) -> RpcResult> { + Ok(self.get_legacy_shared_bridge_impl()) + } + async fn get_bridge_contracts(&self) -> RpcResult { Ok(self.get_bridge_contracts_impl()) } diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index b96dff2fff2f..a80340c52625 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -151,6 +151,10 @@ impl ZksNamespace { self.state.api_config.l2_native_token_vault_proxy_addr } + pub fn get_legacy_shared_bridge_impl(&self) -> Option
<Address> { + self.state.api_config.l2_legacy_shared_bridge_addr + } + pub fn get_bridge_contracts_impl(&self) -> BridgeAddresses { self.state.api_config.bridge_addresses.clone() } diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index beb6b510e06b..5f97d5564644 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -115,6 +115,7 @@ pub struct InternalApiConfig { pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, pub user_facing_bridgehub_addr: Option<Address>, pub l2_native_token_vault_proxy_addr: Option<Address>, + pub l2_legacy_shared_bridge_addr: Option<Address>
, } impl InternalApiConfig { @@ -183,6 +184,7 @@ impl InternalApiConfig { .map(|a| a.bridgehub_proxy_addr), ), l2_native_token_vault_proxy_addr: contracts_config.l2_native_token_vault_proxy_addr, + l2_legacy_shared_bridge_addr: contracts_config.l2_legacy_shared_bridge_addr, } } } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index dbee146c94ef..01be11557711 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -564,6 +564,7 @@ impl StateKeeperRunner { self.pool.0.clone(), ethabi::Address::repeat_byte(11), ethabi::Address::repeat_byte(12), + ethabi::Address::repeat_byte(13), 5, ); @@ -677,6 +678,7 @@ impl StateKeeperRunner { self.pool.0.clone(), ethabi::Address::repeat_byte(11), ethabi::Address::repeat_byte(12), + ethabi::Address::repeat_byte(13), 5, ); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index 20ba43a4166e..d2d84669978d 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -11,14 +11,17 @@ use zksync_eth_client::{ }; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_l1_contract_interface::{ - i_executor::{commit::kzg::ZK_SYNC_BYTES_PER_BLOB, structures::CommitBatchInfo}, + i_executor::{ + commit::kzg::ZK_SYNC_BYTES_PER_BLOB, + structures::{CommitBatchInfo, StoredBatchInfo, SUPPORTED_ENCODING_VERSION}, + }, Tokenizable, }; use zksync_shared_metrics::{CheckerComponent, EN_METRICS}; use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, - ethabi::Token, + ethabi::{ParamType, Token}, pubdata_da::PubdataDA, Address, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -213,6 +216,13 @@ impl LocalL1BatchCommitData { .map_or(true, |version| version.is_pre_shared_bridge()) } + fn is_pre_gateway(&self) -> bool { + self.l1_batch + .header + .protocol_version + .map_or(true, |version| version.is_pre_gateway()) + } + /// All returned errors are validation errors. fn verify_commitment(&self, reference: ðabi::Token) -> anyhow::Result<()> { let protocol_version = self @@ -434,12 +444,16 @@ impl ConsistencyChecker { .map_err(CheckError::Internal)? }; - let commitment = - Self::extract_commit_data(&commit_tx.input.0, commit_function, batch_number) - .with_context(|| { - format!("failed extracting commit data for transaction {commit_tx_hash:?}") - }) - .map_err(CheckError::Validation)?; + let commitment = Self::extract_commit_data( + &commit_tx.input.0, + commit_function, + batch_number, + local.is_pre_gateway(), + ) + .with_context(|| { + format!("failed extracting commit data for transaction {commit_tx_hash:?}") + }) + .map_err(CheckError::Validation)?; local .verify_commitment(&commitment) .map_err(CheckError::Validation) @@ -450,6 +464,7 @@ impl ConsistencyChecker { commit_tx_input_data: &[u8], commit_function: ðabi::Function, batch_number: L1BatchNumber, + pre_gateway: bool, ) -> anyhow::Result { let expected_solidity_selector = commit_function.short_signature(); let actual_solidity_selector = &commit_tx_input_data[..4]; @@ -461,11 +476,40 @@ impl ConsistencyChecker { let mut commit_input_tokens = commit_function .decode_input(&commit_tx_input_data[4..]) .context("Failed decoding calldata for L1 commit function")?; - let mut commitments = commit_input_tokens - .pop() - .context("Unexpected signature for L1 commit function")? 
- .into_array() - .context("Unexpected signature for L1 commit function")?; + let mut commitments: Vec; + if pre_gateway { + commitments = commit_input_tokens + .pop() + .context("Unexpected signature for L1 commit function")? + .into_array() + .context("Unexpected signature for L1 commit function")?; + } else { + let commitments_popped = commit_input_tokens + .pop() + .context("Unexpected signature for L1 commit function 1")?; + let commitment_bytes = match commitments_popped { + Token::Bytes(arr) => arr, + _ => anyhow::bail!("Unexpected signature for L1 commit function 2"), + }; + let (version, encoded_data) = commitment_bytes.split_at(1); + assert_eq!(version[0], SUPPORTED_ENCODING_VERSION); + let decoded_data = ethabi::decode( + &[ + StoredBatchInfo::schema(), + ParamType::Array(Box::new(CommitBatchInfo::schema())), + ], // types expected (e.g., Token::Array) + encoded_data, + ) + .expect("Decoding failed"); + // let mut commitments; + if let [_, Token::Array(batch_commitments)] = &decoded_data[..] { + // Now you have access to `stored_batch_info` and `l1_batches_to_commit` + // Process them as needed + commitments = batch_commitments.clone(); + } else { + panic!("Unexpected data format"); + } + } // Commit transactions usually publish multiple commitments at once, so we need to find // the one that corresponds to the batch we're checking. @@ -473,15 +517,15 @@ impl ConsistencyChecker { .first() .context("L1 batch commitment is empty")?; let ethabi::Token::Tuple(first_batch_commitment) = first_batch_commitment else { - anyhow::bail!("Unexpected signature for L1 commit function"); + anyhow::bail!("Unexpected signature for L1 commit function 3"); }; let first_batch_number = first_batch_commitment .first() - .context("Unexpected signature for L1 commit function")?; + .context("Unexpected signature for L1 commit function 4")?; let first_batch_number = first_batch_number .clone() .into_uint() - .context("Unexpected signature for L1 commit function")?; + .context("Unexpected signature for L1 commit function 5")?; let first_batch_number = usize::try_from(first_batch_number) .map_err(|_| anyhow::anyhow!("Integer overflow for L1 batch number"))?; // ^ `TryFrom` has `&str` error here, so we can't use `.context()`. 
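// --- Illustrative sketch (editor's note, not part of the patch) ------------------------
// Round trip of the post-gateway commit calldata that the aggregator builds in
// `CommitBatches::into_tokens` and that `ConsistencyChecker::extract_commit_data`
// unpacks above: a one-byte encoding version followed by
// `abi.encode(StoredBatchInfo, CommitBatchInfo[])`. The token arguments are hypothetical
// stand-ins for values produced by the corresponding `into_token()` calls.
use zksync_l1_contract_interface::i_executor::structures::{
    CommitBatchInfo, StoredBatchInfo, SUPPORTED_ENCODING_VERSION,
};
use zksync_types::ethabi::{self, ParamType, Token};

fn encode_commit_data(stored_batch_info: Token, batches_to_commit: Vec<Token>) -> Vec<u8> {
    // ABI-encode the pair and prepend the version byte, as done for `pre_gateway == false`.
    let encoded = ethabi::encode(&[stored_batch_info, Token::Array(batches_to_commit)]);
    [vec![SUPPORTED_ENCODING_VERSION], encoded].concat()
}

fn decode_commit_data(commit_data: &[u8]) -> anyhow::Result<Vec<Token>> {
    anyhow::ensure!(!commit_data.is_empty(), "empty commit data");
    // Strip the version byte, then decode with the `schema()` helpers added in this patch.
    let (version, encoded) = commit_data.split_at(1);
    anyhow::ensure!(
        version[0] == SUPPORTED_ENCODING_VERSION,
        "unsupported encoding version"
    );
    let decoded = ethabi::decode(
        &[
            StoredBatchInfo::schema(),
            ParamType::Array(Box::new(CommitBatchInfo::schema())),
        ],
        encoded,
    )?;
    match &decoded[..] {
        [_, Token::Array(batches)] => Ok(batches.clone()),
        _ => anyhow::bail!("unexpected commit data layout"),
    }
}
// ---------------------------------------------------------------------------------------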
diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index b210ae6213d1..07e750b41c36 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -8,7 +8,7 @@ use tokio::sync::mpsc; use zksync_config::GenesisConfig; use zksync_dal::Connection; use zksync_eth_client::{clients::MockSettlementLayer, Options}; -use zksync_l1_contract_interface::{i_executor::methods::CommitBatches, Tokenizable, Tokenize}; +use zksync_l1_contract_interface::{i_executor::methods::CommitBatches, Tokenizable}; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, l1_batch_metadata_to_commitment_artifacts, @@ -67,7 +67,7 @@ pub(crate) fn build_commit_tx_input_data( pubdata_da: PubdataDA::Calldata, mode, } - .into_tokens(); + .into_tokens(protocol_version.is_pre_gateway()); if protocol_version.is_pre_boojum() { PRE_BOOJUM_COMMIT_FUNCTION.encode_input(&tokens).unwrap() @@ -163,6 +163,7 @@ fn build_commit_tx_input_data_is_correct(commitment_mode: L1BatchCommitmentMode) &commit_tx_input_data, commit_function, batch.header.number, + false, ) .unwrap(); assert_eq!( @@ -247,6 +248,7 @@ fn extracting_commit_data_for_pre_boojum_batch() { commit_tx_input_data, &PRE_BOOJUM_COMMIT_FUNCTION, L1BatchNumber(200_000), + true, ) .unwrap(); diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index c1dd122b0931..0d5acc624d09 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -433,7 +433,13 @@ impl EthTxAggregator { pubdata_da: *pubdata_da, mode: self.aggregator.mode(), }; - let commit_data_base = commit_batches.into_tokens(); + let commit_data_base = commit_batches.into_tokens( + l1_batches[0] + .header + .protocol_version + .unwrap() + .is_pre_gateway(), + ); args.extend(commit_data_base); diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs index 967bc83b11c3..21531a0eba3e 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -38,6 +38,7 @@ pub struct OutputHandlerLayer { l2_shared_bridge_addr: Address, l2_block_seal_queue_capacity: usize, l2_native_token_vault_proxy_addr: Address, + l2_legacy_shared_bridge_addr: Address, /// Whether transactions should be pre-inserted to DB. /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB /// before they are included into L2 blocks. 
@@ -67,12 +68,14 @@ impl OutputHandlerLayer { pub fn new( l2_shared_bridge_addr: Address, l2_native_token_vault_proxy_addr: Address, + l2_legacy_shared_bridge_addr: Address, l2_block_seal_queue_capacity: usize, ) -> Self { Self { l2_shared_bridge_addr, l2_block_seal_queue_capacity, l2_native_token_vault_proxy_addr, + l2_legacy_shared_bridge_addr, pre_insert_txs: false, protective_reads_persistence_enabled: false, } @@ -113,6 +116,7 @@ impl WiringLayer for OutputHandlerLayer { persistence_pool.clone(), self.l2_shared_bridge_addr, self.l2_native_token_vault_proxy_addr, + self.l2_legacy_shared_bridge_addr, self.l2_block_seal_queue_capacity, ); if self.pre_insert_txs { diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 685b78e52ac0..82d70dfa6644 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -110,6 +110,7 @@ impl StateKeeperHandles { pool.clone(), Address::repeat_byte(1), Address::default(), + Address::repeat_byte(13), 5, ); let tree_writes_persistence = TreeWritesPersistence::new(pool.clone()); diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index d8d126255426..cdca16b353fe 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -31,6 +31,7 @@ pub struct StateKeeperPersistence { pool: ConnectionPool, l2_shared_bridge_addr: Address, l2_native_token_vault_proxy_addr: Address, + l2_legacy_shared_bridge_addr: Address, pre_insert_txs: bool, insert_protective_reads: bool, commands_sender: mpsc::Sender>, @@ -48,6 +49,7 @@ impl StateKeeperPersistence { pool: ConnectionPool, l2_shared_bridge_addr: Address, l2_native_token_vault_proxy_addr: Address, + l2_legacy_shared_bridge_addr: Address, mut command_capacity: usize, ) -> (Self, L2BlockSealerTask) { let is_sync = command_capacity == 0; @@ -64,6 +66,7 @@ impl StateKeeperPersistence { pool, l2_shared_bridge_addr, l2_native_token_vault_proxy_addr, + l2_legacy_shared_bridge_addr, pre_insert_txs: false, insert_protective_reads: true, commands_sender, @@ -163,6 +166,7 @@ impl StateKeeperOutputHandler for StateKeeperPersistence { let command = updates_manager.seal_l2_block_command( self.l2_shared_bridge_addr, self.l2_native_token_vault_proxy_addr, + self.l2_legacy_shared_bridge_addr, self.pre_insert_txs, ); self.submit_l2_block(command).await; @@ -182,6 +186,7 @@ impl StateKeeperOutputHandler for StateKeeperPersistence { self.pool.clone(), self.l2_shared_bridge_addr, self.l2_native_token_vault_proxy_addr, + self.l2_legacy_shared_bridge_addr, self.insert_protective_reads, ) .await @@ -403,6 +408,7 @@ mod tests { pool.clone(), Address::default(), Address::default(), + Address::default(), l2_block_sealer_capacity, ); let mut output_handler = OutputHandler::new(Box::new(persistence)) @@ -537,8 +543,13 @@ mod tests { .unwrap(); drop(storage); - let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::default(), Address::default(), 1); + let (mut persistence, l2_block_sealer) = StateKeeperPersistence::new( + pool.clone(), + Address::default(), + Address::default(), + Address::default(), + 1, + ); persistence = persistence.with_tx_insertion().without_protective_reads(); let mut output_handler = OutputHandler::new(Box::new(persistence)); tokio::spawn(l2_block_sealer.run()); @@ -576,13 +587,22 @@ mod tests { #[tokio::test] async fn l2_block_sealer_handle_blocking() { let pool = ConnectionPool::constrained_test_pool(1).await; - let (mut 
persistence, mut sealer) = - StateKeeperPersistence::new(pool, Address::default(), Address::default(), 1); + let (mut persistence, mut sealer) = StateKeeperPersistence::new( + pool, + Address::default(), + Address::default(), + Address::default(), + 1, + ); // The first command should be successfully submitted immediately. let mut updates_manager = create_updates_manager(); - let seal_command = - updates_manager.seal_l2_block_command(Address::default(), Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command( + Address::default(), + Address::default(), + Address::default(), + false, + ); persistence.submit_l2_block(seal_command).await; // The second command should lead to blocking @@ -590,8 +610,12 @@ mod tests { timestamp: 2, virtual_blocks: 1, }); - let seal_command = - updates_manager.seal_l2_block_command(Address::default(), Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command( + Address::default(), + Address::default(), + Address::default(), + false, + ); { let submit_future = persistence.submit_l2_block(seal_command); futures::pin_mut!(submit_future); @@ -619,8 +643,12 @@ mod tests { timestamp: 3, virtual_blocks: 1, }); - let seal_command = - updates_manager.seal_l2_block_command(Address::default(), Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command( + Address::default(), + Address::default(), + Address::default(), + false, + ); persistence.submit_l2_block(seal_command).await; let command = sealer.commands_receiver.recv().await.unwrap(); command.completion_sender.send(()).unwrap(); @@ -630,13 +658,19 @@ mod tests { #[tokio::test] async fn l2_block_sealer_handle_parallel_processing() { let pool = ConnectionPool::constrained_test_pool(1).await; - let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Address::default(), Address::default(), 5); + let (mut persistence, mut sealer) = StateKeeperPersistence::new( + pool, + Address::default(), + Address::default(), + Address::default(), + 5, + ); // 5 L2 block sealing commands can be submitted without blocking. 
let mut updates_manager = create_updates_manager(); for i in 1..=5 { let seal_command = updates_manager.seal_l2_block_command( + Address::default(), Address::default(), Address::default(), false, diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 490bb4743fbe..608e7f78f4b7 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -7,7 +7,7 @@ use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_types::{ ethabi, tokens::{TokenInfo, TokenMetadata}, - Address, L2BlockNumber, H256, + Address, L2BlockNumber, H160, H256, }; use zksync_utils::h256_to_account_address; @@ -333,11 +333,12 @@ impl L2BlockSealSubtask for InsertTokensSubtask { connection: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { let is_fictive = command.is_l2_block_fictive(); + let mut deployer_address = command.l2_native_token_vault_proxy_addr; + if command.l2_legacy_shared_bridge_addr != H160::zero() { + deployer_address = command.l2_legacy_shared_bridge_addr; + } let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::ExtractAddedTokens, is_fictive); - let added_tokens = extract_added_tokens( - command.l2_native_token_vault_proxy_addr, - &command.l2_block.events, - ); + let added_tokens = extract_added_tokens(deployer_address, &command.l2_block.events); progress.observe(added_tokens.len()); @@ -559,6 +560,7 @@ mod tests { protocol_version: Some(ProtocolVersionId::latest()), l2_shared_bridge_addr: Default::default(), l2_native_token_vault_proxy_addr: Default::default(), + l2_legacy_shared_bridge_addr: Default::default(), pre_insert_txs: false, }; diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index c5e1769f26be..0894ace79047 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -48,6 +48,7 @@ impl UpdatesManager { pool: ConnectionPool, l2_shared_bridge_addr: Address, l2_native_token_vault_proxy_addr: Address, + l2_legacy_shared_bridge_addr: Address, insert_protective_reads: bool, ) -> anyhow::Result<()> { let started_at = Instant::now(); @@ -62,6 +63,7 @@ impl UpdatesManager { let l2_block_command = self.seal_l2_block_command( l2_shared_bridge_addr, l2_native_token_vault_proxy_addr, + l2_legacy_shared_bridge_addr, false, // fictive L2 blocks don't have txs, so it's fine to pass `false` here. 
); diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 53c19f3f2e16..e487efe06157 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -287,6 +287,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { protocol_version: Some(ProtocolVersionId::latest()), l2_shared_bridge_addr: Address::default(), l2_native_token_vault_proxy_addr: Address::default(), + l2_legacy_shared_bridge_addr: Address::default(), pre_insert_txs: false, }; connection_pool @@ -378,6 +379,7 @@ async fn processing_events_when_sealing_l2_block() { protocol_version: Some(ProtocolVersionId::latest()), l2_shared_bridge_addr: Address::default(), l2_native_token_vault_proxy_addr: Address::default(), + l2_legacy_shared_bridge_addr: Address::default(), pre_insert_txs: false, }; pool.connection() @@ -470,6 +472,7 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom connection_pool.clone(), Address::default(), Address::default(), + Address::default(), 0, ); tokio::spawn(l2_block_sealer.run()); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 09c8635a7a36..b640bc8f66aa 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -87,6 +87,7 @@ impl UpdatesManager { &self, l2_shared_bridge_addr: Address, l2_native_token_vault_proxy_addr: Address, + l2_legacy_shared_bridge_addr: Address, pre_insert_txs: bool, ) -> L2BlockSealCommand { L2BlockSealCommand { @@ -101,6 +102,7 @@ impl UpdatesManager { protocol_version: Some(self.protocol_version), l2_shared_bridge_addr, l2_native_token_vault_proxy_addr, + l2_legacy_shared_bridge_addr, pre_insert_txs, } } @@ -213,6 +215,7 @@ pub struct L2BlockSealCommand { pub protocol_version: Option, pub l2_shared_bridge_addr: Address, pub l2_native_token_vault_proxy_addr: Address, + pub l2_legacy_shared_bridge_addr: Address, pub pubdata_params: PubdataParams, /// Whether transactions should be pre-inserted to DB. 
/// Should be set to `true` for EN's IO as EN doesn't store transactions in DB diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 0e9b863d8e16..c6e94037bb8b 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -4,7 +4,7 @@ "license": "MIT", "private": true, "scripts": { - "test": "zk f jest --forceExit --verbose --testTimeout 120000", + "test": "zk f jest --forceExit --verbose --testTimeout 150000", "long-running-test": "zk f jest", "fee-test": "RUN_FEE_TEST=1 zk f jest -- fees.test.ts", "api-test": "zk f jest -- api/web3.test.ts api/debug.test.ts", diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 05c2fa9729db..7b632c3ae3a4 100644 --- a/etc/env/base/chain.toml +++ b/etc/env/base/chain.toml @@ -90,8 +90,8 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 save_call_traces = true -bootloader_hash = "0x010008c79a8fece61d5d29508af0214834522fb17f3419f7df7400cd2776a9d5" -default_aa_hash = "0x0100055da05bf3eb2d670dec0f54ebbdacdfc0dba488f0c0b57738a69127a5d0" +bootloader_hash = "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf" +default_aa_hash = "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e" protective_reads_persistence_enabled = false diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index e9d25df5f006..bf69fd48e7bf 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -17,7 +17,6 @@ DIAMOND_PROXY_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L1_MULTICALL3_ADDR = "0xcA11bde05977b3631167028862bE2a173976CA11" L1_ERC20_BRIDGE_PROXY_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L1_ERC20_BRIDGE_IMPL_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" -L2_ERC20_BRIDGE_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L2_TESTNET_PAYMASTER_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L1_ALLOW_LIST_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" CREATE2_FACTORY_ADDR = "0xce0042B868300000d44A59004Da54A005ffdcf9f" @@ -28,8 +27,6 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c" GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" -GENESIS_ROOT = "0x04c03cdb2847bb11d946d495101862b7acb920550d0a0d8f65ad925148b85da2" -GENESIS_BATCH_COMMITMENT = "0x96ab06d20d1bdddb6a22277c2a1007b2329eff8d3152a6cec94018a8e2705a95" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_PROTOCOL_VERSION = "25" @@ -41,6 +38,9 @@ L2_WETH_BRIDGE_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L2_WETH_TOKEN_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L2_WETH_TOKEN_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" BLOB_VERSIONED_HASH_RETRIEVER_ADDR = "0x0000000000000000000000000000000000000000" + +GENESIS_ROOT = "0x09e68951458b18c24ae5f4100160b53c4888c9b3c3c1859cc674bc02236675ad" +GENESIS_BATCH_COMMITMENT = "0x7238eab6a0e9f5bb84421feae6b6b9ae80816d490c875d29ff3ded375a3e078f" GENESIS_ROLLUP_LEAF_INDEX = "64" # Ecosystem-wide params @@ -63,18 +63,25 @@ L2_PROXY_ADMIN_ADDR = "0x0000000000000000000000000000000000000000" BASE_TOKEN_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" BASE_TOKEN_BRIDGE_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" 
GENESIS_UPGRADE_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" -MAX_NUMBER_OF_HYPERCHAINS = 100 +MAX_NUMBER_OF_ZK_CHAINS = 100 L1_SHARED_BRIDGE_PROXY_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" -L2_SHARED_BRIDGE_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" -L2_SHARED_BRIDGE_IMPL_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L1_NATIVE_TOKEN_VAULT_IMPL_ADDR ="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" L1_NATIVE_TOKEN_VAULT_PROXY_ADDR ="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" -L2_NATIVE_TOKEN_VAULT_IMPL_ADDR ="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" -L2_NATIVE_TOKEN_VAULT_PROXY_ADDR ="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" -STM_DEPLOYMENT_TRACKER_IMPL_ADDR ="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" -STM_DEPLOYMENT_TRACKER_PROXY_ADDR ="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +L2_NATIVE_TOKEN_VAULT_IMPL_ADDR = "0x0000000000000000000000000000000000010004" +L2_NATIVE_TOKEN_VAULT_PROXY_ADDR = "0x0000000000000000000000000000000000010004" +L2_SHARED_BRIDGE_IMPL_ADDR = "0x0000000000000000000000000000000000010003" +L2_SHARED_BRIDGE_ADDR = "0x0000000000000000000000000000000000010003" +L2_ERC20_BRIDGE_ADDR = "0x0000000000000000000000000000000000010003" +CTM_DEPLOYMENT_TRACKER_IMPL_ADDR ="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +CTM_DEPLOYMENT_TRACKER_PROXY_ADDR ="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" MESSAGE_ROOT_IMPL_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" MESSAGE_ROOT_PROXY_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +L1_NULLIFIER_IMPL_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +L1_NULLIFIER_PROXY_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +L1_BRIDGED_STANDARD_ERC20_IMPL_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +L1_BRIDGED_TOKEN_BEACON_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +L2_LEGACY_SHARED_BRIDGE_IMPL_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +L2_LEGACY_SHARED_BRIDGE_ADDR = "0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" FRI_RECURSION_LEAF_LEVEL_VK_HASH = "0xf9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c6" FRI_RECURSION_NODE_LEVEL_VK_HASH = "0xf520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8" FRI_RECURSION_SCHEDULER_LEVEL_VK_HASH = "0xe6ba9d6b042440c480fa1c7182be32387db6e90281e82f37398d3f98f63f098a" @@ -83,6 +90,7 @@ SHARED_BRIDGE_UPGRADE_STORAGE_SWITCH = 0 ERA_CHAIN_ID = 9 ERA_DIAMOND_PROXY_ADDR = "0x0000000000000000000000000000000000000000" CHAIN_ADMIN_ADDR = "0x0000000000000000000000000000000000000000" +CTM_ASSET_INFO = "0xf9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c6" L1_CHAIN_ID = 9 [contracts.test] diff --git a/infrastructure/zk/src/contract.ts b/infrastructure/zk/src/contract.ts index 1b4bba6185c6..bc3b22726525 100644 --- a/infrastructure/zk/src/contract.ts +++ b/infrastructure/zk/src/contract.ts @@ -47,7 +47,7 @@ const syncLayerEnvVars = [ // 'GATEWAY_L1_SHARED_BRIDGE_PROXY_ADDR', // 'GATEWAY_L1_ERC20_BRIDGE_IMPL_ADDR', // 'GATEWAY_L1_ERC20_BRIDGE_PROXY_ADDR', - 'CONTRACTS_STM_ASSET_INFO', + 'GATEWAY_CTM_ASSET_INFO', 'GATEWAY_DIAMOND_PROXY_ADDR', 'GATEWAY_L1_RELAYED_SL_DA_VALIDATOR' @@ -110,7 +110,7 @@ async function migrateToSyncLayer() { console.log('Writing to', envFile); // FIXME: consider creating new sync_layer_* variable. 
- updateContractsEnv(envFile, migrationLog, ['GATEWAY_DIAMOND_PROXY_ADDR']); + updateContractsEnv(envFile, migrationLog, ['GATEWAY_DIAMOND_PROXY_ADDR', 'GATEWAY_STM_ASSET_INFO']); fs.writeFileSync('backup_diamond.txt', process.env.CONTRACTS_DIAMOND_PROXY_ADDR!); env.modify('CONTRACTS_DIAMOND_PROXY_ADDR', process.env.GATEWAY_DIAMOND_PROXY_ADDR!, envFile, true); env.modify('ETH_SENDER_SENDER_PUBDATA_SENDING_MODE', 'RelayedL2Calldata', envFile, true); @@ -228,8 +228,6 @@ export async function deployL2(args: any[] = [], includePaymaster?: boolean): Pr await utils.spawn(`yarn l2-contracts build`); } - await utils.spawn(`yarn l2-contracts deploy-shared-bridge-on-l2 ${args.join(' ')} | tee deployL2.log`); - if (includePaymaster) { await utils.spawn(`yarn l2-contracts deploy-testnet-paymaster ${args.join(' ')} | tee -a deployL2.log`); } @@ -238,7 +236,6 @@ export async function deployL2(args: any[] = [], includePaymaster?: boolean): Pr let l2DeployLog = fs.readFileSync('deployL2.log').toString(); const l2DeploymentEnvVars = [ - 'CONTRACTS_L2_SHARED_BRIDGE_ADDR', 'CONTRACTS_L2_TESTNET_PAYMASTER_ADDR', 'CONTRACTS_L2_WETH_TOKEN_IMPL_ADDR', 'CONTRACTS_L2_WETH_TOKEN_PROXY_ADDR', @@ -250,11 +247,9 @@ export async function deployL2(args: any[] = [], includePaymaster?: boolean): Pr // for testnet and development purposes it is ok to deploy contracts form L1. export async function deployL2ThroughL1({ includePaymaster = true, - localLegacyBridgeTesting, deploymentMode }: { includePaymaster: boolean; - localLegacyBridgeTesting?: boolean; deploymentMode: DeploymentMode; }): Promise { await utils.confirmAction(); @@ -275,12 +270,6 @@ export async function deployL2ThroughL1({ `yarn l2-contracts deploy-l2-da-validator-on-l2-through-l1 ${daArgs.join(' ')} | tee deployL2.log` ); - await utils.spawn( - `yarn l2-contracts deploy-shared-bridge-on-l2-through-l1 ${args.join(' ')} ${ - localLegacyBridgeTesting ? 
'--local-legacy-bridge-testing' : '' - } | tee -a deployL2.log` - ); - if (includePaymaster) { await utils.spawn( `yarn l2-contracts deploy-testnet-paymaster-through-l1 ${args.join(' ')} | tee -a deployL2.log` @@ -293,25 +282,15 @@ export async function deployL2ThroughL1({ let l2DeployLog = fs.readFileSync('deployL2.log').toString(); const l2DeploymentEnvVars = [ - 'CONTRACTS_L2_SHARED_BRIDGE_ADDR', - 'CONTRACTS_L2_ERC20_BRIDGE_ADDR', 'CONTRACTS_L2_TESTNET_PAYMASTER_ADDR', 'CONTRACTS_L2_WETH_TOKEN_IMPL_ADDR', 'CONTRACTS_L2_WETH_TOKEN_PROXY_ADDR', 'CONTRACTS_L2_DEFAULT_UPGRADE_ADDR', 'CONTRACTS_L1_DA_VALIDATOR_ADDR', - 'CONTRACTS_L2_DA_VALIDATOR_ADDR', - 'CONTRACTS_L2_NATIVE_TOKEN_VAULT_IMPL_ADDR', - 'CONTRACTS_L2_NATIVE_TOKEN_VAULT_PROXY_ADDR', - 'CONTRACTS_L2_PROXY_ADMIN_ADDR' + 'CONTRACTS_L2_DA_VALIDATOR_ADDR' ]; updateContractsEnv(`etc/env/l2-inits/${process.env.ZKSYNC_ENV!}.init.env`, l2DeployLog, l2DeploymentEnvVars); // erc20 bridge is now deployed as shared bridge, but we still need the config var: - updateContractsEnv( - `etc/env/l2-inits/${process.env.ZKSYNC_ENV!}.init.env`, - `CONTRACTS_L2_ERC20_BRIDGE_ADDR=${process.env.CONTRACTS_L2_SHARED_BRIDGE_ADDR}`, - l2DeploymentEnvVars - ); } async function _deployL1(onlyVerifier: boolean): Promise { @@ -367,9 +346,12 @@ async function _deployL1(onlyVerifier: boolean): Promise { 'CONTRACTS_L1_ROLLUP_DA_VALIDATOR', 'CONTRACTS_L1_VALIDIUM_DA_VALIDATOR', - 'CONTRACTS_STM_DEPLOYMENT_TRACKER_IMPL_ADDR', - 'CONTRACTS_STM_DEPLOYMENT_TRACKER_PROXY_ADDR', - 'CONTRACTS_STM_ASSET_INFO', + 'CONTRACTS_CTM_DEPLOYMENT_TRACKER_IMPL_ADDR', + 'CONTRACTS_CTM_DEPLOYMENT_TRACKER_PROXY_ADDR', + 'CONTRACTS_CTM_ASSET_INFO', + + 'CONTRACTS_L1_NULLIFIER_IMPL_ADDR', + 'CONTRACTS_L1_NULLIFIER_PROXY_ADDR', /// temporary: 'CONTRACTS_HYPERCHAIN_UPGRADE_ADDR' @@ -404,11 +386,13 @@ export async function erc20BridgeFinish(args: any[] = []): Promise { await utils.spawn(`yarn l1-contracts erc20-finish-deployment-on-chain ${args.join(' ')} | tee -a deployL2.log`); } -export async function registerHyperchain({ +export async function registerZKChain({ baseTokenName, + localLegacyBridgeTesting, deploymentMode }: { baseTokenName?: string; + localLegacyBridgeTesting?: boolean; deploymentMode?: DeploymentMode; }): Promise { await utils.confirmAction(); @@ -434,10 +418,20 @@ export async function registerHyperchain({ tokenMultiplierSetterAddress ? `--token-multiplier-setter-address ${tokenMultiplierSetterAddress}` : '', '--use-governance' ]; - await utils.spawn(`yarn l1-contracts register-hyperchain ${args.join(' ')} | tee registerHyperchain.log`); - const deployLog = fs.readFileSync('registerHyperchain.log').toString(); + await utils.spawn( + `yarn l1-contracts register-zk-chain ${args.join(' ')} ${ + localLegacyBridgeTesting ? 
'--local-legacy-bridge-testing' : '' + } | tee registerZKChain.log` + ); + const deployLog = fs.readFileSync('registerZKChain.log').toString(); - const l2EnvVars = ['CHAIN_ETH_ZKSYNC_NETWORK_ID', 'CONTRACTS_DIAMOND_PROXY_ADDR', 'CONTRACTS_BASE_TOKEN_ADDR']; + const l2EnvVars = [ + 'CHAIN_ETH_ZKSYNC_NETWORK_ID', + 'CONTRACTS_DIAMOND_PROXY_ADDR', + 'CONTRACTS_BASE_TOKEN_ADDR', + 'CONTRACTS_L2_LEGACY_SHARED_BRIDGE_ADDR', + 'CONTRACTS_CTM_ASSET_INFO' + ]; const l2EnvFile = `etc/env/l2-inits/${process.env.ZKSYNC_ENV!}.init.env`; console.log('Writing to', l2EnvFile); @@ -573,7 +567,7 @@ command '--token-multiplier-setter-address ', 'address of the token multiplier setter' ) - .action(registerHyperchain); + .action(registerZKChain); command .command('deploy-l2-through-l1') .description('deploy l2 through l1') diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index 3a4e817dc3d3..2fcd0b3b0aba 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -117,13 +117,13 @@ const initHyperchain = async ({ localLegacyBridgeTesting, deploymentMode }: InitHyperchainOptions): Promise => { - await announced('Registering Hyperchain', contract.registerHyperchain({ baseTokenName, deploymentMode })); - await announced('Reloading env', env.reload()); - await announced('Running server genesis setup', server.genesisFromSources()); await announced( - 'Deploying L2 contracts', - contract.deployL2ThroughL1({ includePaymaster, localLegacyBridgeTesting, deploymentMode }) + 'Registering ZKChain', + contract.registerZKChain({ baseTokenName, localLegacyBridgeTesting, deploymentMode }) ); + await announced('Reloading env', env.reload()); + await announced('Running server genesis setup', server.genesisFromSources()); + await announced('Deploying L2 contracts', contract.deployL2ThroughL1({ includePaymaster, deploymentMode })); }; const makeEraChainIdSameAsCurrent = async () => { @@ -255,12 +255,16 @@ export const initHyperCmdAction = async ({ deploymentMode }); }; - -export const configCmdAction = async (): Promise => { - await Promise.all([ - announced('Building L1 L2 contracts', contract.build(false)), - announced('Compile L2 system contracts', compiler.compileAll()) - ]); +type ConfigCmdActionOptions = { + skipContractCompilationOverride?: boolean; +}; +export const configCmdAction = async ({ skipContractCompilationOverride }: ConfigCmdActionOptions): Promise => { + if (!skipContractCompilationOverride) { + await Promise.all([ + announced('Building L1 L2 contracts', contract.build(false)), + announced('Compile L2 system contracts', compiler.compileAll()) + ]); + } await initDatabase(true); await announced('Running server genesis setup', server.genesisFromSources()); }; @@ -305,4 +309,4 @@ initCommand .option('--run-observability', 'run observability suite') .action(initHyperCmdAction); -initCommand.command('config').action(configCmdAction); +initCommand.command('config').option('--skip-contract-compilation-override').action(configCmdAction);
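// --- Illustrative sketch (editor's note, not part of the patch) ------------------------
// The gate used throughout this change set to choose between the two calldata shapes.
// `is_pre_gateway()` is introduced in `core/lib/basic_types` above; everything else in
// this snippet is an assumption for illustration only.
use zksync_types::ProtocolVersionId;

fn uses_legacy_commit_encoding(version: ProtocolVersionId) -> bool {
    // Up to `Version24` the aggregator keeps the old `(StoredBatchInfo, CommitBatchInfo[])`
    // token list; later versions switch to `(from_batch, to_batch, versioned bytes)`.
    version.is_pre_gateway()
}
// ---------------------------------------------------------------------------------------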