feat(WIP): Validium pubdata abstraction as enum #78

Closed
5 changes: 3 additions & 2 deletions core/bin/external_node/src/main.rs
@@ -7,7 +7,7 @@ use metrics::EN_METRICS;
use prometheus_exporter::PrometheusExporterConfig;
use tokio::{sync::watch, task, time::sleep};
use zksync_basic_types::{Address, L2ChainId};
use zksync_config::configs::database::MerkleTreeMode;
use zksync_config::configs::{database::MerkleTreeMode, eth_sender::PubdataStorageMode};
use zksync_core::{
api_server::{
execution_sandbox::VmConcurrencyLimiter,
@@ -228,7 +228,8 @@ async fn init_tasks(
.context("failed to build a tree_pool")?;
let tree_handle = task::spawn(metadata_calculator.run(tree_pool, tree_stop_receiver));

let consistency_checker_handle = tokio::spawn(consistency_checker.run(stop_receiver.clone()));
let consistency_checker_handle =
tokio::spawn(consistency_checker.run(stop_receiver.clone(), &PubdataStorageMode::Rollup));

let updater_handle = task::spawn(batch_status_updater.run(stop_receiver.clone()));
let sk_handle = task::spawn(state_keeper.run());
9 changes: 9 additions & 0 deletions core/lib/config/src/configs/eth_sender.rs
@@ -35,6 +35,7 @@ impl ETHSenderConfig {
l1_batch_min_age_before_execute_seconds: None,
max_acceptable_priority_fee_in_gwei: 100000000000,
proof_loading_mode: ProofLoadingMode::OldProofFromDb,
pubdata_storage_mode: PubdataStorageMode::Rollup,
},
gas_adjuster: GasAdjusterConfig {
default_priority_fee_per_gas: 1000000000,
@@ -63,6 +64,12 @@ pub enum ProofLoadingMode {
FriProofFromGcs,
}

#[derive(Debug, Deserialize, Clone, Copy, PartialEq)]
pub enum PubdataStorageMode {
Rollup,
Validium,
}

#[derive(Debug, Deserialize, Clone, PartialEq)]
pub struct SenderConfig {
pub aggregated_proof_sizes: Vec<usize>,
Expand Down Expand Up @@ -96,6 +103,8 @@ pub struct SenderConfig {

/// The mode in which proofs are loaded: the old proof from the DB or the FRI proof from GCS.
pub proof_loading_mode: ProofLoadingMode,

pub pubdata_storage_mode: PubdataStorageMode,
}

impl SenderConfig {
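For reference, a minimal sketch of how the new variant could be selected from the `ETH_SENDER_SENDER_PUBDATA_STORAGE_MODE` env var exercised in the env_config tests further down. The real config goes through the envy-based deserializer in `core/lib/env_config`; the `FromStr` impl and `mode_from_env` helper here are illustrative only.

use std::str::FromStr;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PubdataStorageMode {
    Rollup,
    Validium,
}

impl FromStr for PubdataStorageMode {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "Rollup" => Ok(Self::Rollup),
            "Validium" => Ok(Self::Validium),
            other => Err(format!("unknown pubdata storage mode: {other}")),
        }
    }
}

fn mode_from_env() -> PubdataStorageMode {
    std::env::var("ETH_SENDER_SENDER_PUBDATA_STORAGE_MODE")
        .ok()
        .and_then(|raw| raw.parse().ok())
        // Rollup remains the default, matching the config defaults above.
        .unwrap_or(PubdataStorageMode::Rollup)
}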
3 changes: 3 additions & 0 deletions core/lib/dal/sqlx-data.json
@@ -0,0 +1,3 @@
{
"db": "PostgreSQL"
}
7 changes: 6 additions & 1 deletion core/lib/env_config/src/eth_sender.rs
@@ -26,7 +26,9 @@ impl FromEnv for GasAdjusterConfig {

#[cfg(test)]
mod tests {
use zksync_config::configs::eth_sender::{ProofLoadingMode, ProofSendingMode};
use zksync_config::configs::eth_sender::{
ProofLoadingMode, ProofSendingMode, PubdataStorageMode,
};

use super::*;
use crate::test_utils::{hash, EnvMutex};
@@ -54,6 +56,8 @@ mod tests {
l1_batch_min_age_before_execute_seconds: Some(1000),
max_acceptable_priority_fee_in_gwei: 100_000_000_000,
proof_loading_mode: ProofLoadingMode::OldProofFromDb,
pubdata_storage_mode: PubdataStorageMode::Rollup,
},
gas_adjuster: GasAdjusterConfig {
default_priority_fee_per_gas: 20000000000,
@@ -98,6 +102,7 @@ mod tests {
ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000"
ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000"
ETH_SENDER_SENDER_PROOF_LOADING_MODE="OldProofFromDb"
ETH_SENDER_SENDER_PUBDATA_STORAGE_MODE="Rollup"
"#;
lock.set_env(config);

5 changes: 3 additions & 2 deletions core/lib/types/src/aggregated_operations.rs
@@ -8,6 +8,7 @@ use zkevm_test_harness::{
witness::oracle::VmWitnessOracle,
};
use zksync_basic_types::{ethabi::Token, L1BatchNumber};
use zksync_config::configs::eth_sender::PubdataStorageMode;

use crate::{commitment::L1BatchWithMetadata, ProtocolVersionId, U256};

@@ -32,12 +33,12 @@ pub struct L1BatchCommitOperation {
}

impl L1BatchCommitOperation {
pub fn get_eth_tx_args(&self) -> Vec<Token> {
pub fn get_eth_tx_args(&self, pubdata_storage_mode: &PubdataStorageMode) -> Vec<Token> {
let stored_batch_info = self.last_committed_l1_batch.l1_header_data();
let l1_batches_to_commit = self
.l1_batches
.iter()
.map(L1BatchWithMetadata::l1_commit_data)
.map(|batch| L1BatchWithMetadata::l1_commit_data(batch, pubdata_storage_mode))
.collect();

vec![stored_batch_info, Token::Array(l1_batches_to_commit)]
31 changes: 21 additions & 10 deletions core/lib/types/src/commitment.rs
@@ -9,6 +9,7 @@
use std::{collections::HashMap, convert::TryFrom};

use serde::{Deserialize, Serialize};
use zksync_config::configs::eth_sender::PubdataStorageMode;
use zksync_mini_merkle_tree::MiniMerkleTree;
use zksync_system_constants::{
L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY, ZKPORTER_IS_AVAILABLE,
@@ -159,7 +160,7 @@ impl L1BatchWithMetadata {
}

/// Encodes the L1Batch into CommitBatchInfo (see IExecutor.sol).
pub fn l1_commit_data(&self) -> Token {
pub fn l1_commit_data(&self, pubdata_storage_mode: &PubdataStorageMode) -> Token {
if self.header.protocol_version.unwrap().is_pre_boojum() {
Token::Tuple(vec![
Token::Uint(U256::from(self.header.number.0)),
@@ -192,6 +193,19 @@
),
])
} else {
// `totalL2ToL1Pubdata`
let total_l2_to_l1_pubdata = match pubdata_storage_mode {
    PubdataStorageMode::Rollup => Token::Bytes(
        self.header
            .pubdata_input
            .clone()
            .unwrap_or_else(|| self.construct_pubdata()),
    ),
    PubdataStorageMode::Validium => Token::Bytes(vec![]),
};
Token::Tuple(vec![
// `batchNumber`
Token::Uint(U256::from(self.header.number.0)),
@@ -228,19 +242,16 @@
),
// `systemLogs`
Token::Bytes(self.metadata.l2_l1_messages_compressed.clone()),
// `totalL2ToL1Pubdata`
Token::Bytes(
self.header
.pubdata_input
.clone()
.unwrap_or(self.construct_pubdata()),
),
total_l2_to_l1_pubdata,
])
}
}

pub fn l1_commit_data_size(&self) -> usize {
crate::ethabi::encode(&[Token::Array(vec![self.l1_commit_data()])]).len()
pub fn l1_commit_data_size(&self, pubdata_storage_mode: &PubdataStorageMode) -> usize {
crate::ethabi::encode(&[Token::Array(
vec![self.l1_commit_data(pubdata_storage_mode)],
)])
.len()
}

/// Packs all pubdata needed for batch commitment in boojum into one bytes array. The packing contains the
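To make the effect of the new branch concrete: under `Validium` the `totalL2ToL1Pubdata` field of `CommitBatchInfo` is encoded as an empty byte string, so the commit calldata no longer carries the pubdata itself. A standalone sketch (the `total_l2_to_l1_pubdata` helper is hypothetical, not part of this diff):

use zksync_basic_types::ethabi::Token;
use zksync_config::configs::eth_sender::PubdataStorageMode;

fn total_l2_to_l1_pubdata(pubdata: Vec<u8>, mode: &PubdataStorageMode) -> Token {
    match mode {
        // Rollup: the pubdata is posted on L1 as part of the commit transaction.
        PubdataStorageMode::Rollup => Token::Bytes(pubdata),
        // Validium: data availability is handled off-chain, so only an empty
        // placeholder is encoded.
        PubdataStorageMode::Validium => Token::Bytes(vec![]),
    }
}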
15 changes: 12 additions & 3 deletions core/lib/zksync_core/src/consistency_checker/mod.rs
@@ -2,6 +2,7 @@ use std::{fmt, time::Duration};

use anyhow::Context as _;
use tokio::sync::watch;
use zksync_config::configs::eth_sender::PubdataStorageMode;
use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION;
use zksync_dal::{ConnectionPool, StorageProcessor};
use zksync_eth_client::{clients::QueryClient, Error as L1ClientError, EthInterface};
@@ -66,6 +67,7 @@ impl LocalL1BatchCommitData {
async fn new(
storage: &mut StorageProcessor<'_>,
batch_number: L1BatchNumber,
pubdata_storage_mode: &PubdataStorageMode,
) -> anyhow::Result<Option<Self>> {
let Some(storage_l1_batch) = storage
.blocks_dal()
@@ -113,7 +115,7 @@

Ok(Some(Self {
is_pre_boojum,
l1_commit_data: l1_batch.l1_commit_data(),
l1_commit_data: l1_batch.l1_commit_data(pubdata_storage_mode),
commit_tx_hash,
}))
}
@@ -247,7 +249,11 @@ impl ConsistencyChecker {
.await?)
}

pub async fn run(mut self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
pub async fn run(
mut self,
mut stop_receiver: watch::Receiver<bool>,
pubdata_storage_mode: &PubdataStorageMode,
) -> anyhow::Result<()> {
// It doesn't make sense to start the checker until we have at least one L1 batch with metadata.
let earliest_l1_batch_number =
wait_for_l1_batch_with_metadata(&self.pool, self.sleep_interval, &mut stop_receiver)
@@ -285,7 +291,10 @@
// The batch might be already committed but not yet processed by the external node's tree
// OR the batch might be processed by the external node's tree but not yet committed.
// We need both.
let Some(local) = LocalL1BatchCommitData::new(&mut storage, batch_number).await? else {
let Some(local) =
LocalL1BatchCommitData::new(&mut storage, batch_number, pubdata_storage_mode)
.await?
else {
tokio::time::sleep(self.sleep_interval).await;
continue;
};
26 changes: 17 additions & 9 deletions core/lib/zksync_core/src/consistency_checker/tests/mod.rs
@@ -43,7 +43,9 @@ fn create_pre_boojum_l1_batch_with_metadata(number: u32) -> L1BatchWithMetadata
}

fn build_commit_tx_input_data(batches: &[L1BatchWithMetadata]) -> Vec<u8> {
let commit_tokens = batches.iter().map(L1BatchWithMetadata::l1_commit_data);
let commit_tokens = batches
.iter()
.map(|batch| L1BatchWithMetadata::l1_commit_data(batch, &PubdataStorageMode::Rollup));
let commit_tokens = ethabi::Token::Array(commit_tokens.collect());

let mut encoded = vec![];
@@ -93,7 +95,10 @@
batch.header.number,
)
.unwrap();
assert_eq!(commit_data, batch.l1_commit_data());
assert_eq!(
commit_data,
batch.l1_commit_data(&PubdataStorageMode::Rollup)
);
}
}

@@ -325,7 +330,7 @@ async fn normal_checker_function(
};

let (stop_sender, stop_receiver) = watch::channel(false);
let checker_task = tokio::spawn(checker.run(stop_receiver));
let checker_task = tokio::spawn(checker.run(stop_receiver, &PubdataStorageMode::Rollup));

// Add new batches to the storage.
for save_action in save_actions_mapper(&l1_batches) {
@@ -399,7 +404,7 @@ async fn checker_processes_pre_boojum_batches(
};

let (stop_sender, stop_receiver) = watch::channel(false);
let checker_task = tokio::spawn(checker.run(stop_receiver));
let checker_task = tokio::spawn(checker.run(stop_receiver, &PubdataStorageMode::Rollup));

// Add new batches to the storage.
for save_action in save_actions_mapper(&l1_batches) {
@@ -469,7 +474,7 @@ async fn checker_functions_after_snapshot_recovery(delay_batch_insertion: bool)
..create_mock_checker(client, pool.clone())
};
let (stop_sender, stop_receiver) = watch::channel(false);
let checker_task = tokio::spawn(checker.run(stop_receiver));
let checker_task = tokio::spawn(checker.run(stop_receiver, &PubdataStorageMode::Rollup));

if delay_batch_insertion {
tokio::time::sleep(Duration::from_millis(10)).await;
@@ -593,10 +598,13 @@ async fn checker_detects_incorrect_tx_data(kind: IncorrectDataKind, snapshot_recovery: bool)
let checker = create_mock_checker(client, pool);
let (_stop_sender, stop_receiver) = watch::channel(false);
// The checker must stop with an error.
tokio::time::timeout(Duration::from_secs(30), checker.run(stop_receiver))
.await
.expect("Timed out waiting for checker to stop")
.unwrap_err();
tokio::time::timeout(
Duration::from_secs(30),
checker.run(stop_receiver, &PubdataStorageMode::Rollup),
)
.await
.expect("Timed out waiting for checker to stop")
.unwrap_err();
}

#[tokio::test]
15 changes: 13 additions & 2 deletions core/lib/zksync_core/src/eth_sender/aggregator.rs
@@ -1,6 +1,8 @@
use std::sync::Arc;

use zksync_config::configs::eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig};
use zksync_config::configs::eth_sender::{
ProofLoadingMode, ProofSendingMode, PubdataStorageMode, SenderConfig,
};
use zksync_contracts::BaseSystemContractsHashes;
use zksync_dal::StorageProcessor;
use zksync_object_store::{ObjectStore, ObjectStoreError};
@@ -157,6 +159,7 @@ impl Aggregator {
&mut self.execute_criteria,
ready_for_execute_batches,
last_sealed_l1_batch,
&self.config.pubdata_storage_mode,
)
.await;

@@ -215,6 +218,7 @@ impl Aggregator {
&mut self.commit_criteria,
ready_for_commit_l1_batches,
last_sealed_batch,
&self.config.pubdata_storage_mode,
)
.await;

@@ -316,6 +320,7 @@ impl Aggregator {
&mut self.proof_criteria,
ready_for_proof_l1_batches,
last_sealed_l1_batch,
&self.config.pubdata_storage_mode,
)
.await?;

@@ -400,11 +405,17 @@ async fn extract_ready_subrange(
publish_criteria: &mut [Box<dyn L1BatchPublishCriterion>],
unpublished_l1_batches: Vec<L1BatchWithMetadata>,
last_sealed_l1_batch: L1BatchNumber,
pubdata_storage_mode: &PubdataStorageMode,
) -> Option<Vec<L1BatchWithMetadata>> {
let mut last_l1_batch: Option<L1BatchNumber> = None;
for criterion in publish_criteria {
let l1_batch_by_criterion = criterion
.last_l1_batch_to_publish(storage, &unpublished_l1_batches, last_sealed_l1_batch)
.last_l1_batch_to_publish(
storage,
&unpublished_l1_batches,
last_sealed_l1_batch,
pubdata_storage_mode,
)
.await;
if let Some(l1_batch) = l1_batch_by_criterion {
last_l1_batch = Some(last_l1_batch.map_or(l1_batch, |number| number.min(l1_batch)));
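The publish criteria now receive the mode because at least the size-based checks depend on it: commit calldata includes pubdata under Rollup but not under Validium. A hedged sketch of such a check (`batch_fits` and `max_data_size` are illustrative; only `l1_commit_data_size` comes from this diff):

use zksync_config::configs::eth_sender::PubdataStorageMode;
use zksync_types::commitment::L1BatchWithMetadata;

fn batch_fits(
    batch: &L1BatchWithMetadata,
    max_data_size: usize,
    mode: &PubdataStorageMode,
) -> bool {
    // Validium batches encode empty pubdata, so they come out much smaller
    // and more of them can be aggregated into a single commit transaction.
    batch.l1_commit_data_size(mode) <= max_data_size
}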
16 changes: 16 additions & 0 deletions core/lib/zksync_core/src/eth_sender/data_provider.rs
@@ -0,0 +1,16 @@
// pub enum DataProvider {
// Rollup(Rollup),
// Validium(Validium),
// }

// pub trait DataProvider {}

// #[derive(Debug)]
// pub struct Rollup {}

// #[derive(Debug)]
// pub struct Validium {}

// impl DataProvider for Rollup {}

// impl DataProvider for Validium {}
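One possible uncommented shape for the stub above, in line with the PR title's enum-based abstraction: dispatch on the variant rather than through trait objects. The `pubdata_bytes` method name is hypothetical.

#[derive(Debug)]
pub enum DataProvider {
    Rollup,
    Validium,
}

impl DataProvider {
    /// Returns the bytes to publish on L1 for a batch's pubdata.
    pub fn pubdata_bytes(&self, pubdata: Vec<u8>) -> Vec<u8> {
        match self {
            // Rollup publishes the full pubdata on L1.
            DataProvider::Rollup => pubdata,
            // Validium publishes nothing; availability is handled off-chain.
            DataProvider::Validium => Vec::new(),
        }
    }
}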
2 changes: 1 addition & 1 deletion core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs
@@ -417,7 +417,7 @@ impl EthTxAggregator {
.as_ref()
.expect("Missing ABI for commitBatches")
};
f.encode_input(&op.get_eth_tx_args())
f.encode_input(&op.get_eth_tx_args(&self.config.pubdata_storage_mode))
}
AggregatedOperation::PublishProofOnchain(op) => {
assert_eq!(contracts_are_pre_boojum, operation_is_pre_boojum);
1 change: 1 addition & 0 deletions core/lib/zksync_core/src/eth_sender/mod.rs
@@ -1,4 +1,5 @@
mod aggregator;
pub mod data_provider;
mod error;
mod eth_tx_aggregator;
mod eth_tx_manager;