Merge branch 'main' into matias-gonz-prometheus-port-witness-generator
matias-gonz authored Jul 8, 2024
2 parents cd83f5a + 312defe commit eade83f
Showing 101 changed files with 2,371 additions and 1,861 deletions.
core/bin/external_node/src/node_builder.rs (12 changes: 4 additions & 8 deletions)
@@ -16,7 +16,7 @@ use zksync_node_framework::{
     implementations::layers::{
         batch_status_updater::BatchStatusUpdaterLayer,
         commitment_generator::CommitmentGeneratorLayer,
-        consensus::{ConsensusLayer, Mode},
+        consensus::ExternalNodeConsensusLayer,
         consistency_checker::ConsistencyCheckerLayer,
         healtcheck_server::HealthCheckLayer,
         l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer,
@@ -41,7 +41,7 @@ use zksync_node_framework::{
             server::{Web3ServerLayer, Web3ServerOptionalConfig},
             tree_api_client::TreeApiClientLayer,
             tx_sender::{PostgresStorageCachesConfig, TxSenderLayer},
-            tx_sink::TxSinkLayer,
+            tx_sink::ProxySinkLayer,
         },
     },
     service::{ZkStackService, ZkStackServiceBuilder},
@@ -209,11 +209,7 @@ impl ExternalNodeBuilder {
         let config = self.config.consensus.clone();
         let secrets =
             config::read_consensus_secrets().context("config::read_consensus_secrets()")?;
-        let layer = ConsensusLayer {
-            mode: Mode::External,
-            config,
-            secrets,
-        };
+        let layer = ExternalNodeConsensusLayer { config, secrets };
         self.node.add_layer(layer);
         Ok(self)
     }
@@ -359,7 +355,7 @@ impl ExternalNodeBuilder {
         )
         .with_whitelisted_tokens_for_aa_cache(true);

-        self.node.add_layer(TxSinkLayer::ProxySink);
+        self.node.add_layer(ProxySinkLayer);
         self.node.add_layer(tx_sender_layer);
         Ok(self)
     }
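The pattern in this file replaces a single layer that branched on a mode value with dedicated per-node layer types (ExternalNodeConsensusLayer, ProxySinkLayer). Below is a minimal, self-contained sketch of that refactoring idea; the trait, enum, and layer names are simplified stand-ins written for illustration, not the actual zksync_node_framework definitions.

```rust
#![allow(dead_code)]

// Illustrative only: simplified stand-ins for the framework's wiring-layer trait.
trait WiringLayer {
    fn layer_name(&self) -> &'static str;
}

// Before: one layer whose behavior branches on a runtime mode value.
enum Mode {
    Main,
    External,
}

struct ConsensusLayer {
    mode: Mode,
}

impl WiringLayer for ConsensusLayer {
    fn layer_name(&self) -> &'static str {
        match self.mode {
            Mode::Main => "main_node_consensus_layer",
            Mode::External => "external_node_consensus_layer",
        }
    }
}

// After: one dedicated layer per node kind, so the choice lives in the type.
struct MainNodeConsensusLayer;
struct ExternalNodeConsensusLayer;

impl WiringLayer for MainNodeConsensusLayer {
    fn layer_name(&self) -> &'static str {
        "main_node_consensus_layer"
    }
}

impl WiringLayer for ExternalNodeConsensusLayer {
    fn layer_name(&self) -> &'static str {
        "external_node_consensus_layer"
    }
}

fn main() {
    let old_style = ConsensusLayer { mode: Mode::External };
    let new_style = ExternalNodeConsensusLayer;
    println!("{} -> {}", old_style.layer_name(), new_style.layer_name());
}
```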
core/bin/zksync_server/src/node_builder.rs (20 changes: 13 additions & 7 deletions)
@@ -25,7 +25,7 @@ use zksync_node_framework::{
         base_token_ratio_provider::BaseTokenRatioProviderLayer,
         circuit_breaker_checker::CircuitBreakerCheckerLayer,
         commitment_generator::CommitmentGeneratorLayer,
-        consensus::{ConsensusLayer, Mode as ConsensusMode},
+        consensus::MainNodeConsensusLayer,
         contract_verification_api::ContractVerificationApiLayer,
         da_dispatcher::DataAvailabilityDispatcherLayer,
         eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer},
@@ -56,7 +56,7 @@ use zksync_node_framework::{
             server::{Web3ServerLayer, Web3ServerOptionalConfig},
             tree_api_client::TreeApiClientLayer,
             tx_sender::{PostgresStorageCachesConfig, TxSenderLayer},
-            tx_sink::TxSinkLayer,
+            tx_sink::MasterPoolSinkLayer,
         },
     },
     service::{ZkStackService, ZkStackServiceBuilder},
@@ -280,7 +280,7 @@ impl MainNodeBuilder {
         };

         // On main node we always use master pool sink.
-        self.node.add_layer(TxSinkLayer::MasterPoolSink);
+        self.node.add_layer(MasterPoolSinkLayer);
         self.node.add_layer(TxSenderLayer::new(
             TxSenderConfig::new(
                 &sk_config,
@@ -445,10 +445,16 @@ impl MainNodeBuilder {
     }

     fn add_consensus_layer(mut self) -> anyhow::Result<Self> {
-        self.node.add_layer(ConsensusLayer {
-            mode: ConsensusMode::Main,
-            config: self.consensus_config.clone(),
-            secrets: self.secrets.consensus.clone(),
+        self.node.add_layer(MainNodeConsensusLayer {
+            config: self
+                .consensus_config
+                .clone()
+                .context("Consensus config has to be provided")?,
+            secrets: self
+                .secrets
+                .consensus
+                .clone()
+                .context("Consensus secrets have to be provided")?,
         });

         Ok(self)
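The main-node variant additionally makes the consensus config and secrets mandatory via anyhow's Context, so a missing value fails wiring with a clear message instead of being silently skipped. A small sketch of that Option-to-error conversion, assuming only the anyhow crate; ConsensusConfig here is a placeholder type, not the real config struct.

```rust
use anyhow::Context as _;

// Placeholder config type, not the real consensus config struct.
#[derive(Clone, Debug)]
struct ConsensusConfig {
    server_addr: String,
}

// Mirrors the `.clone().context(...)?` chain above: a missing Option becomes a
// descriptive error instead of the layer being wired without a config.
fn require_consensus_config(config: &Option<ConsensusConfig>) -> anyhow::Result<ConsensusConfig> {
    config
        .clone()
        .context("Consensus config has to be provided")
}

fn main() {
    let present = Some(ConsensusConfig {
        server_addr: "0.0.0.0:3054".to_string(),
    });
    println!("{:?}", require_consensus_config(&present));

    let missing: Option<ConsensusConfig> = None;
    println!("{:?}", require_consensus_config(&missing));
}
```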
core/bin/zksync_tee_prover/src/tee_prover.rs (58 changes: 38 additions & 20 deletions)
@@ -1,12 +1,13 @@
-use std::time::Duration;
+use std::{fmt, time::Duration};

 use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1, SecretKey};
 use url::Url;
 use zksync_basic_types::H256;
 use zksync_node_framework::{
-    service::{ServiceContext, StopReceiver},
+    service::StopReceiver,
     task::{Task, TaskId},
     wiring_layer::{WiringError, WiringLayer},
+    IntoContext,
 };
 use zksync_prover_interface::inputs::TeeVerifierInput;
 use zksync_tee_verifier::Verify;
@@ -15,16 +16,8 @@ use zksync_types::{tee_types::TeeType, L1BatchNumber};
 use crate::{api_client::TeeApiClient, error::TeeProverError, metrics::METRICS};

 /// Wiring layer for `TeeProver`
-///
-/// ## Requests resources
-///
-/// no resources requested
-///
-/// ## Adds tasks
-///
-/// - `TeeProver`
 #[derive(Debug)]
-pub struct TeeProverLayer {
+pub(crate) struct TeeProverLayer {
     api_url: Url,
     signing_key: SecretKey,
     attestation_quote_bytes: Vec<u8>,
@@ -47,27 +40,35 @@ impl TeeProverLayer {
     }
 }

+#[derive(Debug, IntoContext)]
+pub(crate) struct LayerOutput {
+    #[context(task)]
+    pub tee_prover: TeeProver,
+}
+
 #[async_trait::async_trait]
 impl WiringLayer for TeeProverLayer {
+    type Input = ();
+    type Output = LayerOutput;
+
     fn layer_name(&self) -> &'static str {
         "tee_prover_layer"
     }

-    async fn wire(self: Box<Self>, mut context: ServiceContext<'_>) -> Result<(), WiringError> {
-        let tee_prover_task = TeeProver {
+    async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
+        let tee_prover = TeeProver {
             config: Default::default(),
             signing_key: self.signing_key,
             public_key: self.signing_key.public_key(&Secp256k1::new()),
             attestation_quote_bytes: self.attestation_quote_bytes,
             tee_type: self.tee_type,
             api_client: TeeApiClient::new(self.api_url),
         };
-        context.add_task(tee_prover_task);
-        Ok(())
+        Ok(LayerOutput { tee_prover })
     }
 }

-struct TeeProver {
+pub(crate) struct TeeProver {
     config: TeeProverConfig,
     signing_key: SecretKey,
     public_key: PublicKey,
@@ -76,6 +77,17 @@ struct TeeProver {
     api_client: TeeApiClient,
 }

+impl fmt::Debug for TeeProver {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("TeeProver")
+            .field("config", &self.config)
+            .field("public_key", &self.public_key)
+            .field("attestation_quote_bytes", &self.attestation_quote_bytes)
+            .field("tee_type", &self.tee_type)
+            .finish()
+    }
+}
+
 impl TeeProver {
     fn verify(
         &self,
@@ -169,7 +181,7 @@ impl Task for TeeProver {
                 return Ok(());
             }
             let result = self.step().await;
-            match result {
+            let need_to_sleep = match result {
                 Ok(batch_number) => {
                     retries = 1;
                     backoff = self.config.initial_retry_backoff;
@@ -179,6 +191,9 @@
                         METRICS
                             .last_batch_number_processed
                             .set(batch_number.0 as u64);
+                        false
+                    } else {
+                        true
                     }
                 }
                 Err(err) => {
@@ -188,14 +203,17 @@
                     }
                     retries += 1;
                     tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", self.config.max_retries, backoff.as_millis());
-                    tokio::time::timeout(backoff, stop_receiver.0.changed())
-                        .await
-                        .ok();
                     backoff = std::cmp::min(
                         backoff.mul_f32(self.config.retry_backoff_multiplier),
                         self.config.max_backoff,
                     );
+                    true
                 }
-            }
+            };
+            if need_to_sleep {
+                tokio::time::timeout(backoff, stop_receiver.0.changed())
+                    .await
+                    .ok();
+            }
         }
     }
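Besides the manual fmt::Debug impl above, which prints the prover's public fields while leaving the signing key out, the loop refactor moves the backoff wait out of the error arm: each match arm now reports whether the task should sleep, and the wait happens once at the bottom. A self-contained sketch of that control flow with made-up durations and a stubbed step function (the real task awaits the stop receiver with a timeout instead of sleeping on a thread).

```rust
use std::time::Duration;

// Stub for the prover step: pretend the first two attempts fail and the third
// finds no new batch to prove. Values are illustrative.
fn step(attempt: u32) -> Result<Option<u64>, &'static str> {
    match attempt {
        0 | 1 => Err("transient network error"),
        _ => Ok(None),
    }
}

fn main() {
    let initial_backoff = Duration::from_millis(100);
    let retry_backoff_multiplier = 2.0_f32;
    let max_backoff = Duration::from_secs(5);

    let mut backoff = initial_backoff;
    for attempt in 0..4 {
        // Each arm decides whether the loop should wait before the next poll.
        let need_to_sleep = match step(attempt) {
            Ok(Some(batch_number)) => {
                backoff = initial_backoff;
                println!("proved batch {batch_number}");
                false // new work found, poll again immediately
            }
            Ok(None) => true, // nothing to prove yet, wait before polling again
            Err(err) => {
                println!("step failed: {err}; retrying in {} ms", backoff.as_millis());
                backoff = std::cmp::min(backoff.mul_f32(retry_backoff_multiplier), max_backoff);
                true
            }
        };
        // The wait now happens in exactly one place, after the match.
        if need_to_sleep {
            std::thread::sleep(backoff);
        }
    }
}
```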
core/lib/basic_types/src/web3/mod.rs (3 changes: 3 additions & 0 deletions)
@@ -327,6 +327,9 @@ pub struct Log {
     pub log_type: Option<String>,
     /// Removed
     pub removed: Option<bool>,
+    /// L2 block timestamp
+    #[serde(rename = "blockTimestamp")]
+    pub block_timestamp: Option<i64>,
 }

 impl Log {
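The new field is deserialized from the JSON-RPC key blockTimestamp and stays None when a server omits it. A minimal serde sketch with a cut-down struct (not the full Log type), assuming serde with the derive feature and serde_json as dependencies.

```rust
use serde::Deserialize;

// Cut-down stand-in for `Log`; only the fields needed to show the rename are kept.
#[derive(Debug, Deserialize)]
struct Log {
    removed: Option<bool>,
    /// L2 block timestamp
    #[serde(rename = "blockTimestamp")]
    block_timestamp: Option<i64>,
}

fn main() -> serde_json::Result<()> {
    // A server that reports the L2 block timestamp.
    let with_ts: Log =
        serde_json::from_str(r#"{ "removed": false, "blockTimestamp": 1720425600 }"#)?;
    assert_eq!(with_ts.block_timestamp, Some(1_720_425_600));

    // An older server that omits the field still deserializes cleanly.
    let without_ts: Log = serde_json::from_str(r#"{ "removed": false }"#)?;
    assert_eq!(without_ts.block_timestamp, None);

    println!("{with_ts:?}\n{without_ts:?}");
    Ok(())
}
```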

Some generated files are not rendered by default.
@@ -0,0 +1,2 @@
+ALTER TABLE vm_runner_protective_reads ALTER COLUMN processing_started_at TYPE TIME USING (null);
+ALTER TABLE vm_runner_bwip ALTER COLUMN processing_started_at TYPE TIME USING (null);
@@ -0,0 +1,2 @@
+ALTER TABLE vm_runner_protective_reads ALTER COLUMN processing_started_at TYPE TIMESTAMP USING (null);
+ALTER TABLE vm_runner_bwip ALTER COLUMN processing_started_at TYPE TIMESTAMP USING (null);
core/lib/dal/src/events_dal.rs (3 changes: 2 additions & 1 deletion)
@@ -222,7 +222,8 @@ impl EventsDal<'_, '_> {
                 tx_hash,
                 tx_index_in_block,
                 event_index_in_block,
-                event_index_in_tx
+                event_index_in_tx,
+                NULL::BIGINT AS "block_timestamp?"
             FROM
                 events
             WHERE
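The NULL::BIGINT AS "block_timestamp?" alias above is sqlx macro syntax: the cast gives the query macro a concrete Postgres type to map (BIGINT to i64), and the trailing ? in the alias marks the column as nullable so it lands in an Option field. A hedged sketch of the same idea with a made-up EventRow struct and assumed column types; sqlx macros verify queries against a live database at compile time, so this only builds with DATABASE_URL pointing at a schema that has such an events table.

```rust
use sqlx::PgPool;

// Hypothetical row type for illustration; the column types are assumptions.
#[allow(dead_code)]
struct EventRow {
    event_index_in_tx: i32,
    block_timestamp: Option<i64>, // nullable because of the `?` alias override
}

async fn fetch_event_rows(pool: &PgPool) -> sqlx::Result<Vec<EventRow>> {
    sqlx::query_as!(
        EventRow,
        r#"
        SELECT
            event_index_in_tx,
            NULL::BIGINT AS "block_timestamp?"
        FROM
            events
        "#
    )
    .fetch_all(pool)
    .await
}
```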