diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs
index 41ab439a15fc..5c06e6876574 100644
--- a/core/lib/basic_types/src/prover_dal.rs
+++ b/core/lib/basic_types/src/prover_dal.rs
@@ -1,7 +1,8 @@
 //! Types exposed by the prover DAL for general-purpose use.
 
 use std::{net::IpAddr, ops::Add, str::FromStr};
 
-use chrono::{DateTime, Duration, Utc};
+use chrono::{DateTime, Duration, NaiveDateTime, NaiveTime, Utc};
+use strum::{Display, EnumString};
 
 use crate::{basic_fri_types::AggregationRound, L1BatchNumber};
 
@@ -229,3 +230,33 @@ impl FromStr for GpuProverInstanceStatus {
         }
     }
 }
+
+#[derive(Debug, EnumString, Display)]
+pub enum ProofCompressionJobStatus {
+    #[strum(serialize = "queued")]
+    Queued,
+    #[strum(serialize = "in_progress")]
+    InProgress,
+    #[strum(serialize = "successful")]
+    Successful,
+    #[strum(serialize = "failed")]
+    Failed,
+    #[strum(serialize = "sent_to_server")]
+    SentToServer,
+    #[strum(serialize = "skipped")]
+    Skipped,
+}
+
+pub struct ProofCompressionJobInfo {
+    pub l1_batch_number: L1BatchNumber,
+    pub attempts: u32,
+    pub status: ProofCompressionJobStatus,
+    pub fri_proof_blob_url: Option<String>,
+    pub l1_proof_blob_url: Option<String>,
+    pub error: Option<String>,
+    pub created_at: NaiveDateTime,
+    pub updated_at: NaiveDateTime,
+    pub processing_started_at: Option<NaiveDateTime>,
+    pub time_taken: Option<NaiveTime>,
+    pub picked_by: Option<String>,
+}
diff --git a/prover/prover_cli/src/commands/status/batch.rs b/prover/prover_cli/src/commands/status/batch.rs
index 30528d35d0ef..57576225b55d 100644
--- a/prover/prover_cli/src/commands/status/batch.rs
+++ b/prover/prover_cli/src/commands/status/batch.rs
@@ -1,9 +1,9 @@
 use anyhow::{ensure, Context as _};
 use clap::Args as ClapArgs;
-use prover_dal::{ConnectionPool, Prover};
+use prover_dal::{Connection, ConnectionPool, Prover, ProverDal};
 use zksync_types::L1BatchNumber;
 
-use super::utils::BatchData;
+use super::utils::{BatchData, Task, TaskStatus};
 use crate::commands::status::utils::postgres_config;
 
 #[derive(ClapArgs)]
@@ -29,7 +29,7 @@ pub(crate) async fn run(args: Args) -> anyhow::Result<()> {
     Ok(())
 }
 
-async fn get_batches_data(_batches: Vec<L1BatchNumber>) -> anyhow::Result<Vec<BatchData>> {
+async fn get_batches_data(batches: Vec<L1BatchNumber>) -> anyhow::Result<Vec<BatchData>> {
     let config = postgres_config()?;
 
     let prover_connection_pool =
@@ -38,9 +38,29 @@ async fn get_batches_data(_batches: Vec<L1BatchNumber>) -> anyhow::Result<Vec<BatchData>> {
+    batch_number: L1BatchNumber,
+    conn: &mut Connection<'a, Prover>,
+) -> TaskStatus {
+    conn.fri_proof_compressor_dal()
+        .get_proof_compression_job_for_batch(batch_number)
+        .await
+        .map(|job| TaskStatus::from(job.status))
+        .unwrap_or(TaskStatus::Custom("Compressor job not found 🚫".to_owned()))
+}
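In the hunk above, the batch command maps the DAL row's status through `TaskStatus::from` and falls back to `TaskStatus::Custom` when no compressor job exists. The sketch below is a self-contained stand-in (the real `TaskStatus` lives in `status/utils.rs` and has more variants); it only illustrates how a `From` conversion into a pared-down `TaskStatus` and the custom-message printing used by the CLI fit together.

```rust
use strum::Display;

// Stand-in for zksync_basic_types::prover_dal::ProofCompressionJobStatus.
enum ProofCompressionJobStatus {
    Successful,
    SentToServer,
}

// Pared-down stand-in for the CLI's TaskStatus; `Custom` carries a free-form message.
#[derive(Display)]
enum TaskStatus {
    Custom(String),
    #[strum(to_string = "Successful ✅")]
    Successful,
}

impl From<ProofCompressionJobStatus> for TaskStatus {
    fn from(status: ProofCompressionJobStatus) -> Self {
        match status {
            ProofCompressionJobStatus::Successful => TaskStatus::Successful,
            // Statuses without a dedicated CLI variant are surfaced as a custom message.
            ProofCompressionJobStatus::SentToServer => {
                TaskStatus::Custom("Sent to server 📤".to_owned())
            }
        }
    }
}

fn main() {
    // Mirrors the Debug impl touched below: custom statuses print their payload,
    // everything else prints its strum label.
    let status = TaskStatus::from(ProofCompressionJobStatus::SentToServer);
    match status {
        TaskStatus::Custom(msg) => println!("> {msg}"),
        other => println!("> {other}"),
    }
}
```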
diff --git a/prover/prover_cli/src/commands/status/utils.rs b/prover/prover_cli/src/commands/status/utils.rs
index e6edecc15039..e844098be567 100644
--- a/prover/prover_cli/src/commands/status/utils.rs
+++ b/prover/prover_cli/src/commands/status/utils.rs
@@ -5,7 +5,7 @@ use strum::{Display, EnumString};
 use zksync_basic_types::{basic_fri_types::AggregationRound, prover_dal::JobCountStatistics};
 use zksync_config::PostgresConfig;
 use zksync_env_config::FromEnv;
-use zksync_types::L1BatchNumber;
+use zksync_types::{prover_dal::ProofCompressionJobStatus, L1BatchNumber};
 
 pub fn postgres_config() -> anyhow::Result<PostgresConfig> {
     Ok(PostgresConfig::from_env()?)
 }
@@ -74,7 +74,6 @@ impl Default for BatchData {
 pub enum TaskStatus {
     /// A custom status that can be set manually.
     /// Mostly used when a task has singular status.
-    #[strum(to_string = "{0}")]
     Custom(String),
     /// A task is considered queued when all of its jobs is queued.
     #[strum(to_string = "Queued 📥")]
     Queued,
@@ -99,6 +98,21 @@ impl Default for TaskStatus {
     }
 }
 
+impl From<ProofCompressionJobStatus> for TaskStatus {
+    fn from(status: ProofCompressionJobStatus) -> Self {
+        match status {
+            ProofCompressionJobStatus::Queued => TaskStatus::Queued,
+            ProofCompressionJobStatus::InProgress => TaskStatus::InProgress,
+            ProofCompressionJobStatus::Successful => TaskStatus::Successful,
+            ProofCompressionJobStatus::Failed => TaskStatus::InProgress,
+            ProofCompressionJobStatus::SentToServer => {
+                TaskStatus::Custom("Sent to server 📤".to_owned())
+            }
+            ProofCompressionJobStatus::Skipped => TaskStatus::Custom("Skipped ⏩".to_owned()),
+        }
+    }
+}
+
 type ProverJobsData = HashMap<(L1BatchNumber, AggregationRound), JobCountStatistics>;
 
 #[derive(EnumString, Clone, Display)]
@@ -148,6 +162,10 @@ impl Task {
 impl Debug for Task {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         writeln!(f, "-- {} --", self.to_string().bold())?;
-        writeln!(f, "> {}", self.status().to_string())
+        if let TaskStatus::Custom(msg) = self.status() {
+            writeln!(f, "> {msg}")
+        } else {
+            writeln!(f, "> {}", self.status().to_string())
+        }
     }
 }
diff --git a/prover/prover_dal/.sqlx/query-2ab2f83b273c5aa88c1eefc8f70a8ea23052f714cd74c1d28ae1203ce8f0eaa9.json b/prover/prover_dal/.sqlx/query-2ab2f83b273c5aa88c1eefc8f70a8ea23052f714cd74c1d28ae1203ce8f0eaa9.json
new file mode 100644
index 000000000000..3441906e0cea
--- /dev/null
+++ b/prover/prover_dal/.sqlx/query-2ab2f83b273c5aa88c1eefc8f70a8ea23052f714cd74c1d28ae1203ce8f0eaa9.json
@@ -0,0 +1,82 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT\n *\n FROM\n proof_compression_jobs_fri\n WHERE\n l1_batch_number = $1\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "l1_batch_number",
+        "type_info": "Int8"
+      },
+      {
+        "ordinal": 1,
+        "name": "attempts",
+        "type_info": "Int2"
+      },
+      {
+        "ordinal": 2,
+        "name": "status",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 3,
+        "name": "fri_proof_blob_url",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 4,
+        "name": "l1_proof_blob_url",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 5,
+        "name": "error",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 6,
+        "name": "created_at",
+        "type_info": "Timestamp"
+      },
+      {
+        "ordinal": 7,
+        "name": "updated_at",
+        "type_info": "Timestamp"
+      },
+      {
+        "ordinal": 8,
+        "name": "processing_started_at",
+        "type_info": "Timestamp"
+      },
+      {
+        "ordinal": 9,
+        "name": "time_taken",
+        "type_info": "Time"
+      },
+      {
+        "ordinal": 10,
+        "name": "picked_by",
+        "type_info": "Text"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int8"
+      ]
+    },
+    "nullable": [
+      false,
+      false,
+      false,
+      true,
+      true,
+      true,
+      false,
+      false,
+      true,
+      true,
+      true
+    ]
+  },
+  "hash": "2ab2f83b273c5aa88c1eefc8f70a8ea23052f714cd74c1d28ae1203ce8f0eaa9"
+}
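The DAL method added in the next file parses the `status` TEXT column described in this metadata back into `ProofCompressionJobStatus` with `FromStr`, which strum derives from the same `serialize` strings used for `Display`. A minimal round-trip sketch follows; the enum is a trimmed copy of the one added in `prover_dal.rs`, and the `main` function is illustrative only.

```rust
use std::str::FromStr;

use strum::{Display, EnumString};

#[derive(Debug, PartialEq, EnumString, Display)]
pub enum ProofCompressionJobStatus {
    #[strum(serialize = "queued")]
    Queued,
    #[strum(serialize = "in_progress")]
    InProgress,
    #[strum(serialize = "successful")]
    Successful,
}

fn main() {
    // Parsing the TEXT value stored in proof_compression_jobs_fri.status...
    let status = ProofCompressionJobStatus::from_str("in_progress").unwrap();
    assert_eq!(status, ProofCompressionJobStatus::InProgress);
    // ...and Display re-serializes it with the same string, so the round trip is lossless.
    assert_eq!(status.to_string(), "in_progress");
    // An unknown value surfaces as a strum::ParseError rather than a panic here.
    assert!(ProofCompressionJobStatus::from_str("unknown").is_err());
}
```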
diff --git a/prover/prover_dal/src/fri_proof_compressor_dal.rs b/prover/prover_dal/src/fri_proof_compressor_dal.rs
index 01231d33b00e..7016fcd64ddd 100644
--- a/prover/prover_dal/src/fri_proof_compressor_dal.rs
+++ b/prover/prover_dal/src/fri_proof_compressor_dal.rs
@@ -2,9 +2,10 @@ use std::{collections::HashMap, str::FromStr, time::Duration};
 
 use sqlx::Row;
-use strum::{Display, EnumString};
 use zksync_basic_types::{
-    prover_dal::{JobCountStatistics, StuckJobs},
+    prover_dal::{
+        JobCountStatistics, ProofCompressionJobInfo, ProofCompressionJobStatus, StuckJobs,
+    },
     L1BatchNumber,
 };
 use zksync_db_connection::connection::Connection;
 
@@ -16,22 +17,6 @@ pub struct FriProofCompressorDal<'a, 'c> {
     pub(crate) storage: &'a mut Connection<'c, Prover>,
 }
 
-#[derive(Debug, EnumString, Display)]
-pub enum ProofCompressionJobStatus {
-    #[strum(serialize = "queued")]
-    Queued,
-    #[strum(serialize = "in_progress")]
-    InProgress,
-    #[strum(serialize = "successful")]
-    Successful,
-    #[strum(serialize = "failed")]
-    Failed,
-    #[strum(serialize = "sent_to_server")]
-    SentToServer,
-    #[strum(serialize = "skipped")]
-    Skipped,
-}
-
 impl FriProofCompressorDal<'_, '_> {
     pub async fn insert_proof_compression_job(
         &mut self,
@@ -328,4 +313,37 @@ impl FriProofCompressorDal<'_, '_> {
             .collect()
         }
     }
+
+    pub async fn get_proof_compression_job_for_batch(
+        &mut self,
+        block_number: L1BatchNumber,
+    ) -> Option<ProofCompressionJobInfo> {
+        sqlx::query!(
+            r#"
+            SELECT
+                *
+            FROM
+                proof_compression_jobs_fri
+            WHERE
+                l1_batch_number = $1
+            "#,
+            i64::from(block_number.0)
+        )
+        .fetch_optional(self.storage.conn())
+        .await
+        .unwrap()
+        .map(|row| ProofCompressionJobInfo {
+            l1_batch_number: block_number,
+            attempts: row.attempts as u32,
+            status: ProofCompressionJobStatus::from_str(&row.status).unwrap(),
+            fri_proof_blob_url: row.fri_proof_blob_url,
+            l1_proof_blob_url: row.l1_proof_blob_url,
+            error: row.error,
+            created_at: row.created_at,
+            updated_at: row.updated_at,
+            processing_started_at: row.processing_started_at,
+            time_taken: row.time_taken,
+            picked_by: row.picked_by,
+        })
+    }
 }
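For context, a hedged usage sketch of the new getter from a CLI-style call site. It assumes a `ConnectionPool<Prover>` built as in `get_batches_data` above, a `connection()` accessor on the pool, and the `ProverDal` extension trait from the `prover_dal` imports; none of this code is part of the diff.

```rust
use prover_dal::{ConnectionPool, Prover, ProverDal};
use zksync_types::L1BatchNumber;

// Illustrative only: fetch the compression job row for one batch and print its status.
async fn print_compressor_status(
    pool: &ConnectionPool<Prover>,
    batch: L1BatchNumber,
) -> anyhow::Result<()> {
    // Assumes the pool exposes `connection()`, as other DAL call sites in the workspace do.
    let mut conn = pool.connection().await?;
    match conn
        .fri_proof_compressor_dal()
        .get_proof_compression_job_for_batch(batch)
        .await
    {
        // `status` derives Display, so it prints as "queued", "in_progress", and so on.
        Some(job) => println!("batch {} compressor status: {}", batch.0, job.status),
        None => println!("batch {}: no compression job found", batch.0),
    }
    Ok(())
}
```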