diff --git a/Cargo.lock b/Cargo.lock index 82054bf9..06df0c5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,6 +106,28 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "alloy-primitives" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db8aa973e647ec336810a9356af8aea787249c9d00b1525359f3db29a68d231b" +dependencies = [ + "alloy-rlp", + "bytes 1.5.0", + "cfg-if 1.0.0", + "const-hex", + "derive_more", + "hex-literal", + "itoa", + "k256 0.13.2", + "keccak-asm", + "proptest", + "rand 0.8.5", + "ruint", + "serde", + "tiny-keccak", +] + [[package]] name = "alloy-rlp" version = "0.3.2" @@ -1113,13 +1135,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.8.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08849ed393c907c90016652a01465a12d86361cd38ad2a7de026c56a520cc259" +checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "hex", + "proptest", "serde", ] @@ -1311,14 +1334,38 @@ dependencies = [ "cipher", ] +[[package]] +name = "darling" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" +dependencies = [ + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + [[package]] name = "darling" version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.3", + "darling_macro 0.20.3", +] + +[[package]] +name = "darling_core" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 1.0.109", ] [[package]] @@ -1335,13 +1382,24 @@ dependencies = [ "syn 2.0.32", ] +[[package]] +name = "darling_macro" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" +dependencies = [ + "darling_core 0.13.4", + "quote", + "syn 1.0.109", +] + [[package]] name = "darling_macro" version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ - "darling_core", + "darling_core 0.20.3", "quote", "syn 2.0.32", ] @@ -1808,7 +1866,7 @@ dependencies = [ "serde_json", "serde_yaml 0.8.26", "sha2 0.10.8", - "ssz_rs", + "ssz_rs 0.9.0 (git+https://github.com/ralexstokes/ssz-rs?rev=db3bca54b23522df224b1d7d4ac3c3b805a49e84)", "thiserror", "tokio", "tokio-stream", @@ -2643,12 +2701,14 @@ dependencies = [ "rand 0.8.5", "redis", "reqwest", + "reqwest-eventsource", "reth-primitives 0.1.0-alpha.10", "serde", "serde_json", "serial_test", "thiserror", "tokio", + "tokio-stream", "tokio-tungstenite 0.16.1", "tonic", "tonic-build", @@ -2720,11 +2780,15 @@ dependencies = [ "serde", "serde_json", "serde_yaml 0.9.25", + "sha2 0.10.8", + "ssz_rs 0.9.0 (git+https://github.com/ralexstokes/ssz-rs?rev=1df4cd9)", "ssz_types", "thiserror", "tokio", "tokio-postgres", "tracing", + "tree_hash 0.6.0", + "tree_hash_derive", ] [[package]] @@ -2822,6 +2886,8 @@ dependencies = [ "serde", "serde_json", "tracing", + "tree_hash 0.6.0", + "tree_hash_derive", ] [[package]] @@ 
-3307,6 +3373,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "kernel32-sys" version = "0.2.2" @@ -4371,19 +4447,19 @@ dependencies = [ [[package]] name = "proptest" -version = "1.2.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", - "bitflags 1.3.2", - "byteorder", + "bit-vec", + "bitflags 2.4.1", "lazy_static", "num-traits", "rand 0.8.5", "rand_chacha", "rand_xorshift", - "regex-syntax 0.6.29", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -4743,6 +4819,12 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + [[package]] name = "reqwest" version = "0.11.23" @@ -5558,7 +5640,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788" dependencies = [ - "darling", + "darling 0.20.3", "proc-macro2", "quote", "syn 2.0.32", @@ -5655,6 +5737,7 @@ dependencies = [ "cpufeatures", "digest 0.9.0", "opaque-debug", + "sha2-asm", ] [[package]] @@ -5668,6 +5751,15 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha2-asm" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +dependencies = [ + "cc", +] + [[package]] name = "sha3" version = "0.10.8" @@ -5678,6 +5770,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if 1.0.0", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -5881,6 +5983,18 @@ dependencies = [ "der 0.7.8", ] +[[package]] +name = "ssz_rs" +version = "0.9.0" +source = "git+https://github.com/ralexstokes/ssz-rs?rev=1df4cd9#1df4cd9b849a48c44a6105abc3a38d21cd4fd8d3" +dependencies = [ + "alloy-primitives 0.7.4", + "bitvec", + "serde", + "sha2 0.9.9", + "ssz_rs_derive 0.9.0 (git+https://github.com/ralexstokes/ssz-rs?rev=1df4cd9)", +] + [[package]] name = "ssz_rs" version = "0.9.0" @@ -5891,7 +6005,17 @@ dependencies = [ "hex", "serde", "sha2 0.9.9", - "ssz_rs_derive", + "ssz_rs_derive 0.9.0 (git+https://github.com/ralexstokes/ssz-rs?rev=db3bca54b23522df224b1d7d4ac3c3b805a49e84)", +] + +[[package]] +name = "ssz_rs_derive" +version = "0.9.0" +source = "git+https://github.com/ralexstokes/ssz-rs?rev=1df4cd9#1df4cd9b849a48c44a6105abc3a38d21cd4fd8d3" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -5917,7 +6041,7 @@ dependencies = [ "serde", "serde_derive", "smallvec 1.11.0", - "tree_hash", + "tree_hash 0.5.2", "typenum", ] @@ -6804,6 +6928,28 @@ dependencies = [ "smallvec 1.11.0", ] +[[package]] +name = "tree_hash" 
+version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134d6b24a5b829f30b5ee7de05ba7384557f5f6b00e29409cdf2392f93201bfa" +dependencies = [ + "ethereum-types", + "ethereum_hashing 0.6.0", + "smallvec 1.11.0", +] + +[[package]] +name = "tree_hash_derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce7bccc538359a213436af7bc95804bdbf1c2a21d80e22953cbe9e096837ff1" +dependencies = [ + "darling 0.13.4", + "quote", + "syn 1.0.109", +] + [[package]] name = "triomphe" version = "0.1.11" diff --git a/Cargo.toml b/Cargo.toml index 9679cecd..2820abb8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,13 @@ [workspace] members = [ - "crates/beacon-client", - "crates/api", - "crates/utils", - "crates/common", - "crates/database", - "crates/datastore", - "crates/cmd", - "crates/housekeeper", + "crates/beacon-client", + "crates/api", + "crates/utils", + "crates/common", + "crates/database", + "crates/datastore", + "crates/cmd", + "crates/housekeeper", ] resolver = "2" @@ -29,15 +29,20 @@ helix-utils = { path = "./crates/utils" } # Async and Networking async-trait = "0.1" -axum = {version = "0.7.4", features = ["ws"]} +axum = { version = "0.7.4", features = ["ws"] } dashmap = { version = "5.5.3", features = [] } futures = "0.3" hyper = "1.1.0" http = "1.0.0" tower = { version = "0.4.13", features = ["full"] } -reqwest = { version = "0.11.23", features = ["json", "native-tls-vendored", "stream", "blocking"] } +reqwest = { version = "0.11.23", features = [ + "json", + "native-tls-vendored", + "stream", + "blocking", +] } tokio = { version = "1.33.0", features = ["full"] } -tokio-stream = {version = "0.1.15", features = ["sync"]} +tokio-stream = { version = "0.1.15", features = ["sync"] } tower-http = { version = "0.5.1", features = ["limit"] } url = "2.4" @@ -51,7 +56,7 @@ deadpool-redis = { version = "0.12.0", features = ["rt_tokio_1"] } redis = { version = "0.23.2", features = ["aio", "tokio-comp"] } tokio-postgres = "0.7.10" deadpool-postgres = "0.11.0" -refinery = { version = "0.8", features = ["tokio-postgres"]} +refinery = { version = "0.8", features = ["tokio-postgres"] } bytes = "1.5.0" chrono = "0.4.19" diff --git a/Dockerfile b/Dockerfile index 6dda5b3d..e052b1b7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,9 +5,9 @@ RUN apt install -y clang RUN apt install -y protobuf-compiler RUN wget https://github.com/mozilla/sccache/releases/download/v0.3.1/sccache-v0.3.1-x86_64-unknown-linux-musl.tar.gz \ - && tar xzf sccache-v0.3.1-x86_64-unknown-linux-musl.tar.gz \ - && mv sccache-v0.3.1-x86_64-unknown-linux-musl/sccache /usr/local/bin/sccache \ - && chmod +x /usr/local/bin/sccache + && tar xzf sccache-v0.3.1-x86_64-unknown-linux-musl.tar.gz \ + && mv sccache-v0.3.1-x86_64-unknown-linux-musl/sccache /usr/local/bin/sccache \ + && chmod +x /usr/local/bin/sccache ARG AWS_ACCESS_KEY_ID ARG AWS_SECRET_ACCESS_KEY @@ -33,13 +33,13 @@ RUN ls -lah /app/${REPO_NAME} WORKDIR /app/${REPO_NAME} RUN --mount=type=cache,target=/root/.cargo \ - --mount=type=cache,target=/usr/local/cargo/registry \ - cargo fetch + --mount=type=cache,target=/usr/local/cargo/registry \ + cargo fetch # Run build RUN --mount=type=cache,target=/root/.cargo \ - --mount=type=cache,target=/usr/local/cargo/registry \ - RUSTC_WRAPPER=/usr/local/bin/sccache cargo build -p helix-cmd --release + --mount=type=cache,target=/usr/local/cargo/registry \ + RUSTC_WRAPPER=/usr/local/bin/sccache cargo build -p helix-cmd --release # Copy binary into the workdir RUN 
mv /app/$REPO_NAME/target/release/helix-cmd /app/helix-cmd diff --git a/crates/api/Cargo.toml b/crates/api/Cargo.toml index 463c5716..64eb41ea 100644 --- a/crates/api/Cargo.toml +++ b/crates/api/Cargo.toml @@ -22,8 +22,10 @@ tower = { workspace = true } futures = { workspace = true } hyper = { workspace = true } reqwest = { workspace = true } +reqwest-eventsource = "0.5" tokio = { workspace = true } tokio-tungstenite = "0.16" +tokio-stream = { workspace = true } tower-http = { workspace = true } url = { workspace = true } tonic = "0.10" @@ -50,6 +52,7 @@ auto_impl.workspace = true rand = "0.8" thiserror = { workspace = true } tracing = { workspace = true } +tracing-subscriber = { workspace = true } uuid = { workspace = true } bytes = "1.5" moka = "0.9" diff --git a/crates/api/src/builder/api.rs b/crates/api/src/builder/api.rs index 4649e5d1..2ce5cdf1 100644 --- a/crates/api/src/builder/api.rs +++ b/crates/api/src/builder/api.rs @@ -7,27 +7,36 @@ use std::{ use axum::{ body::{to_bytes, Body}, - extract::ws::{Message, WebSocket, WebSocketUpgrade}, + extract::{ + ws::{Message, WebSocket, WebSocketUpgrade}, + Query, + }, http::{Request, StatusCode}, - response::{IntoResponse, Response}, + response::{ + sse::{Event, KeepAlive}, + IntoResponse, Response, Sse, + }, Extension, Json, }; use ethereum_consensus::{ - configs::mainnet::{CAPELLA_FORK_EPOCH, SECONDS_PER_SLOT}, + configs::mainnet::CAPELLA_FORK_EPOCH, phase0::mainnet::SLOTS_PER_EPOCH, primitives::{BlsPublicKey, Hash32}, ssz::{self, prelude::*}, }; use flate2::read::GzDecoder; -use futures::StreamExt; +use futures::{Stream, StreamExt}; use hyper::HeaderMap; +use reth_primitives::B256; use tokio::{ sync::{ + broadcast, mpsc::{self, error::SendError, Receiver, Sender}, RwLock, }, time::{self, Instant}, }; +use tokio_stream::wrappers::BroadcastStream; use tracing::{debug, error, info, warn}; use uuid::Uuid; @@ -41,6 +50,7 @@ use helix_common::{ BidSubmission, BidTrace, SignedBidSubmission, }, chain_info::ChainInfo, + proofs::{verify_multiproofs, InclusionProofs, SignedConstraints}, signing::RelaySigningContext, simulator::BlockSimError, versioned_payload::PayloadAndBlobs, @@ -52,10 +62,13 @@ use helix_datastore::{types::SaveBidAndUpdateTopBidResponse, Auctioneer}; use helix_housekeeper::{ChainUpdate, PayloadAttributesUpdate, SlotUpdate}; use helix_utils::{get_payload_attributes_key, has_reached_fork}; +use serde::Deserialize; + use crate::{ builder::{ error::BuilderApiError, traits::BlockSimulator, BlockSimRequest, DbInfo, OptimisticVersion, }, + constraints::api::ConstraintsHandle, gossiper::{ traits::GossipClientTrait, types::{ @@ -67,6 +80,11 @@ use crate::{ pub(crate) const MAX_PAYLOAD_LENGTH: usize = 1024 * 1024 * 10; +#[derive(Deserialize)] +pub struct SlotQuery { + slot: u64, +} + #[derive(Clone)] pub struct BuilderApi where @@ -83,6 +101,7 @@ where signing_context: Arc, relay_config: Arc, db_sender: Sender, + constraints_tx: broadcast::Sender, /// Information about the current head slot and next proposer duty curr_slot_info: Arc)>>, @@ -108,8 +127,9 @@ where relay_config: RelayConfig, slot_update_subscription: Sender>, gossip_receiver: Receiver, - ) -> Self { + ) -> (Self, ConstraintsHandle) { let (db_sender, db_receiver) = mpsc::channel::(10_000); + let (constraints_tx, _) = broadcast::channel(128); // Spin up db processing task let db_clone = db.clone(); @@ -127,6 +147,7 @@ where relay_config: Arc::new(relay_config), db_sender, + constraints_tx: constraints_tx.clone(), curr_slot_info: Arc::new(RwLock::new((0, None))), 
proposer_duties_response: Arc::new(RwLock::new(None)), @@ -151,7 +172,7 @@ where } }); - api + (api, ConstraintsHandle { constraints_tx }) } /// Implements this API: @@ -169,6 +190,104 @@ where } } + /// This endpoint returns a list of signed constraints for a given `slot`. + /// + /// Implements this API: + pub async fn constraints( + Extension(api): Extension>>, + Query(slot): Query, + ) -> Result { + let slot = slot.slot; + + info!(slot, "builder requested constraints for slot"); + + let head_slot = api.curr_slot_info.read().await.0; + + if slot < head_slot || slot > head_slot + 32 { + return Err(BuilderApiError::IncorrectSlot(slot)) + } + + match api.auctioneer.get_constraints(slot).await { + Ok(Some(cache)) => { + let constraints = cache + .into_iter() + .map(|data| data.signed_constraints) + .collect::>(); + + info!(slot, len = constraints.len(), "returning constraints to builder"); + Ok(Json(constraints)) + } + Ok(None) => { + debug!("No constraints found for slot"); + Ok(Json(vec![])) // Return an empty vector if no constraints were found + } + Err(err) => { + warn!(error = %err, "Failed to get constraints"); + Err(BuilderApiError::AuctioneerError(err)) + } + } + } + + /// This endpoint returns a stream of signed constraints as they are submitted to the relay. + /// + /// Implements this API: + pub async fn constraints_stream( + Extension(api): Extension>>, + ) -> Sse>> { + let constraints_rx = api.constraints_tx.subscribe(); + let stream = BroadcastStream::new(constraints_rx); + + let filtered = stream.map(|result| match result { + Ok(constraint) => match serde_json::to_string(&vec![constraint]) { + Ok(json) => Ok(Event::default() + .data(json) + .event("signed_constraint") + .retry(Duration::from_millis(50))), + Err(err) => { + warn!(error = %err, "Failed to serialize constraint"); + Err(BuilderApiError::SszSerializeError) + } + }, + Err(err) => { + warn!(error = %err, "Error receiving constraint message"); + Err(BuilderApiError::InternalError) + } + }); + + Sse::new(filtered).keep_alive(KeepAlive::default()) + } + + /// This endpoint returns the active delegations for the validator scheduled to propose + /// at the provided `slot`. The delegations are returned as a list of BLS pubkeys. + /// + /// Implements this API: + pub async fn delegations( + Extension(api): Extension>>, + Query(slot): Query, + ) -> Result { + let slot = slot.slot; + + let Some(duty_bytes) = &*api.proposer_duties_response.read().await else { + warn!(slot, "delegations -- could not find slot duty"); + return Err(BuilderApiError::ProposerDutyNotFound); + }; + let Ok(proposer_duties) = + serde_json::from_slice::>(duty_bytes) + else { + return Err(BuilderApiError::DeserializeError); + }; + + let duty = proposer_duties + .iter() + .find(|duty| duty.slot == slot) + .ok_or(BuilderApiError::ProposerDutyNotFound)?; + + let pubkey = duty.entry.message.public_key.clone(); + let delegations = Json(api.auctioneer.get_validator_delegations(pubkey).await?); + + Ok(delegations) + } + /// Handles the submission of a new block by performing various checks and verifications /// before saving the submission to the auctioneer. /// @@ -398,6 +517,274 @@ where Ok(StatusCode::OK) } + /// Handles the submission of a new block with inclusion proofs. + /// + /// This function extends the `submit_block` functionality to also handle inclusion proofs: + /// 1. Receives the request and decodes the payload into a `SignedBidSubmission` object. + /// 2. Validates the builder and checks against the next proposer duty. + /// 3.
Verifies the signature of the payload. + /// 4. Fetches the constraints for the slot and verifies the inclusion proofs. + /// 5. Runs further validations against the auctioneer. + /// 6. Simulates the block to validate the payment. + /// 7. Saves the bid and inclusion proof to the auctioneer. + /// + /// Implements this API: + pub async fn submit_block_with_proofs( + Extension(api): Extension>>, + req: Request, + ) -> Result { + let request_id = Uuid::new_v4(); + let mut trace = SubmissionTrace { receive: get_nanos_timestamp()?, ..Default::default() }; + let (head_slot, next_duty) = api.curr_slot_info.read().await.clone(); + + info!( + request_id = %request_id, + event = "submit_block_with_proofs", + head_slot = head_slot, + timestamp_request_start = trace.receive, + ); + + // Decode the incoming request body into a payload with proofs + let (payload, is_cancellations_enabled) = + decode_payload(req, &mut trace, &request_id).await?; + let block_hash = payload.message().block_hash.clone(); + + // Verify that we have a validator connected for this slot + if next_duty.is_none() { + warn!(request_id = %request_id, "could not find slot duty"); + return Err(BuilderApiError::ProposerDutyNotFound) + } + let next_duty = next_duty.unwrap(); + + debug!( + request_id = %request_id, + builder_pub_key = ?payload.builder_public_key(), + block_value = %payload.value(), + block_hash = ?block_hash, + "submit_block_with_proofs -- payload decoded", + ); + + // Verify the payload is for the current slot + if payload.slot() <= head_slot { + warn!( + request_id = %request_id, + "submission is for a past slot", + ); + return Err(BuilderApiError::SubmissionForPastSlot { + current_slot: head_slot, + submission_slot: payload.slot(), + }) + } + + // Fetch the next payload attributes and validate basic information + let payload_attributes = api + .fetch_payload_attributes(payload.slot(), payload.parent_hash(), &request_id) + .await?; + + // Handle duplicates. + if let Err(err) = api + .check_for_duplicate_block_hash( + &block_hash, + payload.slot(), + payload.parent_hash(), + payload.proposer_public_key(), + &request_id, + ) + .await + { + match err { + BuilderApiError::DuplicateBlockHash { block_hash } => { + // We don't return the error here as we want to continue processing the request. + // This mitigates the risk of someone sending an invalid payload + // with a valid header, which would block subsequent submissions with the same + // header and valid payload.
+ debug!( + request_id = %request_id, + block_hash = ?block_hash, + builder_pub_key = ?payload.builder_public_key(), + "block hash already seen" + ); + } + _ => return Err(err), + } + } + + // Verify the payload value is above the floor bid + let floor_bid_value = api + .check_if_bid_is_below_floor( + payload.slot(), + payload.parent_hash(), + payload.proposer_public_key(), + payload.builder_public_key(), + payload.value(), + is_cancellations_enabled, + &request_id, + ) + .await?; + trace.floor_bid_checks = get_nanos_timestamp()?; + + // Fetch builder info + let builder_info = api.fetch_builder_info(payload.builder_public_key()).await; + + // Handle trusted builders check + if !api.check_if_trusted_builder(&next_duty, &builder_info).await { + let proposer_trusted_builders = next_duty.entry.preferences.trusted_builders.unwrap(); + warn!( + request_id = %request_id, + builder_pub_key = ?payload.builder_public_key(), + proposer_trusted_builders = ?proposer_trusted_builders, + "builder not in proposer trusted builders list", + ); + return Err(BuilderApiError::BuilderNotInProposersTrustedList { + proposer_trusted_builders, + }) + } + + // Verify payload has not already been delivered + match api.auctioneer.get_last_slot_delivered().await { + Ok(Some(slot)) => { + if payload.slot() <= slot { + warn!(request_id = %request_id, "payload already delivered"); + return Err(BuilderApiError::PayloadAlreadyDelivered) + } + } + Ok(None) => {} + Err(err) => { + error!(request_id = %request_id, error = %err, "failed to get last slot delivered"); + } + } + + // Sanity check the payload + if let Err(err) = sanity_check_block_submission( + &payload, + payload.bid_trace(), + &next_duty, + &payload_attributes, + &api.chain_info, + ) { + warn!(request_id = %request_id, error = %err, "failed sanity check"); + return Err(err) + } + trace.pre_checks = get_nanos_timestamp()?; + + let (payload, was_simulated_optimistically) = api + .verify_submitted_block( + payload, + next_duty, + &builder_info, + &mut trace, + &request_id, + &payload_attributes, + ) + .await?; + + // Fetch constraints, and if available verify inclusion proofs and save them to cache + if let Some(constraints) = api.auctioneer.get_constraints(payload.slot()).await? { + let transactions_root: B256 = payload + .transactions() + .clone() + .hash_tree_root()? + .to_vec() + .as_slice() + .try_into() + .map_err(|e| { + error!(error = %e, "failed to convert root to hash32"); + BuilderApiError::InternalError + })?; + let proofs = payload.proofs().ok_or(BuilderApiError::InclusionProofsNotFound)?; + let constraints_proofs: Vec<_> = constraints.iter().map(|c| &c.proof_data).collect(); + + verify_multiproofs(constraints_proofs.as_slice(), proofs, transactions_root).map_err( + |e| { + error!(error = %e, "failed to verify inclusion proofs"); + BuilderApiError::InclusionProofVerificationFailed(e) + }, + )?; + + // Save inclusion proof to auctioneer. 
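+ // It is stored under (slot, proposer pubkey, block hash), presumably so it can later be served alongside the corresponding bid.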
+ api.save_inclusion_proof( + payload.slot(), + payload.proposer_public_key(), + payload.block_hash(), + proofs, + &request_id, + ) + .await?; + + info!(request_id = %request_id, head_slot, "inclusion proofs verified and saved to auctioneer"); + } else { + info!(request_id = %request_id, "no constraints found for slot, proof verification is not needed"); + }; + + // If cancellations are enabled, then abort now if there is a later submission + if is_cancellations_enabled { + if let Err(err) = + api.check_for_later_submissions(&payload, trace.receive, &request_id).await + { + warn!(request_id = %request_id, error = %err, "already processing later submission"); + return Err(err) + } + } + + // Save bid to auctioneer + match api + .save_bid_to_auctioneer( + &payload, + &mut trace, + is_cancellations_enabled, + floor_bid_value, + &request_id, + ) + .await? + { + // If the bid was successfully saved then we gossip the header and payload to all other + // relays. + Some((builder_bid, execution_payload)) => { + api.gossip_new_submission( + &payload, + execution_payload, + builder_bid, + is_cancellations_enabled, + trace.receive, + &request_id, + ) + .await; + } + None => { /* Bid wasn't saved so no need to gossip as it will never be served */ } + } + + // Log some final info + trace.request_finish = get_nanos_timestamp()?; + info!( + request_id = %request_id, + trace = ?trace, + request_duration_ns = trace.request_finish.saturating_sub(trace.receive), + "submit_block_with_proofs request finished" + ); + + let optimistic_version = if was_simulated_optimistically { + OptimisticVersion::V1 + } else { + OptimisticVersion::NotOptimistic + }; + + // Save submission to db. + tokio::spawn(async move { + if let Err(err) = api + .db + .store_block_submission(payload, Arc::new(trace), optimistic_version as i16) + .await + { + error!( + error = %err, + "failed to store block submission with proofs", + ) + } + }); + + Ok(StatusCode::OK) + } + + /// Handles the submission of a new payload header by performing various checks and /// verifications before saving the header to the auctioneer. pub async fn submit_header( @@ -1600,6 +1987,26 @@ where } } + /// This function saves the inclusion proof to the auctioneer.
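+ /// Any auctioneer failure is logged and surfaced as a generic `BuilderApiError::InternalError`.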
+ async fn save_inclusion_proof( + &self, + slot: u64, + proposer_pub_key: &BlsPublicKey, + bid_block_hash: &Hash32, + inclusion_proof: &InclusionProofs, + request_id: &Uuid, + ) -> Result<(), BuilderApiError> { + if let Err(err) = self + .auctioneer + .save_inclusion_proof(slot, proposer_pub_key, bid_block_hash, inclusion_proof) + .await + { + error!(request_id = %request_id, error = %err, "failed to save inclusion proof"); + return Err(BuilderApiError::InternalError) + } + Ok(()) + } + async fn save_header_bid_to_auctioneer( &self, payload: Arc, @@ -2134,7 +2541,8 @@ fn sanity_check_block_submission( payload_attributes: &PayloadAttributesUpdate, chain_info: &ChainInfo, ) -> Result<(), BuilderApiError> { - let expected_timestamp = chain_info.genesis_time_in_secs + (bid_trace.slot * SECONDS_PER_SLOT); + let expected_timestamp = + chain_info.genesis_time_in_secs + (bid_trace.slot * chain_info.seconds_per_slot); if payload.timestamp() != expected_timestamp { return Err(BuilderApiError::IncorrectTimestamp { got: payload.timestamp(), diff --git a/crates/api/src/builder/error.rs b/crates/api/src/builder/error.rs index 1d21c947..464724c1 100644 --- a/crates/api/src/builder/error.rs +++ b/crates/api/src/builder/error.rs @@ -6,7 +6,8 @@ use ethereum_consensus::{ primitives::{BlsPublicKey, Bytes32, Hash32}, ssz::{self, prelude::*}, }; -use helix_common::simulator::BlockSimError; +use helix_common::{proofs::ProofError, simulator::BlockSimError}; +use helix_database::error::DatabaseError; use helix_datastore::error::AuctioneerError; #[derive(Debug, thiserror::Error)] @@ -26,6 +27,12 @@ pub enum BuilderApiError { #[error("ssz deserialize error: {0}")] SszDeserializeError(#[from] ssz::prelude::DeserializeError), + #[error("ssz serialize error")] + SszSerializeError, + + #[error("failed to deserialize")] + DeserializeError, + #[error("failed to decode header-submission")] FailedToDecodeHeaderSubmission, @@ -113,6 +120,9 @@ pub enum BuilderApiError { #[error("datastore error: {0}")] AuctioneerError(#[from] AuctioneerError), + #[error("database error: {0}")] + DatabaseError(#[from] DatabaseError), + #[error("incorrect prev_randao - got: {got:?}, expected: {expected:?}")] PrevRandaoMismatch { got: Bytes32, expected: Bytes32 }, @@ -138,6 +148,24 @@ pub enum BuilderApiError { #[error("V2 submissions invalid if proposer requires regional filtering")] V2SubmissionsInvalidIfProposerRequiresRegionalFiltering, + + #[error("no constraints found")] + NoConstraintsFound, + + #[error("inclusion proof verification failed: {0}")] + InclusionProofVerificationFailed(#[from] ProofError), + + #[error("inclusion proofs not found")] + InclusionProofsNotFound, + + #[error("failed to compute hash tree root for transaction: {0}")] + HashTreeRootError(#[from] MerkleizationError), + + #[error("failed to get constraints for slot {0}")] + ConstraintsError(u64), + + #[error("incorrect slot for constraints request {0}")] + IncorrectSlot(u64), } impl IntoResponse for BuilderApiError { @@ -152,6 +180,12 @@ impl IntoResponse for BuilderApiError { BuilderApiError::SszDeserializeError(err) => { (StatusCode::BAD_REQUEST, format!("SSZ deserialize error: {err}")).into_response() }, + BuilderApiError::SszSerializeError => { + (StatusCode::BAD_REQUEST, "SSZ serialize error".to_string()).into_response() + }, + BuilderApiError::DeserializeError => { + (StatusCode::BAD_REQUEST, "Failed to deserialize").into_response() + }, BuilderApiError::FailedToDecodeHeaderSubmission => { (StatusCode::BAD_REQUEST, "Failed to decode header 
submission").into_response() }, @@ -225,6 +259,9 @@ impl IntoResponse for BuilderApiError { BuilderApiError::AuctioneerError(err) => { (StatusCode::INTERNAL_SERVER_ERROR, format!("Auctioneer error: {err}")).into_response() }, + BuilderApiError::DatabaseError(err) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("Database error: {err}")).into_response() + }, BuilderApiError::FeeRecipientMismatch { got, expected } => { (StatusCode::BAD_REQUEST, format!("Fee recipient mismatch. got: {got:?}, expected: {expected:?}")).into_response() }, @@ -281,6 +318,24 @@ impl IntoResponse for BuilderApiError { BuilderApiError::V2SubmissionsInvalidIfProposerRequiresRegionalFiltering => { (StatusCode::BAD_REQUEST, "V2 submissions invalid if proposer requires regional filtering").into_response() } + BuilderApiError::NoConstraintsFound => { + (StatusCode::BAD_REQUEST, "no constraints found").into_response() + } + BuilderApiError::InclusionProofVerificationFailed(err) => { + (StatusCode::BAD_REQUEST, format!("inclusion proof verifcation failed: {err}")).into_response() + } + BuilderApiError::InclusionProofsNotFound => { + (StatusCode::BAD_REQUEST, "inclusion proofs not found".to_string()).into_response() + } + BuilderApiError::HashTreeRootError(err) => { + (StatusCode::BAD_REQUEST, format!("failed to compute hash tree root for transaction: {err}")).into_response() + } + BuilderApiError::ConstraintsError(slot) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("failed to get constraints for slot {slot}")).into_response() + } + BuilderApiError::IncorrectSlot(slot) => { + (StatusCode::BAD_REQUEST, format!("incorrect slot for constraints request {slot}")).into_response() + } } } } diff --git a/crates/api/src/builder/simulator/mod.rs b/crates/api/src/builder/simulator/mod.rs index 90d5b7fa..d6dac971 100644 --- a/crates/api/src/builder/simulator/mod.rs +++ b/crates/api/src/builder/simulator/mod.rs @@ -2,9 +2,11 @@ pub mod mock_simulator; pub mod optimistic_simulator; mod optimistic_simulator_tests; pub mod rpc_simulator; -mod simulator_tests; pub mod traits; +#[cfg(test)] +mod simulator_tests; + use ethereum_consensus::{deneb::Bytes32, types::mainnet::ExecutionPayload}; use std::sync::Arc; diff --git a/crates/api/src/builder/simulator/simulator_tests.rs b/crates/api/src/builder/simulator/simulator_tests.rs index ae0ee092..653a0cb1 100644 --- a/crates/api/src/builder/simulator/simulator_tests.rs +++ b/crates/api/src/builder/simulator/simulator_tests.rs @@ -1,144 +1,140 @@ -#[cfg(test)] -mod simulator_tests { - // ++++ IMPORTS ++++ - use crate::builder::{ - rpc_simulator::{BlockSimRpcResponse, JsonRpcError, RpcSimulator}, - traits::BlockSimulator, - BlockSimRequest, DbInfo, - }; - use ethereum_consensus::{ - primitives::BlsSignature, ssz::prelude::*, types::mainnet::ExecutionPayload, - }; - use helix_common::{ - bid_submission::{BidTrace, SignedBidSubmission, SignedBidSubmissionCapella}, - simulator::BlockSimError, - BuilderInfo, ValidatorPreferences, - }; - use reqwest::Client; - use reth_primitives::hex; - use serde_json::json; - use std::sync::Arc; - use uuid::Uuid; - - // ++++ HELPERS ++++ - fn get_simulator(endpoint: &str) -> RpcSimulator { - let http = Client::new(); - RpcSimulator::new(http, endpoint.to_string()) - } +// ++++ IMPORTS ++++ +use crate::builder::{ + rpc_simulator::{BlockSimRpcResponse, JsonRpcError, RpcSimulator}, + traits::BlockSimulator, + BlockSimRequest, DbInfo, +}; +use ethereum_consensus::{ + primitives::BlsSignature, ssz::prelude::*, types::mainnet::ExecutionPayload, +}; +use 
helix_common::{ + bid_submission::{BidTrace, SignedBidSubmission, SignedBidSubmissionCapella}, + simulator::BlockSimError, + BuilderInfo, ValidatorPreferences, +}; +use reqwest::Client; +use reth_primitives::hex; +use serde_json::json; +use std::sync::Arc; +use uuid::Uuid; + +// ++++ HELPERS ++++ +fn get_simulator(endpoint: &str) -> RpcSimulator { + let http = Client::new(); + RpcSimulator::new(http, endpoint.to_string()) +} - fn get_byte_vector_32_for_hex(hex: &str) -> ByteVector<32> { - let bytes = hex::decode(&hex[2..]).unwrap(); - ByteVector::try_from(bytes.as_ref()).unwrap() - } +fn get_byte_vector_32_for_hex(hex: &str) -> ByteVector<32> { + let bytes = hex::decode(&hex[2..]).unwrap(); + ByteVector::try_from(bytes.as_ref()).unwrap() +} - fn get_sim_req() -> BlockSimRequest { - let mut capella_exec_payload = ethereum_consensus::capella::ExecutionPayload::default(); - capella_exec_payload.block_hash = get_byte_vector_32_for_hex( +fn get_sim_req() -> BlockSimRequest { + let capella_exec_payload = ethereum_consensus::capella::ExecutionPayload { + block_hash: get_byte_vector_32_for_hex( "0x9962816e9d0a39fd4c80935338a741dc916d1545694e41eb5a505e1a3098f9e5", - ); - let execution_payload = ExecutionPayload::Capella(capella_exec_payload); - let mut bid_trace = BidTrace::default(); - bid_trace.block_hash = get_byte_vector_32_for_hex( + ), + ..Default::default() + }; + let execution_payload = ExecutionPayload::Capella(capella_exec_payload); + let bid_trace = BidTrace { + block_hash: get_byte_vector_32_for_hex( "0x9962816e9d0a39fd4c80935338a741dc916d1545694e41eb5a505e1a3098f9e5", - ); - let signed_bid_submission = SignedBidSubmission::Capella(SignedBidSubmissionCapella { - message: bid_trace, - signature: BlsSignature::default(), - execution_payload, - }); - - BlockSimRequest::new( - 0, - Arc::new(signed_bid_submission), - ValidatorPreferences::default(), - None, - ) - } + ), + ..Default::default() + }; + let signed_bid_submission = SignedBidSubmission::Capella(SignedBidSubmissionCapella { + message: bid_trace, + signature: BlsSignature::default(), + execution_payload, + }); - // ++++ TESTS ++++ - #[tokio::test] - async fn test_process_request_ok() { - let mut server = mockito::Server::new(); - let mock = server - .mock("POST", "/") - .with_status(200) - .with_body(r#"{"jsonrpc":"2.0","id":"1","result":true}"#) - .create(); - - let (sim_res_sender, mut sim_res_receiver) = tokio::sync::mpsc::channel(100); - let simulator = get_simulator(&server.url()); - let builder_info = BuilderInfo::default(); - let result = simulator - .process_request(get_sim_req(), &builder_info, true, sim_res_sender, Uuid::new_v4()) - .await; - - mock.assert(); - assert!(result.is_ok()); - let received_sim_res = sim_res_receiver.recv().await.unwrap(); - match received_sim_res { - DbInfo::SimulationResult { block_hash, block_sim_result } => { - assert_eq!( - block_hash, - get_byte_vector_32_for_hex( - "0x9962816e9d0a39fd4c80935338a741dc916d1545694e41eb5a505e1a3098f9e5" - ) - ); - assert!(block_sim_result.is_ok()); - } - _ => panic!("Expected DbInfo::SimulationResult"), + BlockSimRequest::new(0, Arc::new(signed_bid_submission), ValidatorPreferences::default(), None) +} + +// ++++ TESTS ++++ +#[tokio::test] +async fn test_process_request_ok() { + let mut server = mockito::Server::new(); + let mock = server + .mock("POST", "/") + .with_status(200) + .with_body(r#"{"jsonrpc":"2.0","id":"1","result":true}"#) + .create(); + + let (sim_res_sender, mut sim_res_receiver) = tokio::sync::mpsc::channel(100); + let simulator = 
get_simulator(&server.url()); + let builder_info = BuilderInfo::default(); + let result = simulator + .process_request(get_sim_req(), &builder_info, true, sim_res_sender, Uuid::new_v4()) + .await; + + mock.assert(); + assert!(result.is_ok()); + let received_sim_res = sim_res_receiver.recv().await.unwrap(); + match received_sim_res { + DbInfo::SimulationResult { block_hash, block_sim_result } => { + assert_eq!( + block_hash, + get_byte_vector_32_for_hex( + "0x9962816e9d0a39fd4c80935338a741dc916d1545694e41eb5a505e1a3098f9e5" + ) + ); + assert!(block_sim_result.is_ok()); } + _ => panic!("Expected DbInfo::SimulationResult"), } +} - #[tokio::test] - async fn test_process_request_error() { - let mut server = mockito::Server::new(); - let mock = server - .mock("POST", "/") - .with_status(400) - .with_body(r#"{"jsonrpc":"2.0","id":"1","result":false}"#) - .create(); - - let (sim_res_sender, _sim_res_receiver) = tokio::sync::mpsc::channel(100); - let simulator = get_simulator(&server.url()); - let builder_info = BuilderInfo::default(); - let result = simulator - .process_request(get_sim_req(), &builder_info, true, sim_res_sender, Uuid::new_v4()) - .await; - - mock.assert(); - assert!(result.is_err()); - assert!(matches!(result.unwrap_err(), BlockSimError::RpcError(_))); - } +#[tokio::test] +async fn test_process_request_error() { + let mut server = mockito::Server::new(); + let mock = server + .mock("POST", "/") + .with_status(400) + .with_body(r#"{"jsonrpc":"2.0","id":"1","result":false}"#) + .create(); + + let (sim_res_sender, _sim_res_receiver) = tokio::sync::mpsc::channel(100); + let simulator = get_simulator(&server.url()); + let builder_info = BuilderInfo::default(); + let result = simulator + .process_request(get_sim_req(), &builder_info, true, sim_res_sender, Uuid::new_v4()) + .await; + + mock.assert(); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), BlockSimError::RpcError(_))); +} - #[tokio::test] - async fn test_quickquci() { - let x = "helllooooo"; +#[tokio::test] +async fn test_quickquci() { + let x = "helllooooo"; - let formatted = format!("{x:?}"); - let formatted_2 = x.to_string(); + let formatted = format!("{x:?}"); + let formatted_2 = x.to_string(); - println!("{formatted}"); - println!("{formatted_2}"); - } + println!("{formatted}"); + println!("{formatted_2}"); +} - #[tokio::test] - async fn test_process_request_validation_failed() { - let rpc_response = BlockSimRpcResponse { - error: Some(JsonRpcError { message: "validation failed".to_string() }), - }; - let rpc_response_json = json!(rpc_response).to_string(); - let mut server = mockito::Server::new(); - let mock = server.mock("POST", "/").with_status(200).with_body(rpc_response_json).create(); - - let (sim_res_sender, _sim_res_receiver) = tokio::sync::mpsc::channel(100); - let simulator = get_simulator(&server.url()); - let builder_info = BuilderInfo::default(); - let result = simulator - .process_request(get_sim_req(), &builder_info, true, sim_res_sender, Uuid::new_v4()) - .await; - - mock.assert(); - assert!(result.is_err()); - assert!(matches!(result.unwrap_err(), BlockSimError::BlockValidationFailed(_))); - } +#[tokio::test] +async fn test_process_request_validation_failed() { + let rpc_response = BlockSimRpcResponse { + error: Some(JsonRpcError { message: "validation failed".to_string() }), + }; + let rpc_response_json = json!(rpc_response).to_string(); + let mut server = mockito::Server::new(); + let mock = server.mock("POST", "/").with_status(200).with_body(rpc_response_json).create(); + + let 
(sim_res_sender, _sim_res_receiver) = tokio::sync::mpsc::channel(100); + let simulator = get_simulator(&server.url()); + let builder_info = BuilderInfo::default(); + let result = simulator + .process_request(get_sim_req(), &builder_info, true, sim_res_sender, Uuid::new_v4()) + .await; + + mock.assert(); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), BlockSimError::BlockValidationFailed(_))); } diff --git a/crates/api/src/builder/tests.rs b/crates/api/src/builder/tests.rs index 80349d78..d9f1d4e5 100644 --- a/crates/api/src/builder/tests.rs +++ b/crates/api/src/builder/tests.rs @@ -4,6 +4,7 @@ use crate::{ api::{decode_header_submission, decode_payload, BuilderApi, MAX_PAYLOAD_LENGTH}, mock_simulator::MockSimulator, }, + constraints::api::ConstraintsHandle, gossiper::mock_gossiper::MockGossiper, service::API_REQUEST_TIMEOUT, test_utils::builder_api_app, @@ -20,7 +21,7 @@ use ethereum_consensus::{ types::mainnet::{ExecutionPayload, ExecutionPayloadHeader}, Fork, }; -use futures::{stream::FuturesOrdered, Future, SinkExt, StreamExt}; +use futures::{lock::Mutex, stream::FuturesOrdered, Future, SinkExt, StreamExt}; use helix_beacon_client::types::PayloadAttributes; use helix_common::{ api::{ @@ -35,6 +36,7 @@ use helix_common::{ }, BidSubmission, SignedBidSubmission, }, + proofs::SignedConstraints, HeaderSubmissionTrace, Route, SubmissionTrace, ValidatorPreferences, }; use helix_database::MockDatabaseService; @@ -43,12 +45,19 @@ use helix_housekeeper::{ChainUpdate, PayloadAttributesUpdate, SlotUpdate}; use helix_utils::{calculate_withdrawals_root, request_encoding::Encoding}; use rand::Rng; use reqwest::{Client, Response}; +use reqwest_eventsource::{Event as ReqwestEvent, EventSource}; use reth_primitives::hex; use serde_json::json; use serial_test::serial; use std::{ - convert::Infallible, future::pending, io::Write, ops::Deref, pin::Pin, str::FromStr, sync::Arc, - time::Duration, + convert::Infallible, + future::pending, + io::Write, + ops::Deref, + pin::Pin, + str::FromStr, + sync::Arc, + time::{Duration, Instant}, }; use tokio::sync::{ mpsc::{Receiver, Sender}, @@ -59,6 +68,7 @@ use tokio_tungstenite::{ tungstenite::{self, Message}, }; use tonic::transport::Body; +use tracing::debug; // +++ HELPER VARIABLES +++ const ADDRESS: &str = "0.0.0.0"; @@ -122,13 +132,6 @@ fn get_byte_vector_32_for_hex(hex: &str) -> ByteVector<32> { ByteVector::try_from(bytes.as_ref()).unwrap() } -fn hex_to_byte_arr_32(hex: &str) -> [u8; 32] { - let bytes = hex::decode(&hex[2..]).unwrap(); - let mut arr = [0u8; 32]; - arr.copy_from_slice(&bytes); - arr -} - fn get_valid_payload_register_validator( submission_slot: Option, ) -> BuilderGetValidatorsResponseEntry { @@ -297,7 +300,7 @@ async fn start_api_server() -> ( let http_config = HttpServiceConfig::new(ADDRESS, PORT); let bind_address = http_config.bind_address(); - let (router, api, slot_update_receiver) = builder_api_app(); + let (router, api, slot_update_receiver, _) = builder_api_app(); // Run the app in a background task tokio::spawn(async move { @@ -316,6 +319,36 @@ async fn start_api_server() -> ( (tx, http_config, api, slot_update_receiver) } +async fn start_api_server_with_constraints() -> ( + oneshot::Sender<()>, + HttpServiceConfig, + Arc>, + Receiver>, + ConstraintsHandle, +) { + let (tx, rx) = oneshot::channel(); + let http_config = HttpServiceConfig::new(ADDRESS, PORT); + let bind_address = http_config.bind_address(); + + let (router, api, slot_update_receiver, constraints_handle) = builder_api_app(); + + // Run the app in a 
background task + tokio::spawn(async move { + // run it with hyper on localhost:3000 + let listener = tokio::net::TcpListener::bind(bind_address).await.unwrap(); + axum::serve(listener, router) + .with_graceful_shutdown(async { + rx.await.ok(); + }) + .await + .unwrap(); + }); + + tokio::time::sleep(Duration::from_millis(100)).await; + + (tx, http_config, api, slot_update_receiver, constraints_handle) +} + fn _get_req_body_submit_block_json() -> serde_json::Value { json!({ "message": { @@ -358,6 +391,32 @@ fn _get_req_body_submit_block_json() -> serde_json::Value { }) } +fn _get_signed_constraints_json() -> &'static str { + r#"[ + { + "message": { + "pubkey": "0xa20322c78fb784ba5e0d9d67ccf71e96c7efa0ea49fda73d62e58f70aab2703b0edc3ea8547c655021858f98437ee790", + "slot": 987432, + "top": false, + "transactions": [ + "0x02f876018204db8405f5e100850218711a00825208949d22816f6611cfcb0cde5076c5f4e4a269e79bef8904563918244f40000080c080a0ee840d80915c9b506537909a5a6cf1ca2c5b47140d6585adab6ec0faf75fdcb7a07692785c5cb43c7cf02b800f" + ] + }, + "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" + }, { + "message": { + "pubkey": "0xa20322c78fb784ba5e0d9d67ccf71e96c7efa0ea49fda73d62e58f70aab2703b0edc3ea8547c655021858f98437ee790", + "slot": 987433, + "top": false, + "transactions": [ + "0x02f876018204dbd40c45bf2105dd18711a0082d208949da2816f6611bcab0cde5076c5f4e4a269e79bef8904563918244f40111180c080a0ee840d80915c9b506537909a5a6cf1ca2c5b47140d6585adab6ec0faf75fdcb7a07692785c5cb43c7cf02b800f" + ] + }, + "signature": "0x1b68ac14b663c9fc5b50984123ec9534bbd9cceda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" + } + ]"# +} + pub fn generate_request( cancellations_enabled: bool, gzip_encoding: bool, @@ -550,6 +609,97 @@ async fn test_signed_bid_submission_decoding_deneb() { assert!(decoded_submission.blobs_bundle().is_some()); } +#[tokio::test] +#[serial] +async fn test_constraints_stream_ok() { + tracing_subscriber::fmt::init(); + + // Start the server + let (tx, http_config, _api, _slot_update_receiver, constraints_handle) = + start_api_server_with_constraints().await; + + // GET constraints stream + let req_url = + format!("{}{}", http_config.base_url(), Route::GetBuilderConstraintsStream.path()); + let client = reqwest::Client::new(); + let req = client.get(req_url.as_str()).header("header", "text/event-stream"); + + let event_source = EventSource::new(req).unwrap_or_else(|err| { + panic!("Failed to create EventSource: {:?}", err); + }); + + // Prepare multiple signed constraints + let test_constraints: Vec = + serde_json::from_str(_get_signed_constraints_json()).unwrap(); + + // Shared vector to collect received constraints + let received_constraints = Arc::new(Mutex::new(Vec::new())); + let received_constraints_clone = Arc::clone(&received_constraints); + + // Spawn a task to listen to the SSE stream + tokio::spawn(async move { + let mut event_source = event_source; + while let Some(event) = event_source.next().await { + match event { + Ok(ReqwestEvent::Message(message)) => { + println!("Received SSE message: {:?}", message); + if message.event == "signed_constraint" { + let data = &message.data; + let received_constraints = + serde_json::from_str::>(data) + .unwrap() + .first() + .cloned() + .expect("at least one constraint"); + println!("Received 
constraint: {:?}", received_constraints); + received_constraints_clone.lock().await.push(received_constraints); + } + } + Ok(ReqwestEvent::Open) => { + println!("SSE connection opened"); + } + Err(err) => { + println!("Error receiving SSE event: {:?}", err); + } + } + } + }); + + // Delay to ensure the subscription is set up + tokio::time::sleep(Duration::from_millis(100)).await; + + // Send the signed constraints + for constraint in &test_constraints { + constraints_handle.send_constraints(constraint.clone()); + } + + // Wait for the constraints to be received + let timeout = Duration::from_secs(5); + let start_time = Instant::now(); + + loop { + { + let received = received_constraints.lock().await; + if received.len() >= test_constraints.len() { + break + } + } + if start_time.elapsed() > timeout { + panic!("Timeout waiting for constraints"); + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + + // Assert that the received constraints match the sent constraints + assert_eq!(received_constraints.lock().await[0].signature, test_constraints[0].signature); + assert_eq!(received_constraints.lock().await[1].signature, test_constraints[1].signature); + debug!("Received constraints: {:?}", received_constraints); + debug!("Sent constraints: {:?}", test_constraints); + + // Shut down the server + let _ = tx.send(()); +} + #[tokio::test] #[serial] async fn test_get_validators_internal_server_error() { @@ -1218,6 +1368,7 @@ async fn test_submit_block_timeout_triggered() { if cancellations_enabled { "?cancellations=1" } else { "" } ); + #[allow(clippy::type_complexity)] let mut body: FuturesOrdered< Pin, Infallible>> + Send>>, > = FuturesOrdered::new(); diff --git a/crates/api/src/constraints/api.rs b/crates/api/src/constraints/api.rs new file mode 100644 index 00000000..582f1ce7 --- /dev/null +++ b/crates/api/src/constraints/api.rs @@ -0,0 +1,461 @@ +use axum::{ + body::{to_bytes, Body}, + http::{Request, StatusCode}, + Extension, +}; +use ethereum_consensus::{deneb::Slot, ssz}; +use helix_common::{ + api::constraints_api::{ + SignableBLS, SignedDelegation, SignedRevocation, DELEGATION_ACTION, + MAX_CONSTRAINTS_PER_SLOT, REVOCATION_ACTION, + }, + bellatrix::List, + chain_info::ChainInfo, + proofs::{ConstraintsMessage, SignedConstraints, SignedConstraintsWithProofData}, + ConstraintSubmissionTrace, +}; +use helix_database::DatabaseService; +use helix_datastore::Auctioneer; +use helix_utils::signing::{verify_signed_message, COMMIT_BOOST_DOMAIN}; +use std::{ + collections::HashSet, + sync::Arc, + time::{SystemTime, UNIX_EPOCH}, +}; +use tokio::{sync::broadcast, time::Instant}; +use tracing::{error, info, trace, warn}; +use uuid::Uuid; + +use crate::constraints::error::ConstraintsApiError; + +use super::error::Conflict; + +// This is the maximum length (randomly chosen) of a request body in bytes. 
+pub(crate) const MAX_REQUEST_LENGTH: usize = 1024 * 1024 * 5; + +#[derive(Clone)] +pub struct ConstraintsApi +where + A: Auctioneer + 'static, + DB: DatabaseService + 'static, +{ + auctioneer: Arc, + db: Arc, + chain_info: Arc, + + constraints_handle: ConstraintsHandle, +} + +#[derive(Clone)] +pub struct ConstraintsHandle { + pub(crate) constraints_tx: broadcast::Sender, +} + +impl ConstraintsHandle { + pub fn send_constraints(&self, constraints: SignedConstraints) { + if self.constraints_tx.send(constraints).is_err() { + error!("Failed to send constraints to the constraints channel"); + } + } +} + +impl ConstraintsApi +where + A: Auctioneer + 'static, + DB: DatabaseService + 'static, +{ + pub fn new( + auctioneer: Arc, + db: Arc, + chain_info: Arc, + constraints_handle: ConstraintsHandle, + ) -> Self { + Self { auctioneer, db, chain_info, constraints_handle } + } + + /// Handles the submission of a batch of signed constraints. + /// + /// Implements this API: + pub async fn submit_constraints( + Extension(api): Extension>>, + req: Request, + ) -> Result { + let request_id = Uuid::new_v4(); + let mut trace = + ConstraintSubmissionTrace { receive: get_nanos_timestamp()?, ..Default::default() }; + + // Decode the incoming request body into a payload. + let signed_constraints = + decode_constraints_submission(req, &mut trace, &request_id).await?; + + if signed_constraints.is_empty() { + return Err(ConstraintsApiError::NilConstraints) + } + + // Check that all constraints are for the same slot and share the same pubkey + let Some(first_constraints) = signed_constraints.first().map(|c| c.message.clone()) else { + error!(request_id = %request_id, "No constraints found"); + return Err(ConstraintsApiError::InvalidConstraints); + }; + if !signed_constraints.iter().all(|c| c.message.slot == first_constraints.slot) { + error!(request_id = %request_id, "Constraints for different slots in the same batch"); + return Err(ConstraintsApiError::InvalidConstraints) + } + if !signed_constraints.iter().all(|c| c.message.pubkey == first_constraints.pubkey) { + error!(request_id = %request_id, "Constraints for different pubkeys in the same batch"); + return Err(ConstraintsApiError::InvalidConstraints) + } + + // PERF: can we avoid calling the db?
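+ // One option (not implemented here): cache the current epoch's proposer duties in memory and refresh them on slot updates.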
+ let maybe_validator_pubkey = api.db.get_proposer_duties().await?.iter().find_map(|d| { + if d.slot == first_constraints.slot { + Some(d.entry.registration.message.public_key.clone()) + } else { + None + } + }); + + let Some(validator_pubkey) = maybe_validator_pubkey else { + error!(request_id = %request_id, slot = first_constraints.slot, "Missing proposer info"); + return Err(ConstraintsApiError::MissingProposerInfo); + }; + + // Fetch active delegations for the validator pubkey, if any + let delegations = + api.auctioneer.get_validator_delegations(validator_pubkey.clone()).await?; + let delegatees = + delegations.iter().map(|d| d.message.delegatee_pubkey.clone()).collect::>(); + + // Add all the valid constraints to the cache + for constraint in signed_constraints { + // Check for conflicts in the constraints + let saved_constraints = api.auctioneer.get_constraints(constraint.message.slot).await?; + if let Some(conflict) = conflicts_with(&saved_constraints, &constraint.message) { + return Err(ConstraintsApiError::Conflict(conflict)) + } + + // Check if the maximum number of constraints per slot has been reached + if saved_constraints.is_some_and(|c| c.len() + 1 > MAX_CONSTRAINTS_PER_SLOT) { + return Err(ConstraintsApiError::MaxConstraintsReached) + } + + // Check if the constraint pubkey is delegated to submit constraints for this validator. + // - If there are no delegations, only the validator pubkey can submit constraints + // - If there are delegations, only delegatees can submit constraints + let message_not_signed_by_validator = + delegatees.is_empty() && constraint.message.pubkey != validator_pubkey; + let message_not_signed_by_delegatee = + !delegatees.is_empty() && !delegatees.contains(&constraint.message.pubkey); + + if message_not_signed_by_validator && message_not_signed_by_delegatee { + error!(request_id = %request_id, pubkey = %constraint.message.pubkey, "Pubkey unauthorized"); + return Err(ConstraintsApiError::PubkeyNotAuthorized(constraint.message.pubkey)) + } + + // Verify the constraints message BLS signature + if let Err(e) = verify_signed_message( + &constraint.message.digest(), + &constraint.signature, + &constraint.message.pubkey, + COMMIT_BOOST_DOMAIN, + &api.chain_info.context, + ) { + error!(err = ?e, request_id = %request_id, "Invalid constraints signature"); + return Err(ConstraintsApiError::InvalidSignature) + }; + + // Send to the constraints channel + api.constraints_handle.send_constraints(constraint.clone()); + + // Finally add the constraints to the redis cache + if let Err(err) = api + .save_constraints_to_auctioneer( + &mut trace, + constraint.message.slot, + constraint, + &request_id, + ) + .await + { + error!(request_id = %request_id, error = %err, "Failed to save constraints to auctioneer"); + }; + } + + // Log some final info + trace.request_finish = get_nanos_timestamp()?; + trace!( + request_id = %request_id, + trace = ?trace, + request_duration_ns = trace.request_finish.saturating_sub(trace.receive), + "submit_constraints request finished", + ); + + Ok(StatusCode::OK) + } + + /// Handles delegating constraint submission rights to another BLS key. 
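+ /// Each delegation must be signed by the delegating validator; signatures are verified over the message digest with the commit-boost domain.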
+ /// + /// Implements this API: + pub async fn delegate( + Extension(api): Extension>>, + req: Request, + ) -> Result { + let request_id = Uuid::new_v4(); + let mut trace = + ConstraintSubmissionTrace { receive: get_nanos_timestamp()?, ..Default::default() }; + + info!( + request_id = %request_id, + event = "delegate", + timestamp_request_start = trace.receive, + ); + + // Read the body + let body = req.into_body(); + let body_bytes = to_bytes(body, MAX_REQUEST_LENGTH).await?; + + // Decode the incoming request body into a `SignedDelegation` array. + let signed_delegations = match serde_json::from_slice::>(&body_bytes) + { + Ok(delegations) => { + let action = delegations.iter().map(|d| d.message.action).collect::>(); + let are_all_actions_delegations = + action.len() == 1 && action.contains(&DELEGATION_ACTION); + if !are_all_actions_delegations { + warn!(request_id = %request_id, actions = ?action, "Invalid delegation action. Expected {DELEGATION_ACTION}"); + return Err(ConstraintsApiError::InvalidDelegation) + } + delegations + } + Err(e) => { + warn!(err = ?e, request_id = %request_id, "Failed to decode delegations"); + return Err(ConstraintsApiError::InvalidDelegation) + } + }; + trace.decode = get_nanos_timestamp()?; + + for delegation in &signed_delegations { + if let Err(e) = verify_signed_message( + &delegation.message.digest(), + &delegation.signature, + &delegation.message.validator_pubkey, + COMMIT_BOOST_DOMAIN, + &api.chain_info.context, + ) { + warn!(err = ?e, request_id = %request_id, "Invalid delegation signature"); + return Err(ConstraintsApiError::InvalidSignature) + }; + } + trace.verify_signature = get_nanos_timestamp()?; + + // Store the delegations in the auctioneer + tokio::spawn(async move { + if let Err(err) = api.auctioneer.save_validator_delegations(signed_delegations).await { + error!(error = %err, "Failed to save delegations"); + } + }); + + // Log some final info + trace.request_finish = get_nanos_timestamp()?; + trace!( + request_id = %request_id, + trace = ?trace, + request_duration_ns = trace.request_finish.saturating_sub(trace.receive), + "delegate request finished", + ); + + Ok(StatusCode::OK) + } + + /// Handles revoking constraint submission rights from a BLS key. + /// + /// Implements this API: + pub async fn revoke( + Extension(api): Extension>>, + req: Request, + ) -> Result { + let request_id = Uuid::new_v4(); + let mut trace = + ConstraintSubmissionTrace { receive: get_nanos_timestamp()?, ..Default::default() }; + + info!( + request_id = %request_id, + event = "revoke", + timestamp_request_start = trace.receive, + ); + + // Read the body + let body = req.into_body(); + let body_bytes = to_bytes(body, MAX_REQUEST_LENGTH).await?; + + // Decode the incoming request body into a `SignedRevocation` array. + let signed_revocations = match serde_json::from_slice::>(&body_bytes) + { + Ok(revocations) => { + let action = revocations.iter().map(|r| r.message.action).collect::>(); + let are_all_actions_revocations = + action.len() == 1 && action.contains(&REVOCATION_ACTION); + if !are_all_actions_revocations { + warn!(request_id = %request_id, actions = ?action, "Invalid revocation action.
+    /// Handles revoking constraint submission rights from a BLS key.
+    ///
+    /// Implements this API:
+    pub async fn revoke(
+        Extension(api): Extension<Arc<ConstraintsApi<A, DB>>>,
+        req: Request,
+    ) -> Result<StatusCode, ConstraintsApiError> {
+        let request_id = Uuid::new_v4();
+        let mut trace =
+            ConstraintSubmissionTrace { receive: get_nanos_timestamp()?, ..Default::default() };
+
+        info!(
+            request_id = %request_id,
+            event = "revoke",
+            timestamp_request_start = trace.receive,
+        );
+
+        // Read the body
+        let body = req.into_body();
+        let body_bytes = to_bytes(body, MAX_REQUEST_LENGTH).await?;
+
+        // Decode the incoming request body into a `SignedRevocation` array.
+        let signed_revocations = match serde_json::from_slice::<Vec<SignedRevocation>>(&body_bytes)
+        {
+            Ok(revocations) => {
+                let action = revocations.iter().map(|r| r.message.action).collect::<HashSet<_>>();
+                let are_all_actions_revocations =
+                    action.len() == 1 && action.contains(&REVOCATION_ACTION);
+                if !are_all_actions_revocations {
+                    warn!(request_id = %request_id, actions = ?action, "Invalid revocation action, expected {REVOCATION_ACTION}");
+                    return Err(ConstraintsApiError::InvalidRevocation)
+                }
+                revocations
+            }
+            Err(e) => {
+                warn!(err = ?e, request_id = %request_id, "Failed to decode revocations");
+                return Err(ConstraintsApiError::InvalidRevocation)
+            }
+        };
+        trace.decode = get_nanos_timestamp()?;
+
+        for revocation in &signed_revocations {
+            if let Err(e) = verify_signed_message(
+                &revocation.message.digest(),
+                &revocation.signature,
+                &revocation.message.validator_pubkey,
+                COMMIT_BOOST_DOMAIN,
+                &api.chain_info.context,
+            ) {
+                warn!(err = ?e, request_id = %request_id, "Invalid revocation signature");
+                return Err(ConstraintsApiError::InvalidSignature)
+            };
+        }
+        trace.verify_signature = get_nanos_timestamp()?;
+
+        // Store the revocations in the auctioneer
+        tokio::spawn(async move {
+            if let Err(err) = api.auctioneer.revoke_validator_delegations(signed_revocations).await
+            {
+                error!(error = %err, "Failed to revoke delegations");
+            }
+        });
+
+        // Log some final info
+        trace.request_finish = get_nanos_timestamp()?;
+        info!(
+            request_id = %request_id,
+            trace = ?trace,
+            request_duration_ns = trace.request_finish.saturating_sub(trace.receive),
+            "revoke request finished",
+        );
+
+        Ok(StatusCode::OK)
+    }
+}
+
+// Helpers
+impl<A, DB> ConstraintsApi<A, DB>
+where
+    A: Auctioneer + 'static,
+    DB: DatabaseService + 'static,
+{
+    async fn save_constraints_to_auctioneer(
+        &self,
+        trace: &mut ConstraintSubmissionTrace,
+        slot: Slot,
+        constraint: SignedConstraints,
+        request_id: &Uuid,
+    ) -> Result<(), ConstraintsApiError> {
+        let message_with_data = SignedConstraintsWithProofData::try_from(constraint)?;
+        match self.auctioneer.save_constraints(slot, message_with_data).await {
+            Ok(()) => {
+                trace.auctioneer_update = get_nanos_timestamp()?;
+                info!(
+                    request_id = %request_id,
+                    timestamp_after_auctioneer = Instant::now().elapsed().as_nanos(),
+                    auctioneer_latency_ns = trace.auctioneer_update.saturating_sub(trace.decode),
+                    "Constraints saved to auctioneer",
+                );
+                Ok(())
+            }
+            Err(err) => {
+                error!(request_id = %request_id, error = %err, "Failed to save constraints to auctioneer");
+                Err(ConstraintsApiError::AuctioneerError(err))
+            }
+        }
+    }
+}
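Aside: both delegate and revoke verify BLS signatures over the SHA-256 digest defined by SignableBLS in crates/common/src/api/constraints_api.rs (added later in this diff). A minimal signer-side sketch of computing that digest — the free function is illustrative, and the actual BLS signing over it is elided:

use sha2::{Digest, Sha256};

// SHA-256 over the action byte followed by the two compressed 48-byte
// BLS pubkeys, matching DelegationMessage::digest() and
// RevocationMessage::digest().
fn delegation_digest(
    action: u8,
    validator_pubkey: &[u8; 48],
    delegatee_pubkey: &[u8; 48],
) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update([action]);
    hasher.update(validator_pubkey);
    hasher.update(delegatee_pubkey);
    hasher.finalize().into()
}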
+/// Checks if the constraints for the given slot conflict with the existing constraints.
+/// Returns a [Conflict] in case of a conflict, None otherwise.
+///
+/// # Possible conflicts
+/// - Multiple ToB constraints per slot
+/// - Duplicates of the same transaction per slot
+pub fn conflicts_with(
+    saved_constraints: &Option<Vec<SignedConstraintsWithProofData>>,
+    constraints: &ConstraintsMessage,
+) -> Option<Conflict> {
+    // Check if there are saved constraints to compare against
+    if let Some(saved_constraints) = saved_constraints {
+        for saved_constraint in saved_constraints {
+            // Only 1 ToB (Top of Block) constraint per slot
+            if constraints.top && saved_constraint.signed_constraints.message.top {
+                return Some(Conflict::TopOfBlock)
+            }
+
+            // Check if any of the transactions are the same
+            for tx in constraints.transactions.iter() {
+                if saved_constraint
+                    .signed_constraints
+                    .message
+                    .transactions
+                    .iter()
+                    .any(|existing| tx == existing)
+                {
+                    return Some(Conflict::DuplicateTransaction)
+                }
+            }
+        }
+    }
+
+    None
+}
+
+pub async fn decode_constraints_submission(
+    req: Request,
+    trace: &mut ConstraintSubmissionTrace,
+    request_id: &Uuid,
+) -> Result<Vec<SignedConstraints>, ConstraintsApiError> {
+    // Check if the request is SSZ encoded
+    let is_ssz = req
+        .headers()
+        .get("Content-Type")
+        .and_then(|val| val.to_str().ok())
+        .map_or(false, |v| v == "application/octet-stream");
+
+    // Read the body
+    let body = req.into_body();
+    let body_bytes = to_bytes(body, MAX_REQUEST_LENGTH).await?;
+
+    // Decode the body
+    let constraints: List<SignedConstraints, MAX_CONSTRAINTS_PER_SLOT> = if is_ssz {
+        match ssz::prelude::deserialize(&body_bytes) {
+            Ok(constraints) => constraints,
+            Err(err) => {
+                // Fallback to JSON
+                warn!(request_id = %request_id, error = %err, "Failed to decode SSZ constraints, falling back to JSON");
+                serde_json::from_slice(&body_bytes)?
+            }
+        }
+    } else {
+        serde_json::from_slice(&body_bytes)?
+    };
+
+    trace.decode = get_nanos_timestamp()?;
+    info!(
+        request_id = %request_id,
+        timestamp_after_decoding = Instant::now().elapsed().as_nanos(),
+        decode_latency_ns = trace.decode.saturating_sub(trace.receive),
+        num_constraints = constraints.len(),
+    );
+
+    Ok(constraints.to_vec())
+}
+
+fn get_nanos_timestamp() -> Result<u64, ConstraintsApiError> {
+    SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .map(|d| d.as_nanos() as u64)
+        .map_err(|_| ConstraintsApiError::InternalError)
+}
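Aside: decode_constraints_submission treats the body as SSZ only for Content-Type: application/octet-stream (with a JSON fallback) and as JSON otherwise. A hedged client-side sketch — the base URL is an assumption, the path comes from Route::SubmitBuilderConstraints, and the byte buffers stand in for a serialized List<SignedConstraints, MAX_CONSTRAINTS_PER_SLOT>:

use reqwest::Client;

async fn submit_both_encodings(
    ssz_bytes: Vec<u8>,
    json_bytes: Vec<u8>,
) -> Result<(), reqwest::Error> {
    let url = "http://localhost:3000/constraints/v1/builder/constraints";
    let client = Client::new();

    // SSZ path: only this content type is parsed as SSZ first.
    client
        .post(url)
        .header("Content-Type", "application/octet-stream")
        .body(ssz_bytes)
        .send()
        .await?;

    // Any other content type goes straight to serde_json.
    client
        .post(url)
        .header("Content-Type", "application/json")
        .body(json_bytes)
        .send()
        .await?;
    Ok(())
}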
constraints proof data")] + ConstraintsProofDataError(#[from] ProofError), + + #[error(transparent)] + Conflict(#[from] Conflict), + + #[error("Max constraints per slot reached")] + MaxConstraintsReached, + + #[error("database error: {0}")] + DatabaseError(#[from] DatabaseError), + + #[error("Missing proposer info")] + MissingProposerInfo, + + #[error("Pubkey not authorized to submit constraints: {0}")] + PubkeyNotAuthorized(PublicKey), +} + +impl IntoResponse for ConstraintsApiError { + fn into_response(self) -> Response { + match self { + ConstraintsApiError::HyperError(err) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("Hyper error: {err}")).into_response() + } + ConstraintsApiError::AxumError(err) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("Axum error: {err}")).into_response() + } + ConstraintsApiError::SerdeDecodeError(err) => { + (StatusCode::BAD_REQUEST, format!("Serde decode error: {err}")).into_response() + } + ConstraintsApiError::InvalidConstraints => { + (StatusCode::BAD_REQUEST, "Invalid constraints").into_response() + } + ConstraintsApiError::InvalidDelegation => { + (StatusCode::BAD_REQUEST, "Invalid delegation").into_response() + } + ConstraintsApiError::InvalidRevocation => { + (StatusCode::BAD_REQUEST, "Invalid revocation").into_response() + } + ConstraintsApiError::InvalidSignature => { + (StatusCode::BAD_REQUEST, "Invalid signature").into_response() + } + ConstraintsApiError::NilConstraints => { + (StatusCode::BAD_REQUEST, "Constraints field is empty").into_response() + } + ConstraintsApiError::AuctioneerError(err) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("Auctioneer error: {err}")) + .into_response() + } + ConstraintsApiError::InternalError => { + (StatusCode::INTERNAL_SERVER_ERROR, "Internal error").into_response() + } + ConstraintsApiError::ConstraintsProofDataError(err) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("Constraints proof data error: {err}")) + .into_response() + } + ConstraintsApiError::Conflict(err) => { + (StatusCode::CONFLICT, format!("Conflict: {err}")).into_response() + } + ConstraintsApiError::MaxConstraintsReached => { + (StatusCode::BAD_REQUEST, "Max constraints per slot reached").into_response() + } + ConstraintsApiError::DatabaseError(err) => { + (StatusCode::INTERNAL_SERVER_ERROR, format!("Database error: {err}")) + .into_response() + } + ConstraintsApiError::MissingProposerInfo => { + (StatusCode::BAD_REQUEST, "Missing proposer info").into_response() + } + ConstraintsApiError::PubkeyNotAuthorized(pubkey) => ( + StatusCode::UNAUTHORIZED, + format!("Pubkey not authorized to submit constraints: {pubkey}"), + ) + .into_response(), + } + } +} diff --git a/crates/api/src/constraints/mod.rs b/crates/api/src/constraints/mod.rs new file mode 100644 index 00000000..5dfca9e1 --- /dev/null +++ b/crates/api/src/constraints/mod.rs @@ -0,0 +1,4 @@ +pub mod api; +pub mod error; +#[cfg(test)] +pub mod tests; diff --git a/crates/api/src/constraints/tests.rs b/crates/api/src/constraints/tests.rs new file mode 100644 index 00000000..1516c0ef --- /dev/null +++ b/crates/api/src/constraints/tests.rs @@ -0,0 +1,435 @@ +// +++ IMPORTS +++ + +use std::{sync::Arc, time::Duration}; + +use ethereum_consensus::{ + builder::ValidatorRegistration, + primitives::{BlsPublicKey, BlsSignature}, + ssz, +}; +use helix_common::{ + api::{ + builder_api::BuilderGetValidatorsResponseEntry, + constraints_api::{SignedDelegation, SignedRevocation, MAX_CONSTRAINTS_PER_SLOT}, + proposer_api::ValidatorRegistrationInfo, + }, + bellatrix::{ByteVector, List}, + 
+    deneb::SignedValidatorRegistration,
+    proofs::SignedConstraints,
+    Route, ValidatorPreferences,
+};
+use helix_database::MockDatabaseService;
+use helix_datastore::MockAuctioneer;
+use helix_housekeeper::{ChainUpdate, SlotUpdate};
+use helix_utils::request_encoding::Encoding;
+use rand::Rng;
+use reqwest::{Client, Response};
+use reth_primitives::hex;
+use serial_test::serial;
+use tokio::sync::{
+    mpsc::{Receiver, Sender},
+    oneshot,
+};
+use tracing::info;
+
+use crate::{
+    builder::{api::BuilderApi, mock_simulator::MockSimulator},
+    constraints::api::ConstraintsApi,
+    gossiper::mock_gossiper::MockGossiper,
+    test_utils::constraints_api_app,
+};
+
+// +++ HELPER VARIABLES +++
+const ADDRESS: &str = "0.0.0.0";
+const PORT: u16 = 3000;
+const HEAD_SLOT: u64 = 32; //ethereum_consensus::configs::mainnet::CAPELLA_FORK_EPOCH;
+const SUBMISSION_SLOT: u64 = HEAD_SLOT + 1;
+const SUBMISSION_TIMESTAMP: u64 = 1606824419;
+const VALIDATOR_INDEX: usize = 1;
+#[allow(dead_code)]
+const SECRET_KEY: &str = "0x0e8e05025f246b4585d6c95f37d9286ea54bafb6e98de6554e8063084424f91b";
+const PUBLIC_KEY: &str = "0x87001c7c7546c2880b93629c685f8fad3aacdd7d5e59303996fad07c2cc135413eb0568ec84e7594719127827c2717a1";
+
+// +++ HELPER FUNCTIONS +++
+
+#[derive(Debug, Clone)]
+struct HttpServiceConfig {
+    address: String,
+    port: u16,
+}
+
+impl HttpServiceConfig {
+    fn new(address: &str, port: u16) -> Self {
+        HttpServiceConfig { address: address.to_string(), port }
+    }
+
+    fn base_url(&self) -> String {
+        format!("http://{}:{}", self.address, self.port)
+    }
+
+    fn bind_address(&self) -> String {
+        format!("{}:{}", self.address, self.port)
+    }
+}
+
+fn get_test_pub_key_bytes(random: bool) -> [u8; 48] {
+    if random {
+        let mut pubkey_array = [0u8; 48];
+        rand::thread_rng().fill(&mut pubkey_array[..]);
+        pubkey_array
+    } else {
+        let pubkey_bytes = hex::decode(&PUBLIC_KEY[2..]).unwrap();
+        let mut pubkey_array = [0u8; 48];
+        pubkey_array.copy_from_slice(&pubkey_bytes);
+        pubkey_array
+    }
+}
+
+fn get_byte_vector_20_for_hex(hex: &str) -> ByteVector<20> {
+    let bytes = hex::decode(&hex[2..]).unwrap();
+    ByteVector::try_from(bytes.as_ref()).unwrap()
+}
+
+fn get_valid_payload_register_validator(
+    submission_slot: Option<u64>,
+    validator_index: Option<usize>,
+) -> BuilderGetValidatorsResponseEntry {
+    BuilderGetValidatorsResponseEntry {
+        slot: submission_slot.unwrap_or(SUBMISSION_SLOT),
+        validator_index: validator_index.unwrap_or(VALIDATOR_INDEX),
+        entry: ValidatorRegistrationInfo {
+            registration: SignedValidatorRegistration {
+                message: ValidatorRegistration {
+                    fee_recipient: get_byte_vector_20_for_hex("0x5cc0dde14e7256340cc820415a6022a7d1c93a35"),
+                    gas_limit: 30000000,
+                    timestamp: SUBMISSION_TIMESTAMP,
+                    public_key: BlsPublicKey::try_from(&get_test_pub_key_bytes(false)[..]).unwrap(),
+                },
+                signature: BlsSignature::try_from(hex::decode(&"0x834646477659587229991c5411aee68cb06d86c71aada0dfd0d0c50c7a75200d82f07d0b91d0f3c30924952f0e3e7ed7094033e82db91e54bbfdf07350a8a397c3af9914b530ecc38e55b4c70559e1bd82d7c7c9d22f4fc3fad612bc67f869f3"[2..]).unwrap().as_slice()).unwrap(),
+            },
+            preferences: ValidatorPreferences::default(),
+        }
+    }
+}
+
+fn get_dummy_slot_update(
+    head_slot: Option<u64>,
+    submission_slot: Option<u64>,
+    validator_index: Option<usize>,
+) -> SlotUpdate {
+    SlotUpdate {
+        slot: head_slot.unwrap_or(HEAD_SLOT),
+        next_duty: Some(get_valid_payload_register_validator(submission_slot, validator_index)),
+        new_duties: Some(vec![get_valid_payload_register_validator(
+            submission_slot,
+            validator_index,
+        )]),
+    }
+}
+
+async fn send_dummy_slot_update(
+    slot_update_sender: Sender<ChainUpdate>,
+    head_slot: Option<u64>,
+    submission_slot: Option<u64>,
+    validator_index: Option<usize>,
+) {
+    let chain_update =
+        ChainUpdate::SlotUpdate(get_dummy_slot_update(head_slot, submission_slot, validator_index));
+    slot_update_sender.send(chain_update).await.unwrap();
+
+    // sleep for a bit to allow the api to process the slot update
+    tokio::time::sleep(Duration::from_millis(100)).await;
+}
+
+async fn send_request(req_url: &str, encoding: Encoding, req_payload: Vec<u8>) -> Response {
+    let client = Client::new();
+    let request = client.post(req_url).header("accept", "*/*");
+    let request = encoding.to_headers(request);
+
+    request.body(req_payload).send().await.unwrap()
+}
+
+async fn start_api_server() -> (
+    oneshot::Sender<()>,
+    HttpServiceConfig,
+    Arc<ConstraintsApi<MockAuctioneer, MockDatabaseService>>,
+    Arc<BuilderApi<MockAuctioneer, MockDatabaseService, MockSimulator, MockGossiper>>,
+    Receiver<Sender<ChainUpdate>>,
+) {
+    let (tx, rx) = oneshot::channel();
+    let http_config = HttpServiceConfig::new(ADDRESS, PORT);
+    let bind_address = http_config.bind_address();
+
+    let (router, constraints_api, builder_api, slot_update_receiver) = constraints_api_app();
+
+    // Run the app in a background task
+    tokio::spawn(async move {
+        // run it with hyper on localhost:3000
+        let listener = tokio::net::TcpListener::bind(bind_address).await.unwrap();
+        axum::serve(listener, router)
+            .with_graceful_shutdown(async {
+                rx.await.ok();
+            })
+            .await
+            .unwrap();
+    });
+
+    tokio::time::sleep(Duration::from_millis(100)).await;
+
+    (tx, http_config, constraints_api, builder_api, slot_update_receiver)
+}
+
+fn _get_signed_constraints_json() -> &'static str {
+    r#"[
+        {
+            "message": {
+                "pubkey": "0xa695ad325dfc7e1191fbc9f186f58eff42a634029731b18380ff89bf42c464a42cb8ca55b200f051f57f1e1893c68759",
+                "slot": 33,
+                "top": true,
+                "transactions": [
+                    "0x02f86c870c72dd9d5e883e4d0183408f2382520894d2e2adf7177b7a8afddbc12d1634cf23ea1a71020180c001a08556dcfea479b34675db3fe08e29486fe719c2b22f6b0c1741ecbbdce4575cc6a01cd48009ccafd6b9f1290bbe2ceea268f94101d1d322c787018423ebcbc87ab4"
+                ]
+            },
+            "signature": "0xb8d50ee0d4b269db3d4658c1dac784d273a4160d769e16dce723a9684c390afe5865348416b3bf0f1a4f47098bec9024135d0d95f08bed18eb577a3d8a67f5dc78b13cc62515e280786a73fb267d35dfb7ab46a25ac29bf5bc2fa5b07b3e07a6"
+        }
+    ]"#
+}
+
+fn _get_signed_constraint_conflict_1_json() -> &'static str {
+    r#"[
+        {
+            "message": {
+                "pubkey": "0xa695ad325dfc7e1191fbc9f186f58eff42a634029731b18380ff89bf42c464a42cb8ca55b200f051f57f1e1893c68759",
+                "slot": 32,
+                "top": true,
+                "transactions": [
+                    "0x02f86c870c72dd9d5e883e4d0183408f2382520894d2e2adf7177b7a8afddbc12d1634cf23ea1a71020180c001a08556dcfea479b34675db3fe08e29486fe719c2b22f6b0c1741ecbbdce4575cc6a01cd48009ccafd6b9f1290bbe2ceea268f94101d1d322c787018423ebcbc87ab4",
+                    "0x02f9017b8501a2140cff8303dec685012a05f2008512a05f2000830249f094843669e5220036eddbaca89d8c8b5b82268a0fc580b901040cc7326300000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000022006292538e66f0000000000000000000000005ba38f2c245618e39f6fa067bf1dec304e73ff3c00000000000000000000000092f0ee29e6e1bf0f7c668317ada78f5774a6cb7f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000003fac6482aee49bf58515be2d3fb58378a8497cc9000000000000000000000000c6cc140787b02ae479a10e41169607000c0d44f6c080a00cf74c45dbe9ee1fb923118ec5ce9db8f88cd651196ed3f9d4f8f2a65827e611a04a6bc1d49a7e18b7c92e8f3614cae116b1832ceb311c81d54b2c87de1545f68f"
+                ]
+            },
+            "signature":
"0x97d249cbd4b2a33fab4e8d0b698a1905815375c6eb905356768bc23c90b5f2972e03580116849e3f250e7c650486549d16f090135a109c9dd21360e1ca52cb941fada8bdf0280fe184ec4adba7d9246a6d9fc516bfbe508ad1c21cfc2e169b52" + } + ]"# +} + +fn _get_signed_constraint_conflict_2_json() -> &'static str { + r#"[ + { + "message": { + "pubkey": "0xa695ad325dfc7e1191fbc9f186f58eff42a634029731b18380ff89bf42c464a42cb8ca55b200f051f57f1e1893c68759", + "slot": 32, + "top": true, + "transactions": [ + "0x02f86c870c72dd9d5e883e4d0183408f2382520894d2e2adf7177b7a8afddbc12d1634cf23ea1a71020180c001a08556dcfea479b34675db3fe08e29486fe719c2b22f6b0c1741ecbbdce4575cc6a01cd48009ccafd6b9f1290bbe2ceea268f94101d1d322c787018423ebcbc87ab4" + ] + }, + "signature": "0xb648321b682a445d377c3a11180213f1c21e5a645b2f80fb6ec31a85550d3bb0a0fb1eef806d6403a76ede552f87c8f219e0703d254dc78dca0b4a904794f81c995884dd90648190f4e4c5badc7463314ce57ceb5448767211a3c4af5b860650" + } + ]"# +} + +fn _get_signed_delegations() -> &'static str { + r#" + [{ + "message": + { + "action": 0, + "validator_pubkey": "0x882c02d0c1c30cf9bb84769fc37bf81a73795be9799156ac3a500fba24ddae4f310b47dc27c08e1acdf395a0d9e5ae6a", + "delegatee_pubkey": "0xa30e3c596a76f109094afbc16689adab5c03fb575213085d3e3a0766d269a961e28dd909312408866c6d481fc8a93522" + }, + "signature": "0xb067c33c6b8018086ba0b294e069063d185a01116475caa6e4cf36d08d62422ad68ef83ec0b01b4e13dfd95a914f2ed50301e1bfd945d0339b11a0330b06bd532a8bb9cd8017452e1f44f7c64c1ab4888266e87f99c916c90d5fd95614b0dfc4" + }]"# +} + +fn _get_signed_revocations() -> &'static str { + r#" + [{ + "message": { + "action": 1, + "validator_pubkey": "0x882c02d0c1c30cf9bb84769fc37bf81a73795be9799156ac3a500fba24ddae4f310b47dc27c08e1acdf395a0d9e5ae6a", + "delegatee_pubkey": "0xa30e3c596a76f109094afbc16689adab5c03fb575213085d3e3a0766d269a961e28dd909312408866c6d481fc8a93522" + }, + "signature": "0x90b352ee91ef1e9e6411ff4837a0745547941b593e1104200b208644e247242ac1e352e9318de4b3a54fc5bd29b2dbc015c9c47784edde369303a59187c65a860a04b1114e925fe0dfc38e957068f1dff3bd2825946223222c44f8cc871e1b0e" + }] + "# +} + +// +++ TESTS +++ +#[tokio::test] +#[serial] +#[ignore = "TODO: to fix, we're not adding proposer duties to database"] +async fn test_submit_constraints_conflict() { + let _ = tracing_subscriber::fmt::try_init(); + + // Start the server + let (tx, http_config, _constraints_api, _builder_api, mut slot_update_receiver) = + start_api_server().await; + + let slot_update_sender = slot_update_receiver.recv().await.unwrap(); + send_dummy_slot_update(slot_update_sender.clone(), None, None, None).await; + + let test_constraint: List = + serde_json::from_str(_get_signed_constraint_conflict_1_json()).unwrap(); + + // Submit constraints + let req_url = format!("{}{}", http_config.base_url(), Route::SubmitBuilderConstraints.path()); + + // Send JSON encoded request + let resp = + send_request(&req_url, Encoding::Json, serde_json::to_vec(&test_constraint).unwrap()).await; + assert_eq!(resp.status(), reqwest::StatusCode::OK); + + let test_constraint: List = + serde_json::from_str(_get_signed_constraint_conflict_2_json()).unwrap(); + + // Send JSON encoded request + let resp = + send_request(&req_url, Encoding::Json, serde_json::to_vec(&test_constraint).unwrap()).await; + + // This will result in a conflict as 2 constraints are submitted with top = true + assert_eq!(resp.status(), reqwest::StatusCode::CONFLICT); + info!("Response: {:?}", resp); + + // Send shutdown signal + let _ = tx.send(()); +} + +#[tokio::test] +#[serial] +#[ignore = "TODO: to fix, we're not adding proposer 
duties to database"] +async fn test_submit_constraints_and_get_constraints_ok() { + let _ = tracing_subscriber::fmt::try_init(); + + // Start the server + let (tx, http_config, _constraints_api, _builder_api, mut slot_update_receiver) = + start_api_server().await; + + let slot_update_sender = slot_update_receiver.recv().await.unwrap(); + send_dummy_slot_update(slot_update_sender.clone(), None, None, None).await; + + let test_constraints: List = + serde_json::from_str(_get_signed_constraints_json()).unwrap(); + + // Submit constraints + let req_url = format!("{}{}", http_config.base_url(), Route::SubmitBuilderConstraints.path()); + + // Send SSZ encoded request + let resp = + send_request(&req_url, Encoding::Ssz, ssz::prelude::serialize(&test_constraints).unwrap()) + .await; + assert_eq!(resp.status(), reqwest::StatusCode::OK); + + // Correct and complete the below + let slot = 32; + + // Get constraints + let req_url = format!("{}{}", http_config.base_url(), Route::GetBuilderConstraints.path()); + + let resp = reqwest::Client::new() + .get(req_url) + .query(&[("slot", slot)]) + .header("accept", "application/json") + .send() + .await + .unwrap(); + + // Ensure the response is OK + assert_eq!(resp.status(), reqwest::StatusCode::OK); + + // Print the response body + let body: Vec = serde_json::from_str(&resp.text().await.unwrap()).unwrap(); + info!("Response body: {:?}", body); + // TODO: clean this + let constraint = body.first().unwrap().clone(); + let send = test_constraints.first().unwrap().clone(); + assert_eq!(constraint.signature, send.signature); + + // Send shutdown signal + let _ = tx.send(()); +} + +#[tokio::test] +#[serial] +async fn test_delegate_submission_rights_ok() { + let _ = tracing_subscriber::fmt::try_init(); + + let (tx, http_config, _api, _, _) = start_api_server().await; + + let test_delegation: Vec = + serde_json::from_str(_get_signed_delegations()).unwrap(); + + let req_url = format!("{}{}", http_config.base_url(), Route::DelegateSubmissionRights.path()); + let req_payload = serde_json::to_vec(&test_delegation).unwrap(); + + // Send JSON encoded request + let resp = send_request(&req_url, Encoding::Json, req_payload).await; + assert_eq!(resp.status(), reqwest::StatusCode::OK); + + let _ = tx.send(()); +} + +#[tokio::test] +#[serial] +#[ignore = "TODO: to fix, we're not adding proposer duties to database"] +async fn test_get_delegations() { + let _ = tracing_subscriber::fmt::try_init(); + + // Start the server + let (tx, http_config, _constraints_api, _builder_api, mut slot_update_receiver) = + start_api_server().await; + + let slot_update_sender = slot_update_receiver.recv().await.unwrap(); + send_dummy_slot_update(slot_update_sender.clone(), None, None, None).await; + + let test_delegation: Vec = + serde_json::from_str(_get_signed_delegations()).unwrap(); + + let req_url = format!("{}{}", http_config.base_url(), Route::DelegateSubmissionRights.path()); + let req_payload = serde_json::to_vec(&test_delegation).unwrap(); + + // Send JSON encoded request + let resp = send_request(&req_url, Encoding::Json, req_payload).await; + assert_eq!(resp.status(), reqwest::StatusCode::OK); + + // Get delegations + let slot = 33; + + let req_url = format!("{}{}", http_config.base_url(), Route::GetBuilderDelegations.path()); + + let resp = reqwest::Client::new() + .get(req_url) + .query(&[("slot", slot)]) + .header("accept", "application/json") + .send() + .await + .unwrap(); + + // Ensure the response is OK + assert_eq!(resp.status(), reqwest::StatusCode::OK); + + let body: Vec = 
serde_json::from_str(&resp.text().await.unwrap()).unwrap(); + assert_eq!(test_delegation[0].message.delegatee_pubkey, body.first().unwrap().clone()); + + let _ = tx.send(()); +} + +#[tokio::test] +#[serial] +async fn test_revoke_submission_rights_ok() { + let _ = tracing_subscriber::fmt::try_init(); + let (tx, http_config, _, _, _) = start_api_server().await; + + let test_revocation: Vec = + serde_json::from_str(_get_signed_revocations()).unwrap(); + + let req_url = format!("{}{}", http_config.base_url(), Route::RevokeSubmissionRights.path()); + let req_payload = serde_json::to_vec(&test_revocation).unwrap(); + + println!("Payload: {:?}", req_payload); + + // Send JSON encoded request + let resp = send_request(&req_url, Encoding::Json, req_payload).await; + assert_eq!(resp.status(), reqwest::StatusCode::OK); + + let _ = tx.send(()); +} diff --git a/crates/api/src/lib.rs b/crates/api/src/lib.rs index c8c38f07..6b72b979 100644 --- a/crates/api/src/lib.rs +++ b/crates/api/src/lib.rs @@ -1,6 +1,7 @@ #![allow(clippy::too_many_arguments)] pub mod builder; +pub mod constraints; pub mod gossiper; pub mod integration_tests; pub mod middleware; diff --git a/crates/api/src/middleware/rate_limiting/mod.rs b/crates/api/src/middleware/rate_limiting/mod.rs index c23a9c70..330aec8a 100644 --- a/crates/api/src/middleware/rate_limiting/mod.rs +++ b/crates/api/src/middleware/rate_limiting/mod.rs @@ -1,3 +1,5 @@ pub mod error; pub mod rate_limit_by_ip; -pub mod tests; + +#[cfg(test)] +mod tests; diff --git a/crates/api/src/middleware/rate_limiting/tests.rs b/crates/api/src/middleware/rate_limiting/tests.rs index 7853ec72..be4a0972 100644 --- a/crates/api/src/middleware/rate_limiting/tests.rs +++ b/crates/api/src/middleware/rate_limiting/tests.rs @@ -1,204 +1,198 @@ -#![cfg(test)] -mod tests { - use crate::middleware::rate_limiting::rate_limit_by_ip::{ - rate_limit_by_ip, RateLimitState, RateLimitStateForRoute, - }; - use axum::{middleware, routing::get, Router}; - use serial_test::serial; - use std::{collections::HashMap, net::SocketAddr, time::Duration}; - use tokio::sync::oneshot; - - const ROUTE_NO_LIMIT: &str = "/test_without_limit"; - const NO_LIMIT_RESPONSE: &str = "no limit"; - const ROUTE_WITH_LIMIT: &str = "/test_with_limit"; - const LIMIT_RESPONSE: &str = "with limit"; - const ROUTE_WITH_HIGH_LIMIT: &str = "/test_with_high_limit"; - const HIGH_LIMIT_RESPONSE: &str = "with high limit"; - - fn get_router() -> Router<()> { - let mut rate_limits: HashMap = HashMap::new(); - rate_limits.insert( - ROUTE_WITH_LIMIT.to_string(), - RateLimitStateForRoute::new(Duration::from_secs(5), 1), - ); - rate_limits.insert( - ROUTE_WITH_HIGH_LIMIT.to_string(), - RateLimitStateForRoute::new(Duration::from_secs(10), 10), - ); - let rate_limiting_state = RateLimitState::new(rate_limits); - let mut app = Router::new(); - - app = app - .route(ROUTE_NO_LIMIT, get(|| async { NO_LIMIT_RESPONSE })) - .route(ROUTE_WITH_LIMIT, get(|| async { LIMIT_RESPONSE })) - .route(ROUTE_WITH_HIGH_LIMIT, get(|| async { HIGH_LIMIT_RESPONSE })) - .route_layer(middleware::from_fn_with_state( - rate_limiting_state.clone(), - rate_limit_by_ip, - )); - - app - } +use crate::middleware::rate_limiting::rate_limit_by_ip::{ + rate_limit_by_ip, RateLimitState, RateLimitStateForRoute, +}; +use axum::{middleware, routing::get, Router}; +use serial_test::serial; +use std::{collections::HashMap, net::SocketAddr, time::Duration}; +use tokio::sync::oneshot; + +const ROUTE_NO_LIMIT: &str = "/test_without_limit"; +const NO_LIMIT_RESPONSE: &str = "no limit"; 
+const ROUTE_WITH_LIMIT: &str = "/test_with_limit"; +const LIMIT_RESPONSE: &str = "with limit"; +const ROUTE_WITH_HIGH_LIMIT: &str = "/test_with_high_limit"; +const HIGH_LIMIT_RESPONSE: &str = "with high limit"; + +fn get_router() -> Router<()> { + let mut rate_limits: HashMap = HashMap::new(); + rate_limits.insert( + ROUTE_WITH_LIMIT.to_string(), + RateLimitStateForRoute::new(Duration::from_secs(5), 1), + ); + rate_limits.insert( + ROUTE_WITH_HIGH_LIMIT.to_string(), + RateLimitStateForRoute::new(Duration::from_secs(10), 10), + ); + let rate_limiting_state = RateLimitState::new(rate_limits); + let mut app = Router::new(); + + app = app + .route(ROUTE_NO_LIMIT, get(|| async { NO_LIMIT_RESPONSE })) + .route(ROUTE_WITH_LIMIT, get(|| async { LIMIT_RESPONSE })) + .route(ROUTE_WITH_HIGH_LIMIT, get(|| async { HIGH_LIMIT_RESPONSE })) + .route_layer(middleware::from_fn_with_state(rate_limiting_state.clone(), rate_limit_by_ip)); + + app +} - async fn start_server() -> oneshot::Sender<()> { - let (tx, rx) = oneshot::channel(); - - tokio::spawn(async { - let router = get_router(); - // Start the server - let listener: tokio::net::TcpListener = - tokio::net::TcpListener::bind("0.0.0.0:4040").await.unwrap(); - axum::serve(listener, router.into_make_service_with_connect_info::()) - .with_graceful_shutdown(async { - rx.await.ok(); - }) - .await - .unwrap(); - }); - - tx - } +async fn start_server() -> oneshot::Sender<()> { + let (tx, rx) = oneshot::channel(); + + tokio::spawn(async { + let router = get_router(); + // Start the server + let listener: tokio::net::TcpListener = + tokio::net::TcpListener::bind("0.0.0.0:4040").await.unwrap(); + axum::serve(listener, router.into_make_service_with_connect_info::()) + .with_graceful_shutdown(async { + rx.await.ok(); + }) + .await + .unwrap(); + }); + + tx +} - #[tokio::test] - #[serial] - async fn test_no_limit() { - let tx = start_server().await; - let url = format!("http://localhost:4040{}", ROUTE_NO_LIMIT); - for _ in 0..11 { - let response = reqwest::get(url.clone()).await.unwrap(); - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), NO_LIMIT_RESPONSE); - tokio::time::sleep(Duration::from_millis(100)).await; - } - - // Shut down the server - let _ = tx.send(()); +#[tokio::test] +#[serial] +async fn test_no_limit() { + let tx = start_server().await; + let url = format!("http://localhost:4040{}", ROUTE_NO_LIMIT); + for _ in 0..11 { + let response = reqwest::get(url.clone()).await.unwrap(); + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), NO_LIMIT_RESPONSE); + tokio::time::sleep(Duration::from_millis(100)).await; } - #[tokio::test] - #[serial] - async fn test_limit() { - let tx = start_server().await; - let url = format!("http://localhost:4040{}", ROUTE_WITH_LIMIT); + // Shut down the server + let _ = tx.send(()); +} + +#[tokio::test] +#[serial] +async fn test_limit() { + let tx = start_server().await; + let url = format!("http://localhost:4040{}", ROUTE_WITH_LIMIT); + let response = reqwest::get(url.clone()).await.unwrap(); + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), LIMIT_RESPONSE); + + for _ in 0..11 { let response = reqwest::get(url.clone()).await.unwrap(); - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), LIMIT_RESPONSE); + assert_eq!(response.status(), 429); + tokio::time::sleep(Duration::from_millis(100)).await; + } - for _ in 0..11 { - let response = reqwest::get(url.clone()).await.unwrap(); - 
assert_eq!(response.status(), 429); - tokio::time::sleep(Duration::from_millis(100)).await; - } + tokio::time::sleep(Duration::from_secs(5)).await; - tokio::time::sleep(Duration::from_secs(5)).await; + let response = reqwest::get(url.clone()).await.unwrap(); + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), LIMIT_RESPONSE); + // Shut down the server + let _ = tx.send(()); +} + +#[tokio::test] +#[serial] +async fn test_high_limit() { + let tx = start_server().await; + let url = format!("http://localhost:4040{}", ROUTE_WITH_HIGH_LIMIT); + for _ in 0..10 { let response = reqwest::get(url.clone()).await.unwrap(); assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), LIMIT_RESPONSE); - - // Shut down the server - let _ = tx.send(()); + assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); + tokio::time::sleep(Duration::from_millis(100)).await; } - #[tokio::test] - #[serial] - async fn test_high_limit() { - let tx = start_server().await; - let url = format!("http://localhost:4040{}", ROUTE_WITH_HIGH_LIMIT); - for _ in 0..10 { - let response = reqwest::get(url.clone()).await.unwrap(); - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); - tokio::time::sleep(Duration::from_millis(100)).await; - } - - // Shut down the server - let _ = tx.send(()); + // Shut down the server + let _ = tx.send(()); +} + +#[tokio::test] +#[serial] +async fn test_high_limit_exceeded() { + let tx = start_server().await; + let url = format!("http://localhost:4040{}", ROUTE_WITH_HIGH_LIMIT); + for _ in 0..10 { + let response = reqwest::get(url.clone()).await.unwrap(); + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); + tokio::time::sleep(Duration::from_millis(100)).await; } - #[tokio::test] - #[serial] - async fn test_high_limit_exceeded() { - let tx = start_server().await; - let url = format!("http://localhost:4040{}", ROUTE_WITH_HIGH_LIMIT); - for _ in 0..10 { - let response = reqwest::get(url.clone()).await.unwrap(); - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); - tokio::time::sleep(Duration::from_millis(100)).await; - } - - let response = reqwest::get(url).await.unwrap(); - assert_eq!(response.status(), 429); + let response = reqwest::get(url).await.unwrap(); + assert_eq!(response.status(), 429); - // Shut down the server - let _ = tx.send(()); - } + // Shut down the server + let _ = tx.send(()); +} - #[tokio::test] - #[serial] - async fn test_high_limit_reset() { - let tx = start_server().await; - let url = format!("http://localhost:4040{}", ROUTE_WITH_HIGH_LIMIT); - for _ in 0..10 { - let response = reqwest::get(url.clone()).await.unwrap(); - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); - tokio::time::sleep(Duration::from_millis(100)).await; - } - - tokio::time::sleep(Duration::from_secs(10)).await; - - for _ in 0..10 { - let response = reqwest::get(url.clone()).await.unwrap(); - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); - tokio::time::sleep(Duration::from_millis(100)).await; - } - - // Shut down the server - let _ = tx.send(()); +#[tokio::test] +#[serial] +async fn test_high_limit_reset() { + let tx = start_server().await; + let url = format!("http://localhost:4040{}", ROUTE_WITH_HIGH_LIMIT); + for _ in 0..10 { + let response = 
reqwest::get(url.clone()).await.unwrap(); + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); + tokio::time::sleep(Duration::from_millis(100)).await; } - #[tokio::test] - #[serial] - async fn test_mixed_requests() { - let tx = start_server().await; - let url_no_limit = format!("http://localhost:4040{}", ROUTE_NO_LIMIT); - let url_with_limit = format!("http://localhost:4040{}", ROUTE_WITH_LIMIT); - let url_with_high_limit = format!("http://localhost:4040{}", ROUTE_WITH_HIGH_LIMIT); + tokio::time::sleep(Duration::from_secs(10)).await; - let response = reqwest::get(url_with_limit.clone()).await.unwrap(); + for _ in 0..10 { + let response = reqwest::get(url.clone()).await.unwrap(); assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), LIMIT_RESPONSE); - - for _ in 0..10 { - let response = reqwest::get(url_with_limit.clone()).await.unwrap(); - assert_eq!(response.status(), 429); - tokio::time::sleep(Duration::from_millis(100)).await; - } - - for _ in 0..11 { - let response = reqwest::get(url_no_limit.clone()).await.unwrap(); - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), NO_LIMIT_RESPONSE); - tokio::time::sleep(Duration::from_millis(100)).await; - } - - for _ in 0..10 { - let response = reqwest::get(url_with_high_limit.clone()).await.unwrap(); - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); - tokio::time::sleep(Duration::from_millis(100)).await; - } - - tokio::time::sleep(Duration::from_secs(5)).await; + assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); + tokio::time::sleep(Duration::from_millis(100)).await; + } + + // Shut down the server + let _ = tx.send(()); +} + +#[tokio::test] +#[serial] +async fn test_mixed_requests() { + let tx = start_server().await; + let url_no_limit = format!("http://localhost:4040{}", ROUTE_NO_LIMIT); + let url_with_limit = format!("http://localhost:4040{}", ROUTE_WITH_LIMIT); + let url_with_high_limit = format!("http://localhost:4040{}", ROUTE_WITH_HIGH_LIMIT); + let response = reqwest::get(url_with_limit.clone()).await.unwrap(); + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), LIMIT_RESPONSE); + + for _ in 0..10 { let response = reqwest::get(url_with_limit.clone()).await.unwrap(); + assert_eq!(response.status(), 429); + tokio::time::sleep(Duration::from_millis(100)).await; + } + + for _ in 0..11 { + let response = reqwest::get(url_no_limit.clone()).await.unwrap(); assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), LIMIT_RESPONSE); + assert_eq!(response.text().await.unwrap(), NO_LIMIT_RESPONSE); + tokio::time::sleep(Duration::from_millis(100)).await; + } - // Shut down the server - let _ = tx.send(()); + for _ in 0..10 { + let response = reqwest::get(url_with_high_limit.clone()).await.unwrap(); + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), HIGH_LIMIT_RESPONSE); + tokio::time::sleep(Duration::from_millis(100)).await; } + + tokio::time::sleep(Duration::from_secs(5)).await; + + let response = reqwest::get(url_with_limit.clone()).await.unwrap(); + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), LIMIT_RESPONSE); + + // Shut down the server + let _ = tx.send(()); } diff --git a/crates/api/src/proposer/api.rs b/crates/api/src/proposer/api.rs index 356c6446..a75b1bf2 100644 --- a/crates/api/src/proposer/api.rs +++ b/crates/api/src/proposer/api.rs @@ -233,7 
+233,7 @@ where let (head_slot, _) = *proposer_api.curr_slot_info.read().await; let num_registrations = registrations.len(); - debug!( + trace!( request_id = %request_id, event = "register_validators", head_slot = head_slot, @@ -256,7 +256,7 @@ where let pub_key = registration.message.public_key.clone(); - debug!( + trace!( request_id = %request_id, pub_key = ?pub_key, fee_recipient = %registration.message.fee_recipient, @@ -274,7 +274,7 @@ where } if !proposer_api_clone.db.is_registration_update_required(®istration).await? { - debug!( + trace!( request_id = %request_id, pub_key = ?pub_key, "Registration update not required", @@ -469,6 +469,141 @@ where } } + /// Retrieves the best bid header (with inclusion proof) for the specified slot, parent hash, + /// and public key. + /// + /// This function accepts a slot number, parent hash and public_key. + /// 1. Validates that the request's slot is not older than the head slot. + /// 2. Validates the request timestamp to ensure it's not too late. + /// 3. Fetches the best bid for the given parameters from the auctioneer. + /// 4. Fetches the inclusion proof for the best bid. + /// + /// The function returns a JSON response containing the best bid and inclusion proofs if found. + /// + /// Implements this API: + pub async fn get_header_with_proofs( + Extension(proposer_api): Extension>>, + headers: HeaderMap, + Path(GetHeaderParams { slot, parent_hash, public_key }): Path, + ) -> Result { + let request_id = Uuid::new_v4(); + let mut trace = GetHeaderTrace { receive: get_nanos_timestamp()?, ..Default::default() }; + + let (head_slot, _) = *proposer_api.curr_slot_info.read().await; + debug!( + request_id = %request_id, + event = "get_header_with_proofs", + head_slot = head_slot, + request_ts = trace.receive, + slot = slot, + parent_hash = ?parent_hash, + public_key = ?public_key, + ); + + let bid_request = BidRequest { slot, parent_hash, public_key }; + + // Dont allow requests for past slots + if bid_request.slot < head_slot { + warn!(request_id = %request_id, "request for past slot"); + return Err(ProposerApiError::RequestForPastSlot { + request_slot: bid_request.slot, + head_slot, + }) + } + + if let Err(err) = proposer_api.validate_bid_request_time(&bid_request) { + warn!(request_id = %request_id, err = %err, "invalid bid request time"); + return Err(err) + } + trace.validation_complete = get_nanos_timestamp()?; + + // Get best bid from auctioneer + let get_best_bid_res = proposer_api + .auctioneer + .get_best_bid(bid_request.slot, &bid_request.parent_hash, &bid_request.public_key) + .await; + trace.best_bid_fetched = get_nanos_timestamp()?; + info!(request_id = %request_id, trace = ?trace, "best bid fetched"); + + let user_agent = + headers.get("user-agent").and_then(|v| v.to_str().ok()).map(|v| v.to_string()); + + match get_best_bid_res { + Ok(Some(mut bid)) => { + if bid.value() == U256::ZERO { + warn!(request_id = %request_id, "best bid value is 0"); + return Err(ProposerApiError::BidValueZero) + } + + // Get inclusion proofs + let proofs = proposer_api + .auctioneer + .get_inclusion_proof(slot, &bid_request.public_key, bid.block_hash()) + .await?; + + // Save trace to DB + proposer_api + .save_get_header_call( + slot, + bid_request.parent_hash, + bid_request.public_key, + bid.block_hash().clone(), + trace, + request_id, + user_agent, + ) + .await; + + // Attach the proofs to the bid before sending it back + if let Some(proofs) = proofs { + bid.set_inclusion_proofs(proofs); + + info!( + request_id = %request_id, + slot, + value = 
?bid.value(), + block_hash = ?bid.block_hash(), + "delivering bid with proofs", + ); + } else { + // Check whether we had constraints saved in the auctioneer. + // If so, this is an internal error and we cannot return a valid bid. + let constraints = + proposer_api.auctioneer.get_constraints(slot).await?.unwrap_or_default(); + + if !constraints.is_empty() { + error!( + request_id = %request_id, + slot, + block_hash = ?bid.block_hash(), + "no inclusion proofs found from auctioneer for bid, but constraints were saved", + ); + return Err(ProposerApiError::InternalServerError) + } + + info!( + request_id = %request_id, + slot, + value = ?bid.value(), + block_hash = ?bid.block_hash(), + "delivering bid with empty proofs, no constraints found", + ); + } + + // Return header with proofs + Ok(axum::Json(bid)) + } + Ok(None) => { + warn!(request_id = %request_id, "no bid found"); + Err(ProposerApiError::NoBidPrepared) + } + Err(err) => { + error!(request_id = %request_id, error = %err, "error getting bid"); + Err(ProposerApiError::InternalServerError) + } + } + } + /// Retrieves the execution payload for a given blinded beacon block. /// /// This function accepts a `SignedBlindedBeaconBlock` as input and performs several steps: @@ -1304,7 +1439,7 @@ where /// Handle a new slot update. /// Updates the next proposer duty for the new slot. async fn handle_new_slot(&self, slot_update: SlotUpdate) { - let epoch = slot_update.slot / SLOTS_PER_EPOCH; + let epoch = slot_update.slot / self.chain_info.seconds_per_slot; info!( epoch = epoch, slot = slot_update.slot, diff --git a/crates/api/src/proposer/tests.rs b/crates/api/src/proposer/tests.rs index aadfc02b..15041ffe 100644 --- a/crates/api/src/proposer/tests.rs +++ b/crates/api/src/proposer/tests.rs @@ -50,7 +50,7 @@ mod proposer_api_tests { ssz::prelude::*, }; use rand::Rng; - use reqwest::{Client, Response, StatusCode}; + use reqwest::StatusCode; use reth_primitives::hex; use crate::proposer::{tests::gen_signed_vr, PATH_REGISTER_VALIDATORS}; @@ -60,16 +60,16 @@ mod proposer_api_tests { api::{ builder_api::BuilderGetValidatorsResponseEntry, proposer_api::ValidatorRegistrationInfo, }, - capella::{self}, + capella, chain_info::ChainInfo, - deneb::{self}, + deneb, versioned_payload::PayloadAndBlobs, SignedBuilderBid, ValidatorPreferences, }; use helix_database::MockDatabaseService; use helix_datastore::MockAuctioneer; use helix_housekeeper::{ChainUpdate, PayloadAttributesUpdate, SlotUpdate}; - use helix_utils::{request_encoding::Encoding, signing::verify_signed_consensus_message}; + use helix_utils::signing::verify_signed_consensus_message; use serial_test::serial; use std::{sync::Arc, time::Duration}; use tokio::{ @@ -111,14 +111,6 @@ mod proposer_api_tests { } } - async fn send_request(req_url: &str, encoding: Encoding, req_payload: Vec) -> Response { - let client = Client::new(); - let request = client.post(req_url).header("accept", "*/*"); - let request = encoding.to_headers(request); - - request.body(req_payload).send().await.unwrap() - } - fn get_test_pub_key_bytes(random: bool) -> [u8; 48] { if random { let mut pubkey_array = [0u8; 48]; @@ -142,13 +134,6 @@ mod proposer_api_tests { ByteVector::try_from(bytes.as_ref()).unwrap() } - fn hex_to_byte_arr_32(hex: &str) -> [u8; 32] { - let bytes = hex::decode(&hex[2..]).unwrap(); - let mut arr = [0u8; 32]; - arr.copy_from_slice(&bytes); - arr - } - fn get_valid_payload_register_validator( submission_slot: Option, validator_index: Option, @@ -260,10 +245,13 @@ mod proposer_api_tests { } fn 
get_signed_builder_bid(value: U256) -> SignedBuilderBid { - SignedBuilderBid::Capella(capella::SignedBuilderBid { - message: helix_common::eth::capella::BuilderBid { value, ..Default::default() }, - ..Default::default() - }) + SignedBuilderBid::Capella( + capella::SignedBuilderBid { + message: helix_common::eth::capella::BuilderBid { value, ..Default::default() }, + ..Default::default() + }, + None, + ) } fn get_blinded_beacon_block_body() -> BlindedBeaconBlockBody { diff --git a/crates/api/src/proposer/types.rs b/crates/api/src/proposer/types.rs index 188060e8..00496321 100644 --- a/crates/api/src/proposer/types.rs +++ b/crates/api/src/proposer/types.rs @@ -16,9 +16,10 @@ pub const PATH_PROPOSER_API: &str = "/eth/v1/builder"; pub const PATH_STATUS: &str = "/status"; pub const PATH_REGISTER_VALIDATORS: &str = "/validators"; pub const PATH_GET_HEADER: &str = "/header/:slot/:parent_hash/:pubkey"; +pub const PATH_GET_HEADER_WITH_PROOFS: &str = "/header_with_proofs/:slot/:parent_hash/:pubkey"; pub const PATH_GET_PAYLOAD: &str = "/blinded_blocks"; -pub(crate) const GET_HEADER_REQUEST_CUTOFF_MS: i64 = 3000; +pub const GET_HEADER_REQUEST_CUTOFF_MS: i64 = 3000; #[derive(Debug, Deserialize)] pub struct GetHeaderParams { diff --git a/crates/api/src/relay_data/tests.rs b/crates/api/src/relay_data/tests.rs index 449c0d23..724fabc9 100644 --- a/crates/api/src/relay_data/tests.rs +++ b/crates/api/src/relay_data/tests.rs @@ -14,8 +14,7 @@ mod data_api_tests { ReceivedBlocksResponse, ValidatorRegistrationParams, }; use helix_database::MockDatabaseService; - use helix_utils::request_encoding::Encoding; - use reqwest::{Client, Response, StatusCode}; + use reqwest::StatusCode; use serial_test::serial; use std::{sync::Arc, time::Duration}; use tokio::sync::oneshot; @@ -46,14 +45,6 @@ mod data_api_tests { } } - async fn send_request(req_url: &str, encoding: Encoding, req_payload: Vec) -> Response { - let client = Client::new(); - let request = client.post(req_url).header("accept", "*/*"); - let request = encoding.to_headers(request); - - request.body(req_payload).send().await.unwrap() - } - async fn start_api_server() -> ( oneshot::Sender<()>, HttpServiceConfig, diff --git a/crates/api/src/router.rs b/crates/api/src/router.rs index a95b41a2..a2450093 100644 --- a/crates/api/src/router.rs +++ b/crates/api/src/router.rs @@ -18,6 +18,7 @@ use crate::{ api::{BuilderApi, MAX_PAYLOAD_LENGTH}, optimistic_simulator::OptimisticSimulator, }, + constraints::api::ConstraintsApi, gossiper::grpc_gossiper::GrpcGossiperClientManager, middleware::rate_limiting::rate_limit_by_ip::{ rate_limit_by_ip, RateLimitState, RateLimitStateForRoute, @@ -45,11 +46,14 @@ pub type ProposerApiProd = ProposerApi< pub type DataApiProd = DataApi; +pub type ConstraintsApiProd = ConstraintsApi; + pub fn build_router( router_config: &mut RouterConfig, builder_api: Arc, proposer_api: Arc, data_api: Arc, + constraints_api: Arc, bids_cache: Arc, delivered_payloads_cache: Arc, ) -> Router { @@ -81,6 +85,10 @@ pub fn build_router( Route::SubmitBlockOptimistic => { router = router.route(&route.path(), post(BuilderApiProd::submit_block_v2)); } + Route::SubmitBlockWithProofs => { + router = + router.route(&route.path(), post(BuilderApiProd::submit_block_with_proofs)); + } Route::SubmitHeader => { router = router.route(&route.path(), post(BuilderApiProd::submit_header)); } @@ -90,6 +98,15 @@ pub fn build_router( Route::GetTopBid => { router = router.route(&route.path(), get(BuilderApiProd::get_top_bid)); } + Route::GetBuilderConstraints => { + router = 
router.route(&route.path(), get(BuilderApiProd::constraints)); + } + Route::GetBuilderConstraintsStream => { + router = router.route(&route.path(), get(BuilderApiProd::constraints_stream)); + } + Route::GetBuilderDelegations => { + router = router.route(&route.path(), get(BuilderApiProd::delegations)); + } Route::Status => { router = router.route(&route.path(), get(ProposerApiProd::status)); } @@ -99,6 +116,9 @@ pub fn build_router( Route::GetHeader => { router = router.route(&route.path(), get(ProposerApiProd::get_header)); } + Route::GetHeaderWithProofs => { + router = router.route(&route.path(), get(ProposerApiProd::get_header_with_proofs)); + } Route::GetPayload => { router = router.route(&route.path(), post(ProposerApiProd::get_payload)); } @@ -114,6 +134,15 @@ pub fn build_router( Route::ValidatorRegistration => { router = router.route(&route.path(), get(DataApiProd::validator_registration)); } + Route::SubmitBuilderConstraints => { + router = router.route(&route.path(), post(ConstraintsApiProd::submit_constraints)); + } + Route::DelegateSubmissionRights => { + router = router.route(&route.path(), post(ConstraintsApiProd::delegate)); + } + Route::RevokeSubmissionRights => { + router = router.route(&route.path(), post(ConstraintsApiProd::revoke)); + } _ => { panic!("Route not implemented: {:?}, please add handling if there are new routes or resolve condensed routes before!", route); } @@ -140,6 +169,7 @@ pub fn build_router( .layer(Extension(builder_api)) .layer(Extension(proposer_api)) .layer(Extension(data_api)) + .layer(Extension(constraints_api)) .layer(Extension(bids_cache)) .layer(Extension(delivered_payloads_cache)); diff --git a/crates/api/src/service.rs b/crates/api/src/service.rs index 8d21baf4..44c1718c 100644 --- a/crates/api/src/service.rs +++ b/crates/api/src/service.rs @@ -1,6 +1,7 @@ use std::{env, net::SocketAddr, sync::Arc, time::Duration}; use ethereum_consensus::crypto::SecretKey; +use helix_database::{postgres::postgres_db_service::PostgresDatabaseService, DatabaseService}; use moka::sync::Cache; use tokio::{ sync::broadcast, @@ -12,7 +13,7 @@ use crate::{ builder::optimistic_simulator::OptimisticSimulator, gossiper::grpc_gossiper::GrpcGossiperClientManager, relay_data::{BidsCache, DeliveredPayloadsCache}, - router::{build_router, BuilderApiProd, DataApiProd, ProposerApiProd}, + router::{build_router, BuilderApiProd, ConstraintsApiProd, DataApiProd, ProposerApiProd}, }; use helix_beacon_client::{ beacon_client::BeaconClient, fiber_broadcaster::FiberBroadcaster, @@ -22,7 +23,6 @@ use helix_common::{ chain_info::ChainInfo, signing::RelaySigningContext, BroadcasterConfig, NetworkConfig, RelayConfig, }; -use helix_database::{postgres::postgres_db_service::PostgresDatabaseService, DatabaseService}; use helix_datastore::redis::redis_cache::RedisCache; use helix_housekeeper::{ChainEventUpdater, Housekeeper}; @@ -158,7 +158,7 @@ impl ApiService { let (builder_gossip_sender, builder_gossip_receiver) = tokio::sync::mpsc::channel(10_000); let (proposer_gossip_sender, proposer_gossip_receiver) = tokio::sync::mpsc::channel(10_000); - let builder_api = Arc::new(BuilderApiProd::new( + let (builder_api, constraints_handle) = BuilderApiProd::new( auctioneer.clone(), db.clone(), chain_info.clone(), @@ -168,7 +168,8 @@ impl ApiService { config.clone(), slot_update_sender.clone(), builder_gossip_receiver, - )); + ); + let builder_api = Arc::new(builder_api); gossiper.start_server(builder_gossip_sender, proposer_gossip_sender).await; @@ -179,7 +180,7 @@ impl ApiService { broadcasters, 
multi_beacon_client.clone(), chain_info.clone(), - slot_update_sender, + slot_update_sender.clone(), validator_preferences.clone(), config.target_get_payload_propagation_duration_ms, proposer_gossip_receiver, @@ -187,6 +188,13 @@ impl ApiService { let data_api = Arc::new(DataApiProd::new(validator_preferences.clone(), db.clone())); + let constraints_api = Arc::new(ConstraintsApiProd::new( + auctioneer.clone(), + db.clone(), + chain_info.clone(), + constraints_handle, + )); + let bids_cache: Arc = Arc::new( Cache::builder() .time_to_live(Duration::from_secs(10)) @@ -206,6 +214,7 @@ impl ApiService { builder_api, proposer_api, data_api, + constraints_api, bids_cache, delivered_payloads_cache, ); @@ -255,9 +264,7 @@ async fn init_broadcasters(config: &RelayConfig) -> Vec> { // add test module #[cfg(test)] mod test { - - use helix_common::{BeaconClientConfig, FiberConfig}; - use helix_utils::request_encoding::Encoding; + use helix_common::BeaconClientConfig; use super::*; use std::convert::TryFrom; diff --git a/crates/api/src/test_utils.rs b/crates/api/src/test_utils.rs index c4085bc0..f243d8b4 100644 --- a/crates/api/src/test_utils.rs +++ b/crates/api/src/test_utils.rs @@ -25,11 +25,12 @@ use crate::{ api::{BuilderApi, MAX_PAYLOAD_LENGTH}, mock_simulator::MockSimulator, }, + constraints::api::{ConstraintsApi, ConstraintsHandle}, gossiper::{mock_gossiper::MockGossiper, types::GossipedMessage}, proposer::{ api::{ProposerApi, MAX_BLINDED_BLOCK_LENGTH, MAX_VAL_REGISTRATIONS_LENGTH}, - PATH_GET_HEADER, PATH_GET_PAYLOAD, PATH_PROPOSER_API, PATH_REGISTER_VALIDATORS, - PATH_STATUS, + PATH_GET_HEADER, PATH_GET_HEADER_WITH_PROOFS, PATH_GET_PAYLOAD, PATH_PROPOSER_API, + PATH_REGISTER_VALIDATORS, PATH_STATUS, }, relay_data::{ DataApi, PATH_BUILDER_BIDS_RECEIVED, PATH_DATA_API, PATH_PROPOSER_PAYLOAD_DELIVERED, @@ -94,6 +95,15 @@ pub fn app() -> Router { MockGossiper, >::get_header), ) + .route( + &format!("{PATH_PROPOSER_API}{PATH_GET_HEADER_WITH_PROOFS}"), + get(ProposerApi::< + MockAuctioneer, + MockDatabaseService, + MockMultiBeaconClient, + MockGossiper, + >::get_header_with_proofs), + ) .route( &format!("{PATH_PROPOSER_API}{PATH_GET_PAYLOAD}"), post( @@ -126,24 +136,24 @@ pub fn builder_api_app() -> ( Router, Arc>, Receiver>, + ConstraintsHandle, ) { let (slot_update_sender, slot_update_receiver) = channel::>(32); let (_gossip_sender, gossip_receiver) = tokio::sync::mpsc::channel(10); - let builder_api_service = - Arc::new( - BuilderApi::::new( - Arc::new(MockAuctioneer::default()), - Arc::new(MockDatabaseService::default()), - Arc::new(ChainInfo::for_mainnet()), - MockSimulator::default(), - Arc::new(MockGossiper::new().unwrap()), - Arc::new(RelaySigningContext::default()), - RelayConfig::default(), - slot_update_sender.clone(), - gossip_receiver, - ), + let (builder_api_service, handler) = + BuilderApi::::new( + Arc::new(MockAuctioneer::default()), + Arc::new(MockDatabaseService::default()), + Arc::new(ChainInfo::for_mainnet()), + MockSimulator::default(), + Arc::new(MockGossiper::new().unwrap()), + Arc::new(RelaySigningContext::default()), + RelayConfig::default(), + slot_update_sender.clone(), + gossip_receiver, ); + let builder_api_service = Arc::new(builder_api_service); let mut router = Router::new() .route( @@ -158,6 +168,12 @@ pub fn builder_api_app() -> ( &Route::GetTopBid.path(), get(BuilderApi::::get_top_bid), ) + .route(&Route::GetBuilderConstraintsStream.path(), + get(BuilderApi::::constraints_stream), + ) + .route(&Route::GetBuilderConstraints.path(), + get(BuilderApi::::constraints), 
+ ) .layer(RequestBodyLimitLayer::new(MAX_PAYLOAD_LENGTH)) .layer(Extension(builder_api_service.clone())); @@ -175,7 +191,7 @@ pub fn builder_api_app() -> ( .layer(RateLimitLayer::new(100, Duration::from_secs(1))), ); - (router, builder_api_service, slot_update_receiver) + (router, builder_api_service, slot_update_receiver, handler) } #[allow(clippy::type_complexity)] @@ -216,6 +232,15 @@ pub fn proposer_api_app() -> ( MockGossiper, >::get_header), ) + .route( + &format!("{PATH_PROPOSER_API}{PATH_GET_HEADER_WITH_PROOFS}"), + get(ProposerApi::< + MockAuctioneer, + MockDatabaseService, + MockMultiBeaconClient, + MockGossiper, + >::get_header_with_proofs), + ) .route( &format!("{PATH_PROPOSER_API}{PATH_GET_PAYLOAD}"), post( @@ -269,3 +294,82 @@ pub fn data_api_app() -> (Router, Arc>, Arc ( + Router, + Arc>, + Arc>, + Receiver>, +) { + let auctioneer = Arc::new(MockAuctioneer::default()); + let database = Arc::new(MockDatabaseService::default()); + + let (slot_update_sender, slot_update_receiver) = channel::>(32); + let (_gossip_sender, gossip_receiver) = tokio::sync::mpsc::channel(10); + + let (builder_api_service, handler) = + BuilderApi::::new( + auctioneer.clone(), + database.clone(), + Arc::new(ChainInfo::for_mainnet()), + MockSimulator::default(), + Arc::new(MockGossiper::new().unwrap()), + Arc::new(RelaySigningContext::default()), + RelayConfig::default(), + slot_update_sender.clone(), + gossip_receiver, + ); + let builder_api_service = Arc::new(builder_api_service); + + let constraints_api_service = + Arc::new(ConstraintsApi::::new( + auctioneer.clone(), + database.clone(), + Arc::new(ChainInfo::for_mainnet()), + handler, + )); + + let router = Router::new() + .route( + &Route::GetValidators.path(), + get(BuilderApi::::get_validators), + ) + .route( + &Route::SubmitBlock.path(), + post(BuilderApi::::submit_block), + ) + .route( + &Route::GetTopBid.path(), + get(BuilderApi::::get_top_bid), + ) + .route( + &Route::GetBuilderConstraints.path(), + get(BuilderApi::::constraints), + ) + .route( + &Route::GetBuilderConstraintsStream.path(), + get(BuilderApi::::constraints_stream), + ) + .route( + &Route::GetBuilderDelegations.path(), + get(BuilderApi::::delegations), + ) + .route( + &Route::SubmitBuilderConstraints.path(), + post(ConstraintsApi::::submit_constraints), + ) + .route( + &Route::DelegateSubmissionRights.path(), + post(ConstraintsApi::::delegate), + ) + .route( + &Route::RevokeSubmissionRights.path(), + post(ConstraintsApi::::revoke), + ) + .layer(RequestBodyLimitLayer::new(MAX_PAYLOAD_LENGTH)) + .layer(Extension(builder_api_service.clone())) + .layer(Extension(constraints_api_service.clone())); + + (router, constraints_api_service, builder_api_service, slot_update_receiver) +} diff --git a/crates/beacon-client/src/beacon_client.rs b/crates/beacon-client/src/beacon_client.rs index f01820b3..b5ec91b7 100644 --- a/crates/beacon-client/src/beacon_client.rs +++ b/crates/beacon-client/src/beacon_client.rs @@ -23,7 +23,10 @@ use crate::{ }; const CONSENSUS_VERSION_HEADER: &str = "eth-consensus-version"; -const BEACON_CLIENT_REQUEST_TIMEOUT: Duration = Duration::from_secs(5); + +// Note: we noticed that beacon clients can take 5-10s to respond with the full +// validators list in some cases. The previous timeout of 5s was too short. 
+const BEACON_CLIENT_REQUEST_TIMEOUT: Duration = Duration::from_secs(15); #[derive(Clone, Debug)] pub struct BeaconClient { diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index a42ee99b..94afe9b1 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -33,6 +33,15 @@ tokio-postgres.workspace = true # Misc auto_impl.workspace = true -clap = {version = "4.3", features = ["derive"]} +clap = { version = "4.2.7", features = ["derive"] } thiserror.workspace = true -tracing.workspace = true \ No newline at end of file +tracing.workspace = true +ssz_rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "1df4cd9", features = [ + "sha2-asm", +] } +sha2 = "0.10.8" +tree_hash = "0.6.0" + +[dev-dependencies] +tree_hash = "0.6.0" +tree_hash_derive = "0.6.0" diff --git a/crates/common/src/api/constraints_api.rs b/crates/common/src/api/constraints_api.rs new file mode 100644 index 00000000..c2448473 --- /dev/null +++ b/crates/common/src/api/constraints_api.rs @@ -0,0 +1,70 @@ +use ethereum_consensus::{ + primitives::{BlsPublicKey, BlsSignature}, + ssz::prelude::*, +}; +use sha2::{Digest, Sha256}; + +pub const MAX_CONSTRAINTS_PER_SLOT: usize = 256; + +/// The action type for a delegation message. +pub const DELEGATION_ACTION: u8 = 0; + +/// The action type for a revocation message. +pub const REVOCATION_ACTION: u8 = 1; + +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize, Hash, PartialEq, Eq)] +pub struct SignedDelegation { + pub message: DelegationMessage, + pub signature: BlsSignature, +} + +#[derive( + Debug, Clone, SimpleSerialize, serde::Deserialize, serde::Serialize, Hash, PartialEq, Eq, +)] +pub struct DelegationMessage { + pub action: u8, + pub validator_pubkey: BlsPublicKey, + pub delegatee_pubkey: BlsPublicKey, +} + +impl SignableBLS for DelegationMessage { + fn digest(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update([self.action]); + hasher.update(&self.validator_pubkey.to_vec()); + hasher.update(&self.delegatee_pubkey.to_vec()); + + hasher.finalize().into() + } +} + +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +pub struct SignedRevocation { + pub message: RevocationMessage, + pub signature: BlsSignature, +} + +#[derive(Debug, Clone, SimpleSerialize, serde::Deserialize, serde::Serialize)] +pub struct RevocationMessage { + pub action: u8, + pub validator_pubkey: BlsPublicKey, + pub delegatee_pubkey: BlsPublicKey, +} + +impl SignableBLS for RevocationMessage { + fn digest(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update([self.action]); + hasher.update(&self.validator_pubkey.to_vec()); + hasher.update(&self.delegatee_pubkey.to_vec()); + + hasher.finalize().into() + } +} + +/// Trait for any types that can be signed and verified with BLS. +/// This trait is used to abstract over the signing and verification of different types. +pub trait SignableBLS { + /// Returns the digest of the object. 
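+    ///
+    /// Usage sketch (hypothetical values; verification itself lives in the
+    /// signing helpers): the digest is what gets wrapped in a signing root
+    /// and checked against the message's BLS signature.
+    ///
+    /// ```ignore
+    /// let msg = DelegationMessage { action: DELEGATION_ACTION, validator_pubkey, delegatee_pubkey };
+    /// // Sha256(action || validator_pubkey || delegatee_pubkey)
+    /// let digest: [u8; 32] = msg.digest();
+    /// ```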
+ fn digest(&self) -> [u8; 32]; +} diff --git a/crates/common/src/api/mod.rs b/crates/common/src/api/mod.rs index a00269f6..8b789736 100644 --- a/crates/common/src/api/mod.rs +++ b/crates/common/src/api/mod.rs @@ -1,4 +1,5 @@ pub mod builder_api; +pub mod constraints_api; pub mod data_api; pub mod proposer_api; @@ -6,10 +7,14 @@ pub(crate) const PATH_BUILDER_API: &str = "/relay/v1/builder"; pub(crate) const PATH_GET_VALIDATORS: &str = "/validators"; pub(crate) const PATH_SUBMIT_BLOCK: &str = "/blocks"; +pub(crate) const PATH_BUILDER_BLOCKS_WITH_PROOFS: &str = "/blocks_with_proofs"; pub(crate) const PATH_SUBMIT_BLOCK_OPTIMISTIC_V2: &str = "/blocks_optimistic_v2"; pub(crate) const PATH_CANCEL_BID: &str = "/cancel_bid"; pub(crate) const PATH_SUBMIT_HEADER: &str = "/headers"; pub(crate) const PATH_GET_TOP_BID: &str = "/top_bid"; +pub(crate) const PATH_BUILDER_CONSTRAINTS: &str = "/constraints"; +pub(crate) const PATH_BUILDER_CONSTRAINTS_STREAM: &str = "/constraints_stream"; +pub(crate) const PATH_BUILDER_DELEGATIONS: &str = "/delegations"; pub(crate) const PATH_PROPOSER_API: &str = "/eth/v1/builder"; @@ -17,9 +22,17 @@ pub(crate) const PATH_STATUS: &str = "/status"; pub(crate) const PATH_REGISTER_VALIDATORS: &str = "/validators"; pub(crate) const PATH_GET_HEADER: &str = "/header/:slot/:parent_hash/:pubkey"; pub(crate) const PATH_GET_PAYLOAD: &str = "/blinded_blocks"; +pub(crate) const PATH_GET_HEADER_WITH_PROOFS: &str = + "/header_with_proofs/:slot/:parent_hash/:pubkey"; pub(crate) const PATH_DATA_API: &str = "/relay/v1/data"; pub(crate) const PATH_PROPOSER_PAYLOAD_DELIVERED: &str = "/bidtraces/proposer_payload_delivered"; pub(crate) const PATH_BUILDER_BIDS_RECEIVED: &str = "/bidtraces/builder_blocks_received"; pub(crate) const PATH_VALIDATOR_REGISTRATION: &str = "/validator_registration"; + +pub(crate) const PATH_CONSTRAINTS_API: &str = "/constraints/v1"; + +pub(crate) const PATH_SUBMIT_BUILDER_CONSTRAINTS: &str = "/builder/constraints"; +pub(crate) const PATH_DELEGATE_SUBMISSION_RIGHTS: &str = "/builder/delegate"; +pub(crate) const PATH_REVOKE_SUBMISSION_RIGHTS: &str = "/builder/revoke"; diff --git a/crates/common/src/bid_submission/mod.rs b/crates/common/src/bid_submission/mod.rs index 5a6a0355..620c0a00 100644 --- a/crates/common/src/bid_submission/mod.rs +++ b/crates/common/src/bid_submission/mod.rs @@ -15,8 +15,12 @@ use ethereum_consensus::{ Fork, }; +use crate::proofs::InclusionProofs; + #[auto_impl::auto_impl(Arc)] pub trait BidSubmission { + fn proofs(&self) -> Option<&InclusionProofs>; + fn bid_trace(&self) -> &BidTrace; fn signature(&self) -> &BlsSignature; diff --git a/crates/common/src/bid_submission/submission.rs b/crates/common/src/bid_submission/submission.rs index 9ab6cc80..62c5fee2 100644 --- a/crates/common/src/bid_submission/submission.rs +++ b/crates/common/src/bid_submission/submission.rs @@ -2,6 +2,7 @@ use crate::{ bid_submission::{BidSubmission, BidTrace}, capella, deneb::BlobsBundle, + proofs::InclusionProofs, versioned_payload::PayloadAndBlobs, }; use ethereum_consensus::{ @@ -28,6 +29,15 @@ pub enum SignedBidSubmission { } impl BidSubmission for SignedBidSubmission { + fn proofs(&self) -> Option<&InclusionProofs> { + match self { + SignedBidSubmission::Deneb(signed_bid_submission) => { + signed_bid_submission.proofs.as_ref() + } + SignedBidSubmission::Capella(_) => None, + } + } + fn bid_trace(&self) -> &BidTrace { match self { SignedBidSubmission::Deneb(signed_bid_submission) => &signed_bid_submission.message, @@ -353,4 +363,8 @@ pub struct 
SignedBidSubmissionDeneb { pub execution_payload: ExecutionPayload, pub blobs_bundle: BlobsBundle, pub signature: BlsSignature, + /// The Merkle proofs of inclusion as needed by the Constraints API. + /// Reference: + #[serde(skip_serializing_if = "Option::is_none")] + pub proofs: Option, } diff --git a/crates/common/src/bid_submission/v2/header_submission.rs b/crates/common/src/bid_submission/v2/header_submission.rs index ce53dc50..9bb941cf 100644 --- a/crates/common/src/bid_submission/v2/header_submission.rs +++ b/crates/common/src/bid_submission/v2/header_submission.rs @@ -2,6 +2,7 @@ use crate::{ bid_submission::{BidSubmission, BidTrace}, capella, deneb::{self, BlobsBundle}, + proofs::InclusionProofs, versioned_payload_header::VersionedExecutionPayloadHeader, }; use ethereum_consensus::{ @@ -111,6 +112,10 @@ pub struct SignedHeaderSubmissionDeneb { } impl BidSubmission for SignedHeaderSubmission { + fn proofs(&self) -> Option<&InclusionProofs> { + None + } + fn bid_trace(&self) -> &BidTrace { match self { Self::Capella(signed_header_submission) => &signed_header_submission.message.bid_trace, diff --git a/crates/common/src/config.rs b/crates/common/src/config.rs index 77bfdb3b..bcff1294 100644 --- a/crates/common/src/config.rs +++ b/crates/common/src/config.rs @@ -158,28 +158,51 @@ pub struct RouterConfig { impl RouterConfig { // Function to resolve condensed variants and replace them with real routes pub fn resolve_condensed_routes(&mut self) { - if self.contains(Route::All) { + if self.enabled_routes.is_empty() { + // If no routes are enabled, enable all real routes + self.extend([ + Route::BuilderApi, + Route::ProposerApi, + Route::DataApi, + Route::ConstraintsApi, + ]); + } else if self.contains(Route::All) { // If All is present, replace it with all real routes self.remove(&Route::All); - self.extend([Route::BuilderApi, Route::ProposerApi, Route::DataApi]); + self.extend([ + Route::BuilderApi, + Route::ProposerApi, + Route::DataApi, + Route::ConstraintsApi, + ]); } - // Replace BuilderApi, ProposerApi, DataApi with their real routes + // Replace BuilderApi, ProposerApi, DataApi, ConstraintsApi with their real routes self.replace_condensed_with_real( Route::BuilderApi, &[ Route::GetValidators, Route::SubmitBlock, + Route::SubmitBlockWithProofs, Route::SubmitBlockOptimistic, Route::SubmitHeader, Route::CancelBid, Route::GetTopBid, + Route::GetBuilderConstraints, + Route::GetBuilderConstraintsStream, + Route::GetBuilderDelegations, ], ); self.replace_condensed_with_real( Route::ProposerApi, - &[Route::Status, Route::RegisterValidators, Route::GetHeader, Route::GetPayload], + &[ + Route::Status, + Route::RegisterValidators, + Route::GetHeader, + Route::GetHeaderWithProofs, + Route::GetPayload, + ], ); self.replace_condensed_with_real( @@ -190,6 +213,15 @@ impl RouterConfig { Route::ValidatorRegistration, ], ); + + self.replace_condensed_with_real( + Route::ConstraintsApi, + &[ + Route::SubmitBuilderConstraints, + Route::DelegateSubmissionRights, + Route::RevokeSubmissionRights, + ], + ); } fn contains(&self, route: Route) -> bool { @@ -236,6 +268,7 @@ pub enum Route { BuilderApi, ProposerApi, DataApi, + ConstraintsApi, GetValidators, SubmitBlock, SubmitBlockOptimistic, @@ -249,6 +282,25 @@ pub enum Route { ProposerPayloadDelivered, BuilderBidsReceived, ValidatorRegistration, + + // Constraints API: Builder + /// Reference: + SubmitBuilderConstraints, + /// Reference: + DelegateSubmissionRights, + /// Reference: + RevokeSubmissionRights, + /// Reference: + GetHeaderWithProofs, + + // 
Constraints API: Relay + /// Reference: + GetBuilderConstraints, + /// Reference: + GetBuilderConstraintsStream, + GetBuilderDelegations, + /// Reference: + SubmitBlockWithProofs, } impl Route { @@ -256,6 +308,9 @@ impl Route { match self { Route::GetValidators => format!("{PATH_BUILDER_API}{PATH_GET_VALIDATORS}"), Route::SubmitBlock => format!("{PATH_BUILDER_API}{PATH_SUBMIT_BLOCK}"), + Route::SubmitBlockWithProofs => { + format!("{PATH_BUILDER_API}{PATH_BUILDER_BLOCKS_WITH_PROOFS}") + } Route::SubmitBlockOptimistic => { format!("{PATH_BUILDER_API}{PATH_SUBMIT_BLOCK_OPTIMISTIC_V2}") } @@ -265,16 +320,34 @@ impl Route { Route::Status => format!("{PATH_PROPOSER_API}{PATH_STATUS}"), Route::RegisterValidators => format!("{PATH_PROPOSER_API}{PATH_REGISTER_VALIDATORS}"), Route::GetHeader => format!("{PATH_PROPOSER_API}{PATH_GET_HEADER}"), + Route::GetHeaderWithProofs => { + format!("{PATH_PROPOSER_API}{PATH_GET_HEADER_WITH_PROOFS}") + } Route::GetPayload => format!("{PATH_PROPOSER_API}{PATH_GET_PAYLOAD}"), Route::ProposerPayloadDelivered => { format!("{PATH_DATA_API}{PATH_PROPOSER_PAYLOAD_DELIVERED}") } Route::BuilderBidsReceived => format!("{PATH_DATA_API}{PATH_BUILDER_BIDS_RECEIVED}"), Route::ValidatorRegistration => format!("{PATH_DATA_API}{PATH_VALIDATOR_REGISTRATION}"), + Route::SubmitBuilderConstraints => { + format!("{PATH_CONSTRAINTS_API}{PATH_SUBMIT_BUILDER_CONSTRAINTS}") + } + Route::DelegateSubmissionRights => { + format!("{PATH_CONSTRAINTS_API}{PATH_DELEGATE_SUBMISSION_RIGHTS}") + } + Route::RevokeSubmissionRights => { + format!("{PATH_CONSTRAINTS_API}{PATH_REVOKE_SUBMISSION_RIGHTS}") + } + Route::GetBuilderConstraints => format!("{PATH_BUILDER_API}{PATH_BUILDER_CONSTRAINTS}"), + Route::GetBuilderConstraintsStream => { + format!("{PATH_BUILDER_API}{PATH_BUILDER_CONSTRAINTS_STREAM}") + } + Route::GetBuilderDelegations => format!("{PATH_BUILDER_API}{PATH_BUILDER_DELEGATIONS}"), Route::All => panic!("All is not a real route"), Route::BuilderApi => panic!("BuilderApi is not a real route"), Route::ProposerApi => panic!("ProposerApi is not a real route"), Route::DataApi => panic!("DataApi is not a real route"), + Route::ConstraintsApi => panic!("ConstraintsApi is not a real route"), } } } diff --git a/crates/common/src/eth/mod.rs b/crates/common/src/eth/mod.rs index b1162359..f3352fd5 100644 --- a/crates/common/src/eth/mod.rs +++ b/crates/common/src/eth/mod.rs @@ -15,9 +15,13 @@ use ethereum_consensus::{ }; use helix_utils::signing::sign_builder_message; +use serde::de; -use crate::bid_submission::{ - v2::header_submission::SignedHeaderSubmission, BidSubmission, SignedBidSubmission, +use crate::{ + bid_submission::{ + v2::header_submission::SignedHeaderSubmission, BidSubmission, SignedBidSubmission, + }, + proofs::InclusionProofs, }; /// Index of the `blob_kzg_commitments` leaf in the `BeaconBlockBody` tree post-deneb. @@ -40,15 +44,120 @@ impl std::fmt::Display for BidRequest { } } -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -#[serde(tag = "version", content = "data")] +/// A signed builder bid with optional inclusion proofs. 
+/// +/// Deserialized from a JSON object of the following format: +/// +/// ```json +/// { +/// "version": "deneb", +/// "data": { +/// "message": BuilderBid, +/// "signature": Signature, +/// "proofs": Option +/// } +/// } +/// ``` +#[derive(Debug, Clone)] pub enum SignedBuilderBid { - #[serde(rename = "bellatrix")] - Bellatrix(bellatrix::SignedBuilderBid), - #[serde(rename = "capella")] - Capella(capella::SignedBuilderBid), - #[serde(rename = "deneb")] - Deneb(deneb::SignedBuilderBid), + Bellatrix(bellatrix::SignedBuilderBid, Option), + Capella(capella::SignedBuilderBid, Option), + Deneb(deneb::SignedBuilderBid, Option), +} + +impl<'de> serde::Deserialize<'de> for SignedBuilderBid { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let value = serde_json::Value::deserialize(deserializer)?; + let data = value.get("data").ok_or_else(|| de::Error::custom("missing data"))?; + let version = value.get("version").ok_or_else(|| de::Error::custom("missing version"))?; + + // deserialize proofs if they exist, from the "data" field + let proofs = data + .get("proofs") + .map(|proofs| { + ::deserialize(proofs) + .map_err(de::Error::custom) + }) + .transpose()?; + + // deserialize data into SignedBuilderBid based on its version + match version.as_str().ok_or_else(|| de::Error::custom("version is not a string"))? { + "bellatrix" => { + let bid = ::deserialize(data) + .map_err(de::Error::custom)?; + Ok(SignedBuilderBid::Bellatrix(bid, proofs)) + } + "capella" => { + let bid = ::deserialize(data) + .map_err(de::Error::custom)?; + Ok(SignedBuilderBid::Capella(bid, proofs)) + } + "deneb" => { + let bid = ::deserialize(data) + .map_err(de::Error::custom)?; + Ok(SignedBuilderBid::Deneb(bid, proofs)) + } + _ => Err(de::Error::custom("unknown version")), + } + } +} + +impl serde::Serialize for SignedBuilderBid { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match self { + Self::Bellatrix(bid, proofs) => { + let mut map = serde_json::Map::new(); + map.insert("version".to_string(), "bellatrix".into()); + + let mut data_map = serde_json::Map::new(); + data_map.insert("message".to_string(), serde_json::to_value(&bid.message).unwrap()); + data_map + .insert("signature".to_string(), serde_json::to_value(&bid.signature).unwrap()); + if let Some(proofs) = proofs { + data_map.insert("proofs".to_string(), serde_json::to_value(proofs).unwrap()); + } + + map.insert("data".to_string(), serde_json::to_value(data_map).unwrap()); + map.serialize(serializer) + } + Self::Capella(bid, proofs) => { + let mut map = serde_json::Map::new(); + map.insert("version".to_string(), "capella".into()); + + let mut data_map = serde_json::Map::new(); + data_map.insert("message".to_string(), serde_json::to_value(&bid.message).unwrap()); + data_map + .insert("signature".to_string(), serde_json::to_value(&bid.signature).unwrap()); + if let Some(proofs) = proofs { + data_map.insert("proofs".to_string(), serde_json::to_value(proofs).unwrap()); + } + + map.insert("data".to_string(), serde_json::to_value(data_map).unwrap()); + map.serialize(serializer) + } + Self::Deneb(bid, proofs) => { + let mut map = serde_json::Map::new(); + map.insert("version".to_string(), "deneb".into()); + + let mut data_map = serde_json::Map::new(); + data_map.insert("message".to_string(), serde_json::to_value(&bid.message).unwrap()); + data_map + .insert("signature".to_string(), serde_json::to_value(&bid.signature).unwrap()); + if let Some(proofs) = proofs { + 
data_map.insert("proofs".to_string(), serde_json::to_value(proofs).unwrap()); + } + + map.insert("data".to_string(), serde_json::to_value(data_map).unwrap()); + map.serialize(serializer) + } + } + } } impl std::fmt::Display for SignedBuilderBid { @@ -62,6 +171,7 @@ impl std::fmt::Display for SignedBuilderBid { impl SignedBuilderBid { pub fn from_submission( submission: &mut SignedBidSubmission, + proofs: Option, public_key: BlsPublicKey, signing_key: &SecretKey, context: &Context, @@ -73,14 +183,14 @@ impl SignedBuilderBid { bellatrix::BuilderBid { header, value: submission.value(), public_key }; let signature = sign_builder_message(&mut message, signing_key, context)?; - Ok(Self::Bellatrix(bellatrix::SignedBuilderBid { message, signature })) + Ok(Self::Bellatrix(bellatrix::SignedBuilderBid { message, signature }, proofs)) } ExecutionPayload::Capella(payload) => { let header = capella::ExecutionPayloadHeader::try_from(payload)?; let mut message = capella::BuilderBid { header, value: submission.value(), public_key }; let signature = sign_builder_message(&mut message, signing_key, context)?; - Ok(Self::Capella(capella::SignedBuilderBid { message, signature })) + Ok(Self::Capella(capella::SignedBuilderBid { message, signature }, proofs)) } ExecutionPayload::Deneb(payload) => { let header = deneb::ExecutionPayloadHeader::try_from(payload)?; @@ -94,7 +204,7 @@ impl SignedBuilderBid { }; let signature = sign_builder_message(&mut message, signing_key, context)?; - Ok(Self::Deneb(deneb::SignedBuilderBid { message, signature })) + Ok(Self::Deneb(deneb::SignedBuilderBid { message, signature }, proofs)) } None => Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, @@ -108,6 +218,7 @@ impl SignedBuilderBid { pub fn from_header_submission( submission: &SignedHeaderSubmission, + proofs: Option, public_key: BlsPublicKey, signing_key: &SecretKey, context: &Context, @@ -121,7 +232,7 @@ impl SignedBuilderBid { }; let signature = sign_builder_message(&mut message, signing_key, context)?; - Ok(Self::Bellatrix(bellatrix::SignedBuilderBid { message, signature })) + Ok(Self::Bellatrix(bellatrix::SignedBuilderBid { message, signature }, proofs)) } ExecutionPayloadHeader::Capella(header) => { let mut message = capella::BuilderBid { @@ -130,7 +241,7 @@ impl SignedBuilderBid { public_key, }; let signature = sign_builder_message(&mut message, signing_key, context)?; - Ok(Self::Capella(capella::SignedBuilderBid { message, signature })) + Ok(Self::Capella(capella::SignedBuilderBid { message, signature }, proofs)) } ExecutionPayloadHeader::Deneb(header) => match submission.commitments() { Some(commitments) => { @@ -142,7 +253,7 @@ impl SignedBuilderBid { }; let signature = sign_builder_message(&mut message, signing_key, context)?; - Ok(Self::Deneb(deneb::SignedBuilderBid { message, signature })) + Ok(Self::Deneb(deneb::SignedBuilderBid { message, signature }, proofs)) } None => Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, @@ -155,41 +266,65 @@ impl SignedBuilderBid { pub fn value(&self) -> U256 { match self { - Self::Bellatrix(bid) => bid.message.value, - Self::Capella(bid) => bid.message.value, - Self::Deneb(bid) => bid.message.value, + Self::Bellatrix(bid, _) => bid.message.value, + Self::Capella(bid, _) => bid.message.value, + Self::Deneb(bid, _) => bid.message.value, } } pub fn public_key(&self) -> &BlsPublicKey { match self { - Self::Bellatrix(bid) => &bid.message.public_key, - Self::Capella(bid) => &bid.message.public_key, - Self::Deneb(bid) => &bid.message.public_key, + Self::Bellatrix(bid, 
_) => &bid.message.public_key, + Self::Capella(bid, _) => &bid.message.public_key, + Self::Deneb(bid, _) => &bid.message.public_key, } } pub fn block_hash(&self) -> &Hash32 { match self { - Self::Bellatrix(bid) => &bid.message.header.block_hash, - Self::Capella(bid) => &bid.message.header.block_hash, - Self::Deneb(bid) => &bid.message.header.block_hash, + Self::Bellatrix(bid, _) => &bid.message.header.block_hash, + Self::Capella(bid, _) => &bid.message.header.block_hash, + Self::Deneb(bid, _) => &bid.message.header.block_hash, } } pub fn parent_hash(&self) -> &Hash32 { match self { - Self::Bellatrix(bid) => &bid.message.header.parent_hash, - Self::Capella(bid) => &bid.message.header.parent_hash, - Self::Deneb(bid) => &bid.message.header.parent_hash, + Self::Bellatrix(bid, _) => &bid.message.header.parent_hash, + Self::Capella(bid, _) => &bid.message.header.parent_hash, + Self::Deneb(bid, _) => &bid.message.header.parent_hash, } } pub fn logs_bloom(&self) -> &ByteVector<256> { match self { - Self::Bellatrix(bid) => &bid.message.header.logs_bloom, - Self::Capella(bid) => &bid.message.header.logs_bloom, - Self::Deneb(bid) => &bid.message.header.logs_bloom, + Self::Bellatrix(bid, _) => &bid.message.header.logs_bloom, + Self::Capella(bid, _) => &bid.message.header.logs_bloom, + Self::Deneb(bid, _) => &bid.message.header.logs_bloom, + } + } + + pub fn version(&self) -> &str { + match self { + Self::Bellatrix(_, _) => "bellatrix", + Self::Capella(_, _) => "capella", + Self::Deneb(_, _) => "deneb", + } + } + + pub fn proofs(&self) -> &Option { + match self { + Self::Bellatrix(_, proofs) => proofs, + Self::Capella(_, proofs) => proofs, + Self::Deneb(_, proofs) => proofs, + } + } + + pub fn set_inclusion_proofs(&mut self, proofs: InclusionProofs) { + match self { + Self::Bellatrix(_, proofs_opt) => *proofs_opt = Some(proofs), + Self::Capella(_, proofs_opt) => *proofs_opt = Some(proofs), + Self::Deneb(_, proofs_opt) => *proofs_opt = Some(proofs), } } } @@ -225,7 +360,7 @@ mod tests { message: Default::default(), signature: Default::default(), }; - let x = SignedBuilderBid::Capella(x); + let x = SignedBuilderBid::Capella(x, None); let x = serde_json::to_vec(&x).unwrap(); println!("{:?}", x); @@ -402,14 +537,17 @@ mod tests { withdrawals_root: Default::default(), }; - let builder_bid = SignedBuilderBid::Capella(capella::SignedBuilderBid { - message: capella::BuilderBid { - header, - value: U256::from(111111), - public_key: Default::default(), + let builder_bid = SignedBuilderBid::Capella( + capella::SignedBuilderBid { + message: capella::BuilderBid { + header, + value: U256::from(111111), + public_key: Default::default(), + }, + signature: Default::default(), }, - signature: Default::default(), - }); + None, + ); let serialized = serde_json::to_vec(&builder_bid); assert!(serialized.is_ok()); @@ -506,4 +644,52 @@ mod tests { let res = serde_json::from_slice::(&json_bytes); assert!(res.is_ok()); } + + #[test] + fn test_deserialize_json_signed_builder_bid_with_proofs() { + let json = serde_json::json!({ + "version": "deneb", + "data": { + "message": { + "header": { + "parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09", + "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "logs_bloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "block_number": "1", + "gas_limit": "1", + "gas_used": "1", + "timestamp": "1", + "extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "base_fee_per_gas": "1", + "blob_gas_used": "1", + "excess_blob_gas": "1", + "block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2" + }, + "blob_kzg_commitments": [ + "0xa94170080872584e54a1cf092d845703b13907f2e6b3b1c0ad573b910530499e3bcd48c6378846b80d2bfa58c81cf3d5" + ], + "value": "1", + "pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a" + }, + "proofs": { + "transaction_hashes": ["0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"], + "generalized_indexes": [4, 5], + "merkle_hashes": ["0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"] + }, + "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" + } + }); + + let res = serde_json::from_value::(json); + assert!(res.is_ok()); + + let signed_builder_bid = res.unwrap(); + assert_eq!(signed_builder_bid.version(), "deneb"); + assert!(matches!(signed_builder_bid, SignedBuilderBid::Deneb(_, Some(_)))); + } } diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs index 2ac70078..d8fc788a 100644 --- a/crates/common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -6,6 +6,7 @@ pub mod chain_info; pub mod config; pub mod eth; pub mod pending_block; +pub mod proofs; pub mod proposer; pub mod signing; pub mod simulator; diff --git a/crates/common/src/proofs.rs b/crates/common/src/proofs.rs new file mode 100644 index 00000000..33a52ac1 --- /dev/null +++ b/crates/common/src/proofs.rs @@ -0,0 +1,191 @@ +use ethereum_consensus::{ + bellatrix::presets::minimal::Transaction, + deneb::minimal::MAX_TRANSACTIONS_PER_PAYLOAD, + phase0::Bytes32, + primitives::{BlsPublicKey, BlsSignature}, + ssz::prelude::*, +}; +use reth_primitives::{PooledTransactionsElement, TxHash, B256}; +use sha2::{Digest, Sha256}; +use tree_hash::Hash256; + +// Import the new version of the `ssz-rs` crate for multiproof verification. 
+use ::ssz_rs as ssz; + +use crate::api::constraints_api::{SignableBLS, MAX_CONSTRAINTS_PER_SLOT}; + +#[derive(Debug, thiserror::Error)] +pub enum ProofError { + #[error("Leaves and indices length mismatch")] + LengthMismatch, + #[error("Mismatch in provided leaves and leaves to prove")] + LeavesMismatch, + #[error("Hash not found in constraints cache: {0:?}")] + MissingHash(TxHash), + #[error("Proof verification failed")] + VerificationFailed, + #[error("Decoding failed: {0}")] + DecodingFailed(String), +} + +#[derive(Debug, Clone, SimpleSerialize, serde::Serialize, serde::Deserialize)] +pub struct InclusionProofs { + pub transaction_hashes: List, + pub generalized_indexes: List, + pub merkle_hashes: List, +} + +impl InclusionProofs { + /// Returns the total number of leaves in the tree. + pub fn total_leaves(&self) -> usize { + self.transaction_hashes.len() + } +} + +pub type HashTreeRoot = tree_hash::Hash256; + +#[derive(Debug, Clone, Serializable, serde::Deserialize, serde::Serialize)] +pub struct SignedConstraints { + pub message: ConstraintsMessage, + pub signature: BlsSignature, +} + +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize, Serializable, Merkleized)] +pub struct ConstraintsMessage { + pub pubkey: BlsPublicKey, + pub slot: u64, + pub top: bool, + pub transactions: List, +} + +impl SignableBLS for ConstraintsMessage { + fn digest(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(&self.pubkey.to_vec()); + hasher.update(self.slot.to_le_bytes()); + hasher.update((self.top as u8).to_le_bytes()); + for tx in self.transactions.iter() { + // Convert the opaque bytes to a EIP-2718 envelope and obtain the tx hash. + // this is needed to handle type 3 transactions. + // FIXME: don't unwrap here and handle the error properly + let tx = PooledTransactionsElement::decode_enveloped(tx.to_vec().into()).unwrap(); + hasher.update(tx.hash().as_slice()); + } + + hasher.finalize().into() + } +} + +/// List of transaction hashes and the corresponding hash tree roots of the raw transactions. +pub type ConstraintsProofData = Vec<(TxHash, HashTreeRoot)>; + +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +pub struct SignedConstraintsWithProofData { + pub signed_constraints: SignedConstraints, + pub proof_data: ConstraintsProofData, +} + +impl TryFrom for SignedConstraintsWithProofData { + type Error = ProofError; + + fn try_from(value: SignedConstraints) -> Result { + let mut transactions = Vec::with_capacity(value.message.transactions.len()); + for transaction in value.message.transactions.to_vec().iter() { + let tx = PooledTransactionsElement::decode_enveloped(transaction.to_vec().into()) + .map_err(|e| ProofError::DecodingFailed(e.to_string()))?; + + let tx_hash = *tx.hash(); + + // Compute the hash tree root on the transaction object decoded without the optional + // sidecar. this is to prevent hashing the blobs of type 3 transactions. + let root = tx.into_transaction().envelope_encoded(); + let root = Transaction::try_from(root.as_ref()) + .map_err(|e| ProofError::DecodingFailed(e.to_string()))?; + let root = root + .clone() + .hash_tree_root() + .map_err(|e| ProofError::DecodingFailed(e.to_string()))?; + let root = Hash256::from_slice(&root); + + transactions.push((tx_hash, root)); + } + + Ok(Self { signed_constraints: value, proof_data: transactions }) + } +} + +/// Returns the length of the leaves that need to be proven (i.e. transactions). 
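+/// For example, two constraint sets carrying three and two transactions
+/// contribute five leaves in total.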
+fn total_leaves(constraints: &[&ConstraintsProofData]) -> usize { + constraints.iter().map(|c| c.len()).sum() +} + +/// Verifies the provided multiproofs against the constraints & transactions root. +/// +/// NOTE: the constraints hashes and hash tree roots must be in the same order of the transaction +/// hashes in the inclusion proofs. +pub fn verify_multiproofs( + constraints_proofs_data: &[&ConstraintsProofData], + proofs: &InclusionProofs, + root: B256, +) -> Result<(), ProofError> { + // Check if the length of the leaves and indices match + if proofs.transaction_hashes.len() != proofs.generalized_indexes.len() { + return Err(ProofError::LengthMismatch) + } + + let total_leaves = total_leaves(constraints_proofs_data); + + // Check if the total leaves matches the proofs provided + if total_leaves != proofs.total_leaves() { + return Err(ProofError::LeavesMismatch) + } + + // Get all the leaves from the saved constraints + let mut leaves = Vec::with_capacity(proofs.total_leaves()); + + // NOTE: Get the leaves from the constraints cache by matching the saved hashes. + // We need the leaves in order to verify the multiproof. + for hash in proofs.transaction_hashes.iter() { + let mut found = false; + for constraints_proof in constraints_proofs_data { + for (saved_hash, leaf) in *constraints_proof { + if saved_hash.as_slice() == hash.as_slice() { + found = true; + leaves.push(B256::from(leaf.0)); + break + } + } + if found { + break + } + } + + // If the hash is not found in the constraints cache, return an error + if !found { + return Err(ProofError::MissingHash(TxHash::from_slice(hash.as_slice()))) + } + } + + // Conversions to the correct types (and versions of the same type) + let leaves = leaves.into_iter().map(|h| h.as_slice().try_into().unwrap()).collect::>(); + let merkle_proofs = proofs + .merkle_hashes + .to_vec() + .iter() + .map(|h| h.as_slice().try_into().unwrap()) + .collect::>(); + let indexes = + proofs.generalized_indexes.to_vec().iter().map(|h| *h as usize).collect::>(); + let root = root.as_slice().try_into().expect("Invalid root length"); + + // Verify the Merkle multiproof against the root + ssz::multiproofs::verify_merkle_multiproof( + leaves.as_slice(), + merkle_proofs.as_ref(), + indexes.as_slice(), + root, + ) + .map_err(|_| ProofError::VerificationFailed)?; + + Ok(()) +} diff --git a/crates/common/src/traces/constraint_api_trace.rs b/crates/common/src/traces/constraint_api_trace.rs new file mode 100644 index 00000000..aa08b5e4 --- /dev/null +++ b/crates/common/src/traces/constraint_api_trace.rs @@ -0,0 +1,8 @@ +#[derive(Clone, Default, Debug, serde::Serialize, serde::Deserialize)] +pub struct ConstraintSubmissionTrace { + pub receive: u64, + pub decode: u64, + pub verify_signature: u64, + pub auctioneer_update: u64, + pub request_finish: u64, +} diff --git a/crates/common/src/traces/mod.rs b/crates/common/src/traces/mod.rs index 68d9658a..4a21cd16 100644 --- a/crates/common/src/traces/mod.rs +++ b/crates/common/src/traces/mod.rs @@ -1,5 +1,7 @@ pub mod builder_api_trace; +pub mod constraint_api_trace; pub mod proposer_api; pub use builder_api_trace::*; +pub use constraint_api_trace::*; pub use proposer_api::*; diff --git a/crates/database/src/postgres/migrations/V21_validator_delegations.sql b/crates/database/src/postgres/migrations/V21_validator_delegations.sql new file mode 100644 index 00000000..aa9aa9e2 --- /dev/null +++ b/crates/database/src/postgres/migrations/V21_validator_delegations.sql @@ -0,0 +1,6 @@ +CREATE TABLE "validator_delegations" ( + 
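-- One row per (validator, delegatee) pair; the composite primary key below
+    -- makes a duplicate delegation a key conflict rather than a second row.
+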
"validator_pubkey" bytea NOT NULL, + "delegatee_pubkey" bytea NOT NULL, + "created_at" timestamptz DEFAULT (now()), + PRIMARY KEY ("validator_pubkey", "delegatee_pubkey") +); \ No newline at end of file diff --git a/crates/database/src/postgres/migrations/V22__proposer_duties_fix.sql b/crates/database/src/postgres/migrations/V22__proposer_duties_fix.sql new file mode 100644 index 00000000..dfcf516f --- /dev/null +++ b/crates/database/src/postgres/migrations/V22__proposer_duties_fix.sql @@ -0,0 +1,2 @@ +ALTER TABLE proposer_duties DROP CONSTRAINT proposer_duties_pkey; +ALTER TABLE proposer_duties ADD PRIMARY KEY (slot_number); diff --git a/crates/database/src/postgres/postgres_db_row_parsing.rs b/crates/database/src/postgres/postgres_db_row_parsing.rs index 73941e35..b1e0bfc6 100644 --- a/crates/database/src/postgres/postgres_db_row_parsing.rs +++ b/crates/database/src/postgres/postgres_db_row_parsing.rs @@ -83,6 +83,12 @@ impl FromRow for GetPayloadTrace { } } +impl FromRow for BlsPublicKey { + fn from_row(row: &tokio_postgres::Row) -> Result { + parse_bytes_to_pubkey(row.get::<&str, &[u8]>("validator_delegations")) + } +} + impl< const BYTES_PER_LOGS_BLOOM: usize, const MAX_EXTRA_DATA_BYTES: usize, diff --git a/crates/datastore/src/auctioneer/mock_auctioneer.rs b/crates/datastore/src/auctioneer/mock_auctioneer.rs index 57729ce3..607cea9c 100644 --- a/crates/datastore/src/auctioneer/mock_auctioneer.rs +++ b/crates/datastore/src/auctioneer/mock_auctioneer.rs @@ -1,15 +1,20 @@ -use std::sync::{atomic::AtomicBool, Arc, Mutex}; +use std::{ + collections::HashMap, + sync::{atomic::AtomicBool, Arc, Mutex}, +}; use async_trait::async_trait; use ethereum_consensus::primitives::{BlsPublicKey, Hash32, U256}; use helix_common::{ + api::constraints_api::{SignedDelegation, SignedRevocation}, bellatrix::Node, bid_submission::{ v2::header_submission::SignedHeaderSubmission, BidTrace, SignedBidSubmission, }, eth::SignedBuilderBid, pending_block::PendingBlock, + proofs::{InclusionProofs, SignedConstraintsWithProofData}, signing::RelaySigningContext, versioned_payload::PayloadAndBlobs, BuilderInfo, ProposerInfo, @@ -25,6 +30,7 @@ pub struct MockAuctioneer { pub builder_demoted: Arc, pub best_bid: Arc>>, pub versioned_execution_payload: Arc>>, + pub constraints: Arc>>>, } impl MockAuctioneer { @@ -34,12 +40,74 @@ impl MockAuctioneer { builder_demoted: Arc::new(AtomicBool::new(false)), best_bid: Arc::new(Mutex::new(None)), versioned_execution_payload: Arc::new(Mutex::new(None)), + constraints: Arc::new(Mutex::new(HashMap::new())), } } } #[async_trait] impl Auctioneer for MockAuctioneer { + async fn get_validator_delegations( + &self, + _pub_key: BlsPublicKey, + ) -> Result, AuctioneerError> { + Ok(vec![]) + } + + async fn save_validator_delegations( + &self, + _signed_delegations: Vec, + ) -> Result<(), AuctioneerError> { + Ok(()) + } + + async fn revoke_validator_delegations( + &self, + _signed_revocations: Vec, + ) -> Result<(), AuctioneerError> { + Ok(()) + } + + async fn save_constraints( + &self, + slot: u64, + constraints: SignedConstraintsWithProofData, + ) -> Result<(), AuctioneerError> { + let mut constraints_map = self.constraints.lock().unwrap(); + let constraints_vec = constraints_map.entry(slot).or_insert_with(Vec::new); + constraints_vec.push(constraints); + Ok(()) + } + async fn get_constraints( + &self, + slot: u64, + ) -> Result>, AuctioneerError> { + let temp = self.constraints.lock().unwrap(); + let constraints = temp.get(&slot); + match constraints { + Some(constraints) => 
Ok(Some(constraints.to_vec())), + None => Ok(None), + } + } + + async fn save_inclusion_proof( + &self, + _slot: u64, + _proposer_pub_key: &BlsPublicKey, + _bid_block_hash: &Hash32, + _inclusion_proof: &InclusionProofs, + ) -> Result<(), AuctioneerError> { + Ok(()) + } + async fn get_inclusion_proof( + &self, + _slot: u64, + _proposer_pub_key: &BlsPublicKey, + _bid_block_hash: &Hash32, + ) -> Result, AuctioneerError> { + Ok(None) + } + async fn get_last_slot_delivered(&self) -> Result, AuctioneerError> { Ok(None) } diff --git a/crates/datastore/src/auctioneer/traits.rs b/crates/datastore/src/auctioneer/traits.rs index 29669d07..68907916 100644 --- a/crates/datastore/src/auctioneer/traits.rs +++ b/crates/datastore/src/auctioneer/traits.rs @@ -1,6 +1,7 @@ use async_trait::async_trait; use ethereum_consensus::primitives::{BlsPublicKey, Hash32, U256}; use helix_common::{ + api::constraints_api::{SignedDelegation, SignedRevocation}, bellatrix::Node, bid_submission::{ v2::header_submission::SignedHeaderSubmission, BidTrace, SignedBidSubmission, @@ -8,6 +9,7 @@ use helix_common::{ builder_info::BuilderInfo, eth::SignedBuilderBid, pending_block::PendingBlock, + proofs::{InclusionProofs, SignedConstraintsWithProofData}, signing::RelaySigningContext, versioned_payload::PayloadAndBlobs, ProposerInfo, @@ -20,6 +22,45 @@ use tokio_stream::Stream; #[async_trait] #[auto_impl::auto_impl(Arc)] pub trait Auctioneer: Send + Sync + Clone { + async fn get_validator_delegations( + &self, + pub_key: BlsPublicKey, + ) -> Result, AuctioneerError>; + + async fn save_validator_delegations( + &self, + signed_delegations: Vec, + ) -> Result<(), AuctioneerError>; + + async fn revoke_validator_delegations( + &self, + signed_revocations: Vec, + ) -> Result<(), AuctioneerError>; + + async fn save_constraints( + &self, + slot: u64, + constraints: SignedConstraintsWithProofData, + ) -> Result<(), AuctioneerError>; + async fn get_constraints( + &self, + slot: u64, + ) -> Result>, AuctioneerError>; + + async fn save_inclusion_proof( + &self, + slot: u64, + proposer_pub_key: &BlsPublicKey, + bid_block_hash: &Hash32, + inclusion_proof: &InclusionProofs, + ) -> Result<(), AuctioneerError>; + async fn get_inclusion_proof( + &self, + slot: u64, + proposer_pub_key: &BlsPublicKey, + bid_block_hash: &Hash32, + ) -> Result, AuctioneerError>; + async fn get_last_slot_delivered(&self) -> Result, AuctioneerError>; async fn check_and_set_last_slot_and_hash_delivered( &self, diff --git a/crates/datastore/src/redis/redis_cache.rs b/crates/datastore/src/redis/redis_cache.rs index 9c5174f5..c7c68d3e 100644 --- a/crates/datastore/src/redis/redis_cache.rs +++ b/crates/datastore/src/redis/redis_cache.rs @@ -1,4 +1,5 @@ -use std::collections::HashMap; +use crate::redis::utils::get_constraints_key; +use std::collections::{HashMap, HashSet}; use async_trait::async_trait; use deadpool_redis::{Config, CreatePoolError, Pool, Runtime}; @@ -8,20 +9,25 @@ use ethereum_consensus::{ }; use futures_util::TryStreamExt; use helix_common::{ - api::builder_api::TopBidUpdate, + api::{ + builder_api::TopBidUpdate, + constraints_api::{SignedDelegation, SignedRevocation}, + }, bid_submission::{v2::header_submission::SignedHeaderSubmission, BidSubmission}, pending_block::PendingBlock, + proofs::SignedConstraintsWithProofData, versioned_payload::PayloadAndBlobs, ProposerInfo, }; use redis::{AsyncCommands, RedisResult, Script, Value}; use serde::{de::DeserializeOwned, Serialize}; use tokio::sync::broadcast; -use tracing::error; +use tracing::{error, trace}; use 
helix_common::{ bid_submission::{BidTrace, SignedBidSubmission}, eth::SignedBuilderBid, + proofs::InclusionProofs, signing::RelaySigningContext, BuilderInfo, }; @@ -36,9 +42,9 @@ use crate::{ utils::{ get_builder_latest_bid_time_key, get_builder_latest_bid_value_key, get_cache_bid_trace_key, get_cache_get_header_response_key, get_execution_payload_key, - get_floor_bid_key, get_floor_bid_value_key, get_latest_bid_by_builder_key, - get_latest_bid_by_builder_key_str_builder_pub_key, get_seen_block_hashes_key, - get_top_bid_value_key, + get_floor_bid_key, get_floor_bid_value_key, get_inclusion_proof_key, + get_latest_bid_by_builder_key, get_latest_bid_by_builder_key_str_builder_pub_key, + get_seen_block_hashes_key, get_top_bid_value_key, }, }, types::{ @@ -53,10 +59,13 @@ }; use super::utils::{ - get_hash_from_hex, get_header_tx_root_key, get_pending_block_builder_block_hash_key, - get_pending_block_builder_key, get_pubkey_from_hex, + get_delegations_key, get_hash_from_hex, get_header_tx_root_key, + get_pending_block_builder_block_hash_key, get_pending_block_builder_key, get_pubkey_from_hex, }; +// Constraints expire after 1 epoch = 32 slots. +const CONSTRAINTS_CACHE_EXPIRY_S: usize = 12 * 32; + const BID_CACHE_EXPIRY_S: usize = 45; const PENDING_BLOCK_EXPIRY_S: usize = 45; const HOUSEKEEPER_LOCK_EXPIRY_MS: usize = 45_000; @@ -498,6 +507,120 @@ impl RedisCache { #[async_trait] impl Auctioneer for RedisCache { + async fn get_validator_delegations( + &self, + pub_key: BlsPublicKey, + ) -> Result<Vec<SignedDelegation>, AuctioneerError> { + let key = get_delegations_key(&pub_key); + + let delegations = + self.get(&key).await.map_err(AuctioneerError::RedisError)?.unwrap_or_default(); + Ok(delegations) + } + + async fn save_validator_delegations( + &self, + signed_delegations: Vec<SignedDelegation>, + ) -> Result<(), AuctioneerError> { + let len = signed_delegations.len(); + for signed_delegation in signed_delegations { + let key = get_delegations_key(&signed_delegation.message.validator_pubkey); + + // Attempt to get the existing delegations from the cache. + let mut delegations: Vec<SignedDelegation> = + self.get(&key).await.map_err(AuctioneerError::RedisError)?.unwrap_or_default(); + + // Append the new delegation to the existing delegations, removing duplicates. + delegations.push(signed_delegation); + let new_delegations: Vec<SignedDelegation> = + delegations.into_iter().collect::<HashSet<_>>().into_iter().collect(); + + // Save the updated delegations back to the cache. + self.set(&key, &new_delegations, None).await.map_err(AuctioneerError::RedisError)?; + } + + trace!(len, "saved delegations to cache"); + + Ok(()) + } + + async fn revoke_validator_delegations( + &self, + signed_revocations: Vec<SignedRevocation>, + ) -> Result<(), AuctioneerError> { + for signed_revocation in &signed_revocations { + let key = get_delegations_key(&signed_revocation.message.validator_pubkey); + + // Attempt to get the existing delegations from the cache. + let mut delegations: Vec<SignedDelegation> = + self.get(&key).await.map_err(AuctioneerError::RedisError)?.unwrap_or_default(); + + // Filter out the revoked delegations. `Vec::retain` filters in + // place and returns `()`, so rebind the filtered list for the save below. + delegations.retain(|delegation| { + signed_revocations.iter().all(|revocation| { + delegation.message.delegatee_pubkey != revocation.message.delegatee_pubkey + }) + }); + let updated_delegations = delegations; + + // Save the updated delegations back to the cache.
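+            // NOTE (editorial): this read-modify-write sequence is not atomic;
+            // two concurrent revocations, or a revocation racing a delegation,
+            // can lose an update. A WATCH/MULTI transaction or a Lua script
+            // would close that window; the same applies to
+            // `save_validator_delegations` above.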
+ self.set(&key, &updated_delegations, None) + .await + .map_err(AuctioneerError::RedisError)?; + } + + Ok(()) + } + + async fn save_constraints( + &self, + slot: u64, + constraints: SignedConstraintsWithProofData, + ) -> Result<(), AuctioneerError> { + let key = get_constraints_key(slot); + + // Get the existing constraints from the cache or create new constraints. + let mut prev_constraints: Vec = + self.get(&key).await.map_err(AuctioneerError::RedisError)?.unwrap_or_default(); + + prev_constraints.push(constraints); + + // Save the constraints to the cache. + self.set(&key, &prev_constraints, Some(CONSTRAINTS_CACHE_EXPIRY_S)) + .await + .map_err(AuctioneerError::RedisError) + } + + async fn get_constraints( + &self, + slot: u64, + ) -> Result>, AuctioneerError> { + let key = get_constraints_key(slot); + self.get(&key).await.map_err(AuctioneerError::RedisError) + } + + async fn save_inclusion_proof( + &self, + slot: u64, + proposer_pub_key: &BlsPublicKey, + bid_block_hash: &Hash32, + inclusion_proof: &InclusionProofs, + ) -> Result<(), AuctioneerError> { + let key = get_inclusion_proof_key(slot, proposer_pub_key, bid_block_hash); + self.set(&key, inclusion_proof, Some(CONSTRAINTS_CACHE_EXPIRY_S)) + .await + .map_err(AuctioneerError::RedisError) + } + + async fn get_inclusion_proof( + &self, + slot: u64, + proposer_pub_key: &BlsPublicKey, + bid_block_hash: &Hash32, + ) -> Result, AuctioneerError> { + let key = get_inclusion_proof_key(slot, proposer_pub_key, bid_block_hash); + self.get(&key).await.map_err(AuctioneerError::RedisError) + } + async fn get_last_slot_delivered(&self) -> Result, AuctioneerError> { self.get(LAST_SLOT_DELIVERED_KEY).await.map_err(AuctioneerError::RedisError) } @@ -712,6 +835,7 @@ impl Auctioneer for RedisCache { let mut cloned_submission = (*submission).clone(); let builder_bid = SignedBuilderBid::from_submission( &mut cloned_submission, + None, // Note: inclusion proofs are saved separately in the cache. signing_context.public_key.clone(), &signing_context.signing_key, &signing_context.context, @@ -975,6 +1099,7 @@ impl Auctioneer for RedisCache { // Sign builder bid with relay pubkey. let builder_bid = SignedBuilderBid::from_header_submission( submission, + None, // Note: inclusion proofs are saved separately in the cache. 
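+        // (Editorial) Proofs stored via `save_inclusion_proof` are keyed by
+        // (slot, proposer_pub_key, block_hash), so the proposer-side
+        // `get_header_with_proofs` flow can presumably look them up with
+        // `get_inclusion_proof` and attach them to the winning bid through
+        // `SignedBuilderBid::set_inclusion_proofs`.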
signing_context.public_key.clone(), &signing_context.signing_key, &signing_context.context, @@ -1438,10 +1563,13 @@ mod tests { public_key: prev_builder_pubkey.clone(), }; - let prev_best_bid = SignedBuilderBid::Capella(capella::SignedBuilderBid { - message: capella_builder_bid.clone(), - ..Default::default() - }); + let prev_best_bid = SignedBuilderBid::Capella( + capella::SignedBuilderBid { + message: capella_builder_bid.clone(), + ..Default::default() + }, + None, + ); let res = cache .save_builder_bid( @@ -1503,10 +1631,13 @@ mod tests { // Test with floor_value greater than top_bid_value let higher_floor_value = U256::from(70); capella_builder_bid.value = higher_floor_value; - let floor_bid = SignedBuilderBid::Capella(capella::SignedBuilderBid { - message: capella_builder_bid.clone(), - ..Default::default() - }); + let floor_bid = SignedBuilderBid::Capella( + capella::SignedBuilderBid { + message: capella_builder_bid.clone(), + ..Default::default() + }, + None, + ); let key_floor_bid = get_floor_bid_key(slot, &parent_hash, &proposer_pub_key); let res = cache.set(&key_floor_bid, &floor_bid, None).await; @@ -1620,7 +1751,7 @@ mod tests { let mut capella_bid = capella::SignedBuilderBid::default(); capella_bid.message.value = U256::from(1999); - let best_bid = SignedBuilderBid::Capella(capella_bid); + let best_bid = SignedBuilderBid::Capella(capella_bid, None); // Save the best bid let key = get_cache_get_header_response_key(slot, &parent_hash, &proposer_pub_key); @@ -1699,7 +1830,7 @@ mod tests { ..Default::default() }; bid.message.header.block_hash = block_hash; - let builder_bid = SignedBuilderBid::Capella(bid); + let builder_bid = SignedBuilderBid::Capella(bid, None); // Test: save_builder_bid let res = cache @@ -1944,22 +2075,28 @@ mod tests { // Save 2 builder bids. 
builder bid 1 > builder bid 2 let builder_pub_key_1 = BlsPublicKey::try_from([1u8; 48].as_ref()).unwrap(); - let builder_bid_1 = SignedBuilderBid::Capella(capella::SignedBuilderBid { - message: helix_common::eth::capella::BuilderBid { - value: U256::from(100), + let builder_bid_1 = SignedBuilderBid::Capella( + capella::SignedBuilderBid { + message: helix_common::eth::capella::BuilderBid { + value: U256::from(100), + ..Default::default() + }, ..Default::default() }, - ..Default::default() - }); + None, + ); let builder_pub_key_2 = BlsPublicKey::try_from([2u8; 48].as_ref()).unwrap(); - let builder_bid_2 = SignedBuilderBid::Capella(capella::SignedBuilderBid { - message: helix_common::eth::capella::BuilderBid { - value: U256::from(50), + let builder_bid_2 = SignedBuilderBid::Capella( + capella::SignedBuilderBid { + message: helix_common::eth::capella::BuilderBid { + value: U256::from(50), + ..Default::default() + }, ..Default::default() }, - ..Default::default() - }); + None, + ); // Save both builder bids let set_result = cache diff --git a/crates/datastore/src/redis/utils.rs b/crates/datastore/src/redis/utils.rs index 487de634..b9b41f72 100644 --- a/crates/datastore/src/redis/utils.rs +++ b/crates/datastore/src/redis/utils.rs @@ -4,12 +4,28 @@ use crate::{ error::AuctioneerError, types::keys::{ BID_FLOOR_KEY, BID_FLOOR_VALUE_KEY, BID_TRACE_KEY, BLOCK_BUILDER_LATEST_BID_KEY, - BLOCK_BUILDER_LATEST_BID_TIME_KEY, BLOCK_BUILDER_LATEST_BID_VALUE_KEY, EXEC_PAYLOAD_KEY, - GET_HEADER_RESPONSE_KEY, HEADER_TX_ROOT, PENDING_BLOCK_KEY, SEEN_BLOCK_HASHES_KEY, - TOP_BID_VALUE_KEY, + BLOCK_BUILDER_LATEST_BID_TIME_KEY, BLOCK_BUILDER_LATEST_BID_VALUE_KEY, CONSTRAINTS_KEY, + DELEGATIONS_KEY, EXEC_PAYLOAD_KEY, GET_HEADER_RESPONSE_KEY, HEADER_TX_ROOT, + INCLUSION_PROOF_KEY, PENDING_BLOCK_KEY, SEEN_BLOCK_HASHES_KEY, TOP_BID_VALUE_KEY, }, }; +pub fn get_delegations_key(validator_pubkey: &BlsPublicKey) -> String { + format!("{DELEGATIONS_KEY}:{validator_pubkey:?}") +} + +pub fn get_constraints_key(slot: u64) -> String { + format!("{CONSTRAINTS_KEY}:{slot}") +} + +pub fn get_inclusion_proof_key( + slot: u64, + proposer_pub_key: &BlsPublicKey, + bid_block_hash: &Hash32, +) -> String { + format!("{INCLUSION_PROOF_KEY}:{slot}_{proposer_pub_key:?}_{bid_block_hash:?}") +} + pub fn get_cache_get_header_response_key( slot: u64, parent_hash: &Hash32, diff --git a/crates/datastore/src/types/keys.rs b/crates/datastore/src/types/keys.rs index 5c5b61ad..77ba7cd2 100644 --- a/crates/datastore/src/types/keys.rs +++ b/crates/datastore/src/types/keys.rs @@ -1,4 +1,7 @@ // Auctioneer +pub(crate) const DELEGATIONS_KEY: &str = "delegations"; +pub(crate) const CONSTRAINTS_KEY: &str = "constraints"; +pub(crate) const INCLUSION_PROOF_KEY: &str = "inclusion-proof"; pub(crate) const LAST_HASH_DELIVERED_KEY: &str = "last-hash-delivered"; pub(crate) const LAST_SLOT_DELIVERED_KEY: &str = "last-slot-delivered"; pub(crate) const BID_TRACE_KEY: &str = "cache-bid-trace"; diff --git a/crates/datastore/src/types/signed_builder_bid_wrapper.rs b/crates/datastore/src/types/signed_builder_bid_wrapper.rs index eca2962b..62a499da 100644 --- a/crates/datastore/src/types/signed_builder_bid_wrapper.rs +++ b/crates/datastore/src/types/signed_builder_bid_wrapper.rs @@ -26,7 +26,7 @@ impl SignedBuilderBidWrapper { impl From for TopBidUpdate { fn from(val: SignedBuilderBidWrapper) -> Self { match val.bid { - SignedBuilderBid::Bellatrix(bid) => TopBidUpdate { + SignedBuilderBid::Bellatrix(bid, _) => TopBidUpdate { timestamp: val.received_at_ms, slot: val.slot, 
block_number: bid.message.header.block_number, @@ -36,7 +36,7 @@ impl From for TopBidUpdate { fee_recipient: bid.message.header.fee_recipient, value: bid.message.value, }, - SignedBuilderBid::Capella(bid) => TopBidUpdate { + SignedBuilderBid::Capella(bid, _) => TopBidUpdate { timestamp: val.received_at_ms, slot: val.slot, block_number: bid.message.header.block_number, @@ -46,7 +46,7 @@ impl From for TopBidUpdate { fee_recipient: bid.message.header.fee_recipient, value: bid.message.value, }, - SignedBuilderBid::Deneb(bid) => TopBidUpdate { + SignedBuilderBid::Deneb(bid, _) => TopBidUpdate { timestamp: val.received_at_ms, slot: val.slot, block_number: bid.message.header.block_number, diff --git a/crates/housekeeper/src/chain_event_updater.rs b/crates/housekeeper/src/chain_event_updater.rs index 57839c5f..1df5fd0a 100644 --- a/crates/housekeeper/src/chain_event_updater.rs +++ b/crates/housekeeper/src/chain_event_updater.rs @@ -5,9 +5,7 @@ use std::{ }; use ethereum_consensus::{ - configs::{goerli::CAPELLA_FORK_EPOCH, mainnet::SECONDS_PER_SLOT}, - deneb::Withdrawal, - primitives::Bytes32, + configs::goerli::CAPELLA_FORK_EPOCH, deneb::Withdrawal, primitives::Bytes32, }; use tokio::{ sync::{broadcast, mpsc}, @@ -157,7 +155,8 @@ impl ChainEventUpdater { // Validate this isn't a faulty head slot if let Ok(current_timestamp) = SystemTime::now().duration_since(UNIX_EPOCH) { - let slot_timestamp = self.chain_info.genesis_time_in_secs + (slot * SECONDS_PER_SLOT); + let slot_timestamp = + self.chain_info.genesis_time_in_secs + (slot * self.chain_info.seconds_per_slot); if slot_timestamp > current_timestamp.as_secs() + MAX_DISTANCE_FOR_FUTURE_SLOT { warn!(head_slot = slot, "slot is too far in the future",); return diff --git a/crates/housekeeper/src/housekeeper.rs b/crates/housekeeper/src/housekeeper.rs index ecd71e53..7b48ad6f 100644 --- a/crates/housekeeper/src/housekeeper.rs +++ b/crates/housekeeper/src/housekeeper.rs @@ -44,7 +44,7 @@ const CUTT_OFF_TIME: u64 = 4; // Constants for known validators refresh logic. 
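// (Editorial) With mainnet's 12 s slots these bounds read as: at most one
// on-demand refresh every MIN_SLOTS_BETWEEN_UPDATES * 12 = 72 s, and a forced
// refresh at least once per MAX_SLOTS_BEFORE_FORCED_UPDATE * 12 = 384 s (one
// epoch).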
const MIN_SLOTS_BETWEEN_UPDATES: u64 = 6; const MAX_SLOTS_BEFORE_FORCED_UPDATE: u64 = 32; -pub(crate) const SLEEP_DURATION_BEFORE_REFRESHING_VALIDATORS: Duration = Duration::from_secs(6); +pub(crate) const SLEEP_DURATION_BEFORE_REFRESHING_VALIDATORS: Duration = Duration::from_millis(200); // Max time between header and payload for OptimsiticV2 submissions const MAX_DELAY_BETWEEN_V2_SUBMISSIONS_MS: u64 = 2_000; @@ -484,6 +484,8 @@ impl let mut formatted_proposer_duties: Vec = Vec::with_capacity(proposer_duties.len()); + let len = proposer_duties.len(); + for duty in proposer_duties { if let Some(reg) = signed_validator_registrations.get(&duty.public_key) { if duty.public_key != reg.registration_info.registration.message.public_key { @@ -495,6 +497,13 @@ impl validator_index: duty.validator_index, entry: reg.registration_info.clone(), }); + } else { + warn!( + public_key = %duty.public_key, + slot = duty.slot, + proposer_duties_len = len, + "No signed validator registration found for proposer duty" + ); } } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index d5f02e8e..b29ca489 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -10,6 +10,8 @@ license.workspace = true # Serialization and Data Format serde.workspace = true serde_json.workspace = true +tree_hash = "0.6.0" +tree_hash_derive = "0.6.0" # Ethereum Types ethereum-consensus.workspace = true diff --git a/crates/utils/src/signing.rs b/crates/utils/src/signing.rs index c8bc9789..733b2e0e 100644 --- a/crates/utils/src/signing.rs +++ b/crates/utils/src/signing.rs @@ -3,11 +3,17 @@ use ethereum_consensus::{ domains::DomainType, phase0::mainnet::compute_domain, primitives::{BlsPublicKey, BlsSignature, Domain, Root, Slot}, - signing::{compute_signing_root, sign_with_domain, verify_signed_data}, + signing::{compute_signing_root, sign_with_domain, verify_signature, verify_signed_data}, ssz::prelude::*, state_transition::Context, Error, Fork, }; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +pub const APPLICATION_BUILDER_DOMAIN: [u8; 4] = [0, 0, 0, 1]; +pub const GENESIS_VALIDATORS_ROOT: [u8; 32] = [0; 32]; +pub const COMMIT_BOOST_DOMAIN: [u8; 4] = [109, 109, 111, 67]; pub fn verify_signed_consensus_message( message: &mut T, @@ -40,6 +46,52 @@ pub fn verify_signed_builder_message( Ok(()) } +pub fn compute_signing_root_custom(object_root: [u8; 32], signing_domain: [u8; 32]) -> [u8; 32] { + #[derive(Default, Debug, TreeHash)] + struct SigningData { + object_root: [u8; 32], + signing_domain: [u8; 32], + } + + let signing_data = SigningData { object_root, signing_domain }; + signing_data.tree_hash_root().0 +} + +pub fn verify_signed_message( + message: &T, + signature: &BlsSignature, + public_key: &BlsPublicKey, + domain_mask: [u8; 4], + context: &Context, +) -> Result<(), Error> { + let domain = compute_domain_custom(context, domain_mask); + let signing_root = compute_signing_root_custom(message.tree_hash_root().0, domain); + + verify_signature(public_key, &signing_root, signature) +} + +// NOTE: this currently works only for builder domain signatures and +// verifications +// ref: https://github.com/ralexstokes/ethereum-consensus/blob/cf3c404043230559660810bc0c9d6d5a8498d819/ethereum-consensus/src/builder/mod.rs#L26-L29 +pub fn compute_domain_custom(chain: &Context, domain_mask: [u8; 4]) -> [u8; 32] { + #[derive(Debug, TreeHash)] + struct ForkData { + fork_version: [u8; 4], + genesis_validators_root: [u8; 32], + } + + let mut domain = [0u8; 32]; + domain[..4].copy_from_slice(&domain_mask); + + let 
fork_version = chain.genesis_fork_version; + let fd = ForkData { fork_version, genesis_validators_root: GENESIS_VALIDATORS_ROOT }; + let fork_data_root = fd.tree_hash_root().0; + + domain[4..].copy_from_slice(&fork_data_root[..28]); + + domain +} + pub fn compute_consensus_signing_root( data: &mut T, slot: Slot,
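// (Editorial) Worked reading of `compute_domain_custom` above: the 32-byte
// domain is `domain_mask` (4 bytes) followed by the first 28 bytes of
// `fork_data_root`, where `fork_data_root` is the tree hash of
// (genesis_fork_version, GENESIS_VALIDATORS_ROOT). For COMMIT_BOOST_DOMAIN the
// mask bytes [109, 109, 111, 67] spell "mmoC".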