From 90cf69e98fe0ec1765418aad3b4a09c8dd8d744a Mon Sep 17 00:00:00 2001 From: lion Date: Wed, 8 Nov 2023 11:29:07 +0300 Subject: [PATCH 01/16] Compute recent lightclient updates --- beacon_node/beacon_chain/src/beacon_chain.rs | 57 +++- beacon_node/beacon_chain/src/builder.rs | 20 +- beacon_node/beacon_chain/src/lib.rs | 5 +- ...ght_client_finality_update_verification.rs | 55 +--- ...t_client_optimistic_update_verification.rs | 64 ++--- .../src/lightclient_proofs_cache.rs | 247 ++++++++++++++++++ .../src/broadcast_lightclient_updates.rs | 30 +++ beacon_node/client/src/builder.rs | 34 ++- beacon_node/client/src/lib.rs | 1 + beacon_node/http_api/src/lib.rs | 10 +- beacon_node/http_api/tests/tests.rs | 10 +- 11 files changed, 425 insertions(+), 108 deletions(-) create mode 100644 beacon_node/beacon_chain/src/lightclient_proofs_cache.rs create mode 100644 beacon_node/client/src/broadcast_lightclient_updates.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 53583390fa3..4a97d6e77fe 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -38,6 +38,7 @@ use crate::light_client_finality_update_verification::{ use crate::light_client_optimistic_update_verification::{ Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate, }; +use crate::lightclient_proofs_cache::LightclientServerCache; use crate::migrate::BackgroundMigrator; use crate::naive_aggregation_pool::{ AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, @@ -339,6 +340,8 @@ struct PartialBeaconBlock { bls_to_execution_changes: Vec, } +pub type LightclientProducerEvent = (Hash256, Slot, SyncAggregate); + pub type BeaconForkChoice = ForkChoice< BeaconForkChoiceStore< ::EthSpec, @@ -418,10 +421,6 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen BLS to execution changes for. pub(crate) observed_bls_to_execution_changes: Mutex>, - /// The most recently validated light client finality update received on gossip. - pub latest_seen_finality_update: Mutex>>, - /// The most recently validated light client optimistic update received on gossip. - pub latest_seen_optimistic_update: Mutex>>, /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Interfaces with the execution client. @@ -464,6 +463,11 @@ pub struct BeaconChain { pub block_times_cache: Arc>, /// A cache used to track pre-finalization block roots for quick rejection. pub pre_finalization_block_cache: PreFinalizationBlockCache, + /// A cache used to produce lightclient server messages + pub lightclient_server_cache: LightclientServerCache, + /// Sender to signal the lightclient server to produce new updates + pub lightclient_server_tx: + Option>>, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, @@ -1295,6 +1299,19 @@ impl BeaconChain { self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots) } + pub fn recompute_and_cache_lightclient_updates( + &self, + (parent_root, slot, sync_aggregate): LightclientProducerEvent, + ) -> Result<(), Error> { + self.lightclient_server_cache.recompute_and_cache_updates( + &self.log, + self.store.clone(), + &parent_root, + slot, + &sync_aggregate, + ) + } + /// Returns the current heads of the `BeaconChain`. For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. 
@@ -3433,6 +3450,18 @@ impl BeaconChain { }; let current_finalized_checkpoint = state.finalized_checkpoint(); + // compute state proofs for light client updates before inserting the state into the + // snapshot cache. + self.lightclient_server_cache + .cache_state_data( + &self.spec, block, block_root, + // mutable reference on the state is needed to compute merkle proofs + &mut state, + ) + .unwrap_or_else(|e| { + error!(self.log, "error caching lightclient data {:?}", e); + }); + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .ok_or(Error::SnapshotCacheLockTimeout) @@ -3803,6 +3832,26 @@ impl BeaconChain { })); } } + + // Do not trigger lightclient server update producer for old blocks, to extra work + // during sync. + if block_delay_total < self.slot_clock.slot_duration() * 32 { + if let Some(lightclient_server_tx) = self.lightclient_server_tx.clone() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { + if let Err(e) = lightclient_server_tx.try_send(( + block.parent_root(), + block.slot(), + sync_aggregate.clone(), + )) { + warn!( + self.log, + "Failed to send lightclient server event"; + "error" => ?e + ); + } + } + } + } } // For the current and next epoch of this state, ensure we have the shuffling from this diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index fbd255126ee..0ce11bddf53 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,6 @@ -use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_chain::{ + CanonicalHead, LightclientProducerEvent, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY, +}; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::data_availability_checker::DataAvailabilityChecker; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; @@ -6,6 +8,7 @@ use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; +use crate::lightclient_proofs_cache::LightclientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; @@ -87,6 +90,7 @@ pub struct BeaconChainBuilder { event_handler: Option>, slot_clock: Option, shutdown_sender: Option>, + lightclient_server_tx: Option>>, head_tracker: Option, validator_pubkey_cache: Option>, spec: ChainSpec, @@ -129,6 +133,7 @@ where event_handler: None, slot_clock: None, shutdown_sender: None, + lightclient_server_tx: None, head_tracker: None, validator_pubkey_cache: None, spec: TEthSpec::default_spec(), @@ -603,6 +608,15 @@ where self } + /// Sets a `Sender` to allow the beacon chain to trigger lightclient update production. + pub fn lightclient_server_tx( + mut self, + sender: tokio::sync::mpsc::Sender>, + ) -> Self { + self.lightclient_server_tx = Some(sender); + self + } + /// Creates a new, empty operation pool. 
fn empty_op_pool(mut self) -> Self { self.op_pool = Some(OperationPool::new()); @@ -883,8 +897,6 @@ where observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), observed_bls_to_execution_changes: <_>::default(), - latest_seen_finality_update: <_>::default(), - latest_seen_optimistic_update: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, genesis_validators_root, @@ -912,6 +924,8 @@ where validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), + lightclient_server_cache: LightclientServerCache::new(), + lightclient_server_tx: self.lightclient_server_tx, shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e2d37078ac5..23056c84ca3 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -30,6 +30,7 @@ pub mod historical_blocks; pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; +mod lightclient_proofs_cache; pub mod merge_readiness; pub mod metrics; pub mod migrate; @@ -58,8 +59,8 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, BeaconBlockResponseType, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, - WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + ForkChoiceError, LightclientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, + StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 791d63ccfe5..6beeb06ca3c 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -3,9 +3,7 @@ use derivative::Derivative; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; -use types::{ - light_client_update::Error as LightClientUpdateError, LightClientFinalityUpdate, Slot, -}; +use types::{light_client_update::Error as LightClientUpdateError, LightClientFinalityUpdate}; /// Returned when a light client finality update was not successfully verified. It might not have been verified for /// two reasons: @@ -63,42 +61,28 @@ impl VerifiedLightClientFinalityUpdate { /// Returns `Ok(Self)` if the `light_client_finality_update` is valid to be (re)published on the gossip /// network. 
pub fn verify( - light_client_finality_update: LightClientFinalityUpdate, + rcv_finality_update: LightClientFinalityUpdate, chain: &BeaconChain, seen_timestamp: Duration, ) -> Result { - let gossiped_finality_slot = light_client_finality_update.finalized_header.beacon.slot; - let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); - let signature_slot = light_client_finality_update.signature_slot; - let start_time = chain.slot_clock.start_of(signature_slot); - let mut latest_seen_finality_update = chain.latest_seen_finality_update.lock(); - - let head = chain.canonical_head.cached_head(); - let head_block = &head.snapshot.beacon_block; - let attested_block_root = head_block.message().parent_root(); - let attested_block = chain - .get_blinded_block(&attested_block_root)? - .ok_or(Error::FailedConstructingUpdate)?; - let mut attested_state = chain - .get_state(&attested_block.state_root(), Some(attested_block.slot()))? - .ok_or(Error::FailedConstructingUpdate)?; - - let finalized_block_root = attested_state.finalized_checkpoint().root; - let finalized_block = chain - .get_blinded_block(&finalized_block_root)? + let latest_finality_update = chain + .lightclient_server_cache + .get_latest_finality_update() .ok_or(Error::FailedConstructingUpdate)?; - let latest_seen_finality_update_slot = match latest_seen_finality_update.as_ref() { - Some(update) => update.finalized_header.beacon.slot, - None => Slot::new(0), - }; // verify that no other finality_update with a lower or equal // finalized_header.slot was already forwarded on the network - if gossiped_finality_slot <= latest_seen_finality_update_slot { + if rcv_finality_update.finalized_header.beacon.slot + <= latest_finality_update.finalized_header.beacon.slot + { return Err(Error::FinalityUpdateAlreadySeen); } // verify that enough time has passed for the block to have been propagated + let start_time = chain + .slot_clock + .start_of(rcv_finality_update.signature_slot); + let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); match start_time { Some(time) => { if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() @@ -110,24 +94,13 @@ impl VerifiedLightClientFinalityUpdate { None => return Err(Error::SigSlotStartIsNone), } - let head_state = &head.snapshot.beacon_state; - let finality_update = LightClientFinalityUpdate::new( - &chain.spec, - head_state, - head_block, - &mut attested_state, - &finalized_block, - )?; - // verify that the gossiped finality update is the same as the locally constructed one. 
- if finality_update != light_client_finality_update { + if latest_finality_update != rcv_finality_update { return Err(Error::InvalidLightClientFinalityUpdate); } - *latest_seen_finality_update = Some(light_client_finality_update.clone()); - Ok(Self { - light_client_finality_update, + light_client_finality_update: rcv_finality_update, seen_timestamp, }) } diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 374cc9a7753..7d5e96d008b 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -4,9 +4,7 @@ use eth2::types::Hash256; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; -use types::{ - light_client_update::Error as LightClientUpdateError, LightClientOptimisticUpdate, Slot, -}; +use types::{light_client_update::Error as LightClientUpdateError, LightClientOptimisticUpdate}; /// Returned when a light client optimistic update was not successfully verified. It might not have been verified for /// two reasons: @@ -67,38 +65,15 @@ impl VerifiedLightClientOptimisticUpdate { /// Returns `Ok(Self)` if the `light_client_optimistic_update` is valid to be (re)published on the gossip /// network. pub fn verify( - light_client_optimistic_update: LightClientOptimisticUpdate, + rcv_optimistic_update: LightClientOptimisticUpdate, chain: &BeaconChain, seen_timestamp: Duration, ) -> Result { - let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.beacon.slot; - let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); - let signature_slot = light_client_optimistic_update.signature_slot; - let start_time = chain.slot_clock.start_of(signature_slot); - let mut latest_seen_optimistic_update = chain.latest_seen_optimistic_update.lock(); - - let head = chain.canonical_head.cached_head(); - let head_block = &head.snapshot.beacon_block; - let attested_block_root = head_block.message().parent_root(); - let attested_block = chain - .get_blinded_block(&attested_block_root)? - .ok_or(Error::FailedConstructingUpdate)?; - - let attested_state = chain - .get_state(&attested_block.state_root(), Some(attested_block.slot()))? 
- .ok_or(Error::FailedConstructingUpdate)?; - let latest_seen_optimistic_update_slot = match latest_seen_optimistic_update.as_ref() { - Some(update) => update.attested_header.beacon.slot, - None => Slot::new(0), - }; - - // verify that no other optimistic_update with a lower or equal - // optimistic_header.slot was already forwarded on the network - if gossiped_optimistic_slot <= latest_seen_optimistic_update_slot { - return Err(Error::OptimisticUpdateAlreadySeen); - } - // verify that enough time has passed for the block to have been propagated + let start_time = chain + .slot_clock + .start_of(rcv_optimistic_update.signature_slot); + let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); match start_time { Some(time) => { if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() @@ -110,30 +85,21 @@ impl VerifiedLightClientOptimisticUpdate { None => return Err(Error::SigSlotStartIsNone), } - // check if we can process the optimistic update immediately - // otherwise queue - let canonical_root = light_client_optimistic_update - .attested_header - .beacon - .canonical_root(); - - if canonical_root != head_block.message().parent_root() { - return Err(Error::UnknownBlockParentRoot(canonical_root)); - } - - let optimistic_update = - LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?; + let latest_optimistic_update = chain + .lightclient_server_cache + .get_latest_optimistic_update() + .ok_or(Error::FailedConstructingUpdate)?; // verify that the gossiped optimistic update is the same as the locally constructed one. - if optimistic_update != light_client_optimistic_update { + if latest_optimistic_update != rcv_optimistic_update { return Err(Error::InvalidLightClientOptimisticUpdate); } - *latest_seen_optimistic_update = Some(light_client_optimistic_update.clone()); - + let parent_root = rcv_optimistic_update.attested_header.beacon.parent_root; Ok(Self { - light_client_optimistic_update, - parent_root: canonical_root, + light_client_optimistic_update: rcv_optimistic_update, + // TODO: why is the parent_root necessary here? + parent_root, seen_timestamp, }) } diff --git a/beacon_node/beacon_chain/src/lightclient_proofs_cache.rs b/beacon_node/beacon_chain/src/lightclient_proofs_cache.rs new file mode 100644 index 00000000000..fc43b306c3d --- /dev/null +++ b/beacon_node/beacon_chain/src/lightclient_proofs_cache.rs @@ -0,0 +1,247 @@ +use crate::errors::BeaconChainError; +use crate::{BeaconChainTypes, BeaconStore}; +use parking_lot::{Mutex, RwLock}; +use slog::{debug, Logger}; +use ssz_types::FixedVector; +use types::light_client_update::{FinalizedRootProofLen, FINALIZED_ROOT_INDEX}; +use types::{ + BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate, + LightClientHeader, LightClientOptimisticUpdate, Slot, SyncAggregate, +}; + +/// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the +/// prev block cache are very small 32 * (6 + 1) = 224 bytes. 32 is an arbitrary number that +/// represents unlikely re-orgs, while keeping the cache very small. +const PREV_BLOCK_CACHE_SIZE: usize = 32; + +/// This cache computes light client messages ahead of time, required to satisfy p2p and API +/// requests. These messages include proofs on historical states, so on-demand computation is +/// expensive. +/// +pub struct LightclientServerCache { + /// Tracks a single global latest finality update out of all imported blocks. 
+    ///
+    /// TODO: Active discussion with @etan-status if this cache should be fork aware to return
+    /// latest canonical instead of global latest.
+    latest_finality_update: RwLock>>,
+    /// Tracks a single global latest optimistic update out of all imported blocks.
+    latest_optimistic_update: RwLock>>,
+    /// Caches state proofs by block root
+    prev_block_cache: Mutex>,
+}
+
+impl LightclientServerCache {
+    pub fn new() -> Self {
+        Self {
+            latest_finality_update: None.into(),
+            latest_optimistic_update: None.into(),
+            prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(),
+        }
+    }
+
+    /// Compute and cache state proofs for later production of light-client messages. Does not
+    /// trigger block replay. May result in multiple DB write ops.
+    /// TODO: Should return StoreOps to batch with rest of db operations?
+    pub fn cache_state_data(
+        &self,
+        spec: &ChainSpec,
+        block: BeaconBlockRef,
+        block_root: Hash256,
+        block_post_state: &mut BeaconState,
+    ) -> Result<(), BeaconChainError> {
+        // Only post-altair
+        if spec.fork_name_at_slot::(block.slot()) == ForkName::Base {
+            return Ok(());
+        }
+
+        // Persist in memory cache for a descendant block
+
+        let cached_data = LightclientCachedData::from_state(block_post_state)?;
+        self.prev_block_cache.lock().put(block_root, cached_data);
+
+        Ok(())
+    }
+
+    /// Given a block with a SyncAggregate, computes better or more recent light client updates.
+    /// The results are cached either on disk or in memory to be served via p2p and the REST API.
+    pub fn recompute_and_cache_updates(
+        &self,
+        log: &Logger,
+        store: BeaconStore,
+        block_parent_root: &Hash256,
+        block_slot: Slot,
+        sync_aggregate: &SyncAggregate,
+    ) -> Result<(), BeaconChainError> {
+        let signature_slot = block_slot;
+        let attested_block_root = block_parent_root;
+
+        let attested_block = store.get_blinded_block(attested_block_root)?.ok_or(
+            BeaconChainError::DBInconsistent(format!(
+                "Block not available {:?}",
+                attested_block_root
+            )),
+        )?;
+
+        let cached_parts = self.get_or_compute_prev_block_cache(
+            store.clone(),
+            attested_block_root,
+            &attested_block.state_root(),
+            attested_block.slot(),
+        )?;
+
+        let attested_slot = attested_block.slot();
+
+        // Spec: Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest
+        // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice
+        let is_latest_optimistic = match &self.latest_optimistic_update.read().clone() {
+            Some(latest_optimistic_update) => {
+                is_latest_optimistic_update(latest_optimistic_update, attested_slot, signature_slot)
+            }
+            None => true,
+        };
+        if is_latest_optimistic {
+            // can create an optimistic update that is more recent
+            *self.latest_optimistic_update.write() = Some(LightClientOptimisticUpdate {
+                attested_header: block_to_light_client_header(attested_block.message()),
+                sync_aggregate: sync_aggregate.clone(),
+                signature_slot,
+            });
+        };
+
+        // Spec: Full nodes SHOULD provide the LightClientFinalityUpdate with the highest
+        // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice
+        let is_latest_finality = match &self.latest_finality_update.read().clone() {
+            Some(latest_finality_update) => {
+                is_latest_finality_update(latest_finality_update, attested_slot, signature_slot)
+            }
+            None => true,
+        };
+        if is_latest_finality & !cached_parts.finalized_block_root.is_zero() {
+            // Immediately after checkpoint sync the finalized block may not be available yet.
+            if let Some(finalized_block) =
+                store.get_blinded_block(&cached_parts.finalized_block_root)?
+            {
+                *self.latest_finality_update.write() = Some(LightClientFinalityUpdate {
+                    // TODO: may want to cache this result from latest_optimistic_update if producing a
+                    // lightclient header becomes expensive
+                    attested_header: block_to_light_client_header(attested_block.message()),
+                    finalized_header: block_to_light_client_header(finalized_block.message()),
+                    finality_branch: cached_parts.finality_branch.clone(),
+                    sync_aggregate: sync_aggregate.clone(),
+                    signature_slot,
+                });
+            } else {
+                debug!(
+                    log,
+                    "Finalized block not available in store for lightclient server";
+                    "finalized_block_root" => format!("{}", cached_parts.finalized_block_root),
+                );
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Retrieves prev block cached data from cache. If not present, re-computes it by retrieving
+    /// the parent state and inserts an entry into the cache.
+    ///
+    /// In a separate function since the FnOnce of get_or_insert cannot be fallible.
+    fn get_or_compute_prev_block_cache(
+        &self,
+        store: BeaconStore,
+        block_root: &Hash256,
+        block_state_root: &Hash256,
+        block_slot: Slot,
+    ) -> Result {
+        // Attempt to get the value from the cache first.
+        if let Some(cached_parts) = self.prev_block_cache.lock().get(block_root) {
+            return Ok(cached_parts.clone());
+        }
+
+        // Compute the value, handling potential errors.
+        let mut state = store
+            .get_state(block_state_root, Some(block_slot))?
+            .ok_or_else(|| {
+                BeaconChainError::DBInconsistent(format!("Missing state {:?}", block_state_root))
+            })?;
+        let new_value = LightclientCachedData::from_state(&mut state)?;
+
+        // Insert value and return owned
+        self.prev_block_cache
+            .lock()
+            .put(*block_root, new_value.clone());
+        Ok(new_value)
+    }
+
+    pub fn get_latest_finality_update(&self) -> Option> {
+        self.latest_finality_update.read().clone()
+    }
+
+    pub fn get_latest_optimistic_update(&self) -> Option> {
+        self.latest_optimistic_update.read().clone()
+    }
+}
+
+impl Default for LightclientServerCache {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+type FinalityBranch = FixedVector;
+
+#[derive(Clone)]
+struct LightclientCachedData {
+    finality_branch: FinalityBranch,
+    finalized_block_root: Hash256,
+}
+
+impl LightclientCachedData {
+    fn from_state(state: &mut BeaconState) -> Result {
+        Ok(Self {
+            finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(),
+            finalized_block_root: state.finalized_checkpoint().root,
+        })
+    }
+}
+
+// Implements spec prioritization rules:
+// > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot)
+//
+// ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update
+fn is_latest_finality_update(
+    prev: &LightClientFinalityUpdate,
+    attested_slot: Slot,
+    signature_slot: Slot,
+) -> bool {
+    if attested_slot > prev.attested_header.beacon.slot {
+        true
+    } else {
+        attested_slot == prev.attested_header.beacon.slot && signature_slot > prev.signature_slot
+    }
+}
+
+// Implements spec prioritization rules:
+// > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot)
+//
+// ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
+fn
is_latest_optimistic_update( + prev: &LightClientOptimisticUpdate, + attested_slot: Slot, + signature_slot: Slot, +) -> bool { + if attested_slot > prev.attested_header.beacon.slot { + true + } else { + attested_slot == prev.attested_header.beacon.slot && signature_slot > prev.signature_slot + } +} + +fn block_to_light_client_header( + block: BeaconBlockRef>, +) -> LightClientHeader { + // TODO: make fork aware + LightClientHeader { + beacon: block.block_header(), + } +} diff --git a/beacon_node/client/src/broadcast_lightclient_updates.rs b/beacon_node/client/src/broadcast_lightclient_updates.rs new file mode 100644 index 00000000000..19ecb3c4ed4 --- /dev/null +++ b/beacon_node/client/src/broadcast_lightclient_updates.rs @@ -0,0 +1,30 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes, LightclientProducerEvent}; +use slog::{error, Logger}; +use tokio::sync::mpsc::Receiver; + +// Each LightclientProducerEvent is ~200 bytes. With the lightclient server producing only recent +// updates it is okay to drop some events in case of overloading. In normal network conditions +// there's one event emitted per block at most every 12 seconds, while consuming the event should +// take a few miliseconds. 32 is a small enough arbitrary number. +pub(crate) const LIGHTCLIENT_SERVER_CHANNEL_CAPACITY: usize = 32; + +pub async fn compute_lightclient_updates( + chain: &BeaconChain, + mut lightclient_server_rv: Receiver>, + log: &Logger, +) { + // lightclient_server_rv is Some if lightclient flag is enabled + // + // Should only receive events for recent blocks, import_block filters by blocks close to clock. + // + // Intents to process SyncAggregates of all recent blocks sequentially, without skipping. + // Uses a bounded receiver, so may drop some SyncAggregates if very overloaded. This is okay + // since only the most recent updates have value. 
+ while let Some((block_root, slot, sync_aggregate)) = lightclient_server_rv.recv().await { + chain + .recompute_and_cache_lightclient_updates((block_root, slot, sync_aggregate)) + .unwrap_or_else(|e| { + error!(log, "error computing lightclient updates {:?}", e); + }) + } +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index cedf347b9a8..65d063c2a01 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,4 +1,7 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella; +use crate::broadcast_lightclient_updates::{ + compute_lightclient_updates, LIGHTCLIENT_SERVER_CHANNEL_CAPACITY, +}; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; @@ -6,6 +9,7 @@ use beacon_chain::data_availability_checker::start_availability_cache_maintenanc use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; +use beacon_chain::LightclientProducerEvent; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::{CachingEth1Backend, Eth1Chain}, @@ -35,6 +39,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; +use tokio::sync::mpsc::Receiver; use tokio::sync::oneshot; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, @@ -76,6 +81,7 @@ pub struct ClientBuilder { slasher: Option>>, beacon_processor_config: Option, beacon_processor_channels: Option>, + lightclient_server_rv: Option>>, eth_spec_instance: T::EthSpec, } @@ -111,6 +117,7 @@ where eth_spec_instance, beacon_processor_config: None, beacon_processor_channels: None, + lightclient_server_rv: None, } } @@ -199,6 +206,16 @@ where builder }; + let builder = if config.network.enable_light_client_server { + let (tx, rv) = tokio::sync::mpsc::channel::>( + LIGHTCLIENT_SERVER_CHANNEL_CAPACITY, + ); + self.lightclient_server_rv = Some(rv); + builder.lightclient_server_tx(tx) + } else { + builder + }; + let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false); // If the client is expect to resume but there's no beacon chain in the database, @@ -816,7 +833,7 @@ where } // Spawn a service to publish BLS to execution changes at the Capella fork. 
- if let Some(network_senders) = self.network_senders { + if let Some(network_senders) = self.network_senders.clone() { let inner_chain = beacon_chain.clone(); let broadcast_context = runtime_context.service_context("addr_bcast".to_string()); @@ -833,6 +850,21 @@ where "addr_broadcast", ); } + + // Spawn service to publish lightclient updates at some interval into the slot + if let Some(lightclient_server_rv) = self.lightclient_server_rv { + let inner_chain = beacon_chain.clone(); + let broadcast_context = + runtime_context.service_context("lcserv_bcast".to_string()); + let log = broadcast_context.log().clone(); + broadcast_context.executor.spawn( + async move { + compute_lightclient_updates(&inner_chain, lightclient_server_rv, &log) + .await + }, + "lcserv_broadcast", + ); + } } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 399aa06511e..55f9b1dda63 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,6 +1,7 @@ extern crate slog; mod address_change_broadcast; +mod broadcast_lightclient_updates; pub mod config; mod metrics; mod notifier; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5b00a80bdf0..7a5a302389b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2471,9 +2471,8 @@ pub fn serve( accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { let update = chain - .latest_seen_optimistic_update - .lock() - .clone() + .lightclient_server_cache + .get_latest_optimistic_update() .ok_or_else(|| { warp_utils::reject::custom_not_found( "No LightClientOptimisticUpdate is available".to_string(), @@ -2518,9 +2517,8 @@ pub fn serve( accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { let update = chain - .latest_seen_finality_update - .lock() - .clone() + .lightclient_server_cache + .get_latest_finality_update() .ok_or_else(|| { warp_utils::reject::custom_not_found( "No LightClientFinalityUpdate is available".to_string(), diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d5fa50ba219..24e57eb8424 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1677,7 +1677,10 @@ impl ApiTester { Err(e) => panic!("query failed incorrectly: {e:?}"), }; - let expected = self.chain.latest_seen_optimistic_update.lock().clone(); + let expected = self + .chain + .lightclient_server_cache + .get_latest_optimistic_update(); assert_eq!(result, expected); self @@ -1693,7 +1696,10 @@ impl ApiTester { Err(e) => panic!("query failed incorrectly: {e:?}"), }; - let expected = self.chain.latest_seen_finality_update.lock().clone(); + let expected = self + .chain + .lightclient_server_cache + .get_latest_finality_update(); assert_eq!(result, expected); self From a5ba62da2caa789941ed4354c35449eb7fef2efb Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 11 Dec 2023 17:32:54 +0100 Subject: [PATCH 02/16] Review PR --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 31 +++++++++-------- beacon_node/beacon_chain/src/builder.rs | 6 ++-- beacon_node/beacon_chain/src/chain_config.rs | 3 ++ beacon_node/beacon_chain/src/lib.rs | 2 +- ...ght_client_finality_update_verification.rs | 34 ++++++------------- ...t_client_optimistic_update_verification.rs | 17 ++++------ ...s_cache.rs => lightclient_server_cache.rs} | 4 
++- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/builder.rs | 6 ++-- ...ates.rs => compute_lightclient_updates.rs} | 13 ++++--- beacon_node/client/src/lib.rs | 2 +- .../gossip_methods.rs | 2 +- beacon_node/src/config.rs | 4 +++ 14 files changed, 61 insertions(+), 65 deletions(-) rename beacon_node/beacon_chain/src/{lightclient_proofs_cache.rs => lightclient_server_cache.rs} (97%) rename beacon_node/client/src/{broadcast_lightclient_updates.rs => compute_lightclient_updates.rs} (69%) diff --git a/Cargo.lock b/Cargo.lock index 9c1b591349b..76f655ce6d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1075,6 +1075,7 @@ dependencies = [ "eth2", "eth2_config", "execution_layer", + "futures", "genesis", "http_api", "http_metrics", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4a97d6e77fe..9ecd175c788 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -38,7 +38,7 @@ use crate::light_client_finality_update_verification::{ use crate::light_client_optimistic_update_verification::{ Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate, }; -use crate::lightclient_proofs_cache::LightclientServerCache; +use crate::lightclient_server_cache::LightclientServerCache; use crate::migrate::BackgroundMigrator; use crate::naive_aggregation_pool::{ AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, @@ -466,8 +466,7 @@ pub struct BeaconChain { /// A cache used to produce lightclient server messages pub lightclient_server_cache: LightclientServerCache, /// Sender to signal the lightclient server to produce new updates - pub lightclient_server_tx: - Option>>, + pub lightclient_server_tx: Option>>, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, @@ -3452,15 +3451,17 @@ impl BeaconChain { // compute state proofs for light client updates before inserting the state into the // snapshot cache. - self.lightclient_server_cache - .cache_state_data( - &self.spec, block, block_root, - // mutable reference on the state is needed to compute merkle proofs - &mut state, - ) - .unwrap_or_else(|e| { - error!(self.log, "error caching lightclient data {:?}", e); - }); + if self.config.enable_lightclient_server { + self.lightclient_server_cache + .cache_state_data( + &self.spec, block, block_root, + // mutable reference on the state is needed to compute merkle proofs + &mut state, + ) + .unwrap_or_else(|e| { + error!(self.log, "error caching lightclient data {:?}", e); + }); + } self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) @@ -3835,8 +3836,10 @@ impl BeaconChain { // Do not trigger lightclient server update producer for old blocks, to extra work // during sync. 
- if block_delay_total < self.slot_clock.slot_duration() * 32 { - if let Some(lightclient_server_tx) = self.lightclient_server_tx.clone() { + if self.config.enable_lightclient_server + && block_delay_total < self.slot_clock.slot_duration() * 32 + { + if let Some(mut lightclient_server_tx) = self.lightclient_server_tx.clone() { if let Ok(sync_aggregate) = block.body().sync_aggregate() { if let Err(e) = lightclient_server_tx.try_send(( block.parent_root(), diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 0ce11bddf53..f73e338cc89 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -8,7 +8,7 @@ use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; -use crate::lightclient_proofs_cache::LightclientServerCache; +use crate::lightclient_server_cache::LightclientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; @@ -90,7 +90,7 @@ pub struct BeaconChainBuilder { event_handler: Option>, slot_clock: Option, shutdown_sender: Option>, - lightclient_server_tx: Option>>, + lightclient_server_tx: Option>>, head_tracker: Option, validator_pubkey_cache: Option>, spec: ChainSpec, @@ -611,7 +611,7 @@ where /// Sets a `Sender` to allow the beacon chain to trigger lightclient update production. pub fn lightclient_server_tx( mut self, - sender: tokio::sync::mpsc::Sender>, + sender: Sender>, ) -> Self { self.lightclient_server_tx = Some(sender); self diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index bccc3732c3d..020bd5e88c0 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -83,6 +83,8 @@ pub struct ChainConfig { pub progressive_balances_mode: ProgressiveBalancesMode, /// Number of epochs between each migration of data from the hot database to the freezer. 
pub epochs_per_migration: u64, + /// When set to true Light client server computes and caches state proofs for serving updates + pub enable_lightclient_server: bool, } impl Default for ChainConfig { @@ -114,6 +116,7 @@ impl Default for ChainConfig { always_prepare_payload: false, progressive_balances_mode: ProgressiveBalancesMode::Checked, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, + enable_lightclient_server: false, } } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 23056c84ca3..1bebc25fb5c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -30,7 +30,7 @@ pub mod historical_blocks; pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; -mod lightclient_proofs_cache; +mod lightclient_server_cache; pub mod merge_readiness; pub mod metrics; pub mod migrate; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 6beeb06ca3c..fb7cf530a67 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -65,35 +65,23 @@ impl VerifiedLightClientFinalityUpdate { chain: &BeaconChain, seen_timestamp: Duration, ) -> Result { - let latest_finality_update = chain - .lightclient_server_cache - .get_latest_finality_update() - .ok_or(Error::FailedConstructingUpdate)?; - - // verify that no other finality_update with a lower or equal - // finalized_header.slot was already forwarded on the network - if rcv_finality_update.finalized_header.beacon.slot - <= latest_finality_update.finalized_header.beacon.slot - { - return Err(Error::FinalityUpdateAlreadySeen); - } - // verify that enough time has passed for the block to have been propagated let start_time = chain .slot_clock - .start_of(rcv_finality_update.signature_slot); + .start_of(rcv_finality_update.signature_slot) + .ok_or(Error::SigSlotStartIsNone)?; let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); - match start_time { - Some(time) => { - if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() - < time + one_third_slot_duration - { - return Err(Error::TooEarly); - } - } - None => return Err(Error::SigSlotStartIsNone), + if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() + < start_time + one_third_slot_duration + { + return Err(Error::TooEarly); } + let latest_finality_update = chain + .lightclient_server_cache + .get_latest_finality_update() + .ok_or(Error::FailedConstructingUpdate)?; + // verify that the gossiped finality update is the same as the locally constructed one. 
if latest_finality_update != rcv_finality_update { return Err(Error::InvalidLightClientFinalityUpdate); diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 7d5e96d008b..89e39801695 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -72,17 +72,13 @@ impl VerifiedLightClientOptimisticUpdate { // verify that enough time has passed for the block to have been propagated let start_time = chain .slot_clock - .start_of(rcv_optimistic_update.signature_slot); + .start_of(rcv_optimistic_update.signature_slot) + .ok_or(Error::SigSlotStartIsNone)?; let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); - match start_time { - Some(time) => { - if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() - < time + one_third_slot_duration - { - return Err(Error::TooEarly); - } - } - None => return Err(Error::SigSlotStartIsNone), + if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() + < start_time + one_third_slot_duration + { + return Err(Error::TooEarly); } let latest_optimistic_update = chain @@ -98,7 +94,6 @@ impl VerifiedLightClientOptimisticUpdate { let parent_root = rcv_optimistic_update.attested_header.beacon.parent_root; Ok(Self { light_client_optimistic_update: rcv_optimistic_update, - // TODO: why is the parent_root necessary here? parent_root, seen_timestamp, }) diff --git a/beacon_node/beacon_chain/src/lightclient_proofs_cache.rs b/beacon_node/beacon_chain/src/lightclient_server_cache.rs similarity index 97% rename from beacon_node/beacon_chain/src/lightclient_proofs_cache.rs rename to beacon_node/beacon_chain/src/lightclient_server_cache.rs index fc43b306c3d..6f6e4e028e1 100644 --- a/beacon_node/beacon_chain/src/lightclient_proofs_cache.rs +++ b/beacon_node/beacon_chain/src/lightclient_server_cache.rs @@ -22,7 +22,9 @@ pub struct LightclientServerCache { /// Tracks a single global latest finality update out of all imported blocks. /// /// TODO: Active discussion with @etan-status if this cache should be fork aware to return - /// latest canonical instead of global latest. + /// latest canonical (update with highest signature slot, where its attested header is part of + /// the head chain) instead of global latest (update with highest signature slot, out of all + /// branches). latest_finality_update: RwLock>>, /// Tracks a single global latest optimistic update out of all imported blocks. 
latest_optimistic_update: RwLock>>, diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 26c53154e3b..0160cad4b20 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -25,6 +25,7 @@ serde = { workspace = true } error-chain = { workspace = true } slog = { workspace = true } tokio = { workspace = true } +futures = { workspace = true } dirs = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 65d063c2a01..9c58783ff09 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,5 +1,5 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella; -use crate::broadcast_lightclient_updates::{ +use crate::compute_lightclient_updates::{ compute_lightclient_updates, LIGHTCLIENT_SERVER_CHANNEL_CAPACITY, }; use crate::config::{ClientGenesis, Config as ClientConfig}; @@ -27,6 +27,7 @@ use eth2::{ BeaconNodeHttpClient, Error as ApiError, Timeouts, }; use execution_layer::ExecutionLayer; +use futures::channel::mpsc::Receiver; use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; @@ -39,7 +40,6 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; -use tokio::sync::mpsc::Receiver; use tokio::sync::oneshot; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, @@ -207,7 +207,7 @@ where }; let builder = if config.network.enable_light_client_server { - let (tx, rv) = tokio::sync::mpsc::channel::>( + let (tx, rv) = futures::channel::mpsc::channel::>( LIGHTCLIENT_SERVER_CHANNEL_CAPACITY, ); self.lightclient_server_rv = Some(rv); diff --git a/beacon_node/client/src/broadcast_lightclient_updates.rs b/beacon_node/client/src/compute_lightclient_updates.rs similarity index 69% rename from beacon_node/client/src/broadcast_lightclient_updates.rs rename to beacon_node/client/src/compute_lightclient_updates.rs index 19ecb3c4ed4..30a03e8e21f 100644 --- a/beacon_node/client/src/broadcast_lightclient_updates.rs +++ b/beacon_node/client/src/compute_lightclient_updates.rs @@ -1,11 +1,12 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, LightclientProducerEvent}; +use futures::channel::mpsc::Receiver; +use futures::StreamExt; use slog::{error, Logger}; -use tokio::sync::mpsc::Receiver; -// Each LightclientProducerEvent is ~200 bytes. With the lightclient server producing only recent +// Each `LightclientProducerEvent` is ~200 bytes. With the lightclient server producing only recent // updates it is okay to drop some events in case of overloading. In normal network conditions // there's one event emitted per block at most every 12 seconds, while consuming the event should -// take a few miliseconds. 32 is a small enough arbitrary number. +// take a few milliseconds. 32 is a small enough arbitrary number. pub(crate) const LIGHTCLIENT_SERVER_CHANNEL_CAPACITY: usize = 32; pub async fn compute_lightclient_updates( @@ -13,16 +14,14 @@ pub async fn compute_lightclient_updates( mut lightclient_server_rv: Receiver>, log: &Logger, ) { - // lightclient_server_rv is Some if lightclient flag is enabled - // // Should only receive events for recent blocks, import_block filters by blocks close to clock. 
// // Intents to process SyncAggregates of all recent blocks sequentially, without skipping. // Uses a bounded receiver, so may drop some SyncAggregates if very overloaded. This is okay // since only the most recent updates have value. - while let Some((block_root, slot, sync_aggregate)) = lightclient_server_rv.recv().await { + while let Some(event) = lightclient_server_rv.next().await { chain - .recompute_and_cache_lightclient_updates((block_root, slot, sync_aggregate)) + .recompute_and_cache_lightclient_updates(event) .unwrap_or_else(|e| { error!(log, "error computing lightclient updates {:?}", e); }) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 55f9b1dda63..677d382adce 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,7 +1,7 @@ extern crate slog; mod address_change_broadcast; -mod broadcast_lightclient_updates; +mod compute_lightclient_updates; pub mod config; mod metrics; mod notifier; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 82daf74efe0..577fed10a0c 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1697,7 +1697,7 @@ impl NetworkBeaconProcessor { self.gossip_penalize_peer( peer_id, - PeerAction::LowToleranceError, + PeerAction::HighToleranceError, "light_client_gossip_error", ); } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9ceab47f336..1771e5acd66 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -156,6 +156,10 @@ pub fn get_config( cli_args.is_present("light-client-server"); } + if cli_args.is_present("light-client-server") { + client_config.chain.enable_lightclient_server = true; + } + if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? 
{ client_config.chain.shuffling_cache_size = cache_size; } From 63729737bc58d2af30f356c6351d5741ce78b869 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 12 Dec 2023 13:00:09 +0100 Subject: [PATCH 03/16] Review PR --- beacon_node/beacon_chain/src/lib.rs | 1 + ...ght_client_finality_update_verification.rs | 22 +----- ...t_client_optimistic_update_verification.rs | 24 +----- .../src/lightclient_server_cache.rs | 3 +- .../gossip_methods.rs | 74 +------------------ lighthouse/tests/beacon_node.rs | 1 + 6 files changed, 7 insertions(+), 118 deletions(-) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 930c952557b..49158c77657 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -61,6 +61,7 @@ pub use self::beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, ForkChoiceError, LightclientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index fb7cf530a67..8075439963e 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -3,7 +3,7 @@ use derivative::Derivative; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; -use types::{light_client_update::Error as LightClientUpdateError, LightClientFinalityUpdate}; +use types::LightClientFinalityUpdate; /// Returned when a light client finality update was not successfully verified. It might not have been verified for /// two reasons: @@ -14,8 +14,6 @@ use types::{light_client_update::Error as LightClientUpdateError, LightClientFin /// (the `BeaconChainError` variant) #[derive(Debug, AsRefStr)] pub enum Error { - /// Light client finality update message with a lower or equal finalized_header slot already forwarded. - FinalityUpdateAlreadySeen, /// The light client finality message was received is prior to one-third of slot duration passage. (with /// respect to the gossip clock disparity and slot clock duration). /// @@ -24,29 +22,11 @@ pub enum Error { /// Assuming the local clock is correct, the peer has sent an invalid message. TooEarly, /// Light client finality update message does not match the locally constructed one. - /// - /// ## Peer Scoring - /// InvalidLightClientFinalityUpdate, /// Signature slot start time is none. SigSlotStartIsNone, /// Failed to construct a LightClientFinalityUpdate from state. FailedConstructingUpdate, - /// Beacon chain error occurred. - BeaconChainError(BeaconChainError), - LightClientUpdateError(LightClientUpdateError), -} - -impl From for Error { - fn from(e: BeaconChainError) -> Self { - Error::BeaconChainError(e) - } -} - -impl From for Error { - fn from(e: LightClientUpdateError) -> Self { - Error::LightClientUpdateError(e) - } } /// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network. 
diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs
index 89e39801695..c41c968bce1 100644
--- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs
+++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs
@@ -4,7 +4,7 @@ use eth2::types::Hash256;
 use slot_clock::SlotClock;
 use std::time::Duration;
 use strum::AsRefStr;
-use types::{light_client_update::Error as LightClientUpdateError, LightClientOptimisticUpdate};
+use types::LightClientOptimisticUpdate;

 /// Returned when a light client optimistic update was not successfully verified. It might not have been verified for
 /// two reasons:
@@ -15,8 +15,6 @@ use types::{light_client_update::Error as LightClientOpt
 /// (the `BeaconChainError` variant)
 #[derive(Debug, AsRefStr)]
 pub enum Error {
-    /// Light client optimistic update message with a lower or equal optimistic_header slot already forwarded.
-    OptimisticUpdateAlreadySeen,
     /// The light client optimistic message was received is prior to one-third of slot duration passage. (with
     /// respect to the gossip clock disparity and slot clock duration).
     ///
@@ -25,31 +23,11 @@ pub enum Error {
     /// Assuming the local clock is correct, the peer has sent an invalid message.
     TooEarly,
     /// Light client optimistic update message does not match the locally constructed one.
-    ///
-    /// ## Peer Scoring
-    ///
     InvalidLightClientOptimisticUpdate,
     /// Signature slot start time is none.
     SigSlotStartIsNone,
    /// Failed to construct a LightClientOptimisticUpdate from state.
     FailedConstructingUpdate,
-    /// Unknown block with parent root.
-    UnknownBlockParentRoot(Hash256),
-    /// Beacon chain error occurred.
-    BeaconChainError(BeaconChainError),
-    LightClientUpdateError(LightClientUpdateError),
-}
-
-impl From for Error {
-    fn from(e: BeaconChainError) -> Self {
-        Error::BeaconChainError(e)
-    }
-}
-
-impl From for Error {
-    fn from(e: LightClientUpdateError) -> Self {
-        Error::LightClientUpdateError(e)
-    }
 }

 /// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network.
diff --git a/beacon_node/beacon_chain/src/lightclient_server_cache.rs b/beacon_node/beacon_chain/src/lightclient_server_cache.rs
index 6f6e4e028e1..4e8269d4f2e 100644
--- a/beacon_node/beacon_chain/src/lightclient_server_cache.rs
+++ b/beacon_node/beacon_chain/src/lightclient_server_cache.rs
@@ -42,8 +42,7 @@ impl LightclientServerCache {
     }

     /// Compute and cache state proofs for later production of light-client messages. Does not
-    /// trigger block replay. May result in multiple DB write ops.
-    /// TODO: Should return StoreOps to batch with rest of db operations?
+    /// trigger block replay.
pub fn cache_state_data( &self, spec: &ChainSpec, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 9d25d079cfa..29449bbe105 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1675,15 +1675,7 @@ impl NetworkBeaconProcessor { "light_client_gossip_error", ); } - LightClientFinalityUpdateError::FinalityUpdateAlreadySeen => debug!( - self.log, - "Light client finality update already seen"; - "peer" => %peer_id, - "error" => ?e, - ), - LightClientFinalityUpdateError::BeaconChainError(_) - | LightClientFinalityUpdateError::LightClientUpdateError(_) - | LightClientFinalityUpdateError::SigSlotStartIsNone + LightClientFinalityUpdateError::SigSlotStartIsNone | LightClientFinalityUpdateError::FailedConstructingUpdate => debug!( self.log, "Light client error constructing finality update"; @@ -1720,56 +1712,6 @@ impl NetworkBeaconProcessor { } Err(e) => { match e { - LightClientOptimisticUpdateError::UnknownBlockParentRoot(parent_root) => { - metrics::inc_counter( - &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES, - ); - debug!( - self.log, - "Optimistic update for unknown block"; - "peer_id" => %peer_id, - "parent_root" => ?parent_root - ); - - if let Some(sender) = reprocess_tx { - let processor = self.clone(); - let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( - QueuedLightClientUpdate { - parent_root, - process_fn: Box::new(move || { - processor.process_gossip_optimistic_update( - message_id, - peer_id, - light_client_optimistic_update, - None, // Do not reprocess this message again. - seen_timestamp, - ) - }), - }, - ); - - if sender.try_send(msg).is_err() { - error!( - self.log, - "Failed to send optimistic update for re-processing"; - ) - } - } else { - debug!( - self.log, - "Not sending light client update because it had been reprocessed"; - "peer_id" => %peer_id, - "parent_root" => ?parent_root - ); - - self.propagate_validation_result( - message_id, - peer_id, - MessageAcceptance::Ignore, - ); - } - return; - } LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { metrics::register_optimistic_update_error(&e); @@ -1801,19 +1743,7 @@ impl NetworkBeaconProcessor { "light_client_gossip_error", ); } - LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => { - metrics::register_optimistic_update_error(&e); - - debug!( - self.log, - "Light client optimistic update already seen"; - "peer" => %peer_id, - "error" => ?e, - ) - } - LightClientOptimisticUpdateError::BeaconChainError(_) - | LightClientOptimisticUpdateError::LightClientUpdateError(_) - | LightClientOptimisticUpdateError::SigSlotStartIsNone + LightClientOptimisticUpdateError::SigSlotStartIsNone | LightClientOptimisticUpdateError::FailedConstructingUpdate => { metrics::register_optimistic_update_error(&e); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index fd74b1b5b92..eb172ba685f 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2359,6 +2359,7 @@ fn light_client_server_enabled() { .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enable_light_client_server, true); + assert_eq!(config.chain.enable_light_client_server, true); }); } From 6dc45c4d8883d70f7f83c7f1601cdd7fa348a33a Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 12 
Dec 2023 13:25:27 +0100 Subject: [PATCH 04/16] consistent naming --- beacon_node/beacon_chain/src/beacon_chain.rs | 34 +++++++++---------- beacon_node/beacon_chain/src/builder.rs | 20 +++++------ beacon_node/beacon_chain/src/chain_config.rs | 4 +-- beacon_node/beacon_chain/src/lib.rs | 4 +-- ...ght_client_finality_update_verification.rs | 4 +-- ...t_client_optimistic_update_verification.rs | 4 +-- ..._cache.rs => light_client_server_cache.rs} | 22 ++++++------ beacon_node/client/src/builder.rs | 24 ++++++------- ...tes.rs => compute_light_client_updates.rs} | 16 ++++----- beacon_node/client/src/lib.rs | 2 +- beacon_node/http_api/src/lib.rs | 4 +-- beacon_node/http_api/tests/tests.rs | 4 +-- .../lighthouse_network/src/rpc/methods.rs | 4 +-- .../gossip_methods.rs | 4 +-- .../src/network_beacon_processor/mod.rs | 4 +-- beacon_node/network/src/router.rs | 2 +- beacon_node/src/config.rs | 2 +- consensus/types/src/light_client_bootstrap.rs | 2 +- .../types/src/light_client_finality_update.rs | 2 +- 19 files changed, 79 insertions(+), 83 deletions(-) rename beacon_node/beacon_chain/src/{lightclient_server_cache.rs => light_client_server_cache.rs} (94%) rename beacon_node/client/src/{compute_lightclient_updates.rs => compute_light_client_updates.rs} (59%) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 29d054596ab..7bc7f733ef8 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -38,7 +38,7 @@ use crate::light_client_finality_update_verification::{ use crate::light_client_optimistic_update_verification::{ Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate, }; -use crate::lightclient_server_cache::LightclientServerCache; +use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::BackgroundMigrator; use crate::naive_aggregation_pool::{ AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, @@ -339,7 +339,7 @@ struct PartialBeaconBlock { bls_to_execution_changes: Vec, } -pub type LightclientProducerEvent = (Hash256, Slot, SyncAggregate); +pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); pub type BeaconForkChoice = ForkChoice< BeaconForkChoiceStore< @@ -462,10 +462,10 @@ pub struct BeaconChain { pub block_times_cache: Arc>, /// A cache used to track pre-finalization block roots for quick rejection. pub pre_finalization_block_cache: PreFinalizationBlockCache, - /// A cache used to produce lightclient server messages - pub lightclient_server_cache: LightclientServerCache, - /// Sender to signal the lightclient server to produce new updates - pub lightclient_server_tx: Option>>, + /// A cache used to produce light_client server messages + pub light_client_server_cache: LightClientServerCache, + /// Sender to signal the light_client server to produce new updates + pub light_client_server_tx: Option>>, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. 
pub shutdown_sender: Sender, @@ -1330,11 +1330,11 @@ impl BeaconChain { self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots) } - pub fn recompute_and_cache_lightclient_updates( + pub fn recompute_and_cache_light_client_updates( &self, - (parent_root, slot, sync_aggregate): LightclientProducerEvent, + (parent_root, slot, sync_aggregate): LightClientProducerEvent, ) -> Result<(), Error> { - self.lightclient_server_cache.recompute_and_cache_updates( + self.light_client_server_cache.recompute_and_cache_updates( &self.log, self.store.clone(), &parent_root, slot, @@ -3503,15 +3503,15 @@ impl BeaconChain { // compute state proofs for light client updates before inserting the state into the // snapshot cache. - if self.config.enable_lightclient_server { - self.lightclient_server_cache + if self.config.enable_light_client_server { + self.light_client_server_cache .cache_state_data( &self.spec, block, block_root, // mutable reference on the state is needed to compute merkle proofs &mut state, ) .unwrap_or_else(|e| { - error!(self.log, "error caching lightclient data {:?}", e); + error!(self.log, "error caching light_client data {:?}", e); }); } @@ -3886,21 +3886,21 @@ impl BeaconChain { } } - // Do not trigger lightclient server update producer for old blocks, to extra work + // Do not trigger light_client server update producer for old blocks, to avoid extra work // during sync. - if self.config.enable_lightclient_server + if self.config.enable_light_client_server && block_delay_total < self.slot_clock.slot_duration() * 32 { - if let Some(mut lightclient_server_tx) = self.lightclient_server_tx.clone() { + if let Some(mut light_client_server_tx) = self.light_client_server_tx.clone() { if let Ok(sync_aggregate) = block.body().sync_aggregate() { - if let Err(e) = lightclient_server_tx.try_send(( + if let Err(e) = light_client_server_tx.try_send(( block.parent_root(), block.slot(), sync_aggregate.clone(), )) { warn!( self.log, - "Failed to send lightclient server event"; + "Failed to send light_client server event"; "error" => ?e ); } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index f73e338cc89..92c334894fa 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,5 +1,5 @@ use crate::beacon_chain::{ - CanonicalHead, LightclientProducerEvent, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY, + CanonicalHead, LightClientProducerEvent, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY, }; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::data_availability_checker::DataAvailabilityChecker; @@ -8,7 +8,7 @@ use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; -use crate::lightclient_server_cache::LightclientServerCache; +use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; @@ -90,7 +90,7 @@ pub struct BeaconChainBuilder { event_handler: Option>, slot_clock: Option, shutdown_sender: Option>, - lightclient_server_tx: Option>>, + light_client_server_tx: Option>>, head_tracker: Option, validator_pubkey_cache: Option>, spec: ChainSpec, @@ -133,7 +133,7 @@ where event_handler: None, slot_clock: None, shutdown_sender: None, -
lightclient_server_tx: None, + light_client_server_tx: None, head_tracker: None, validator_pubkey_cache: None, spec: TEthSpec::default_spec(), @@ -608,12 +608,12 @@ where self } - /// Sets a `Sender` to allow the beacon chain to trigger lightclient update production. - pub fn lightclient_server_tx( + /// Sets a `Sender` to allow the beacon chain to trigger light_client update production. + pub fn light_client_server_tx( mut self, - sender: Sender>, + sender: Sender>, ) -> Self { - self.lightclient_server_tx = Some(sender); + self.light_client_server_tx = Some(sender); self } @@ -924,8 +924,8 @@ where validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), early_attester_cache: <_>::default(), - lightclient_server_cache: LightclientServerCache::new(), - lightclient_server_tx: self.lightclient_server_tx, + light_client_server_cache: LightClientServerCache::new(), + light_client_server_tx: self.light_client_server_tx, shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 50556084eea..23e17a6efad 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -84,7 +84,7 @@ pub struct ChainConfig { /// Number of epochs between each migration of data from the hot database to the freezer. pub epochs_per_migration: u64, /// When set to true Light client server computes and caches state proofs for serving updates - pub enable_lightclient_server: bool, + pub enable_light_client_server: bool, } impl Default for ChainConfig { @@ -116,7 +116,7 @@ impl Default for ChainConfig { always_prepare_payload: false, progressive_balances_mode: ProgressiveBalancesMode::Fast, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, - enable_lightclient_server: false, + enable_light_client_server: false, } } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 49158c77657..15c924f8c22 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -30,7 +30,7 @@ pub mod historical_blocks; pub mod kzg_utils; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; -mod lightclient_server_cache; +mod light_client_server_cache; pub mod merge_readiness; pub mod metrics; pub mod migrate; @@ -59,7 +59,7 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, LightclientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, + ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 8075439963e..35863aa05ff 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -1,4 +1,4 @@ -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainTypes}; use 
derivative::Derivative; use slot_clock::SlotClock; use std::time::Duration; @@ -58,7 +58,7 @@ impl VerifiedLightClientFinalityUpdate { } let latest_finality_update = chain - .lightclient_server_cache + .light_client_server_cache .get_latest_finality_update() .ok_or(Error::FailedConstructingUpdate)?; diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index c41c968bce1..6c48cb8ceff 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -1,4 +1,4 @@ -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainTypes}; use derivative::Derivative; use eth2::types::Hash256; use slot_clock::SlotClock; @@ -60,7 +60,7 @@ impl VerifiedLightClientOptimisticUpdate { } let latest_optimistic_update = chain - .lightclient_server_cache + .light_client_server_cache .get_latest_optimistic_update() .ok_or(Error::FailedConstructingUpdate)?; diff --git a/beacon_node/beacon_chain/src/lightclient_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs similarity index 94% rename from beacon_node/beacon_chain/src/lightclient_server_cache.rs rename to beacon_node/beacon_chain/src/light_client_server_cache.rs index 4e8269d4f2e..2d67b22ee37 100644 --- a/beacon_node/beacon_chain/src/lightclient_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -18,7 +18,7 @@ const PREV_BLOCK_CACHE_SIZE: usize = 32; /// requests. These messages include proofs on historical states, so on-demand computation is /// expensive. /// -pub struct LightclientServerCache { +pub struct LightClientServerCache { /// Tracks a single global latest finality update out of all imported blocks. /// /// TODO: Active discussion with @etan-status if this cache should be fork aware to return @@ -29,10 +29,10 @@ pub struct LightclientServerCache { /// Tracks a single global latest optimistic update out of all imported blocks. 
latest_optimistic_update: RwLock>>, /// Caches state proofs by block root - prev_block_cache: Mutex>, + prev_block_cache: Mutex>, } -impl LightclientServerCache { +impl LightClientServerCache { pub fn new() -> Self { Self { latest_finality_update: None.into(), @@ -57,7 +57,7 @@ impl LightclientServerCache { // Persist in memory cache for a descendent block - let cached_data = LightclientCachedData::from_state(block_post_state)?; + let cached_data = LightClientCachedData::from_state(block_post_state)?; self.prev_block_cache.lock().put(block_root, cached_data); Ok(()) @@ -124,7 +124,7 @@ impl LightclientServerCache { { *self.latest_finality_update.write() = Some(LightClientFinalityUpdate { // TODO: may want to cache this result from latest_optimistic_update if producing a - // lightclient header becomes expensive + // light_client header becomes expensive attested_header: block_to_light_client_header(attested_block.message()), finalized_header: block_to_light_client_header(finalized_block.message()), finality_branch: cached_parts.finality_branch.clone(), @@ -134,7 +134,7 @@ impl LightclientServerCache { } else { debug!( log, - "Finalized block not available in store for lightclient server"; + "Finalized block not available in store for light_client server"; "finalized_block_root" => format!("{}", cached_parts.finalized_block_root), ); } @@ -153,7 +153,7 @@ impl LightclientServerCache { block_root: &Hash256, block_state_root: &Hash256, block_slot: Slot, - ) -> Result { + ) -> Result { // Attempt to get the value from the cache first. if let Some(cached_parts) = self.prev_block_cache.lock().get(block_root) { return Ok(cached_parts.clone()); @@ -165,7 +165,7 @@ impl LightclientServerCache { .ok_or_else(|| { BeaconChainError::DBInconsistent(format!("Missing state {:?}", block_state_root)) })?; - let new_value = LightclientCachedData::from_state(&mut state)?; + let new_value = LightClientCachedData::from_state(&mut state)?; // Insert value and return owned self.prev_block_cache @@ -183,7 +183,7 @@ impl LightclientServerCache { } } -impl Default for LightclientServerCache { +impl Default for LightClientServerCache { fn default() -> Self { Self::new() } @@ -192,12 +192,12 @@ impl Default for LightclientServerCache { type FinalityBranch = FixedVector; #[derive(Clone)] -struct LightclientCachedData { +struct LightClientCachedData { finality_branch: FinalityBranch, finalized_block_root: Hash256, } -impl LightclientCachedData { +impl LightClientCachedData { fn from_state(state: &mut BeaconState) -> Result { Ok(Self { finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 50c644c9676..0e566a05112 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,6 +1,6 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella; -use crate::compute_lightclient_updates::{ - compute_lightclient_updates, LIGHTCLIENT_SERVER_CHANNEL_CAPACITY, +use crate::compute_light_client_updates::{ + compute_light_client_updates, LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY, }; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; @@ -9,7 +9,7 @@ use beacon_chain::data_availability_checker::start_availability_cache_maintenanc use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; -use 
beacon_chain::LightclientProducerEvent; +use beacon_chain::LightClientProducerEvent; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::{CachingEth1Backend, Eth1Chain}, @@ -81,7 +81,7 @@ pub struct ClientBuilder { slasher: Option>>, beacon_processor_config: Option, beacon_processor_channels: Option>, - lightclient_server_rv: Option>>, + light_client_server_rv: Option>>, eth_spec_instance: T::EthSpec, } @@ -117,7 +117,7 @@ where eth_spec_instance, beacon_processor_config: None, beacon_processor_channels: None, - lightclient_server_rv: None, + light_client_server_rv: None, } } @@ -207,11 +207,11 @@ where }; let builder = if config.network.enable_light_client_server { - let (tx, rv) = futures::channel::mpsc::channel::>( - LIGHTCLIENT_SERVER_CHANNEL_CAPACITY, + let (tx, rv) = futures::channel::mpsc::channel::>( + LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY, ); - self.lightclient_server_rv = Some(rv); - builder.lightclient_server_tx(tx) + self.light_client_server_rv = Some(rv); + builder.light_client_server_tx(tx) } else { builder }; @@ -849,15 +849,15 @@ where ); } - // Spawn service to publish lightclient updates at some interval into the slot - if let Some(lightclient_server_rv) = self.lightclient_server_rv { + // Spawn service to publish light_client updates at some interval into the slot + if let Some(light_client_server_rv) = self.light_client_server_rv { let inner_chain = beacon_chain.clone(); let broadcast_context = runtime_context.service_context("lcserv_bcast".to_string()); let log = broadcast_context.log().clone(); broadcast_context.executor.spawn( async move { - compute_lightclient_updates(&inner_chain, lightclient_server_rv, &log) + compute_light_client_updates(&inner_chain, light_client_server_rv, &log) .await }, "lcserv_broadcast", diff --git a/beacon_node/client/src/compute_lightclient_updates.rs b/beacon_node/client/src/compute_light_client_updates.rs similarity index 59% rename from beacon_node/client/src/compute_lightclient_updates.rs rename to beacon_node/client/src/compute_light_client_updates.rs index 30a03e8e21f..7adc05c98dd 100644 --- a/beacon_node/client/src/compute_lightclient_updates.rs +++ b/beacon_node/client/src/compute_light_client_updates.rs @@ -1,17 +1,17 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes, LightclientProducerEvent}; +use beacon_chain::{BeaconChain, BeaconChainTypes, LightClientProducerEvent}; use futures::channel::mpsc::Receiver; use futures::StreamExt; use slog::{error, Logger}; -// Each `LightclientProducerEvent` is ~200 bytes. With the lightclient server producing only recent +// Each `LightClientProducerEvent` is ~200 bytes. With the light_client server producing only recent // updates it is okay to drop some events in case of overloading. In normal network conditions // there's one event emitted per block at most every 12 seconds, while consuming the event should // take a few milliseconds. 32 is a small enough arbitrary number. -pub(crate) const LIGHTCLIENT_SERVER_CHANNEL_CAPACITY: usize = 32; +pub(crate) const LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY: usize = 32; -pub async fn compute_lightclient_updates( +pub async fn compute_light_client_updates( chain: &BeaconChain, - mut lightclient_server_rv: Receiver>, + mut light_client_server_rv: Receiver>, log: &Logger, ) { // Should only receive events for recent blocks, import_block filters by blocks close to clock. @@ -19,11 +19,11 @@ pub async fn compute_lightclient_updates( // Intents to process SyncAggregates of all recent blocks sequentially, without skipping. 
// Uses a bounded receiver, so may drop some SyncAggregates if very overloaded. This is okay // since only the most recent updates have value. - while let Some(event) = lightclient_server_rv.next().await { + while let Some(event) = light_client_server_rv.next().await { chain - .recompute_and_cache_lightclient_updates(event) + .recompute_and_cache_light_client_updates(event) .unwrap_or_else(|e| { - error!(log, "error computing lightclient updates {:?}", e); + error!(log, "error computing light_client updates {:?}", e); }) } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 677d382adce..2f14d87efc0 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,7 +1,7 @@ extern crate slog; mod address_change_broadcast; -mod compute_lightclient_updates; +mod compute_light_client_updates; pub mod config; mod metrics; mod notifier; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 897881c73bc..532fcc6990f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2421,7 +2421,7 @@ pub fn serve( accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { let update = chain - .lightclient_server_cache + .light_client_server_cache .get_latest_optimistic_update() .ok_or_else(|| { warp_utils::reject::custom_not_found( @@ -2467,7 +2467,7 @@ pub fn serve( accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { let update = chain - .lightclient_server_cache + .light_client_server_cache .get_latest_finality_update() .ok_or_else(|| { warp_utils::reject::custom_not_found( diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 5c0d3f233b7..1acac963fbb 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1707,7 +1707,7 @@ impl ApiTester { let expected = self .chain - .lightclient_server_cache + .light_client_server_cache .get_latest_optimistic_update(); assert_eq!(result, expected); @@ -1726,7 +1726,7 @@ impl ApiTester { let expected = self .chain - .lightclient_server_cache + .light_client_server_cache .get_latest_finality_update(); assert_eq!(result, expected); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9a6ad19ac58..3901630cccb 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -389,7 +389,7 @@ pub enum RPCResponse { /// A response to a get BLOBS_BY_RANGE request BlobsByRange(Arc>), - /// A response to a get LIGHTCLIENT_BOOTSTRAP request. + /// A response to a get LIGHT_CLIENT_BOOTSTRAP request. LightClientBootstrap(LightClientBootstrap), /// A response to a get BLOBS_BY_ROOT request. @@ -431,7 +431,7 @@ pub enum RPCCodedResponse { StreamTermination(ResponseTermination), } -/// Request a light_client_bootstrap for lightclients peers. +/// Request a light_client_bootstrap for light_clients peers. 
#[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct LightClientBootstrapRequest { pub root: Hash256, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 29449bbe105..42047e39df9 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -40,8 +40,7 @@ use types::{ use beacon_processor::{ work_reprocessing_queue::{ - QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, - ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedUnaggregate, ReprocessQueueMessage, }, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, }; @@ -1693,7 +1692,6 @@ impl NetworkBeaconProcessor { message_id: MessageId, peer_id: PeerId, light_client_optimistic_update: LightClientOptimisticUpdate, - reprocess_tx: Option>, seen_timestamp: Duration, ) { match self.chain.verify_optimistic_update_for_gossip( diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 67fc2fabb1e..733a7bc9258 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -348,12 +348,10 @@ impl NetworkBeaconProcessor { ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { - let reprocess_tx = processor.reprocess_tx.clone(); processor.process_gossip_optimistic_update( message_id, peer_id, light_client_optimistic_update, - Some(reprocess_tx), seen_timestamp, ) }; @@ -589,7 +587,7 @@ impl NetworkBeaconProcessor { } /// Create a new work event to process `LightClientBootstrap`s from the RPC network. - pub fn send_lightclient_bootstrap_request( + pub fn send_light_client_bootstrap_request( self: &Arc, peer_id: PeerId, request_id: PeerRequestId, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index f56a3b7445e..a774c0e16f0 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -218,7 +218,7 @@ impl Router { ), Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_lightclient_bootstrap_request(peer_id, request_id, request), + .send_light_client_bootstrap_request(peer_id, request_id, request), ), } } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 1771e5acd66..f8e1e7cedb5 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -157,7 +157,7 @@ pub fn get_config( } if cli_args.is_present("light-client-server") { - client_config.chain.enable_lightclient_server = true; + client_config.chain.enable_light_client_server = true; } if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 616aced483a..6660783abd1 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -9,7 +9,7 @@ use ssz_derive::{Decode, Encode}; use std::sync::Arc; use test_random_derive::TestRandom; -/// A LightClientBootstrap is the initializer we send over to lightclient nodes +/// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. 
#[derive( Debug, diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 87601b81565..494e68b63f5 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -11,7 +11,7 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; -/// A LightClientFinalityUpdate is the update lightclient request or received by a gossip that +/// A LightClientFinalityUpdate is the update light_client request or received by a gossip that /// signal a new finalized beacon block header for the light client sync protocol. #[derive( Debug, From 9a2a351386ba4d4f7c747dde2115d14ee5f86562 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 12 Dec 2023 14:34:29 +0100 Subject: [PATCH 05/16] add metrics --- .../src/light_client_server_cache.rs | 8 +++++++- beacon_node/beacon_chain/src/metrics.rs | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index 2d67b22ee37..bd5f6c0cc64 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,5 +1,5 @@ use crate::errors::BeaconChainError; -use crate::{BeaconChainTypes, BeaconStore}; +use crate::{metrics, BeaconChainTypes, BeaconStore}; use parking_lot::{Mutex, RwLock}; use slog::{debug, Logger}; use ssz_types::FixedVector; @@ -50,6 +50,8 @@ impl LightClientServerCache { block_root: Hash256, block_post_state: &mut BeaconState, ) -> Result<(), BeaconChainError> { + let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES); + // Only post-altair if spec.fork_name_at_slot::(block.slot()) == ForkName::Base { return Ok(()); @@ -73,6 +75,9 @@ impl LightClientServerCache { block_slot: Slot, sync_aggregate: &SyncAggregate, ) -> Result<(), BeaconChainError> { + let _timer = + metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES); + let signature_slot = block_slot; let attested_block_root = block_parent_root; @@ -158,6 +163,7 @@ impl LightClientServerCache { if let Some(cached_parts) = self.prev_block_cache.lock().get(block_root) { return Ok(cached_parts.clone()); } + metrics::inc_counter(&metrics::LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS); // Compute the value, handling potential errors. let mut state = store diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ca04366b01e..b168469818c 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1068,6 +1068,22 @@ lazy_static! 
{ // Create a custom bucket list for greater granularity in block delay Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0]) ); + + /* + * light_client server metrics + */ + pub static ref LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES: Result = try_create_histogram( + "beacon_light_client_server_cache_state_data_seconds", + "Time taken to produce and cache state data", + ); + pub static ref LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES: Result = try_create_histogram( + "beacon_light_client_server_cache_recompute_updates_seconds", + "Time taken to recompute and cache updates", + ); + pub static ref LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS: Result = try_create_int_counter( + "beacon_light_client_server_cache_prev_block_cache_miss", + "Count of prev block cache misses", + ); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, From b41ed0759964990bec486e223cc3a25ae2c93377 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 13 Dec 2023 11:01:55 +0100 Subject: [PATCH 06/16] revert dropping reprocessing queue --- ...t_client_optimistic_update_verification.rs | 15 ++++++ .../gossip_methods.rs | 54 ++++++++++++++++++- .../src/network_beacon_processor/mod.rs | 2 + 3 files changed, 70 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 6c48cb8ceff..813b112db5a 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -28,6 +28,8 @@ pub enum Error { SigSlotStartIsNone, /// Failed to construct a LightClientOptimisticUpdate from state. FailedConstructingUpdate, + /// Unknown block with parent root. + UnknownBlockParentRoot(Hash256), } /// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network. 
@@ -59,6 +61,19 @@ impl VerifiedLightClientOptimisticUpdate { return Err(Error::TooEarly); } + let head = chain.canonical_head.cached_head(); + let head_block = &head.snapshot.beacon_block; + // check if we can process the optimistic update immediately + // otherwise queue + let canonical_root = rcv_optimistic_update + .attested_header + .beacon + .canonical_root(); + + if canonical_root != head_block.message().parent_root() { + return Err(Error::UnknownBlockParentRoot(canonical_root)); + } + let latest_optimistic_update = chain .light_client_server_cache .get_latest_optimistic_update() diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 42047e39df9..07fc06bc370 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -40,7 +40,8 @@ use types::{ use beacon_processor::{ work_reprocessing_queue::{ - QueuedAggregate, QueuedGossipBlock, QueuedUnaggregate, ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, + ReprocessQueueMessage, }, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, }; @@ -1692,6 +1693,7 @@ impl NetworkBeaconProcessor { message_id: MessageId, peer_id: PeerId, light_client_optimistic_update: LightClientOptimisticUpdate, + reprocess_tx: Option>, seen_timestamp: Duration, ) { match self.chain.verify_optimistic_update_for_gossip( @@ -1710,6 +1712,56 @@ impl NetworkBeaconProcessor { } Err(e) => { match e { + LightClientOptimisticUpdateError::UnknownBlockParentRoot(parent_root) => { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_SENT_OPTIMISTIC_UPDATES, + ); + debug!( + self.log, + "Optimistic update for unknown block"; + "peer_id" => %peer_id, + "parent_root" => ?parent_root + ); + + if let Some(sender) = reprocess_tx { + let processor = self.clone(); + let msg = ReprocessQueueMessage::UnknownLightClientOptimisticUpdate( + QueuedLightClientUpdate { + parent_root, + process_fn: Box::new(move || { + processor.process_gossip_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + None, // Do not reprocess this message again. 
+ seen_timestamp, + ) + }), + }, + ); + + if sender.try_send(msg).is_err() { + error!( + self.log, + "Failed to send optimistic update for re-processing"; + ) + } + } else { + debug!( + self.log, + "Not sending light client update because it had been reprocessed"; + "peer_id" => %peer_id, + "parent_root" => ?parent_root + ); + + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + return; + } LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { metrics::register_optimistic_update_error(&e); diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 733a7bc9258..e7d3a7ce213 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -348,10 +348,12 @@ impl NetworkBeaconProcessor { ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { + let reprocess_tx = processor.reprocess_tx.clone(); processor.process_gossip_optimistic_update( message_id, peer_id, light_client_optimistic_update, + Some(reprocess_tx), seen_timestamp, ) }; From 31acd0c2cf84ab2ab95a1006f3d9eb6300af45b2 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Sat, 20 Jan 2024 13:39:10 +1100 Subject: [PATCH 07/16] Update light client optimistic update re-processing logic. (#7) --- .../beacon_processor/src/work_reprocessing_queue.rs | 7 ++++++- beacon_node/client/src/builder.rs | 11 ++++++++--- .../client/src/compute_light_client_updates.rs | 12 +++++++++++- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs index 9191509d39f..20f3e21d084 100644 --- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -82,12 +82,15 @@ pub enum ReprocessQueueMessage { /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same /// hash until the gossip block is imported. RpcBlock(QueuedRpcBlock), - /// A block that was successfully processed. We use this to handle attestations and light client updates + /// A block that was successfully processed. We use this to handle attestations /// for unknown blocks. BlockImported { block_root: Hash256, parent_root: Hash256, }, + /// A new `LightClientOptimisticUpdate` has been produced. We use this to handle light client + /// updates for unknown parent blocks. + NewLightClientOptimisticUpdate { parent_root: Hash256 }, /// An unaggregated attestation that references an unknown block. UnknownBlockUnaggregate(QueuedUnaggregate), /// An aggregated attestation that references an unknown block. @@ -688,6 +691,8 @@ impl ReprocessQueue { ); } } + } + InboundEvent::Msg(NewLightClientOptimisticUpdate { parent_root }) => { // Unqueue the light client optimistic updates we have for this root, if any.
if let Some(queued_lc_id) = self .awaiting_lc_updates_per_parent_root diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 0e566a05112..b91573e2a7f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -768,7 +768,7 @@ where } .spawn_manager( beacon_processor_channels.beacon_processor_rx, - beacon_processor_channels.work_reprocessing_tx, + beacon_processor_channels.work_reprocessing_tx.clone(), beacon_processor_channels.work_reprocessing_rx, None, beacon_chain.slot_clock.clone(), @@ -857,8 +857,13 @@ where let log = broadcast_context.log().clone(); broadcast_context.executor.spawn( async move { - compute_light_client_updates(&inner_chain, light_client_server_rv, &log) - .await + compute_light_client_updates( + &inner_chain, + light_client_server_rv, + beacon_processor_channels.work_reprocessing_tx, + &log, + ) + .await }, "lcserv_broadcast", ); diff --git a/beacon_node/client/src/compute_light_client_updates.rs b/beacon_node/client/src/compute_light_client_updates.rs index 7adc05c98dd..1eb977d4213 100644 --- a/beacon_node/client/src/compute_light_client_updates.rs +++ b/beacon_node/client/src/compute_light_client_updates.rs @@ -1,7 +1,9 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, LightClientProducerEvent}; +use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; use futures::channel::mpsc::Receiver; use futures::StreamExt; use slog::{error, Logger}; +use tokio::sync::mpsc::Sender; // Each `LightClientProducerEvent` is ~200 bytes. With the light_client server producing only recent // updates it is okay to drop some events in case of overloading. In normal network conditions @@ -12,6 +14,7 @@ pub(crate) const LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY: usize = 32; pub async fn compute_light_client_updates( chain: &BeaconChain, mut light_client_server_rv: Receiver>, + reprocess_tx: Sender, log: &Logger, ) { // Should only receive events for recent blocks, import_block filters by blocks close to clock. @@ -20,10 +23,17 @@ pub async fn compute_light_client_updates( // Uses a bounded receiver, so may drop some SyncAggregates if very overloaded. This is okay // since only the most recent updates have value. while let Some(event) = light_client_server_rv.next().await { + let parent_root = event.0; + chain .recompute_and_cache_light_client_updates(event) .unwrap_or_else(|e| { error!(log, "error computing light_client updates {:?}", e); - }) + }); + + let msg = ReprocessQueueMessage::NewLightClientOptimisticUpdate { parent_root }; + if reprocess_tx.try_send(msg).is_err() { + error!(log, "Failed to inform light client update"; "parent_root" => %parent_root) + }; } } From 7b385cce8b9fa29faa010d2a3a5a372aa3ba41b9 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 30 Jan 2024 12:29:08 +1100 Subject: [PATCH 08/16] Add light client server simulator tests. Co-authored by @dapplion. --- testing/simulator/src/checks.rs | 74 +++++++++++++++++++++++++++++++ testing/simulator/src/eth1_sim.rs | 9 ++++ 2 files changed, 83 insertions(+) diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index d34cdbc9ff1..fa754c1e845 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -243,3 +243,77 @@ pub async fn verify_transition_block_finalized( )) } } + +pub(crate) async fn verify_light_client_updates( + network: LocalNetwork, + start_slot: Slot, + end_slot: Slot, + slot_duration: Duration, +) -> Result<(), String> { + // Tolerance of 2 slot allows for 1 single missed slot. 
+ let light_client_update_slot_tolerance = Slot::new(2); + + // Wait for the first altair block. + let mut have_seen_block = false; + + for slot in start_slot.as_u64()..=end_slot.as_u64() { + let slot = Slot::new(slot); + let previous_slot = slot - 1; + slot_delay(slot, slot_duration).await; + + let remote_nodes = network.remote_nodes()?; + let client = remote_nodes.first().unwrap(); + let previous_slot_has_block = client + .get_beacon_blocks::(BlockId::Slot(previous_slot)) + .await + .map_err(|e| { + format!("Unable to get beacon block for previous slot {previous_slot:?}: {e:?}") + })? + .is_some(); + + if !have_seen_block { + // Make sure we have seen the first block in Altair, to make sure we have sync aggregates available. + if previous_slot_has_block { + have_seen_block = true; + } + // Wait for another slot before we check the first update to avoid race condition. + continue; + } + + // Make sure previous slot has a block, otherwise skip checking for the signature slot distance + let resp = client + .get_beacon_light_client_optimistic_update::() + .await + .map_err(|e| format!("Error while getting light client updates: {:?}", e))? + .ok_or(format!("Light client optimistic update not found {slot:?}"))?; + + if previous_slot_has_block { + // should be 1 in the healthy scenario + let signature_slot = resp.data.signature_slot; + let signature_slot_distance = slot - signature_slot; + if signature_slot_distance > light_client_update_slot_tolerance { + return Err(format!("Existing optimistic update at signature slot {signature_slot} during slot {slot:?}")); + } + } + + let resp = client + .get_beacon_light_client_finality_update::() + .await + .map_err(|e| format!("Error while getting light client updates: {:?}", e))? + .ok_or(format!("Light client finality update not found {slot:?}"))?; + + // Currently finality updates are produced as long as the finalized block is known, even if the finalized header + // sync committee period does not match the signature slot committee period. + // TODO: This complies with the current spec, we should check if this is a bug. + if previous_slot_has_block { + // should be 1 in the healthy scenario + let signature_slot = resp.data.signature_slot; + let signature_slot_distance = slot - signature_slot; + if signature_slot_distance > light_client_update_slot_tolerance { + return Err(format!("Existing finality update at signature slot {signature_slot} during slot {slot:?}")); + } + } + } + + Ok(()) +} diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 953dcf5822f..4bdc9dc09c6 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -220,6 +220,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { fork, sync_aggregate, transition, + light_client_update, ) = futures::join!( // Check that the chain finalizes at the first given opportunity. checks::verify_first_finalization(network.clone(), slot_duration), @@ -272,6 +273,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { Epoch::new(TERMINAL_BLOCK / MinimalEthSpec::slots_per_epoch()), slot_duration, post_merge_sim + ), + checks::verify_light_client_updates( + network.clone(), + // Sync aggregate available from slot 1 after Altair fork transition. 
+ Epoch::new(ALTAIR_FORK_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()) + 1, + Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()), + slot_duration ) ); @@ -282,6 +290,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { fork?; sync_aggregate?; transition?; + light_client_update?; // The `final_future` either completes immediately or never completes, depending on the value // of `continue_after_checks`. From e9575a7939e34fc430c4afe99807fab9574244aa Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 30 Jan 2024 15:18:37 +1100 Subject: [PATCH 09/16] Fix lint --- beacon_node/beacon_chain/src/light_client_server_cache.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index bd5f6c0cc64..1397e3fc9df 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -3,7 +3,9 @@ use crate::{metrics, BeaconChainTypes, BeaconStore}; use parking_lot::{Mutex, RwLock}; use slog::{debug, Logger}; use ssz_types::FixedVector; +use std::num::NonZeroUsize; use types::light_client_update::{FinalizedRootProofLen, FINALIZED_ROOT_INDEX}; +use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate, LightClientHeader, LightClientOptimisticUpdate, Slot, SyncAggregate, @@ -12,7 +14,7 @@ use types::{ /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the /// prev block cache are very small 32 * (6 + 1) = 224 bytes. 32 is an arbitrary number that /// represents unlikely re-orgs, while keeping the cache very small. -const PREV_BLOCK_CACHE_SIZE: usize = 32; +const PREV_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(32); /// This cache computes light client messages ahead of time, required to satisfy p2p and API /// requests. These messages include proofs on historical states, so on-demand computation is From c02590ffe9e5637861f59fdaa7103933fc324af5 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 30 Jan 2024 15:35:41 +1100 Subject: [PATCH 10/16] Enable light client server in simulator test. --- testing/simulator/src/eth1_sim.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 4bdc9dc09c6..26cf92d457c 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -389,6 +389,7 @@ async fn create_local_network( beacon_config.network.target_peers = node_count + proposer_nodes - 1; beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); + beacon_config.network.enable_light_client_server = true; if post_merge_sim { let el_config = execution_layer::Config { From a63a4886d140d19506c73ee54c4f6971654db9ad Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 30 Jan 2024 23:17:21 +1100 Subject: [PATCH 11/16] Fix test for light client optimistic updates and finality updates. 
--- beacon_node/client/src/builder.rs | 38 +++++++------- consensus/types/src/beacon_state.rs | 1 + lighthouse/tests/beacon_node.rs | 1 + testing/simulator/src/checks.rs | 78 +++++++++++++++++------------ testing/simulator/src/eth1_sim.rs | 2 + 5 files changed, 70 insertions(+), 50 deletions(-) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 752bee29466..444a277509f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -894,26 +894,26 @@ where "addr_broadcast", ); } + } - // Spawn service to publish light_client updates at some interval into the slot - if let Some(light_client_server_rv) = self.light_client_server_rv { - let inner_chain = beacon_chain.clone(); - let broadcast_context = - runtime_context.service_context("lcserv_bcast".to_string()); - let log = broadcast_context.log().clone(); - broadcast_context.executor.spawn( - async move { - compute_light_client_updates( - &inner_chain, - light_client_server_rv, - beacon_processor_channels.work_reprocessing_tx, - &log, - ) - .await - }, - "lcserv_broadcast", - ); - } + // Spawn service to publish light_client updates at some interval into the slot. + if let Some(light_client_server_rv) = self.light_client_server_rv { + let inner_chain = beacon_chain.clone(); + let light_client_update_context = + runtime_context.service_context("lc_update".to_string()); + let log = light_client_update_context.log().clone(); + light_client_update_context.executor.spawn( + async move { + compute_light_client_updates( + &inner_chain, + light_client_server_rv, + beacon_processor_channels.work_reprocessing_tx, + &log, + ) + .await + }, + "lc_update", + ); } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index e2e25f24b82..bdc3be9ece4 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1866,6 +1866,7 @@ impl BeaconState { }; // 2. Get all `BeaconState` leaves. + self.initialize_tree_hash_cache(); let mut cache = self .tree_hash_cache_mut() .take() diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 93e99fd3a96..3efcb2d0e5f 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2372,6 +2372,7 @@ fn light_client_server_default() { .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enable_light_client_server, false); + assert_eq!(config.chain.enable_light_client_server, false); assert_eq!(config.http_api.enable_light_client_server, false); }); } diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index fa754c1e845..f38eacc394a 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,5 +1,5 @@ use crate::local_network::LocalNetwork; -use node_test_rig::eth2::types::{BlockId, StateId}; +use node_test_rig::eth2::types::{BlockId, FinalityCheckpointsData, StateId}; use std::time::Duration; use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, Slot, Unsigned}; @@ -250,26 +250,27 @@ pub(crate) async fn verify_light_client_updates( end_slot: Slot, slot_duration: Duration, ) -> Result<(), String> { + slot_delay(start_slot, slot_duration).await; + // Tolerance of 2 slot allows for 1 single missed slot. let light_client_update_slot_tolerance = Slot::new(2); - - // Wait for the first altair block. 
+ let remote_nodes = network.remote_nodes()?; + let client = remote_nodes.first().unwrap(); let mut have_seen_block = false; + let mut have_achieved_finality = false; for slot in start_slot.as_u64()..=end_slot.as_u64() { + slot_delay(Slot::new(1), slot_duration).await; let slot = Slot::new(slot); let previous_slot = slot - 1; - slot_delay(slot, slot_duration).await; - let remote_nodes = network.remote_nodes()?; - let client = remote_nodes.first().unwrap(); - let previous_slot_has_block = client + let previous_slot_block = client .get_beacon_blocks::(BlockId::Slot(previous_slot)) .await .map_err(|e| { format!("Unable to get beacon block for previous slot {previous_slot:?}: {e:?}") - })? - .is_some(); + })?; + let previous_slot_has_block = previous_slot_block.is_some(); if !have_seen_block { // Make sure we have seen the first block in Altair, to make sure we have sync aggregates available. @@ -281,37 +282,52 @@ pub(crate) async fn verify_light_client_updates( } // Make sure previous slot has a block, otherwise skip checking for the signature slot distance - let resp = client + if !previous_slot_has_block { + continue; + } + + // Verify light client optimistic update. `signature_slot_distance` should be 1 in the ideal scenario. + let signature_slot = client .get_beacon_light_client_optimistic_update::() .await .map_err(|e| format!("Error while getting light client updates: {:?}", e))? - .ok_or(format!("Light client optimistic update not found {slot:?}"))?; + .ok_or(format!("Light client optimistic update not found {slot:?}"))? + .data + .signature_slot; + let signature_slot_distance = slot - signature_slot; + if signature_slot_distance > light_client_update_slot_tolerance { + return Err(format!("Existing optimistic update too old: signature slot {signature_slot}, current slot {slot:?}")); + } - if previous_slot_has_block { - // should be 1 in the healthy scenario - let signature_slot = resp.data.signature_slot; - let signature_slot_distance = slot - signature_slot; - if signature_slot_distance > light_client_update_slot_tolerance { - return Err(format!("Existing optimistic update at signature slot {signature_slot} during slot {slot:?}")); + // Verify light client finality update. `signature_slot_distance` should be 1 in the ideal scenario. + // NOTE: Currently finality updates are produced as long as the finalized block is known, even if the finalized header + // sync committee period does not match the signature slot committee period. + // TODO: This complies with the current spec, but we should check if this is a bug. + if !have_achieved_finality { + let FinalityCheckpointsData { finalized, .. } = client + .get_beacon_states_finality_checkpoints(StateId::Head) + .await + .map_err(|e| format!("Unable to get beacon state finality checkpoint: {e:?}"))? + .ok_or("Unable to get head state".to_string())? + .data; + if !finalized.root.is_zero() { + // Wait for another slot before we check the first finality update to avoid race condition. + have_achieved_finality = true; } + continue; } - - let resp = client + let signature_slot = client .get_beacon_light_client_finality_update::() .await .map_err(|e| format!("Error while getting light client updates: {:?}", e))? - .ok_or(format!("Light client finality update not found {slot:?}"))?; - - // Currently finality updates are produced as long as the finalized block is known, even if the finalized header - // sync committee period does not match the signature slot committee period. 
- // TODO: This complies with the current spec, we should check if this is a bug. - if previous_slot_has_block { - // should be 1 in the healthy scenario - let signature_slot = resp.data.signature_slot; - let signature_slot_distance = slot - signature_slot; - if signature_slot_distance > light_client_update_slot_tolerance { - return Err(format!("Existing finality update at signature slot {signature_slot} during slot {slot:?}")); - } + .ok_or(format!("Light client finality update not found {slot:?}"))? + .data + .signature_slot; + let signature_slot_distance = slot - signature_slot; + if signature_slot_distance > light_client_update_slot_tolerance { + return Err(format!( + "Existing finality update too old: signature slot {signature_slot}, current slot {slot:?}" + )); } } diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 26cf92d457c..8d6ffc42ffa 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -390,6 +390,8 @@ async fn create_local_network( beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); beacon_config.network.enable_light_client_server = true; + beacon_config.chain.enable_light_client_server = true; + beacon_config.http_api.enable_light_client_server = true; if post_merge_sim { let el_config = execution_layer::Config { From b0269e91aa1c48761c9209c8feda8bccaf1cb5bf Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 31 Jan 2024 09:17:12 +1100 Subject: [PATCH 12/16] Fix bug in `--builder-proposals` --- validator_client/src/validator_store.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index c913b990603..a2298d30380 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -572,7 +572,7 @@ impl ValidatorStore { return Some(u64::MAX); } self.builder_boost_factor.or({ - if self.builder_proposals { + if !self.builder_proposals { Some(0) } else { None From a9fd61c4ee2d28d687e60973fc06c75edb122289 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 31 Jan 2024 10:51:51 +1100 Subject: [PATCH 13/16] Add tests --- validator_client/src/http_api/tests.rs | 52 ++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index f7db76e4ad5..30ad60e1225 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -1177,6 +1177,58 @@ async fn validator_derived_builder_boost_factor_with_process_defaults() { .await; } +#[tokio::test] +async fn validator_builder_boost_factor_global_builder_proposals_true() { + let config = Config { + builder_proposals: true, + prefer_builder_proposals: false, + builder_boost_factor: None, + ..Config::default() + }; + ApiTester::new_with_config(config) + .await + .assert_default_builder_boost_factor(None); +} + +#[tokio::test] +async fn validator_builder_boost_factor_global_prefer_builder_proposals_true() { + let config = Config { + builder_proposals: true, + prefer_builder_proposals: true, + builder_boost_factor: None, + ..Config::default() + }; + ApiTester::new_with_config(config) + .await + .assert_default_builder_boost_factor(Some(u64::MAX)); +} + +#[tokio::test] +async fn validator_builder_boost_factor_global_prefer_builder_proposals_false() { + let config = Config { + builder_proposals: true, + prefer_builder_proposals: false, + builder_boost_factor: None, + ..Config::default() + }; + 
ApiTester::new_with_config(config) + .await + .assert_default_builder_boost_factor(None); +} + +#[tokio::test] +async fn validator_builder_boost_factor_global_builder_proposals_false() { + let config = Config { + builder_proposals: false, + prefer_builder_proposals: false, + builder_boost_factor: None, + ..Config::default() + }; + ApiTester::new_with_config(config) + .await + .assert_default_builder_boost_factor(Some(0)); +} + #[tokio::test] async fn prefer_builder_proposals_validator() { ApiTester::new() From 16c2332c2207a4c716208f31969916d9ff65445d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 31 Jan 2024 10:58:23 +1100 Subject: [PATCH 14/16] More sensible test order --- validator_client/src/http_api/tests.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 30ad60e1225..44902b95486 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -1191,42 +1191,42 @@ async fn validator_builder_boost_factor_global_builder_proposals_true() { } #[tokio::test] -async fn validator_builder_boost_factor_global_prefer_builder_proposals_true() { +async fn validator_builder_boost_factor_global_builder_proposals_false() { let config = Config { - builder_proposals: true, - prefer_builder_proposals: true, + builder_proposals: false, + prefer_builder_proposals: false, builder_boost_factor: None, ..Config::default() }; ApiTester::new_with_config(config) .await - .assert_default_builder_boost_factor(Some(u64::MAX)); + .assert_default_builder_boost_factor(Some(0)); } #[tokio::test] -async fn validator_builder_boost_factor_global_prefer_builder_proposals_false() { +async fn validator_builder_boost_factor_global_prefer_builder_proposals_true() { let config = Config { builder_proposals: true, - prefer_builder_proposals: false, + prefer_builder_proposals: true, builder_boost_factor: None, ..Config::default() }; ApiTester::new_with_config(config) .await - .assert_default_builder_boost_factor(None); + .assert_default_builder_boost_factor(Some(u64::MAX)); } #[tokio::test] -async fn validator_builder_boost_factor_global_builder_proposals_false() { +async fn validator_builder_boost_factor_global_prefer_builder_proposals_false() { let config = Config { - builder_proposals: false, + builder_proposals: true, prefer_builder_proposals: false, builder_boost_factor: None, ..Config::default() }; ApiTester::new_with_config(config) .await - .assert_default_builder_boost_factor(Some(0)); + .assert_default_builder_boost_factor(None); } #[tokio::test] From 71ecd7e2c1dda50cd113378beade61beb73c5889 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 31 Jan 2024 14:09:33 +1100 Subject: [PATCH 15/16] Fix duplicate builder-boost test case --- validator_client/src/http_api/tests.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 44902b95486..c42aea447f7 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -1217,16 +1217,16 @@ async fn validator_builder_boost_factor_global_prefer_builder_proposals_true() { } #[tokio::test] -async fn validator_builder_boost_factor_global_prefer_builder_proposals_false() { +async fn validator_builder_boost_factor_global_prefer_builder_proposals_true_overrides_builder_proposals() { let config = Config { - builder_proposals: true, - prefer_builder_proposals: false, + 
builder_proposals: false, + prefer_builder_proposals: true, builder_boost_factor: None, ..Config::default() }; ApiTester::new_with_config(config) .await - .assert_default_builder_boost_factor(None); + .assert_default_builder_boost_factor(Some(u64::MAX)); } #[tokio::test] From efb38d084499db79f09a6894261a36ddb9910d53 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 31 Jan 2024 15:01:08 +1100 Subject: [PATCH 16/16] Cargo fmt and rename --- validator_client/src/http_api/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index c42aea447f7..9dc4e1a89f9 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -1217,7 +1217,7 @@ async fn validator_builder_boost_factor_global_prefer_builder_proposals_true() { } #[tokio::test] -async fn validator_builder_boost_factor_global_prefer_builder_proposals_true_overrides_builder_proposals() { +async fn validator_builder_boost_factor_global_prefer_builder_proposals_true_override() { let config = Config { builder_proposals: false, prefer_builder_proposals: true,