diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index daab2838d7ee..ed5cb69aee8a 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -272,6 +272,7 @@ impl Initialized { update: ActiveLeavesUpdate, now: u64, ) -> Result<()> { + gum::trace!(target: LOG_TARGET, timestamp = now, "Processing ActiveLeavesUpdate"); let scraped_updates = self.scraper.process_active_leaves_update(ctx.sender(), &update).await?; log_error( @@ -314,8 +315,15 @@ impl Initialized { Ok(SessionWindowUpdate::Unchanged) => {}, }; + gum::trace!( + target: LOG_TARGET, + timestamp = now, + "Will process {} onchain votes", + scraped_updates.on_chain_votes.len() + ); + // The `runtime-api` subsystem has an internal queue which serializes the execution, - // so there is no point in running these in parallel. + // so there is no point in running these in parallel for votes in scraped_updates.on_chain_votes { let _ = self.process_on_chain_votes(ctx, overlay_db, votes, now).await.map_err( |error| { @@ -329,6 +337,7 @@ impl Initialized { } } + gum::trace!(target: LOG_TARGET, timestamp = now, "Done processing ActiveLeavesUpdate"); Ok(()) } diff --git a/node/core/dispute-coordinator/src/scraping/mod.rs b/node/core/dispute-coordinator/src/scraping/mod.rs index 0feff1753d83..0e3c4ecc29ba 100644 --- a/node/core/dispute-coordinator/src/scraping/mod.rs +++ b/node/core/dispute-coordinator/src/scraping/mod.rs @@ -241,7 +241,7 @@ impl ChainScraper { let mut scraped_updates = ScrapedUpdates::new(); for (block_number, block_hash) in block_numbers.zip(block_hashes) { - gum::trace!(?block_number, ?block_hash, "In ancestor processing."); + gum::trace!(target: LOG_TARGET, ?block_number, ?block_hash, "In ancestor processing."); let receipts_for_block = self.process_candidate_events(sender, block_number, block_hash).await?; diff --git 
a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index 2b5e6b177abf..630fd33855a5 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -100,7 +100,15 @@ where // Fetch the onchain disputes. We'll do a prioritization based on them. let onchain = match get_onchain_disputes(sender, leaf.hash).await { - Ok(r) => r, + Ok(r) => { + gum::trace!( + target: LOG_TARGET, + ?leaf, + "Successfully fetched {} onchain disputes", + r.len() + ); + r + }, Err(GetOnchainDisputesError::NotSupported(runtime_api_err, relay_parent)) => { // Runtime version is checked before calling this method, so the error below should never happen! gum::error!( @@ -129,7 +137,9 @@ where HashMap::new() }, }; + metrics.on_fetched_onchain_disputes(onchain.keys().len() as u64); + gum::trace!(target: LOG_TARGET, ?leaf, "Fetching recent disputes"); let recent_disputes = request_disputes(sender).await; gum::trace!( target: LOG_TARGET, @@ -139,6 +149,8 @@ where onchain.len(), ); + gum::trace!(target: LOG_TARGET, ?leaf, "Filtering recent disputes"); + // Filter out unconfirmed disputes. However if the dispute is already onchain - don't skip it. // In this case we'd better push as much fresh votes as possible to bring it to conclusion faster. let recent_disputes = recent_disputes .filter(|d| d.2.is_confirmed_concluded() || onchain.contains_key(&(d.0, d.1))) .collect::<Vec<_>>(); + gum::trace!(target: LOG_TARGET, ?leaf, "Partitioning recent disputes"); let partitioned = partition_recent_disputes(recent_disputes, &onchain); metrics.on_partition_recent_disputes(&partitioned); @@ -153,12 +166,15 @@ where gum::warn!( target: LOG_TARGET, ?leaf, - "Got {} inactive unknown onchain disputes. This should not happen!", + "Got {} inactive unknown onchain disputes. 
This should not happen in normal conditions!", partitioned.inactive_unknown_onchain.len() ); } + + gum::trace!(target: LOG_TARGET, ?leaf, "Vote selection for recent disputes"); let result = vote_selection(sender, partitioned, &onchain).await; + gum::trace!(target: LOG_TARGET, ?leaf, "Convert to multi dispute statement set"); make_multi_dispute_statement_set(metrics, result) } @@ -179,11 +195,13 @@ where let mut result = BTreeMap::new(); let mut request_votes_counter = 0; while !disputes.is_empty() { + gum::trace!(target: LOG_TARGET, "has to process {} disputes left", disputes.len()); let batch_size = std::cmp::min(VOTES_SELECTION_BATCH_SIZE, disputes.len()); let batch = Vec::from_iter(disputes.drain(0..batch_size)); // Filter votes which are already onchain request_votes_counter += 1; + gum::trace!(target: LOG_TARGET, "requesting onchain votes",); let votes = super::request_votes(sender, batch) .await .into_iter() @@ -213,6 +231,7 @@ where (session_index, candidate_hash, votes) }) .collect::<Vec<_>>(); + gum::trace!(target: LOG_TARGET, "got {} onchain votes after processing", votes.len()); // Check if votes are within the limit for (session_index, candidate_hash, selected_votes) in votes { @@ -221,8 +240,16 @@ where // we are done - no more votes can be added. Importantly, we don't add any votes for a dispute here // if we can't fit them all. This gives us an important invariant, that backing votes for // disputes make it into the provisioned vote set. 
+ gum::trace!( + target: LOG_TARGET, + ?request_votes_counter, + ?total_votes_len, + "vote_selection DisputeCoordinatorMessage::QueryCandidateVotes counter", + ); + return result } + result.insert((session_index, candidate_hash), selected_votes); total_votes_len += votes_len } @@ -231,6 +258,7 @@ where gum::trace!( target: LOG_TARGET, ?request_votes_counter, + ?total_votes_len, "vote_selection DisputeCoordinatorMessage::QueryCandidateVotes counter", ); diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index a8f19854073e..30e7bdbf0821 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -179,11 +179,13 @@ fn handle_active_leaves_update( per_relay_parent: &mut HashMap<Hash, PerRelayParent>, inherent_delays: &mut InherentDelays, ) { + gum::trace!(target: LOG_TARGET, "Handle ActiveLeavesUpdate"); for deactivated in &update.deactivated { per_relay_parent.remove(deactivated); } if let Some(leaf) = update.activated { + gum::trace!(target: LOG_TARGET, leaf_hash=?leaf.hash, "Adding delay"); let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf)); inherent_delays.push(delay_fut); @@ -590,6 +592,10 @@ async fn select_candidates( } } + gum::trace!(target: LOG_TARGET, + leaf_hash=?relay_parent, + "before GetBackedCandidates"); + // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); sender.send_unbounded_message(CandidateBackingMessage::GetBackedCandidates( @@ -598,6 +604,8 @@ tx, )); let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?; + gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, + "Got {} backed candidates", candidates.len()); // `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates` // _should_ preserve that property, but let's just make sure. 
diff --git a/node/core/provisioner/src/metrics.rs b/node/core/provisioner/src/metrics.rs index 3aa0e8719d0f..f4f683470227 100644 --- a/node/core/provisioner/src/metrics.rs +++ b/node/core/provisioner/src/metrics.rs @@ -36,6 +36,9 @@ struct MetricsInner { /// The disputes received from `disputes-coordinator` by partition partitioned_disputes: prometheus::CounterVec<prometheus::U64>, + + /// The disputes fetched from the runtime. + fetched_onchain_disputes: prometheus::Counter<prometheus::U64>, } /// Provisioner metrics. @@ -143,6 +146,12 @@ impl Metrics { .inc_by(inactive_concluded_known_onchain.len().try_into().unwrap_or(0)); } } + + pub(crate) fn on_fetched_onchain_disputes(&self, onchain_count: u64) { + if let Some(metrics) = &self.0 { + metrics.fetched_onchain_disputes.inc_by(onchain_count); + } + } } impl metrics::Metrics for Metrics { @@ -208,6 +217,11 @@ impl metrics::Metrics for Metrics { )?, &registry, )?, + fetched_onchain_disputes: prometheus::register( + prometheus::Counter::new("polkadot_parachain_fetched_onchain_disputes", "Number of disputes fetched from the runtime" + )?, + &registry, + )?, }; Ok(Metrics(Some(metrics))) }