eip4844 gossip #4444

Merged · 10 commits · Jan 4, 2023
59 changes: 58 additions & 1 deletion beacon_chain/gossip_processing/eth2_processor.nim
@@ -15,7 +15,7 @@ import
stew/results,
chronicles, chronos, metrics, taskpools,
../spec/[helpers, forks],
../spec/datatypes/[altair, phase0],
../spec/datatypes/[altair, phase0, eip4844],
../consensus_object_pools/[
block_clearance, block_quarantine, blockchain_dag, exit_pool, attestation_pool,
light_client_pool, sync_committee_msg_pool],
@@ -240,6 +240,63 @@ proc processSignedBeaconBlock*(

v

proc processSignedBeaconBlockAndBlobsSidecar*(
self: var Eth2Processor, src: MsgSource,
signedBlockAndBlobsSidecar: SignedBeaconBlockAndBlobsSidecar): ValidationRes =
let
wallTime = self.getCurrentBeaconTime()
(afterGenesis, wallSlot) = wallTime.toSlot()

template signedBlock: auto = signedBlockAndBlobsSidecar.beacon_block

logScope:
blockRoot = shortLog(signedBlock.root)
blck = shortLog(signedBlock.message)
signature = shortLog(signedBlock.signature)
wallSlot

if not afterGenesis:
notice "Block before genesis"
return errIgnore("Block before genesis")

# Potential under/overflows are fine; would just create odd metrics and logs
let delay = wallTime - signedBlock.message.slot.start_beacon_time

# Start of block processing - in reality, we have already gone through SSZ
# decoding at this stage, which may be significant
debug "Block received", delay

let blockRes =
self.dag.validateBeaconBlock(self.quarantine, signedBlock, wallTime, {})
if blockRes.isErr():
debug "Dropping block", error = blockRes.error()
self.blockProcessor[].dumpInvalidBlock(signedBlock)
beacon_blocks_dropped.inc(1, [$blockRes.error[0]])
return blockRes

let sidecarRes = validateBeaconBlockAndBlobsSidecar(signedBlockAndBlobsSidecar)
if sidecarRes.isOk():
Member:
codewise, it makes more sense to use isErr here and return early, so as to keep the same flow for all error returns and have the success condition at the end.

also, I wonder if we should structure the code more towards the block and blob being decoupled from each other - not in this PR perhaps, but probably we want to run blob/sidecar validation in parallel (in a decoupled design, we'd need a quarantine / holding area for unmatched blob/block pairs etc., but it would be interesting to explore, code-wise, what the impact is)

Contributor Author:
> codewise, it makes more sense to use isErr here and return early, so as to keep the same flow for all error returns and have the success condition at the end.

fully agreed, done in c03e57a

> but probably we want to run blob/sidecar validation in parallel

Is the motivation to limit validation latency? I guess I don't have much intuition at this point about how much extra sequential processing time blob validation will take, but if the additional latency is notable then yes it makes sense.

Or maybe this is future-proofing for a world where gossip ends up being reworked to have separate block and blob topics? I agree with that benefit (ofc depends on if/when that gossip change would be made).
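
For illustration, a minimal sketch (not part of this PR) of the holding area such a decoupled design would need for unmatched block/blob pairs. The container and proc names are hypothetical; only Eth2Digest, BlobsSidecar and eip4844.SignedBeaconBlock are assumed from the codebase:

```nim
# Hypothetical sketch: pair up blocks and sidecars that arrive independently.
import std/tables
import stew/results

type BlobQuarantine = object
  # sidecars that arrived before their block, keyed by beacon_block_root
  orphanSidecars: Table[Eth2Digest, BlobsSidecar]
  # blocks that arrived before their sidecar, keyed by block root
  orphanBlocks: Table[Eth2Digest, eip4844.SignedBeaconBlock]

proc addSidecar(q: var BlobQuarantine, sidecar: BlobsSidecar):
    Opt[eip4844.SignedBeaconBlock] =
  ## If the matching block is already waiting, pop and return it so the caller
  ## can validate the pair together; otherwise hold the sidecar.
  var blck: eip4844.SignedBeaconBlock
  if q.orphanBlocks.pop(sidecar.beacon_block_root, blck):
    return Opt.some(blck)
  q.orphanSidecars[sidecar.beacon_block_root] = sidecar
  Opt.none(eip4844.SignedBeaconBlock)
```

A symmetric addBlock would pop from orphanSidecars, and both tables would need slot-based pruning to bound memory.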

# Block passed validation - enqueue it for processing. The block processing
# queue is effectively unbounded as we use a freestanding task to enqueue
# the block - this is done so that when blocks arrive concurrently with
# sync, we don't lose the gossip blocks, but also don't block the gossip
# propagation of seemingly good blocks
trace "Block validated"
self.blockProcessor[].addBlock(
src, ForkedSignedBeaconBlock.init(signedBlock),
validationDur = nanoseconds(
(self.getCurrentBeaconTime() - wallTime).nanoseconds))

# Validator monitor registration for blocks is done by the processor
beacon_blocks_received.inc()
beacon_block_delay.observe(delay.toFloatSeconds())
else:
debug "Dropping block", error = sidecarRes.error()
self.blockProcessor[].dumpInvalidBlock(signedBlock)
beacon_blocks_dropped.inc(1, [$sidecarRes.error[0]])

sidecarRes

proc setupDoppelgangerDetection*(self: var Eth2Processor, slot: Slot) =
# When another client's already running, this is very likely to detect
# potential duplicate validators, which can trigger slashing.
62 changes: 53 additions & 9 deletions beacon_chain/gossip_processing/gossip_validation.nim
@@ -25,6 +25,7 @@ import
./batch_validation

from ../spec/datatypes/capella import SignedBeaconBlock
from ../spec/datatypes/eip4844 import SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, BLS_MODULUS
henridf marked this conversation as resolved.

from libp2p/protocols/pubsub/pubsub import ValidationResult

@@ -182,7 +183,8 @@ template validateBeaconBlockBellatrix(

# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/bellatrix/p2p-interface.md#beacon_block
template validateBeaconBlockBellatrix(
signed_beacon_block: bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock,
signed_beacon_block: bellatrix.SignedBeaconBlock |
capella.SignedBeaconBlock | eip4844.SignedBeaconBlock,
parent: BlockRef): untyped =
# If the execution is enabled for the block -- i.e.
# is_execution_enabled(state, block.body) then validate the following:
@@ -225,7 +227,8 @@ template validateBeaconBlockBellatrix(
proc validateBeaconBlock*(
dag: ChainDAGRef, quarantine: ref Quarantine,
signed_beacon_block: phase0.SignedBeaconBlock | altair.SignedBeaconBlock |
bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock,
bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock |
eip4844.SignedBeaconBlock,
wallTime: BeaconTime, flags: UpdateFlags): Result[void, ValidationError] =
# In general, checks are ordered from cheap to expensive. Especially, crypto
# verification could be quite a bit more expensive than the rest. This is an
@@ -387,14 +390,55 @@ proc validateBeaconBlock*(

ok()

from ../spec/datatypes/eip4844 import SignedBeaconBlock
proc validateBeaconBlockAndBlobsSidecar*(signedBlock: SignedBeaconBlockAndBlobsSidecar):
Result[void, ValidationError] =
# TODO
# [REJECT] The KZG commitments of the blobs are all correctly encoded
# compressed BLS G1 points -- i.e. all(bls.KeyValidate(commitment) for
# commitment in block.body.blob_kzg_commitments)

# [REJECT] The KZG commitments correspond to the versioned hashes in
# the transactions list --
# i.e. verify_kzg_commitments_against_transactions(block.body.execution_payload.transactions,
# block.body.blob_kzg_commitments)
if not verify_kzg_commitments_against_transactions(
signedBlock.beacon_block.message.body.execution_payload.transactions.asSeq,
signedBlock.beacon_block.message.body.blob_kzg_commitments.asSeq):
return errReject("KZG blob commitments not correctly encoded")

let sidecar = signedBlock.blobs_sidecar

# [IGNORE] the sidecar.beacon_block_slot is for the current slot
# (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. sidecar.beacon_block_slot == block.slot.
if not (sidecar.beacon_block_slot == signedBlock.beacon_block.message.slot):
return errIgnore("sidecar and block slots not equal")

# [REJECT] the sidecar.blobs are all well formatted, i.e. the
# BLSFieldElement in valid range (x < BLS_MODULUS).
for blob in sidecar.blobs:
for i in 0..<blob.len div 8:
let fe = cast[UInt256](sidecar.blobs[i*8..(i+1)*8])
if fe >= BLS_MODULUS:
return errIgnore("BLSFieldElement outside of valid range")

# TODO
# [REJECT] The KZG proof is a correctly encoded compressed BLS G1
# point -- i.e. bls.KeyValidate(blobs_sidecar.kzg_aggregated_proof)


# [REJECT] The KZG commitments in the block are valid against the
# provided blobs sidecar -- i.e. validate_blobs_sidecar(block.slot,
# hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)
let res = validate_blobs_sidecar(signedBlock.beacon_block.message.slot,
hash_tree_root(signedBlock.beacon_block),
signedBlock.beacon_block.message
.body.blob_kzg_commitments.asSeq,
sidecar)
if res.isOk():
ok()
else:
errIgnore(res.error())

proc validateBeaconBlock*(
dag: ChainDAGRef, quarantine: ref Quarantine,
signed_beacon_block: eip4844.SignedBeaconBlock,
wallTime: BeaconTime, flags: UpdateFlags): Result[void, ValidationError] =
debugRaiseAssert $eip4844ImplementationMissing & ": gossip_validation.nim: validateBeaconBlock not how EIP4844 works anymore"
err(default(ValidationError))

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
proc validateAttestation*(
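As an aside on the field-element range check above: BLSFieldElements are 32-byte values, so one way to express the per-blob scan is in 32-byte chunks, as in the sketch below. This is illustrative only; the byte order and the use of stint's fromBytesBE are assumptions, not taken from the PR.

```nim
# Sketch: check every 32-byte field element of a blob against BLS_MODULUS
# (the same constant the PR adds to eip4844.nim).
import stint

const BLS_MODULUS = "52435875175126190479447740508185965837690552500527637822603658699938581184513".u256

func blobFieldElementsInRange(blob: openArray[byte]): bool =
  for i in 0 ..< blob.len div 32:
    let fe = UInt256.fromBytesBE(blob.toOpenArray(i * 32, i * 32 + 31))
    if fe >= BLS_MODULUS:
      return false
  true
```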
4 changes: 3 additions & 1 deletion beacon_chain/networking/eth2_network.nim
@@ -817,12 +817,14 @@ func maxGossipMaxSize(): auto {.compileTime.} =
max(GOSSIP_MAX_SIZE, GOSSIP_MAX_SIZE_BELLATRIX)

from ../spec/datatypes/capella import SignedBeaconBlock
from ../spec/datatypes/eip4844 import SignedBeaconBlockAndBlobsSidecar

template gossipMaxSize(T: untyped): uint32 =
const maxSize = static:
when isFixedSize(T):
fixedPortionSize(T)
elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock:
elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock or
T is eip4844.SignedBeaconBlockAndBlobsSidecar:
GOSSIP_MAX_SIZE_BELLATRIX
# TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for
# Attestation, AttesterSlashing, and SignedAggregateAndProof, which all
72 changes: 27 additions & 45 deletions beacon_chain/nimbus_beacon_node.nim
@@ -30,6 +30,8 @@ import
when defined(posix):
import system/ansi_c

from ./spec/datatypes/eip4844 import SignedBeaconBlock

from
libp2p/protocols/pubsub/gossipsub
import
@@ -1067,25 +1069,23 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =

let forkDigests = node.forkDigests()

discard $eip4844ImplementationMissing & "nimbus_beacon_node.nim:updateGossipStatus check EIP4844 removeMessageHandlers"
const removeMessageHandlers: array[BeaconStateFork, auto] = [
removePhase0MessageHandlers,
removeAltairMessageHandlers,
removeAltairMessageHandlers, # with different forkDigest
removeAltairMessageHandlers, # bellatrix (altair handlers, with different forkDigest)
removeCapellaMessageHandlers,
removeCapellaMessageHandlers
removeCapellaMessageHandlers # eip4844 (capella handlers, different forkDigest)
]

for gossipFork in oldGossipForks:
removeMessageHandlers[gossipFork](node, forkDigests[gossipFork])

discard $eip4844ImplementationMissing & "nimbus_beacon_node.nim:updateGossipStatus check EIP4844 message addMessageHandlers"
const addMessageHandlers: array[BeaconStateFork, auto] = [
addPhase0MessageHandlers,
addAltairMessageHandlers,
addAltairMessageHandlers, # with different forkDigest
addAltairMessageHandlers, # bellatrix (altair handlers, with different forkDigest)
addCapellaMessageHandlers,
addCapellaMessageHandlers
addCapellaMessageHandlers # eip4844 (capella handlers, different forkDigest)
]

for gossipFork in newGossipForks:
@@ -1373,14 +1373,28 @@ proc installMessageValidators(node: BeaconNode) =
# subnets are subscribed to during any given epoch.
let forkDigests = node.dag.forkDigests

template installBeaconBlocksValidator(digest: auto, phase: auto) =
node.network.addValidator(
getBeaconBlocksTopic(digest),
proc (signedBlock: phase.SignedBeaconBlock): ValidationResult =
Contributor:
If this phase.SignedBeaconBlock method works reliably, that'd be useful more broadly.

The current codebase doesn't do this, and I'm not sure if it's documented; Nim does some strange things with it, though:

let x = 3
template f(y: auto) = echo y.x      # Builds, and
f(system)                           # outputs "3"; same with f(y: untyped)

# template g(y: untyped) = echo y.3 # Error: identifier expected, but got '3'
# echo system.3                     # Error: identifier expected, but got '3'

#echo system.x                       # Error: undeclared identifier: 'x'

Also, Nim 1.6 still doesn't properly typecheck these situations -- nim-lang/Nim#1027 was only fixed a couple months ago in Nim devel. I've asked for a backport of its fix nim-lang/Nim#20631 to 1.6, since Nimbus uses this pattern extensively.

Contributor:
One approach used is to pass the entire type (phase0.SignedBeaconBlock, altair.SignedBeaconBlock, et cetera) as a type or generics parameter to a template or function. Repeats itself slightly, but the semantics are better-defined -- those things are in themselves types, while it's not clear at least to me what Nim thinks is happening here.
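
A minimal sketch of that alternative, passing the concrete block type as a typedesc parameter; the identifiers mirror the PR's installBeaconBlocksValidator, but the exact signature here is assumed, not taken from the codebase:

```nim
# Sketch: the fork module's SignedBeaconBlock type is passed explicitly, so the
# template parameter is an ordinary type rather than a module symbol.
template installBeaconBlocksValidator(digest: ForkDigest, BlockType: typedesc) =
  node.network.addValidator(
    getBeaconBlocksTopic(digest),
    proc (signedBlock: BlockType): ValidationResult =
      if node.shouldSyncOptimistically(node.currentSlot):
        toValidationResult(
          node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
      else:
        toValidationResult(node.processor[].processSignedBeaconBlock(
          MsgSource.gossip, signedBlock)))

installBeaconBlocksValidator(forkDigests.phase0, phase0.SignedBeaconBlock)
installBeaconBlocksValidator(forkDigests.altair, altair.SignedBeaconBlock)
```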

Contributor (@tersec, Jan 3, 2023):
I'm partly concerned because the local testnets in CI keep failing (e.g., https://ci.status.im/blue/organizations/jenkins/nimbus-eth2%2Fplatforms%2Flinux%2Fx86_64/detail/PR-4444/7/pipeline https://ci.status.im/blue/organizations/jenkins/nimbus-eth2%2Fplatforms%2Fmacos%2Faarch64/detail/PR-4444/7/pipeline https://ci.status.im/job/nimbus-eth2/job/platforms/job/macos/job/x86_64/job/PR-4444/7/display/redirect).

Block gossip reception just stops working entirely with this PR, regardless of fork or slot.

So, blocks either get sent by a node, which puts them through the block processing pipeline locally, or get sent and ... apparently not really processed properly on the receiving end. This works for a while, because nodes manage to request backfill blocks in time via req/resp, e.g.:

{"lvl":"DBG","ts":"2023-01-03 21:56:07.723+00:00","msg":"Requesting detected missing blocks","topics":"beacnde","blocks":"[f1db17e9]"}
{"lvl":"DBG","ts":"2023-01-03 21:56:07.723+00:00","msg":"Requesting blocks by root","topics":"requman","peer":"16U*ZvWiek","blocks":"[f1db17e9]","peer_score":1000}
{"lvl":"DBG","ts":"2023-01-03 21:56:07.733+00:00","msg":"Block resolved","blockRoot":"f1db17e9","blck":{"slot":20,"proposer_index":84,"parent_root":"264c4bde","state_root":"07584766","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":12,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":21,"block_number":0,"fee_recipient":"0x0000000000000000000000000000000000000000"},"blockVerified":true,"heads":1,"stateDataDur":{"value":8417},"sigVerifyDur":{"value":6546958},"stateVerifyDur":{"value":229417},"putBlockDur":{"value":210083},"epochRefDur":{"value":4417}}
{"lvl":"DBG","ts":"2023-01-03 21:56:07.733+00:00","msg":"Updated head block","topics":"chaindag","stateRoot":"07584766","justified":"0:00000000","finalized":"0:00000000","isOptHead":false,"newHead":"f1db17e9:20","lastHead":"264c4bde:19"}
{"lvl":"DBG","ts":"2023-01-03 21:56:07.734+00:00","msg":"Block processed","localHeadSlot":20,"blockSlot":20,"validationDur":{"value":0},"queueDur":{"value":197417},"storeBlockDur":{"value":7515041},"updateHeadDur":{"value":282667}}

Where node 3 initially sent that:

{"lvl":"NOT","ts":"2023-01-03 21:56:05.029+00:00","msg":"Block sent","blockRoot":"f1db17e9","blck":{"slot":20,"proposer_index":84,"parent_root":"264c4bde","state_root":"07584766","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":12,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":21,"block_number":0,"fee_recipient":"0x0000000000000000000000000000000000000000"},"signature":"a66896bc","delay":"29ms419us"}
{"lvl":"DBG","ts":"2023-01-03 21:56:05.037+00:00","msg":"Block resolved","blockRoot":"f1db17e9","blck":{"slot":20,"proposer_index":84,"parent_root":"264c4bde","state_root":"07584766","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":12,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":21,"block_number":0,"fee_recipient":"0x0000000000000000000000000000000000000000"},"blockVerified":true,"heads":1,"stateDataDur":{"value":10625},"sigVerifyDur":{"value":7561541},"stateVerifyDur":{"value":192250},"putBlockDur":{"value":268833},"epochRefDur":{"value":3417}}
{"lvl":"DBG","ts":"2023-01-03 21:56:05.038+00:00","msg":"Updated head block","topics":"chaindag","stateRoot":"07584766","justified":"0:00000000","finalized":"0:00000000","isOptHead":false,"newHead":"f1db17e9:20","lastHead":"264c4bde:19"}
{"lvl":"DBG","ts":"2023-01-03 21:56:05.038+00:00","msg":"Block processed","localHeadSlot":20,"blockSlot":20,"validationDur":{"value":0},"queueDur":{"value":38042},"storeBlockDur":{"value":8647625},"updateHeadDur":{"value":257000}}

This is the entirety of "Block received" via gossip, across all nodes and all slots:

local-testnet-minimal$ rg "Block received"
log0.txt
6294:{"lvl":"DBG","ts":"2023-01-03 21:54:53.019+00:00","msg":"Block received","topics":"beacnde","delay":"19ms893us","blockRoot":"83cebea6","wallSlot":8,"signature":"91a80241","blck":{"slot":8,"proposer_index":1003,"parent_root":"1aad008d","state_root":"2e2f695d","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":7,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":0,"block_number":0,"fee_recipient":""}}
7098:{"lvl":"DBG","ts":"2023-01-03 21:54:59.023+00:00","msg":"Block received","topics":"beacnde","delay":"23ms518us","blockRoot":"7f2a43ea","wallSlot":9,"signature":"aa987743","blck":{"slot":9,"proposer_index":804,"parent_root":"83cebea6","state_root":"094c2578","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":4,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":5,"block_number":0,"fee_recipient":""}}
7994:{"lvl":"DBG","ts":"2023-01-03 21:55:05.028+00:00","msg":"Block received","topics":"beacnde","delay":"28ms185us","blockRoot":"2a130009","wallSlot":10,"signature":"b72d5a20","blck":{"slot":10,"proposer_index":230,"parent_root":"7f2a43ea","state_root":"e82082e7","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":20,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":13,"block_number":0,"fee_recipient":""}}
8929:{"lvl":"DBG","ts":"2023-01-03 21:55:11.012+00:00","msg":"Block received","topics":"beacnde","delay":"12ms368us","blockRoot":"d1b30239","wallSlot":11,"signature":"b0c231e7","blck":{"slot":11,"proposer_index":920,"parent_root":"2a130009","state_root":"65d0af91","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":4,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":29,"block_number":0,"fee_recipient":""}}
9820:{"lvl":"DBG","ts":"2023-01-03 21:55:17.032+00:00","msg":"Block received","topics":"beacnde","delay":"32ms541us","blockRoot":"73c1b8f5","wallSlot":12,"signature":"8571e96a","blck":{"slot":12,"proposer_index":876,"parent_root":"d1b30239","state_root":"1bd3e6c9","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":4,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":29,"block_number":0,"fee_recipient":""}}
10744:{"lvl":"DBG","ts":"2023-01-03 21:55:23.033+00:00","msg":"Block received","topics":"beacnde","delay":"33ms487us","blockRoot":"0830c947","wallSlot":13,"signature":"ae0d9289","blck":{"slot":13,"proposer_index":456,"parent_root":"73c1b8f5","state_root":"67ba293e","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":8,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":29,"block_number":0,"fee_recipient":""}}
11584:{"lvl":"DBG","ts":"2023-01-03 21:55:29.018+00:00","msg":"Block received","topics":"beacnde","delay":"18ms649us","blockRoot":"f7bec601","wallSlot":14,"signature":"877a66b2","blck":{"slot":14,"proposer_index":686,"parent_root":"0830c947","state_root":"157c5ba6","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":4,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":29,"block_number":0,"fee_recipient":""}}
12431:{"lvl":"DBG","ts":"2023-01-03 21:55:35.032+00:00","msg":"Block received","topics":"beacnde","delay":"32ms632us","blockRoot":"64f7df29","wallSlot":15,"signature":"b484524e","blck":{"slot":15,"proposer_index":96,"parent_root":"f7bec601","state_root":"4548c429","eth1data":{"deposit_root":"1681181313acd0d68b1e8d8d275665fe521a9ce5724858c3121cd2f2d84f3e33","deposit_count":1024,"block_hash":"4242424242424242424242424242424242424242424242424242424242424242"},"graffiti":"Nimbus/v22.12.0-c03e57-stateofus","proposer_slashings_len":0,"attester_slashings_len":0,"attestations_len":5,"deposits_len":0,"voluntary_exits_len":0,"sync_committee_participants":29,"block_number":0,"fee_recipient":""}}

There should be 3 "Block received" logs for every slot (from each of the 4 nodes except the one which sent the block).

The trouble with relying on req/resp, aside from the fact that block gossip shouldn't be failing to begin with, is that since the attestations get dropped, these blocks don't necessarily get enough votes to keep the head moving. By the time req/resp gets processed, it's too late for the nodes to count votes in a similar way.

So, the combination of those Nim issues/bugs and this particular failure mode causes me to be skeptical that this refactoring is working as intended.

N=0; while ./scripts/launch_local_testnet.sh --preset minimal --nodes 4 --disable-htop --stop-at-epoch 8 -- --verify-finalization --discv5:no; do N=$((N+1)); echo "That was run #${N}"; sleep 67; done

where that variation runs the minimal preset version in a loop until it fails for some reason.

./scripts/launch_local_testnet.sh --preset minimal --nodes 4 --disable-htop --stop-at-epoch 8 -- --verify-finalization --discv5:no

from the nimbus-eth2 directory runs the local testnet a single time, and the various options can of course be adjusted.

Contributor Author:
Thanks for digging into this and for the explanations. I've removed the template. I figured the refactoring was straightforward enough; lesson learned. In any case, it belonged at least in a separate commit and probably in a different PR.

As a sidenote, this also changed the order in which validators were installed. I did that to group related validators for clarity, assuming the change had to be innocuous, but given the specific order in which the validators were being installed, I am now wondering if there might have been a reason for that order. Anyway, order restored.

Contributor:
> Thanks for digging into this and for the explanations. I've removed the template. I figured the refactoring was straightforward enough; lesson learned. In any case, it belonged at least in a separate commit and probably in a different PR.

Sure. I agree, in principle, with the refactoring, but also that it probably shouldn't be part of this PR.

> As a sidenote, this also changed the order in which validators were installed. I did that to group related validators for clarity, assuming the change had to be innocuous, but given the specific order in which the validators were being installed, I am now wondering if there might have been a reason for that order. Anyway, order restored.

The order was not intended to be important. They'll get called by nim-libp2p in whatever order messages come in, regardless. Might as well keep the order though.

if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))


installBeaconBlocksValidator(forkDigests.phase0, phase0)
installBeaconBlocksValidator(forkDigests.altair, altair)
installBeaconBlocksValidator(forkDigests.bellatrix, bellatrix)
installBeaconBlocksValidator(forkDigests.capella, capella)

node.network.addValidator(
getBeaconBlocksTopic(forkDigests.phase0),
proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
getBeaconBlockAndBlobsSidecarTopic(forkDigests.eip4844),
proc (signedBlock: eip4844.SignedBeaconBlockAndBlobsSidecar): ValidationResult =
# TODO: take into account node.shouldSyncOptimistically(node.currentSlot)
toValidationResult(node.processor[].processSignedBeaconBlockAndBlobsSidecar(
MsgSource.gossip, signedBlock)))

template installPhase0Validators(digest: auto) =
@@ -1433,38 +1447,6 @@ proc installMessageValidators(node: BeaconNode) =
installPhase0Validators(forkDigests.capella)
installPhase0Validators(forkDigests.eip4844)

node.network.addValidator(
getBeaconBlocksTopic(forkDigests.altair),
proc (signedBlock: altair.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))

node.network.addValidator(
getBeaconBlocksTopic(forkDigests.bellatrix),
proc (signedBlock: bellatrix.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))

node.network.addValidator(
getBeaconBlocksTopic(forkDigests.capella),
proc (signedBlock: capella.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))

discard $eip4844ImplementationMissing & ": add validation here, but per https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.1/specs/eip4844/p2p-interface.md#beacon_block it's not beacon_block but beacon_block_and_blobs_sidecar"

template installSyncCommitteeeValidators(digest: auto) =
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope:
7 changes: 6 additions & 1 deletion beacon_chain/spec/datatypes/eip4844.nim
@@ -33,12 +33,17 @@ const
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.1/specs/eip4844/beacon-chain.md#blob
BLOB_TX_TYPE* = 0x05'u8

# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/eip4844/polynomial-commitments.md
henridf marked this conversation as resolved.
BLS_MODULUS* = "52435875175126190479447740508185965837690552500527637822603658699938581184513".u256

type
# this block belongs elsewhere - will figure out after implementing c-kzg bindings
KZGCommitment* = array[48, byte]
KZGProof* = array[48, byte]
BLSFieldElement* = array[32, byte]

KZGCommitmentList* = List[KZGCommitment, Limit MAX_BLOBS_PER_BLOCK]

# TODO this apparently is suppposed to be SSZ-equivalent to Bytes32, but
# current spec doesn't ever SSZ-serialize it or hash_tree_root it
VersionedHash* = array[32, byte]
@@ -251,7 +256,7 @@ type
# Execution
execution_payload*: ExecutionPayload
bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: List[KZGCommitment, Limit MAX_BLOBS_PER_BLOCK] # [New in EIP-4844]
blob_kzg_commitments*: KZGCommitmentList # [New in EIP-4844]

SigVerifiedBeaconBlockBody* = object
## A BeaconBlock body with signatures verified
34 changes: 27 additions & 7 deletions beacon_chain/spec/state_transition_block.nim
@@ -821,21 +821,21 @@ func tx_peek_blob_versioned_hashes(opaque_tx: Transaction):
res.add versionedHash
ok res

# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.1/specs/eip4844/beacon-chain.md#kzg_commitment_to_versioned_hash
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/eip4844/beacon-chain.md#kzg_commitment_to_versioned_hash
func kzg_commitment_to_versioned_hash(
kzg_commitment: KZGCommitment): VersionedHash =
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.1/specs/eip4844/beacon-chain.md#blob
kzg_commitment: eip4844.KZGCommitment): VersionedHash =
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/eip4844/beacon-chain.md#blob
const VERSIONED_HASH_VERSION_KZG = 0x01'u8

var res: VersionedHash
res[0] = VERSIONED_HASH_VERSION_KZG
res[1 .. 31] = eth2digest(kzg_commitment).data.toOpenArray(1, 31)
res

# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.1/specs/eip4844/beacon-chain.md#verify_kzg_commitments_against_transactions
func verify_kzg_commitments_against_transactions(
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/eip4844/beacon-chain.md#verify_kzg_commitments_against_transactions
func verify_kzg_commitments_against_transactions*(
transactions: seq[Transaction],
kzg_commitments: seq[KZGCommitment]): bool =
kzg_commitments: seq[eip4844.KZGCommitment]): bool =
var all_versioned_hashes: seq[VersionedHash]
for tx in transactions:
if tx[0] == BLOB_TX_TYPE:
@@ -862,10 +862,30 @@ func process_blob_kzg_commitments(
else:
return err("process_blob_kzg_commitments: verify_kzg_commitments_against_transactions failed")

# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/eip4844/beacon-chain.md#validate_blobs_sidecar
proc validate_blobs_sidecar*(slot: Slot, root: Eth2Digest,
expected_kzg_commitments: seq[eip4844.KZGCommitment],
blobs_sidecar: eip4844.BlobsSidecar):
Result[void, cstring] =
if slot != blobs_sidecar.beacon_block_slot:
return err("validate_blobs_sidecar: different slot in block and sidecar")

if root != blobs_sidecar.beacon_block_root:
return err("validate_blobs_sidecar: different root in block and sidecar")

if expected_kzg_commitments.len != blobs_sidecar.blobs.len:
return err("validate_blobs_sidecar: different commitment lengths")

# TODO
# if not kzg_4844.verify_aggregate_kzg_proof(asSeq(blobs_sidecar.blobs), expected_kzg_commitments, blobs_sidecar.kzg_aggregated_proof):
# return err("validate_blobs_sidecar: aggregated kzg proof verification failed")

ok()

henridf marked this conversation as resolved.
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.1/specs/eip4844/beacon-chain.md#is_data_available
func is_data_available(
slot: Slot, beacon_block_root: Eth2Digest,
blob_kzg_commitments: seq[KZGCommitment]): bool =
blob_kzg_commitments: seq[eip4844.KZGCommitment]): bool =
discard $eip4844ImplementationMissing & ": state_transition_block.nim:is_data_available"

true
3 changes: 2 additions & 1 deletion tests/test_gossip_validation.nim
@@ -211,7 +211,8 @@ suite "Gossip validation - Extra": # Not based on preset config
const nilCallback = OnCapellaBlockAdded(nil)
dag.addHeadBlock(verifier, blck.capellaData, nilCallback)
of BeaconBlockFork.EIP4844:
raiseAssert $eip4844ImplementationMissing
const nilCallback = OnEIP4844BlockAdded(nil)
dag.addHeadBlock(verifier, blck.eip4844Data, nilCallback)
check: added.isOk()
dag.updateHead(added[], quarantine[])
dag
Expand Down