diff --git a/Makefile b/Makefile
index f436b46173..b68ba6defb 100644
--- a/Makefile
+++ b/Makefile
@@ -174,11 +174,12 @@ libbacktrace:
 # - --base-metrics-port + [0, --nodes)
 # - --base-vc-keymanager-port + [0, --nodes)
 # - --base-vc-metrics-port + [0, --nodes]
-# - --base-remote-signer-port + [0, --remote-signers)
+# - --base-remote-signer-port + [0, --nimbus-signer-nodes | --web3signer-nodes)
+# - --base-remote-signer-metrics-port + [0, --nimbus-signer-nodes | --web3signer-nodes)
 #
 # Local testnets with --run-geth or --run-nimbus (only these ports):
 # - --base-el-net-port + --el-port-offset * [0, --nodes + --light-clients)
-# - --base-el-http-port + --el-port-offset * [0, --nodes + --light-clients)
+# - --base-el-rpc-port + --el-port-offset * [0, --nodes + --light-clients)
 # - --base-el-ws-port + --el-port-offset * [0, --nodes + --light-clients)
 # - --base-el-auth-rpc-port + --el-port-offset * [0, --nodes + --light-clients)
 UNIT_TEST_BASE_PORT := 9950
@@ -192,19 +193,19 @@ restapi-test:
 		--resttest-delay 30 \
 		--kill-old-processes
 
-ifneq ($(shell uname -p), arm)
-TESTNET_EXTRA_FLAGS := --run-geth --dl-geth
-else
-TESTNET_EXTRA_FLAGS :=
-endif
-
 local-testnet-minimal:
 	./scripts/launch_local_testnet.sh \
 		--data-dir $@ \
 		--preset minimal \
 		--nodes 4 \
-		--stop-at-epoch 6 \
+		--capella-fork-epoch 4 \
+		--eip4844-fork-epoch 20 \
+		--stop-at-epoch 10 \
 		--disable-htop \
+		--remote-validators-count 512 \
+		--enable-payload-builder \
+		--nimbus-signer-nodes 1 \
+		--threshold 1 \
 		--enable-logtrace \
 		--base-port $$(( 6001 + EXECUTOR_NUMBER * 500 )) \
 		--base-rest-port $$(( 6031 + EXECUTOR_NUMBER * 500 )) \
@@ -212,14 +213,16 @@ local-testnet-minimal:
 		--base-vc-keymanager-port $$(( 6131 + EXECUTOR_NUMBER * 500 )) \
 		--base-vc-metrics-port $$(( 6161 + EXECUTOR_NUMBER * 500 )) \
 		--base-remote-signer-port $$(( 6201 + EXECUTOR_NUMBER * 500 )) \
+		--base-remote-signer-metrics-port $$(( 6251 + EXECUTOR_NUMBER * 500 )) \
 		--base-el-net-port $$(( 6301 + EXECUTOR_NUMBER * 500 )) \
-		--base-el-http-port $$(( 6302 + EXECUTOR_NUMBER * 500 )) \
+		--base-el-rpc-port $$(( 6302 + EXECUTOR_NUMBER * 500 )) \
 		--base-el-ws-port $$(( 6303 + EXECUTOR_NUMBER * 500 )) \
 		--base-el-auth-rpc-port $$(( 6304 + EXECUTOR_NUMBER * 500 )) \
 		--el-port-offset 5 \
 		--timeout 648 \
 		--kill-old-processes \
-		$(TESTNET_EXTRA_FLAGS) \
+		--run-geth --dl-geth \
+		--run-nimbus-eth1 --dl-nimbus-eth1 \
 		-- \
 		--verify-finalization \
 		--discv5:no
@@ -231,20 +234,25 @@ local-testnet-mainnet:
 		--stop-at-epoch 6 \
 		--disable-htop \
 		--enable-logtrace \
+		--nimbus-signer-nodes 3 \
+		--threshold 2 \
+		--remote-validators-count 512 \
 		--base-port $$(( 7001 + EXECUTOR_NUMBER * 500 )) \
 		--base-rest-port $$(( 7031 + EXECUTOR_NUMBER * 500 )) \
 		--base-metrics-port $$(( 7061 + EXECUTOR_NUMBER * 500 )) \
 		--base-vc-keymanager-port $$(( 7131 + EXECUTOR_NUMBER * 500 )) \
 		--base-vc-metrics-port $$(( 7161 + EXECUTOR_NUMBER * 500 )) \
 		--base-remote-signer-port $$(( 7201 + EXECUTOR_NUMBER * 500 )) \
+		--base-remote-signer-metrics-port $$(( 7251 + EXECUTOR_NUMBER * 500 )) \
 		--base-el-net-port $$(( 7301 + EXECUTOR_NUMBER * 500 )) \
-		--base-el-http-port $$(( 7302 + EXECUTOR_NUMBER * 500 )) \
+		--base-el-rpc-port $$(( 7302 + EXECUTOR_NUMBER * 500 )) \
 		--base-el-ws-port $$(( 7303 + EXECUTOR_NUMBER * 500 )) \
 		--base-el-auth-rpc-port $$(( 7304 + EXECUTOR_NUMBER * 500 )) \
 		--el-port-offset 5 \
 		--timeout 2784 \
 		--kill-old-processes \
-		$(TESTNET_EXTRA_FLAGS) \
+		--run-geth --dl-geth \
+		--run-nimbus-eth1 --dl-nimbus-eth1 \
 		-- \
 		--verify-finalization \
 		--discv5:no
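The port layout commented above is purely arithmetic: consensus-layer ports advance by one per node, execution-layer ports by --el-port-offset per node. A minimal Nim sketch (hypothetical helper, not part of the repo), assuming EXECUTOR_NUMBER = 0 and the local-testnet-minimal base values:

  const elPortOffset = 5  # mirrors --el-port-offset above

  func portsForNode(i: int): tuple[p2p, rest, elNet: int] =
    # --base-port + i, --base-rest-port + i, --base-el-net-port + 5 * i
    (p2p: 6001 + i, rest: 6031 + i, elNet: 6301 + elPortOffset * i)

  doAssert portsForNode(3) == (p2p: 6004, rest: 6034, elNet: 6316)
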
diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim
index c2e8c9e98b..0af8b9b091 100644
--- a/beacon_chain/beacon_node.nim
+++ b/beacon_chain/beacon_node.nim
@@ -66,7 +66,7 @@ type
     syncCommitteeMsgPool*: ref SyncCommitteeMsgPool
     lightClientPool*: ref LightClientPool
     exitPool*: ref ExitPool
-    eth1Monitor*: Eth1Monitor
+    elManager*: ELManager
     payloadBuilderRestClient*: RestClientRef
     restServer*: RestServerRef
     keymanagerHost*: ref KeymanagerHost
@@ -87,7 +87,6 @@ type
     restKeysCache*: Table[ValidatorPubKey, ValidatorIndex]
     validatorMonitor*: ref ValidatorMonitor
     stateTtlCache*: StateTtlCache
-    nextExchangeTransitionConfTime*: Moment
    router*: ref MessageRouter
     dynamicFeeRecipientsStore*: ref DynamicFeeRecipientsStore
     externalBuilderRegistrations*:
diff --git a/beacon_chain/beacon_node_light_client.nim b/beacon_chain/beacon_node_light_client.nim
index b77f16a4fb..8075a7985c 100644
--- a/beacon_chain/beacon_node_light_client.nim
+++ b/beacon_chain/beacon_node_light_client.nim
@@ -11,14 +11,12 @@ else:
   {.push raises: [].}
 
 import
-  chronicles,
+  chronicles, web3/engine_api_types,
   ./beacon_node
 
 logScope: topics = "beacnde"
 
 func shouldSyncOptimistically*(node: BeaconNode, wallSlot: Slot): bool =
-  if node.eth1Monitor == nil:
-    return false
   let optimisticHeader = node.lightClient.optimisticHeader.valueOr:
     return false
@@ -42,7 +40,7 @@ proc initLightClient*(
   let
     optimisticHandler = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock):
-        Future[void] {.async.} =
+                             Future[void] {.async.} =
       info "New LC optimistic block",
         opt = signedBlock.toBlockId(),
         dag = node.dag.head.bid,
@@ -54,10 +52,9 @@ proc initLightClient*(
       if blck.message.is_execution_block:
         template payload(): auto = blck.message.body.execution_payload
 
-        let eth1Monitor = node.eth1Monitor
-        if eth1Monitor != nil and not payload.block_hash.isZero:
+        if not payload.block_hash.isZero:
           # engine_newPayloadV1
-          discard await eth1Monitor.newExecutionPayload(payload)
+          discard await node.elManager.newExecutionPayload(payload)
 
           # Retain optimistic head for other `forkchoiceUpdated` callers.
           # May temporarily block `forkchoiceUpdatedV1` calls, e.g., Geth:
@@ -70,10 +67,10 @@ proc initLightClient*(
           # engine_forkchoiceUpdatedV1
           let beaconHead = node.attestationPool[].getBeaconHead(nil)
-          discard await eth1Monitor.runForkchoiceUpdated(
-            headBlockRoot = payload.block_hash,
-            safeBlockRoot = beaconHead.safeExecutionPayloadHash,
-            finalizedBlockRoot = beaconHead.finalizedExecutionPayloadHash)
+          discard await node.elManager.forkchoiceUpdated(
+            headBlock = payload.block_hash,
+            safeBlock = beaconHead.safeExecutionPayloadHash,
+            finalizedBlock = beaconHead.finalizedExecutionPayloadHash)
       else: discard
 
     optimisticProcessor = initOptimisticProcessor(
diff --git a/beacon_chain/conf.nim b/beacon_chain/conf.nim
index 2e6f01c593..7c86f40bea 100644
--- a/beacon_chain/conf.nim
+++ b/beacon_chain/conf.nim
@@ -29,6 +29,7 @@ import
   ./spec/datatypes/base,
   ./networking/network_metadata,
   ./validators/slashing_protection_common,
+  ./eth1/el_conf,
   ./filepath
 
 from consensus_object_pools/block_pools_types_light_client
@@ -38,7 +39,7 @@ export
   uri, nat, enr, defaultEth2TcpPort, enabledLogLevel, ValidIpAddress,
   defs, parseCmdArg, completeCmdArg, network_metadata,
-  network, BlockHashOrNumber,
+  el_conf, network, BlockHashOrNumber,
   confTomlDefs, confTomlNet, confTomlUri
 
 declareGauge network_name, "network name", ["name"]
@@ -170,14 +171,12 @@ type
         name: "era-dir" .}: Option[InputDir]
 
       web3Urls* {.
-        desc: "One or more execution layer Web3 provider URLs"
-        name: "web3-url" .}: seq[string]
+        desc: "One or more Execution Layer Engine API URLs"
+        name: "web3-url" .}: seq[EngineApiUrlConfigValue]
 
-      web3ForcePolling* {.
-        hidden
-        defaultValue: false
-        desc: "Force the use of polling when determining the head block of Eth1"
-        name: "web3-force-polling" .}: bool
+      elUrls* {.
+        desc: "One or more Execution Layer Engine API URLs"
+        name: "el" .}: seq[EngineApiUrlConfigValue]
 
       optimistic* {.
         hidden # deprecated > 22.12
@@ -228,7 +227,7 @@ type
       # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/authentication.md#key-distribution
       jwtSecret* {.
        desc: "A file containing the hex-encoded 256 bit secret key to be used for verifying/generating JWT tokens"
-        name: "jwt-secret" .}: Option[string]
+        name: "jwt-secret" .}: Option[InputFile]
 
       case cmd* {.
         command
@@ -597,10 +596,13 @@ type
         defaultValueDesc: $defaultEth2TcpPortDesc
         name: "bootstrap-port" .}: Port
 
+      genesisTime* {.
+        desc: "Unix epoch time of the network genesis"
+        name: "genesis-time" .}: Option[uint64]
+
       genesisOffset* {.
         desc: "Seconds from now to add to genesis time"
-        defaultValue: 5
-        name: "genesis-offset" .}: int
+        name: "genesis-offset" .}: Option[int]
 
       outputGenesis* {.
         desc: "Output file where to write the initial state snapshot"
@@ -1290,7 +1292,7 @@ func defaultFeeRecipient*(conf: AnyConf): Eth1Address =
 proc loadJwtSecret*(
     rng: var HmacDrbgContext,
     dataDir: string,
-    jwtSecret: Option[string],
+    jwtSecret: Option[InputFile],
     allowCreate: bool): Option[seq[byte]] =
   # Some Web3 endpoints aren't compatible with JWT, but if explicitly chosen,
   # use it regardless.
@@ -1305,8 +1307,11 @@ proc loadJwtSecret*(
   else:
     none(seq[byte])
 
-template loadJwtSecret*(
+proc loadJwtSecret*(
     rng: var HmacDrbgContext,
     config: BeaconNodeConf,
     allowCreate: bool): Option[seq[byte]] =
   rng.loadJwtSecret(string(config.dataDir), config.jwtSecret, allowCreate)
+
+proc engineApiUrls*(config: BeaconNodeConf): seq[EngineApiUrl] =
+  (config.elUrls & config.web3Urls).toFinalEngineApiUrls(config.jwtSecret)
diff --git a/beacon_chain/conf_light_client.nim b/beacon_chain/conf_light_client.nim
index 4c2d329671..072c0142ec 100644
--- a/beacon_chain/conf_light_client.nim
+++ b/beacon_chain/conf_light_client.nim
@@ -127,11 +127,15 @@ type LightClientConf* = object
   # Execution layer
   web3Urls* {.
     desc: "One or more execution layer Web3 provider URLs"
-    name: "web3-url" .}: seq[string]
+    name: "web3-url" .}: seq[EngineApiUrlConfigValue]
+
+  elUrls* {.
+    desc: "One or more Execution Layer Engine API URLs"
+    name: "el" .}: seq[EngineApiUrlConfigValue]
 
   jwtSecret* {.
     desc: "A file containing the hex-encoded 256 bit secret key to be used for verifying/generating JWT tokens"
-    name: "jwt-secret" .}: Option[string]
+    name: "jwt-secret" .}: Option[InputFile]
 
   # Testing
   stopAtEpoch* {.
@@ -148,3 +152,6 @@ template loadJwtSecret*(
     config: LightClientConf,
     allowCreate: bool): Option[seq[byte]] =
   rng.loadJwtSecret(string(config.dataDir), config.jwtSecret, allowCreate)
+
+proc engineApiUrls*(config: LightClientConf): seq[EngineApiUrl] =
+  (config.elUrls & config.web3Urls).toFinalEngineApiUrls(config.jwtSecret)
diff --git a/beacon_chain/consensus_object_pools/consensus_manager.nim b/beacon_chain/consensus_object_pools/consensus_manager.nim
index d14e5c5218..93d310118f 100644
--- a/beacon_chain/consensus_object_pools/consensus_manager.nim
+++ b/beacon_chain/consensus_object_pools/consensus_manager.nim
@@ -11,10 +11,11 @@ else:
   {.push raises: [].}
 
 import
-  chronicles, chronos,
+  chronicles, chronos, web3/[ethtypes, engine_api_types],
   ../spec/datatypes/base,
   ../consensus_object_pools/[blockchain_dag, block_quarantine, attestation_pool],
-  ../eth1/eth1_monitor
+  ../eth1/eth1_monitor,
+  ../beacon_clock
 
 from ../spec/eth2_apis/dynamic_fee_recipients import
   DynamicFeeRecipientsStore, getDynamicFeeRecipient
@@ -23,14 +24,6 @@ from ../validators/keystore_management import
 from ../validators/action_tracker import ActionTracker, getNextProposalSlot
 
 type
-  ForkChoiceUpdatedInformation* = object
-    payloadId*: PayloadID
-    headBlockRoot*: Eth2Digest
-    safeBlockRoot*: Eth2Digest
-    finalizedBlockRoot*: Eth2Digest
-    timestamp*: uint64
-    feeRecipient*: Eth1Address
-
   ConsensusManager* = object
     expectedSlot: Slot
     expectedBlockReceived: Future[bool]
@@ -46,7 +39,7 @@ type
     # Execution layer integration
     # ----------------------------------------------------------------
-    eth1Monitor*: Eth1Monitor
+    elManager*: ELManager
 
     # Allow determination of whether there's an upcoming proposal
     # ----------------------------------------------------------------
@@ -60,7 +53,6 @@ type
     # Tracking last proposal forkchoiceUpdated payload information
     # ----------------------------------------------------------------
-    forkchoiceUpdatedInfo*: Opt[ForkchoiceUpdatedInformation]
     optimisticHead: tuple[bid: BlockId, execution_block_hash: Eth2Digest]
 
 # Initialization
@@ -70,7 +62,7 @@ func new*(T: type ConsensusManager,
           dag: ChainDAGRef,
           attestationPool: ref AttestationPool,
           quarantine: ref Quarantine,
-          eth1Monitor: Eth1Monitor,
+          elManager: ELManager,
           actionTracker: ActionTracker,
           dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore,
           validatorsDir: string,
@@ -80,11 +72,10 @@ func new*(T: type ConsensusManager,
     dag: dag,
     attestationPool: attestationPool,
     quarantine: quarantine,
-    eth1Monitor: eth1Monitor,
+    elManager: elManager,
     actionTracker: actionTracker,
     dynamicFeeRecipientsStore: dynamicFeeRecipientsStore,
     validatorsDir: validatorsDir,
-    forkchoiceUpdatedInfo: Opt.none ForkchoiceUpdatedInformation,
     defaultFeeRecipient: defaultFeeRecipient
   )
 
@@ -121,7 +112,8 @@ proc expectBlock*(self: var ConsensusManager, expectedSlot: Slot): Future[bool]
 from eth/async_utils import awaitWithTimeout
 from web3/engine_api_types import
-  ForkchoiceUpdatedResponse, PayloadExecutionStatus, PayloadStatusV1
+  ForkchoiceUpdatedResponse,
+  PayloadExecutionStatus, PayloadStatusV1, PayloadAttributesV1
 
 func `$`(h: BlockHash): string =
   $h.asEth2Digest
@@ -143,8 +135,6 @@ func shouldSyncOptimistically*(
   true
 
 func shouldSyncOptimistically*(self: ConsensusManager, wallSlot: Slot): bool =
-  if self.eth1Monitor == nil:
-    return false
   if self.optimisticHead.execution_block_hash.isZero:
     return false
 
@@ -164,68 +154,8 @@ func setOptimisticHead*(
     bid: BlockId, execution_block_hash: Eth2Digest) =
   self.optimisticHead = (bid: bid, execution_block_hash: execution_block_hash)
 
-proc runForkchoiceUpdated*(
-    eth1Monitor: Eth1Monitor,
-    headBlockRoot, safeBlockRoot, finalizedBlockRoot: Eth2Digest):
-    Future[(PayloadExecutionStatus, Option[BlockHash])] {.async.} =
-  # Allow finalizedBlockRoot to be 0 to avoid sync deadlocks.
-  #
-  # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#pos-events
-  # has "Before the first finalized block occurs in the system the finalized
-  # block hash provided by this event is stubbed with
-  # `0x0000000000000000000000000000000000000000000000000000000000000000`."
-  # and
-  # https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/bellatrix/validator.md#executionpayload
-  # notes "`finalized_block_hash` is the hash of the latest finalized execution
-  # payload (`Hash32()` if none yet finalized)"
-  doAssert not headBlockRoot.isZero
-
-  try:
-    # Minimize window for Eth1 monitor to shut down connection
-    await eth1Monitor.ensureDataProvider()
-
-    let fcuR = awaitWithTimeout(
-      forkchoiceUpdated(
-        eth1Monitor, headBlockRoot, safeBlockRoot, finalizedBlockRoot),
-      FORKCHOICEUPDATED_TIMEOUT):
-        debug "runForkchoiceUpdated: forkchoiceUpdated timed out",
-          headBlockRoot = shortLog(headBlockRoot),
-          safeBlockRoot = shortLog(safeBlockRoot),
-          finalizedBlockRoot = shortLog(finalizedBlockRoot)
-        ForkchoiceUpdatedResponse(
-          payloadStatus: PayloadStatusV1(
-            status: PayloadExecutionStatus.syncing))
-
-    debug "runForkchoiceUpdated: ran forkchoiceUpdated",
-      headBlockRoot, safeBlockRoot, finalizedBlockRoot,
-      payloadStatus = $fcuR.payloadStatus.status,
-      latestValidHash = $fcuR.payloadStatus.latestValidHash,
-      validationError = $fcuR.payloadStatus.validationError
-
-    return (fcuR.payloadStatus.status, fcuR.payloadStatus.latestValidHash)
-  except CatchableError as err:
-    warn "forkchoiceUpdated failed - check execution client",
-      err = err.msg,
-      headBlockRoot = shortLog(headBlockRoot),
-      safeBlockRoot = shortLog(safeBlockRoot),
-      finalizedBlockRoot = shortLog(finalizedBlockRoot)
-    return (PayloadExecutionStatus.syncing, none BlockHash)
-
-proc runForkchoiceUpdatedDiscardResult*(
-    eth1Monitor: Eth1Monitor,
-    headBlockHash, safeBlockHash, finalizedBlockHash: Eth2Digest) {.async.} =
-  discard await eth1Monitor.runForkchoiceUpdated(
-    headBlockHash, safeBlockHash, finalizedBlockHash)
-
-from ../beacon_clock import GetBeaconTimeFn
-from ../fork_choice/fork_choice import mark_root_invalid
-
-proc updateExecutionClientHead(
-    self: ref ConsensusManager, newHead: BeaconHead):
-    Future[Opt[void]] {.async.} =
-  if self.eth1Monitor.isNil:
-    return Opt[void].ok()
-
+proc updateExecutionClientHead(self: ref ConsensusManager,
+                               newHead: BeaconHead): Future[Opt[void]] {.async.} =
   let headExecutionPayloadHash = self.dag.loadExecutionBlockRoot(newHead.blck)
 
   if headExecutionPayloadHash.isZero:
@@ -235,7 +165,7 @@ proc updateExecutionClientHead(
 
   # Can't use dag.head here because it hasn't been updated yet
   let (payloadExecutionStatus, latestValidHash) =
-    await self.eth1Monitor.runForkchoiceUpdated(
+    await self.elManager.forkchoiceUpdated(
       headExecutionPayloadHash,
       newHead.safeExecutionPayloadHash,
       newHead.finalizedExecutionPayloadHash)
@@ -341,6 +271,7 @@ proc getFeeRecipient*(
     self.defaultFeeRecipient
 
 from ../spec/datatypes/bellatrix import PayloadID
+from ../spec/state_transition_block import get_expected_withdrawals
 
 proc runProposalForkchoiceUpdated*(
     self: ref ConsensusManager, wallSlot: Slot) {.async.} =
@@ -366,30 +297,32 @@ proc runProposalForkchoiceUpdated*(
   if headBlockRoot.isZero:
     return
 
+  let
+    payloadAttributes = withState(self.dag.headState):
+      when stateFork >= BeaconStateFork.Capella:
+        ForkedPayloadAttributes(
+          kind: ForkedPayloadAttributesKind.v2,
+          v2: PayloadAttributesV2(
+            timestamp: Quantity timestamp,
+            prevRandao: FixedBytes[32] randomData,
+            suggestedFeeRecipient: feeRecipient,
+            withdrawals:
+              toEngineWithdrawals get_expected_withdrawals(forkyState.data)))
+      else:
+        ForkedPayloadAttributes(
+          kind: ForkedPayloadAttributesKind.v1,
+          v1: PayloadAttributesV1(
+            timestamp: Quantity timestamp,
+            prevRandao: FixedBytes[32] randomData,
+            suggestedFeeRecipient: feeRecipient))
   try:
-    let fcResult = awaitWithTimeout(
-      forkchoiceUpdated(
-        self.eth1Monitor,
+    let
+      safeBlockRoot = beaconHead.safeExecutionPayloadHash
+      (status, _) = await self.elManager.forkchoiceUpdated(
         headBlockRoot,
-        beaconHead.safeExecutionPayloadHash,
+        safeBlockRoot,
         beaconHead.finalizedExecutionPayloadHash,
-        timestamp, randomData, feeRecipient),
-      FORKCHOICEUPDATED_TIMEOUT):
-        debug "runProposalForkchoiceUpdated: forkchoiceUpdated timed out"
-        ForkchoiceUpdatedResponse(
-          payloadStatus: PayloadStatusV1(status: PayloadExecutionStatus.syncing))
-
-    if fcResult.payloadStatus.status != PayloadExecutionStatus.valid or
-       fcResult.payloadId.isNone:
-      return
-
-    self.forkchoiceUpdatedInfo = Opt.some ForkchoiceUpdatedInformation(
-      payloadId: bellatrix.PayloadID(fcResult.payloadId.get),
-      headBlockRoot: headBlockRoot,
-      safeBlockRoot: beaconHead.safeExecutionPayloadHash,
-      finalizedBlockRoot: beaconHead.finalizedExecutionPayloadHash,
-      timestamp: timestamp,
-      feeRecipient: feeRecipient)
+        payloadAttributes = payloadAttributes)
+    debug "forkchoice updated for proposal", status
  except CatchableError as err:
    error "Engine API fork-choice update failed", err = err.msg
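Note that the removed ForkchoiceUpdatedInformation cache has no replacement inside ConsensusManager: with this diff, each ELConnection remembers its own lastPayloadId (see eth1_monitor.nim below), so proposal preparation reduces to a single call whose payload attributes are fork-dependent. A condensed sketch, assuming this diff's types:

  # Pre-Capella proposals announce PayloadAttributesV1; Capella and later
  # must also announce the expected withdrawals via PayloadAttributesV2.
  let (status, _) = await self.elManager.forkchoiceUpdated(
    headBlockRoot,
    beaconHead.safeExecutionPayloadHash,
    beaconHead.finalizedExecutionPayloadHash,
    payloadAttributes = payloadAttributes)  # ForkedPayloadAttributes, v1 or v2
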
diff --git a/beacon_chain/eth1/deposit_contract.nim b/beacon_chain/eth1/deposit_contract.nim
index 77d0d99b98..7343aa2eb1 100644
--- a/beacon_chain/eth1/deposit_contract.nim
+++ b/beacon_chain/eth1/deposit_contract.nim
@@ -212,6 +212,13 @@ proc main() {.async.} =
       mnemonic = generateMnemonic(rng[])
       seed = getSeed(mnemonic, KeystorePass.init "")
       cfg = getRuntimeConfig(conf.eth2Network)
+      threshold = if conf.remoteSignersUrls.len > 0: conf.threshold
+                  else: 0
+
+    if conf.remoteValidatorsCount > 0 and
+       conf.remoteSignersUrls.len == 0:
+      fatal "Please specify at least one remote signer URL"
+      quit 1
 
     if (let res = secureCreatePath(string conf.outValidatorsDir); res.isErr):
       warn "Could not create validators folder",
@@ -229,7 +236,7 @@ proc main() {.async.} =
       string conf.outValidatorsDir,
       string conf.outSecretsDir,
       conf.remoteSignersUrls,
-      conf.threshold,
+      threshold,
      conf.remoteValidatorsCount,
       KeystoreMode.Fast)
 
diff --git a/beacon_chain/eth1/el_conf.nim b/beacon_chain/eth1/el_conf.nim
new file mode 100644
index 0000000000..25e821d24e
--- /dev/null
+++ b/beacon_chain/eth1/el_conf.nim
@@ -0,0 +1,254 @@
+import
+  std/[options, strutils, uri],
+  stew/results, chronicles, confutils,
+  json_serialization, # for logging
+  toml_serialization, toml_serialization/lexer,
+  ../spec/engine_authentication
+
+type
+  EngineApiRole* = enum
+    DepositSyncing = "sync-deposits"
+    BlockValidation = "validate-blocks"
+    BlockProduction = "produce-blocks"
+
+  EngineApiRoles* = set[EngineApiRole]
+
+  EngineApiUrl* = object
+    url: string
+    jwtSecret: Option[seq[byte]]
+    roles: EngineApiRoles
+
+  EngineApiUrlConfigValue* = object
+    url*: string # TODO: Use the URI type here
+    jwtSecret*: Option[string]
+    jwtSecretFile*: Option[InputFile]
+    roles*: Option[EngineApiRoles]
+
+const
+  defaultEngineApiRoles* = { DepositSyncing, BlockValidation, BlockProduction }
+
+chronicles.formatIt EngineApiUrl:
+  it.url
+
+proc init*(T: type EngineApiUrl,
+           url: string,
+           jwtSecret = none seq[byte],
+           roles = defaultEngineApiRoles): T =
+  T(url: url, jwtSecret: jwtSecret, roles: roles)
+
+func url*(engineUrl: EngineApiUrl): string =
+  engineUrl.url
+
+func jwtSecret*(engineUrl: EngineApiUrl): Option[seq[byte]] =
+  engineUrl.jwtSecret
+
+func roles*(engineUrl: EngineApiUrl): EngineApiRoles =
+  engineUrl.roles
+
+func unknownRoleMsg(role: string): string =
+  "'" & role & "' is not a valid EL function"
+
+template raiseError(reader: var TomlReader, msg: string) =
+  raiseTomlErr(reader.lex, msg)
+
+template raiseError(reader: var JsonReader, msg: string) =
+  raiseTomlErr(reader.lex, msg)
+
+proc readValue*(reader: var TomlReader, value: var EngineApiRoles)
+               {.raises: [Defect, SerializationError, IOError].} =
+  let roles = reader.readValue seq[string]
+  if roles.len == 0:
+    reader.raiseError "At least one role should be provided"
+  for role in roles:
+    case role.toLowerAscii
+    of $DepositSyncing:
+      value.incl DepositSyncing
+    of $BlockValidation:
+      value.incl BlockValidation
+    of $BlockProduction:
+      value.incl BlockProduction
+    else:
+      reader.raiseError(unknownRoleMsg role)
+
+proc writeValue*(writer: var JsonWriter, roles: EngineApiRoles)
+                {.raises: [Defect, SerializationError, IOError].} =
+  var strRoles: seq[string]
+
+  for role in EngineApiRole:
+    if role in roles: strRoles.add $role
+
+  writer.writeValue strRoles
+
+# TODO
+# Remove this once we drop support for Nim 1.2
+# `decodeQuery` was introduced in Nim 1.4
+when not declared(decodeQuery):
+  # TODO
+  # This is a verbatim copy of the iterator from Nim's std library.
+  # We can remove it from the code once we stop supporting Nim 1.2.
+  iterator decodeQuery*(data: string, sep = '&'): tuple[key, value: string] =
+    ## Reads and decodes the query string `data` and yields the `(key, value)` pairs
+    ## the data consists of. If compiled with `-d:nimLegacyParseQueryStrict`,
+    ## a `UriParseError` is raised when there is an unencoded `=` character in a decoded
+    ## value, which was the behavior in Nim < 1.5.1.
+    runnableExamples:
+      import std/sequtils
+      assert toSeq(decodeQuery("foo=1&bar=2=3")) == @[("foo", "1"), ("bar", "2=3")]
+      assert toSeq(decodeQuery("foo=1;bar=2=3", ';')) == @[("foo", "1"), ("bar", "2=3")]
+      assert toSeq(decodeQuery("&a&=b&=&&")) == @[("", ""), ("a", ""), ("", "b"), ("", ""), ("", "")]
+
+    proc handleHexChar(c: char, x: var int): bool {.inline.} =
+      ## Converts `%xx` hexadecimal to the ordinal number and adds the result to `x`.
+      ## Returns `true` if `c` is hexadecimal.
+      ##
+      ## When `c` is hexadecimal, the proc is equal to `x = x shl 4 + hex2Int(c)`.
+      runnableExamples:
+        var x = 0
+        assert handleHexChar('a', x)
+        assert x == 10
+
+        assert handleHexChar('B', x)
+        assert x == 171 # 10 shl 4 + 11
+
+        assert not handleHexChar('?', x)
+        assert x == 171 # unchanged
+      result = true
+      case c
+      of '0'..'9': x = (x shl 4) or (ord(c) - ord('0'))
+      of 'a'..'f': x = (x shl 4) or (ord(c) - ord('a') + 10)
+      of 'A'..'F': x = (x shl 4) or (ord(c) - ord('A') + 10)
+      else:
+        result = false
+
+    proc decodePercent(s: openArray[char], i: var int): char =
+      ## Converts `%xx` hexadecimal to the character with ordinal number `xx`.
+      ##
+      ## If `xx` is not a valid hexadecimal value, it is left intact: only the
+      ## leading `%` is returned as-is, and `xx` characters will be processed in the
+      ## next step (e.g. in `uri.decodeUrl`) as regular characters.
+      result = '%'
+      if i+2 < s.len:
+        var x = 0
+        if handleHexChar(s[i+1], x) and handleHexChar(s[i+2], x):
+          result = chr(x)
+          inc(i, 2)
+
+    proc parseData(data: string, i: int, field: var string, sep: char): int =
+      result = i
+      while result < data.len:
+        let c = data[result]
+        case c
+        of '%': add(field, decodePercent(data, result))
+        of '+': add(field, ' ')
+        of '&': break
+        else:
+          if c == sep: break
+          else: add(field, data[result])
+        inc(result)
+
+    var i = 0
+    var name = ""
+    var value = ""
+    # decode everything in one pass:
+    while i < data.len:
+      setLen(name, 0) # reuse memory
+      i = parseData(data, i, name, '=')
+      setLen(value, 0) # reuse memory
+      if i < data.len and data[i] == '=':
+        inc(i) # skip '='
+        when defined(nimLegacyParseQueryStrict):
+          i = parseData(data, i, value, '=')
+        else:
+          i = parseData(data, i, value, sep)
+      yield (name, value)
+      if i < data.len:
+        when defined(nimLegacyParseQueryStrict):
+          if data[i] != '&':
+            uriParseError("'&' expected at index '$#' for '$#'" % [$i, data])
+        inc(i)
+
+proc parseCmdArg*(T: type EngineApiUrlConfigValue, input: string): T
+                 {.raises: [ValueError, Defect].} =
+  var
+    uri = parseUri(input)
+    jwtSecret: Option[string]
+    jwtSecretFile: Option[InputFile]
+    roles: Option[EngineApiRoles]
+
+  if uri.anchor != "":
+    for key, value in decodeQuery(uri.anchor):
+      case key
+      of "jwtSecret":
+        jwtSecret = some value
+      of "jwtSecretFile":
+        jwtSecretFile = some InputFile.parseCmdArg(value)
+      of "roles":
+        var uriRoles: EngineApiRoles = {}
+        for role in split(value, ","):
+          case role.toLowerAscii
+          of $DepositSyncing:
+            uriRoles.incl DepositSyncing
+          of $BlockValidation:
+            uriRoles.incl BlockValidation
+          of $BlockProduction:
+            uriRoles.incl BlockProduction
+          else:
+            raise newException(ValueError, unknownRoleMsg role)
+        if uriRoles == {}:
+          raise newException(ValueError, "The list of roles should not be empty")
+        roles = some uriRoles
+      else:
+        raise newException(ValueError, "'" & key & "' is not a recognized Engine URL property")
+    uri.anchor = ""
+
+  EngineApiUrlConfigValue(
+    url: $uri,
+    jwtSecret: jwtSecret,
+    jwtSecretFile: jwtSecretFile,
+    roles: roles)
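The URL-anchor syntax parsed above lets a single --el value carry per-connection settings. A usage sketch (hypothetical URL and file name), assuming the parser above:

  let cfg = EngineApiUrlConfigValue.parseCmdArg(
    "http://127.0.0.1:8551#jwtSecretFile=jwt.hex&roles=produce-blocks,validate-blocks")
  doAssert cfg.url == "http://127.0.0.1:8551"   # the anchor is stripped
  doAssert cfg.jwtSecretFile.isSome             # InputFile "jwt.hex"
  doAssert cfg.roles == some {BlockValidation, BlockProduction}
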
+proc toFinalUrl*(confValue: EngineApiUrlConfigValue,
+                 defaultJwtSecret: Option[seq[byte]]): Result[EngineApiUrl, cstring] =
+  if confValue.jwtSecret.isSome and confValue.jwtSecretFile.isSome:
+    return err "The options `jwtSecret` and `jwtSecretFile` should not be specified together"
+
+  let jwtSecret = if confValue.jwtSecret.isSome:
+    some(? parseJwtTokenValue(confValue.jwtSecret.get))
+  elif confValue.jwtSecretFile.isSome:
+    some(? loadJwtSecretFile(confValue.jwtSecretFile.get))
+  else:
+    defaultJwtSecret
+
+  ok EngineApiUrl.init(
+    url = confValue.url,
+    jwtSecret = jwtSecret,
+    roles = confValue.roles.get(defaultEngineApiRoles))
+
+proc loadJwtSecret*(jwtSecret: Option[InputFile]): Option[seq[byte]] =
+  if jwtSecret.isSome:
+    let res = loadJwtSecretFile(jwtSecret.get)
+    if res.isOk:
+      some res.value
+    else:
+      fatal "Failed to load JWT secret file", err = res.error
+      quit 1
+  else:
+    none seq[byte]
+
+proc toFinalEngineApiUrls*(elUrls: seq[EngineApiUrlConfigValue],
+                           defaultJwtSecret: Option[InputFile]): seq[EngineApiUrl] =
+  let jwtSecret = loadJwtSecret defaultJwtSecret
+
+  for elUrl in elUrls:
+    let engineApiUrl = elUrl.toFinalUrl(jwtSecret).valueOr:
+      fatal "Invalid EL configuration", err = error
+      quit 1
+    result.add engineApiUrl
+
+proc fixupWeb3Urls*(web3Url: var string) =
+  var normalizedUrl = toLowerAscii(web3Url)
+  if not (normalizedUrl.startsWith("https://") or
+          normalizedUrl.startsWith("http://") or
+          normalizedUrl.startsWith("wss://") or
+          normalizedUrl.startsWith("ws://")):
+    warn "The Web3 URL does not specify a protocol. Assuming a WebSocket server", web3Url
+    web3Url = "ws://" & web3Url
diff --git a/beacon_chain/eth1/eth1_monitor.nim b/beacon_chain/eth1/eth1_monitor.nim
index 3b4422655a..2eeeae6f23 100644
--- a/beacon_chain/eth1/eth1_monitor.nim
+++ b/beacon_chain/eth1/eth1_monitor.nim
@@ -15,22 +15,23 @@ import
   typetraits, uri, json],
   # Nimble packages:
   chronos, metrics, chronicles/timings, stint/endians2,
-  web3, web3/ethtypes as web3Types, web3/ethhexstrings, web3/engine_api,
-  eth/common/eth_types,
+  json_rpc/client,
+  web3, web3/ethhexstrings, web3/engine_api,
+  eth/common/[eth_types, transaction],
   eth/async_utils, stew/[byteutils, objects, results, shims/hashes],
   # Local modules:
   ../spec/[deposit_snapshots, eth2_merkleization, forks, helpers],
   ../spec/datatypes/[base, phase0, bellatrix],
   ../networking/network_metadata,
   ../consensus_object_pools/block_pools_types,
-  ".."/[beacon_chain_db, beacon_node_status, beacon_clock],
-  ./merkle_minimal
+  ".."/[beacon_chain_db, beacon_node_status, beacon_clock, future_combinators],
+  "."/[merkle_minimal, el_conf]
 
 from std/times import getTime, inSeconds, initTime, `-`
 from ../spec/engine_authentication import getSignedIatToken
 
 export
-  web3Types, deques, base, DepositTreeSnapshot
+  el_conf, engine_api, deques, base, DepositTreeSnapshot
 
 logScope:
   topics = "eth1"
@@ -57,16 +58,22 @@ contract(DepositContract):
                     index: Int64LeBytes) {.event.}
 
 const
-  web3Timeouts = 60.seconds
   hasDepositRootChecks = defined(has_deposit_root_checks)
   hasGenesisDetection* = defined(has_genesis_detection)
 
   targetBlocksPerLogsRequest = 5000'u64 # This is roughly a day of Eth1 blocks
 
+  # Engine API timeouts
+  engineApiConnectionTimeout = 5.seconds # How long we wait before giving up on connecting to the Engine API
+  web3RequestsTimeout* = 8.seconds # How long we wait for eth_* requests (e.g. eth_getBlockByHash)
+
+  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#request-2
+  GETPAYLOAD_TIMEOUT = 1.seconds
+
 type
   Eth1BlockNumber* = uint64
   Eth1BlockTimestamp* = uint64
-  Eth1BlockHeader = web3Types.BlockHeader
+  Eth1BlockHeader = engine_api.BlockHeader
   GenesisStateRef = ref phase0.BeaconState
@@ -109,53 +116,104 @@ type
     hasConsensusViolation: bool
       ## The local chain contradicts the observed consensus on the network
 
-  Eth1MonitorState = enum
-    Initialized
-    Started
-    ReadyToRestartToPrimary
-    Failed
-    Stopping
-    Stopped
-
-  Eth1Monitor* = ref object
-    state: Eth1MonitorState
-    startIdx: int
-    web3Urls: seq[string]
+  ForkedPayloadAttributesKind* {.pure.} = enum
+    v1
+    v2
+
+  ForkedPayloadAttributes* = ref object
+    case kind*: ForkedPayloadAttributesKind
+    of ForkedPayloadAttributesKind.v1:
+      v1*: PayloadAttributesV1
+    of ForkedPayloadAttributesKind.v2:
+      v2*: PayloadAttributesV2
+
+  NextExpectedPayloadParams* = object
+    headBlockRoot*: Eth2Digest
+    safeBlockRoot*: Eth2Digest
+    finalizedBlockRoot*: Eth2Digest
+    payloadAttributes: ForkedPayloadAttributes
+
+  ELManager* = ref object
     eth1Network: Option[Eth1Network]
+      ## If this value is supplied, the EL monitor will check whether
+      ## all configured EL nodes are connected to the same network.
+
+    getBeaconTime: GetBeaconTimeFn
+      ## The current beacon time is used for deciding when to activate
+      ## the TTD monitoring and the transition configuration exchange.
+      ## TODO We may be able to remove these features once the Gnosis
+      ##      network has transitioned through the merge in Dec 2023.
+
     depositContractAddress*: Eth1Address
-    depositContractDeployedAt: BlockHashOrNumber
-    forcePolling: bool
-    jwtSecret: Option[seq[byte]]
+    depositContractBlockNumber: uint64
+    depositContractBlockHash: BlockHash
 
+    blocksPerLogsRequest: uint64
+      ## This value is used to dynamically adjust the number of
+      ## blocks we are trying to download at once during deposit
+      ## syncing. By default, the value is set to the constant
+      ## `targetBlocksPerLogsRequest`, but if the EL is failing
+      ## to serve this number of blocks per single `eth_getLogs`
+      ## request, we temporarily lower the value until the request
+      ## succeeds. Failures are generally expected only for periods
+      ## of history with very high deposit density.
+
+    elConnections: seq[ELConnection]
+      ## All active EL connections
 
-    dataProvider: Web3DataProviderRef
-    latestEth1Block: Option[FullBlockId]
+    depositSyncConnectionIdx: int
+      ## The current connection used for deposit syncing
 
-    depositsChain: Eth1Chain
-    eth1Progress: AsyncEvent
+    eth1Chain: Eth1Chain
+      ##
 
-    exchangedConfiguration*: bool
+    syncTargetBlock: Option[Eth1BlockNumber]
 
     terminalBlockHash*: Option[BlockHash]
 
-    runFut: Future[void]
+    chainSyncingLoopFut: Future[void]
+    exchangeTransitionConfigurationLoopFut: Future[void]
     stopFut: Future[void]
-    getBeaconTime: GetBeaconTimeFn
 
     ttdReachedField: bool
 
+    nextExpectedPayloadParams*: Option[NextExpectedPayloadParams]
+
     when hasGenesisDetection:
       genesisValidators: seq[ImmutableValidatorData]
       genesisValidatorKeyToIndex: Table[ValidatorPubKey, ValidatorIndex]
       genesisState: GenesisStateRef
       genesisStateFut: Future[void]
 
-  Web3DataProvider* = object
-    url: string
-    web3: Web3
-    ns: Sender[DepositContract]
-    blockHeadersSubscription: Subscription
+  EtcStatus {.pure.} = enum
+    notExchangedYet
+    exchangeError
+    mismatch
+    match
+
+  DepositContractSyncStatus {.pure.} = enum
+    unknown
+    notSynced
+    synced
+
+  ELConnection* = ref object
+    engineUrl: EngineApiUrl
+
+    web3: Option[Web3]
+      ## This will be `none` before connecting and while we are
+      ## reconnecting after a lost connection. You can wait on
+      ## the future below for the moment the connection is active.
 
-  Web3DataProviderRef* = ref Web3DataProvider
+    connectingFut: Future[Result[Web3, string]]
+      ## This future will be replaced when the connection is lost.
+
+    etcStatus: EtcStatus
+      ## The latest status of the `exchangeTransitionConfiguration`
+      ## exchange.
+
+    depositContractSyncStatus: DepositContractSyncStatus
+      ## Are we sure that this EL has synced the deposit contract?
+
+    lastPayloadId: Option[engine_api.PayloadID]
 
   FullBlockId* = object
     number: Eth1BlockNumber
@@ -198,44 +256,76 @@ declareGauge eth1_finalized_deposits,
 declareGauge eth1_chain_len,
   "The length of the in-memory chain of Eth1 blocks"
 
-func ttdReached*(m: Eth1Monitor): bool =
+declareCounter engine_newPayload_failures,
+  "Number of failed requests to the newPayload Engine API end-point", labels = ["url"]
+
+declareCounter engine_newPayload_sent,
+  "Number of successful requests to the newPayload Engine API end-point",
+  labels = ["url", "status"]
+
+declareCounter engine_forkchoiceUpdated_failures,
+  "Number of failed requests to the forkchoiceUpdated Engine API end-point", labels = ["url"]
+
+declareCounter engine_forkchoiceUpdated_sent,
+  "Number of successful requests to the forkchoiceUpdated Engine API end-point",
+  labels = ["url", "status"]
+
+template awaitOrRaiseOnTimeout[T](fut: Future[T],
+                                  timeout: Duration): T =
+  awaitWithTimeout(fut, timeout):
+    raise newException(DataProviderTimeout, "Timeout")
+
+func ttdReached*(m: ELManager): bool =
   m.ttdReachedField
 
-template cfg(m: Eth1Monitor): auto =
-  m.depositsChain.cfg
+template cfg(m: ELManager): auto =
+  m.eth1Chain.cfg
+
+template db(m: ELManager): BeaconChainDB =
+  m.eth1Chain.db
+
+func hasJwtSecret*(m: ELManager): bool =
+  for c in m.elConnections:
+    if c.engineUrl.jwtSecret.isSome:
+      return true
+
+func isSynced*(m: ELManager): bool =
+  m.syncTargetBlock.isSome and
+  m.eth1Chain.blocks.len > 0 and
+  m.syncTargetBlock.get <= m.eth1Chain.blocks[^1].number
 
 when hasGenesisDetection:
   import ../spec/[beaconstate, signatures]
 
-  template hasEnoughValidators(m: Eth1Monitor, blk: Eth1Block): bool =
+  template hasEnoughValidators(m: ELManager, blk: Eth1Block): bool =
     blk.activeValidatorsCount >= m.cfg.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
 
-  func chainHasEnoughValidators(m: Eth1Monitor): bool =
-    m.depositsChain.blocks.len > 0 and m.hasEnoughValidators(m.depositsChain.blocks[^1])
+  func chainHasEnoughValidators(m: ELManager): bool =
+    m.eth1Chain.blocks.len > 0 and m.hasEnoughValidators(m.eth1Chain.blocks[^1])
 
-  func isAfterMinGenesisTime(m: Eth1Monitor, blk: Eth1Block): bool =
+  func isAfterMinGenesisTime(m: ELManager, blk: Eth1Block): bool =
     doAssert blk.timestamp != 0
     let t = genesis_time_from_eth1_timestamp(m.cfg, uint64 blk.timestamp)
     t >= m.cfg.MIN_GENESIS_TIME
 
-  func isGenesisCandidate(m: Eth1Monitor, blk: Eth1Block): bool =
+  func isGenesisCandidate(m: ELManager, blk: Eth1Block): bool =
     m.hasEnoughValidators(blk) and m.isAfterMinGenesisTime(blk)
 
-  proc findGenesisBlockInRange(m: Eth1Monitor, startBlock, endBlock: Eth1Block):
+  proc findGenesisBlockInRange(m: ELManager, startBlock, endBlock: Eth1Block):
     Future[Eth1Block] {.gcsafe.}
 
-  proc signalGenesis(m: Eth1Monitor, genesisState: GenesisStateRef) =
+  proc signalGenesis(m: ELManager, genesisState: GenesisStateRef) =
     m.genesisState = genesisState
 
     if not m.genesisStateFut.isNil:
       m.genesisStateFut.complete()
       m.genesisStateFut = nil
 
-  func allGenesisDepositsUpTo(m: Eth1Monitor, totalDeposits: uint64): seq[DepositData] =
+  func allGenesisDepositsUpTo(m: ELManager, totalDeposits: uint64): seq[DepositData] =
    for i in 0 ..< int64(totalDeposits):
-      result.add m.depositsChain.db.genesisDeposits.get(i)
+      result.add m.db.genesisDeposits.get(i)
 
-  proc createGenesisState(m: Eth1Monitor, eth1Block: Eth1Block): GenesisStateRef =
+  proc createGenesisState(m: ELManager, eth1Block: Eth1Block): GenesisStateRef =
     notice "Generating genesis state",
       blockNum = eth1Block.number,
      blockHash = eth1Block.hash,
@@ -254,7 +344,7 @@ when hasGenesisDetection:
     if eth1Block.activeValidatorsCount != 0:
       doAssert result.validators.lenu64 == eth1Block.activeValidatorsCount
 
-  proc produceDerivedData(m: Eth1Monitor, deposit: DepositData) =
+  proc produceDerivedData(m: ELManager, deposit: DepositData) =
     let htr = hash_tree_root(deposit)
 
     if verify_deposit_signature(m.cfg, deposit):
@@ -266,27 +356,18 @@ when hasGenesisDetection:
         withdrawal_credentials: deposit.withdrawal_credentials)
       m.genesisValidatorKeyToIndex[pubkey] = idx
 
-  proc processGenesisDeposit*(m: Eth1Monitor, newDeposit: DepositData) =
-    m.depositsChain.db.genesisDeposits.add newDeposit
+  proc processGenesisDeposit*(m: ELManager, newDeposit: DepositData) =
+    m.db.genesisDeposits.add newDeposit
     m.produceDerivedData(newDeposit)
 
-template depositChainBlocks*(m: Eth1Monitor): Deque[Eth1Block] =
-  m.depositsChain.blocks
-
-template finalizedDepositsMerkleizer(m: Eth1Monitor): auto =
-  m.depositsChain.finalizedDepositsMerkleizer
+template eth1ChainBlocks*(m: ELManager): Deque[Eth1Block] =
+  m.eth1Chain.blocks
 
-template headMerkleizer(m: Eth1Monitor): auto =
-  m.depositsChain.headMerkleizer
+template finalizedDepositsMerkleizer(m: ELManager): auto =
+  m.eth1Chain.finalizedDepositsMerkleizer
 
-proc fixupWeb3Urls*(web3Url: var string) =
-  var normalizedUrl = toLowerAscii(web3Url)
-  if not (normalizedUrl.startsWith("https://") or
-          normalizedUrl.startsWith("http://") or
-          normalizedUrl.startsWith("wss://") or
-          normalizedUrl.startsWith("ws://")):
-    warn "The Web3 URL does not specify a protocol. Assuming a WebSocket server", web3Url
-    web3Url = "ws://" & web3Url
+template headMerkleizer(m: ELManager): auto =
+  m.eth1Chain.headMerkleizer
 
 template toGaugeValue(x: Quantity): int64 =
   toGaugeValue(distinctBase x)
@@ -349,6 +430,7 @@ func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV1):
     mapIt(rpcExecutionPayload.transactions, it.getTransaction)))
 
 from ../spec/datatypes/capella import ExecutionPayload, Withdrawal
+from ../spec/datatypes/eip4844 import ExecutionPayload
 
 func asConsensusExecutionPayload*(rpcExecutionPayload: ExecutionPayloadV2):
     capella.ExecutionPayload =
@@ -406,17 +488,17 @@ func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
     blockHash: executionPayload.block_hash.asBlockHash,
     transactions: mapIt(executionPayload.transactions, it.getTypedTransaction))
 
-func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload):
-    ExecutionPayloadV2 =
-  template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
-    TypedTransaction(tt.distinctBase)
-  template getEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 =
+template toEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 =
   WithdrawalV1(
     index: Quantity(w.index),
     validatorIndex: Quantity(w.validator_index),
     address: Address(w.address.data),
     amount: w.amount.u256 * weiInGwei)
 
+func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload | eip4844.ExecutionPayload):
+    ExecutionPayloadV2 =
+  template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
+    TypedTransaction(tt.distinctBase)
   engine_api.ExecutionPayloadV2(
     parentHash: executionPayload.parent_hash.asBlockHash,
     feeRecipient: Address(executionPayload.fee_recipient.data),
@@ -434,7 +516,7 @@ func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload):
     baseFeePerGas: executionPayload.base_fee_per_gas,
     blockHash: executionPayload.block_hash.asBlockHash,
     transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
-    withdrawals: mapIt(executionPayload.withdrawals, it.getEngineWithdrawal))
+    withdrawals: mapIt(executionPayload.withdrawals, it.toEngineWithdrawal))
 
 func shortLog*(b: Eth1Block): string =
   try:
@@ -489,211 +571,575 @@ func toVoteData(blk: Eth1Block): Eth1Data =
 func hash*(x: Eth1Data): Hash =
   hash(x.block_hash)
 
-template awaitWithRetries*[T](lazyFutExpr: Future[T],
-                              retries = 3,
-                              timeout = web3Timeouts): untyped =
-  const
-    reqType = astToStr(lazyFutExpr)
-  var
-    retryDelayMs = 16000
-    f: Future[T]
-    attempts = 0
+proc close(connection: ELConnection): Future[void] {.async.} =
+  if connection.web3.isSome:
+    awaitWithTimeout(connection.web3.get.close(), 30.seconds):
+      debug "Failed to close data provider in time"
 
-  while true:
-    f = lazyFutExpr
-    yield f or sleepAsync(timeout)
-    if not f.finished:
-      await cancelAndWait(f)
-    elif f.failed:
-      when not (f.error of CatchableError):
-        static: doAssert false, "f.error not CatchableError"
-      debug "Web3 request failed", req = reqType, err = f.error.msg
-      inc failed_web3_requests
+proc isConnected(connection: ELConnection): bool =
+  connection.web3.isSome
+
+proc getJsonRpcRequestHeaders(jwtSecret: Option[seq[byte]]):
+    auto =
+  if jwtSecret.isSome:
+    let secret = jwtSecret.get
+    (proc(): seq[(string, string)] =
+      # https://www.rfc-editor.org/rfc/rfc6750#section-6.1.1
+      @[("Authorization", "Bearer " & getSignedIatToken(
+        secret, (getTime() - initTime(0, 0)).inSeconds))])
+  else:
+    (proc(): seq[(string, string)] = @[])
+
+proc newWeb3*(engineUrl: EngineApiUrl): Future[Web3] =
+  newWeb3(engineUrl.url, getJsonRpcRequestHeaders(engineUrl.jwtSecret))
+
+proc establishEngineApiConnection*(url: EngineApiUrl):
+    Future[Result[Web3, string]] {.async.} =
+  let web3Fut = newWeb3(url)
+  yield web3Fut or sleepAsync(engineApiConnectionTimeout)
+
+  if (not web3Fut.finished) or web3Fut.failed:
+    await cancelAndWait(web3Fut)
+    if web3Fut.failed:
+      return err "Failed to setup Engine API connection: " & web3Fut.readError.msg
     else:
-      break
+      return err "Failed to setup Engine API connection"
+  else:
+    return ok web3Fut.read
 
-    inc attempts
-    if attempts >= retries:
-      var errorMsg = reqType & " failed " & $retries & " times"
-      if f.failed: errorMsg &= ". Last error: " & f.error.msg
-      raise newException(DataProviderFailure, errorMsg)
+proc tryConnecting(connection: ELConnection): Future[bool] {.async.} =
+  if connection.isConnected:
+    return true
 
-    await sleepAsync(chronos.milliseconds(retryDelayMs))
-    retryDelayMs *= 2
+  if connection.connectingFut == nil:
+    connection.connectingFut = establishEngineApiConnection(connection.engineUrl)
 
-  read(f)
+  let web3Res = await connection.connectingFut
+  if web3Res.isErr:
+    return false
+  else:
+    connection.web3 = some web3Res.get
+    return true
 
-proc close(p: Web3DataProviderRef): Future[void] {.async.} =
-  if p.blockHeadersSubscription != nil:
-    try:
-      awaitWithRetries(p.blockHeadersSubscription.unsubscribe())
-    except CatchableError:
-      debug "Failed to clean up block headers subscription properly"
+proc connectedRpcClient(connection: ELConnection): Future[RpcClient] {.async.} =
+  while not connection.isConnected:
+    if not await connection.tryConnecting():
+      await sleepAsync(chronos.seconds(10))
 
-  awaitWithTimeout(p.web3.close(), 30.seconds):
-    debug "Failed to close data provider in time"
+  return connection.web3.get.provider
 
-proc getBlockByHash(p: Web3DataProviderRef, hash: BlockHash):
-    Future[BlockObject] =
-  return p.web3.provider.eth_getBlockByHash(hash, false)
+proc getBlockByHash(rpcClient: RpcClient, hash: BlockHash): Future[BlockObject] =
+  rpcClient.eth_getBlockByHash(hash, false)
 
-proc getBlockByNumber*(p: Web3DataProviderRef,
+proc getBlockByNumber*(rpcClient: RpcClient,
                        number: Eth1BlockNumber): Future[BlockObject] =
-  let hexNumber = try: &"0x{number:X}" # No leading 0's!
-  except ValueError as exc: raiseAssert exc.msg # Never fails
-  p.web3.provider.eth_getBlockByNumber(hexNumber, false)
-
-proc getPayloadV1*(
-    p: Eth1Monitor, payloadId: bellatrix.PayloadID):
-    Future[engine_api.ExecutionPayloadV1] =
-  # Eth1 monitor can recycle connections without (external) warning; at least,
-  # don't crash.
-  if p.isNil or p.dataProvider.isNil:
-    let epr = newFuture[engine_api.ExecutionPayloadV1]("getPayload")
-    epr.complete(default(engine_api.ExecutionPayloadV1))
-    return epr
-
-  p.dataProvider.web3.provider.engine_getPayloadV1(FixedBytes[8] payloadId)
-
-proc getPayloadV2*(
-    p: Eth1Monitor, payloadId: bellatrix.PayloadID):
-    Future[engine_api.ExecutionPayloadV2] =
-  # Eth1 monitor can recycle connections without (external) warning; at least,
-  # don't crash.
-  if p.isNil or p.dataProvider.isNil:
-    let epr = newFuture[engine_api.ExecutionPayloadV2]("getPayload")
-    epr.complete(default(engine_api.ExecutionPayloadV2))
-    return epr
-
-  p.dataProvider.web3.provider.engine_getPayloadV2(FixedBytes[8] payloadId)
-
-proc newPayload*(p: Eth1Monitor, payload: engine_api.ExecutionPayloadV1):
-    Future[PayloadStatusV1] =
-  # Eth1 monitor can recycle connections without (external) warning; at least,
-  # don't crash.
-  if p.dataProvider.isNil:
-    let epr = newFuture[PayloadStatusV1]("newPayload")
-    epr.complete(PayloadStatusV1(status: PayloadExecutionStatus.syncing))
-    return epr
-
-  p.dataProvider.web3.provider.engine_newPayloadV1(payload)
-
-proc newPayload*(p: Eth1Monitor, payload: engine_api.ExecutionPayloadV2):
-    Future[PayloadStatusV1] =
-  # Eth1 monitor can recycle connections without (external) warning; at least,
-  # don't crash.
-  if p.dataProvider.isNil:
-    let epr = newFuture[PayloadStatusV1]("newPayload")
-    epr.complete(PayloadStatusV1(status: PayloadExecutionStatus.syncing))
-    return epr
-
-  p.dataProvider.web3.provider.engine_newPayloadV2(payload)
-
-proc forkchoiceUpdated*(p: Eth1Monitor,
-                        headBlock, safeBlock, finalizedBlock: Eth2Digest):
-    Future[engine_api.ForkchoiceUpdatedResponse] =
-  # Eth1 monitor can recycle connections without (external) warning; at least,
-  # don't crash.
-  if p.isNil or p.dataProvider.isNil:
-    let fcuR =
-      newFuture[engine_api.ForkchoiceUpdatedResponse]("forkchoiceUpdated")
-    fcuR.complete(engine_api.ForkchoiceUpdatedResponse(
-      payloadStatus: PayloadStatusV1(status: PayloadExecutionStatus.syncing)))
-    return fcuR
-
-  p.dataProvider.web3.provider.engine_forkchoiceUpdatedV1(
-    ForkchoiceStateV1(
-      headBlockHash: headBlock.asBlockHash,
-      safeBlockHash: safeBlock.asBlockHash,
-      finalizedBlockHash: finalizedBlock.asBlockHash),
-    none(engine_api.PayloadAttributesV1))
+  let hexNumber = try:
+    &"0x{number:X}" # No leading 0's!
+  except ValueError as exc:
+    # Since the format above is valid, failing here should not be possible
+    raiseAssert exc.msg
+
+  rpcClient.eth_getBlockByNumber(hexNumber, false)
+
+proc getBlock(rpcClient: RpcClient, id: BlockHashOrNumber): Future[BlockObject] =
+  if id.isHash:
+    let hash = id.hash.asBlockHash()
+    return rpcClient.getBlockByHash(hash)
+  else:
+    return rpcClient.getBlockByNumber(id.number)
+
+func areSameAs(expectedParams: Option[NextExpectedPayloadParams],
+               latestHead, latestSafe, latestFinalized: Eth2Digest,
+               timestamp: uint64,
+               randomData: Eth2Digest,
+               feeRecipient: Eth1Address,
+               withdrawals: seq[WithdrawalV1]): bool =
+  if not(expectedParams.isSome and
+         expectedParams.get.headBlockRoot == latestHead and
+         expectedParams.get.safeBlockRoot == latestSafe and
+         expectedParams.get.finalizedBlockRoot == latestFinalized):
+    return false
+
+  if expectedParams.get.payloadAttributes == nil:
+    return false
+
+  case expectedParams.get.payloadAttributes.kind
+  of ForkedPayloadAttributesKind.v1:
+    expectedParams.get.payloadAttributes.v1.timestamp.uint64 == timestamp and
+    expectedParams.get.payloadAttributes.v1.prevRandao.bytes == randomData.data and
+    expectedParams.get.payloadAttributes.v1.suggestedFeeRecipient == feeRecipient and
+    withdrawals.len == 0
+  of ForkedPayloadAttributesKind.v2:
+    expectedParams.get.payloadAttributes.v2.timestamp.uint64 == timestamp and
+    expectedParams.get.payloadAttributes.v2.prevRandao.bytes == randomData.data and
+    expectedParams.get.payloadAttributes.v2.suggestedFeeRecipient == feeRecipient and
+    expectedParams.get.payloadAttributes.v2.withdrawals == withdrawals
+
+template makeForkedPayloadAttributes(
+    GetPayloadResponseType: type engine_api.ExecutionPayloadV1,
+    timestamp: uint64,
+    randomData: Eth2Digest,
+    suggestedFeeRecipient: Eth1Address,
+    withdrawals: seq[WithdrawalV1]): ForkedPayloadAttributes =
+  ForkedPayloadAttributes(
+    kind: ForkedPayloadAttributesKind.v1,
+    v1: engine_api.PayloadAttributesV1(
+      timestamp: Quantity timestamp,
+      prevRandao: FixedBytes[32] randomData.data,
+      suggestedFeeRecipient: suggestedFeeRecipient))
+
+template makeForkedPayloadAttributes(
+    GetPayloadResponseType: type engine_api.GetPayloadV2Response,
+    timestamp: uint64,
+    randomData: Eth2Digest,
+    suggestedFeeRecipient: Eth1Address,
+    withdrawals: seq[WithdrawalV1]): ForkedPayloadAttributes =
+  ForkedPayloadAttributes(
+    kind: ForkedPayloadAttributesKind.v2,
+    v2: engine_api.PayloadAttributesV2(
+      timestamp: Quantity timestamp,
+      prevRandao: FixedBytes[32] randomData.data,
+      suggestedFeeRecipient: suggestedFeeRecipient,
+      withdrawals: withdrawals))
+
+proc forkchoiceUpdated(rpcClient: RpcClient,
+                       state: ForkchoiceStateV1,
+                       payloadAttributes: ForkedPayloadAttributes): Future[ForkchoiceUpdatedResponse] =
+  if payloadAttributes == nil:
+    rpcClient.engine_forkchoiceUpdatedV1(state, none PayloadAttributesV1)
+  else:
+    case payloadAttributes.kind
+    of ForkedPayloadAttributesKind.v1:
+      rpcClient.engine_forkchoiceUpdatedV1(state, some payloadAttributes.v1)
+    of ForkedPayloadAttributesKind.v2:
+      rpcClient.engine_forkchoiceUpdatedV2(state, some payloadAttributes.v2)
+
+proc getPayloadFromSingleEL(
+    connection: ELConnection,
+    GetPayloadResponseType: type,
+    isForkChoiceUpToDate: bool,
+    headBlock, safeBlock, finalizedBlock: Eth2Digest,
+    timestamp: uint64,
+    randomData: Eth2Digest,
+    suggestedFeeRecipient: Eth1Address,
+    withdrawals: seq[WithdrawalV1]): Future[GetPayloadResponseType] {.async.} =
+
+  let
+    rpcClient = await connection.connectedRpcClient()
+    payloadId = if isForkChoiceUpToDate and connection.lastPayloadId.isSome:
+      connection.lastPayloadId.get
+    elif not headBlock.isZero:
+      # TODO Add metric
+      let response = await rpcClient.forkchoiceUpdated(
+        ForkchoiceStateV1(
+          headBlockHash: headBlock.asBlockHash,
+          safeBlockHash: safeBlock.asBlockHash,
+          finalizedBlockHash: finalizedBlock.asBlockHash),
+        makeForkedPayloadAttributes(
+          GetPayloadResponseType,
+          timestamp,
+          randomData,
+          suggestedFeeRecipient,
+          withdrawals))
+
+      if response.payloadStatus.status != PayloadExecutionStatus.valid or
+         response.payloadId.isNone:
+        raise newException(CatchableError, "Head block is not a valid payload")
+
+      response.payloadId.get
+    else:
+      raise newException(CatchableError, "No confirmed execution head yet")
+
+  return await engine_api.getPayload(rpcClient, GetPayloadResponseType, payloadId)
+
+func blockValue(blk: ExecutionPayloadV1): int64 {.raises: [RlpError].} =
+  ## TODO Ensure this cannot overflow
+  for transactionBytes in blk.transactions:
+    var rlp = rlpFromBytes distinctBase(transactionBytes)
+    let transaction = rlp.read(eth_types.Transaction)
+    result += distinctBase effectiveGasTip(transaction, blk.baseFeePerGas)
+
+proc cmpGetPayloadResponses(lhs, rhs: ExecutionPayloadV1): int =
+  try:
+    cmp(blockValue(lhs), blockValue(rhs))
+  except CatchableError as err:
+    debug "Failure while decoding transactions", err = err.msg
+    return 0
+
+proc cmpGetPayloadResponses(lhs, rhs: GetPayloadV2Response): int =
+  # TODO Would this fit in uint64?
+  cmp(uint64 lhs.blockValue, uint64 rhs.blockValue)
+
+template EngineApiPayloadType*(T: type bellatrix.ExecutionPayload): type =
+  engine_api.ExecutionPayloadV1
+
+template EngineApiPayloadType*(T: type capella.ExecutionPayload): type =
+  engine_api.ExecutionPayloadV2
+
+template EngineApiResponseType*(T: type engine_api.ExecutionPayloadV1): type =
+  engine_api.ExecutionPayloadV1
+
+template EngineApiResponseType*(T: type engine_api.ExecutionPayloadV2): type =
+  engine_api.GetPayloadV2Response
+
+template payload(response: engine_api.ExecutionPayloadV1): engine_api.ExecutionPayloadV1 =
+  response
+
+template payload(response: engine_api.GetPayloadV2Response): engine_api.ExecutionPayloadV2 =
+  response.executionPayload
+
+template toEngineWithdrawals*(withdrawals: seq[capella.Withdrawal]): seq[WithdrawalV1] =
+  mapIt(withdrawals, toEngineWithdrawal(it))
+
+proc getPayload*(m: ELManager,
+                 PayloadType: type engine_api.SomeExecutionPayload,
+                 headBlock, safeBlock, finalizedBlock: Eth2Digest,
+                 timestamp: uint64,
+                 randomData: Eth2Digest,
+                 suggestedFeeRecipient: Eth1Address,
+                 withdrawals: seq[capella.Withdrawal]):
+                Future[Opt[PayloadType]] {.async.} =
+  # TODO Pre-merge, deliver empty payload
+  #      default(bellatrix.ExecutionPayload)
+
+  let
+    engineApiWithdrawals = toEngineWithdrawals withdrawals
+  let isFcUpToDate = m.nextExpectedPayloadParams.areSameAs(
+    headBlock, safeBlock, finalizedBlock, timestamp,
+    randomData, suggestedFeeRecipient, engineApiWithdrawals)
+
+  let
+    deadline = sleepAsync(GETPAYLOAD_TIMEOUT)
+    requests = m.elConnections.mapIt(it.getPayloadFromSingleEL(
+      EngineApiResponseType(PayloadType),
+      isFcUpToDate, headBlock, safeBlock, finalizedBlock,
+      timestamp, randomData, suggestedFeeRecipient, engineApiWithdrawals
+    ))
+    requestsCompleted = allFutures(requests)
+
+  await requestsCompleted or deadline
+
+  var bestPayloadIdx = none int
+  for idx, req in requests:
+    if not req.finished:
+      req.cancel()
+    elif req.failed:
+      error "Failed to get execution payload from EL",
+             url = m.elConnections[idx].engineUrl.url,
+             err = req.error.msg
+    elif bestPayloadIdx.isNone:
+      bestPayloadIdx = some idx
+    else:
+      if cmpGetPayloadResponses(req.read, requests[bestPayloadIdx.get].read) > 0:
+        bestPayloadIdx = some idx
+
+  if bestPayloadIdx.isSome:
+    return ok requests[bestPayloadIdx.get].read.payload
+  else:
+    return err()
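getPayload now fans the request out to every connection and keeps the highest-paying bid: V1 responses are ranked by summing each transaction's effective tip, V2 responses by the EL-reported blockValue. A usage sketch, assuming the API above:

  # Ask all connected ELs for a Capella payload; give up for this slot if
  # none of them produced a valid payload within GETPAYLOAD_TIMEOUT.
  let payload = (await m.getPayload(
      engine_api.ExecutionPayloadV2,
      headBlock, safeBlock, finalizedBlock,
      timestamp, randomData, feeRecipient, withdrawals)).valueOr:
    return  # e.g. fall back to an empty or builder payload
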
+proc sendNewPayloadToSingleEL(connection: ELConnection,
+                              payload: engine_api.ExecutionPayloadV1):
+                             Future[PayloadStatusV1] {.async.} =
+  let rpcClient = await connection.connectedRpcClient()
+  return await rpcClient.engine_newPayloadV1(payload)
+
+proc sendNewPayloadToSingleEL(connection: ELConnection,
+                              payload: engine_api.ExecutionPayloadV2):
+                             Future[PayloadStatusV1] {.async.} =
+  let rpcClient = await connection.connectedRpcClient()
+  return await rpcClient.engine_newPayloadV2(payload)
+
+type
+  StatusRelation = enum
+    newStatusIsPreferable
+    oldStatusIsOk
+    disagreement
+
+proc compareStatuses(prevStatus, newStatus: PayloadExecutionStatus): StatusRelation =
+  case prevStatus
+  of PayloadExecutionStatus.syncing:
+    if newStatus == PayloadExecutionStatus.syncing:
+      oldStatusIsOk
+    else:
+      newStatusIsPreferable
+
+  of PayloadExecutionStatus.valid:
+    case newStatus
+    of PayloadExecutionStatus.syncing,
+       PayloadExecutionStatus.accepted,
+       PayloadExecutionStatus.valid:
+      oldStatusIsOk
+    of PayloadExecutionStatus.invalid_block_hash,
+       PayloadExecutionStatus.invalid:
+      disagreement
+
+  of PayloadExecutionStatus.invalid:
+    case newStatus
+    of PayloadExecutionStatus.syncing,
+       PayloadExecutionStatus.invalid:
+      oldStatusIsOk
+    of PayloadExecutionStatus.valid,
+       PayloadExecutionStatus.accepted,
+       PayloadExecutionStatus.invalid_block_hash:
+      disagreement
+
+  of PayloadExecutionStatus.accepted:
+    case newStatus
+    of PayloadExecutionStatus.accepted,
+       PayloadExecutionStatus.syncing:
+      oldStatusIsOk
+    of PayloadExecutionStatus.valid:
+      newStatusIsPreferable
+    of PayloadExecutionStatus.invalid_block_hash,
+       PayloadExecutionStatus.invalid:
+      disagreement
+
+  of PayloadExecutionStatus.invalid_block_hash:
+    if newStatus == PayloadExecutionStatus.invalid_block_hash:
+      oldStatusIsOk
+    else:
+      disagreement
 
-proc forkchoiceUpdated*(p: Eth1Monitor,
+proc sendNewPayload*(m: ELManager,
+                     payload: engine_api.ExecutionPayloadV1 | engine_api.ExecutionPayloadV2):
+                    Future[PayloadExecutionStatus] {.async.} =
+  let
+    deadline = sleepAsync(NEWPAYLOAD_TIMEOUT)
+    requests = m.elConnections.mapIt(sendNewPayloadToSingleEL(it, payload))
+    requestsCompleted = allFutures(requests)
+
+  await requestsCompleted or deadline
+
+  var
+    selectedResponse = none int
+    disagreementAlreadyDetected = false
+
+  for idx, req in requests:
+    if not req.finished:
+      req.cancel()
+    else:
+      let url = m.elConnections[idx].engineUrl.url
+      if req.failed:
+        engine_newPayload_failures.inc(1, [url])
+        error "Sending payload to the EL failed",
+              url, err = req.error.msg
+      else:
+        let status = req.read.status
+        engine_newPayload_sent.inc(1, [url, $status])
+
+        if selectedResponse.isNone:
+          selectedResponse = some idx
+        elif not disagreementAlreadyDetected:
+          let prevStatus = requests[selectedResponse.get].read.status
+          case compareStatuses(status, prevStatus)
+          of newStatusIsPreferable:
+            selectedResponse = some idx
+          of oldStatusIsOk:
+            discard
+          of disagreement:
+            disagreementAlreadyDetected = true
+            error "ELs disagree regarding newPayload status",
+                  url1 = m.elConnections[selectedResponse.get].engineUrl.url,
+                  status1 = prevStatus,
+                  url2 = url,
+                  status2 = status
+
+  return if disagreementAlreadyDetected:
+    PayloadExecutionStatus.invalid
+  elif selectedResponse.isSome:
+    requests[selectedResponse.get].read.status
+  else:
+    PayloadExecutionStatus.syncing
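The aggregation rule is deliberately fail-safe: any disagreement between ELs downgrades the combined answer to invalid, agreement keeps the more conclusive status, and no usable response at all degrades to syncing. Two illustrative cases, assuming compareStatuses above:

  doAssert compareStatuses(PayloadExecutionStatus.syncing,
                           PayloadExecutionStatus.valid) == newStatusIsPreferable
  doAssert compareStatuses(PayloadExecutionStatus.valid,
                           PayloadExecutionStatus.invalid) == disagreement
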
- if p.isNil or p.dataProvider.isNil: - let fcuR = - newFuture[engine_api.ForkchoiceUpdatedResponse]("forkchoiceUpdated") - fcuR.complete(engine_api.ForkchoiceUpdatedResponse( - payloadStatus: PayloadStatusV1(status: PayloadExecutionStatus.syncing))) - return fcuR - - p.dataProvider.web3.provider.engine_forkchoiceUpdatedV1( - ForkchoiceStateV1( + payloadAttributes: ForkedPayloadAttributes = nil): + Future[(PayloadExecutionStatus, Option[BlockHash])] {.async.} = + doAssert not headBlock.isZero + + # Allow finalizedBlockRoot to be 0 to avoid sync deadlocks. + # + # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#pos-events + # has "Before the first finalized block occurs in the system the finalized + # block hash provided by this event is stubbed with + # `0x0000000000000000000000000000000000000000000000000000000000000000`." + # and + # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/bellatrix/validator.md#executionpayload + # notes "`finalized_block_hash` is the hash of the latest finalized execution + # payload (`Hash32()` if none yet finalized)" + + m.nextExpectedPayloadParams = some NextExpectedPayloadParams( + headBlockRoot: headBlock, + safeBlockRoot: safeBlock, + finalizedBlockRoot: finalizedBlock, + payloadAttributes: payloadAttributes) + + let + state = newClone ForkchoiceStateV1( headBlockHash: headBlock.asBlockHash, safeBlockHash: safeBlock.asBlockHash, - finalizedBlockHash: finalizedBlock.asBlockHash), - some(engine_api.PayloadAttributesV1( - timestamp: Quantity timestamp, - prevRandao: FixedBytes[32] randomData, - suggestedFeeRecipient: suggestedFeeRecipient))) + finalizedBlockHash: finalizedBlock.asBlockHash) + deadline = sleepAsync(FORKCHOICEUPDATED_TIMEOUT) + requests = m.elConnections.mapIt( + it.forkchoiceUpdatedForSingleEL(state, payloadAttributes)) + requestsCompleted = allFutures(requests) + + await requestsCompleted or deadline + + var + selectedResponse = none int + disagreementAlreadyDetected = false + + for idx, req in requests: + if not req.finished: + req.cancel() + else: + let url = m.elConnections[idx].engineUrl.url + if req.failed: + engine_forkchoiceUpdated_failures.inc(1, [url]) + error "Sending fork-choice update to the EL failed", + url, err = req.error.msg + else: + let status = req.read.status + engine_forkchoiceUpdated_sent.inc(1, [url, $status]) + + if selectedResponse.isNone: + selectedResponse = some idx + elif not disagreementAlreadyDetected: + let prevStatus = requests[selectedResponse.get].read.status + case compareStatuses(status, prevStatus) + of newStatusIsPreferable: + selectedResponse = some idx + of oldStatusIsOk: + discard + of disagreement: + disagreementAlreadyDetected = true + error "ELs disagree regarding fork-choice update status", + url1 = m.elConnections[selectedResponse.get].engineUrl.url, + status1 = prevStatus, + url2 = url, + status2 = status + + return if disagreementAlreadyDetected: + (PayloadExecutionStatus.invalid, none BlockHash) + elif selectedResponse.isSome: + (requests[selectedResponse.get].read.status, + requests[selectedResponse.get].read.latestValidHash) + else: + (PayloadExecutionStatus.syncing, none BlockHash) + +proc forkchoiceUpdatedNoResult*(m: ELManager, + headBlock, safeBlock, finalizedBlock: Eth2Digest, + payloadAttributes: ForkedPayloadAttributes = nil) {.async.} = + discard await m.forkchoiceUpdated( + headBlock, safeBlock, finalizedBlock, payloadAttributes) -# TODO can't be defined within exchangeTransitionConfiguration +# TODO can't be defined within exchangeConfigWithSingleEL proc
`==`(x, y: Quantity): bool {.borrow, noSideEffect.} -type - EtcStatus {.pure.} = enum - exchangeError - mismatch - match +proc exchangeConfigWithSingleEL(m: ELManager, connection: ELConnection) {.async.} = + let rpcClient = await connection.connectedRpcClient() -proc exchangeTransitionConfiguration*(p: Eth1Monitor): Future[EtcStatus] {.async.} = - # Eth1 monitor can recycle connections without (external) warning; at least, - # don't crash. - if p.isNil: - debug "exchangeTransitionConfiguration: nil Eth1Monitor" - return EtcStatus.exchangeError + if m.eth1Network.isSome and + connection.etcStatus == EtcStatus.notExchangedYet: + try: + let + providerChain = + awaitOrRaiseOnTimeout(rpcClient.eth_chainId(), web3RequestsTimeout) - let dataProvider = p.dataProvider - if dataProvider.isNil: - return EtcStatus.exchangeError + # https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids + expectedChain = case m.eth1Network.get + of mainnet: 1.Quantity + of ropsten: 3.Quantity + of rinkeby: 4.Quantity + of goerli: 5.Quantity + of sepolia: 11155111.Quantity # https://chainid.network/ + if expectedChain != providerChain: + warn "The specified EL client is connected to a different chain", + url = connection.engineUrl, + expectedChain = distinctBase(expectedChain), + actualChain = distinctBase(providerChain) + connection.etcStatus = EtcStatus.mismatch + return + except CatchableError as exc: + # Typically because it's not synced through EIP-155, assuming this Web3 + # endpoint has been otherwise working. + debug "Failed to obtain eth_chainId", + error = exc.msg # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#engine_exchangetransitionconfigurationv1 - let consensusCfg = TransitionConfigurationV1( - terminalTotalDifficulty: p.depositsChain.cfg.TERMINAL_TOTAL_DIFFICULTY, - terminalBlockHash: p.depositsChain.cfg.TERMINAL_BLOCK_HASH, - terminalBlockNumber: Quantity 0) - let executionCfg = - try: - awaitWithRetries( - dataProvider.web3.provider.engine_exchangeTransitionConfigurationV1( - consensusCfg), + let + ourConf = TransitionConfigurationV1( + terminalTotalDifficulty: m.eth1Chain.cfg.TERMINAL_TOTAL_DIFFICULTY, + terminalBlockHash: m.eth1Chain.cfg.TERMINAL_BLOCK_HASH, + terminalBlockNumber: Quantity 0) + elConf = try: + awaitOrRaiseOnTimeout( + rpcClient.engine_exchangeTransitionConfigurationV1(ourConf), timeout = 1.seconds) except CatchableError as err: - warn "Failed to exchange transition configuration", err = err.msg - return EtcStatus.exchangeError + error "Failed to exchange transition configuration", + url = connection.engineUrl, err = err.msg + connection.etcStatus = EtcStatus.exchangeError + return - return - if consensusCfg.terminalTotalDifficulty != executionCfg.terminalTotalDifficulty: + connection.etcStatus = + if ourConf.terminalTotalDifficulty != elConf.terminalTotalDifficulty: error "Engine API configured with different terminal total difficulty", - engineAPI_value = executionCfg.terminalTotalDifficulty, - localValue = consensusCfg.terminalTotalDifficulty + engineAPI_value = elConf.terminalTotalDifficulty, + localValue = ourConf.terminalTotalDifficulty EtcStatus.mismatch - elif consensusCfg.terminalBlockNumber != executionCfg.terminalBlockNumber: + elif ourConf.terminalBlockNumber != elConf.terminalBlockNumber: warn "Engine API reporting different terminal block number", - engineAPI_value = executionCfg.terminalBlockNumber.uint64, - localValue = consensusCfg.terminalBlockNumber.uint64 + engineAPI_value = elConf.terminalBlockNumber.uint64, + 
localValue = ourConf.terminalBlockNumber.uint64 EtcStatus.mismatch - elif consensusCfg.terminalBlockHash != executionCfg.terminalBlockHash: + elif ourConf.terminalBlockHash != elConf.terminalBlockHash: warn "Engine API reporting different terminal block hash", - engineAPI_value = executionCfg.terminalBlockHash, - localValue = consensusCfg.terminalBlockHash + engineAPI_value = elConf.terminalBlockHash, + localValue = ourConf.terminalBlockHash EtcStatus.mismatch else: - if not p.exchangedConfiguration: + if connection.etcStatus == EtcStatus.notExchangedYet: # Log successful engine configuration exchange once at startup - p.exchangedConfiguration = true - info "Exchanged engine configuration", - terminalTotalDifficulty = executionCfg.terminalTotalDifficulty, - terminalBlockHash = executionCfg.terminalBlockHash, - terminalBlockNumber = executionCfg.terminalBlockNumber.uint64 + info "Successfully exchanged engine configuration", + url = connection.engineUrl EtcStatus.match +proc exchangeTransitionConfiguration*(m: ELManager) {.async.} = + let + deadline = sleepAsync(3.seconds) + requests = m.elConnections.mapIt(m.exchangeConfigWithSingleEL(it)) + requestsCompleted = allFutures(requests) + + await requestsCompleted or deadline + + for idx, req in requests: + if not req.finished: + m.elConnections[idx].etcStatus = EtcStatus.exchangeError + req.cancel() + template readJsonField(j: JsonNode, fieldName: string, ValueType: type): untyped = var res: ValueType fromJson(j[fieldName], fieldName, res) @@ -702,10 +1148,11 @@ template readJsonField(j: JsonNode, fieldName: string, ValueType: type): untyped template init[N: static int](T: type DynamicBytes[N, N]): T = T newSeq[byte](N) -proc fetchTimestampWithRetries(blkParam: Eth1Block, p: Web3DataProviderRef) {.async.} = - let blk = blkParam - let web3block = awaitWithRetries( - p.getBlockByHash(blk.hash.asBlockHash)) +proc fetchTimestampWithRetries(rpcClient: RpcClient, + blk: Eth1Block) {.async.} = + let web3block = awaitOrRaiseOnTimeout( + rpcClient.getBlockByHash(blk.hash.asBlockHash), + web3RequestsTimeout) blk.timestamp = Eth1BlockTimestamp web3block.timestamp func depositEventsToBlocks(depositsList: JsonNode): seq[Eth1Block] {. 
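# [Editor's note] The fan-out procs introduced above (`getPayload`,
# `sendNewPayload`, `forkchoiceUpdated`, `exchangeTransitionConfiguration`)
# all follow one skeleton: send the same request to every connected EL,
# bound the whole batch by a single deadline, cancel the stragglers, then
# fold the finished responses into one answer. A minimal sketch of that
# skeleton, assuming only chronos and stew/results; `queryAllELs` and
# `pickBest` are hypothetical names, not part of this patch:

import chronos, stew/results

proc queryAllELs[T](requests: seq[Future[T]],
                    timeout: Duration): Future[seq[Future[T]]] {.async.} =
  # Race the whole batch against one deadline so that a single slow or
  # unresponsive EL cannot stall the caller.
  let deadline = sleepAsync(timeout)
  await allFutures(requests) or deadline
  for req in requests:
    if not req.finished:
      req.cancel()  # stragglers are cancelled, never awaited
  return requests

proc pickBest[T](requests: seq[Future[T]],
                 better: proc (a, b: T): bool): Opt[T] =
  # Fold the successfully finished responses into a single winner, the way
  # getPayload above keeps the response with the highest blockValue.
  var best = Opt.none(T)
  for req in requests:
    if req.completed():
      let r = req.read
      if best.isNone or better(r, best.get):
        best = Opt.some(r)
  best

# sendNewPayload and forkchoiceUpdated layer one more concern on top of this:
# compareStatuses flags any valid/invalid disagreement between ELs, and the
# aggregate result is conservatively downgraded to `invalid` in that case.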
@@ -769,20 +1216,16 @@ type DepositCountIncorrect DepositCountUnavailable -template awaitOrRaiseOnTimeout[T](fut: Future[T], - timeout: Duration): T = - awaitWithTimeout(fut, timeout): - raise newException(DataProviderTimeout, "Timeout") - when hasDepositRootChecks: const contractCallTimeout = 60.seconds - proc fetchDepositContractData(p: Web3DataProviderRef, blk: Eth1Block): - Future[DepositContractDataStatus] {.async.} = + proc fetchDepositContractData(rpcClient: RpcClient, + depositContract: Sender[DepositContract], + blk: Eth1Block): Future[DepositContractDataStatus] {.async.} = let - depositRoot = p.ns.get_deposit_root.call(blockNumber = blk.number) - rawCount = p.ns.get_deposit_count.call(blockNumber = blk.number) + depositRoot = depositContract.get_deposit_root.call(blockNumber = blk.number) + rawCount = depositContract.get_deposit_count.call(blockNumber = blk.number) try: let fetchedRoot = asEth2Digest( @@ -813,14 +1256,6 @@ when hasDepositRootChecks: err = err.msg result = DepositCountUnavailable -proc onBlockHeaders(p: Web3DataProviderRef, - blockHeaderHandler: BlockHeaderHandler, - errorHandler: SubscriptionErrorHandler) {.async.} = - info "Waiting for new Eth1 block headers" - - p.blockHeadersSubscription = awaitWithRetries( - p.web3.subscribeForBlockHeaders(blockHeaderHandler, errorHandler)) - proc pruneOldBlocks(chain: var Eth1Chain, depositIndex: uint64) = ## Called on block finalization to delete old and now redundant data. let initialChunks = chain.finalizedDepositsMerkleizer.getChunkCount @@ -909,7 +1344,7 @@ proc trackFinalizedState(chain: var Eth1Chain, finalizedEth1Data: Eth1Data, finalizedStateDepositIndex: uint64, blockProposalExpected = false): bool = - ## This function will return true if the Eth1Monitor is synced + ## This function will return true if the ELManager is synced ## to the finalization point.
if chain.blocks.len == 0: @@ -948,10 +1383,10 @@ proc trackFinalizedState(chain: var Eth1Chain, if result: chain.pruneOldBlocks(finalizedStateDepositIndex) -template trackFinalizedState*(m: Eth1Monitor, +template trackFinalizedState*(m: ELManager, finalizedEth1Data: Eth1Data, finalizedStateDepositIndex: uint64): bool = - trackFinalizedState(m.depositsChain, finalizedEth1Data, finalizedStateDepositIndex) + trackFinalizedState(m.eth1Chain, finalizedEth1Data, finalizedStateDepositIndex) # https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/phase0/validator.md#get_eth1_data proc getBlockProposalData*(chain: var Eth1Chain, @@ -1034,44 +1469,19 @@ proc getBlockProposalData*(chain: var Eth1Chain, else: result.hasMissingDeposits = true -template getBlockProposalData*(m: Eth1Monitor, +template getBlockProposalData*(m: ELManager, state: ForkedHashedBeaconState, finalizedEth1Data: Eth1Data, finalizedStateDepositIndex: uint64): BlockProposalEth1Data = getBlockProposalData( - m.depositsChain, state, finalizedEth1Data, finalizedStateDepositIndex) - -proc getJsonRpcRequestHeaders(jwtSecret: Option[seq[byte]]): - auto = - if jwtSecret.isSome: - let secret = jwtSecret.get - (proc(): seq[(string, string)] = - # https://www.rfc-editor.org/rfc/rfc6750#section-6.1.1 - @[("Authorization", "Bearer " & getSignedIatToken( - secret, (getTime() - initTime(0, 0)).inSeconds))]) - else: - (proc(): seq[(string, string)] = @[]) - -proc new*(T: type Web3DataProvider, - depositContractAddress: Eth1Address, - web3Url: string, - jwtSecret: Option[seq[byte]]): - Future[Result[Web3DataProviderRef, string]] {.async.} = - let web3Fut = newWeb3(web3Url, getJsonRpcRequestHeaders(jwtSecret)) - yield web3Fut or sleepAsync(10.seconds) - if (not web3Fut.finished) or web3Fut.failed: - await cancelAndWait(web3Fut) - if web3Fut.failed: - return err "Failed to setup web3 connection: " & web3Fut.readError.msg - else: - return err "Failed to setup web3 connection" - - let - web3 = web3Fut.read - ns = web3.contractSender(DepositContract, depositContractAddress) + m.eth1Chain, state, finalizedEth1Data, finalizedStateDepositIndex) - return ok Web3DataProviderRef(url: web3Url, web3: web3, ns: ns) +proc new*(T: type ELConnection, + engineUrl: EngineApiUrl): T = + ELConnection( + engineUrl: engineUrl, + depositContractSyncStatus: DepositContractSyncStatus.unknown) template getOrDefault[T, E](r: Result[T, E]): T = type TT = T @@ -1107,51 +1517,32 @@ proc init*(T: type Eth1Chain, finalizedDepositsMerkleizer: m, headMerkleizer: copy m) -proc getBlock(provider: Web3DataProviderRef, id: BlockHashOrNumber): - Future[BlockObject] = - if id.isHash: - let hash = id.hash.asBlockHash() - return provider.getBlockByHash(hash) - else: - return provider.getBlockByNumber(id.number) - -proc currentEpoch(m: Eth1Monitor): Epoch = +proc currentEpoch(m: ELManager): Epoch = if m.getBeaconTime != nil: m.getBeaconTime().slotOrZero.epoch else: Epoch 0 -proc init*(T: type Eth1Monitor, - cfg: RuntimeConfig, - depositContractBlockNumber: uint64, - depositContractBlockHash: Eth2Digest, - db: BeaconChainDB, - getBeaconTime: GetBeaconTimeFn, - web3Urls: seq[string], - eth1Network: Option[Eth1Network], - forcePolling: bool, - jwtSecret: Option[seq[byte]], - ttdReached: bool): T = - doAssert web3Urls.len > 0 - var web3Urls = web3Urls - for url in mitems(web3Urls): - fixupWeb3Urls url - - let eth1Chain = Eth1Chain.init( - cfg, db, depositContractBlockNumber, depositContractBlockHash) - - T(state: Initialized, - depositsChain: eth1Chain, +proc new*(T: type 
ELManager, + cfg: RuntimeConfig, + depositContractBlockNumber: uint64, + depositContractBlockHash: Eth2Digest, + db: BeaconChainDB, + getBeaconTime: GetBeaconTimeFn, + engineApiUrls: seq[EngineApiUrl], + eth1Network: Option[Eth1Network], + ttdReached: bool): T = + let + eth1Chain = Eth1Chain.init( + cfg, db, depositContractBlockNumber, depositContractBlockHash) + + T(eth1Chain: eth1Chain, depositContractAddress: cfg.DEPOSIT_CONTRACT_ADDRESS, - depositContractDeployedAt: BlockHashOrNumber( - isHash: true, - hash: depositContractBlockHash), + depositContractBlockNumber: depositContractBlockNumber, + depositContractBlockHash: depositContractBlockHash.asBlockHash, getBeaconTime: getBeaconTime, - web3Urls: web3Urls, + elConnections: mapIt(engineApiUrls, ELConnection.new(it)), eth1Network: eth1Network, - eth1Progress: newAsyncEvent(), - forcePolling: forcePolling, - jwtSecret: jwtSecret, blocksPerLogsRequest: targetBlocksPerLogsRequest, ttdReachedField: ttdReached) @@ -1166,92 +1557,33 @@ func clear(chain: var Eth1Chain) = chain.headMerkleizer = copy chain.finalizedDepositsMerkleizer chain.hasConsensusViolation = false -proc detectPrimaryProviderComingOnline(m: Eth1Monitor) {.async.} = - const checkInterval = 30.seconds - - let - web3Url = m.web3Urls[0] - initialRunFut = m.runFut - - # This is a way to detect that the monitor was restarted. When this - # happens, this function will just return terminating the "async thread" - while m.runFut == initialRunFut: - let tempProviderRes = await Web3DataProvider.new( - m.depositContractAddress, - web3Url, - m.jwtSecret) - - if tempProviderRes.isErr: - await sleepAsync(checkInterval) - continue - - var tempProvider = tempProviderRes.get +proc doStop(m: ELManager) {.async.} = + safeCancel m.chainSyncingLoopFut + safeCancel m.exchangeTransitionConfigurationLoopFut - # Use one of the get/request-type methods from - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#underlying-protocol - # which doesn't take parameters and returns a small structure, to ensure - # this works with engine API endpoints. 
- let testRequest = tempProvider.web3.provider.eth_syncing() + let closeConnectionFutures = mapIt(m.elConnections, close(it)) + await allFutures(closeConnectionFutures) - yield testRequest or sleepAsync(web3Timeouts) - - traceAsyncErrors tempProvider.close() - - if testRequest.completed and m.state == Started: - m.state = ReadyToRestartToPrimary - return - else: - await sleepAsync(checkInterval) - -proc doStop(m: Eth1Monitor) {.async.} = - safeCancel m.runFut - - if m.dataProvider != nil: - awaitWithTimeout(m.dataProvider.close(), 30.seconds): - debug "Failed to close data provider in time" - m.dataProvider = nil - -proc ensureDataProvider*(m: Eth1Monitor) {.async.} = - if m.isNil or not m.dataProvider.isNil: - return - - let web3Url = m.web3Urls[m.startIdx mod m.web3Urls.len] - inc m.startIdx - - m.dataProvider = block: - let v = await Web3DataProvider.new( - m.depositContractAddress, web3Url, m.jwtSecret) - if v.isErr(): - raise (ref CatchableError)(msg: v.error()) - info "Established connection to execution layer", url = web3Url - v.get() - -proc stop(m: Eth1Monitor) {.async.} = - if m.state in {Started, ReadyToRestartToPrimary}: - m.state = Stopping - m.stopFut = m.doStop() +proc stop(m: ELManager) {.async.} = + if not m.stopFut.isNil: await m.stopFut - m.state = Stopped - elif m.state == Stopping: + else: + m.stopFut = m.doStop() await m.stopFut + m.stopFut = nil const votedBlocksSafetyMargin = 50 -func latestEth1BlockNumber(m: Eth1Monitor): Eth1BlockNumber = - if m.latestEth1Block.isSome: - Eth1BlockNumber m.latestEth1Block.get.number - else: - Eth1BlockNumber 0 - -func earliestBlockOfInterest(m: Eth1Monitor): Eth1BlockNumber = - m.latestEth1BlockNumber - (2 * m.cfg.ETH1_FOLLOW_DISTANCE) - votedBlocksSafetyMargin +func earliestBlockOfInterest(m: ELManager, latestEth1BlockNumber: Eth1BlockNumber): Eth1BlockNumber = + latestEth1BlockNumber - (2 * m.cfg.ETH1_FOLLOW_DISTANCE) - votedBlocksSafetyMargin -proc syncBlockRange(m: Eth1Monitor, +proc syncBlockRange(m: ELManager, + rpcClient: RpcClient, + depositContract: Sender[DepositContract], fromBlock, toBlock, fullSyncFromBlock: Eth1BlockNumber) {.gcsafe, async.} = - doAssert m.dataProvider != nil, "close not called concurrently" - doAssert m.depositsChain.blocks.len > 0 + doAssert m.eth1Chain.blocks.len > 0 var currentBlock = fromBlock while currentBlock <= toBlock: @@ -1273,14 +1605,14 @@ proc syncBlockRange(m: Eth1Monitor, # Reduce all request rate until we have a more general solution # for dealing with Infura's rate limits await sleepAsync(milliseconds(backoff)) - let jsonLogsFut = m.dataProvider.ns.getJsonLogs( + let jsonLogsFut = depositContract.getJsonLogs( DepositEvent, fromBlock = some blockId(currentBlock), toBlock = some blockId(maxBlockNumberRequested)) depositLogs = try: # Downloading large amounts of deposits may take several minutes - awaitWithTimeout(jsonLogsFut, web3Timeouts): + awaitWithTimeout(jsonLogsFut, 60.seconds): raise newException(DataProviderTimeout, "Request time out while obtaining json logs") except CatchableError as err: @@ -1303,20 +1635,22 @@ proc syncBlockRange(m: Eth1Monitor, for i in 0 ..< blocksWithDeposits.len: let blk = blocksWithDeposits[i] - await blk.fetchTimestampWithRetries(m.dataProvider) + debug "Fetching block timestamp", blockNum = blk.number + await rpcClient.fetchTimestampWithRetries(blk) if blk.number > fullSyncFromBlock: - let lastBlock = m.depositsChain.blocks.peekLast + let lastBlock = m.eth1Chain.blocks.peekLast for n in max(lastBlock.number + 1, fullSyncFromBlock) ..< blk.number: 
debug "Obtaining block without deposits", blockNum = n - let blockWithoutDeposits = awaitWithRetries( - m.dataProvider.getBlockByNumber(n)) + let blockWithoutDeposits = awaitOrRaiseOnTimeout( + rpcClient.getBlockByNumber(n), + web3RequestsTimeout) - m.depositsChain.addBlock( + m.eth1Chain.addBlock( lastBlock.makeSuccessorWithoutDeposits(blockWithoutDeposits)) eth1_synced_head.set blockWithoutDeposits.number.toGaugeValue - m.depositsChain.addBlock blk + m.eth1Chain.addBlock blk eth1_synced_head.set blk.number.toGaugeValue if blocksWithDeposits.len > 0: @@ -1324,7 +1658,9 @@ proc syncBlockRange(m: Eth1Monitor, template lastBlock: auto = blocksWithDeposits[lastIdx] let status = when hasDepositRootChecks: - awaitWithRetries m.dataProvider.fetchDepositContractData(lastBlock) + awaitOrRaiseOnTimeout( + rpcClient.fetchDepositContractData(depositContract, lastBlock), + web3RequestsTimeout) else: DepositRootUnavailable @@ -1359,31 +1695,32 @@ proc syncBlockRange(m: Eth1Monitor, depositContractState: m.headMerkleizer.toDepositContractState, blockNumber: lastBlock.number) - m.depositsChain.db.putDepositTreeSnapshot depositTreeSnapshot + m.db.putDepositTreeSnapshot depositTreeSnapshot if m.genesisStateFut != nil and m.chainHasEnoughValidators: - let lastIdx = m.depositsChain.blocks.len - 1 - template lastBlock: auto = m.depositsChain.blocks[lastIdx] + let lastIdx = m.eth1Chain.blocks.len - 1 + template lastBlock: auto = m.eth1Chain.blocks[lastIdx] if maxBlockNumberRequested == toBlock and - (m.depositsChain.blocks.len == 0 or lastBlock.number != toBlock): - let web3Block = awaitWithRetries( - m.dataProvider.getBlockByNumber(toBlock)) + (m.eth1Chain.blocks.len == 0 or lastBlock.number != toBlock): + let web3Block = awaitOrRaiseOnTimeout( + rpcClient.getBlockByNumber(toBlock), + ethRequetsTimeout) debug "Latest block doesn't hold deposits. Obtaining it", ts = web3Block.timestamp.uint64, number = web3Block.number.uint64 - m.depositsChain.addBlock lastBlock.makeSuccessorWithoutDeposits(web3Block) + m.eth1Chain.addBlock lastBlock.makeSuccessorWithoutDeposits(web3Block) else: - await lastBlock.fetchTimestampWithRetries(m.dataProvider) + await rpcClient.fetchTimestampWithRetries(lastBlock) - var genesisBlockIdx = m.depositsChain.blocks.len - 1 - if m.isAfterMinGenesisTime(m.depositsChain.blocks[genesisBlockIdx]): + var genesisBlockIdx = m.eth1Chain.blocks.len - 1 + if m.isAfterMinGenesisTime(m.eth1Chain.blocks[genesisBlockIdx]): for i in 1 ..< blocksWithDeposits.len: - let idx = (m.depositsChain.blocks.len - 1) - i - let blk = m.depositsChain.blocks[idx] - await blk.fetchTimestampWithRetries(m.dataProvider) + let idx = (m.eth1Chain.blocks.len - 1) - i + let blk = m.eth1Chain.blocks[idx] + await rpcClient.fetchTimestampWithRetries(blk) if m.isGenesisCandidate(blk): genesisBlockIdx = idx else: @@ -1401,25 +1738,26 @@ proc syncBlockRange(m: Eth1Monitor, # We'll handle this special case below by examing whether we are in # this potential scenario and we'll use a fast guessing algorith to # discover the ETh1 block with minimal valid genesis time. 
- var genesisBlock = m.depositsChain.blocks[genesisBlockIdx] + var genesisBlock = m.eth1Chain.blocks[genesisBlockIdx] if genesisBlockIdx > 0: - let genesisParent = m.depositsChain.blocks[genesisBlockIdx - 1] + let genesisParent = m.eth1Chain.blocks[genesisBlockIdx - 1] if genesisParent.timestamp == 0: - await genesisParent.fetchTimestampWithRetries(m.dataProvider) + await rpcClient.fetchTimestampWithRetries(genesisParent) if m.hasEnoughValidators(genesisParent) and genesisBlock.number - genesisParent.number > 1: - genesisBlock = awaitWithRetries( - m.findGenesisBlockInRange(genesisParent, genesisBlock)) + genesisBlock = awaitOrRaiseOnTimeout( + m.findGenesisBlockInRange(rpcClient, genesisParent, genesisBlock), + web3RequestsTimeout) m.signalGenesis m.createGenesisState(genesisBlock) func init(T: type FullBlockId, blk: Eth1BlockHeader|BlockObject): T = FullBlockId(number: Eth1BlockNumber blk.number, hash: blk.hash) -func isNewLastBlock(m: Eth1Monitor, blk: Eth1BlockHeader|BlockObject): bool = +func isNewLastBlock(m: ELManager, blk: Eth1BlockHeader|BlockObject): bool = m.latestEth1Block.isNone or blk.number.uint64 > m.latestEth1BlockNumber -proc findTerminalBlock(provider: Web3DataProviderRef, +proc findTerminalBlock(rpcClient: RpcClient, ttd: Uint256): Future[BlockObject] {.async.} = ## Find the first execution block with a difficulty higher than the ## specified `ttd`. @@ -1429,24 +1767,28 @@ proc next(x: BlockObject): Future[BlockObject] {.async.} = ## Returns the next block that's `step` steps away. - let key = uint64(max(int64(x.number) + step, 1)) + let key = uint64(max(int64(x.number) + step, 0)) # Check if present in cache. if key in cache: return cache[key] # Not cached, fetch. - let value = awaitWithRetries provider.getBlockByNumber(key) + let value = awaitOrRaiseOnTimeout(rpcClient.getBlockByNumber(key), + web3RequestsTimeout) cache[key] = value return value # Block A follows, B leads. var - a = awaitWithRetries( - provider.web3.provider.eth_getBlockByNumber("latest", false)) + a = awaitOrRaiseOnTimeout(rpcClient.eth_getBlockByNumber("latest", false), + web3RequestsTimeout) b = await next(a) + if a.number.uint64 == 0 and a.totalDifficulty >= ttd: + return a + while true: - let one = a.totalDifficulty > ttd - let two = b.totalDifficulty > ttd + let one = a.totalDifficulty >= ttd + let two = b.totalDifficulty >= ttd if one != two: step = step div -2i64 if step == 0: @@ -1461,30 +1803,110 @@ # This is unreachable. doAssert(false) -proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} = - if m.state == Started: +func hasProperlyConfiguredConnection(m: ELManager): bool = + for connection in m.elConnections: + if connection.etcStatus == EtcStatus.match: + return true + + return false + +proc shouldExchangeTransitionConfiguration*(m: ELManager): bool = + # We start exchanging the configuration roughly two weeks before the hard fork + m.currentEpoch + 14 * 256 >= m.cfg.BELLATRIX_FORK_EPOCH + +proc startExchangeTransitionConfigurationLoop(m: ELManager) {.async.} = + debug "Starting exchange transition configuration loop" + + if m.shouldExchangeTransitionConfiguration and not m.hasProperlyConfiguredConnection: + await m.exchangeTransitionConfiguration() + if not m.hasProperlyConfiguredConnection: + fatal "The Bellatrix hard fork requires the beacon node to be connected to a properly configured Engine API endpoint. 
" & + "See https://nimbus.guide/merge.html for more details." + quit 1 + + while true: + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#engine_exchangetransitionconfigurationv1 + await sleepAsync(60.seconds) + debug "Exchange transition configuration tick" + if m.shouldExchangeTransitionConfiguration: + traceAsyncErrors m.exchangeTransitionConfiguration() + +proc waitELToSyncDeposits(connection: ELConnection, + minimalRequiredBlock: BlockHash) {.async.} = + var rpcClient = await connection.connectedRpcClient() + + if connection.depositContractSyncStatus == DepositContractSyncStatus.synced: return - let isFirstRun = m.state == Initialized - let needsReset = m.state in {Failed, ReadyToRestartToPrimary} + var attempt = 0 - m.state = Started + while true: + try: + discard awaitOrRaiseOnTimeout(rpcClient.getBlockByHash(minimalRequiredBlock), + web3RequestsTimeout) + connection.depositContractSyncStatus = DepositContractSyncStatus.synced + return + except CancelledError as err: + trace "waitELToSyncDepositContract cancelled", + url = connection.engineUrl.url + raise err + except CatchableError as err: + connection.depositContractSyncStatus = DepositContractSyncStatus.notSynced + if attempt == 0: + warn "Failed to obtain the most recent known block from the execution " & + "layer node (the node is probably not synced)", + url = connection.engineUrl.url, + blk = minimalRequiredBlock, + err = err.msg + elif attempt mod 60 == 0: + # This warning will be produced every 30 minutes + warn "Still failing to obtain the most recent known block from the " & + "execution layer node (the node is probably still not synced)", + url = connection.engineUrl.url, + blk = minimalRequiredBlock, + err = err.msg + await sleepAsync(seconds(30)) + rpcClient = await connection.connectedRpcClient() + +func mostRecentKnownBlock(m: ELManager): BlockHash = + if m.eth1Chain.finalizedDepositsMerkleizer.getChunkCount() > 0: + m.eth1Chain.finalizedBlockHash.asBlockHash + else: + m.depositContractBlockHash - if delayBeforeStart != ZeroDuration: - await sleepAsync(delayBeforeStart) +proc networkHasDepositContract(m: ELManager): bool = + not m.cfg.DEPOSIT_CONTRACT_ADDRESS.isDefaultValue + +proc selectConnectionForChainSyncing(m: ELManager): Future[ELConnection] {.async.} = + let connectionsFuts = mapIt( + m.elConnections, + if m.networkHasDepositContract: + FutureBase waitELToSyncDeposits(it, m.mostRecentKnownBlock) + else: + FutureBase connectedRpcClient(it)) - # If the monitor died with an exception, the web3 provider may be in - # an arbitary state, so we better reset it (not doing this has resulted - # in resource leaks historically). - if not m.dataProvider.isNil and needsReset: - # We introduce a local var to eliminate the risk of scheduling two - # competing calls to `close` below. 
- let provider = m.dataProvider - m.dataProvider = nil - await provider.close() + let firstConnected = await firstCompletedFuture(connectionsFuts) - await m.ensureDataProvider() - doAssert m.dataProvider != nil, "close not called concurrently" + # TODO: Ideally, the cancellation will be handled automatically + # by a helper like `firstCompletedFuture` + for future in connectionsFuts: + if future != firstConnected: + future.cancel() + + return m.elConnections[find(connectionsFuts, firstConnected)] + +proc syncEth1Chain(m: ELManager, connection: ELConnection) {.async.} = + let rpcClient = await connection.connectedRpcClient() + + let + shouldProcessDeposits = not ( + m.depositContractAddress.isZeroMemory or + m.eth1Chain.finalizedBlockHash.data.isZeroMemory) + + trace "Starting syncEth1Chain", shouldProcessDeposits + + logScope: + url = connection.engineUrl.url # We might need to reset the chain if the new provider disagrees # with the previous one regarding the history of the chain or if @@ -1496,100 +1918,46 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} = # when they don't indicate any errors in the response. When this # happens, we are usually able to download the data successfully # on the second attempt. - if m.latestEth1Block.isSome and m.depositsChain.blocks.len > 0: - let needsReset = m.depositsChain.hasConsensusViolation or (block: + # + # TODO + # Perhaps the above problem was manifesting only with the obsolete + # JSON-RPC data providers, which can no longer be used with Nimbus. + if m.eth1Chain.blocks.len > 0: + let needsReset = m.eth1Chain.hasConsensusViolation or (block: let - lastKnownBlock = m.depositsChain.blocks.peekLast - matchingBlockAtNewProvider = awaitWithRetries( - m.dataProvider.getBlockByNumber lastKnownBlock.number) + lastKnownBlock = m.eth1Chain.blocks.peekLast + matchingBlockAtNewProvider = awaitOrRaiseOnTimeout( + rpcClient.getBlockByNumber(lastKnownBlock.number), + web3RequestsTimeout) lastKnownBlock.hash.asBlockHash != matchingBlockAtNewProvider.hash) if needsReset: - m.depositsChain.clear() - m.latestEth1Block = none(FullBlockId) - - template web3Url: string = m.dataProvider.url - - if web3Url != m.web3Urls[0]: - asyncSpawn m.detectPrimaryProviderComingOnline() - - info "Starting Eth1 deposit contract monitoring", - contract = $m.depositContractAddress - - if isFirstRun and m.eth1Network.isSome: - try: - let - providerChain = - awaitWithRetries m.dataProvider.web3.provider.eth_chainId() - - # https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids - expectedChain = case m.eth1Network.get - of mainnet: 1.Quantity - of ropsten: 3.Quantity - of rinkeby: 4.Quantity - of goerli: 5.Quantity - of sepolia: 11155111.Quantity # https://chainid.network/ - if expectedChain != providerChain: - fatal "The specified Web3 provider serves data for a different chain", - expectedChain = distinctBase(expectedChain), - providerChain = distinctBase(providerChain) - quit 1 - except CatchableError as exc: - # Typically because it's not synced through EIP-155, assuming this Web3 - # endpoint has been otherwise working. 
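# [Editor's aside, not part of the patch] `selectConnectionForChainSyncing`
# above races heterogeneous futures through `firstCompletedFuture` (defined in
# beacon_chain/future_combinators.nim, the new file added further down in this
# diff) and cancels the losers itself. A toy illustration of those semantics,
# assuming chronos and that module are imported:
proc firstCompletedFutureDemo() {.async.} =
  let
    slow = sleepAsync(2.seconds)
    fast = sleepAsync(10.milliseconds)
    winner = await firstCompletedFuture(FutureBase slow, FutureBase fast)
  doAssert winner == FutureBase fast
  # The combinator leaves losing futures running; callers clean them up,
  # hence the explicit cancellation loop in selectConnectionForChainSyncing.
  slow.cancel()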
- debug "startEth1Syncing: eth_chainId failed: ", - error = exc.msg - - var mustUsePolling = m.forcePolling or - web3Url.startsWith("http://") or - web3Url.startsWith("https://") - - if not mustUsePolling: - proc newBlockHeadersHandler(blk: Eth1BlockHeader) - {.raises: [Defect], gcsafe.} = - try: - if m.isNewLastBlock(blk): - eth1_latest_head.set blk.number.toGaugeValue - m.latestEth1Block = some FullBlockId.init(blk) - m.eth1Progress.fire() - except Exception: - # TODO Investigate why this exception is being raised - raiseAssert "AsyncEvent.fire should not raise exceptions" - - proc subscriptionErrorHandler(err: CatchableError) - {.raises: [Defect], gcsafe.} = - warn "Failed to subscribe for block headers. Switching to polling", - err = err.msg - mustUsePolling = true - - await m.dataProvider.onBlockHeaders(newBlockHeadersHandler, - subscriptionErrorHandler) - - let shouldProcessDeposits = not ( - m.depositContractAddress.isZeroMemory or - m.depositsChain.finalizedBlockHash.data.isZeroMemory) + trace "Resetting the Eth1 chain", + hasConsensusViolation = m.eth1Chain.hasConsensusViolation + m.eth1Chain.clear() var eth1SyncedTo: Eth1BlockNumber if shouldProcessDeposits: - if m.depositsChain.blocks.len == 0: - let startBlock = awaitWithRetries( - m.dataProvider.getBlockByHash( - m.depositsChain.finalizedBlockHash.asBlockHash)) - - m.depositsChain.addBlock Eth1Block( - hash: m.depositsChain.finalizedBlockHash, + if m.eth1Chain.blocks.len == 0: + let finalizedBlockHash = m.eth1Chain.finalizedBlockHash.asBlockHash + let startBlock = + awaitOrRaiseOnTimeout(rpcClient.getBlockByHash(finalizedBlockHash), + web3RequestsTimeout) + + m.eth1Chain.addBlock Eth1Block( + hash: m.eth1Chain.finalizedBlockHash, number: Eth1BlockNumber startBlock.number, timestamp: Eth1BlockTimestamp startBlock.timestamp) - eth1SyncedTo = Eth1BlockNumber m.depositsChain.blocks[^1].number + eth1SyncedTo = m.eth1Chain.blocks[^1].number eth1_synced_head.set eth1SyncedTo.toGaugeValue eth1_finalized_head.set eth1SyncedTo.toGaugeValue eth1_finalized_deposits.set( - m.depositsChain.finalizedDepositsMerkleizer.getChunkCount.toGaugeValue) + m.eth1Chain.finalizedDepositsMerkleizer.getChunkCount.toGaugeValue) - debug "Starting Eth1 syncing", `from` = shortLog(m.depositsChain.blocks[^1]) + debug "Starting Eth1 syncing", `from` = shortLog(m.eth1Chain.blocks[^1]) let shouldCheckForMergeTransition = block: const FAR_FUTURE_TOTAL_DIFFICULTY = @@ -1597,8 +1965,9 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} = (not m.ttdReachedField) and (m.cfg.TERMINAL_TOTAL_DIFFICULTY != FAR_FUTURE_TOTAL_DIFFICULTY) - var didPollOnce = false while true: + debug "syncEth1Chain tick" + if bnStatus == BeaconNodeStatus.Stopping: when hasGenesisDetection: if not m.genesisStateFut.isNil: @@ -1607,95 +1976,112 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} = await m.stop() return - if m.depositsChain.hasConsensusViolation: + if m.eth1Chain.hasConsensusViolation: raise newException(CorruptDataProvider, "Eth1 chain contradicts Eth2 consensus") - if m.state == ReadyToRestartToPrimary: - info "Primary web3 provider is back online. 
Restarting the Eth1 monitor" - m.startIdx = 0 - return - - let nextBlock = if mustUsePolling or not didPollOnce: - let blk = awaitWithRetries( - m.dataProvider.web3.provider.eth_getBlockByNumber(blockId("latest"), false)) - - # Same as when handling events, minus `m.eth1Progress` round trip - if m.isNewLastBlock(blk): - eth1_latest_head.set blk.number.toGaugeValue - m.latestEth1Block = some FullBlockId.init(blk) - elif mustUsePolling: - await sleepAsync(m.cfg.SECONDS_PER_ETH1_BLOCK.int.seconds) - continue - else: - doAssert not didPollOnce - - didPollOnce = true - blk - else: - awaitWithTimeout(m.eth1Progress.wait(), 5.minutes): - raise newException(CorruptDataProvider, "No eth1 chain progress for too long") - - m.eth1Progress.clear() + let latestBlock = try: + awaitOrRaiseOnTimeout( + rpcClient.eth_getBlockByNumber(blockId("latest"), false), + web3RequestsTimeout) + except CatchableError as err: + error "Failed to obtain the latest block from the EL", err = err.msg + raise err - doAssert m.latestEth1Block.isSome - awaitWithRetries m.dataProvider.getBlockByHash(m.latestEth1Block.get.hash) + if shouldCheckForMergeTransition and + m.currentEpoch >= m.cfg.BELLATRIX_FORK_EPOCH and + m.terminalBlockHash.isNone and + latestBlock.totalDifficulty >= m.cfg.TERMINAL_TOTAL_DIFFICULTY: - # TODO when a terminal block hash is configured in cfg.TERMINAL_BLOCK_HASH, - # we should try to fetch that block from the EL - this facility is not - # in use on any current network, but should be implemented for full - # compliance - if m.terminalBlockHash.isNone and shouldCheckForMergeTransition: - let terminalBlock = await findTerminalBlock(m.dataProvider, m.cfg.TERMINAL_TOTAL_DIFFICULTY) + info "syncEth1Chain: checking for merge terminal block", + currentEpoch = m.currentEpoch, + BELLATRIX_FORK_EPOCH = m.cfg.BELLATRIX_FORK_EPOCH, + totalDifficulty = $latestBlock.totalDifficulty, + ttd = $m.cfg.TERMINAL_TOTAL_DIFFICULTY, + terminalBlockHash = m.terminalBlockHash + + # TODO when a terminal block hash is configured in cfg.TERMINAL_BLOCK_HASH, + # we should try to fetch that block from the EL - this facility is not + # in use on any current network, but should be implemented for full + # compliance + let terminalBlock = await findTerminalBlock( + rpcClient, + m.cfg.TERMINAL_TOTAL_DIFFICULTY) m.terminalBlockHash = some(terminalBlock.hash) m.ttdReachedField = true - - debug "startEth1Syncing: found merge terminal block", + debug "syncEth1Chain: found merge terminal block", currentEpoch = m.currentEpoch, BELLATRIX_FORK_EPOCH = m.cfg.BELLATRIX_FORK_EPOCH, - totalDifficulty = $nextBlock.totalDifficulty, ttd = $m.cfg.TERMINAL_TOTAL_DIFFICULTY, - terminalBlockHash = m.terminalBlockHash, - candidateBlockNumber = distinctBase(terminalBlock.number) - - if shouldProcessDeposits: - if m.latestEth1BlockNumber <= m.cfg.ETH1_FOLLOW_DISTANCE: - continue - - let targetBlock = m.latestEth1BlockNumber - m.cfg.ETH1_FOLLOW_DISTANCE - if targetBlock <= eth1SyncedTo: - continue - - let earliestBlockOfInterest = m.earliestBlockOfInterest() - await m.syncBlockRange(eth1SyncedTo + 1, - targetBlock, - earliestBlockOfInterest) - eth1SyncedTo = targetBlock - eth1_synced_head.set eth1SyncedTo.toGaugeValue - -proc start(m: Eth1Monitor, delayBeforeStart: Duration) {.gcsafe.} = - if m.runFut.isNil: - let runFut = m.startEth1Syncing(delayBeforeStart) - m.runFut = runFut - runFut.addCallback do (p: pointer) {.gcsafe.}: - if runFut.failed: - if runFut == m.runFut: - warn "Eth1 chain monitoring failure, restarting", err = runFut.error.msg - m.state = 
Failed - - safeCancel m.runFut - m.start(5.seconds) -proc start*(m: Eth1Monitor) = - m.start(0.seconds) + terminalBlockDifficulty = $terminalBlock.totalDifficulty, + terminalBlockHash = m.terminalBlockHash.get, + terminalBlockNumber = distinctBase(terminalBlock.number) + + m.syncTargetBlock = some( + if Eth1BlockNumber(latestBlock.number) > m.cfg.ETH1_FOLLOW_DISTANCE: + Eth1BlockNumber(latestBlock.number) - m.cfg.ETH1_FOLLOW_DISTANCE + else: + Eth1BlockNumber(0)) + if m.syncTargetBlock.get <= eth1SyncedTo: + # The chain reorged to a lower height. + # It's relatively safe to ignore that. + await sleepAsync(m.cfg.SECONDS_PER_ETH1_BLOCK.int.seconds) + continue + + eth1_latest_head.set latestBlock.number.toGaugeValue + + if shouldProcessDeposits and + latestBlock.number.uint64 > m.cfg.ETH1_FOLLOW_DISTANCE: + let depositContract = connection.web3.get.contractSender( + DepositContract, m.depositContractAddress) + await m.syncBlockRange(rpcClient, + depositContract, + eth1SyncedTo + 1, + m.syncTargetBlock.get, + m.earliestBlockOfInterest(Eth1BlockNumber latestBlock.number)) + + eth1SyncedTo = m.syncTargetBlock.get + eth1_synced_head.set eth1SyncedTo.toGaugeValue + +proc startChainSyncingLoop(m: ELManager) {.async.} = + info "Starting execution layer deposit syncing", + contract = $m.depositContractAddress + + while true: + let connection = try: await m.selectConnectionForChainSyncing() + except CancelledError as err: + raise err + except CatchableError as err: + error "No suitable EL connection for deposit syncing", err = err.msg + await sleepAsync(chronos.seconds(30)) + continue + try: + await syncEth1Chain(m, connection) + except CatchableError as err: + # TODO: edge triggered logging of failures + error "Error while syncing deposits", + url = connection.engineUrl.url, err = err.msg + +proc start*(m: ELManager) {.gcsafe.} = + if m.elConnections.len == 0: + return + + # Calling `ELManager.start()` on an already started ELManager is a noop + if m.chainSyncingLoopFut.isNil: + m.chainSyncingLoopFut = + m.startChainSyncingLoop() + + if m.hasJwtSecret and m.exchangeTransitionConfigurationLoopFut.isNil: + m.exchangeTransitionConfigurationLoopFut = + m.startExchangeTransitionConfigurationLoop() proc getEth1BlockHash*( - url: string, blockId: RtBlockIdentifier, jwtSecret: Option[seq[byte]]): + url: EngineApiUrl, blockId: RtBlockIdentifier, jwtSecret: Option[seq[byte]]): Future[BlockHash] {.async.} = - let web3 = awaitOrRaiseOnTimeout(newWeb3(url, getJsonRpcRequestHeaders(jwtSecret)), - 10.seconds) + let web3 = awaitOrRaiseOnTimeout(url.newWeb3(), 10.seconds) try: - let blk = awaitWithRetries( - web3.provider.eth_getBlockByNumber(blockId, false)) + let blk = awaitOrRaiseOnTimeout( + web3.provider.eth_getBlockByNumber(blockId, false), + web3RequestsTimeout) return blk.hash finally: await web3.close() @@ -1725,7 +2111,7 @@ proc testWeb3Provider*(web3Url: Uri, stdout.flushFile() var res: typeof(read action) try: - res = awaitWithRetries action + res = awaitOrRaiseOnTimeout(action, web3RequestsTimeout) stdout.write "\r" & actionDesc & ": " & $res except CatchableError as err: stdout.write "\r" & actionDesc & ": Error(" & err.msg & ")" @@ -1757,13 +2143,14 @@ proc testWeb3Provider*(web3Url: Uri, ns.get_deposit_root.call(blockNumber = latestBlock.number.uint64) when hasGenesisDetection: - proc loadPersistedDeposits*(monitor: Eth1Monitor) = - for i in 0 ..< monitor.depositsChain.db.genesisDeposits.len: - monitor.produceDerivedData monitor.depositsChain.db.genesisDeposits.get(i) + proc loadPersistedDeposits*(monitor: 
ELManager) = + for i in 0 ..< monitor.db.genesisDeposits.len: + monitor.produceDerivedData monitor.db.genesisDeposits.get(i) - proc findGenesisBlockInRange(m: Eth1Monitor, startBlock, endBlock: Eth1Block): + proc findGenesisBlockInRange(m: ELManager, + rpcClient: RpcClient, + startBlock, endBlock: Eth1Block): Future[Eth1Block] {.async.} = - doAssert m.dataProvider != nil, "close not called concurrently" doAssert startBlock.timestamp != 0 and not m.isAfterMinGenesisTime(startBlock) doAssert endBlock.timestamp != 0 and m.isAfterMinGenesisTime(endBlock) doAssert m.hasEnoughValidators(startBlock) @@ -1782,8 +2169,9 @@ float(endBlock.number - startBlock.number) blocksToJump = max(float(MIN_GENESIS_TIME - startBlockTime) / secondsPerBlock, 1.0) candidateNumber = min(endBlock.number - 1, startBlock.number + blocksToJump.uint64) - candidateBlock = awaitWithRetries( - m.dataProvider.getBlockByNumber(candidateNumber)) + candidateBlock = awaitOrRaiseOnTimeout( + rpcClient.getBlockByNumber(candidateNumber), + web3RequestsTimeout) var candidateAsEth1Block = Eth1Block(hash: candidateBlock.hash.asEth2Digest, number: candidateBlock.number.uint64, @@ -1806,7 +2194,7 @@ return endBlock - proc waitGenesis*(m: Eth1Monitor): Future[GenesisStateRef] {.async.} = + proc waitGenesis*(m: ELManager): Future[GenesisStateRef] {.async.} = if m.genesisState.isNil: m.start() diff --git a/beacon_chain/future_combinators.nim b/beacon_chain/future_combinators.nim new file mode 100644 index 0000000000..e109134db7 --- /dev/null +++ b/beacon_chain/future_combinators.nim @@ -0,0 +1,98 @@ +# TODO: These should be added to Chronos's asyncfutures2 module +# See https://github.com/status-im/nim-chronos/pull/339 + +import + chronos + +proc firstCompletedFuture*(futs: varargs[FutureBase]): Future[FutureBase] = + ## Returns a future which will complete and return the completed FutureBase, + ## when one of the futures in ``futs`` is completed. + ## + ## If the argument is empty, the returned future FAILS immediately. + ## + ## On success, the returned Future will hold the completed FutureBase. + ## + ## If all futures fail naturally or due to cancellation, the returned + ## future will be failed as well. + ## + ## On cancellation, futures in ``futs`` WILL NOT BE cancelled. + + var retFuture = newFuture[FutureBase]("chronos.firstCompletedFuture()") + + # Because we can't capture varargs[T] in closures we need to create a copy. + var nfuts = @futs + + # If one of the Future[T] already finished we return it as result + for fut in nfuts: + if fut.completed(): + retFuture.complete(fut) + return retFuture + + if len(nfuts) == 0: + retFuture.fail(newException(ValueError, "Empty Future[T] list")) + return retFuture + + var failedFutures = 0 + + var cb: proc(udata: pointer) {.gcsafe, raises: [Defect].} + cb = proc(udata: pointer) {.gcsafe, raises: [Defect].} = + if not(retFuture.finished()): + var res: FutureBase + var rfut = cast[FutureBase](udata) + if rfut.completed: + for i in 0..= BeaconBlockFork.Bellatrix: receivedBlock.message.body.execution_payload.block_hash else: @@ -200,7 +200,7 @@ proc expectValidForkchoiceUpdated( # previous `forkchoiceUpdated` had already marked it as valid. However, if # it's not the block that was received, don't info/warn either way given a # relative lack of immediate evidence. 
- if receivedExecutionBlockHash != headBlockHash: + if receivedExecutionBlockRoot != headBlockRoot: return case payloadExecutionStatus @@ -209,13 +209,13 @@ proc expectValidForkchoiceUpdated( discard of PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing: info "execution payload forkChoiceUpdated status ACCEPTED/SYNCING, but was previously VALID", - payloadExecutionStatus = $payloadExecutionStatus, headBlockHash, - safeBlockHash, finalizedBlockHash, + payloadExecutionStatus = $payloadExecutionStatus, headBlockRoot, + safeBlockRoot, finalizedBlockRoot, receivedBlock = shortLog(receivedBlock) of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash: warn "execution payload forkChoiceUpdated status INVALID, but was previously VALID", - payloadExecutionStatus = $payloadExecutionStatus, headBlockHash, - safeBlockHash, finalizedBlockHash, + payloadExecutionStatus = $payloadExecutionStatus, headBlockRoot, + safeBlockRoot, finalizedBlockRoot, receivedBlock = shortLog(receivedBlock) from ../consensus_object_pools/attestation_pool import @@ -233,12 +233,15 @@ from ../spec/datatypes/bellatrix import ExecutionPayload, SignedBeaconBlock from ../spec/datatypes/capella import ExecutionPayload, SignedBeaconBlock, asTrusted, shortLog +# TODO investigate why this seems to allow compilation even though it doesn't +# directly address eip4844.ExecutionPayload when complaint was that it didn't +# know about "eip4844" +from ../spec/datatypes/eip4844 import SignedBeaconBlock, asTrusted, shortLog + proc newExecutionPayload*( - eth1Monitor: Eth1Monitor, - executionPayload: bellatrix.ExecutionPayload | capella.ExecutionPayload): + elManager: ELManager, + executionPayload: bellatrix.ExecutionPayload | capella.ExecutionPayload | eip4844.ExecutionPayload): Future[Opt[PayloadExecutionStatus]] {.async.} = - if eth1Monitor.isNil: - return Opt.none PayloadExecutionStatus debug "newPayload: inserting block into execution engine", parentHash = executionPayload.parent_hash, @@ -254,23 +257,9 @@ proc newExecutionPayload*( baseFeePerGas = $executionPayload.base_fee_per_gas, numTransactions = executionPayload.transactions.len - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#request - const NEWPAYLOAD_TIMEOUT = 8.seconds - try: - let - payloadResponse = - awaitWithTimeout( - eth1Monitor.newPayload( - executionPayload.asEngineExecutionPayload), - NEWPAYLOAD_TIMEOUT): - info "newPayload: newPayload timed out" - return Opt.none PayloadExecutionStatus - - # Placeholder for type system - PayloadStatusV1(status: PayloadExecutionStatus.syncing) - - payloadStatus = payloadResponse.status + let payloadStatus = await elManager.sendNewPayload( + executionPayload.asEngineExecutionPayload) debug "newPayload: succeeded", parentHash = executionPayload.parent_hash, @@ -287,23 +276,12 @@ proc newExecutionPayload*( blockNumber = executionPayload.block_number return Opt.none PayloadExecutionStatus -# TODO investigate why this seems to allow compilation even though it doesn't -# directly address eip4844.ExecutionPayload when complaint was that it didn't -# know about "eip4844" -from ../spec/datatypes/eip4844 import SignedBeaconBlock, asTrusted, shortLog - -proc newExecutionPayload*( - eth1Monitor: Eth1Monitor, - executionPayload: eip4844.ExecutionPayload): - Future[Opt[PayloadExecutionStatus]] {.async.} = - debugRaiseAssert $eip4844ImplementationMissing & ": block_processor.nim:newExecutionPayload" - proc getExecutionValidity( - eth1Monitor: Eth1Monitor, - blck: 
bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock): + elManager: ELManager, + blck: bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock | eip4844.SignedBeaconBlock): Future[NewPayloadStatus] {.async.} = # Eth1 syncing is asynchronous from this - # TODO self.consensusManager.eth1Monitor.ttdReached + # TODO self.consensusManager.elManager.ttdReached # should gate this when it works more reliably # TODO detect have-TTD-but-not-is_execution_block case, and where # execution payload was non-zero when TTD detection more reliable @@ -311,15 +289,9 @@ proc getExecutionValidity( if not blck.message.is_execution_block: return NewPayloadStatus.valid # vacuously - if eth1Monitor.isNil: - return NewPayloadStatus.noResponse - try: - # Minimize window for Eth1 monitor to shut down connection - await eth1Monitor.ensureDataProvider() - let executionPayloadStatus = await newExecutionPayload( - eth1Monitor, blck.message.body.execution_payload) + elManager, blck.message.body.execution_payload) if executionPayloadStatus.isNone: return NewPayloadStatus.noResponse @@ -337,12 +309,6 @@ proc getExecutionValidity( error "getExecutionValidity: newPayload failed", err = err.msg return NewPayloadStatus.noResponse -proc getExecutionValidity( - eth1Monitor: Eth1Monitor, - blck: eip4844.SignedBeaconBlock): - Future[NewPayloadStatus] {.async.} = - debugRaiseAssert $eip4844ImplementationMissing & ": block_processor.nim:getExecutionValidity" - proc storeBlock*( self: ref BlockProcessor, src: MsgSource, wallTime: BeaconTime, signedBlock: ForkySignedBeaconBlock, queueTick: Moment = Moment.now(), @@ -359,7 +325,7 @@ proc storeBlock*( dag = self.consensusManager.dag payloadStatus = when typeof(signedBlock).toFork() >= BeaconBlockFork.Bellatrix: - await self.consensusManager.eth1Monitor.getExecutionValidity(signedBlock) + await self.consensusManager.elManager.getExecutionValidity(signedBlock) else: NewPayloadStatus.valid # vacuously payloadValid = payloadStatus == NewPayloadStatus.valid @@ -470,7 +436,7 @@ proc storeBlock*( wallSlot.start_beacon_time) if newHead.isOk: - template eth1Monitor(): auto = self.consensusManager.eth1Monitor + template elManager(): auto = self.consensusManager.elManager if self.consensusManager[].shouldSyncOptimistically(wallSlot): # Optimistic head is far in the future; report it as head block to EL. @@ -488,10 +454,10 @@ proc storeBlock*( # - "Beacon chain gapped" from DAG head to optimistic head, # - followed by "Beacon chain reorged" from optimistic head back to DAG. 
self.consensusManager[].updateHead(newHead.get.blck) - asyncSpawn eth1Monitor.runForkchoiceUpdatedDiscardResult( - headBlockHash = self.consensusManager[].optimisticExecutionPayloadHash, - safeBlockHash = newHead.get.safeExecutionPayloadHash, - finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash) + asyncSpawn elManager.forkchoiceUpdatedNoResult( + headBlock = self.consensusManager[].optimisticExecutionPayloadHash, + safeBlock = newHead.get.safeExecutionPayloadHash, + finalizedBlock = newHead.get.finalizedExecutionPayloadHash) else: let headExecutionPayloadHash = @@ -508,10 +474,10 @@ proc storeBlock*( if self.consensusManager.checkNextProposer(wallSlot).isNone: # No attached validator is next proposer, so use non-proposal fcU - asyncSpawn eth1Monitor.expectValidForkchoiceUpdated( - headBlockHash = headExecutionPayloadHash, - safeBlockHash = newHead.get.safeExecutionPayloadHash, - finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash, + asyncSpawn elManager.expectValidForkchoiceUpdated( + headBlockRoot = headExecutionPayloadHash, + safeBlockRoot = newHead.get.safeExecutionPayloadHash, + finalizedBlockRoot = newHead.get.finalizedExecutionPayloadHash, receivedBlock = signedBlock) else: # Some attached validator is next proposer, so prepare payload. As diff --git a/beacon_chain/networking/network_metadata.nim b/beacon_chain/networking/network_metadata.nim index 60119a8c66..fefcc27bb1 100644 --- a/beacon_chain/networking/network_metadata.nim +++ b/beacon_chain/networking/network_metadata.nim @@ -48,7 +48,7 @@ type # branch is not active and thus it will override the first variable # in this branch. dummy: string - # If the eth1Network is specified, the Eth1Monitor will perform some + # If the eth1Network is specified, the ELManager will perform some # additional checks to ensure we are connecting to a web3 provider # serving data for the same network. The value can be set to `None` # for custom networks and testing purposes. diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index bc01629f74..7ed0eaa8f3 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -12,7 +12,7 @@ else: import std/[os, random, sequtils, terminal, times], - chronos, chronicles, chronicles/chronos_tools, + chronos, chronicles, metrics, metrics/chronos_httpserver, stew/[byteutils, io2], eth/p2p/discoveryv5/[enr, random2], @@ -248,12 +248,12 @@ proc initFullNode( # This `nimcall` functions helps for keeping track of what # needs to be captured by the onFinalization closure. 
eventBus: EventBus, - eth1Monitor: Eth1Monitor): OnFinalizedCallback {.nimcall.} = - static: doAssert (eth1Monitor is ref) + elManager: ELManager): OnFinalizedCallback {.nimcall.} = + static: doAssert (elManager is ref) return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = - if eth1Monitor != nil: + if elManager != nil: let finalizedEpochRef = dag.getFinalizedEpochRef() - discard trackFinalizedState(eth1Monitor, + discard trackFinalizedState(elManager, finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index) node.updateLightClientFromDag() @@ -293,7 +293,7 @@ proc initFullNode( exitPool = newClone( ExitPool.init(dag, attestationPool, onVoluntaryExitAdded)) consensusManager = ConsensusManager.new( - dag, attestationPool, quarantine, node.eth1Monitor, + dag, attestationPool, quarantine, node.elManager, ActionTracker.init(rng, config.subscribeAllSubnets), node.dynamicFeeRecipientsStore, config.validatorsDir, config.defaultFeeRecipient) @@ -341,7 +341,7 @@ proc initFullNode( router.onSyncCommitteeMessage = scheduleSendingLightClientUpdates - dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.eth1Monitor) + dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager) dag.setBlockCb(onBlockAdded) dag.setHeadCb(onHeadChanged) dag.setReorgCb(onChainReorg) @@ -446,13 +446,12 @@ proc init*(T: type BeaconNode, else: nil - let optJwtSecret = rng[].loadJwtSecret(config, allowCreate = false) + let engineApiUrls = config.engineApiUrls - if config.web3Urls.len() == 0: + if engineApiUrls.len == 0: notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)" - var eth1Monitor: Eth1Monitor - + var genesisDetectionElManager: ELManager let genesisState = if metadata.genesisData.len > 0: try: @@ -470,27 +469,24 @@ proc init*(T: type BeaconNode, # This is a fresh start without a known genesis state # (most likely, it hasn't arrived yet). We'll try to # obtain a genesis through the Eth1 deposits monitor: - if config.web3Urls.len == 0: + if engineApiUrls.len == 0: fatal "Web3 URL not specified" quit 1 # TODO Could move this to a separate "GenesisMonitor" process or task # that would do only this - see Paul's proposal for this. 
- let eth1Monitor = Eth1Monitor.init( + genesisDetectionElManager = ELManager.new( cfg, metadata.depositContractBlock, metadata.depositContractBlockHash, db, nil, - config.web3Urls, - eth1Network, - config.web3ForcePolling, - optJwtSecret, - ttdReached = false) + engineApiUrls, + eth1Network) - eth1Monitor.loadPersistedDeposits() + genesisDetectionElManager.loadPersistedDeposits() - let phase0Genesis = waitFor eth1Monitor.waitGenesis() + let phase0Genesis = waitFor genesisDetectionElManager.waitGenesis() genesisState = (ref ForkedHashedBeaconState)( kind: BeaconStateFork.Phase0, phase0Data: @@ -572,17 +568,17 @@ proc init*(T: type BeaconNode, dag.checkWeakSubjectivityCheckpoint( config.weakSubjectivityCheckpoint.get, beaconClock) - if eth1Monitor.isNil and config.web3Urls.len > 0: - eth1Monitor = Eth1Monitor.init( + let elManager = if genesisDetectionElManager != nil: + genesisDetectionElManager + else: + ELManager.new( cfg, metadata.depositContractBlock, metadata.depositContractBlockHash, db, getBeaconTime, - config.web3Urls, + engineApiUrls, eth1Network, - config.web3ForcePolling, - optJwtSecret, ttdReached = not dag.loadExecutionBlockRoot(dag.finalizedHead.blck).isZero) if config.rpcEnabled.isSome: @@ -667,7 +663,7 @@ proc init*(T: type BeaconNode, db: db, config: config, attachedValidators: validatorPool, - eth1Monitor: eth1Monitor, + elManager: elManager, payloadBuilderRestClient: payloadBuilderRestClient, restServer: restServer, keymanagerHost: keymanagerHost, @@ -678,13 +674,6 @@ proc init*(T: type BeaconNode, beaconClock: beaconClock, validatorMonitor: validatorMonitor, stateTtlCache: stateTtlCache, - nextExchangeTransitionConfTime: - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#specification-3 - # Consensus Layer client software **SHOULD** poll this endpoint every - # 60 seconds. - # Delay first call by that time to allow for EL syncing to begin; it can - # otherwise generate an EL warning by claiming a zero merge block. - Moment.now + chronos.seconds(60), dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init())) node.initLightClient( @@ -1328,17 +1317,6 @@ proc onSecond(node: BeaconNode, time: Moment) = # Nim GC metrics (for the main thread) updateThreadMetrics() - if time >= node.nextExchangeTransitionConfTime and not node.eth1Monitor.isNil: - # The EL client SHOULD log a warning when not receiving an exchange message - # at least once every 120 seconds. If we only attempt to exchange every 60 - # seconds, the warning would be triggered if a single message is missed. - # To accommodate for that, exchange slightly more frequently.
- # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#engine_exchangetransitionconfigurationv1 - node.nextExchangeTransitionConfTime = time + chronos.seconds(45) - - if node.currentSlot.epoch >= node.dag.cfg.BELLATRIX_FORK_EPOCH: - traceAsyncErrors node.eth1Monitor.exchangeTransitionConfiguration() - if node.config.stopAtSyncedEpoch != 0 and node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch: notice "Shutting down after having reached the target synced epoch" @@ -1638,9 +1616,7 @@ proc start*(node: BeaconNode) {.raises: [Defect, CatchableError].} = waitFor node.initializeNetworking() - if node.eth1Monitor != nil: - node.eth1Monitor.start() - + node.elManager.start() node.run() func formatGwei(amount: uint64): string = @@ -1851,11 +1827,15 @@ proc doCreateTestnet*(config: BeaconNodeConf, rng: var HmacDrbgContext) {.raises deposits.add(launchPadDeposits[i] as DepositData) let - startTime = uint64(times.toUnix(times.getTime()) + config.genesisOffset) + startTime = if config.genesisTime.isSome: + config.genesisTime.get + else: + uint64(times.toUnix(times.getTime()) + config.genesisOffset.get(0)) outGenesis = config.outputGenesis.string - eth1Hash = if config.web3Urls.len == 0: eth1BlockHash + engineApiUrls = config.engineApiUrls + eth1Hash = if engineApiUrls.len == 0: eth1BlockHash else: (waitFor getEth1BlockHash( - config.web3Urls[0], blockId("latest"), + engineApiUrls[0], blockId("latest"), rng.loadJwtSecret(config, allowCreate = true))).asEth2Digest cfg = getRuntimeConfig(config.eth2Network) var diff --git a/beacon_chain/nimbus_light_client.nim b/beacon_chain/nimbus_light_client.nim index 54e514bc36..e140f81791 100644 --- a/beacon_chain/nimbus_light_client.nim +++ b/beacon_chain/nimbus_light_client.nim @@ -16,7 +16,6 @@ import ./spec/datatypes/[phase0, altair, bellatrix], "."/[filepath, light_client, light_client_db, nimbus_binary_common, version] -from ./consensus_object_pools/consensus_manager import runForkchoiceUpdated from ./gossip_processing/block_processor import newExecutionPayload from ./gossip_processing/eth2_processor import toValidationResult @@ -86,24 +85,20 @@ programMain: network = createEth2Node( rng, config, netKeys, cfg, forkDigests, getBeaconTime, genesis_validators_root) - - eth1Monitor = - if config.web3Urls.len > 0: - let res = Eth1Monitor.init( + engineApiUrls = config.engineApiUrls + elManager = + if engineApiUrls.len > 0: + ELManager.new( cfg, metadata.depositContractBlock, metadata.depositContractBlockHash, db = nil, getBeaconTime, - config.web3Urls, + engineApiUrls, metadata.eth1Network, - forcePolling = false, - rng[].loadJwtSecret(config, allowCreate = false), # TTD is not relevant for the light client, so it's safe # to assume that the TTD has been reached. 
ttdReached = true) - waitFor res.ensureDataProvider() - res else: nil @@ -117,17 +112,12 @@ programMain: if blck.message.is_execution_block: template payload(): auto = blck.message.body.execution_payload - if eth1Monitor != nil and not payload.block_hash.isZero: - await eth1Monitor.ensureDataProvider() - - # engine_newPayloadV1 - discard await eth1Monitor.newExecutionPayload(payload) - - # engine_forkchoiceUpdatedV1 - discard await eth1Monitor.runForkchoiceUpdated( - headBlockRoot = payload.block_hash, - safeBlockRoot = payload.block_hash, # stub value - finalizedBlockRoot = ZERO_HASH) + if elManager != nil and not payload.block_hash.isZero: + discard await elManager.newExecutionPayload(payload) + discard await elManager.forkchoiceUpdated( + headBlock = payload.block_hash, + safeBlock = payload.block_hash, # stub value + finalizedBlock = ZERO_HASH) else: discard optimisticProcessor = initOptimisticProcessor( getBeaconTime, optimisticHandler) @@ -213,7 +203,7 @@ programMain: func shouldSyncOptimistically(wallSlot: Slot): bool = # Check whether an EL is connected - if eth1Monitor == nil: + if elManager == nil: return false isSynced(wallSlot) @@ -312,16 +302,8 @@ programMain: nextSlot = wallSlot + 1 timeToNextSlot = nextSlot.start_beacon_time() - getBeaconTime() - var nextExchangeTransitionConfTime = Moment.now + chronos.seconds(60) proc onSecond(time: Moment) = let wallSlot = getBeaconTime().slotOrZero() - - # engine_exchangeTransitionConfigurationV1 - if time > nextExchangeTransitionConfTime and eth1Monitor != nil: - nextExchangeTransitionConfTime = time + chronos.seconds(45) - if wallSlot.epoch >= cfg.BELLATRIX_FORK_EPOCH: - traceAsyncErrors eth1Monitor.exchangeTransitionConfiguration() - if checkIfShouldStopAtEpoch(wallSlot, config.stopAtEpoch): quit(0) diff --git a/beacon_chain/rpc/rest_nimbus_api.nim b/beacon_chain/rpc/rest_nimbus_api.nim index 339caf109c..19aa7b7e68 100644 --- a/beacon_chain/rpc/rest_nimbus_api.nim +++ b/beacon_chain/rpc/rest_nimbus_api.nim @@ -227,11 +227,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) = router.api(MethodGet, "/nimbus/v1/eth1/chain") do ( ) -> RestApiResponse: - let res = - if not(isNil(node.eth1Monitor)): - mapIt(node.eth1Monitor.depositChainBlocks, it) - else: - @[] + let res = mapIt(node.elManager.eth1ChainBlocks, it) return RestApiResponse.jsonResponse(res) router.api(MethodGet, "/nimbus/v1/eth1/proposal_data") do ( diff --git a/beacon_chain/rpc/rest_validator_api.nim b/beacon_chain/rpc/rest_validator_api.nim index 3eec3f38ee..1604af8fd9 100644 --- a/beacon_chain/rpc/rest_validator_api.nim +++ b/beacon_chain/rpc/rest_validator_api.nim @@ -376,10 +376,12 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = let res = if qslot.epoch >= node.dag.cfg.CAPELLA_FORK_EPOCH: - await makeBeaconBlockForHeadAndSlot[capella.ExecutionPayload]( + await makeBeaconBlockForHeadAndSlot( + capella.ExecutionPayload, node, qrandao, proposer.get(), qgraffiti, qhead, qslot) else: - await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload]( + await makeBeaconBlockForHeadAndSlot( + bellatrix.ExecutionPayload, node, qrandao, proposer.get(), qgraffiti, qhead, qslot) if res.isErr(): return RestApiResponse.jsonError(Http400, res.error()) @@ -482,8 +484,9 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) = bellatrixData: res.get())) else: # Pre-Bellatrix, this endpoint will return a BeaconBlock - let res = await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload]( - node, qrandao, 
proposer.get(), qgraffiti, qhead, qslot) + let res = await makeBeaconBlockForHeadAndSlot( + bellatrix.ExecutionPayload, node, qrandao, + proposer.get(), qgraffiti, qhead, qslot) if res.isErr(): return RestApiResponse.jsonError(Http400, res.error()) return responsePlain(res.get()) diff --git a/beacon_chain/spec/datatypes/bellatrix.nim b/beacon_chain/spec/datatypes/bellatrix.nim index b6a7fe524f..718f72ea23 100644 --- a/beacon_chain/spec/datatypes/bellatrix.nim +++ b/beacon_chain/spec/datatypes/bellatrix.nim @@ -31,6 +31,9 @@ const # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#request-1 FORKCHOICEUPDATED_TIMEOUT* = 8.seconds + # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#request + NEWPAYLOAD_TIMEOUT* = 8.seconds + type # https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/bellatrix/beacon-chain.md#custom-types Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION] diff --git a/beacon_chain/spec/engine_authentication.nim b/beacon_chain/spec/engine_authentication.nim index 0cb44c3bd3..58b119b7ab 100644 --- a/beacon_chain/spec/engine_authentication.nim +++ b/beacon_chain/spec/engine_authentication.nim @@ -6,7 +6,8 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import - chronicles, + std/[base64, json, options, os, strutils], + chronicles, confutils/defs, bearssl/rand, nimcrypto/[hmac, utils], stew/[byteutils, results] @@ -24,6 +25,9 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} +const + JWT_SECRET_LEN = 32 + proc base64urlEncode(x: auto): string = # The only strings this gets are internally generated, and don't have # encoding quirks. @@ -59,15 +63,34 @@ proc getSignedToken*(key: openArray[byte], payload: string): string = proc getSignedIatToken*(key: openArray[byte], time: int64): string = getSignedToken(key, $getIatToken(time)) +proc parseJwtTokenValue*(input: string): Result[seq[byte], cstring] = + # Secret JWT key is parsed in constant time using nimcrypto: + # https://github.com/cheatfate/nimcrypto/pull/44 + let secret = utils.fromHex(input) + if secret.len == JWT_SECRET_LEN: + ok(secret) + else: + err("The JWT secret should be 256 bits and hex-encoded") + +proc loadJwtSecretFile*(jwtSecretFile: InputFile): Result[seq[byte], cstring] = + try: + let lines = readLines(string jwtSecretFile, 1) + if lines.len > 0: + parseJwtTokenValue(lines[0]) + else: + err("The JWT token file should not be empty") + except IOError: + err("couldn't open specified JWT secret file") + except ValueError: + err("invalid JWT hex string") + proc checkJwtSecret*( - rng: var HmacDrbgContext, dataDir: string, jwtSecret: Option[string]): + rng: var HmacDrbgContext, dataDir: string, jwtSecret: Option[InputFile]): Result[seq[byte], cstring] = # If such a parameter is given, but the file cannot be read, or does not # contain a hex-encoded key of 256 bits, the client should treat this as an # error: either abort the startup, or show error and continue without # exposing the authenticated port. 
- const SECRET_LEN = 32 - if jwtSecret.isNone: # If such a parameter is not given, the client SHOULD generate such a # token, valid for the duration of the execution, and store it the @@ -78,7 +101,7 @@ proc checkJwtSecret*( const jwtSecretFilename = "jwt.hex" let jwtSecretPath = dataDir / jwtSecretFilename - let newSecret = rng.generateBytes(SECRET_LEN) + let newSecret = rng.generateBytes(JWT_SECRET_LEN) try: writeFile(jwtSecretPath, newSecret.to0xHex()) except IOError as exc: @@ -89,20 +112,4 @@ proc checkJwtSecret*( err = exc.msg return ok(newSecret) - try: - # TODO replace with separate function - let lines = readLines(jwtSecret.get, 1) - if lines.len > 0: - # Secret JWT key is parsed in constant time using nimcrypto: - # https://github.com/cheatfate/nimcrypto/pull/44 - let secret = utils.fromHex(lines[0]) - if secret.len == SECRET_LEN: - ok(secret) - else: - err("JWT secret not 256 bits") - else: - err("no hex string found") - except IOError: - err("couldn't open specified JWT secret file") - except ValueError: - err("invalid JWT hex string") + loadJwtSecretFile(jwtSecret.get) diff --git a/beacon_chain/spec/state_transition_block.nim b/beacon_chain/spec/state_transition_block.nim index 43181deb3c..56a49a9742 100644 --- a/beacon_chain/spec/state_transition_block.nim +++ b/beacon_chain/spec/state_transition_block.nim @@ -697,7 +697,7 @@ func is_partially_withdrawable_validator( has_max_effective_balance and has_excess_balance # https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/capella/beacon-chain.md#new-get_expected_withdrawals -func get_expected_withdrawals(state: capella.BeaconState): seq[Withdrawal] = +func get_expected_withdrawals*(state: capella.BeaconState): seq[Withdrawal] = let epoch = get_current_epoch(state) var withdrawal_index = state.next_withdrawal_index @@ -730,7 +730,7 @@ func get_expected_withdrawals(state: capella.BeaconState): seq[Withdrawal] = withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.1/specs/eip4844/beacon-chain.md#disabling-withdrawals -func get_expected_withdrawals(state: eip4844.BeaconState): seq[Withdrawal] = +func get_expected_withdrawals*(state: eip4844.BeaconState): seq[Withdrawal] = # During testing we avoid Capella-specific updates to the state transition. # # ... 
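For reference, the JWT handling above now funnels both startup paths through one validator: `parseJwtTokenValue` checks the secret itself and `loadJwtSecretFile` wraps the file I/O around it. A minimal usage sketch (the import path and `Result` helpers are assumed to match the diff):

    import std/strutils
    import stew/results
    from beacon_chain/spec/engine_authentication import parseJwtTokenValue

    # 64 hex characters encode the required 256-bit (32-byte) secret
    doAssert parseJwtTokenValue("ab".repeat(32)).isOk
    # shorter or non-hex input is rejected rather than silently padded
    doAssert parseJwtTokenValue("deadbeef").isErr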
diff --git a/beacon_chain/validators/keystore_management.nim b/beacon_chain/validators/keystore_management.nim index a3960fd11e..f4af581411 100644 --- a/beacon_chain/validators/keystore_management.nim +++ b/beacon_chain/validators/keystore_management.nim @@ -1280,7 +1280,8 @@ proc generateDistirbutedStore*(rng: var HmacDrbgContext, shareValidatorDir: string, remoteValidatorDir: string, remoteSignersUrls: seq[string], - threshold: uint32): Result[void, KeystoreGenerationError] = + threshold: uint32, + mode = KeystoreMode.Secure): Result[void, KeystoreGenerationError] = var signers: seq[RemoteSignerInfo] for idx, share in shares: var password = KeystorePass.init ncrutils.toHex(rng.generateBytes(32)) @@ -1292,7 +1293,7 @@ proc generateDistirbutedStore*(rng: var HmacDrbgContext, share.key, share.key.toPubKey, makeKeyPath(validatorIdx, signingKeyKind), password.str, - KeystoreMode.Secure) + mode) signers.add RemoteSignerInfo( url: HttpHostUri(parseUri(remoteSignersUrls[idx])), @@ -1417,7 +1418,8 @@ proc generateDeposits*(cfg: RuntimeConfig, validatorsDir & "_shares", validatorsDir, remoteSignersUrls, - threshold) + threshold, + mode) deposits.add prepareDeposit( cfg, withdrawalPubKey, derivedKey, signingPubKey) diff --git a/beacon_chain/validators/validator_duties.nim b/beacon_chain/validators/validator_duties.nim index 57f8d4dad0..b9efb8b7e4 100644 --- a/beacon_chain/validators/validator_duties.nim +++ b/beacon_chain/validators/validator_duties.nim @@ -263,71 +263,10 @@ proc createAndSendAttestation(node: BeaconNode, proc getBlockProposalEth1Data*(node: BeaconNode, state: ForkedHashedBeaconState): BlockProposalEth1Data = - if node.eth1Monitor.isNil: - let pendingDepositsCount = - getStateField(state, eth1_data).deposit_count - - getStateField(state, eth1_deposit_index) - if pendingDepositsCount > 0: - result.hasMissingDeposits = true - else: - result.vote = getStateField(state, eth1_data) - else: - let finalizedEpochRef = node.dag.getFinalizedEpochRef() - result = node.eth1Monitor.getBlockProposalData( - state, finalizedEpochRef.eth1_data, - finalizedEpochRef.eth1_deposit_index) - -from web3/engine_api import ForkchoiceUpdatedResponse - -proc forkchoice_updated( - head_block_hash: Eth2Digest, safe_block_hash: Eth2Digest, - finalized_block_hash: Eth2Digest, timestamp: uint64, random: Eth2Digest, - fee_recipient: ethtypes.Address, execution_engine: Eth1Monitor): - Future[Option[bellatrix.PayloadID]] {.async.} = - logScope: - head_block_hash - finalized_block_hash - - discard $capellaImplementationMissing & ": ensure fcU usage updated for capella" - let - forkchoiceResponse = - try: - awaitWithTimeout( - execution_engine.forkchoiceUpdated( - head_block_hash, safe_block_hash, finalized_block_hash, - timestamp, random.data, fee_recipient), - FORKCHOICEUPDATED_TIMEOUT): - error "Engine API fork-choice update timed out" - default(ForkchoiceUpdatedResponse) - except CatchableError as err: - error "Engine API fork-choice update failed", err = err.msg - default(ForkchoiceUpdatedResponse) - - payloadId = forkchoiceResponse.payloadId - - return if payloadId.isSome: - some(bellatrix.PayloadID(payloadId.get)) - else: - none(bellatrix.PayloadID) - -proc get_execution_payload[EP]( - payload_id: Option[bellatrix.PayloadID], execution_engine: Eth1Monitor): - Future[EP] {.async.} = - return if payload_id.isNone(): - # Pre-merge, empty payload - default(EP) - else: - when EP is bellatrix.ExecutionPayload: - asConsensusExecutionPayload( - await execution_engine.getPayloadV1(payload_id.get)) - elif EP is 
capella.ExecutionPayload: - asConsensusExecutionPayload( - await execution_engine.getPayloadV2(payload_id.get)) - elif EP is eip4844.ExecutionPayload: - debugRaiseAssert $eip4844ImplementationMissing & ": get_execution_payload" - default(EP) - else: - static: doAssert "unknown execution payload type" + let finalizedEpochRef = node.dag.getFinalizedEpochRef() + result = node.elManager.getBlockProposalData( + state, finalizedEpochRef.eth1_data, + finalizedEpochRef.eth1_deposit_index) proc getFeeRecipient(node: BeaconNode, pubkey: ValidatorPubKey, @@ -338,8 +277,10 @@ proc getFeeRecipient(node: BeaconNode, from web3/engine_api_types import PayloadExecutionStatus from ../spec/datatypes/capella import BeaconBlock, ExecutionPayload from ../spec/datatypes/eip4844 import BeaconBlock, ExecutionPayload +from ../spec/state_transition_block import get_expected_withdrawals -proc getExecutionPayload[T]( +proc getExecutionPayload( + T: type, node: BeaconNode, proposalState: ref ForkedHashedBeaconState, epoch: Epoch, validator_index: ValidatorIndex): Future[Opt[T]] {.async.} = # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/bellatrix/validator.md#executionpayload @@ -369,74 +310,45 @@ proc getExecutionPayload[T]( else: default(T) - if node.eth1Monitor.isNil: - beacon_block_payload_errors.inc() - warn "getExecutionPayload: eth1Monitor not initialized; using empty execution payload" - return Opt.some empty_execution_payload - try: - # Minimize window for Eth1 monitor to shut down connection - await node.consensusManager.eth1Monitor.ensureDataProvider() - - # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#request-2 - const GETPAYLOAD_TIMEOUT = 1.seconds - let beaconHead = node.attestationPool[].getBeaconHead(node.dag.head) executionBlockRoot = node.dag.loadExecutionBlockRoot(beaconHead.blck) latestHead = if not executionBlockRoot.isZero: executionBlockRoot - elif node.eth1Monitor.terminalBlockHash.isSome: - node.eth1Monitor.terminalBlockHash.get.asEth2Digest + elif node.elManager.terminalBlockHash.isSome: + node.elManager.terminalBlockHash.get.asEth2Digest else: default(Eth2Digest) latestSafe = beaconHead.safeExecutionPayloadHash latestFinalized = beaconHead.finalizedExecutionPayloadHash - lastFcU = node.consensusManager.forkchoiceUpdatedInfo timestamp = withState(proposalState[]): compute_timestamp_at_slot(forkyState.data, forkyState.data.slot) - payload_id = - if lastFcU.isSome and - lastFcU.get.headBlockRoot == latestHead and - lastFcU.get.safeBlockRoot == latestSafe and - lastFcU.get.finalizedBlockRoot == latestFinalized and - lastFcU.get.timestamp == timestamp and - lastFcU.get.feeRecipient == feeRecipient: - some bellatrix.PayloadID(lastFcU.get.payloadId) + random = withState(proposalState[]): + get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)) + withdrawals = withState(proposalState[]): + when stateFork >= BeaconStateFork.Capella: + get_expected_withdrawals(forkyState.data) else: - debug "getExecutionPayload: didn't find payloadId, re-querying", - latestHead, latestSafe, latestFinalized, - timestamp, - feeRecipient, - cachedForkchoiceUpdateInformation = lastFcU - - let random = withState(proposalState[]): - get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)) - (await forkchoice_updated( - latestHead, latestSafe, latestFinalized, timestamp, random, - feeRecipient, node.consensusManager.eth1Monitor)) - payload = try: - awaitWithTimeout( - get_execution_payload[T](payload_id, node.consensusManager.eth1Monitor), - 
GETPAYLOAD_TIMEOUT): - beacon_block_payload_errors.inc() - warn "Getting execution payload from Engine API timed out", payload_id - empty_execution_payload - except CatchableError as err: - beacon_block_payload_errors.inc() - warn "Getting execution payload from Engine API failed", - payload_id, err = err.msg - empty_execution_payload - - return Opt.some payload + @[] + payload = await node.elManager.getPayload( + EngineApiPayloadType(T), latestHead, latestSafe, latestFinalized, + timestamp, random, feeRecipient, withdrawals) + + if payload.isNone: + error "Failed to obtain execution payload from EL" + return Opt.none(T) + + return Opt.some asConsensusExecutionPayload(payload.get) except CatchableError as err: beacon_block_payload_errors.inc() error "Error creating non-empty execution payload; using empty execution payload", msg = err.msg return Opt.some empty_execution_payload -proc makeBeaconBlockForHeadAndSlot*[EP]( +proc makeBeaconBlockForHeadAndSlot*( + EP: type, node: BeaconNode, randao_reveal: ValidatorSig, validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef, slot: Slot, @@ -469,15 +381,14 @@ proc makeBeaconBlockForHeadAndSlot*[EP]( elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or not ( is_merge_transition_complete(state[]) or - ((not node.eth1Monitor.isNil) and node.eth1Monitor.ttdReached)): + node.elManager.ttdReached): let fut = newFuture[Opt[EP]]("empty-payload") # https://github.com/nim-lang/Nim/issues/19802 fut.complete(Opt.some(default(EP))) fut else: # Create execution payload while packing attestations - getExecutionPayload[EP]( - node, state, slot.epoch, validator_index) + getExecutionPayload(EP, node, state, slot.epoch, validator_index) eth1Proposal = node.getBlockProposalEth1Data(state[]) @@ -530,13 +441,13 @@ proc makeBeaconBlockForHeadAndSlot*[EP]( # workaround for https://github.com/nim-lang/Nim/issues/20900 to avoid default # parameters -proc makeBeaconBlockForHeadAndSlot*[EP]( - node: BeaconNode, randao_reveal: ValidatorSig, +proc makeBeaconBlockForHeadAndSlot*( + EP: type, node: BeaconNode, randao_reveal: ValidatorSig, validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef, slot: Slot): - Future[ForkedBlockResult] = - return makeBeaconBlockForHeadAndSlot[EP]( - node, randao_reveal, validator_index, graffiti, head, slot, + Future[ForkedBlockResult] {.async.} = + return await makeBeaconBlockForHeadAndSlot( + EP, node, randao_reveal, validator_index, graffiti, head, slot, execution_payload = Opt.none(EP), transactions_root = Opt.none(Eth2Digest), execution_payload_root = Opt.none(Eth2Digest)) @@ -686,7 +597,8 @@ proc getBlindedBlockParts( shimExecutionPayload, executionPayloadHeader.get, getFieldNames(bellatrix.ExecutionPayloadHeader)) - let newBlock = await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload]( + let newBlock = await makeBeaconBlockForHeadAndSlot( + bellatrix.ExecutionPayload, node, randao, validator_index, graffiti, head, slot, execution_payload = Opt.some shimExecutionPayload, transactions_root = Opt.some executionPayloadHeader.get.transactions_root, @@ -837,10 +749,12 @@ proc proposeBlock(node: BeaconNode, let newBlock = if slot.epoch >= node.dag.cfg.CAPELLA_FORK_EPOCH: - await makeBeaconBlockForHeadAndSlot[capella.ExecutionPayload]( + await makeBeaconBlockForHeadAndSlot( + capella.ExecutionPayload, node, randao, validator_index, node.graffitiBytes, head, slot) else: - await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload]( + await makeBeaconBlockForHeadAndSlot( + bellatrix.ExecutionPayload, node, randao,
validator_index, node.graffitiBytes, head, slot) if newBlock.isErr(): diff --git a/docs/the_nimbus_book/src/pi-guide.md b/docs/the_nimbus_book/src/pi-guide.md index 6877ac2665..8b6cc248ff 100644 --- a/docs/the_nimbus_book/src/pi-guide.md +++ b/docs/the_nimbus_book/src/pi-guide.md @@ -320,8 +320,8 @@ INF 2020-12-01 11:25:37.073+01:00 Generating new networking key ... NOT 2020-12-01 11:25:45.267+00:00 Local validator attached tid=22009 file=validator_pool.nim:33 pubkey=95e3cbe88c71ab2d0e3053b7b12ead329a37e9fb8358bdb4e56251993ab68e46b9f9fa61035fe4cf2abf4c07dfad6c45 validator=95e3cbe8 ... -NOT 2020-12-01 11:25:59.512+00:00 Eth1 sync progress topics="eth1" tid=21914 file=eth1_monitor.nim:705 blockNumber=3836397 depositsProcessed=106147 -NOT 2020-12-01 11:26:02.574+00:00 Eth1 sync progress topics="eth1" tid=21914 file=eth1_monitor.nim:705 blockNumber=3841412 depositsProcessed=106391 +NOT 2020-12-01 11:25:59.512+00:00 Eth1 sync progress topics="eth1" tid=21914 blockNumber=3836397 depositsProcessed=106147 +NOT 2020-12-01 11:26:02.574+00:00 Eth1 sync progress topics="eth1" tid=21914 blockNumber=3841412 depositsProcessed=106391 ... INF 2020-12-01 11:26:31.000+00:00 Slot start topics="beacnde" tid=21815 file=nimbus_beacon_node.nim:505 lastSlot=96566 scheduledSlot=96567 beaconTime=1w6d9h53m24s944us774ns peers=7 head=b54486c4:96563 headEpoch=3017 finalized=2f5d12e4:96479 finalizedEpoch=3014 INF 2020-12-01 11:26:36.285+00:00 Slot end topics="beacnde" tid=21815 file=nimbus_beacon_node.nim:593 slot=96567 nextSlot=96568 head=b54486c4:96563 headEpoch=3017 finalizedHead=2f5d12e4:96479 finalizedEpoch=3014 diff --git a/docs/the_nimbus_book/src/start-syncing.md b/docs/the_nimbus_book/src/start-syncing.md index 0b40bef346..4aacc06004 100644 --- a/docs/the_nimbus_book/src/start-syncing.md +++ b/docs/the_nimbus_book/src/start-syncing.md @@ -41,12 +41,12 @@ You should see the following output: ``` INF 2020-12-01 11:25:33.487+01:00 Launching beacon node ... -INF 2020-12-01 11:25:34.556+01:00 Loading block dag from database topics="beacnde" tid=19985314 file=nimbus_beacon_node.nim:198 path=build/data/shared_prater_0/db +INF 2020-12-01 11:25:34.556+01:00 Loading block dag from database topics="beacnde" tid=19985314 path=build/data/shared_prater_0/db INF 2020-12-01 11:25:35.921+01:00 Block dag initialized INF 2020-12-01 11:25:37.073+01:00 Generating new networking key ... -NOT 2020-12-01 11:25:59.512+00:00 Eth1 sync progress topics="eth1" tid=21914 file=eth1_monitor.nim:705 blockNumber=3836397 depositsProcessed=106147 -NOT 2020-12-01 11:26:02.574+00:00 Eth1 sync progress topics="eth1" tid=21914 file=eth1_monitor.nim:705 blockNumber=3841412 depositsProcessed=106391 +NOT 2020-12-01 11:25:59.512+00:00 Eth1 sync progress topics="eth1" tid=21914 blockNumber=3836397 depositsProcessed=106147 +NOT 2020-12-01 11:26:02.574+00:00 Eth1 sync progress topics="eth1" tid=21914 blockNumber=3841412 depositsProcessed=106391 ... 
INF 2020-12-01 11:26:31.000+00:00 Slot start topics="beacnde" tid=21815 file=nimbus_beacon_node.nim:505 lastSlot=96566 scheduledSlot=96567 beaconTime=1w6d9h53m24s944us774ns peers=7 head=b54486c4:96563 headEpoch=3017 finalized=2f5d12e4:96479 finalizedEpoch=3014 INF 2020-12-01 11:26:36.285+00:00 Slot end topics="beacnde" tid=21815 file=nimbus_beacon_node.nim:593 slot=96567 nextSlot=96568 head=b54486c4:96563 headEpoch=3017 finalizedHead=2f5d12e4:96479 finalizedEpoch=3014 diff --git a/docs/the_nimbus_book/src/troubleshooting.md b/docs/the_nimbus_book/src/troubleshooting.md index 2cf262fc5e..a002b79e88 100644 --- a/docs/the_nimbus_book/src/troubleshooting.md +++ b/docs/the_nimbus_book/src/troubleshooting.md @@ -112,7 +112,7 @@ If you're being flooded with `Catching up on validator duties` messages, your CP If you see an error that looks like the following: ``` -{"lvl":"ERR","ts":"2021-05-11 09:05:53.547+00:00","msg":"Eth1 chain monitoring failure, restarting","topics":"eth1","tid":1,"file":"eth1_monitor.nim:1158","err":"Trying to access value with err: Failed to setup web3 connection"} +{"lvl":"ERR","ts":"2021-05-11 09:05:53.547+00:00","msg":"Eth1 chain monitoring failure, restarting","topics":"eth1","tid":1,"err":"Trying to access value with err: Failed to setup web3 connection"} ``` It's because your node can't connect to the web3 provider you have specified. Please double check that you've correctly specified your provider. If you haven't done so already, we recommend [adding a backup](web3-backup.md). diff --git a/ncli/deposit_downloader.nim b/ncli/deposit_downloader.nim index c61fadb0fa..ed09b247ae 100644 --- a/ncli/deposit_downloader.nim +++ b/ncli/deposit_downloader.nim @@ -1,137 +1,74 @@ import - json, strutils, + std/[json, strutils, times, sequtils], chronos, confutils, chronicles, web3, web3/ethtypes as web3Types, eth/async_utils, + ../beacon_chain/beacon_chain_db, ../beacon_chain/networking/network_metadata, ../beacon_chain/eth1/eth1_monitor, - ../beacon_chain/spec/helpers + ../beacon_chain/spec/[presets, helpers] type CliFlags = object - web3Url {. - name: "web3-url".}: string - depositContractAddress {. - name: "deposit-contract".}: string - startBlock {. - name: "start-block".}: uint64 - endBlock {. - name: "start-block".}: Option[uint64] + network {. + defaultValue: "mainnet" + name: "network".}: string + elUrls {. + name: "el".}: seq[EngineApiUrlConfigValue] + jwtSecret {. + name: "jwt-secret".}: Option[InputFile] outDepositsFile {. 
- defaultValue: "deposits.csv" - name: "out-deposits-file".}: OutFile - -contract(DepositContract): - proc deposit(pubkey: Bytes48, - withdrawalCredentials: Bytes32, - signature: Bytes96, - deposit_data_root: FixedBytes[32]) - - proc get_deposit_root(): FixedBytes[32] - proc get_deposit_count(): Bytes8 - - proc DepositEvent(pubkey: Bytes48, - withdrawalCredentials: Bytes32, - amount: Bytes8, - signature: Bytes96, - index: Bytes8) {.event.} - -const - web3Timeouts = 60.seconds + name: "out-deposits-file".}: Option[OutFile] proc main(flags: CliFlags) {.async.} = - let web3 = waitFor newWeb3(flags.web3Url) - - let endBlock = if flags.endBlock.isSome: - flags.endBlock.get - else: - awaitWithRetries(web3.provider.eth_getBlockByNumber(blockId"latest", false)).number.uint64 - - let depositContract = web3.contractSender( - DepositContract, - Eth1Address.fromHex flags.depositContractAddress) - - var depositsFile = open(string flags.outDepositsFile, fmWrite) - depositsFile.write( - "block", ",", - "transaction", ",", - "depositor", ",", - "amount", ",", - "validatorKey", ",", - "withdrawalCredentials", "\n") - - var currentBlock = flags.startBlock - while currentBlock < endBlock: - var - blocksPerRequest = 5000'u64 # This is roughly a day of Eth1 blocks - backoff = 100 - - while true: - let maxBlockNumberRequested = min(endBlock, currentBlock + blocksPerRequest - 1) - - template retryOrRaise(err: ref CatchableError) = - blocksPerRequest = blocksPerRequest div 2 - if blocksPerRequest == 0: - raise err - continue - - debug "Obtaining deposit log events", - fromBlock = currentBlock, - toBlock = maxBlockNumberRequested, - backoff - - # Reduce all request rate until we have a more general solution - # for dealing with Infura's rate limits - await sleepAsync(milliseconds(backoff)) - - let jsonLogsFut = depositContract.getJsonLogs( - DepositEvent, - fromBlock = some blockId(currentBlock), - toBlock = some blockId(maxBlockNumberRequested)) - - let depositLogs = try: - # Downloading large amounts of deposits can be quite slow - awaitWithTimeout(jsonLogsFut, web3Timeouts): - retryOrRaise newException(DataProviderTimeout, - "Request time out while obtaining json logs") - except CatchableError as err: - debug "Request for deposit logs failed", err = err.msg - backoff = (backoff * 3) div 2 - retryOrRaise err - - currentBlock = maxBlockNumberRequested + 1 - for deposit in depositLogs: - let txNode = deposit{"transactionHash"} - if txNode != nil and txNode.kind == JString: - var - pubkey: Bytes48 - withdrawalCredentials: Bytes32 - amount: Bytes8 - signature: Bytes96 - index: Bytes8 - - let blockNum = parseHexInt deposit["blockNumber"].str - let depositData = strip0xPrefix(deposit["data"].getStr) - var offset = 0 - offset += decode(depositData, offset, pubkey) - offset += decode(depositData, offset, withdrawalCredentials) - offset += decode(depositData, offset, amount) - offset += decode(depositData, offset, signature) - offset += decode(depositData, offset, index) - - let txHash = TxHash.fromHex txNode.str - let tx = awaitWithRetries web3.provider.eth_getTransactionByHash(txHash) - + let + db = BeaconChainDB.new("", inMemory = true) + metadata = getMetadataForNetwork(flags.network) + beaconTimeFn = proc(): BeaconTime = + # BEWARE of this hack + # The EL manager consults the current time in order to determine when the + # transition configuration exchange should start. 
We assume Bellatrix has + # just arrived which should trigger the configuration exchange and allow + # the downloader to connect to ELs serving the Engine API. + start_beacon_time(Slot(metadata.cfg.BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH)) + + let + elManager = ELManager.new( + metadata.cfg, + metadata.depositContractBlock, + metadata.depositContractBlockHash, + db, beaconTimeFn, + toFinalEngineApiUrls(flags.elUrls, flags.jwtSecret), + eth1Network = metadata.eth1Network, + ttdReached = false) + + elManager.start() + + var depositsFile: File + if flags.outDepositsFile.isSome: + depositsFile = open(string flags.outDepositsFile.get, fmWrite) + depositsFile.write( + "block", ",", + "validatorKey", ",", + "withdrawalCredentials", "\n") + depositsFile.flushFile() + + var blockIdx = 0 + while not elManager.isSynced(): + await sleepAsync chronos.seconds(1) + + if flags.outDepositsFile.isSome and + elManager.eth1ChainBlocks.len > blockIdx: + for i in blockIdx ..< elManager.eth1ChainBlocks.len: + for deposit in elManager.eth1ChainBlocks[i].deposits: depositsFile.write( - $blockNum, ",", - $txHash, ",", - $tx.source, ",", - $bytes_to_uint64(array[8, byte](amount)), ",", - $pubkey, ",", - $withdrawalCredentials, "\n") + $elManager.eth1ChainBlocks[i].number, ",", + $deposit.pubkey, ",", + $deposit.withdrawal_credentials, "\n") depositsFile.flushFile() - info "Done" + blockIdx = elManager.eth1ChainBlocks.len -waitFor main(load CliFlags) + info "All deposits downloaded" +waitFor main(load CliFlags) diff --git a/scripts/detect_platform.sh b/scripts/detect_platform.sh new file mode 100644 index 0000000000..82bd975ba5 --- /dev/null +++ b/scripts/detect_platform.sh @@ -0,0 +1,15 @@ +if [ -z "${DETECT_PLATFORM_SOURCED:-}" ]; then +DETECT_PLATFORM_SOURCED=1 + +# OS detection +OS="linux" +if uname | grep -qi darwin; then + OS="macos" +elif uname | grep -qiE "mingw|msys"; then + OS="windows" +fi + +# Architecture detection +ARCH="$(uname -m)" + +fi diff --git a/scripts/geth_binaries.sh b/scripts/geth_binaries.sh new file mode 100644 index 0000000000..b2e4287623 --- /dev/null +++ b/scripts/geth_binaries.sh @@ -0,0 +1,108 @@ +if [ -z "${GETH_BINARIES_SOURCED:-}" ]; then +GETH_BINARIES_SOURCED=1 + +SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")" +BUILD_DIR="$(cd "$SCRIPTS_DIR/../build"; pwd)" + +source "${SCRIPTS_DIR}/detect_platform.sh" + +: ${STABLE_GETH_BINARY:="${BUILD_DIR}/downloads/geth"} +: ${GETH_CAPELLA_BINARY:="${BUILD_DIR}/downloads/geth_capella"} +: ${GETH_EIP_4844_BINARY:="${BUILD_DIR}/downloads/geth_eip4844"} + +download_geth_stable() { + if [[ ! 
-e "${STABLE_GETH_BINARY}" ]]; then + GETH_VERSION="1.10.26-e5eb32ac" + GETH_URL="https://gethstore.blob.core.windows.net/builds/" + + case "${OS}-${ARCH}" in + linux-amd64|linux-x86_64) + GETH_TARBALL="geth-linux-amd64-${GETH_VERSION}.tar.gz" + ;; + linux-arm64|linux-aarch64) + GETH_TARBALL="geth-linux-arm64-${GETH_VERSION}.tar.gz" + ;; + macos-amd64|macos-x86_64) + GETH_TARBALL="geth-darwin-amd64-${GETH_VERSION}.tar.gz" + ;; + macos-arm64|macos-aarch64) + # There is no official binary for macOS/ARM at the moment + # The AMD64 binary should work under Rosetta + GETH_TARBALL="geth-darwin-amd64-${GETH_VERSION}.tar.gz" + ;; + windows-amd64|windows-x86_64) + GETH_TARBALL="geth-windows-amd64-${GETH_VERSION}.zip" + ;; + *) + echo "No Geth binaries available for platform: ${OS}-${ARCH}" + exit 1 + ;; + esac + + log "Downloading Geth binary" + + "$CURL_BINARY" -sSLO "$GETH_URL/$GETH_TARBALL" + local tmp_extract_dir + tmp_extract_dir=$(mktemp -d geth-stable-tarball-XXX) + CLEANUP_DIRS+=("$tmp_extract_dir") + tar -xzf "$GETH_TARBALL" -C "$tmp_extract_dir" --strip-components=1 + mkdir -p "$(dirname "$STABLE_GETH_BINARY")" + mv "$tmp_extract_dir/geth" "$STABLE_GETH_BINARY" + chmod +x "$STABLE_GETH_BINARY" + fi +} + +download_status_geth_binary() { + BINARY_NAME="$1" + BINARY_FS_PATH="$2" + + if [[ ! -e "${BINARY_FS_PATH}" ]]; then + case "${OS}-${ARCH}" in + linux-amd64|linux-x86_64) + GETH_PLATFORM=linux-amd64 + ;; + linux-arm64|linux-aarch64) + GETH_PLATFORM=linux-arm64 + ;; + macos-amd64|macos-x86_64) + GETH_PLATFORM=macos-amd64 + ;; + macos-arm64|macos-aarch64) + GETH_PLATFORM=macos-arm64 + ;; + windows-amd64|windows-x86_64) + GETH_PLATFORM=windows-amd64 + ;; + *) + echo "No Status Geth binaries available for platform: ${OS}-${ARCH}" + exit 1 + ;; + esac + + log "Downloading Status geth binary ($1)" + + GETH_TARBALL_NAME="geth-binaries-${GETH_PLATFORM}.tar.gz" + GETH_TARBALL_URL="https://github.com/status-im/nimbus-simulation-binaries/releases/download/latest/${GETH_TARBALL_NAME}" + GETH_BINARY_IN_TARBALL="geth/${BINARY_NAME}/geth" + + "$CURL_BINARY" -o "$GETH_TARBALL_NAME" -sSL "$GETH_TARBALL_URL" + local tmp_extract_dir + tmp_extract_dir=$(mktemp -d geth-status-tarball-XXX) + CLEANUP_DIRS+=("$tmp_extract_dir") + tar -xzf "$GETH_TARBALL_NAME" -C "$tmp_extract_dir" --strip-components 2 \ + "$GETH_BINARY_IN_TARBALL" + mkdir -p "$(dirname "$BINARY_FS_PATH")" + mv "$tmp_extract_dir/geth" "$BINARY_FS_PATH" + chmod +x "$BINARY_FS_PATH" + fi +} + +download_geth_capella() { + download_status_geth_binary capella "$GETH_CAPELLA_BINARY" +} + +download_geth_eip_4844() { + download_status_geth_binary eip-4844 "$GETH_EIP_4844_BINARY" +} + +fi diff --git a/scripts/geth_genesis.json b/scripts/geth_genesis.json deleted file mode 100644 index 9960dc6ab5..0000000000 --- a/scripts/geth_genesis.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "config": { - "chainId":9999, - "homesteadBlock":0, - "eip150Block":0, - "eip155Block":0, - "eip158Block":0, - "byzantiumBlock":0, - "constantinopleBlock":0, - "petersburgBlock":0, - "istanbulBlock":0, - "muirGlacierBlock":0, - "berlinBlock":0, - "londonBlock":0, - "clique": { - "period": 5, - "epoch": 30000 - }, - "terminalTotalDifficulty":0 - }, - "nonce":"0x42", - "timestamp":"0x0", - "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit":"0x1C9C380", - 
"difficulty":"0x400000000", - "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "coinbase":"0x0000000000000000000000000000000000000000", - "alloc":{ - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"}, - "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf":{"balance":"0x6d6172697573766477000000"}, - "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf":{"balance":"0x6d6172697573766477000000"}, - "0x6813eb9362372eef6200f3b1dbc3f819671cba69":{"balance":"0x6d6172697573766477000000"}, - "0x1eff47bc3a10a45d4b230b5d10e37751fe6aa718":{"balance":"0x6d6172697573766477000000"}, - "0xe1ab8145f7e55dc933d51a18c793f901a3a0b276":{"balance":"0x6d6172697573766477000000"}, - "0xe57bfe9f44b819898f47bf37e5af72a0783e1141":{"balance":"0x6d6172697573766477000000"}, - "0xd41c057fd1c78805aac12b0a94a405c0461a6fbb":{"balance":"0x6d6172697573766477000000"}, - "0xf1f6619b38a98d6de0800f1defc0a6399eb6d30c":{"balance":"0x6d6172697573766477000000"}, - "0xf7edc8fa1ecc32967f827c9043fcae6ba73afa5c":{"balance":"0x6d6172697573766477000000"} - }, - "number":"0x0", - "gasUsed":"0x0", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas":"0x7" -} diff --git a/scripts/geth_vars.sh b/scripts/geth_vars.sh index e626969b27..784cef907b 100644 --- a/scripts/geth_vars.sh +++ b/scripts/geth_vars.sh @@ -5,13 +5,27 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -GETH_BINARY="${GETH_BINARY:-"${HOME}/go-ethereum/build/bin/geth"}" +if [ -z "${GETH_VARS_SOURCED:-}" ]; then +GETH_VARS_SOURCED=1 + GETH_NUM_NODES="${GETH_NUM_NODES:-4}" -GETH_BINARY="${GETH_BINARY:-${HOME}/go-ethereum/build/bin/geth}" GETH_BASE_NET_PORT="${BASE_EL_NET_PORT:-30303}" -GETH_BASE_HTTP_PORT="${BASE_EL_HTTP_PORT:-8545}" +GETH_BASE_RPC_PORT="${BASE_EL_RPC_PORT:-8545}" GETH_BASE_WS_PORT="${BASE_EL_WS_PORT:-8546}" GETH_BASE_AUTH_RPC_PORT="${BASE_EL_AUTH_RPC_PORT:-8551}" -GETH_PORT_OFFSET="${EL_PORT_OFFSET:-10}" -GENESISJSON="${GENESISJSON:-${BASEDIR}/geth_genesis.json}" +GETH_PORT_OFFSET="${EL_PORT_OFFSET:-20}" DISCOVER="--nodiscover" + +GETH_NET_PORTS=() +GETH_AUTH_RPC_PORTS=() +GETH_DATA_DIRS=() + +GETH_LAST_NODE_IDX=$((GETH_NUM_NODES - 1)) + +for GETH_NODE_IDX in $(seq 0 $GETH_LAST_NODE_IDX); do + GETH_NET_PORTS+=($(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_NET_PORT ))) + GETH_AUTH_RPC_PORTS+=($(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_AUTH_RPC_PORT ))) + GETH_DATA_DIRS+=("${DATA_DIR}/geth-${GETH_NODE_IDX}") +done + +fi diff --git a/scripts/launch_local_testnet.sh b/scripts/launch_local_testnet.sh index f5d88a014d..a3a8e8f73c 100755 --- a/scripts/launch_local_testnet.sh +++ b/scripts/launch_local_testnet.sh @@ -12,8 +12,9 @@ set -euo pipefail -BASEDIR="$(dirname "${BASH_SOURCE[0]}")" -cd "$BASEDIR/.." +SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")" +cd "$SCRIPTS_DIR/.." +BUILD_DIR="$(pwd)/build" VERBOSE="0" @@ -34,6 +35,9 @@ fi # architecture detection ARCH="$(uname -m)" +# Created processed that will be cleaned up when the script exits +PIDS="" + #################### # argument parsing # #################### @@ -55,60 +59,72 @@ CURL_BINARY="$(command -v curl)" || { echo "Curl not installed. Aborting."; exit JQ_BINARY="$(command -v jq)" || { echo "Jq not installed. 
Aborting."; exit 1; } OPTS="ht:n:d:g" -LONGOPTS="help,preset:,nodes:,data-dir:,remote-validators-count:,threshold:,remote-signers:,with-ganache,stop-at-epoch:,disable-htop,disable-vc,enable-logtrace,log-level:,base-port:,base-rest-port:,base-metrics-port:,base-vc-keymanager-port:,base-vc-metrics-port:,base-remote-signer-port:,base-el-net-port:,base-el-http-port:,base-el-ws-port:,base-el-auth-rpc-port:,el-port-offset:,reuse-existing-data-dir,reuse-binaries,timeout:,kill-old-processes,eth2-docker-image:,lighthouse-vc-nodes:,run-geth,dl-geth,dl-eth2,light-clients:,run-nimbus-el,verbose" +LONGOPTS="help,preset:,nodes:,data-dir:,remote-validators-count:,threshold:,nimbus-signer-nodes:,web3signer-nodes:,with-ganache,stop-at-epoch:,disable-htop,disable-vc,enable-payload-builder,enable-logtrace,log-level:,base-port:,base-rest-port:,base-metrics-port:,base-vc-metrics-port:,base-vc-keymanager-port:,base-remote-signer-port:,base-remote-signer-metrics-port:,base-el-net-port:,base-el-rpc-port:,base-el-ws-port:,base-el-auth-rpc-port:,el-port-offset:,reuse-existing-data-dir,reuse-binaries,timeout:,kill-old-processes,eth2-docker-image:,lighthouse-vc-nodes:,run-geth,dl-geth,dl-nimbus-eth1,dl-nimbus-eth2,light-clients:,run-nimbus-eth1,verbose,altair-fork-epoch:,bellatrix-fork-epoch:,capella-fork-epoch:,eip4844-fork-epoch:" # default values BINARIES="" -NIMFLAGS="${NIMFLAGS:-""}" NUM_NODES="10" DATA_DIR="local_testnet_data" USE_HTOP="1" USE_VC="1" +USE_PAYLOAD_BUILDER="false" +: ${PAYLOAD_BUILDER_HOST:=127.0.0.1} +: ${PAYLOAD_BUILDER_PORT:=4888} LIGHTHOUSE_VC_NODES="0" USE_GANACHE="0" LOG_LEVEL="DEBUG; TRACE:networking" BASE_PORT="9000" BASE_REMOTE_SIGNER_PORT="6000" +BASE_REMOTE_SIGNER_METRICS_PORT="6100" BASE_METRICS_PORT="8008" BASE_REST_PORT="7500" BASE_VC_KEYMANAGER_PORT="8500" BASE_VC_METRICS_PORT="9008" BASE_EL_NET_PORT="30303" -BASE_EL_HTTP_PORT="8545" +BASE_EL_RPC_PORT="8545" BASE_EL_WS_PORT="8546" BASE_EL_AUTH_RPC_PORT="8551" EL_PORT_OFFSET="10" -REUSE_EXISTING_DATA_DIR="0" -REUSE_BINARIES="0" -NIMFLAGS="" +: ${REUSE_EXISTING_DATA_DIR:=0} +: ${REUSE_BINARIES:=0} +: ${NIMFLAGS:=""} ENABLE_LOGTRACE="0" +STOP_AT_EPOCH=9999999 STOP_AT_EPOCH_FLAG="" TIMEOUT_DURATION="0" CONST_PRESET="mainnet" KILL_OLD_PROCESSES="0" ETH2_DOCKER_IMAGE="" -REMOTE_SIGNER_NODES=0 +NIMBUS_SIGNER_NODES=0 REMOTE_SIGNER_THRESHOLD=1 REMOTE_VALIDATORS_COUNT=0 LC_NODES=1 ACCOUNT_PASSWORD="nimbus" RUN_GETH="0" DL_GETH="0" -DL_ETH2="0" -BEACON_NODE_COMMAND="./build/nimbus_beacon_node" -WEB3_ARG=() -CLEANUP_DIRS=() - +: ${DL_NIMBUS_ETH1:="0"} +: ${DL_NIMBUS_ETH2:="0"} + +# TODO: Add command-line flags for these +: ${NIMBUS_ETH2_VERSION:=22.12.0} +: ${NIMBUS_ETH2_REVISION:=f6a5a5b1} + +: ${BEACON_NODE_COMMAND:="./build/nimbus_beacon_node"} +: ${ALTAIR_FORK_EPOCH:=1} +: ${BELLATRIX_FORK_EPOCH:=2} +: ${CAPELLA_FORK_EPOCH:=40} +: ${EIP4844_FORK_EPOCH:=50} #NIMBUS EL VARS -RUN_NIMBUS="0" -NIMBUSEL_BINARY="${NIMBUSEL_BINARY:-../nimbus-eth1/build/nimbus}" -echo "${NIMBUSEL_BINARY}" - -EL_HTTP_PORTS=() -EL_RPC_PORTS=() -EL_DATA_DIRS=() +RUN_NIMBUS_ETH1="0" +: ${NIMBUS_ETH1_BINARY:="./build/downloads/nimbus"} +: ${WEB3SIGNER_VERSION:=22.11.0} +: ${WEB3SIGNER_DIR:="${BUILD_DIR}/downloads/web3signer-${WEB3SIGNER_VERSION}"} +: ${WEB3SIGNER_BINARY:="${WEB3SIGNER_DIR}/bin/web3signer"} +WEB3SIGNER_NODES=0 PROCS_TO_KILL=("nimbus_beacon_node" "nimbus_validator_client" "nimbus_signing_node" "nimbus_light_client") PORTS_TO_KILL=() +WEB3_ARG=() +CLEANUP_DIRS=() print_help() { cat < "${DATA_DIR}/keymanager-token" +JWT_FILE="${DATA_DIR}/jwtsecret" +echo 
"Generating JWT file '$JWT_FILE'..." +openssl rand -hex 32 | tr -d "\n" > "${JWT_FILE}" + +if [[ "$CONST_PRESET" == "minimal" ]]; then + SECONDS_PER_SLOT=6 + SLOTS_PER_EPOCH=8 +else + SECONDS_PER_SLOT=12 + SLOTS_PER_EPOCH=32 +fi + VALIDATORS_DIR="${DATA_DIR}/validators" scripts/makedir.sh "${VALIDATORS_DIR}" @@ -378,21 +441,6 @@ else NPROC="$(nproc)" fi -if [[ "${RUN_NIMBUS}" == "1" && "${RUN_GETH}" == "1" ]]; then - echo "Use only one EL - geth or nimbus" - exit 1 -fi - - -if [[ "${RUN_GETH}" == "1" ]]; then - . ./scripts/geth_vars.sh -fi - -if [[ "${RUN_NIMBUS}" == "1" ]]; then - . ./scripts/nimbus_el_vars.sh -fi - - # Kill all processes which have open ports in the array passed as parameter kill_by_port() { local ports=("$@") @@ -411,41 +459,83 @@ kill_by_port() { } GETH_NUM_NODES="$(( NUM_NODES + LC_NODES ))" -NIMBUSEL_NUM_NODES="$(( NUM_NODES + LC_NODES ))" +NIMBUS_ETH1_NUM_NODES="$(( NUM_NODES + LC_NODES ))" +REMOTE_SIGNER_NODES=$(( NIMBUS_SIGNER_NODES + WEB3SIGNER_NODES )) +LAST_REMOTE_SIGNER_NODE_IDX=$(( REMOTE_SIGNER_NODES - 1 )) + +if [[ "${RUN_GETH}" == "1" ]]; then + source "${SCRIPTS_DIR}/geth_binaries.sh" + + if [[ $EIP4844_FORK_EPOCH -lt $STOP_AT_EPOCH ]]; then + download_geth_eip_4844 + GETH_BINARY="$GETH_EIP_4844_BINARY" + elif [[ $CAPELLA_FORK_EPOCH -lt $STOP_AT_EPOCH ]]; then + download_geth_capella + GETH_BINARY="$GETH_CAPELLA_BINARY" + else + download_geth_stable + GETH_BINARY="$STABLE_GETH_BINARY" + fi + + source ./scripts/geth_vars.sh +fi + +if [[ "${RUN_NIMBUS_ETH1}" == "1" ]]; then + . ./scripts/nimbus_el_vars.sh +fi # kill lingering processes from a previous run if [[ "${OS}" != "windows" ]]; then which lsof &>/dev/null || \ { echo "'lsof' not installed and we need it to check for ports already in use. Aborting."; exit 1; } - #Stop geth nodes + # Stop geth nodes if [[ "${RUN_GETH}" == "1" ]]; then - for NUM_NODE in $(seq 0 $(( GETH_NUM_NODES - 1 ))); do - for PORT in $(( NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_NET_PORT )) \ - $(( NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_HTTP_PORT )) \ - $(( NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_WS_PORT )) \ - $(( NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_AUTH_RPC_PORT )); + for GETH_NODE_IDX in $(seq 0 $GETH_LAST_NODE_IDX); do + for PORT in $(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_NET_PORT )) \ + $(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_RPC_PORT )) \ + $(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_WS_PORT )) \ + $(( GETH_NODE_IDX * GETH_PORT_OFFSET + GETH_BASE_AUTH_RPC_PORT )); do PORTS_TO_KILL+=("${PORT}") done done fi - #Stop Nimbus EL nodes - if [[ "${RUN_NIMBUS}" == "1" ]]; then - for NUM_NODE in $(seq 0 $(( NIMBUSEL_NUM_NODES - 1 ))); do - for PORT in $(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_NET_PORT )) \ - $(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_HTTP_PORT )) \ - $(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_WS_PORT )) \ - $(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_AUTH_RPC_PORT )); + # Stop Nimbus EL nodes + if [[ "${RUN_NIMBUS_ETH1}" == "1" ]]; then + for NIMBUS_ETH1_NODE_IDX in $(seq 0 $NIMBUS_ETH1_LAST_NODE_IDX); do + for PORT in $(( NIMBUS_ETH1_NODE_IDX * NIMBUS_ETH1_PORT_OFFSET + 1 + NIMBUS_ETH1_BASE_NET_PORT )) \ + $(( NIMBUS_ETH1_NODE_IDX * NIMBUS_ETH1_PORT_OFFSET + 1 + NIMBUS_ETH1_BASE_RPC_PORT )) \ + $(( NIMBUS_ETH1_NODE_IDX * NIMBUS_ETH1_PORT_OFFSET + 1 + NIMBUS_ETH1_BASE_WS_PORT )) \ + $(( NIMBUS_ETH1_NODE_IDX * NIMBUS_ETH1_PORT_OFFSET + 1 + NIMBUS_ETH1_BASE_AUTH_RPC_PORT )); do PORTS_TO_KILL+=("${PORT}") done done fi - for NUM_NODE in $(seq 0 $(( NUM_NODES - 
1 ))); do - for PORT in $(( BASE_PORT + NUM_NODE )) $(( BASE_METRICS_PORT + NUM_NODE )) $(( BASE_REST_PORT + NUM_NODE )); do + # Stop Remote Signers + for NUM_REMOTE in $(seq 0 $LAST_REMOTE_SIGNER_NODE_IDX); do + for PORT in $(( BASE_REMOTE_SIGNER_PORT + NUM_REMOTE )) \ + $(( BASE_REMOTE_SIGNER_METRICS_PORT + NUM_REMOTE )) ; do + PORTS_TO_KILL+=("${PORT}") + done + done + + # Stop Nimbus validator clients + if [[ "${USE_VC}" == "1" ]]; then + for NUM_NODE in $(seq 1 $NUM_NODES); do + for PORT in $(( BASE_VC_METRICS_PORT + NUM_NODE - 1 )) \ + $(( BASE_VC_KEYMANAGER_PORT + NUM_NODE - 1 )); do + PORTS_TO_KILL+=("${PORT}") + done + done + fi + + # Stop Nimbus CL nodes + for NUM_NODE in $(seq 1 $NUM_NODES); do + for PORT in $(( BASE_PORT + NUM_NODE - 1 )) $(( BASE_METRICS_PORT + NUM_NODE - 1)) $(( BASE_REST_PORT + NUM_NODE - 1)); do PORTS_TO_KILL+=("${PORT}") done done @@ -453,104 +543,106 @@ if [[ "${OS}" != "windows" ]]; then kill_by_port "${PORTS_TO_KILL[@]}" fi +download_web3signer() { + if [[ ! -d "${WEB3SIGNER_DIR}" ]]; then + log "Downloading Web3Signer binary" + + WEB3SIGNER_TARBALL="web3signer-${WEB3SIGNER_VERSION}.tar.gz" + WEB3SIGNER_URL="https://artifacts.consensys.net/public/web3signer/raw/names/web3signer.tar.gz/versions/${WEB3SIGNER_VERSION}/${WEB3SIGNER_TARBALL}" + + mkdir -p "${WEB3SIGNER_DIR}" + "${CURL_BINARY}" -sSL "${WEB3SIGNER_URL}" \ + | tar -xzf - --directory "${WEB3SIGNER_DIR}" --strip-components=1 + fi +} -download_geth() { - GETH_VERSION="1.11.0-unstable-b818e73e" +download_nimbus_eth1() { + if [[ ! -e "${NIMBUS_ETH1_BINARY}" ]]; then + case "${OS}-${ARCH}" in + linux-amd64|linux-x86_64) + NIMBUS_ETH1_PLATFORM=Linux_amd64 + ;; + linux-arm|linux-arm32|linux-aarch32) + NIMBUS_ETH1_PLATFORM=Linux_arm32v7 + ;; + linux-arm64|linux-aarch64) + NIMBUS_ETH1_PLATFORM=Linux_arm64v8 + ;; + macos-amd64|macos-x86_64) + NIMBUS_ETH1_PLATFORM=macOS_amd64 + ;; + macos-arm64|macos-aarch64) + NIMBUS_ETH1_PLATFORM=macOS_arm64 + ;; + windows-amd64|windows-x86_64) + NIMBUS_ETH1_PLATFORM=Windows_amd64 + ;; + *) + echo "No nimbus-eth1 binaries available for ${OS}-${ARCH}" + exit 1 + ;; + esac -# https://geth.ethereum.org/downloads/ -# "https://gethstore.blob.core.windows.net/builds/geth-linux-amd64-1.11.0-unstable-b818e73e.tar.gz" -# "https://gethstore.blob.core.windows.net/builds/geth-darwin-amd64-1.11.0-unstable-b818e73e.tar.gz" -# "https://gethstore.blob.core.windows.net/builds/geth-windows-amd64-1.11.0-unstable-b818e73e.zip" + NIMBUS_ETH1_FULL_BINARY_VERSION=20221205_f4cacdfc + NIMBUS_ETH1_TARBALL_NAME="nimbus-eth1_${NIMBUS_ETH1_PLATFORM}_${NIMBUS_ETH1_FULL_BINARY_VERSION}.tar.gz" - GETH_URL="https://gethstore.blob.core.windows.net/builds/" + NIMBUS_ETH1_TARBALL_URL="https://github.com/status-im/nimbus-simulation-binaries/raw/master/nimbus-eth1/nightly-20221205/${NIMBUS_ETH1_TARBALL_NAME}" - case "${OS}" in - linux) - GETH_TARBALL="geth-linux-amd64-${GETH_VERSION}.tar.gz" - ;; - macos) - GETH_TARBALL="geth-darwin-amd64-${GETH_VERSION}.tar.gz" - ;; - windows) - GETH_TARBALL="geth-windows-amd64-${GETH_VERSION}.zip" - ;; - esac + log "Downloading Nimbus ETH1 binary" - if [[ !
-e "build/${GETH_BINARY}" ]]; then - log "Downloading Geth binary" - pushd "build" >/dev/null - "${CURL_BINARY}" -sSLO "${GETH_URL}/${GETH_TARBALL}" + "${CURL_BINARY}" -o "$NIMBUS_ETH1_TARBALL_NAME" -sSLO "$NIMBUS_ETH1_TARBALL_URL" local tmp_extract_dir - tmp_extract_dir=$(mktemp -d geth-extract-XXX) - CLEANUP_DIRS+=("${tmp_extract_dir}") - tar -xzf "${GETH_TARBALL}" --directory "${tmp_extract_dir}" --strip-components=1 - mv "${tmp_extract_dir}/geth" . - GETH_BINARY="${PWD}/geth" - popd >/dev/null + tmp_extract_dir=$(mktemp -d nimbus-eth1-tarball-XXX) + CLEANUP_DIRS+=("$tmp_extract_dir") + tar -xzf "${NIMBUS_ETH1_TARBALL_NAME}" -C "$tmp_extract_dir" --strip-components=1 + mkdir -p "$(dirname "$NIMBUS_ETH1_BINARY")" + mv "$tmp_extract_dir/build/nimbus" "$NIMBUS_ETH1_BINARY" + chmod +x "$NIMBUS_ETH1_BINARY" fi } -download_eth2() { +download_nimbus_eth2() { + if [[ ! -e "${BEACON_NODE_COMMAND}" ]]; then + case "${OS}-${ARCH}" in + linux-amd64|linux-x86_64) + NIMBUS_PLATFORM=Linux_amd64 + ;; + linux-arm|linux-arm32|linux-aarch32) + NIMBUS_PLATFORM=Linux_arm32v7 + ;; + linux-arm64|linux-aarch64) + NIMBUS_PLATFORM=Linux_arm64v8 + ;; + macos-amd64|macos-x86_64) + NIMBUS_PLATFORM=macOS_amd64 + ;; + macos-arm64|macos-aarch64) + NIMBUS_PLATFORM=macOS_arm64 + ;; + windows-amd64|windows-x86_64) + NIMBUS_PLATFORM=Windows_amd64 + ;; + esac + + NIMBUS_ETH2_FULL_BINARY_VERSION="${NIMBUS_ETH2_VERSION}_${NIMBUS_ETH2_REVISION}" + NIMBUS_ETH2_TARBALL_NAME="nimbus-eth2_${NIMBUS_PLATFORM}_${NIMBUS_ETH2_FULL_BINARY_VERSION}.tar.gz" + NIMBUS_ETH2_TARBALL_URL="https://github.com/status-im/nimbus-eth2/releases/download/v${NIMBUS_ETH2_VERSION}/${NIMBUS_ETH2_TARBALL_NAME}" - # https://github.com/status-im/nimbus-eth2/releases/download/nightly/nimbus-eth2_Linux_amd64_nightly_latest.tar.gz + log "Downloading Nimbus ETH2 binary" - ETH2_URL="https://github.com/status-im/nimbus-eth2/releases/download/nightly/" - ETH2_VERSION="nightly_latest" - case "${OS}" in - linux) - ETH2_TARBALL="nimbus-eth2_Linux_amd64_${ETH2_VERSION}.tar.gz" - ;; - macos) - ETH2_TARBALL="nimbus-eth2_macOS_amd64_${ETH2_VERSION}.tar.gz" - ;; - windows) - ETH2_TARBALL="nimbus-eth2_Windows_amd64_${ETH2_VERSION}.tar.gz" - ;; - esac + "${CURL_BINARY}" -o "$NIMBUS_ETH2_TARBALL_NAME" -sSL "$NIMBUS_ETH2_TARBALL_URL" + local tmp_extract_dir + tmp_extract_dir=$(mktemp -d nimbus-eth2-tarball-XXX) + CLEANUP_DIRS+=("$tmp_extract_dir") + tar -xzf "${NIMBUS_ETH2_TARBALL_NAME}" -C "$tmp_extract_dir" --strip-components=1 + mkdir -p "$(dirname "$BEACON_NODE_COMMAND")" + mv "$tmp_extract_dir/build/nimbus_beacon_node" "$BEACON_NODE_COMMAND" + chmod +x "$BEACON_NODE_COMMAND" - if [[ ! -e "${BEACON_NODE_COMMAND}" ]]; then - log "Downloading Nimbus ETH2 binary" - "${CURL_BINARY}" -sSLO "${ETH2_URL}/${ETH2_TARBALL}" - # will extract it in build/ directory - tar -xzf "${ETH2_TARBALL}" --strip-components=1 REUSE_BINARIES=1 fi } -if [[ "${RUN_GETH}" == "1" ]]; then - if [[ ! -e "${GETH_BINARY}" ]]; then - if [[ "${DL_GETH}" == "1" ]]; then - log "Downloading geth ..." - download_geth - else - echo "Missing geth executable" - exit 1 - fi - fi - - log "Starting ${GETH_NUM_NODES} Geth Nodes ..." - . "./scripts/start_geth_nodes.sh" - EL_HTTP_PORTS+=("${GETH_HTTP_PORTS[@]}") - EL_RPC_PORTS+=("${GETH_RPC_PORTS[@]}") - EL_DATA_DIRS+=("${GETH_DATA_DIRS[@]}") - PROCS_TO_KILL+=("${GETH_BINARY}") - CLEANUP_DIRS+=("${GETH_DATA_DIRS[@]}") -fi - -if [[ "${RUN_NIMBUS}" == "1" ]]; then - if [[ ! -e "${NIMBUSEL_BINARY}" ]]; then - echo "Missing nimbus EL executable" - exit 1 - fi - - . 
"./scripts/start_nimbus_el_nodes.sh" - EL_HTTP_PORTS+=("${NIMBUSEL_HTTP_PORTS[@]}") - EL_RPC_PORTS+=("${NIMBUSEL_RPC_PORTS[@]}") - EL_DATA_DIRS+=("${NIMBUSEL_DATA_DIRS[@]}") - PROCS_TO_KILL+=("${NIMBUSEL_BINARY}") - CLEANUP_DIRS+=("${NIMBUSEL_DATA_DIRS[@]}") -fi - # Download the Lighthouse binary. LH_VERSION="2.1.3" LH_ARCH="${ARCH}" @@ -584,11 +676,11 @@ fi # Don't build binaries if we are downloading them -if [[ "${DL_ETH2}" != "1" ]]; then +if [[ "${DL_NIMBUS_ETH2}" != "1" ]]; then # Build the binaries BINARIES="deposit_contract" - if [ "$REMOTE_SIGNER_NODES" -ge "0" ]; then + if [[ "$NIMBUS_SIGNER_NODES" -gt "0" ]]; then BINARIES="${BINARIES} nimbus_signing_node" fi @@ -596,7 +688,7 @@ if [[ "${DL_ETH2}" != "1" ]]; then BINARIES="${BINARIES} nimbus_validator_client" fi - if [ "$LC_NODES" -ge "1" ]; then + if [[ "$LC_NODES" -ge "1" ]]; then BINARIES="${BINARIES} nimbus_light_client" fi @@ -607,6 +699,15 @@ if [[ "${DL_ETH2}" != "1" ]]; then BINARIES="${BINARIES} nimbus_beacon_node" fi +if [[ "$WEB3SIGNER_NODES" -gt "0" ]]; then + download_web3signer +fi + +if [[ "$WEB3SIGNER_NODES" -gt "0" && "$NIMBUS_SIGNER_NODES" -gt "0" ]]; then + echo "You can use either --web3signer-nodes or --nimbus-signer-nodes, but not together" + exit 1 +fi + if [[ -n "${ETH2_DOCKER_IMAGE}" ]]; then DATA_DIR_FULL_PATH="$(cd "${DATA_DIR}"; pwd)" # CONTAINER_DATA_DIR must be used everywhere where paths are supplied to BEACON_NODE_COMMAND executions. @@ -616,9 +717,8 @@ if [[ -n "${ETH2_DOCKER_IMAGE}" ]]; then else # When docker is not used CONTAINER_DATA_DIR is just an alias for DATA_DIR CONTAINER_DATA_DIR="${DATA_DIR}" - if [[ "${DL_ETH2}" == "1" ]]; then - log "Downloading nimbus_eth2" - download_eth2 + if [[ "${DL_NIMBUS_ETH2}" == "1" ]]; then + download_nimbus_eth2 BINARIES="" fi fi @@ -633,12 +733,18 @@ for BINARY in ${BINARIES}; do done if [[ "${REUSE_BINARIES}" == "0" || "${BINARIES_MISSING}" == "1" ]]; then - if [[ "${DL_ETH2}" == "0" ]]; then + if [[ "${DL_NIMBUS_ETH2}" == "0" ]]; then log "Rebuilding binaries ${BINARIES}" ${MAKE} -j ${NPROC} LOG_LEVEL=TRACE NIMFLAGS="${NIMFLAGS} -d:local_testnet -d:const_preset=${CONST_PRESET}" ${BINARIES} fi fi +if [[ "${RUN_NIMBUS_ETH1}" == "1" ]]; then + if [[ "${DL_NIMBUS_ETH1}" == "1" ]]; then + download_nimbus_eth1 + fi +fi + # Kill child processes on Ctrl-C/SIGTERM/exit, passing the PID of this shell # instance as the parent and the target process name as a pattern to the # "pkill" command. 
@@ -694,16 +800,13 @@ fi

 REMOTE_URLS=""
-for NUM_REMOTE in $(seq 0 $(( REMOTE_SIGNER_NODES - 1 ))); do
+for NUM_REMOTE in $(seq 0 $LAST_REMOTE_SIGNER_NODE_IDX); do
   REMOTE_PORT=$(( BASE_REMOTE_SIGNER_PORT + NUM_REMOTE ))
   REMOTE_URLS="${REMOTE_URLS} --remote-signer=http://127.0.0.1:${REMOTE_PORT}"
 done

 # deposit and testnet creation
-PIDS=""
 BOOTSTRAP_TIMEOUT=30 # in seconds
-DEPOSIT_CONTRACT_ADDRESS="0x0000000000000000000000000000000000000000"
-DEPOSIT_CONTRACT_BLOCK="0x0000000000000000000000000000000000000000000000000000000000000000"
 RUNTIME_CONFIG_FILE="${DATA_DIR}/config.yaml"
 NUM_JOBS=${NUM_NODES}

@@ -711,6 +814,14 @@
 DEPOSITS_FILE="${DATA_DIR}/deposits.json"
 CONTAINER_DEPOSITS_FILE="${CONTAINER_DATA_DIR}/deposits.json"

 if [[ "$REUSE_EXISTING_DATA_DIR" == "0" ]]; then
+echo ./build/deposit_contract generateSimulationDeposits \
+  --count=${TOTAL_VALIDATORS} \
+  --out-validators-dir="${VALIDATORS_DIR}" \
+  --out-secrets-dir="${SECRETS_DIR}" \
+  --out-deposits-file="${DEPOSITS_FILE}" \
+  --threshold=${REMOTE_SIGNER_THRESHOLD} \
+  --remote-validators-count=${REMOTE_VALIDATORS_COUNT} \
+  ${REMOTE_URLS}
   ./build/deposit_contract generateSimulationDeposits \
     --count=${TOTAL_VALIDATORS} \
     --out-validators-dir="${VALIDATORS_DIR}" \
@@ -721,8 +832,17 @@ if [[ "$REUSE_EXISTING_DATA_DIR" == "0" ]]; then
     ${REMOTE_URLS}
 fi

+GENESIS_OFFSET=30
+NOW_UNIX_TIMESTAMP=$(date +%s)
+GENESIS_TIME=$((NOW_UNIX_TIMESTAMP + GENESIS_OFFSET))
+SHANGHAI_FORK_TIME=$((GENESIS_TIME + SECONDS_PER_SLOT * SLOTS_PER_EPOCH * CAPELLA_FORK_EPOCH))
+SHARDING_FORK_TIME=$((GENESIS_TIME + SECONDS_PER_SLOT * SLOTS_PER_EPOCH * EIP4844_FORK_EPOCH))
+
+EXECUTION_GENESIS_JSON="${DATA_DIR}/local_sim_execution_genesis.json"
+sed "s/SHANGHAI_FORK_TIME/${SHANGHAI_FORK_TIME}/g; s/SHARDING_FORK_TIME/${SHARDING_FORK_TIME}/g" \
+  "${SCRIPTS_DIR}/local_sim_execution_genesis.json.template" > "$EXECUTION_GENESIS_JSON"
+
 if [[ $USE_GANACHE == "0" ]]; then
-  GENESIS_OFFSET=30
   BOOTSTRAP_IP="127.0.0.1"

   $BEACON_NODE_COMMAND createTestnet \
@@ -735,12 +855,13 @@ if [[ $USE_GANACHE == "0" ]]; then
     --bootstrap-port=${BASE_PORT} \
     --netkey-file=network_key.json \
     --insecure-netkey-password=true \
-    --genesis-offset=${GENESIS_OFFSET} # Delay in seconds
+    --genesis-time=${GENESIS_TIME} # Unix timestamp of the genesis slot
+  DEPOSIT_CONTRACT_ADDRESS="0x4242424242424242424242424242424242424242"
+  DEPOSIT_CONTRACT_BLOCK=0
 else
   echo "Launching ganache"
   ganache-cli --blockTime 17 --gasLimit 100000000 -e 100000 --verbose > "${DATA_DIR}/log_ganache.txt" 2>&1 &
-  PIDS="${PIDS},$!"

   WEB3_ARG=("--web3-url=ws://localhost:8545")

@@ -763,8 +884,6 @@ else
     --min-delay=$MIN_DELAY --max-delay=$MAX_DELAY \
     "${WEB3_ARG[@]}" \
     --deposit-contract=${DEPOSIT_CONTRACT_ADDRESS} > "${DATA_DIR}/log_deposit_maker.txt" 2>&1 &
-
-  PIDS="${PIDS},$!"
 fi

 ./scripts/make_prometheus_config.sh \
@@ -772,43 +891,46 @@ fi
     --base-metrics-port ${BASE_METRICS_PORT} \
     --config-file "${DATA_DIR}/prometheus.yml" || true # TODO: this currently fails on macOS,
                                                        # but it can be considered non-critical
-echo Wrote $RUNTIME_CONFIG_FILE:
+cp "$SCRIPTS_DIR/$CONST_PRESET-non-overriden-config.yaml" "$RUNTIME_CONFIG_FILE"

 # TODO the runtime config file should be used during deposit generation as well!
-tee "$RUNTIME_CONFIG_FILE" < "${DATA_DIR}/deposit_contract_block.txt"
+
+# TODO
+# This value is effectively derived from the genesis.json file
+# Automate the process of specifying it here.
+# +# One way to obtain it is by calling eth_getBlockByNumber immediately after +# initialising the EL client +# +# curl -X POST \ +# -H 'Content-Type: application/json' \ +# --data '{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["latest", true],"id":1}' \ +# http://localhost:6307 +# +# We can also use this to make sure that Nimbus and the other clients agree on the handling of the genesis file +# +echo 0xe4b24524c45a45b4727c8929d7305e64095f65749e2dcad665b8ef8c654b4842 > "${DATA_DIR}/deposit_contract_block_hash.txt" + if [[ "${LIGHTHOUSE_VC_NODES}" != "0" ]]; then # I don't know what this is, but Lighthouse wants it, so we recreate it from # Lighthouse's own local testnet. - echo 0 > "${DATA_DIR}/deploy_block.txt" - - # Lighthouse wants all these variables here. Copying them from "beacon_chain/spec/presets.nim". - # Note: our parser can't handle quotes around numerical values. - cat >> "$RUNTIME_CONFIG_FILE" < "${DATA_DIR}/deploy_block.txt" fi dump_logs() { @@ -826,8 +948,8 @@ dump_logtrace() { fi } -NODES_WITH_VALIDATORS=${NODES_WITH_VALIDATORS:-4} -BOOTSTRAP_NODE=0 +NODES_WITH_VALIDATORS=${NODES_WITH_VALIDATORS:-$NUM_NODES} +BOOTSTRAP_NODE=1 SYSTEM_VALIDATORS=$(( TOTAL_VALIDATORS - USER_VALIDATORS )) VALIDATORS_PER_NODE=$(( SYSTEM_VALIDATORS / NODES_WITH_VALIDATORS )) if [[ "${USE_VC}" == "1" ]]; then @@ -838,31 +960,55 @@ if [[ "${USE_VC}" == "1" ]]; then NUM_JOBS=$(( NUM_JOBS * 2 )) fi -if [ "$REMOTE_SIGNER_NODES" -ge "0" ]; then +if [[ "$REMOTE_SIGNER_NODES" -ge "0" ]]; then NUM_JOBS=$(( NUM_JOBS + REMOTE_SIGNER_NODES )) fi -if [ "$LC_NODES" -ge "1" ]; then +if [[ "$LC_NODES" -ge "1" ]]; then NUM_JOBS=$(( NUM_JOBS + LC_NODES )) fi -if [ "${RUN_GETH}" == "1" ]; then +if [[ "${RUN_GETH}" == "1" ]]; then NUM_JOBS=$(( NUM_JOBS + GETH_NUM_NODES )) fi -if [ "${RUN_NIMBUS}" == "1" ]; then - NUM_JOBS=$(( NUM_JOBS + NIMBUSEL_NUM_NODES )) +if [[ "${RUN_NIMBUS_ETH1}" == "1" ]]; then + NUM_JOBS=$(( NUM_JOBS + NIMBUS_ETH1_NUM_NODES )) +fi + +if [[ "${RUN_GETH}" == "1" ]]; then + if [[ ! -e "${GETH_BINARY}" ]]; then + echo "Missing geth executable" + exit 1 + fi + + source "./scripts/start_geth_nodes.sh" + + PROCS_TO_KILL+=("${GETH_BINARY}") + CLEANUP_DIRS+=("${GETH_DATA_DIRS[@]}") +fi + +if [[ "${RUN_NIMBUS_ETH1}" == "1" ]]; then + if [[ ! -e "${NIMBUS_ETH1_BINARY}" ]]; then + echo "Missing nimbus EL executable" + exit 1 + fi + + source "./scripts/start_nimbus_el_nodes.sh" + + PROCS_TO_KILL+=("${NIMBUS_ETH1_BINARY}") + CLEANUP_DIRS+=("${NIMBUS_ETH1_DATA_DIRS[@]}") fi VALIDATORS_PER_VALIDATOR=$(( (SYSTEM_VALIDATORS / NODES_WITH_VALIDATORS) / 2 )) -VALIDATOR_OFFSET=$((SYSTEM_VALIDATORS / 2)) +VALIDATOR_OFFSET=$(( SYSTEM_VALIDATORS / 2 )) BOOTSTRAP_ENR="${DATA_DIR}/node${BOOTSTRAP_NODE}/beacon_node.enr" CONTAINER_BOOTSTRAP_ENR="${CONTAINER_DATA_DIR}/node${BOOTSTRAP_NODE}/beacon_node.enr" CONTAINER_NETWORK_KEYFILE="network_key.json" -for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do +for NUM_NODE in $(seq 1 $NUM_NODES); do # Copy validators to individual nodes. # The first $NODES_WITH_VALIDATORS nodes split them equally between them, # after skipping the first $USER_VALIDATORS. 
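# A worked example of the keystore split described above, using hypothetical
# numbers (not the script's defaults): if SYSTEM_VALIDATORS=1024,
# USER_VALIDATORS=0 and NODES_WITH_VALIDATORS=4, then VALIDATORS_PER_NODE=256
# and node 2 (1-based) receives keystores 257..512:
#
#   ls "${VALIDATORS_DIR}" | tail -n +$(( 0 + 256 * (2 - 1) + 1 )) | head -n 256
#   # i.e. tail -n +257 skips the first node's 256 entries; head -n 256 takes this node's share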
@@ -872,27 +1018,25 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
   scripts/makedir.sh "${NODE_DATA_DIR}/validators" 2>&1
   scripts/makedir.sh "${NODE_DATA_DIR}/secrets" 2>&1

-  if [[ $NUM_NODE -lt $NODES_WITH_VALIDATORS ]]; then
+  if [[ $NUM_NODE -le $NODES_WITH_VALIDATORS ]]; then
     if [[ "${USE_VC}" == "1" ]]; then
       VALIDATOR_DATA_DIR="${DATA_DIR}/validator${NUM_NODE}"
       rm -rf "${VALIDATOR_DATA_DIR}"
       scripts/makedir.sh "${VALIDATOR_DATA_DIR}" 2>&1
       scripts/makedir.sh "${VALIDATOR_DATA_DIR}/validators" 2>&1
       scripts/makedir.sh "${VALIDATOR_DATA_DIR}/secrets" 2>&1
-      for VALIDATOR in $(ls "${VALIDATORS_DIR}" | tail -n +$(( $USER_VALIDATORS + ($VALIDATORS_PER_VALIDATOR * $NUM_NODE) + 1 + $VALIDATOR_OFFSET )) | head -n $VALIDATORS_PER_VALIDATOR); do
-        if [[ -f "${VALIDATORS_DIR}/${VALIDATOR}/keystore.json" ]]; then
-          cp -a "${VALIDATORS_DIR}/${VALIDATOR}" "${VALIDATOR_DATA_DIR}/validators/" 2>&1
+      for VALIDATOR in $(ls "${VALIDATORS_DIR}" | tail -n +$(( USER_VALIDATORS + (VALIDATORS_PER_VALIDATOR * (NUM_NODE - 1)) + 1 + VALIDATOR_OFFSET )) | head -n $VALIDATORS_PER_VALIDATOR); do
+        cp -a "${VALIDATORS_DIR}/${VALIDATOR}" "${VALIDATOR_DATA_DIR}/validators/" 2>&1
+        # Remote validators won't have a secret file
+        if [ -f "${SECRETS_DIR}/${VALIDATOR}" ]; then
           cp -a "${SECRETS_DIR}/${VALIDATOR}" "${VALIDATOR_DATA_DIR}/secrets/" 2>&1
-        else
-          # TODO: validators support remote signers
-          cp -a "${VALIDATORS_DIR}/${VALIDATOR}" "${NODE_DATA_DIR}/validators/" 2>&1
         fi
       done
       if [[ "${OS}" == "Windows_NT" ]]; then
         find "${VALIDATOR_DATA_DIR}" -type f \( -iname "*.json" -o ! -iname "*.*" \) -exec icacls "{}" /inheritance:r /grant:r ${USERDOMAIN}\\${USERNAME}:\(F\) \;
       fi
     fi

-    for VALIDATOR in $(ls "${VALIDATORS_DIR}" | tail -n +$(( $USER_VALIDATORS + ($VALIDATORS_PER_NODE * $NUM_NODE) + 1 )) | head -n $VALIDATORS_PER_NODE); do
+    for VALIDATOR in $(ls "${VALIDATORS_DIR}" | tail -n +$(( USER_VALIDATORS + (VALIDATORS_PER_NODE * (NUM_NODE - 1)) + 1 )) | head -n $VALIDATORS_PER_NODE); do
       cp -a "${VALIDATORS_DIR}/${VALIDATOR}" "${NODE_DATA_DIR}/validators/" 2>&1
       if [[ -f "${VALIDATORS_DIR}/${VALIDATOR}/keystore.json" ]]; then
         # Only remote key stores doesn't have a secret
@@ -904,7 +1048,8 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
     fi
   fi
 done
-for NUM_LC in $(seq 0 $(( LC_NODES - 1 ))); do
+
+for NUM_LC in $(seq 1 $LC_NODES); do
   LC_DATA_DIR="${DATA_DIR}/lc${NUM_LC}"
   rm -rf "${LC_DATA_DIR}"
   scripts/makedir.sh "${LC_DATA_DIR}" 2>&1
@@ -926,22 +1071,76 @@ END_CLI_CONFIG

 # https://ss64.com/osx/seq.html documents that at macOS seq(1) counts backwards
 # as probably do some others
-if ((REMOTE_SIGNER_NODES > 0)); then
-  for NUM_REMOTE in $(seq 0 $(( REMOTE_SIGNER_NODES - 1 ))); do
+if ((NIMBUS_SIGNER_NODES > 0)); then
+  launch_nimbus_signing_node() {
+    SIGNING_NODE_IDX=$1
+    ./build/nimbus_signing_node \
+      --validators-dir="${DATA_DIR}/validators_shares/${SIGNING_NODE_IDX}" \
+      --secrets-dir="${DATA_DIR}/secrets_shares/${SIGNING_NODE_IDX}" \
+      --bind-port=$(( BASE_REMOTE_SIGNER_PORT + SIGNING_NODE_IDX - 1 ))
+    echo "Signing node exited with code $?"
+  }
+
+  for NUM_REMOTE in $(seq 1 $NIMBUS_SIGNER_NODES); do
     # TODO find some way for this and other background-launched processes to
     #      still participate in set -e, ideally
-    ./build/nimbus_signing_node \
-      --validators-dir="${DATA_DIR}/validators_shares/${NUM_REMOTE}" \
-      --secrets-dir="${DATA_DIR}/secrets_shares/${NUM_REMOTE}" \
-      --bind-port=$(( BASE_REMOTE_SIGNER_PORT + NUM_REMOTE )) \
-      > "${DATA_DIR}/log_remote_signer_${NUM_REMOTE}.txt" &
+    launch_nimbus_signing_node $NUM_REMOTE > "${DATA_DIR}/log_nimbus_signing_node_${NUM_REMOTE}.txt" &
   done
 fi
+
+if ((WEB3SIGNER_NODES > 0)); then
+  if ! command -v javac > /dev/null || ! javac -version > /dev/null; then
+    # On macOS, homebrew doesn't make java available in your PATH by default.
+    # Instead, macOS ships with a stub executable that displays a message that
+    # Java is not installed (javac -version exits with an error code 1).
+    # If the user is running under these default settings, but a homebrew
+    # installation is discovered, we are happy to use it just in this script:
+    if [[ -d /opt/homebrew/opt/openjdk/bin ]]; then
+      export PATH="/opt/homebrew/opt/openjdk/bin:$PATH"
+    fi
+  fi
+
+  launch_web3signer() {
+    WEB3SIGNER_NODE_IDX=$1
+
+    local secrets_dir="${DATA_DIR}/secrets_shares/${WEB3SIGNER_NODE_IDX}"
+    local keystores_dir="${DATA_DIR}/validators_shares/${WEB3SIGNER_NODE_IDX}"
+
+    # We re-arrange the keystore files to match the layout expected by the Web3Signer
+    # TODO generateSimulationDeposits can be refactored to produce the right layout from the start
+    for validator_pubkey in $(ls "$secrets_dir")
+    do
+      mv "$secrets_dir/$validator_pubkey" "$secrets_dir/$validator_pubkey.txt"
+      mv "$keystores_dir/$validator_pubkey/keystore.json" "$keystores_dir/$validator_pubkey.json"
+    done
+
+    # TODO find some way for this and other background-launched processes to
+    #      still participate in set -e, ideally
+    "${WEB3SIGNER_BINARY}" \
+      --http-listen-port=$(( BASE_REMOTE_SIGNER_PORT + WEB3SIGNER_NODE_IDX - 1 )) \
+      --logging=DEBUG \
+      --metrics-enabled=true \
+      --metrics-port=$(( BASE_REMOTE_SIGNER_METRICS_PORT + WEB3SIGNER_NODE_IDX - 1 )) \
+      eth2 \
+      --slashing-protection-enabled=false \
+      --keystores-passwords-path="${secrets_dir}" \
+      --keystores-path="${keystores_dir}" \
+      --network="${RUNTIME_CONFIG_FILE}"

+    echo "Web3Signer exited with code $?"
+ } + + PROCS_TO_KILL+=("${WEB3SIGNER_BINARY}") + + for NUM_REMOTE in $(seq 1 $WEB3SIGNER_NODES); do + launch_web3signer $NUM_REMOTE > "${DATA_DIR}/log_web3signer_${NUM_REMOTE}.txt" & done fi # give each node time to load keys sleep 10 -for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do +for NUM_NODE in $(seq 1 $NUM_NODES); do NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}" CONTAINER_NODE_DATA_DIR="${CONTAINER_DATA_DIR}/node${NUM_NODE}" VALIDATOR_DATA_DIR="${DATA_DIR}/validator${NUM_NODE}" @@ -973,18 +1172,21 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do done fi - if [ ${#EL_RPC_PORTS[@]} -eq 0 ]; then # check if the array is empty - WEB3_ARG=( - "--require-engine-api-in-bellatrix=no" - ) - else - WEB3_ARG=( - "--web3-url=http://127.0.0.1:${EL_RPC_PORTS[${NUM_NODE}]}" - "--jwt-secret=${EL_DATA_DIRS[${NUM_NODE}]}/jwtsecret" - ) + WEB3_ARG=() + if [ "${RUN_NIMBUS_ETH1}" == "1" ]; then + WEB3_ARG+=("--web3-url=http://127.0.0.1:${NIMBUS_ETH1_RPC_PORTS[$(( NUM_NODE - 1 ))]}") + fi + + if [ "${RUN_GETH}" == "1" ]; then + WEB3_ARG+=("--web3-url=http://127.0.0.1:${GETH_AUTH_RPC_PORTS[$((NUM_NODE - 1))]}") fi - # We enabled the keymanager on half of the nodes + if [ ${#WEB3_ARG[@]} -eq 0 ]; then # check if the array is empty + WEB3_ARG=("--require-engine-api-in-bellatrix=no") + fi + + # We enabled the keymanager on half of the nodes in order + # to make sure that the client can work without it. KEYMANAGER_FLAG="" if [ $((NUM_NODE % 2)) -eq 0 ]; then KEYMANAGER_FLAG="--keymanager" @@ -992,24 +1194,30 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do ${BEACON_NODE_COMMAND} \ --config-file="${CLI_CONF_FILE}" \ - --tcp-port=$(( BASE_PORT + NUM_NODE )) \ - --udp-port=$(( BASE_PORT + NUM_NODE )) \ + --tcp-port=$(( BASE_PORT + NUM_NODE - 1 )) \ + --udp-port=$(( BASE_PORT + NUM_NODE - 1 )) \ --max-peers=$(( NUM_NODES + LC_NODES - 1 )) \ --data-dir="${CONTAINER_NODE_DATA_DIR}" \ ${BOOTSTRAP_ARG} \ + --jwt-secret=${JWT_FILE} \ "${WEB3_ARG[@]}" \ + --payload-builder=${USE_PAYLOAD_BUILDER} \ + --payload-builder-url="http://${PAYLOAD_BUILDER_HOST}:${PAYLOAD_BUILDER_PORT}" \ + --light-client-data-serve=on \ + --light-client-data-import-mode=full \ + --light-client-data-max-periods=999999 \ ${STOP_AT_EPOCH_FLAG} \ ${KEYMANAGER_FLAG} \ --keymanager-token-file="${DATA_DIR}/keymanager-token" \ - --rest-port="$(( BASE_REST_PORT + NUM_NODE ))" \ - --metrics-port="$(( BASE_METRICS_PORT + NUM_NODE ))" \ + --rest-port="$(( BASE_REST_PORT + NUM_NODE - 1 ))" \ + --metrics-port="$(( BASE_METRICS_PORT + NUM_NODE - 1 ))" \ ${EXTRA_ARGS} \ &> "${DATA_DIR}/log${NUM_NODE}.txt" & PIDS="${PIDS},$!" if [[ "${USE_VC}" == "1" ]]; then - if [[ "${LIGHTHOUSE_VC_NODES}" -gt "${NUM_NODE}" ]]; then + if [[ "${LIGHTHOUSE_VC_NODES}" -ge "${NUM_NODE}" ]]; then # Lighthouse needs a different keystore filename for its auto-discovery process. 
for D in "${VALIDATOR_DATA_DIR}/validators"/0x*; do if [[ -e "${D}/keystore.json" ]]; then @@ -1035,11 +1243,12 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do ${STOP_AT_EPOCH_FLAG} \ --data-dir="${VALIDATOR_DATA_DIR}" \ --metrics \ - --metrics-port:$((BASE_VC_METRICS_PORT + NUM_NODE)) \ + --metrics-port=$(( BASE_VC_METRICS_PORT + NUM_NODE - 1 )) \ + --payload-builder=${USE_PAYLOAD_BUILDER} \ ${KEYMANAGER_FLAG} \ - --keymanager-port=$((BASE_VC_KEYMANAGER_PORT + NUM_NODE)) \ + --keymanager-port=$(( BASE_VC_KEYMANAGER_PORT + NUM_NODE - 1 )) \ --keymanager-token-file="${DATA_DIR}/keymanager-token" \ - --beacon-node="http://127.0.0.1:$((BASE_REST_PORT + NUM_NODE))" \ + --beacon-node="http://127.0.0.1:$(( BASE_REST_PORT + NUM_NODE - 1 ))" \ &> "${DATA_DIR}/log_val${NUM_NODE}.txt" & PIDS="${PIDS},$!" fi @@ -1050,20 +1259,20 @@ done if [ "$LC_NODES" -ge "1" ]; then echo "Waiting for Altair finalization" while :; do - ALTAIR_FORK_EPOCH="$( + BN_ALTAIR_FORK_EPOCH="$( "${CURL_BINARY}" -s "http://localhost:${BASE_REST_PORT}/eth/v1/config/spec" | \ "${JQ_BINARY}" -r '.data.ALTAIR_FORK_EPOCH')" - if [ "${ALTAIR_FORK_EPOCH}" -eq "${ALTAIR_FORK_EPOCH}" ]; then # check for number + if [ "${BN_ALTAIR_FORK_EPOCH}" -eq "${BN_ALTAIR_FORK_EPOCH}" ]; then # check for number break fi - echo "ALTAIR_FORK_EPOCH: ${ALTAIR_FORK_EPOCH}" + echo "ALTAIR_FORK_EPOCH: ${BN_ALTAIR_FORK_EPOCH}" sleep 1 done while :; do CURRENT_FORK_EPOCH="$( "${CURL_BINARY}" -s "http://localhost:${BASE_REST_PORT}/eth/v1/beacon/states/finalized/fork" | \ "${JQ_BINARY}" -r '.data.epoch')" - if [ "${CURRENT_FORK_EPOCH}" -ge "${ALTAIR_FORK_EPOCH}" ]; then + if [ "${CURRENT_FORK_EPOCH}" -ge "${BN_ALTAIR_FORK_EPOCH}" ]; then break fi sleep 1 @@ -1078,16 +1287,16 @@ if [ "$LC_NODES" -ge "1" ]; then LC_TRUSTED_BLOCK_ROOT="$( "${CURL_BINARY}" -s "http://localhost:${BASE_REST_PORT}/eth/v1/beacon/headers/finalized" | \ "${JQ_BINARY}" -r '.data.root')" - for NUM_LC in $(seq 0 $(( LC_NODES - 1 ))); do + for NUM_LC in $(seq 1 $LC_NODES); do LC_DATA_DIR="${DATA_DIR}/lc${NUM_LC}" - if [ ${#EL_RPC_PORTS[@]} -eq 0 ]; then # check if the array is empty - WEB3_ARG=() - else - WEB3_ARG=( - "--web3-url=http://127.0.0.1:${EL_RPC_PORTS[$(( NUM_NODES + NUM_LC ))]}" - "--jwt-secret=${EL_DATA_DIRS[$(( NUM_NODES + NUM_LC ))]}/jwtsecret" - ) + WEB3_ARG=() + if [ "${RUN_NIMBUS_ETH1}" == "1" ]; then + WEB3_ARG+=("--web3-url=http://127.0.0.1:${NIMBUS_ETH1_RPC_PORTS[$(( NUM_NODES + NUM_LC - 1 ))]}") + fi + + if [ "${RUN_GETH}" == "1" ]; then + WEB3_ARG+=("--web3-url=http://127.0.0.1:${GETH_AUTH_RPC_PORTS[$(( NUM_NODES + NUM_LC - 1 ))]}") fi ./build/nimbus_light_client \ @@ -1096,11 +1305,12 @@ if [ "$LC_NODES" -ge "1" ]; then --data-dir="${LC_DATA_DIR}" \ --network="${CONTAINER_DATA_DIR}" \ --bootstrap-node="${LC_BOOTSTRAP_NODE}" \ - --tcp-port=$(( BASE_PORT + NUM_NODES + NUM_LC )) \ - --udp-port=$(( BASE_PORT + NUM_NODES + NUM_LC )) \ + --tcp-port=$(( BASE_PORT + NUM_NODES + NUM_LC - 1 )) \ + --udp-port=$(( BASE_PORT + NUM_NODES + NUM_LC - 1 )) \ --max-peers=$(( NUM_NODES + LC_NODES - 1 )) \ --nat="extip:127.0.0.1" \ --trusted-block-root="${LC_TRUSTED_BLOCK_ROOT}" \ + --jwt-secret="${JWT_FILE}" \ "${WEB3_ARG[@]}" \ ${STOP_AT_EPOCH_FLAG} \ &> "${DATA_DIR}/log_lc${NUM_LC}.txt" & diff --git a/scripts/local_sim_execution_genesis.json.template b/scripts/local_sim_execution_genesis.json.template new file mode 100644 index 0000000000..ac0db08653 --- /dev/null +++ b/scripts/local_sim_execution_genesis.json.template @@ -0,0 +1,83 @@ +{ + "config": { + "chainId":9999, + 
"homesteadBlock":0, + "eip150Block":0, + "eip155Block":0, + "eip158Block":0, + "byzantiumBlock":0, + "constantinopleBlock":0, + "petersburgBlock":0, + "istanbulBlock":0, + "muirGlacierBlock":0, + "berlinBlock":0, + "londonBlock":0, + "shanghaiTime": SHANGHAI_FORK_TIME, + "shardingForkTime": SHARDING_FORK_TIME, + "clique": { + "period": 5, + "epoch": 30000 + }, + "terminalTotalDifficulty":0 + }, + "nonce":"0x42", + "timestamp":"0x0", + "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit":"0x1C9C380", + "difficulty":"0x400000000", + "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase":"0x0000000000000000000000000000000000000000", + "alloc":{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"}, + "0x7e5f4552091a69125d5dfcb7b8c2659029395bdf":{"balance":"0x6d6172697573766477000000"}, + "0x2b5ad5c4795c026514f8317c7a215e218dccd6cf":{"balance":"0x6d6172697573766477000000"}, + "0x6813eb9362372eef6200f3b1dbc3f819671cba69":{"balance":"0x6d6172697573766477000000"}, + "0x1eff47bc3a10a45d4b230b5d10e37751fe6aa718":{"balance":"0x6d6172697573766477000000"}, + "0xe1ab8145f7e55dc933d51a18c793f901a3a0b276":{"balance":"0x6d6172697573766477000000"}, + "0xe57bfe9f44b819898f47bf37e5af72a0783e1141":{"balance":"0x6d6172697573766477000000"}, + "0xd41c057fd1c78805aac12b0a94a405c0461a6fbb":{"balance":"0x6d6172697573766477000000"}, + "0xf1f6619b38a98d6de0800f1defc0a6399eb6d30c":{"balance":"0x6d6172697573766477000000"}, + "0xf7edc8fa1ecc32967f827c9043fcae6ba73afa5c":{"balance":"0x6d6172697573766477000000"}, + "0x4242424242424242424242424242424242424242": { + "balance": "0", + "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b
50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b50516040805160208181019490945280820192909252805180830382018152606
09092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b604080516008808252818301909252606091602082018180
36833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", + "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", + "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", + "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", + "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", + "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", + "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", + "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", + "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + 
"0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", + "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", + "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", + "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", + "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", + "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", + "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", + "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", + "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", + "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", + "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" + } + } + }, + "number":"0x0", + "gasUsed":"0x0", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas":"0x7" +} diff --git a/scripts/mainnet-non-overriden-config.yaml b/scripts/mainnet-non-overriden-config.yaml new file mode 100644 index 0000000000..e63447dcbd --- /dev/null +++ b/scripts/mainnet-non-overriden-config.yaml @@ -0,0 +1,103 @@ +# This file should contain the origin run-time config for the mainnet +# network [1] without all properties overriden in the local network +# simulation. 
We use it to generate a full run-time config as required
+# by third-party binaries, such as Lighthouse and Web3Signer.
+#
+# [1]: https://raw.githubusercontent.com/ethereum/consensus-specs/dev/configs/mainnet.yaml

+# Mainnet config

+# Extends the mainnet preset
+# (overridden in launch_local_testnet.sh) PRESET_BASE: 'mainnet'
+
+# Free-form short name of the network that this configuration applies to - known
+# canonical network names include:
+# * 'mainnet' - there can be only one
+# * 'prater' - testnet
+# Must match the regex: [a-z0-9\-]
+CONFIG_NAME: 'mainnet'
+
+# Transition
+# ---------------------------------------------------------------
+# Estimated on Sept 15, 2022
+# (overridden in launch_local_testnet.sh) TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000
+# By default, don't use these params
+TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
+TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
+
+
+
+# Genesis
+# ---------------------------------------------------------------
+# `2**14` (= 16,384)
+# (overridden in launch_local_testnet.sh) MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
+# Dec 1, 2020, 12pm UTC
+# (overridden in launch_local_testnet.sh) MIN_GENESIS_TIME: 1606824000
+# Mainnet initial fork version, recommend altering for testnets
+GENESIS_FORK_VERSION: 0x00000000
+# 604800 seconds (7 days)
+# (overridden in launch_local_testnet.sh) GENESIS_DELAY: 604800
+
+
+# Forking
+# ---------------------------------------------------------------
+# Some forks are disabled for now:
+# - These may be re-assigned to another fork-version later
+# - Temporarily set to max uint64 value: 2**64 - 1
+
+# Altair
+ALTAIR_FORK_VERSION: 0x01000000
+# (overridden in launch_local_testnet.sh) ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC
+# Bellatrix
+BELLATRIX_FORK_VERSION: 0x02000000
+# (overridden in launch_local_testnet.sh) BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
+# Capella
+CAPELLA_FORK_VERSION: 0x03000000
+# (overridden in launch_local_testnet.sh) CAPELLA_FORK_EPOCH: 18446744073709551615
+# EIP4844
+EIP4844_FORK_VERSION: 0x04000000
+# (overridden in launch_local_testnet.sh) EIP4844_FORK_EPOCH: 18446744073709551615
+
+
+
+
+# Time parameters
+# ---------------------------------------------------------------
+# 12 seconds
+SECONDS_PER_SLOT: 12
+# 14 (estimate from Eth1 mainnet)
+SECONDS_PER_ETH1_BLOCK: 14
+# 2**8 (= 256) epochs ~27 hours
+MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
+# 2**8 (= 256) epochs ~27 hours
+SHARD_COMMITTEE_PERIOD: 256
+# 2**11 (= 2,048) Eth1 blocks ~8 hours
+# (overridden in launch_local_testnet.sh) ETH1_FOLLOW_DISTANCE: 2048
+
+
+# Validator cycle
+# ---------------------------------------------------------------
+# 2**2 (= 4)
+INACTIVITY_SCORE_BIAS: 4
+# 2**4 (= 16)
+INACTIVITY_SCORE_RECOVERY_RATE: 16
+# 2**4 * 10**9 (= 16,000,000,000) Gwei
+EJECTION_BALANCE: 16000000000
+# 2**2 (= 4)
+MIN_PER_EPOCH_CHURN_LIMIT: 4
+# 2**16 (= 65,536)
+CHURN_LIMIT_QUOTIENT: 65536
+
+
+# Fork choice
+# ---------------------------------------------------------------
+# 40%
+PROPOSER_SCORE_BOOST: 40
+
+# Deposit contract
+# ---------------------------------------------------------------
+# Ethereum PoW Mainnet
+DEPOSIT_CHAIN_ID: 1
+DEPOSIT_NETWORK_ID: 1
+# (overridden in launch_local_testnet.sh) DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
+
diff --git a/scripts/make_prometheus_config.sh b/scripts/make_prometheus_config.sh
index 0e734de853..defcf2a6e2 100755
--- a/scripts/make_prometheus_config.sh
+++ b/scripts/make_prometheus_config.sh @@ -90,7 +90,7 @@ scrape_configs: - job_name: "nimbus" static_configs: EOF -for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do +for NUM_NODE in $(seq 1 $NUM_NODES); do cat >> "${CONFIG_FILE}" < /dev/null && pwd) +BUILD_DIR=$(cd "$SCRIPTS_DIR/../build" &> /dev/null && pwd) + +create_data_dir_for_network() { + NETWORK_ID=$(cat "$NETWORK/genesis.json" | jq '.config.chainId') + NETWORK_DIR="$BUILD_DIR/data/$NETWORK_ID" + mkdir -p "$NETWORK_DIR" + echo "$NETWORK_DIR" +} + +create_jwt_token() { + if [ ! -f "$1" ]; then + openssl rand -hex 32 | tr -d "\n" > "$1" + fi +} + +fi diff --git a/scripts/run-catalyst.sh b/scripts/run-catalyst.sh index 0f1b76aab4..9eeb337df7 100755 --- a/scripts/run-catalyst.sh +++ b/scripts/run-catalyst.sh @@ -7,8 +7,8 @@ # To start miner, run miner.start() # To increase verbosity: debug.verbosity(4) -GENESISJSON=$(mktemp) -GETHDATADIR=$(mktemp -d) +GENESIS_JSON=$(mktemp) +GETH_DATA_DIR=$(mktemp -d) echo \{\ \"config\": \{\ @@ -44,13 +44,13 @@ echo \{\ \"gasUsed\":\"0x0\",\ \"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\ \"baseFeePerGas\":\"0x7\"\ -\} > "${GENESISJSON}" +\} > "${GENESIS_JSON}" # Initialize the genesis -~/go-ethereum/build/bin/geth --http --ws -http.api "engine" --datadir "${GETHDATADIR}" init "${GENESISJSON}" +~/go-ethereum/build/bin/geth --http --ws -http.api "engine" --datadir "${GETH_DATA_DIR}" init "${GENESIS_JSON}" # Import the signing key (press enter twice for empty password) -~/go-ethereum/build/bin/geth --http --ws -http.api "engine" --datadir "${GETHDATADIR}" account import <(echo 45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8) +~/go-ethereum/build/bin/geth --http --ws -http.api "engine" --datadir "${GETH_DATA_DIR}" account import <(echo 45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8) # Start the node (and press enter once to unlock the account) -~/go-ethereum/build/bin/geth --http --ws --http.api "eth,net,engine" -ws.api "eth,net,engine" --datadir "${GETHDATADIR}" --allow-insecure-unlock --unlock "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" --password "" --nodiscover console +~/go-ethereum/build/bin/geth --http --ws --http.api "eth,net,engine" -ws.api "eth,net,engine" --datadir "${GETH_DATA_DIR}" --allow-insecure-unlock --unlock "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" --password "" --nodiscover console diff --git a/scripts/run-geth-el.sh b/scripts/run-geth-el.sh index dfaf1e83a1..3fbbde5c1f 100755 --- a/scripts/run-geth-el.sh +++ b/scripts/run-geth-el.sh @@ -6,47 +6,72 @@ if [ -z "$1" ]; then exit 1 fi +if [ ! 
-d "$1" ]; then + echo "Please supply a valid network metadata directory" + exit 1 +fi + set -Eeu -NETWORK=$1 +NETWORK=$(cd "$1"; pwd) -NETWORK_ID=$(cat "$NETWORK/genesis.json" | jq '.config.chainId') +cd $(dirname "$0") + +source geth_binaries.sh +source repo_paths.sh + +: ${GETH_RPC_PORT:=18550} +: ${GETH_WS_PORT:=18551} -GETH=${HOME}/execution_clients/go-ethereum/build/bin/geth +DATA_DIR="$(create_data_dir_for_network "$NETWORK")" -# https://github.com/eth2-clients/merge-testnets/tree/main/kintsugi -EXECUTION_BOOTNODES=$(awk '{print $1}' "$NETWORK/el_bootnode.txt" | paste -s -d, -) +JWT_TOKEN="$DATA_DIR/jwt-token" +create_jwt_token "$JWT_TOKEN" -GETHDATADIR=$(mktemp -d) -GENESISJSON="${NETWORK}/genesis.json" +NETWORK_ID=$(cat "$NETWORK/genesis.json" | jq '.config.chainId') + +EXECUTION_BOOTNODES="" +if [[ -f "$NETWORK/el_bootnode.txt" ]]; then + EXECUTION_BOOTNODES+=$(awk '{print $1}' "$NETWORK/el_bootnode.txt" "$NETWORK/el_bootnode.txt" | paste -s -d, -) +fi -echo "GETHDATADIR = ${GETHDATADIR}" +if [[ -f "$NETWORK/el_bootnodes.txt" ]]; then + EXECUTION_BOOTNODES+=$(awk '{print $1}' "$NETWORK/el_bootnodes.txt" "$NETWORK/el_bootnode.txt" | paste -s -d, -) +fi + +GETH_DATA_DIR="$DATA_DIR/geth" +EXECUTION_GENESIS_JSON="${NETWORK}/genesis.json" -# Initialize the genesis -$GETH --http --ws -http.api "engine" --datadir "${GETHDATADIR}" init "${GENESISJSON}" +if [[ ! -d "$GETH_DATA_DIR/geth" ]]; then + # Initialize the genesis + $GETH_EIP_4844_BINARY --http --ws -http.api "engine" --datadir "${GETH_DATA_DIR}" init "${EXECUTION_GENESIS_JSON}" -# Import the signing key (press enter twice for empty password) -$GETH --http --ws -http.api "engine" --datadir "${GETHDATADIR}" account import <(echo 45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8) + # Import the signing key (press enter twice for empty password) + $GETH_EIP_4844_BINARY --http --ws -http.api "engine" --datadir "${GETH_DATA_DIR}" account import <(echo 45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8) +fi #--password "execution/geth/passfile.txt" #--nodekey "execution/signer.key" -$GETH \ +$GETH_EIP_4844_BINARY \ --http \ - --http.port 8550 \ - --http.api "engine,eth,net,admin,web3" \ --http.corsdomain="*" \ --http.vhosts="*" \ + --http.addr=127.0.0.1 \ + --http.port="$GETH_RPC_PORT" \ + -http.api=web3,debug,engine,eth,net,txpool \ --ws \ - --ws.port 8551 \ - --ws.api "engine,eth,net,admin,web3" \ + --ws.addr=127.0.0.1 \ + --ws.port="$GETH_WS_PORT" \ + --ws.origins="*" \ + --ws.api=debug,eth,txpool,net,engine \ + --authrpc.jwtsecret "$JWT_TOKEN" \ --allow-insecure-unlock \ - --datadir "${GETHDATADIR}" \ + --datadir "${GETH_DATA_DIR}" \ --bootnodes "${EXECUTION_BOOTNODES}" \ --port 30308 \ --password "" \ - --syncmode full \ + --metrics \ --unlock "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" \ - --mine \ - --networkid $NETWORK_ID \ - console + --syncmode=full \ + --networkid $NETWORK_ID diff --git a/scripts/run-nimbus-eth2-in-withdrawal-testnet.sh b/scripts/run-nimbus-eth2-in-withdrawal-testnet.sh new file mode 100755 index 0000000000..589ecea9e7 --- /dev/null +++ b/scripts/run-nimbus-eth2-in-withdrawal-testnet.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +if [ -z "$1" ]; then + echo "Usage: run-nimbus-eth2-in-withdrawal-testnet.sh " + exit 1 +fi + +if [ ! 
-d "$1" ]; then + echo "Please supply a valid network metadata directory" + exit 1 +fi + +set -Eeu + +NETWORK=$(cd "$1"; pwd) + +cd $(dirname "$0") + +source repo_paths.sh + +DATA_DIR="$(create_data_dir_for_network "$NETWORK")" + +JWT_TOKEN="$DATA_DIR/jwt-token" +create_jwt_token "$JWT_TOKEN" + +"$BUILD_DIR/nimbus_beacon_node" \ + --non-interactive \ + --udp-port=19000 \ + --tcp-port=19000 \ + --network="$NETWORK" \ + --log-level=DEBUG \ + --data-dir="$DATA_DIR/nimbus_bn" \ + --el=http://localhost:18550/ \ + --rest:on \ + --rest-port=15052 \ + --metrics=on \ + --metrics-port=18008 \ + --doppelganger-detection=no \ + --jwt-secret="$JWT_TOKEN" diff --git a/scripts/start_geth_nodes.sh b/scripts/start_geth_nodes.sh index 67e4517b6a..f51b60a833 100755 --- a/scripts/start_geth_nodes.sh +++ b/scripts/start_geth_nodes.sh @@ -2,58 +2,60 @@ set -euo pipefail -BASEDIR="$(dirname "${BASH_SOURCE[0]}")" +SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")" -. "${BASEDIR}/geth_vars.sh" +source "${SCRIPTS_DIR}/geth_binaries.sh" +source "${SCRIPTS_DIR}/geth_vars.sh" #These are used in the caller script GETH_ENODES=() -GETH_HTTP_PORTS=() -GETH_NET_PORTS=() -GETH_WS_PORTS=() -GETH_RPC_PORTS=() -GETH_DATA_DIRS=() log "Using ${GETH_BINARY}" -for GETH_NUM_NODE in $(seq 0 $(( GETH_NUM_NODES - 1 ))); do - GETH_NET_PORT=$(( GETH_NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_NET_PORT )) - GETH_HTTP_PORT=$(( GETH_NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_HTTP_PORT )) - GETH_WS_PORT=$(( GETH_NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_WS_PORT )) - GETH_AUTH_RPC_PORT=$(( GETH_NUM_NODE * GETH_PORT_OFFSET + GETH_BASE_AUTH_RPC_PORT )) - log "Starting geth node ${GETH_NUM_NODE} on net port ${GETH_NET_PORT} HTTP port ${GETH_HTTP_PORT} WS port ${GETH_WS_PORT}" - GETHDATADIR=$(mktemp -d "${DATA_DIR}"/geth-data-XXX) - GETH_DATA_DIRS+=(${GETHDATADIR}) - openssl rand -hex 32 | tr -d "\n" > "${GETHDATADIR}/jwtsecret" - ${GETH_BINARY} --http --ws -http.api "engine" --datadir "${GETHDATADIR}" init "${GENESISJSON}" - ${GETH_BINARY} --http --ws --http.corsdomain '*' --http.api "eth,net,engine" -ws.api "eth,net,engine" --datadir "${GETHDATADIR}" ${DISCOVER} --port ${GETH_NET_PORT} --http.port ${GETH_HTTP_PORT} --ws.port ${GETH_WS_PORT} --authrpc.port ${GETH_AUTH_RPC_PORT} --authrpc.jwtsecret "${GETHDATADIR}/jwtsecret" &> "${DATA_DIR}/geth-log${GETH_NUM_NODE}.txt" & - GETH_RETRY=0 - while :; do - if [[ -S "${GETHDATADIR}/geth.ipc" ]]; then - echo "Geth ${GETH_NUM_NODE} started in $(( GETH_RETRY * 100 ))ms" - break - fi - if (( ++GETH_RETRY >= 300 )); then - echo "Geth ${GETH_NUM_NODE} failed to start" - exit 1 - fi - sleep 0.1 - done - NODE_ID=$(${GETH_BINARY} attach --datadir "${GETHDATADIR}" --exec admin.nodeInfo.enode) - GETH_ENODES+=("${NODE_ID}") - GETH_HTTP_PORTS+=("${GETH_HTTP_PORT}") - GETH_NET_PORTS+=("${GETH_NET_PORT}") - GETH_WS_PORTS+=("${GETH_WS_PORT}") - GETH_RPC_PORTS+=("${GETH_AUTH_RPC_PORT}") +start_geth_node() { + GETH_NODE_IDX=$1 + + ${GETH_BINARY} version + ${GETH_BINARY} --datadir "${GETH_DATA_DIRS[GETH_NODE_IDX]}" init "${EXECUTION_GENESIS_JSON}" + + ${GETH_BINARY} \ + --syncmode full \ + --datadir "${GETH_DATA_DIRS[GETH_NODE_IDX]}" \ + ${DISCOVER} \ + --port ${GETH_NET_PORTS[GETH_NODE_IDX]} \ + --authrpc.port ${GETH_AUTH_RPC_PORTS[GETH_NODE_IDX]} \ + --authrpc.jwtsecret "${JWT_FILE}" +} + +for GETH_NODE_IDX in $(seq 0 $GETH_LAST_NODE_IDX); do + start_geth_node $GETH_NODE_IDX \ + &> "${DATA_DIR}/geth-log${GETH_NODE_IDX}.txt" & +done + +for GETH_NODE_IDX in $(seq 0 $GETH_LAST_NODE_IDX); do + GETH_RETRY=0 + while :; do + if [[ -S 
"${GETH_DATA_DIRS[GETH_NODE_IDX]}/geth.ipc" ]]; then + echo "Geth ${GETH_NODE_IDX} started in $(( GETH_RETRY * 100 ))ms" + break + fi + if (( ++GETH_RETRY >= 300 )); then + echo "Geth ${GETH_NODE_IDX} failed to start" + exit 1 + fi + sleep 0.1 + done + NODE_ID=$(${GETH_BINARY} attach --datadir "${GETH_DATA_DIRS[GETH_NODE_IDX]}" --exec admin.nodeInfo.enode) + GETH_ENODES+=("${NODE_ID}") done #Add all nodes as peers for dir in "${GETH_DATA_DIRS[@]}" do - for enode in "${GETH_ENODES[@]}" - do - ${GETH_BINARY} attach --datadir "${dir}" --exec "admin.addPeer(${enode})" - done + for enode in "${GETH_ENODES[@]}" + do + ${GETH_BINARY} attach --datadir "${dir}" --exec "admin.addPeer(${enode})" & + done done -log "GETH HTTP Ports: ${GETH_HTTP_PORTS[*]}" +log "GETH RPC Ports: ${GETH_AUTH_RPC_PORTS[*]}" diff --git a/scripts/start_nimbus_el_nodes.sh b/scripts/start_nimbus_el_nodes.sh index 45278db4aa..2973bdcc79 100755 --- a/scripts/start_nimbus_el_nodes.sh +++ b/scripts/start_nimbus_el_nodes.sh @@ -2,66 +2,74 @@ set -euo pipefail -BASEDIR="$(dirname "${BASH_SOURCE[0]}")" +SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")" -. "${BASEDIR}/nimbus_el_vars.sh" +. "${SCRIPTS_DIR}/nimbus_el_vars.sh" -#These are used in the caller script -NIMBUSEL_ENODES=() -NIMBUSEL_HTTP_PORTS=() -NIMBUSEL_NET_PORTS=() -NIMBUSEL_WS_PORTS=() -NIMBUSEL_RPC_PORTS=() -NIMBUSEL_DATA_DIRS=() +NIMBUS_ETH1_ENODES=() +NIMBUS_ETH1_DATA_DIRS=() wait_for_port() { for EXPONENTIAL_BACKOFF in {1..10}; do - nc -w 1 -z $1 $2 && break; + nc -w 1 -z $1 $2 > /dev/null && break; DELAY=$((2**$EXPONENTIAL_BACKOFF)) echo "Port ${2} not yet available. Waiting ${DELAY} seconds" sleep $DELAY done } -log "Using ${NIMBUSEL_BINARY}" +if [ -d /opt/homebrew/lib ]; then + # BEWARE + # The recent versions of homebrew/macOS can't add the libraries + # installed by Homebrew in the system's library search path, so + # Nimbus will fail to load RocksDB on start-up. 
The new rules in
+  # macOS make it very difficult for the user to solve the problem
+  # in their profile, so we add an override here as the lesser evil:
+  export DYLD_LIBRARY_PATH="${DYLD_LIBRARY_PATH:-}:/opt/homebrew/lib"
+  # See https://github.com/Homebrew/brew/issues/13481 for more details
+fi

-for NUM_NODE in $(seq 0 $(( NIMBUSEL_NUM_NODES - 1 ))); do
-  NIMBUSEL_NET_PORT=$(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_NET_PORT ))
-  NIMBUSEL_HTTP_PORT=$(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_HTTP_PORT ))
-  NIMBUSEL_WS_PORT=$(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_WS_PORT ))
-  NIMBUSEL_AUTH_RPC_PORT=$(( NUM_NODE * NIMBUSEL_PORT_OFFSET + NIMBUSEL_BASE_AUTH_RPC_PORT ))
-  log "Starting nimbus EL node ${NUM_NODE} on net port ${NIMBUSEL_NET_PORT} HTTP port ${NIMBUSEL_HTTP_PORT} WS port ${NIMBUSEL_WS_PORT}"
-  NIMBUSEL_DATADIR=$(mktemp -d nimbusel-data-XXX)
-  NIMBUSEL_DATA_DIRS+=("${NIMBUSEL_DATADIR}")
-  openssl rand -hex 32 | tr -d "\n" > "${NIMBUSEL_DATADIR}/jwtsecret"
-  ${NIMBUSEL_BINARY} --data-dir="${NIMBUSEL_DATADIR}" --custom-network="${NIMBUSEL_GENESIS}" "${NIMBUSEL_DISCOVERY}" \
-    --tcp-port="${NIMBUSEL_NET_PORT}" --engine-api --engine-api-port="${NIMBUSEL_AUTH_RPC_PORT}" \
-    --rpc --rpc-port="${NIMBUSEL_HTTP_PORT}" &> "${DATA_DIR}/nimbusel_log${NUM_NODE}.txt" &
+PROCS_TO_KILL+=("${NIMBUS_ETH1_BINARY}")

-  wait_for_port localhost "${NIMBUSEL_HTTP_PORT}"
+for NIMBUS_ETH1_NODE_IDX in $(seq 0 $NIMBUS_ETH1_LAST_NODE_IDX); do
+  NIMBUS_ETH1_DATA_DIR=$(mktemp -d "${DATA_DIR}/nimbus-eth1-data-XXXXXX")
+  NIMBUS_ETH1_DATA_DIRS+=("${NIMBUS_ETH1_DATA_DIR}")

-  NODE_ID=$(
-    "${CURL_BINARY}" -sS -X POST \
-      -H 'Content-Type: application/json' \
-      -d '{"jsonrpc":"2.0","id":"id","method":"net_nodeInfo"}' \
-      "http://localhost:${NIMBUSEL_HTTP_PORT}" | "${JQ_BINARY}" .result.enode)
-  log "EL Node ID" "${NODE_ID}"
-  NIMBUSEL_ENODES+=("${NODE_ID}")
-  NIMBUSEL_HTTP_PORTS+=("${NIMBUSEL_HTTP_PORT}")
-  NIMBUSEL_NET_PORTS+=("${NIMBUSEL_NET_PORT}")
-  NIMBUSEL_WS_PORTS+=("${NIMBUSEL_WS_PORT}")
-  NIMBUSEL_RPC_PORTS+=("${NIMBUSEL_AUTH_RPC_PORT}")
+  ${NIMBUS_ETH1_BINARY} \
+    --data-dir="${NIMBUS_ETH1_DATA_DIR}" \
+    --custom-network="${EXECUTION_GENESIS_JSON}" \
+    --discovery=None \
+    --tcp-port="${NIMBUS_ETH1_NET_PORTS[NIMBUS_ETH1_NODE_IDX]}" \
+    --jwt-secret="${JWT_FILE}" \
+    --engine-api --engine-api-port="${NIMBUS_ETH1_AUTH_RPC_PORTS[NIMBUS_ETH1_NODE_IDX]}" \
+    --rpc --rpc-port="${NIMBUS_ETH1_RPC_PORTS[NIMBUS_ETH1_NODE_IDX]}" \
+    &> "${DATA_DIR}/nimbus_eth1_log${NIMBUS_ETH1_NODE_IDX}.txt" &
 done

-for enode in "${NIMBUSEL_ENODES[@]}"
+echo "Waiting for the Nimbus ETH1 nodes to come online..."
+for NIMBUS_ETH1_NODE_IDX in $(seq 0 $NIMBUS_ETH1_LAST_NODE_IDX); do
+  wait_for_port localhost "${NIMBUS_ETH1_RPC_PORTS[NIMBUS_ETH1_NODE_IDX]}"
+
+  NODE_ID=$(
+    "${CURL_BINARY}" -sS -X POST \
+      -H 'Content-Type: application/json' \
+      -d '{"jsonrpc":"2.0","id":"id","method":"net_nodeInfo"}' \
+      "http://localhost:${NIMBUS_ETH1_RPC_PORTS[NIMBUS_ETH1_NODE_IDX]}" | "${JQ_BINARY}" .result.enode)
+  log "EL Node ID" "${NODE_ID}"
+  NIMBUS_ETH1_ENODES+=("${NODE_ID}")
+done
+
+# TODO Here we should connect to the Geth nodes as well
+echo "Connect all nodes through the nimbus_addPeer RPC call..." 
+for enode in "${NIMBUS_ETH1_ENODES[@]}" do - for port in "${NIMBUSEL_HTTP_PORTS[@]}" + for port in "${NIMBUS_ETH1_RPC_PORTS[@]}" do "${CURL_BINARY}" -sS -X POST \ -H 'Content-Type: application/json' \ -d '{"jsonrpc":"2.0","id":"1","method":"nimbus_addPeer","params": ['"${enode}"']}' \ - "http://localhost:${port}" - done + "http://localhost:${port}" & + done done -echo "NimbusEL HTTP Ports: ${NIMBUSEL_HTTP_PORTS[*]}" +echo "Nimbus ETH1 HTTP Ports: ${NIMBUS_ETH1_RPC_PORTS[*]}" diff --git a/scripts/test_merge_node.nim b/scripts/test_merge_node.nim index 188f207549..1144740084 100644 --- a/scripts/test_merge_node.nim +++ b/scripts/test_merge_node.nim @@ -56,14 +56,13 @@ proc run() {.async.} = echo "args are: web3url jwtsecretfilename" let - eth1Monitor = Eth1Monitor.init( + elManager = newClone ELManager.init( defaultRuntimeConfig, db = nil, nil, @[paramStr(1)], none(DepositTreeSnapshot), none(Eth1Network), false, some readJwtSecret(paramStr(2)).get) - await eth1Monitor.ensureDataProvider() try: - await eth1Monitor.exchangeTransitionConfiguration() + await elManager.exchangeTransitionConfiguration() except ValueError as exc: # Expected, since nothing here sets up the Nimbus TTD correctly echo "exchangeTransitionConfiguration ValueError: " & exc.msg diff --git a/scripts/test_merge_vectors.nim b/scripts/test_merge_vectors.nim index a3700d1b81..2101069a44 100644 --- a/scripts/test_merge_vectors.nim +++ b/scripts/test_merge_vectors.nim @@ -58,7 +58,7 @@ const proc run() {.async.} = let jwtSecret = some readJwtSecret("jwt.hex").get - eth1Monitor = Eth1Monitor.init( + elManager = newClone ELManager.init( defaultRuntimeConfig, db = nil, nil, @[web3Url], none(DepositTreeSnapshot), none(Eth1Network), false, jwtSecret) @@ -69,28 +69,26 @@ proc run() {.async.} = Eth1Address.fromHex("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b") let existingBlock = await web3Provider.getBlockByNumber(0) - await eth1Monitor.ensureDataProvider() - let - payloadId = await eth1Monitor.forkchoiceUpdated( + payloadId = await elManager.forkchoiceUpdated( existingBlock.hash.asEth2Digest, existingBlock.hash.asEth2Digest, existingBlock.timestamp.uint64 + 12, ZERO_HASH.data, # Random feeRecipient) - payload = await eth1Monitor.getPayload( + payload = await elManager.getPayload( array[8, byte] (payloadId.payloadId.get)) - payloadStatus = await eth1Monitor.newPayload(payload) - fcupdatedStatus = await eth1Monitor.forkchoiceUpdated( + payloadStatus = await elManager.sendNewPayload(payload) + fcupdatedStatus = await elManager.forkchoiceUpdated( payload.blockHash.asEth2Digest, payload.blockHash.asEth2Digest, existingBlock.timestamp.uint64 + 24, ZERO_HASH.data, # Random feeRecipient) - payload2 = await eth1Monitor.getPayload( + payload2 = await elManager.getPayload( array[8, byte] (fcupdatedStatus.payloadId.get)) - payloadStatus2 = await eth1Monitor.newPayload(payload2) - fcupdatedStatus2 = await eth1Monitor.forkchoiceUpdated( + payloadStatus2 = await elManager.sendNewPayload(payload2) + fcupdatedStatus2 = await elManager.forkchoiceUpdated( payload2.blockHash.asEth2Digest, payload2.blockHash.asEth2Digest, existingBlock.timestamp.uint64 + 36, diff --git a/tests/test_block_processor.nim b/tests/test_block_processor.nim index 9224c7be6b..678c9d3fa9 100644 --- a/tests/test_block_processor.nim +++ b/tests/test_block_processor.nim @@ -41,11 +41,11 @@ suite "Block processor" & preset(): verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool) quarantine = newClone(Quarantine.init()) attestationPool = newClone(AttestationPool.init(dag, 
     attestationPool = newClone(AttestationPool.init(dag, quarantine))
-    eth1Monitor = new Eth1Monitor
+    elManager = new ELManager # TODO: initialise this properly
     actionTracker: ActionTracker
     keymanagerHost: ref KeymanagerHost
     consensusManager = ConsensusManager.new(
-      dag, attestationPool, quarantine, eth1Monitor, actionTracker,
+      dag, attestationPool, quarantine, elManager, actionTracker,
       newClone(DynamicFeeRecipientsStore.init()), "", default(Eth1Address))
     state = newClone(dag.headState)
diff --git a/tests/test_eth1_monitor.nim b/tests/test_eth1_monitor.nim
index dc2bd10e52..9a6e986c0f 100644
--- a/tests/test_eth1_monitor.nim
+++ b/tests/test_eth1_monitor.nim
@@ -9,7 +9,7 @@

 import
   unittest2,
-  ../beacon_chain/eth1/eth1_monitor,
+  ../beacon_chain/eth1/[el_conf, eth1_monitor],
   ./testutil

 from ssz_serialization/types import Limit, List, init
diff --git a/vendor/nim-eth b/vendor/nim-eth
index 2b5f2a27e3..59e4f1d534 160000
--- a/vendor/nim-eth
+++ b/vendor/nim-eth
@@ -1 +1 @@
-Subproject commit 2b5f2a27e303b13127bb525b0c7a309eaa7fbed9
+Subproject commit 59e4f1d534856d09057b8b4f1521d1ce070bbe2a
diff --git a/vendor/nim-web3 b/vendor/nim-web3
index 43b710c5d6..d596e68c14 160000
--- a/vendor/nim-web3
+++ b/vendor/nim-web3
@@ -1 +1 @@
-Subproject commit 43b710c5d64eb0c6dbde954f470aed46f8ab31b7
+Subproject commit d596e68c14d0cf7fb3d52023ba16cac49b615173
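
Each EL bring-up in the scripts above follows the same two-step pattern: poll the node's RPC port until it answers, then fetch the node's enode URL via the net_nodeInfo JSON-RPC call. Below is a minimal standalone sketch of that pattern, assuming only that curl, jq and nc are on PATH; the helper names and the port in the usage lines are illustrative, not taken from the scripts.

#!/usr/bin/env bash
set -euo pipefail

CURL_BINARY="${CURL_BINARY:-curl}"
JQ_BINARY="${JQ_BINARY:-jq}"

wait_for_port() {
  # Same backoff schedule as wait_for_port above: up to 10 probes,
  # sleeping 2, 4, ..., 1024 seconds between attempts.
  for EXPONENTIAL_BACKOFF in {1..10}; do
    nc -w 1 -z "$1" "$2" > /dev/null && return 0
    DELAY=$((2**EXPONENTIAL_BACKOFF))
    echo "Port ${2} not yet available. Waiting ${DELAY} seconds"
    sleep $DELAY
  done
  return 1
}

fetch_enode() {
  # Ask the node for its enode URL. Note that jq keeps the JSON string
  # quotes in its output, which the nimbus_addPeer payload below relies on.
  "${CURL_BINARY}" -sS -X POST \
    -H 'Content-Type: application/json' \
    -d '{"jsonrpc":"2.0","id":"id","method":"net_nodeInfo"}' \
    "http://localhost:${1}" | "${JQ_BINARY}" .result.enode
}

# Usage: block until the node on port 8545 answers, then print its enode.
wait_for_port localhost 8545
fetch_enode 8545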
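
The peering loops for Geth and Nimbus share one shape as well: every enode is offered to every node, and each request is backgrounded with '&' so the O(n^2) fan-out runs concurrently instead of serially. Here is a sketch of the Nimbus variant factored into a function; the function name is ours, NIMBUS_ETH1_ENODES and NIMBUS_ETH1_RPC_PORTS are the arrays populated above, and the trailing 'wait' is an addition that the scripts themselves do not perform.

connect_all_peers() {
  local enode port
  for enode in "${NIMBUS_ETH1_ENODES[@]}"; do
    for port in "${NIMBUS_ETH1_RPC_PORTS[@]}"; do
      # ${enode} already carries its double quotes (see fetch_enode
      # above), so it drops into the JSON params array as a string.
      "${CURL_BINARY}" -sS -X POST \
        -H 'Content-Type: application/json' \
        -d '{"jsonrpc":"2.0","id":"1","method":"nimbus_addPeer","params": ['"${enode}"']}' \
        "http://localhost:${port}" &
    done
  done
  wait  # block until every backgrounded addPeer request has finished
}

Waiting on the backgrounded requests makes addPeer failures visible before the beacon nodes start, at the cost of a short delay.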