From ded7767ccdc4e6a63a6020794da40fea2f75634c Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Mon, 10 Apr 2023 16:21:18 +0530 Subject: [PATCH] Merge qa to develop (#814) * Adding in Mumbai/Mainnet precursor deb packaging for tests to use during upgrade(iterations to come) * Added changes per discussion in PR, more changes may be necessary * Adding prerelease true * Disabling goreleaser * Removing README swap file * change bor_dir and add bor user for v0.3.0 release * rollback bor user and use root * metrics: handle equal to separated config flag (#596) * metrics: handle based config path * internal/cli/server: add more context to logs * use space separated flag and value in bor.service * fixed static-nodes related buf (os independent) (#598) * fixed static-nodes related buf (os independent) * taking static-nodes as input if default not present * Update default flags (#600) * internal/cli/server: use geth's default for txpool.pricelimit and add comments * builder/files: update config.toml for mainnet * packaging/templates: update defaults for mainnet and mumbai * internal/cli/server: skip overriding cache * packaging/templates: update cache value for mainnet * packaging/templates: update gcmode for archive mumbai node * metrics: handle nil telemetry config (#601) * resolve merge conflicts * update go version in release.yml * update goversion in makefile * update Docker login for goreleaser-cross v1.19 * Cleanup for the packager to use git tag in the package profile naming. Added conditional check for directory structure, this is in prep for v0.3.1, as this will create a failure on upgrade path in package due to file exist * added a toml configuration file with comments describing each flag (#607) * added a toml configuration file with comments describing each flag * internal/cli/server: update flag description * docs/cli: update example config and description of flags * docs: update new-cli docs Co-authored-by: Manav Darji * Adding of 0.3.0 package changes, control file updates, postinst changes, and packager update * added ancient datadir flag and toml field, need to decide on default value and update the conversion script * updated toml files with ancient field * Add support for new flags in new config.toml, which were present in old config.toml (#612) * added HTTPTimeouts, and TrieTimeout flag in new tol, from old toml * added RAW fields for these time.Duration flags * updated the conversion script to support these extra 4 flags * removed hcl and json config tests as we are only supporting toml config files * updated toml files with cache.timeout field * updated toml files with jsonrpc.timeouts field * tests/bor: expect a call for latest checkpoint * tests/bor: expect a call for latest checkpoint * packaging/templates: update cache values for archive nodes Co-authored-by: Manav Darji * remove unwanted code * Fix docker publish authentication issue In gorelease-cross 1.19+, dockerhub authentication will require docker logion action followed by mounting docker config file. See https://github.com/goreleaser/goreleaser-cross#github-actions. * Revert "update Docker login for goreleaser-cross v1.19" This reverts commit 4d19cf5342a439d98cca21b03c63a0bc075769cf. * Bump version to stable * Revert "Merge pull request #435 from maticnetwork/POS-553" This reverts commit 657d262defc9c94e9513b3d45230492d8b20eac7, reversing changes made to 88dbfa1c13c15464d3c1a3085a9f12d0ffb9b218. 
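For context on the "metrics: handle equal to separated config flag" item above: the launcher has to accept a flag whether the service file passes it as "--flag value" or "--flag=value". The standalone sketch below only illustrates that parsing idea; flagValue and the --metrics.config flag name are made up for the example and are not the bor implementation.

package main

import (
	"fmt"
	"strings"
)

// flagValue returns the value of name from args, accepting both the
// space-separated ("--name value") and equal-separated ("--name=value") forms.
func flagValue(args []string, name string) (string, bool) {
	for i, arg := range args {
		if arg == name && i+1 < len(args) {
			return args[i+1], true // space separated
		}
		if strings.HasPrefix(arg, name+"=") {
			return strings.TrimPrefix(arg, name+"="), true // equal separated
		}
	}
	return "", false
}

func main() {
	for _, args := range [][]string{
		{"server", "--metrics.config", "/etc/bor/metrics.toml"},
		{"server", "--metrics.config=/etc/bor/metrics.toml"},
	} {
		fmt.Println(flagValue(args, "--metrics.config"))
	}
}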
* revert change for release for go1.19 * Add default values to CLI helper and docs This commit adds default values to CLI helper and docs. When the default value of a string flag, slice string flag, or map string flag is empty, its helper message won't show any default value. * Add a summary of new CLI in docs * Updating packager as binutils changed version so that apt-get installs current versions * Add state pruning to new CLI * Minor wording fix in prune state description * Bumping control file versions * Mainnet Delhi fork * Set version to stable * change delhi hardfork block number * handle future chain import and skip peer drop (#650) * handle future chain import and skip peer drop * add block import metric * params: bump version to v0.3.3-stable * Bump bor version in control files for v0.3.3 mainnet release * Added checks to RPC requests and introduced new flags to customise the parameters (#657) * added a check to reject rpc requests with batch size > the one set using a newly added flag (rpcbatchlimit) * added a check to reject rpc requests whose result size > the one set using a newly added flag (rpcreturndatalimit) * updated the config files and docs * chg : trieTimeout from 60 to 10 mins (#692) * chg : trieTimeout from 60 to 10 mins * chg : cache.timout to 10m from 1h in configs * internal/cli/server : fix : added triesInMemory in config (#691) * changed version from 0.3.0 to 0.3.4-beta (#693) * fix nil state-sync issue, increase grpc limit (#695) * Increase grpc message size limit in pprof * consensus/bor/bor.go : stateSyncs init fixed [Fix #686] * eth/filters: handle nil state-sync before notify * eth/filters: update check Co-authored-by: Jerry Co-authored-by: Daniil * core, tests/bor: add more tests for state-sync validation (#710) * core: add get state sync function for tests * tests/bor: add validation for state sync events post consensus * Arpit/temp bor sync (#701) * Increase grpc message size limit in pprof * ReadBorReceipts improvements * use internal function * fix tests * fetch geth upstread for ReadBorReceiptRLP * Only query bor receipt when the query index is equal to # tx in block body This change reduces the frequency of calling ReadBorReceipt and ReadBorTransaction, which are CPU and db intensive. * Revert "fetch geth upstread for ReadBorReceiptRLP" This reverts commit 2e838a6b1313d26674f3a8df4b044e35dcbf35a0. * Restore ReadBorReceiptRLP * fix bor receipts * remove unused * fix lints --------- Co-authored-by: Jerry Co-authored-by: Manav Darji Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Revert "chg : trieTimeout from 60 to 10 mins (#692)" (#720) This reverts commit 241843c7e7bb18e64d2e157fd6fbbd665f6ce9d9. 
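The rpc.batchlimit change described above (#657) boils down to rejecting a JSON-RPC batch whose number of calls exceeds a configured limit before any call is executed. The sketch below shows only that guard in isolation; rpcCall, checkBatch, and the constant are illustrative names, not bor's actual identifiers (the 100 default comes from the config comment "rpc.batchlimit" = 100).

package main

import (
	"errors"
	"fmt"
)

type rpcCall struct {
	Method string
	Params []interface{}
}

const defaultBatchLimit = 100

// checkBatch returns an error when a batch request holds more calls than the
// configured limit, so the server can answer with a single error instead of
// executing every call.
func checkBatch(batch []rpcCall, limit int) error {
	if limit > 0 && len(batch) > limit {
		return errors.New("batch too large")
	}
	return nil
}

func main() {
	batch := make([]rpcCall, 150)
	fmt.Println(checkBatch(batch, defaultBatchLimit)) // batch too large
}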
* Arpit/add execution pool 2 (#719) * initial * linters * linters * remove timeout * update pool * change pool size function * check nil * check nil * fix tests * Use execution pool from server in all handlers * simplify things * test fix * add support for cli, config * add to cli and config * merge base branch * debug statements * fix bug * atomic pointer timeout * add apis * update workerpool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * fix tests * mutex * refactor flag and value names * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * debug statements * fix bug * update workerpool * atomic pointer timeout * add apis * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * merge base branch * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * mutex * fix tests * Merge branch 'arpit/add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * Change default size of execution pool to 40 * refactor flag and value names * fix merge conflicts * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * fix linters * fix go.mod * change sec to ms * change default value for ep timeout * fix node api calls * comment setter for ep timeout --------- Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Jerry Co-authored-by: Manav Darji * version change (#721) * Event based pprof (#732) * feature * Save pprof to /tmp --------- Co-authored-by: Jerry * Cherry-pick changes from develop (#738) * Check if block is nil to prevent panic (#736) * miner: use env for tracing instead of block object (#728) --------- Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * add max code init size check in txpool (#739) * Revert "Event based pprof" and update version (#742) * Revert "Event based pprof (#732)" This reverts commit 22fa4033e8fabb51c44e8d2a8c6bb695a6e9285e. 
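The execution pool added in #719 bounds how many RPC handlers run concurrently (default size 40) with an optional per-request timeout (config default "0s", i.e. no timeout). The following is a rough, self-contained sketch of that pattern, not the rpc/execution_pool.go code itself; the names and structure are assumptions.

package main

import (
	"context"
	"fmt"
	"time"
)

type execPool struct {
	slots chan struct{}
}

func newExecPool(size int) *execPool {
	return &execPool{slots: make(chan struct{}, size)}
}

// Run executes fn once a slot is free, giving up if the timeout expires first.
// A zero timeout is treated here as "wait indefinitely".
func (p *execPool) Run(timeout time.Duration, fn func()) error {
	ctx := context.Background()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	select {
	case p.slots <- struct{}{}:
		defer func() { <-p.slots }()
		fn()
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	pool := newExecPool(40)
	err := pool.Run(100*time.Millisecond, func() { fmt.Println("handled request") })
	fmt.Println(err)
}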
* params: update version to 0.3.4-beta3 * packaging/templates: update bor version * internal/ethapi :: Fix : newRPCTransactionFromBlockIndex * Merge master to qa (#813) * Merge qa to master (#750) * Added checks to RPC requests and introduced new flags to customise the parameters (#657) * added a check to reject rpc requests with batch size > the one set using a newly added flag (rpcbatchlimit) * added a check to reject rpc requests whose result size > the one set using a newly added flag (rpcreturndatalimit) * updated the config files and docs * chg : trieTimeout from 60 to 10 mins (#692) * chg : trieTimeout from 60 to 10 mins * chg : cache.timout to 10m from 1h in configs * internal/cli/server : fix : added triesInMemory in config (#691) * changed version from 0.3.0 to 0.3.4-beta (#693) * fix nil state-sync issue, increase grpc limit (#695) * Increase grpc message size limit in pprof * consensus/bor/bor.go : stateSyncs init fixed [Fix #686] * eth/filters: handle nil state-sync before notify * eth/filters: update check Co-authored-by: Jerry Co-authored-by: Daniil * core, tests/bor: add more tests for state-sync validation (#710) * core: add get state sync function for tests * tests/bor: add validation for state sync events post consensus * Arpit/temp bor sync (#701) * Increase grpc message size limit in pprof * ReadBorReceipts improvements * use internal function * fix tests * fetch geth upstread for ReadBorReceiptRLP * Only query bor receipt when the query index is equal to # tx in block body This change reduces the frequency of calling ReadBorReceipt and ReadBorTransaction, which are CPU and db intensive. * Revert "fetch geth upstread for ReadBorReceiptRLP" This reverts commit 2e838a6b1313d26674f3a8df4b044e35dcbf35a0. * Restore ReadBorReceiptRLP * fix bor receipts * remove unused * fix lints --------- Co-authored-by: Jerry Co-authored-by: Manav Darji Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Revert "chg : trieTimeout from 60 to 10 mins (#692)" (#720) This reverts commit 241843c7e7bb18e64d2e157fd6fbbd665f6ce9d9. 
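The "Only query bor receipt when the query index is equal to # tx in block body" optimisation above relies on the bor (state-sync) receipt being appended after the block's regular receipts, so the expensive lookup is needed only for one specific index. This standalone sketch shows that guard with plain placeholder types; receiptAt and its inputs are hypothetical stand-ins for the real ethapi/rawdb plumbing.

package main

import "fmt"

type receipt struct{ kind string }

// receiptAt picks a receipt by index, touching the bor-receipt lookup only when
// the index points one past the last regular transaction in the block body.
func receiptAt(regular []receipt, borReceipt *receipt, index int) (*receipt, bool) {
	if index < len(regular) {
		return &regular[index], true
	}
	if index == len(regular) && borReceipt != nil {
		return borReceipt, true // the synthetic bor receipt, fetched only when needed
	}
	return nil, false
}

func main() {
	regular := []receipt{{"tx0"}, {"tx1"}}
	bor := &receipt{"bor"}
	r, ok := receiptAt(regular, bor, 2)
	fmt.Println(r.kind, ok) // bor true
}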
* Arpit/add execution pool 2 (#719) * initial * linters * linters * remove timeout * update pool * change pool size function * check nil * check nil * fix tests * Use execution pool from server in all handlers * simplify things * test fix * add support for cli, config * add to cli and config * merge base branch * debug statements * fix bug * atomic pointer timeout * add apis * update workerpool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * fix tests * mutex * refactor flag and value names * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * debug statements * fix bug * update workerpool * atomic pointer timeout * add apis * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * merge base branch * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * mutex * fix tests * Merge branch 'arpit/add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * Change default size of execution pool to 40 * refactor flag and value names * fix merge conflicts * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * fix linters * fix go.mod * change sec to ms * change default value for ep timeout * fix node api calls * comment setter for ep timeout --------- Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Jerry Co-authored-by: Manav Darji * version change (#721) * Event based pprof (#732) * feature * Save pprof to /tmp --------- Co-authored-by: Jerry * Cherry-pick changes from develop (#738) * Check if block is nil to prevent panic (#736) * miner: use env for tracing instead of block object (#728) --------- Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * add max code init size check in txpool (#739) * Revert "Event based pprof" and update version (#742) * Revert "Event based pprof (#732)" This reverts commit 22fa4033e8fabb51c44e8d2a8c6bb695a6e9285e. * params: update version to 0.3.4-beta3 * packaging/templates: update bor version * params, packaging/templates: update bor version --------- Co-authored-by: SHIVAM SHARMA Co-authored-by: Pratik Patil Co-authored-by: Jerry Co-authored-by: Daniil Co-authored-by: Arpit Temani Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * core, miner: add sub-spans for tracing (#753) * core, miner: add sub-spans for tracing * fix linters * core: add logs for debugging * core: add more logs to print tdd while reorg * fix linters * core: minor fix * core: remove debug logs * core: use different span for write block and set head * core: use internal context for sending traces (#755) * core: add : impossible reorg block dump (#754) * add : impossible reorg block dump * chg : 3 seperate files for impossoble reorg dump * add : use exportBlocks method and RLP blocks before writing * chg : small changes * bump : go version from 1.19 to 1.20.1 (#761) * Revert "bump : go version from 1.19 to 1.20.1 (#761)" This reverts commit 4561012af9a31d20c2715ce26e4b39ca93420b8b. 
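"core, miner: add sub-spans for tracing (#753)" wraps the main steps of block import in named spans. The sketch below mirrors the tracing.StartSpan / tracing.Exec pattern visible in the core/blockchain.go hunk later in this patch; the span names and the work inside the closure are examples only.

package main

import (
	"context"

	"github.com/ethereum/go-ethereum/common/tracing"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func writeBlockExample(ctx context.Context, number uint64) error {
	// Parent span for the whole operation.
	ctx, span := tracing.StartSpan(ctx, "example.writeBlockAndSetHead")
	defer tracing.EndSpan(span)

	var err error

	// Each sub-step gets its own child span plus a few attributes for debugging.
	tracing.Exec(ctx, "", "example.writeBlockWithState", func(_ context.Context, span trace.Span) {
		err = nil // real code would persist the block and its state here
		tracing.SetAttributes(span,
			attribute.Int("number", int(number)),
			attribute.Bool("error", err != nil),
		)
	})

	return err
}

func main() {
	_ = writeBlockExample(context.Background(), 42)
}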
* core/vm: use optimized bigint (#26021) * Add holiman/big * Fix linter * Bump version to v0.3.5 * fix lints from develop (few lints decided to appear from code that was untouched, weird) * upgrade crypto lib version (#770) * bump dep : github.com/Masterminds/goutils to v1.1.1 (#769) * mardizzone/pos-1313: bump crypto dependency (#772) * dev: chg: bumd net dependency * dev: chg: bump crypto dependency * dev: chg: bump crypto dependency * bump dep : golang.org/x/net to v0.8.0 (#771) * Verify validator set against local contract on receiving an end-of-sprint block (#768) * Verify validator set against local contract on receiving an end-of-sprint block * Fix tests * Respect error returned by ParseValidators * Keep going back until a parent block presents * core/txpool: implement DoS defenses from geth (#778) * Hotfixes and deps bump (#776) * dev: chg: bump deps * internal/cli/server, rpc: lower down http readtimeout to 10s * dev: chg: get p2p adapter * dev: chg: lower down jsonrpc readtimeout to 10s * cherry-pick txpool optimisation changes * add check for empty lists in txpool (#704) * add check * linters * core, miner: add empty instrumentation name for tracing --------- Co-authored-by: Raneet Debnath Co-authored-by: SHIVAM SHARMA Co-authored-by: Evgeny Danilenko <6655321@bk.ru> Co-authored-by: Manav Darji * packaging,params: bump to v0.3.6 (#782) * v0.3.6 fix (#787) * Fix get validator set in header verifier * chg : commit tx logs from info to debug (#673) * chg : commit tx logs from info to debug * fix : minor changes * chg : miner : commitTransactions-stats moved from info to debug * lint : fix linters * refactor logging * miner : chg : UnauthorizedSignerError to debug * lint : fix lint * fix : log.Logger interface compatibility --------- Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Remove unnecessary sorting of valset from header in verification * dev: chg: version bump --------- Co-authored-by: SHIVAM SHARMA Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: marcello33 * core: improve locks in txpool (#807) * added a write lock to the txs.filter method and a read lock to the txs.reheap method - both of which are called by Filter during reorg adjustments to txpool * txpool reorg locks * more locks * locks * linters * params, packaging: update version for v0.3.8-beta release * core: add logs in reheap --------- Co-authored-by: Alex Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Merge qa to master (#808) * Added checks to RPC requests and introduced new flags to customise the parameters (#657) * added a check to reject rpc requests with batch size > the one set using a newly added flag (rpcbatchlimit) * added a check to reject rpc requests whose result size > the one set using a newly added flag (rpcreturndatalimit) * updated the config files and docs * chg : trieTimeout from 60 to 10 mins (#692) * chg : trieTimeout from 60 to 10 mins * chg : cache.timout to 10m from 1h in configs * internal/cli/server : fix : added triesInMemory in config (#691) * changed version from 0.3.0 to 0.3.4-beta (#693) * fix nil state-sync issue, increase grpc limit (#695) * Increase grpc message size limit in pprof * consensus/bor/bor.go : stateSyncs init fixed [Fix #686] * eth/filters: handle nil state-sync before notify * eth/filters: update check Co-authored-by: Jerry Co-authored-by: Daniil * core, tests/bor: add more tests for state-sync validation (#710) * core: add get state sync function for tests * tests/bor: add validation for state sync events post consensus * Arpit/temp bor sync 
(#701) * Increase grpc message size limit in pprof * ReadBorReceipts improvements * use internal function * fix tests * fetch geth upstread for ReadBorReceiptRLP * Only query bor receipt when the query index is equal to # tx in block body This change reduces the frequency of calling ReadBorReceipt and ReadBorTransaction, which are CPU and db intensive. * Revert "fetch geth upstread for ReadBorReceiptRLP" This reverts commit 2e838a6b1313d26674f3a8df4b044e35dcbf35a0. * Restore ReadBorReceiptRLP * fix bor receipts * remove unused * fix lints --------- Co-authored-by: Jerry Co-authored-by: Manav Darji Co-authored-by: Evgeny Danienko <6655321@bk.ru> * Revert "chg : trieTimeout from 60 to 10 mins (#692)" (#720) This reverts commit 241843c7e7bb18e64d2e157fd6fbbd665f6ce9d9. * Arpit/add execution pool 2 (#719) * initial * linters * linters * remove timeout * update pool * change pool size function * check nil * check nil * fix tests * Use execution pool from server in all handlers * simplify things * test fix * add support for cli, config * add to cli and config * merge base branch * debug statements * fix bug * atomic pointer timeout * add apis * update workerpool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * fix tests * mutex * refactor flag and value names * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * debug statements * fix bug * update workerpool * atomic pointer timeout * add apis * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * fix issues * change params * fix issues * fix ipc issue * remove execution pool from IPC * revert * merge base branch * Merge branch 'add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * mutex * fix tests * Merge branch 'arpit/add-execution-pool' of github.com:maticnetwork/bor into arpit/add-execution-pool * Change default size of execution pool to 40 * refactor flag and value names * fix merge conflicts * ordering fix * refactor flag and value names * update default ep size to 40 * fix bor start issues * revert file changes * fix linters * fix go.mod * change sec to ms * change default value for ep timeout * fix node api calls * comment setter for ep timeout --------- Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Jerry Co-authored-by: Manav Darji * version change (#721) * Event based pprof (#732) * feature * Save pprof to /tmp --------- Co-authored-by: Jerry * Cherry-pick changes from develop (#738) * Check if block is nil to prevent panic (#736) * miner: use env for tracing instead of block object (#728) --------- Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * add max code init size check in txpool (#739) * Revert "Event based pprof" and update version (#742) * Revert "Event based pprof (#732)" This reverts commit 22fa4033e8fabb51c44e8d2a8c6bb695a6e9285e. 
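The "Verify validator set against local contract on receiving an end-of-sprint block" change (#768) amounts to comparing two validator lists byte-for-byte, as the verifyCascadingFields hunk later in this patch does. This standalone sketch uses plain byte slices instead of the valset.Validator type; verifyValidators is an illustrative name.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"sort"
)

var errInvalidSpanValidators = errors.New("invalid validator set in header")

// verifyValidators checks that the validators recovered from the header match the
// set returned by the local validator-set contract, independent of contract order.
func verifyValidators(fromContract, fromHeader [][]byte) error {
	sort.Slice(fromContract, func(i, j int) bool {
		return bytes.Compare(fromContract[i], fromContract[j]) < 0
	})

	if len(fromContract) != len(fromHeader) {
		return errInvalidSpanValidators
	}
	for i := range fromContract {
		if !bytes.Equal(fromContract[i], fromHeader[i]) {
			return errInvalidSpanValidators
		}
	}
	return nil
}

func main() {
	contract := [][]byte{{0x02}, {0x01}}
	header := [][]byte{{0x01}, {0x02}}
	fmt.Println(verifyValidators(contract, header)) // <nil>
}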
* params: update version to 0.3.4-beta3 * packaging/templates: update bor version * internal/ethapi :: Fix : newRPCTransactionFromBlockIndex * fix: remove assignment for bor receipt --------- Co-authored-by: SHIVAM SHARMA Co-authored-by: Pratik Patil Co-authored-by: Jerry Co-authored-by: Daniil Co-authored-by: Arpit Temani Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> * Setting up bor to use hosted 18.04 runner as ubuntu provided 18.04 runner is end of life --------- Co-authored-by: SHIVAM SHARMA Co-authored-by: Pratik Patil Co-authored-by: Jerry Co-authored-by: Daniil Co-authored-by: Arpit Temani Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> Co-authored-by: Martin Holst Swende Co-authored-by: marcello33 Co-authored-by: Raneet Debnath Co-authored-by: Raneet Debnath <35629432+Raneet10@users.noreply.github.com> Co-authored-by: Alex Co-authored-by: Daniel Jones * core: remove duplicate tests * miner: use get validators by hash in tests --------- Co-authored-by: Daniel Jones Co-authored-by: Will Button Co-authored-by: Will Button Co-authored-by: Daniel Jones <105369507+djpolygon@users.noreply.github.com> Co-authored-by: Pratik Patil Co-authored-by: Arpit Temani Co-authored-by: Jerry Co-authored-by: SHIVAM SHARMA Co-authored-by: Daniil Co-authored-by: Evgeny Danienko <6655321@bk.ru> Co-authored-by: Dmitry <46797839+dkeysil@users.noreply.github.com> Co-authored-by: Martin Holst Swende Co-authored-by: marcello33 Co-authored-by: Raneet Debnath Co-authored-by: Raneet Debnath <35629432+Raneet10@users.noreply.github.com> Co-authored-by: Alex --- builder/files/config.toml | 8 +- cmd/clef/main.go | 2 +- consensus/bor/bor.go | 43 +++- consensus/bor/heimdall/span/spanner.go | 15 +- consensus/bor/span.go | 4 +- consensus/bor/span_mock.go | 46 ++-- core/blockchain.go | 149 ++++++++++-- core/blockchain_reader.go | 4 + core/bor_blockchain.go | 2 +- core/error.go | 4 + core/rawdb/bor_receipt.go | 63 ++--- core/tests/blockchain_repair_test.go | 2 +- core/tx_list.go | 86 +++++-- core/tx_pool.go | 101 ++++++-- core/tx_pool_test.go | 28 ++- core/txpool2_test.go | 229 ++++++++++++++++++ core/vm/contracts.go | 19 +- docs/cli/example_config.toml | 34 +-- docs/cli/server.md | 14 ++ eth/api_backend.go | 4 + eth/ethconfig/config.go | 14 +- eth/filters/bor_api.go | 8 +- eth/filters/test_backend.go | 2 +- eth/tracers/api.go | 2 +- eth/tracers/api_test.go | 4 + go.mod | 38 +-- go.sum | 47 ++-- internal/cli/debug_pprof.go | 4 +- internal/cli/dumpconfig.go | 2 + internal/cli/server/config.go | 64 +++-- internal/cli/server/flags.go | 40 +++ internal/ethapi/api.go | 42 +++- internal/ethapi/backend.go | 9 +- internal/web3ext/web3ext.go | 27 +++ les/api_backend.go | 4 + miner/fake_miner.go | 2 +- miner/worker.go | 4 +- miner/worker_test.go | 4 +- node/api.go | 89 +++++++ node/config.go | 9 + node/node.go | 31 ++- node/rpcstack.go | 21 +- node/rpcstack_test.go | 2 +- .../templates/mainnet-v1/archive/config.toml | 8 +- .../mainnet-v1/sentry/sentry/bor/config.toml | 8 +- .../sentry/validator/bor/config.toml | 8 +- .../mainnet-v1/without-sentry/bor/config.toml | 8 +- packaging/templates/package_scripts/control | 2 +- .../templates/package_scripts/control.arm64 | 2 +- .../package_scripts/control.profile.amd64 | 2 +- .../package_scripts/control.profile.arm64 | 2 +- .../package_scripts/control.validator | 2 +- .../package_scripts/control.validator.arm64 | 2 +- 
.../templates/testnet-v4/archive/config.toml | 8 +- .../testnet-v4/sentry/sentry/bor/config.toml | 8 +- .../sentry/validator/bor/config.toml | 9 +- .../testnet-v4/without-sentry/bor/config.toml | 8 +- params/config.go | 4 + params/protocol_params.go | 3 +- params/version.go | 13 +- rpc/client.go | 2 +- rpc/client_test.go | 7 +- rpc/endpoints.go | 2 +- rpc/execution_pool.go | 99 ++++++++ rpc/handler.go | 22 +- rpc/http.go | 2 +- rpc/http_test.go | 2 +- rpc/inproc.go | 8 +- rpc/ipc.go | 5 +- rpc/server.go | 58 ++++- rpc/server_test.go | 2 +- rpc/subscription_test.go | 2 +- rpc/testservice_test.go | 2 +- rpc/websocket_test.go | 2 +- tests/bor/bor_test.go | 33 +++ tests/bor/helper.go | 13 +- 76 files changed, 1388 insertions(+), 296 deletions(-) create mode 100644 core/txpool2_test.go create mode 100644 rpc/execution_pool.go diff --git a/builder/files/config.toml b/builder/files/config.toml index 4b917ac8903c..f2ed49227954 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -9,6 +9,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "/var/lib/bor/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true @@ -86,6 +88,8 @@ syncmode = "full" # api = ["eth", "net", "web3", "txpool", "bor"] # vhosts = ["*"] # corsdomain = ["*"] +# ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -93,6 +97,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] +# ep-size = 40 +# ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -106,7 +112,7 @@ syncmode = "full" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] -# read = "30s" +# read = "10s" # write = "30s" # idle = "2m0s" diff --git a/cmd/clef/main.go b/cmd/clef/main.go index f7c3adebc44a..1bfb2610e532 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -656,7 +656,7 @@ func signer(c *cli.Context) error { vhosts := utils.SplitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name)) cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name)) - srv := rpc.NewServer() + srv := rpc.NewServer(0, 0) err := node.RegisterApis(rpcAPI, []string{"account"}, srv, false) if err != nil { utils.Fatalf("Could not register API: %w", err) diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index a920a1992dcd..04953bbc3c65 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -301,6 +301,14 @@ func (c *Bor) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Head return c.verifyHeader(chain, header, nil) } +func (c *Bor) GetSpanner() Spanner { + return c.spanner +} + +func (c *Bor) SetSpanner(spanner Spanner) { + c.spanner = spanner +} + // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The // method returns a quit channel to abort the operations and a results channel to // retrieve the async verifications (the order is that of the input slice). 
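// Note on the jsonrpc.timeouts change in builder/files/config.toml above: the default HTTP
// read timeout drops from 30s to 10s while write/idle stay at 30s / 2m0s. As a rough
// standalone illustration only (plain net/http, not bor's rpc stack), those values map
// onto a server roughly like this:
package main

import (
	"net/http"
	"time"
)

func newServerExample() *http.Server {
	return &http.Server{
		Addr:         "localhost:8545",
		ReadTimeout:  10 * time.Second, // lowered from 30s to shed slow or stalled readers sooner
		WriteTimeout: 30 * time.Second,
		IdleTimeout:  2 * time.Minute,
	}
}

func main() {
	_ = newServerExample()
}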
@@ -457,6 +465,33 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t return err } + // Verify the validator list match the local contract + if IsSprintStart(number+1, c.config.CalculateSprint(number)) { + newValidators, err := c.spanner.GetCurrentValidatorsByBlockNrOrHash(context.Background(), rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), number+1) + + if err != nil { + return err + } + + sort.Sort(valset.ValidatorsByAddress(newValidators)) + + headerVals, err := valset.ParseValidators(header.Extra[extraVanity : len(header.Extra)-extraSeal]) + + if err != nil { + return err + } + + if len(newValidators) != len(headerVals) { + return errInvalidSpanValidators + } + + for i, val := range newValidators { + if !bytes.Equal(val.HeaderBytes(), headerVals[i].HeaderBytes()) { + return errInvalidSpanValidators + } + } + } + // verify the validator list in the last sprint block if IsSprintStart(number, c.config.CalculateSprint(number)) { parentValidatorBytes := parent.Extra[extraVanity : len(parent.Extra)-extraSeal] @@ -534,7 +569,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co hash := checkpoint.Hash() // get validators and current span - validators, err := c.spanner.GetCurrentValidators(context.Background(), hash, number+1) + validators, err := c.spanner.GetCurrentValidatorsByHash(context.Background(), hash, number+1) if err != nil { return nil, err } @@ -704,7 +739,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e // get validator set if number if IsSprintStart(number+1, c.config.CalculateSprint(number)) { - newValidators, err := c.spanner.GetCurrentValidators(context.Background(), header.ParentHash, number+1) + newValidators, err := c.spanner.GetCurrentValidatorsByHash(context.Background(), header.ParentHash, number+1) if err != nil { return errUnknownValidators } @@ -1177,7 +1212,7 @@ func (c *Bor) CommitStates( processStart := time.Now() totalGas := 0 /// limit on gas for state sync per block chainID := c.chainConfig.ChainID.String() - stateSyncs := make([]*types.StateSyncData, len(eventRecords)) + stateSyncs := make([]*types.StateSyncData, 0, len(eventRecords)) var gasUsed uint64 @@ -1234,7 +1269,7 @@ func (c *Bor) SetHeimdallClient(h IHeimdallClient) { } func (c *Bor) GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) { - return c.spanner.GetCurrentValidators(ctx, headerHash, blockNumber) + return c.spanner.GetCurrentValidatorsByHash(ctx, headerHash, blockNumber) } // diff --git a/consensus/bor/heimdall/span/spanner.go b/consensus/bor/heimdall/span/spanner.go index e0f2d66c6bf5..9307a0337e11 100644 --- a/consensus/bor/heimdall/span/spanner.go +++ b/consensus/bor/heimdall/span/spanner.go @@ -89,7 +89,7 @@ func (c *ChainSpanner) GetCurrentSpan(ctx context.Context, headerHash common.Has } // GetCurrentValidators get current validators -func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) { +func (c *ChainSpanner) GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -107,16 +107,13 @@ func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash comm toAddress := c.validatorContractAddress gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2)) - // block - blockNr := 
rpc.BlockNumberOrHashWithHash(headerHash, false) - result, err := c.ethAPI.Call(ctx, ethapi.TransactionArgs{ Gas: &gas, To: &toAddress, Data: &msgData, - }, blockNr, nil) + }, blockNrOrHash, nil) if err != nil { - panic(err) + return nil, err } var ( @@ -144,6 +141,12 @@ func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash comm return valz, nil } +func (c *ChainSpanner) GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) { + blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false) + + return c.GetCurrentValidatorsByBlockNrOrHash(ctx, blockNr, blockNumber) +} + const method = "commitSpan" func (c *ChainSpanner) CommitSpan(ctx context.Context, heimdallSpan HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error { diff --git a/consensus/bor/span.go b/consensus/bor/span.go index 86a58fa42e4b..179f92c79c88 100644 --- a/consensus/bor/span.go +++ b/consensus/bor/span.go @@ -9,11 +9,13 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" ) //go:generate mockgen -destination=./span_mock.go -package=bor . Spanner type Spanner interface { GetCurrentSpan(ctx context.Context, headerHash common.Hash) (*span.Span, error) - GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) + GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) + GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) CommitSpan(ctx context.Context, heimdallSpan span.HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error } diff --git a/consensus/bor/span_mock.go b/consensus/bor/span_mock.go index 6d5f62e25d52..910e81716ca9 100644 --- a/consensus/bor/span_mock.go +++ b/consensus/bor/span_mock.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ethereum/go-ethereum/consensus/bor (interfaces: Spanner) +// Source: consensus/bor/span.go // Package bor is a generated GoMock package. package bor @@ -14,6 +14,7 @@ import ( core "github.com/ethereum/go-ethereum/core" state "github.com/ethereum/go-ethereum/core/state" types "github.com/ethereum/go-ethereum/core/types" + rpc "github.com/ethereum/go-ethereum/rpc" gomock "github.com/golang/mock/gomock" ) @@ -41,45 +42,60 @@ func (m *MockSpanner) EXPECT() *MockSpannerMockRecorder { } // CommitSpan mocks base method. -func (m *MockSpanner) CommitSpan(arg0 context.Context, arg1 span.HeimdallSpan, arg2 *state.StateDB, arg3 *types.Header, arg4 core.ChainContext) error { +func (m *MockSpanner) CommitSpan(ctx context.Context, heimdallSpan span.HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitSpan", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "CommitSpan", ctx, heimdallSpan, state, header, chainContext) ret0, _ := ret[0].(error) return ret0 } // CommitSpan indicates an expected call of CommitSpan. 
-func (mr *MockSpannerMockRecorder) CommitSpan(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockSpannerMockRecorder) CommitSpan(ctx, heimdallSpan, state, header, chainContext interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), ctx, heimdallSpan, state, header, chainContext) } // GetCurrentSpan mocks base method. -func (m *MockSpanner) GetCurrentSpan(arg0 context.Context, arg1 common.Hash) (*span.Span, error) { +func (m *MockSpanner) GetCurrentSpan(ctx context.Context, headerHash common.Hash) (*span.Span, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentSpan", arg0, arg1) + ret := m.ctrl.Call(m, "GetCurrentSpan", ctx, headerHash) ret0, _ := ret[0].(*span.Span) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCurrentSpan indicates an expected call of GetCurrentSpan. -func (mr *MockSpannerMockRecorder) GetCurrentSpan(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockSpannerMockRecorder) GetCurrentSpan(ctx, headerHash interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), ctx, headerHash) } -// GetCurrentValidators mocks base method. -func (m *MockSpanner) GetCurrentValidators(arg0 context.Context, arg1 common.Hash, arg2 uint64) ([]*valset.Validator, error) { +// GetCurrentValidatorsByBlockNrOrHash mocks base method. +func (m *MockSpanner) GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentValidators", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "GetCurrentValidatorsByBlockNrOrHash", ctx, blockNrOrHash, blockNumber) ret0, _ := ret[0].([]*valset.Validator) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetCurrentValidators indicates an expected call of GetCurrentValidators. -func (mr *MockSpannerMockRecorder) GetCurrentValidators(arg0, arg1, arg2 interface{}) *gomock.Call { +// GetCurrentValidatorsByBlockNrOrHash indicates an expected call of GetCurrentValidatorsByBlockNrOrHash. +func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByBlockNrOrHash(ctx, blockNrOrHash, blockNumber interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidators", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidators), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByBlockNrOrHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByBlockNrOrHash), ctx, blockNrOrHash, blockNumber) +} + +// GetCurrentValidatorsByHash mocks base method. +func (m *MockSpanner) GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentValidatorsByHash", ctx, headerHash, blockNumber) + ret0, _ := ret[0].([]*valset.Validator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentValidatorsByHash indicates an expected call of GetCurrentValidatorsByHash. 
+func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByHash(ctx, headerHash, blockNumber interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByHash), ctx, headerHash, blockNumber) } diff --git a/core/blockchain.go b/core/blockchain.go index 9f1a4f30a4de..680cb7dce62f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -18,21 +18,29 @@ package core import ( + "compress/gzip" + "context" "errors" "fmt" "io" "math/big" + "os" + "path/filepath" "sort" + "strings" "sync" "sync/atomic" "time" lru "github.com/hashicorp/golang-lru" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/common/tracing" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" @@ -1349,47 +1357,89 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. } // WriteBlockWithState writes the block and all associated state to the database. -func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { +func (bc *BlockChain) WriteBlockAndSetHead(ctx context.Context, block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { if !bc.chainmu.TryLock() { return NonStatTy, errChainStopped } defer bc.chainmu.Unlock() - return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent) + return bc.writeBlockAndSetHead(ctx, block, receipts, logs, state, emitHeadEvent) } // writeBlockAndSetHead writes the block and all associated state to the database, // and also it applies the given block as the new chain head. This function expects // the chain mutex to be held. 
-func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { +func (bc *BlockChain) writeBlockAndSetHead(ctx context.Context, block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { + writeBlockAndSetHeadCtx, span := tracing.StartSpan(ctx, "blockchain.writeBlockAndSetHead") + defer tracing.EndSpan(span) + var stateSyncLogs []*types.Log - if stateSyncLogs, err = bc.writeBlockWithState(block, receipts, logs, state); err != nil { + tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.writeBlockWithState", func(_ context.Context, span trace.Span) { + stateSyncLogs, err = bc.writeBlockWithState(block, receipts, logs, state) + tracing.SetAttributes( + span, + attribute.Int("number", int(block.Number().Uint64())), + attribute.Bool("error", err != nil), + ) + }) + + if err != nil { return NonStatTy, err } currentBlock := bc.CurrentBlock() - reorg, err := bc.forker.ReorgNeeded(currentBlock.Header(), block.Header()) + + var reorg bool + + tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.ReorgNeeded", func(_ context.Context, span trace.Span) { + reorg, err = bc.forker.ReorgNeeded(currentBlock.Header(), block.Header()) + tracing.SetAttributes( + span, + attribute.Int("number", int(block.Number().Uint64())), + attribute.Int("current block", int(currentBlock.Number().Uint64())), + attribute.Bool("reorg needed", reorg), + attribute.Bool("error", err != nil), + ) + }) if err != nil { return NonStatTy, err } - if reorg { - // Reorganise the chain if the parent is not the head block - if block.ParentHash() != currentBlock.Hash() { - if err := bc.reorg(currentBlock, block); err != nil { - return NonStatTy, err + tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.reorg", func(_ context.Context, span trace.Span) { + if reorg { + // Reorganise the chain if the parent is not the head block + if block.ParentHash() != currentBlock.Hash() { + if err = bc.reorg(currentBlock, block); err != nil { + status = NonStatTy + } } + status = CanonStatTy + } else { + status = SideStatTy } - status = CanonStatTy - } else { - status = SideStatTy + tracing.SetAttributes( + span, + attribute.Int("number", int(block.Number().Uint64())), + attribute.Int("current block", int(currentBlock.Number().Uint64())), + attribute.Bool("reorg needed", reorg), + attribute.Bool("error", err != nil), + attribute.String("status", string(status)), + ) + }) + + if status == NonStatTy { + return } + // Set new head. 
if status == CanonStatTy { - bc.writeHeadBlock(block) + tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.writeHeadBlock", func(_ context.Context, _ trace.Span) { + bc.writeHeadBlock(block) + }) } + bc.futureBlocks.Remove(block.Hash()) if status == CanonStatTy { @@ -1786,7 +1836,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) // Don't set the head, only insert the block _, err = bc.writeBlockWithState(block, receipts, logs, statedb) } else { - status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false) + status, err = bc.writeBlockAndSetHead(context.Background(), block, receipts, logs, statedb, false) } atomic.StoreUint32(&followupInterrupt, 1) if err != nil { @@ -2063,7 +2113,7 @@ func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log { receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) // Append bor receipt - borReceipt := rawdb.ReadBorReceipt(bc.db, hash, *number) + borReceipt := rawdb.ReadBorReceipt(bc.db, hash, *number, bc.chainConfig) if borReceipt != nil { receipts = append(receipts, borReceipt) } @@ -2195,6 +2245,35 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } else { // len(newChain) == 0 && len(oldChain) > 0 // rewind the canonical chain to a lower point. + + home, err := os.UserHomeDir() + if err != nil { + fmt.Println("Impossible reorg : Unable to get user home dir", "Error", err) + } + outPath := filepath.Join(home, "impossible-reorgs", fmt.Sprintf("%v-impossibleReorg", time.Now().Format(time.RFC3339))) + + if _, err := os.Stat(outPath); errors.Is(err, os.ErrNotExist) { + err := os.MkdirAll(outPath, os.ModePerm) + if err != nil { + log.Error("Impossible reorg : Unable to create Dir", "Error", err) + } + } else { + err = ExportBlocks(oldChain, filepath.Join(outPath, "oldChain.gz")) + if err != nil { + log.Error("Impossible reorg : Unable to export oldChain", "Error", err) + } + + err = ExportBlocks([]*types.Block{oldBlock}, filepath.Join(outPath, "oldBlock.gz")) + if err != nil { + log.Error("Impossible reorg : Unable to export oldBlock", "Error", err) + } + + err = ExportBlocks([]*types.Block{newBlock}, filepath.Join(outPath, "newBlock.gz")) + if err != nil { + log.Error("Impossible reorg : Unable to export newBlock", "Error", err) + } + } + log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain)) } // Insert the new chain(except the head block(reverse order)), @@ -2247,6 +2326,44 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { return nil } +// ExportBlocks exports blocks into the specified file, truncating any data +// already present in the file. +func ExportBlocks(blocks []*types.Block, fn string) error { + log.Info("Exporting blockchain", "file", fn) + + // Open the file handle and potentially wrap with a gzip stream + fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return err + } + defer fh.Close() + + var writer io.Writer = fh + if strings.HasSuffix(fn, ".gz") { + writer = gzip.NewWriter(writer) + defer writer.(*gzip.Writer).Close() + } + // Iterate over the blocks and export them + if err := ExportN(writer, blocks); err != nil { + return err + } + + log.Info("Exported blocks", "file", fn) + + return nil +} + +// ExportBlock writes a block to the given writer. 
+func ExportN(w io.Writer, blocks []*types.Block) error { + for _, block := range blocks { + if err := block.EncodeRLP(w); err != nil { + return err + } + } + + return nil +} + // InsertBlockWithoutSetHead executes the block, runs the necessary verification // upon it and then persist the block and the associate state into the database. // The key difference between the InsertChain is it won't do the canonical chain diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index f61f93049669..8405d4a54ccc 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -422,6 +422,10 @@ func (bc *BlockChain) SetStateSync(stateData []*types.StateSyncData) { bc.stateSyncData = stateData } +func (bc *BlockChain) GetStateSync() []*types.StateSyncData { + return bc.stateSyncData +} + // SubscribeStateSyncEvent registers a subscription of StateSyncEvent. func (bc *BlockChain) SubscribeStateSyncEvent(ch chan<- StateSyncEvent) event.Subscription { return bc.scope.Track(bc.stateSyncFeed.Subscribe(ch)) diff --git a/core/bor_blockchain.go b/core/bor_blockchain.go index ae2cdf3c6f65..49973421bdd8 100644 --- a/core/bor_blockchain.go +++ b/core/bor_blockchain.go @@ -19,7 +19,7 @@ func (bc *BlockChain) GetBorReceiptByHash(hash common.Hash) *types.Receipt { } // read bor reciept by hash and number - receipt := rawdb.ReadBorReceipt(bc.db, hash, *number) + receipt := rawdb.ReadBorReceipt(bc.db, hash, *number, bc.chainConfig) if receipt == nil { return nil } diff --git a/core/error.go b/core/error.go index 51ebefc137bc..234620ee4b6e 100644 --- a/core/error.go +++ b/core/error.go @@ -63,6 +63,10 @@ var ( // have enough funds for transfer(topmost call only). ErrInsufficientFundsForTransfer = errors.New("insufficient funds for transfer") + // ErrMaxInitCodeSizeExceeded is returned if creation transaction provides the init code bigger + // than init code size limit. + ErrMaxInitCodeSizeExceeded = errors.New("max initcode size exceeded") + // ErrInsufficientFunds is returned if the total cost of executing a transaction // is higher than the balance of the user's account. ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value") diff --git a/core/rawdb/bor_receipt.go b/core/rawdb/bor_receipt.go index d061dedc9e9e..0739c67a9f6b 100644 --- a/core/rawdb/bor_receipt.go +++ b/core/rawdb/bor_receipt.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" ) @@ -33,50 +34,28 @@ func borTxLookupKey(hash common.Hash) []byte { return append(borTxLookupPrefix, hash.Bytes()...) } -// HasBorReceipt verifies the existence of all block receipt belonging -// to a block. -func HasBorReceipt(db ethdb.Reader, hash common.Hash, number uint64) bool { - if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash { - return true - } - - if has, err := db.Has(borReceiptKey(number, hash)); !has || err != nil { - return false - } +func ReadBorReceiptRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { + var data []byte - return true -} + err := db.ReadAncients(func(reader ethdb.AncientReader) error { + // Check if the data is in ancients + if isCanon(reader, number, hash) { + data, _ = reader.Ancient(freezerBorReceiptTable, number) -// ReadBorReceiptRLP retrieves the block receipt belonging to a block in RLP encoding. 
-func ReadBorReceiptRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { - // First try to look up the data in ancient database. Extra hash - // comparison is necessary since ancient database only maintains - // the canonical data. - data, _ := db.Ancient(freezerBorReceiptTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data - } - } - // Then try to look up the data in leveldb. - data, _ = db.Get(borReceiptKey(number, hash)) - if len(data) > 0 { - return data - } - // In the background freezer is moving data from leveldb to flatten files. - // So during the first check for ancient db, the data is not yet in there, - // but when we reach into leveldb, the data was already moved. That would - // result in a not found error. - data, _ = db.Ancient(freezerBorReceiptTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data + return nil } + + // If not, try reading from leveldb + data, _ = db.Get(borReceiptKey(number, hash)) + + return nil + }) + + if err != nil { + log.Warn("during ReadBorReceiptRLP", "number", number, "hash", hash, "err", err) } - return nil // Can't find the data anywhere. + return data } // ReadRawBorReceipt retrieves the block receipt belonging to a block. @@ -102,7 +81,11 @@ func ReadRawBorReceipt(db ethdb.Reader, hash common.Hash, number uint64) *types. // ReadBorReceipt retrieves all the bor block receipts belonging to a block, including // its correspoinding metadata fields. If it is unable to populate these metadata // fields then nil is returned. -func ReadBorReceipt(db ethdb.Reader, hash common.Hash, number uint64) *types.Receipt { +func ReadBorReceipt(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) *types.Receipt { + if config != nil && config.Bor != nil && config.Bor.Sprint != nil && !config.Bor.IsSprintStart(number) { + return nil + } + // We're deriving many fields from the block body, retrieve beside the receipt borReceipt := ReadRawBorReceipt(db, hash, number) if borReceipt == nil { diff --git a/core/tests/blockchain_repair_test.go b/core/tests/blockchain_repair_test.go index e27b37693125..0d4a86b06978 100644 --- a/core/tests/blockchain_repair_test.go +++ b/core/tests/blockchain_repair_test.go @@ -1796,7 +1796,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: miner.TestBankAddress, diff --git a/core/tx_list.go b/core/tx_list.go index fea4434b9be5..851f732905e7 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -19,6 +19,7 @@ package core import ( "container/heap" "math" + "math/big" "sort" "sync" "sync/atomic" @@ -29,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" ) // nonceHeap is a heap.Interface implementation over 64bit unsigned integers for @@ -149,19 +151,38 @@ func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transac removed := m.filter(filter) // If transactions were removed, the heap and cache are 
ruined if len(removed) > 0 { - m.reheap() + m.reheap(false) } return removed } -func (m *txSortedMap) reheap() { - *m.index = make([]uint64, 0, len(m.items)) +func (m *txSortedMap) reheap(withRlock bool) { + index := make(nonceHeap, 0, len(m.items)) + + if withRlock { + m.m.RLock() + log.Info("[DEBUG] Acquired lock over txpool map while performing reheap") + } for nonce := range m.items { - *m.index = append(*m.index, nonce) + index = append(index, nonce) } - heap.Init(m.index) + if withRlock { + m.m.RUnlock() + } + + heap.Init(&index) + + if withRlock { + m.m.Lock() + } + + m.index = &index + + if withRlock { + m.m.Unlock() + } m.cacheMu.Lock() m.cache = nil @@ -402,16 +423,18 @@ type txList struct { strict bool // Whether nonces are strictly continuous or not txs *txSortedMap // Heap indexed sorted hash map of the transactions - costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance) - gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) + costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance) + gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) + totalcost *big.Int // Total cost of all transactions in the list } // newTxList create a new transaction list for maintaining nonce-indexable fast, // gapped, sortable transaction lists. func newTxList(strict bool) *txList { return &txList{ - strict: strict, - txs: newTxSortedMap(), + strict: strict, + txs: newTxSortedMap(), + totalcost: new(big.Int), } } @@ -450,8 +473,13 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran if tx.GasFeeCapUIntLt(thresholdFeeCap) || tx.GasTipCapUIntLt(thresholdTip) { return false, nil } + // Old is being replaced, subtract old cost + l.subTotalCost([]*types.Transaction{old}) } + // Add new tx cost to totalcost + l.totalcost.Add(l.totalcost, tx.Cost()) + // Otherwise overwrite the old transaction with the current one l.txs.Put(tx) @@ -470,7 +498,10 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran // provided threshold. Every removed transaction is returned for any post-removal // maintenance. func (l *txList) Forward(threshold uint64) types.Transactions { - return l.txs.Forward(threshold) + txs := l.txs.Forward(threshold) + l.subTotalCost(txs) + + return txs } // Filter removes all transactions from the list with a cost or gas limit higher @@ -510,16 +541,27 @@ func (l *txList) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transact lowest = nonce } } + + l.txs.m.Lock() invalids = l.txs.filter(func(tx *types.Transaction) bool { return tx.Nonce() > lowest }) + l.txs.m.Unlock() } - l.txs.reheap() + // Reset total cost + l.subTotalCost(removed) + l.subTotalCost(invalids) + + l.txs.reheap(true) + return removed, invalids } // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. 
func (l *txList) Cap(threshold int) types.Transactions { - return l.txs.Cap(threshold) + txs := l.txs.Cap(threshold) + l.subTotalCost(txs) + + return txs } // Remove deletes a transaction from the maintained list, returning whether the @@ -531,9 +573,14 @@ func (l *txList) Remove(tx *types.Transaction) (bool, types.Transactions) { if removed := l.txs.Remove(nonce); !removed { return false, nil } + + l.subTotalCost([]*types.Transaction{tx}) // In strict mode, filter out non-executable transactions if l.strict { - return true, l.txs.Filter(func(tx *types.Transaction) bool { return tx.Nonce() > nonce }) + txs := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Nonce() > nonce }) + l.subTotalCost(txs) + + return true, txs } return true, nil } @@ -546,7 +593,10 @@ func (l *txList) Remove(tx *types.Transaction) (bool, types.Transactions) { // prevent getting into and invalid state. This is not something that should ever // happen but better to be self correcting than failing! func (l *txList) Ready(start uint64) types.Transactions { - return l.txs.Ready(start) + txs := l.txs.Ready(start) + l.subTotalCost(txs) + + return txs } // Len returns the length of the transaction list. @@ -576,6 +626,14 @@ func (l *txList) Has(nonce uint64) bool { return l != nil && l.txs.items[nonce] != nil } +// subTotalCost subtracts the cost of the given transactions from the +// total cost of all transactions. +func (l *txList) subTotalCost(txs []*types.Transaction) { + for _, tx := range txs { + l.totalcost.Sub(l.totalcost, tx.Cost()) + } +} + // priceHeap is a heap.Interface implementation over transactions for retrieving // price-sorted transactions to discard when the pool fills up. If baseFee is set // then the heap is sorted based on the effective tip based on the given base fee. diff --git a/core/tx_pool.go b/core/tx_pool.go index 25819c845389..f33f075260b4 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -17,8 +17,10 @@ package core import ( + "container/heap" "context" "errors" + "fmt" "math" "math/big" "sort" @@ -90,6 +92,14 @@ var ( // than some meaningful limit a user might use. This is not a consensus error // making the transaction invalid, rather a DOS protection. ErrOversizedData = errors.New("oversized data") + + // ErrFutureReplacePending is returned if a future transaction replaces a pending + // transaction. Future transactions should only be able to replace other future transactions. + ErrFutureReplacePending = errors.New("future transaction tries to replace pending") + + // ErrOverdraft is returned if a transaction would cause the senders balance to go negative + // thus invalidating a potential large number of transactions. + ErrOverdraft = errors.New("transaction would cause overdraft") ) var ( @@ -729,7 +739,11 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if uint64(tx.Size()) > txMaxSize { return ErrOversizedData } - + // Check whether the init code size has been exceeded. + // (TODO): Add a hardfork check here while pulling upstream changes. + if tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { + return fmt.Errorf("%w: code size %v limit %v", ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) + } // Transactions can't be negative. This may never happen using RLP decoded // transactions but may occur if you create a transaction using the RPC. 
if tx.Value().Sign() < 0 { @@ -783,10 +797,24 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { // Transactor should have enough funds to cover the costs // cost == V + GP * GL - if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { + balance := pool.currentState.GetBalance(from) + if balance.Cmp(tx.Cost()) < 0 { return ErrInsufficientFunds } + // Verify that replacing transactions will not result in overdraft + list := pool.pending[from] + if list != nil { // Sender already has pending txs + sum := new(big.Int).Add(tx.Cost(), list.totalcost) + if repl := list.txs.Get(tx.Nonce()); repl != nil { + // Deduct the cost of a transaction replaced by this + sum.Sub(sum, repl.Cost()) + } + if balance.Cmp(sum) < 0 { + log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum) + return ErrOverdraft + } + } // Ensure the transaction has more gas than the basic tx fee. intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul) if err != nil { @@ -825,6 +853,10 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e invalidTxMeter.Mark(1) return false, err } + + // already validated by this point + from, _ := types.Sender(pool.signer, tx) + // If the transaction pool is full, discard underpriced transactions if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it @@ -833,6 +865,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e underpricedTxMeter.Mark(1) return false, ErrUnderpriced } + // We're about to replace a transaction. The reorg does a more thorough // analysis of what to remove and how, but it runs async. We don't want to // do too many replacements between reorg-runs, so we cap the number of @@ -853,18 +886,39 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e overflowedTxMeter.Mark(1) return false, ErrTxPoolOverflow } - // Bump the counter of rejections-since-reorg - pool.changesSinceReorg += len(drop) + // If the new transaction is a future transaction it should never churn pending transactions + if pool.isFuture(from, tx) { + var replacesPending bool + + for _, dropTx := range drop { + dropSender, _ := types.Sender(pool.signer, dropTx) + if list := pool.pending[dropSender]; list != nil && list.Overlaps(dropTx) { + replacesPending = true + break + } + } + // Add all transactions back to the priced queue + if replacesPending { + for _, dropTx := range drop { + heap.Push(&pool.priced.urgent, dropTx) + } + + log.Trace("Discarding future transaction replacing pending tx", "hash", hash) + + return false, ErrFutureReplacePending + } + } // Kick out the underpriced remote transactions. 
for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint()) underpricedTxMeter.Mark(1) - pool.removeTx(tx.Hash(), false) + + dropped := pool.removeTx(tx.Hash(), false) + pool.changesSinceReorg += dropped } } - // Try to replace an existing transaction in the pending pool - from, _ := types.Sender(pool.signer, tx) // already validated + // Try to replace an existing transaction in the pending pool pool.pendingMu.RLock() list := pool.pending[from] @@ -922,6 +976,20 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e return replaced, nil } +// isFuture reports whether the given transaction is immediately executable. +func (pool *TxPool) isFuture(from common.Address, tx *types.Transaction) bool { + list := pool.pending[from] + if list == nil { + return pool.pendingNonces.get(from) != tx.Nonce() + } + // Sender has pending transactions. + if old := list.txs.Get(tx.Nonce()); old != nil { + return false // It replaces a pending transaction. + } + // Not replacing, check if parent nonce exists in pending. + return list.txs.Get(tx.Nonce()-1) == nil +} + // enqueueTx inserts a new transaction into the non-executable transaction queue. // // Note, this method assumes the pool lock is held! @@ -1095,14 +1163,13 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { continue } - // Exclude transactions with invalid signatures as soon as - // possible and cache senders in transactions before - // obtaining lock - if pool.config.AllowUnprotectedTxs { pool.signer = types.NewFakeSigner(tx.ChainId()) } + // Exclude transactions with invalid signatures as soon as + // possible and cache senders in transactions before + // obtaining lock _, err = types.Sender(pool.signer, tx) if err != nil { errs = append(errs, ErrInvalidSender) @@ -1163,9 +1230,8 @@ func (pool *TxPool) addTx(tx *types.Transaction, local, sync bool) error { _, err = types.Sender(pool.signer, tx) if err != nil { invalidTxMeter.Mark(1) + return - } else { - err = nil } }() @@ -1294,11 +1360,12 @@ func (pool *TxPool) Has(hash common.Hash) bool { // removeTx removes a single transaction from the queue, moving all subsequent // transactions back to the future queue. -func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { +// Returns the number of transactions removed from the pending queue. +func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int { // Fetch the transaction we wish to delete tx := pool.all.Get(hash) if tx == nil { - return + return 0 } addr, _ := types.Sender(pool.signer, tx) // already validated during insertion @@ -1339,7 +1406,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // Reduce the pending counter pendingGauge.Dec(int64(1 + len(invalids))) - return + return 1 + len(invalids) } pool.pendingMu.TryLock() @@ -1359,6 +1426,8 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { delete(pool.beats, addr) } } + + return 0 } // requestReset requests a pool reset to the new head block. 
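
Note on the core/tx_pool.go hunks above: they combine two protections. txList.totalcost keeps a running sum of the costs of an account's pending transactions so that validateTx can reject an incoming transaction whose cost, added to that sum (minus the cost of any same-nonce transaction it replaces), exceeds the sender's balance; and isFuture prevents future transactions from churning pending ones out of a full pool. The standalone sketch below is not part of the patch and only illustrates the overdraft arithmetic, using hypothetical names and plain math/big values rather than the pool's internal types:

package main

import (
	"errors"
	"fmt"
	"math/big"
)

var errOverdraft = errors.New("transaction would cause overdraft")

// wouldOverdraft mirrors the shape of the validateTx check: the new cost is
// added to the sender's running pending total, the cost of any same-nonce
// transaction being replaced is subtracted, and the sum is compared against
// the sender's balance.
func wouldOverdraft(balance, pendingTotal, newCost, replacedCost *big.Int) error {
	sum := new(big.Int).Add(newCost, pendingTotal)
	if replacedCost != nil {
		sum.Sub(sum, replacedCost)
	}
	if balance.Cmp(sum) < 0 {
		return fmt.Errorf("%w: balance %v, required %v", errOverdraft, balance, sum)
	}
	return nil
}

func main() {
	balance := big.NewInt(1_000_000)
	pendingTotal := big.NewInt(900_000) // analogous to txList.totalcost

	// Replacing a 200k tx with a 250k tx still fits: 900k - 200k + 250k <= 1M.
	fmt.Println(wouldOverdraft(balance, pendingTotal, big.NewInt(250_000), big.NewInt(200_000)))

	// A fresh 150k tx with nothing replaced would overdraft: 900k + 150k > 1M.
	fmt.Println(wouldOverdraft(balance, pendingTotal, big.NewInt(150_000), nil))
}

Maintaining the total incrementally on Add/Forward/Cap/Remove/Ready (via subTotalCost) avoids re-summing the whole pending list for every incoming transaction.
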
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 900462282840..38d19f4cafbd 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -183,6 +183,10 @@ func validateTxPoolInternals(pool *TxPool) error { if nonce := pool.pendingNonces.get(addr); nonce != last+1 { return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1) } + + if txs.totalcost.Cmp(common.Big0) < 0 { + return fmt.Errorf("totalcost went negative: %v", txs.totalcost) + } } return nil @@ -1262,7 +1266,7 @@ func TestTransactionPendingLimiting(t *testing.T) { defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) - testAddBalance(pool, account, big.NewInt(1000000)) + testAddBalance(pool, account, big.NewInt(1000000000000)) // Keep track of transaction events to ensure all executables get announced events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5) @@ -1802,7 +1806,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { keys := make([]*ecdsa.PrivateKey, 3) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000*1000000)) + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000)) } // Create transaction (both pending and queued) with a linearly growing gasprice for i := uint64(0); i < 500; i++ { @@ -1881,7 +1885,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { defer sub.Unsubscribe() // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 4) + keys := make([]*ecdsa.PrivateKey, 5) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) @@ -1917,6 +1921,10 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } + // Replace a future transaction with a future transaction + if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("failed to add well priced transaction: %v", err) + } // Ensure that adding high priced transactions drops cheap ones, but not own if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) @@ -1927,6 +1935,10 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if err := pool.AddRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } + // Ensure that replacing a pending transaction with a future transaction fails + if err := pool.AddRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != ErrFutureReplacePending { + t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, ErrFutureReplacePending) + } pending, queued = pool.Stats() if pending != 2 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) @@ -1935,7 +1947,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateEvents(events, 1); err != nil { + if err := 
validateEvents(events, 2); err != nil { t.Fatalf("additional event firing failed: %v", err) } @@ -2098,12 +2110,12 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { t.Fatalf("failed to add well priced transaction: %v", err) } - tx = pricedTransaction(2, 100000, big.NewInt(3), keys[1]) + tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1]) if err := pool.AddRemote(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - tx = dynamicFeeTx(3, 100000, big.NewInt(4), big.NewInt(1), keys[1]) + tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1]) if err := pool.AddRemote(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } @@ -2115,7 +2127,7 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateEvents(events, 1); err != nil { + if err := validateEvents(events, 2); err != nil { t.Fatalf("additional event firing failed: %v", err) } @@ -2730,7 +2742,7 @@ func BenchmarkPoolBatchInsert(b *testing.B) { defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) - testAddBalance(pool, account, big.NewInt(1000000)) + testAddBalance(pool, account, big.NewInt(1000000000000000000)) const format = "size %d, is local %t" diff --git a/core/txpool2_test.go b/core/txpool2_test.go new file mode 100644 index 000000000000..45f784f343f8 --- /dev/null +++ b/core/txpool2_test.go @@ -0,0 +1,229 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+package core + +import ( + "crypto/ecdsa" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" +) + +func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(value), gaslimit, gasprice, nil), types.HomesteadSigner{}, key) + return tx +} + +func count(t *testing.T, pool *TxPool) (pending int, queued int) { + t.Helper() + + pending, queued = pool.stats() + + if err := validateTxPoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + return pending, queued +} + +func fillPool(t *testing.T, pool *TxPool) { + t.Helper() + // Create a number of test accounts, fund them and make transactions + executableTxs := types.Transactions{} + nonExecutableTxs := types.Transactions{} + + for i := 0; i < 384; i++ { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(10000000000)) + // Add executable ones + for j := 0; j < int(pool.config.AccountSlots); j++ { + executableTxs = append(executableTxs, pricedTransaction(uint64(j), 100000, big.NewInt(300), key)) + } + } + // Import the batch and verify that limits have been enforced + pool.AddRemotesSync(executableTxs) + pool.AddRemotesSync(nonExecutableTxs) + pending, queued := pool.Stats() + slots := pool.all.Slots() + // sanity-check that the test prerequisites are ok (pending full) + if have, want := pending, slots; have != want { + t.Fatalf("have %d, want %d", have, want) + } + + if have, want := queued, 0; have != want { + t.Fatalf("have %d, want %d", have, want) + } + + t.Logf("pool.config: GlobalSlots=%d, GlobalQueue=%d\n", pool.config.GlobalSlots, pool.config.GlobalQueue) + t.Logf("pending: %d queued: %d, all: %d\n", pending, queued, slots) +} + +// Tests that if a batch high-priced of non-executables arrive, they do not kick out +// executable transactions +func TestTransactionFutureAttack(t *testing.T) { + t.Parallel() + + // Create the pool to test the limit enforcement with + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + config := testTxPoolConfig + config.GlobalQueue = 100 + config.GlobalSlots = 100 + pool := NewTxPool(config, eip1559Config, blockchain) + + defer pool.Stop() + fillPool(t, pool) + pending, _ := pool.Stats() + // Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs := types.Transactions{} + for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ { + futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key)) + } + for i := 0; i < 5; i++ { + pool.AddRemotesSync(futureTxs) + newPending, newQueued := count(t, pool) + t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots()) + } + } + + newPending, _ := pool.Stats() + // Pending should not have been touched + if have, want := newPending, pending; have < want { + t.Errorf("wrong pending-count, 
have %d, want %d (GlobalSlots: %d)", + have, want, pool.config.GlobalSlots) + } +} + +// Tests that if a batch high-priced of non-executables arrive, they do not kick out +// executable transactions +func TestTransactionFuture1559(t *testing.T) { + t.Parallel() + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + + defer pool.Stop() + + // Create a number of test accounts, fund them and make transactions + fillPool(t, pool) + pending, _ := pool.Stats() + + // Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs := types.Transactions{} + for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ { + futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key)) + } + pool.AddRemotesSync(futureTxs) + } + + newPending, _ := pool.Stats() + // Pending should not have been touched + if have, want := newPending, pending; have != want { + t.Errorf("Wrong pending-count, have %d, want %d (GlobalSlots: %d)", + have, want, pool.config.GlobalSlots) + } +} + +// Tests that if a batch of balance-overdraft txs arrive, they do not kick out +// executable transactions +func TestTransactionZAttack(t *testing.T) { + t.Parallel() + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := &testBlockChain{1000000, statedb, new(event.Feed)} + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + + defer pool.Stop() + + // Create a number of test accounts, fund them and make transactions + fillPool(t, pool) + + countInvalidPending := func() int { + t.Helper() + + var ivpendingNum int + + pendingtxs, _ := pool.Content() + + for account, txs := range pendingtxs { + cur_balance := new(big.Int).Set(pool.currentState.GetBalance(account)) + for _, tx := range txs { + if cur_balance.Cmp(tx.Value()) <= 0 { + ivpendingNum++ + } else { + cur_balance.Sub(cur_balance, tx.Value()) + } + } + } + + if err := validateTxPoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + return ivpendingNum + } + ivPending := countInvalidPending() + t.Logf("invalid pending: %d\n", ivPending) + + // Now, DETER-Z attack starts, let's add a bunch of expensive non-executables (from N accounts) along with balance-overdraft txs (from one account), and see if the pending-count drops + for j := 0; j < int(pool.config.GlobalQueue); j++ { + futureTxs := types.Transactions{} + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key)) + pool.AddRemotesSync(futureTxs) + } + + overDraftTxs := types.Transactions{} + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + for j := 0; j < int(pool.config.GlobalSlots); j++ { + overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 60000000000, 21000, big.NewInt(500), key)) + } + } + 
pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + + newPending, newQueued := count(t, pool) + newIvPending := countInvalidPending() + + t.Logf("pool.all.Slots(): %d\n", pool.all.Slots()) + t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots()) + t.Logf("invalid pending: %d\n", newIvPending) + + // Pending should not have been touched + if newIvPending != ivPending { + t.Errorf("Wrong invalid pending-count, have %d, want %d (GlobalSlots: %d, queued: %d)", + newIvPending, ivPending, pool.config.GlobalSlots, newQueued) + } +} diff --git a/core/vm/contracts.go b/core/vm/contracts.go index c5304790fa3a..681f954cbe96 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/bn256" "github.com/ethereum/go-ethereum/params" - //lint:ignore SA1019 Needed for precompile + big2 "github.com/holiman/big" "golang.org/x/crypto/ripemd160" ) @@ -381,17 +381,24 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) { } // Retrieve the operands and execute the exponentiation var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + base = new(big2.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(big2.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(big2.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + v []byte ) - if mod.BitLen() == 0 { + switch { + case mod.BitLen() == 0: // Modulo 0 is undefined, return zero return common.LeftPadBytes([]byte{}, int(modLen)), nil + case base.BitLen() == 1: // a bit length of 1 means it's 1 (or -1). + //If base == 1, then we can just return base % mod (if mod >= 1, which it is) + v = base.Mod(base, mod).Bytes() + default: + v = base.Exp(base, exp, mod).Bytes() } - return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil + return common.LeftPadBytes(v, int(modLen)), nil } // newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point, diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml index 040c5ba63d25..bae7b038e235 100644 --- a/docs/cli/example_config.toml +++ b/docs/cli/example_config.toml @@ -2,19 +2,21 @@ # The default value of the flags is provided below (except a few flags which has custom defaults which are explicitly mentioned). # Recommended values for mainnet and/or mumbai are also provided. 
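
Note on the bigModExp hunk a few sections up: the precompile switches to the github.com/holiman/big package and adds a shortcut for a one-bit base, since the precompile's inputs are non-negative a bit length of 1 means the base is 1, and 1^exp mod m is simply 1 mod m for any exponent (given m >= 1, the m == 0 case having been handled earlier). The tiny demonstration below is not part of the patch and uses the standard library's math/big (not the package the patch adopts) purely to show that the shortcut and the general path agree:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	base := big.NewInt(1)
	exp := new(big.Int).Exp(big.NewInt(10), big.NewInt(50), nil) // a very large exponent
	mod := big.NewInt(7919)

	general := new(big.Int).Exp(base, exp, mod)      // general path: base^exp mod m
	shortcut := new(big.Int).Mod(big.NewInt(1), mod) // shortcut path: 1 mod m

	fmt.Println(general, shortcut, general.Cmp(shortcut) == 0) // 1 1 true
}
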
-chain = "mainnet" # Name of the chain to sync ("mumbai", "mainnet") or path to a genesis file -identity = "Annon-Identity" # Name/Identity of the node (default = OS hostname) -verbosity = 3 # Logging verbosity for the server (5=trace|4=debug|3=info|2=warn|1=error|0=crit) (`log-level` was replaced by `verbosity`, and thus will be deprecated soon) -vmdebug = false # Record information useful for VM and contract debugging -datadir = "var/lib/bor" # Path of the data directory to store information -ancient = "" # Data directory for ancient chain segments (default = inside chaindata) -keystore = "" # Path of the directory where keystores are located -syncmode = "full" # Blockchain sync mode (only "full" sync supported) -gcmode = "full" # Blockchain garbage collection mode ("full", "archive") -snapshot = true # Enables the snapshot-database mode -"bor.logs" = false # Enables bor log retrieval -ethstats = "" # Reporting URL of a ethstats service (nodename:secret@host:port) -devfakeauthor = false # Run miner without validator set authorization [dev mode] : Use with '--bor.withoutheimdall' (default: false) +chain = "mainnet" # Name of the chain to sync ("mumbai", "mainnet") or path to a genesis file +identity = "Annon-Identity" # Name/Identity of the node (default = OS hostname) +verbosity = 3 # Logging verbosity for the server (5=trace|4=debug|3=info|2=warn|1=error|0=crit) (`log-level` was replaced by `verbosity`, and thus will be deprecated soon) +vmdebug = false # Record information useful for VM and contract debugging +datadir = "var/lib/bor" # Path of the data directory to store information +ancient = "" # Data directory for ancient chain segments (default = inside chaindata) +keystore = "" # Path of the directory where keystores are located +"rpc.batchlimit" = 100 # Maximum number of messages in a batch (default=100, use 0 for no limits) +"rpc.returndatalimit" = 100000 # Maximum size (in bytes) a result of an rpc request could have (default=100000, use 0 for no limits) +syncmode = "full" # Blockchain sync mode (only "full" sync supported) +gcmode = "full" # Blockchain garbage collection mode ("full", "archive") +snapshot = true # Enables the snapshot-database mode +"bor.logs" = false # Enables bor log retrieval +ethstats = "" # Reporting URL of a ethstats service (nodename:secret@host:port) +devfakeauthor = false # Run miner without validator set authorization [dev mode] : Use with '--bor.withoutheimdall' (default: false) ["eth.requiredblocks"] # Comma separated block number-to-hash mappings to require for peering (=) (default = empty map) "31000000" = "0x2087b9e2b353209c2c21e370c82daa12278efd0fe5f0febe6c29035352cf050e" @@ -86,6 +88,8 @@ devfakeauthor = false # Run miner without validator set authorization [de api = ["eth", "net", "web3", "txpool", "bor"] # API's offered over the HTTP-RPC interface vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. 
corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced) + ep-size = 40 # Maximum size of workers to run in rpc execution pool for HTTP requests (default: 40) + ep-requesttimeout = "0s" # Request Timeout for rpc execution pool for HTTP requests (default: 0s, 0s = disabled) [jsonrpc.ws] enabled = false # Enable the WS-RPC server port = 8546 # WS-RPC server listening port @@ -93,6 +97,8 @@ devfakeauthor = false # Run miner without validator set authorization [de host = "localhost" # ws.addr api = ["net", "web3"] # API's offered over the WS-RPC interface origins = ["localhost"] # Origins from which to accept websockets requests + ep-size = 40 # Maximum size of workers to run in rpc execution pool for WS requests (default: 40) + ep-requesttimeout = "0s" # Request Timeout for rpc execution pool for WS requests (default: 0s, 0s = disabled) [jsonrpc.graphql] enabled = false # Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. port = 0 # @@ -106,7 +112,7 @@ devfakeauthor = false # Run miner without validator set authorization [de port = 8551 # Listening port for authenticated APIs vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. [jsonrpc.timeouts] - read = "30s" + read = "10s" write = "30s" idle = "2m0s" diff --git a/docs/cli/server.md b/docs/cli/server.md index 4ac024491aee..1c5d85d1a15d 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -20,6 +20,10 @@ The ```bor server``` command runs the Bor client. - ```keystore```: Path of the directory where keystores are located +- ```rpc.batchlimit```: Maximum number of messages in a batch (default=100, use 0 for no limits) (default: 100) + +- ```rpc.returndatalimit```: Maximum size (in bytes) a result of an rpc request could have (default=100000, use 0 for no limits) (default: 100000) + - ```config```: Path to the TOML configuration file - ```syncmode```: Blockchain sync mode (only "full" sync supported) (default: full) @@ -44,6 +48,8 @@ The ```bor server``` command runs the Bor client. - ```bor.runheimdallargs```: Arguments to pass to Heimdall service +- ```bor.useheimdallapp```: Use child heimdall process to fetch data, Only works when bor.runheimdall is true (default: false) + - ```ethstats```: Reporting URL of a ethstats service (nodename:secret@host:port) - ```gpo.blocks```: Number of recent blocks to check for gas prices (default: 20) @@ -156,6 +162,10 @@ The ```bor server``` command runs the Bor client. - ```http.api```: API's offered over the HTTP-RPC interface (default: eth,net,web3,txpool,bor) +- ```http.ep-size```: Maximum size of workers to run in rpc execution pool for HTTP requests (default: 40) + +- ```http.ep-requesttimeout```: Request Timeout for rpc execution pool for HTTP requests (default: 0s) + - ```ws```: Enable the WS-RPC server (default: false) - ```ws.addr```: WS-RPC server listening interface (default: localhost) @@ -166,6 +176,10 @@ The ```bor server``` command runs the Bor client. - ```ws.api```: API's offered over the WS-RPC interface (default: net,web3) +- ```ws.ep-size```: Maximum size of workers to run in rpc execution pool for WS requests (default: 40) + +- ```ws.ep-requesttimeout```: Request Timeout for rpc execution pool for WS requests (default: 0s) + - ```graphql```: Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well. 
(default: false) ### Logging Options diff --git a/eth/api_backend.go b/eth/api_backend.go index 2c93e60d871e..c8825dc58290 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -324,6 +324,10 @@ func (b *EthAPIBackend) RPCGasCap() uint64 { return b.eth.config.RPCGasCap } +func (b *EthAPIBackend) RPCRpcReturnDataLimit() uint64 { + return b.eth.config.RPCReturnDataLimit +} + func (b *EthAPIBackend) RPCEVMTimeout() time.Duration { return b.eth.config.RPCEVMTimeout } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 133f21432e60..7581e16761b1 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -95,11 +95,12 @@ var Defaults = Config{ GasPrice: big.NewInt(params.GWei), Recommit: 125 * time.Second, }, - TxPool: core.DefaultTxPoolConfig, - RPCGasCap: 50000000, - RPCEVMTimeout: 5 * time.Second, - GPO: FullNodeGPO, - RPCTxFeeCap: 5, // 5 matic + TxPool: core.DefaultTxPoolConfig, + RPCGasCap: 50000000, + RPCReturnDataLimit: 100000, + RPCEVMTimeout: 5 * time.Second, + GPO: FullNodeGPO, + RPCTxFeeCap: 5, // 5 matic } func init() { @@ -200,6 +201,9 @@ type Config struct { // RPCGasCap is the global gas cap for eth-call variants. RPCGasCap uint64 + // Maximum size (in bytes) a result of an rpc request could have + RPCReturnDataLimit uint64 + // RPCEVMTimeout is the global timeout for eth-call. RPCEVMTimeout time.Duration diff --git a/eth/filters/bor_api.go b/eth/filters/bor_api.go index 2666b1da32f5..4dd557cccce2 100644 --- a/eth/filters/bor_api.go +++ b/eth/filters/bor_api.go @@ -1,7 +1,6 @@ package filters import ( - "bytes" "context" "errors" @@ -19,7 +18,7 @@ func (api *PublicFilterAPI) SetChainConfig(chainConfig *params.ChainConfig) { func (api *PublicFilterAPI) GetBorBlockLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { if api.chainConfig == nil { - return nil, errors.New("No chain config found. Proper PublicFilterAPI initialization required") + return nil, errors.New("no chain config found. 
Proper PublicFilterAPI initialization required") } // get sprint from bor config @@ -68,9 +67,8 @@ func (api *PublicFilterAPI) NewDeposits(ctx context.Context, crit ethereum.State for { select { case h := <-stateSyncData: - // nolint : gosimple - if crit.ID == h.ID || bytes.Compare(crit.Contract.Bytes(), h.Contract.Bytes()) == 0 || - (crit.ID == 0 && crit.Contract == common.Address{}) { + if h != nil && (crit.ID == h.ID || crit.Contract == h.Contract || + (crit.ID == 0 && crit.Contract == common.Address{})) { notifier.Notify(rpcSub.ID, h) } case <-rpcSub.Err(): diff --git a/eth/filters/test_backend.go b/eth/filters/test_backend.go index 979ed3efb6f5..8b2ef4a7f29f 100644 --- a/eth/filters/test_backend.go +++ b/eth/filters/test_backend.go @@ -38,7 +38,7 @@ func (b *TestBackend) GetBorBlockReceipt(ctx context.Context, hash common.Hash) return &types.Receipt{}, nil } - receipt := rawdb.ReadBorReceipt(b.DB, hash, *number) + receipt := rawdb.ReadBorReceipt(b.DB, hash, *number, nil) if receipt == nil { return &types.Receipt{}, nil } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 3fce91ac9c3c..13f5c627cd82 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -177,7 +177,7 @@ func (api *API) getAllBlockTransactions(ctx context.Context, block *types.Block) stateSyncPresent := false - borReceipt := rawdb.ReadBorReceipt(api.backend.ChainDb(), block.Hash(), block.NumberU64()) + borReceipt := rawdb.ReadBorReceipt(api.backend.ChainDb(), block.Hash(), block.NumberU64(), api.backend.ChainConfig()) if borReceipt != nil { txHash := types.GetDerivedBorTxHash(types.BorReceiptKey(block.Number().Uint64(), block.Hash())) if txHash != (common.Hash{}) { diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 6dd94e487089..d394e4fbe35f 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -126,6 +126,10 @@ func (b *testBackend) RPCGasCap() uint64 { return 25000000 } +func (b *testBackend) RPCRpcReturnDataLimit() uint64 { + return 100000 +} + func (b *testBackend) ChainConfig() *params.ChainConfig { return b.chainConfig } diff --git a/go.mod b/go.mod index d9a7963f59cd..ddfa2e0feb43 100644 --- a/go.mod +++ b/go.mod @@ -7,19 +7,19 @@ require ( github.com/BurntSushi/toml v1.1.0 github.com/JekaMas/crand v1.0.1 github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d + github.com/JekaMas/workerpool v1.1.5 github.com/VictoriaMetrics/fastcache v1.6.0 github.com/aws/aws-sdk-go-v2 v1.2.0 github.com/aws/aws-sdk-go-v2/config v1.1.1 github.com/aws/aws-sdk-go-v2/credentials v1.1.1 github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 - github.com/btcsuite/btcd v0.22.0-beta // indirect - github.com/btcsuite/btcd/btcec/v2 v2.1.2 + github.com/btcsuite/btcd/btcec/v2 v2.1.3 github.com/cespare/cp v1.1.1 github.com/cloudflare/cloudflare-go v0.14.0 github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 - github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf + github.com/docker/docker v1.6.1 github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 github.com/edsrzf/mmap-go v1.0.0 github.com/fatih/color v1.9.0 @@ -38,6 +38,7 @@ require ( github.com/hashicorp/go-bexpr v0.1.10 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/hashicorp/hcl/v2 v2.10.1 + github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.2.0 github.com/huin/goupnp v1.0.3 @@ -71,12 +72,12 @@ require ( 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0 go.opentelemetry.io/otel/sdk v1.2.0 go.uber.org/goleak v1.1.12 - golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 - golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 - golang.org/x/text v0.4.0 + golang.org/x/crypto v0.1.0 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.6.0 + golang.org/x/text v0.8.0 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - golang.org/x/tools v0.1.12 + golang.org/x/tools v0.6.0 gonum.org/v1/gonum v0.11.0 google.golang.org/grpc v1.51.0 google.golang.org/protobuf v1.28.1 @@ -87,12 +88,19 @@ require ( pgregory.net/rapid v0.4.8 ) +require ( + github.com/btcsuite/btcd v0.22.0-beta // indirect + github.com/gammazero/deque v0.2.1 // indirect + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect +) + require ( cloud.google.com/go v0.65.0 // indirect cloud.google.com/go/pubsub v1.3.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect - github.com/Masterminds/goutils v1.1.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/RichardKnop/logging v0.0.0-20190827224416-1a693bdd4fae // indirect @@ -116,7 +124,7 @@ require ( github.com/cbergoon/merkletree v0.2.0 // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cosmos/cosmos-sdk v0.37.4 // indirect + github.com/cosmos/cosmos-sdk v0.37.4 github.com/cosmos/go-bip39 v0.0.0-20180618194314-52158e4697b8 // indirect github.com/cosmos/ledger-cosmos-go v0.10.3 // indirect github.com/cosmos/ledger-go v0.9.2 // indirect @@ -145,7 +153,7 @@ require ( github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 + github.com/json-iterator/go v1.1.12 // indirect github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/klauspost/compress v1.13.6 // indirect @@ -197,11 +205,9 @@ require ( go.opentelemetry.io/otel/trace v1.2.0 go.opentelemetry.io/proto/otlp v0.10.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/net v0.0.0-20220728030405-41545e8bf201 // indirect - golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/term v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect google.golang.org/api v0.34.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index b3aac45f243a..844cb0e86caf 100644 --- a/go.sum +++ b/go.sum @@ -52,6 +52,8 @@ github.com/JekaMas/crand v1.0.1 h1:FMPxkUQqH/hExl0aUXsr0UCGYZ4lJH9IJ5H/KbM6Y9A= github.com/JekaMas/crand v1.0.1/go.mod h1:GGzGpMCht/tbaNQ5A4kSiKSqEoNAhhyTfSDQyIENBQU= github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d 
h1:RO27lgfZF8s9lZ3pWyzc0gCE0RZC+6/PXbRjAa0CNp8= github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d/go.mod h1:romz7UPgSYhfJkKOalzEEyV6sWtt/eAEm0nX2aOrod0= +github.com/JekaMas/workerpool v1.1.5 h1:xmrx2Zyft95CEGiEqzDxiawptCIRZQ0zZDhTGDFOCaw= +github.com/JekaMas/workerpool v1.1.5/go.mod h1:IoDWPpwMcA27qbuugZKeBslDrgX09lVmksuh9sjzbhc= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= @@ -143,9 +145,11 @@ github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BR github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo= github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= -github.com/btcsuite/btcd/btcec/v2 v2.1.2 h1:YoYoC9J0jwfukodSBMzZYUVQ8PTiYg4BnOWiJVzTmLs= github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -237,8 +241,9 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.6.1 h1:4xYASHy5cScPkLD7PO0uTmnVc860m9NarPN1X8zeMe8= +github.com/docker/docker v1.6.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= @@ -279,6 +284,8 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod 
h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= @@ -491,6 +498,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= @@ -997,8 +1006,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 h1:NvGWuYG8dkDHFSKksI1P9faiVJ9rayE6l0+ouWVIDs8= -golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1027,9 +1036,8 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1039,8 +1047,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1091,8 +1099,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220728030405-41545e8bf201 h1:bvOltf3SADAfG05iRml8lAB3qjoEX5RCyN4K6G5v3N0= -golang.org/x/net v0.0.0-20220728030405-41545e8bf201/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1113,8 +1121,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1188,12 +1196,13 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 h1:Sx/u41w+OwrInGdEckYmEuU5gHoGSL4QbDz3S9s6j4U= golang.org/x/sys v0.0.0-20220818161305-2296e01440c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1203,8 +1212,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1275,8 +1284,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/cli/debug_pprof.go b/internal/cli/debug_pprof.go index ca14604b97fc..6bbe664baa57 100644 --- a/internal/cli/debug_pprof.go 
+++ b/internal/cli/debug_pprof.go @@ -7,6 +7,8 @@ import ( "fmt" "strings" + "google.golang.org/grpc" + "github.com/ethereum/go-ethereum/internal/cli/flagset" "github.com/ethereum/go-ethereum/internal/cli/server/proto" ) @@ -111,7 +113,7 @@ func (d *DebugPprofCommand) Run(args []string) int { req.Profile = profile } - stream, err := clt.DebugPprof(ctx, req) + stream, err := clt.DebugPprof(ctx, req, grpc.MaxCallRecvMsgSize(1024*1024*1024)) if err != nil { return err diff --git a/internal/cli/dumpconfig.go b/internal/cli/dumpconfig.go index 14444f164ed1..55495bf22a93 100644 --- a/internal/cli/dumpconfig.go +++ b/internal/cli/dumpconfig.go @@ -56,6 +56,8 @@ func (c *DumpconfigCommand) Run(args []string) int { userConfig.JsonRPC.HttpTimeout.ReadTimeoutRaw = userConfig.JsonRPC.HttpTimeout.ReadTimeout.String() userConfig.JsonRPC.HttpTimeout.WriteTimeoutRaw = userConfig.JsonRPC.HttpTimeout.WriteTimeout.String() userConfig.JsonRPC.HttpTimeout.IdleTimeoutRaw = userConfig.JsonRPC.HttpTimeout.IdleTimeout.String() + userConfig.JsonRPC.Http.ExecutionPoolRequestTimeoutRaw = userConfig.JsonRPC.Http.ExecutionPoolRequestTimeout.String() + userConfig.JsonRPC.Ws.ExecutionPoolRequestTimeoutRaw = userConfig.JsonRPC.Ws.ExecutionPoolRequestTimeout.String() userConfig.TxPool.RejournalRaw = userConfig.TxPool.Rejournal.String() userConfig.TxPool.LifeTimeRaw = userConfig.TxPool.LifeTime.String() userConfig.Sealer.GasPriceRaw = userConfig.Sealer.GasPrice.String() diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index a3384d8d61bf..47cb9a7848ee 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -70,6 +70,12 @@ type Config struct { // KeyStoreDir is the directory to store keystores KeyStoreDir string `hcl:"keystore,optional" toml:"keystore,optional"` + // Maximum number of messages in a batch (default=100, use 0 for no limits) + RPCBatchLimit uint64 `hcl:"rpc.batchlimit,optional" toml:"rpc.batchlimit,optional"` + + // Maximum size (in bytes) a result of an rpc request could have (default=100000, use 0 for no limits) + RPCReturnDataLimit uint64 `hcl:"rpc.returndatalimit,optional" toml:"rpc.returndatalimit,optional"` + // SyncMode selects the sync protocol SyncMode string `hcl:"syncmode,optional" toml:"syncmode,optional"` @@ -380,6 +386,13 @@ type APIConfig struct { // Origins is the list of endpoints to accept requests from (only consumed for websockets) Origins []string `hcl:"origins,optional" toml:"origins,optional"` + + // ExecutionPoolSize is max size of workers to be used for rpc execution + ExecutionPoolSize uint64 `hcl:"ep-size,optional" toml:"ep-size,optional"` + + // ExecutionPoolRequestTimeout is timeout used by execution pool for rpc execution + ExecutionPoolRequestTimeout time.Duration `hcl:"-,optional" toml:"-"` + ExecutionPoolRequestTimeoutRaw string `hcl:"ep-requesttimeout,optional" toml:"ep-requesttimeout,optional"` } // Used from rpc.HTTPTimeouts @@ -566,6 +579,8 @@ func DefaultConfig() *Config { Backtrace: "", Debug: false, }, + RPCBatchLimit: 100, + RPCReturnDataLimit: 100000, P2P: &P2PConfig{ MaxPeers: 50, MaxPendPeers: 50, @@ -630,21 +645,25 @@ func DefaultConfig() *Config { RPCEVMTimeout: ethconfig.Defaults.RPCEVMTimeout, AllowUnprotectedTxs: false, Http: &APIConfig{ - Enabled: false, - Port: 8545, - Prefix: "", - Host: "localhost", - API: []string{"eth", "net", "web3", "txpool", "bor"}, - Cors: []string{"localhost"}, - VHost: []string{"localhost"}, + Enabled: false, + Port: 8545, + Prefix: "", + Host: "localhost", + API: []string{"eth", 
"net", "web3", "txpool", "bor"}, + Cors: []string{"localhost"}, + VHost: []string{"localhost"}, + ExecutionPoolSize: 40, + ExecutionPoolRequestTimeout: 0, }, Ws: &APIConfig{ - Enabled: false, - Port: 8546, - Prefix: "", - Host: "localhost", - API: []string{"net", "web3"}, - Origins: []string{"localhost"}, + Enabled: false, + Port: 8546, + Prefix: "", + Host: "localhost", + API: []string{"net", "web3"}, + Origins: []string{"localhost"}, + ExecutionPoolSize: 40, + ExecutionPoolRequestTimeout: 0, }, Graphql: &APIConfig{ Enabled: false, @@ -652,7 +671,7 @@ func DefaultConfig() *Config { VHost: []string{"localhost"}, }, HttpTimeout: &HttpTimeouts{ - ReadTimeout: 30 * time.Second, + ReadTimeout: 10 * time.Second, WriteTimeout: 30 * time.Second, IdleTimeout: 120 * time.Second, }, @@ -770,6 +789,8 @@ func (c *Config) fillTimeDurations() error { {"jsonrpc.timeouts.read", &c.JsonRPC.HttpTimeout.ReadTimeout, &c.JsonRPC.HttpTimeout.ReadTimeoutRaw}, {"jsonrpc.timeouts.write", &c.JsonRPC.HttpTimeout.WriteTimeout, &c.JsonRPC.HttpTimeout.WriteTimeoutRaw}, {"jsonrpc.timeouts.idle", &c.JsonRPC.HttpTimeout.IdleTimeout, &c.JsonRPC.HttpTimeout.IdleTimeoutRaw}, + {"jsonrpc.ws.ep-requesttimeout", &c.JsonRPC.Ws.ExecutionPoolRequestTimeout, &c.JsonRPC.Ws.ExecutionPoolRequestTimeoutRaw}, + {"jsonrpc.http.ep-requesttimeout", &c.JsonRPC.Http.ExecutionPoolRequestTimeout, &c.JsonRPC.Http.ExecutionPoolRequestTimeoutRaw}, {"txpool.lifetime", &c.TxPool.LifeTime, &c.TxPool.LifeTimeRaw}, {"txpool.rejournal", &c.TxPool.Rejournal, &c.TxPool.RejournalRaw}, {"cache.rejournal", &c.Cache.Rejournal, &c.Cache.RejournalRaw}, @@ -1100,6 +1121,8 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.BorLogs = c.BorLogs n.DatabaseHandles = dbHandles + n.RPCReturnDataLimit = c.RPCReturnDataLimit + if c.Ancient != "" { n.DatabaseFreezer = c.Ancient } @@ -1249,10 +1272,15 @@ func (c *Config) buildNode() (*node.Config, error) { WriteTimeout: c.JsonRPC.HttpTimeout.WriteTimeout, IdleTimeout: c.JsonRPC.HttpTimeout.IdleTimeout, }, - JWTSecret: c.JsonRPC.Auth.JWTSecret, - AuthPort: int(c.JsonRPC.Auth.Port), - AuthAddr: c.JsonRPC.Auth.Addr, - AuthVirtualHosts: c.JsonRPC.Auth.VHosts, + JWTSecret: c.JsonRPC.Auth.JWTSecret, + AuthPort: int(c.JsonRPC.Auth.Port), + AuthAddr: c.JsonRPC.Auth.Addr, + AuthVirtualHosts: c.JsonRPC.Auth.VHosts, + RPCBatchLimit: c.RPCBatchLimit, + WSJsonRPCExecutionPoolSize: c.JsonRPC.Ws.ExecutionPoolSize, + WSJsonRPCExecutionPoolRequestTimeout: c.JsonRPC.Ws.ExecutionPoolRequestTimeout, + HTTPJsonRPCExecutionPoolSize: c.JsonRPC.Http.ExecutionPoolSize, + HTTPJsonRPCExecutionPoolRequestTimeout: c.JsonRPC.Http.ExecutionPoolRequestTimeout, } if c.P2P.NetRestrict != "" { diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index e597323061a9..82b99090d488 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -58,6 +58,18 @@ func (c *Command) Flags() *flagset.Flagset { Usage: "Path of the directory where keystores are located", Value: &c.cliConfig.KeyStoreDir, }) + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "rpc.batchlimit", + Usage: "Maximum number of messages in a batch (default=100, use 0 for no limits)", + Value: &c.cliConfig.RPCBatchLimit, + Default: c.cliConfig.RPCBatchLimit, + }) + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "rpc.returndatalimit", + Usage: "Maximum size (in bytes) a result of an rpc request could have (default=100000, use 0 for no limits)", + Value: &c.cliConfig.RPCReturnDataLimit, + Default: c.cliConfig.RPCReturnDataLimit, + }) 
f.StringFlag(&flagset.StringFlag{ Name: "config", Usage: "Path to the TOML configuration file", @@ -566,6 +578,20 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.JsonRPC.Http.API, Group: "JsonRPC", }) + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "http.ep-size", + Usage: "Maximum size of workers to run in rpc execution pool for HTTP requests", + Value: &c.cliConfig.JsonRPC.Http.ExecutionPoolSize, + Default: c.cliConfig.JsonRPC.Http.ExecutionPoolSize, + Group: "JsonRPC", + }) + f.DurationFlag(&flagset.DurationFlag{ + Name: "http.ep-requesttimeout", + Usage: "Request Timeout for rpc execution pool for HTTP requests", + Value: &c.cliConfig.JsonRPC.Http.ExecutionPoolRequestTimeout, + Default: c.cliConfig.JsonRPC.Http.ExecutionPoolRequestTimeout, + Group: "JsonRPC", + }) // ws options f.BoolFlag(&flagset.BoolFlag{ @@ -603,6 +629,20 @@ func (c *Command) Flags() *flagset.Flagset { Default: c.cliConfig.JsonRPC.Ws.API, Group: "JsonRPC", }) + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "ws.ep-size", + Usage: "Maximum size of workers to run in rpc execution pool for WS requests", + Value: &c.cliConfig.JsonRPC.Ws.ExecutionPoolSize, + Default: c.cliConfig.JsonRPC.Ws.ExecutionPoolSize, + Group: "JsonRPC", + }) + f.DurationFlag(&flagset.DurationFlag{ + Name: "ws.ep-requesttimeout", + Usage: "Request Timeout for rpc execution pool for WS requests", + Value: &c.cliConfig.JsonRPC.Ws.ExecutionPoolRequestTimeout, + Default: c.cliConfig.JsonRPC.Ws.ExecutionPoolRequestTimeout, + Group: "JsonRPC", + }) // graphql options f.BoolFlag(&flagset.BoolFlag{ diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index f3e2fd46c5a4..3ce2c6552b69 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -641,7 +641,7 @@ func (s *PublicBlockChainAPI) GetTransactionReceiptsByBlock(ctx context.Context, var txHash common.Hash - borReceipt := rawdb.ReadBorReceipt(s.b.ChainDb(), block.Hash(), block.NumberU64()) + borReceipt := rawdb.ReadBorReceipt(s.b.ChainDb(), block.Hash(), block.NumberU64(), s.b.ChainConfig()) if borReceipt != nil { receipts = append(receipts, borReceipt) txHash = types.GetDerivedBorTxHash(types.BorReceiptKey(block.Number().Uint64(), block.Hash())) @@ -1083,6 +1083,11 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args TransactionArgs, bl if err != nil { return nil, err } + + if int(s.b.RPCRpcReturnDataLimit()) > 0 && len(result.ReturnData) > int(s.b.RPCRpcReturnDataLimit()) { + return nil, fmt.Errorf("call returned result of length %d exceeding limit %d", len(result.ReturnData), int(s.b.RPCRpcReturnDataLimit())) + } + // If the result contains a revert reason, try to unpack and return it. 
if len(result.Revert()) > 0 { return nil, newRevertError(result) @@ -1453,19 +1458,38 @@ func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, conf func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *params.ChainConfig, db ethdb.Database) *RPCTransaction { txs := b.Transactions() - borReceipt := rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64()) - if borReceipt != nil { - tx, _, _, _ := rawdb.ReadBorTransaction(db, borReceipt.TxHash) + if index >= uint64(len(txs)+1) { + return nil + } + + var borReceipt *types.Receipt - if tx != nil { - txs = append(txs, tx) + // Read bor receipts if a state-sync transaction is requested + if index == uint64(len(txs)) { + borReceipt = rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config) + if borReceipt != nil { + if borReceipt.TxHash != (common.Hash{}) { + borTx, _, _, _ := rawdb.ReadBorTransactionWithBlockHash(db, borReceipt.TxHash, b.Hash()) + if borTx != nil { + txs = append(txs, borTx) + } + } } } + // If the index is still out of the range after checking bor state sync transaction, it means that the transaction index is invalid if index >= uint64(len(txs)) { return nil } - return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee(), config) + + rpcTx := newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee(), config) + + // If the transaction is a bor transaction, we need to set the hash to the derived bor tx hash. BorTx is always the last index. + if borReceipt != nil && index == uint64(len(txs)-1) { + rpcTx.Hash = borReceipt.TxHash + } + + return rpcTx } // newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index. @@ -1596,7 +1620,7 @@ func (api *PublicTransactionPoolAPI) getAllBlockTransactions(ctx context.Context stateSyncPresent := false - borReceipt := rawdb.ReadBorReceipt(api.b.ChainDb(), block.Hash(), block.NumberU64()) + borReceipt := rawdb.ReadBorReceipt(api.b.ChainDb(), block.Hash(), block.NumberU64(), api.b.ChainConfig()) if borReceipt != nil { txHash := types.GetDerivedBorTxHash(types.BorReceiptKey(block.Number().Uint64(), block.Hash())) if txHash != (common.Hash{}) { @@ -1766,7 +1790,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha if borTx { // Fetch bor block receipt - receipt = rawdb.ReadBorReceipt(s.b.ChainDb(), blockHash, blockNumber) + receipt = rawdb.ReadBorReceipt(s.b.ChainDb(), blockHash, blockNumber, s.b.ChainConfig()) } else { receipts, err := s.b.GetReceipts(ctx, blockHash) if err != nil { diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 1287640b83b7..14ddbba70ed9 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -48,10 +48,11 @@ type Backend interface { ChainDb() ethdb.Database AccountManager() *accounts.Manager ExtRPCEnabled() bool - RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection - RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection - RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs - UnprotectedAllowed() bool // allows only for EIP155 transactions. 
+ RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection + RPCRpcReturnDataLimit() uint64 // Maximum size (in bytes) a result of an rpc request could have + RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection + RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs + UnprotectedAllowed() bool // allows only for EIP155 transactions. // Blockchain API SetHead(number uint64) diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 316aff3b38a7..57551e64c30b 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -201,6 +201,33 @@ web3._extend({ call: 'admin_setMaxPeers', params: 1 }), + new web3._extend.Method({ + name: 'getExecutionPoolSize', + call: 'admin_getExecutionPoolSize' + }), + new web3._extend.Method({ + name: 'getExecutionPoolRequestTimeout', + call: 'admin_getExecutionPoolRequestTimeout' + }), + // new web3._extend.Method({ + // name: 'setWSExecutionPoolRequestTimeout', + // call: 'admin_setWSExecutionPoolRequestTimeout', + // params: 1 + // }), + // new web3._extend.Method({ + // name: 'setHttpExecutionPoolRequestTimeout', + // call: 'admin_setHttpExecutionPoolRequestTimeout', + // params: 1 + // }), + new web3._extend.Method({ + name: 'setWSExecutionPoolSize', + call: 'admin_setWSExecutionPoolSize', + params: 1 + }), + new web3._extend.Method({ + name: 'setHttpExecutionPoolSize', + call: 'admin_setHttpExecutionPoolSize', + }), ], properties: [ new web3._extend.Property({ diff --git a/les/api_backend.go b/les/api_backend.go index c716a3967f79..786e77ed46c8 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -294,6 +294,10 @@ func (b *LesApiBackend) RPCGasCap() uint64 { return b.eth.config.RPCGasCap } +func (b *LesApiBackend) RPCRpcReturnDataLimit() uint64 { + return b.eth.config.RPCReturnDataLimit +} + func (b *LesApiBackend) RPCEVMTimeout() time.Duration { return b.eth.config.RPCEVMTimeout } diff --git a/miner/fake_miner.go b/miner/fake_miner.go index a09d868b2668..38fd2b82d778 100644 --- a/miner/fake_miner.go +++ b/miner/fake_miner.go @@ -47,7 +47,7 @@ func NewBorDefaultMiner(t *testing.T) *DefaultBorMiner { ethAPI.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: common.Address{0x1}, diff --git a/miner/worker.go b/miner/worker.go index 8404b208d2a8..8188b7f3ec50 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -810,8 +810,8 @@ func (w *worker) resultLoop() { } // Commit block and state to database. 
- tracing.ElapsedTime(ctx, span, "WriteBlockAndSetHead time taken", func(_ context.Context, _ trace.Span) { - _, err = w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true) + tracing.Exec(ctx, "", "resultLoop.WriteBlockAndSetHead", func(ctx context.Context, span trace.Span) { + _, err = w.chain.WriteBlockAndSetHead(ctx, block, receipts, logs, task.state, true) }) tracing.SetAttributes( diff --git a/miner/worker_test.go b/miner/worker_test.go index c4eab34f134a..24577a4fb23c 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -163,7 +163,7 @@ func getFakeBorFromConfig(t *testing.T, chainConfig *params.ChainConfig) (consen ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: TestBankAddress, @@ -695,7 +695,7 @@ func BenchmarkBorMining(b *testing.B) { ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() spanner := bor.NewMockSpanner(ctrl) - spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{ { ID: 0, Address: TestBankAddress, diff --git a/node/api.go b/node/api.go index d838404f7dd9..e6c5a009d4e2 100644 --- a/node/api.go +++ b/node/api.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" @@ -366,3 +367,91 @@ func (s *publicWeb3API) ClientVersion() string { func (s *publicWeb3API) Sha3(input hexutil.Bytes) hexutil.Bytes { return crypto.Keccak256(input) } + +type ExecutionPoolSize struct { + HttpLimit int + WSLimit int +} + +type ExecutionPoolRequestTimeout struct { + HttpLimit time.Duration + WSLimit time.Duration +} + +func (api *privateAdminAPI) GetExecutionPoolSize() *ExecutionPoolSize { + var httpLimit int + if api.node.http.host != "" { + httpLimit = api.node.http.httpHandler.Load().(*rpcHandler).server.GetExecutionPoolSize() + } + + var wsLimit int + if api.node.ws.host != "" { + wsLimit = api.node.ws.wsHandler.Load().(*rpcHandler).server.GetExecutionPoolSize() + } + + executionPoolSize := &ExecutionPoolSize{ + HttpLimit: httpLimit, + WSLimit: wsLimit, + } + + return executionPoolSize +} + +func (api *privateAdminAPI) GetExecutionPoolRequestTimeout() *ExecutionPoolRequestTimeout { + var httpLimit time.Duration + if api.node.http.host != "" { + httpLimit = api.node.http.httpHandler.Load().(*rpcHandler).server.GetExecutionPoolRequestTimeout() + } + + var wsLimit time.Duration + if api.node.ws.host != "" { + wsLimit = api.node.ws.wsHandler.Load().(*rpcHandler).server.GetExecutionPoolRequestTimeout() + } + + executionPoolRequestTimeout := &ExecutionPoolRequestTimeout{ + HttpLimit: httpLimit, + WSLimit: wsLimit, + } + + return executionPoolRequestTimeout +} + +// func (api *privateAdminAPI) SetWSExecutionPoolRequestTimeout(n int) *ExecutionPoolRequestTimeout { +// if api.node.ws.host != "" { +// api.node.ws.wsConfig.executionPoolRequestTimeout = time.Duration(n) * time.Millisecond +// api.node.ws.wsHandler.Load().(*rpcHandler).server.SetExecutionPoolRequestTimeout(time.Duration(n) * time.Millisecond) +// log.Warn("updating ws execution pool 
request timeout", "timeout", n) +// } + +// return api.GetExecutionPoolRequestTimeout() +// } + +// func (api *privateAdminAPI) SetHttpExecutionPoolRequestTimeout(n int) *ExecutionPoolRequestTimeout { +// if api.node.http.host != "" { +// api.node.http.httpConfig.executionPoolRequestTimeout = time.Duration(n) * time.Millisecond +// api.node.http.httpHandler.Load().(*rpcHandler).server.SetExecutionPoolRequestTimeout(time.Duration(n) * time.Millisecond) +// log.Warn("updating http execution pool request timeout", "timeout", n) +// } + +// return api.GetExecutionPoolRequestTimeout() +// } + +func (api *privateAdminAPI) SetWSExecutionPoolSize(n int) *ExecutionPoolSize { + if api.node.ws.host != "" { + api.node.ws.wsConfig.executionPoolSize = uint64(n) + api.node.ws.wsHandler.Load().(*rpcHandler).server.SetExecutionPoolSize(n) + log.Warn("updating ws execution pool size", "threads", n) + } + + return api.GetExecutionPoolSize() +} + +func (api *privateAdminAPI) SetHttpExecutionPoolSize(n int) *ExecutionPoolSize { + if api.node.http.host != "" { + api.node.http.httpConfig.executionPoolSize = uint64(n) + api.node.http.httpHandler.Load().(*rpcHandler).server.SetExecutionPoolSize(n) + log.Warn("updating http execution pool size", "threads", n) + } + + return api.GetExecutionPoolSize() +} diff --git a/node/config.go b/node/config.go index 853190c95f71..c8f40c10627a 100644 --- a/node/config.go +++ b/node/config.go @@ -25,6 +25,7 @@ import ( "runtime" "strings" "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -204,6 +205,14 @@ type Config struct { // JWTSecret is the hex-encoded jwt secret. JWTSecret string `toml:",omitempty"` + + // Maximum number of messages in a batch + RPCBatchLimit uint64 `toml:",omitempty"` + // Configs for RPC execution pool + WSJsonRPCExecutionPoolSize uint64 `toml:",omitempty"` + WSJsonRPCExecutionPoolRequestTimeout time.Duration `toml:",omitempty"` + HTTPJsonRPCExecutionPoolSize uint64 `toml:",omitempty"` + HTTPJsonRPCExecutionPoolRequestTimeout time.Duration `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into diff --git a/node/node.go b/node/node.go index e12bcf6675f8..5cf233d17afb 100644 --- a/node/node.go +++ b/node/node.go @@ -105,7 +105,7 @@ func New(conf *Config) (*Node, error) { node := &Node{ config: conf, - inprocHandler: rpc.NewServer(), + inprocHandler: rpc.NewServer(0, 0), eventmux: new(event.TypeMux), log: conf.Logger, stop: make(chan struct{}), @@ -113,6 +113,9 @@ func New(conf *Config) (*Node, error) { databases: make(map[*closeTrackingDB]struct{}), } + // set RPC batch limit + node.inprocHandler.SetRPCBatchLimit(conf.RPCBatchLimit) + // Register built-in APIs. node.rpcAPIs = append(node.rpcAPIs, node.apis()...) @@ -153,10 +156,10 @@ func New(conf *Config) (*Node, error) { } // Configure RPC servers. 
- node.http = newHTTPServer(node.log, conf.HTTPTimeouts) - node.httpAuth = newHTTPServer(node.log, conf.HTTPTimeouts) - node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts) - node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts) + node.http = newHTTPServer(node.log, conf.HTTPTimeouts, conf.RPCBatchLimit) + node.httpAuth = newHTTPServer(node.log, conf.HTTPTimeouts, conf.RPCBatchLimit) + node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts, conf.RPCBatchLimit) + node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts, conf.RPCBatchLimit) node.ipc = newIPCServer(node.log, conf.IPCEndpoint()) return node, nil @@ -402,10 +405,12 @@ func (n *Node) startRPC() error { return err } if err := server.enableRPC(apis, httpConfig{ - CorsAllowedOrigins: n.config.HTTPCors, - Vhosts: n.config.HTTPVirtualHosts, - Modules: n.config.HTTPModules, - prefix: n.config.HTTPPathPrefix, + CorsAllowedOrigins: n.config.HTTPCors, + Vhosts: n.config.HTTPVirtualHosts, + Modules: n.config.HTTPModules, + prefix: n.config.HTTPPathPrefix, + executionPoolSize: n.config.HTTPJsonRPCExecutionPoolSize, + executionPoolRequestTimeout: n.config.HTTPJsonRPCExecutionPoolRequestTimeout, }); err != nil { return err } @@ -419,9 +424,11 @@ func (n *Node) startRPC() error { return err } if err := server.enableWS(n.rpcAPIs, wsConfig{ - Modules: n.config.WSModules, - Origins: n.config.WSOrigins, - prefix: n.config.WSPathPrefix, + Modules: n.config.WSModules, + Origins: n.config.WSOrigins, + prefix: n.config.WSPathPrefix, + executionPoolSize: n.config.WSJsonRPCExecutionPoolSize, + executionPoolRequestTimeout: n.config.WSJsonRPCExecutionPoolRequestTimeout, }); err != nil { return err } diff --git a/node/rpcstack.go b/node/rpcstack.go index eabf1dcae73c..cba9a22f6f02 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -28,6 +28,7 @@ import ( "strings" "sync" "sync/atomic" + "time" "github.com/rs/cors" @@ -42,6 +43,10 @@ type httpConfig struct { Vhosts []string prefix string // path prefix on which to mount http handler jwtSecret []byte // optional JWT secret + + // Execution pool config + executionPoolSize uint64 + executionPoolRequestTimeout time.Duration } // wsConfig is the JSON-RPC/Websocket configuration @@ -50,6 +55,10 @@ type wsConfig struct { Modules []string prefix string // path prefix on which to mount ws handler jwtSecret []byte // optional JWT secret + + // Execution pool config + executionPoolSize uint64 + executionPoolRequestTimeout time.Duration } type rpcHandler struct { @@ -81,10 +90,12 @@ type httpServer struct { port int handlerNames map[string]string + + RPCBatchLimit uint64 } -func newHTTPServer(log log.Logger, timeouts rpc.HTTPTimeouts) *httpServer { - h := &httpServer{log: log, timeouts: timeouts, handlerNames: make(map[string]string)} +func newHTTPServer(log log.Logger, timeouts rpc.HTTPTimeouts, rpcBatchLimit uint64) *httpServer { + h := &httpServer{log: log, timeouts: timeouts, handlerNames: make(map[string]string), RPCBatchLimit: rpcBatchLimit} h.httpHandler.Store((*rpcHandler)(nil)) h.wsHandler.Store((*rpcHandler)(nil)) @@ -282,7 +293,8 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error { } // Create RPC server and handler. 
- srv := rpc.NewServer() + srv := rpc.NewServer(config.executionPoolSize, config.executionPoolRequestTimeout) + srv.SetRPCBatchLimit(h.RPCBatchLimit) if err := RegisterApis(apis, config.Modules, srv, false); err != nil { return err } @@ -313,7 +325,8 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error { return fmt.Errorf("JSON-RPC over WebSocket is already enabled") } // Create RPC server and handler. - srv := rpc.NewServer() + srv := rpc.NewServer(config.executionPoolSize, config.executionPoolRequestTimeout) + srv.SetRPCBatchLimit(h.RPCBatchLimit) if err := RegisterApis(apis, config.Modules, srv, false); err != nil { return err } diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go index 60fcab5a9001..49db8435aca0 100644 --- a/node/rpcstack_test.go +++ b/node/rpcstack_test.go @@ -234,7 +234,7 @@ func Test_checkPath(t *testing.T) { func createAndStartServer(t *testing.T, conf *httpConfig, ws bool, wsConf *wsConfig) *httpServer { t.Helper() - srv := newHTTPServer(testlog.Logger(t, log.LvlDebug), rpc.DefaultHTTPTimeouts) + srv := newHTTPServer(testlog.Logger(t, log.LvlDebug), rpc.DefaultHTTPTimeouts, 100) assert.NoError(t, srv.enableRPC(nil, *conf)) if ws { assert.NoError(t, srv.enableWS(nil, *wsConf)) diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml index 0cd966aec141..7326ca13e13a 100644 --- a/packaging/templates/mainnet-v1/archive/config.toml +++ b/packaging/templates/mainnet-v1/archive/config.toml @@ -5,6 +5,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" gcmode = "archive" # snapshot = true @@ -79,6 +81,8 @@ gcmode = "archive" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" [jsonrpc.ws] enabled = true port = 8546 @@ -86,6 +90,8 @@ gcmode = "archive" # host = "localhost" # api = ["web3", "net"] origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -99,7 +105,7 @@ gcmode = "archive" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml index 0d8badca7ab7..09125d4aff0b 100644 --- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml @@ -5,6 +5,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true @@ -79,6 +81,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -86,6 +90,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -99,7 +105,7 @@ syncmode = "full" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml index f860c2602091..59d0ef967232 100644 --- 
a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml +++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml @@ -7,6 +7,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "$BOR_DIR/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true @@ -81,6 +83,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -88,6 +92,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -101,7 +107,7 @@ syncmode = "full" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml index 4703ede9d5aa..00bdca179d80 100644 --- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml +++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml @@ -7,6 +7,8 @@ chain = "mainnet" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "$BOR_DIR/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true @@ -81,6 +83,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -88,6 +92,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -101,7 +107,7 @@ syncmode = "full" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control index cb62165a5e96..130226241b4e 100644 --- a/packaging/templates/package_scripts/control +++ b/packaging/templates/package_scripts/control @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64 index 56276cb43ac9..e8073532af6d 100644 --- a/packaging/templates/package_scripts/control.arm64 +++ b/packaging/templates/package_scripts/control.arm64 @@ -1,5 +1,5 @@ Source: bor -Version: 0.3.3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64 index 4ddd8424ff48..a5b46bff79e2 100644 --- a/packaging/templates/package_scripts/control.profile.amd64 +++ b/packaging/templates/package_scripts/control.profile.amd64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64 index 9f9301c9258d..b0d94da3388a 100644 --- a/packaging/templates/package_scripts/control.profile.arm64 +++ b/packaging/templates/package_scripts/control.profile.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.3 +Version: 0.3.8-beta Section: develop Priority: 
standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator index d43250c89131..887713056abb 100644 --- a/packaging/templates/package_scripts/control.validator +++ b/packaging/templates/package_scripts/control.validator @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64 index 5a50f8cb39c7..f9fa7635a940 100644 --- a/packaging/templates/package_scripts/control.validator.arm64 +++ b/packaging/templates/package_scripts/control.validator.arm64 @@ -1,5 +1,5 @@ Source: bor-profile -Version: 0.3.3 +Version: 0.3.8-beta Section: develop Priority: standard Maintainer: Polygon diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml index e25e017ab724..6b8c13610b39 100644 --- a/packaging/templates/testnet-v4/archive/config.toml +++ b/packaging/templates/testnet-v4/archive/config.toml @@ -5,6 +5,8 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" gcmode = "archive" # snapshot = true @@ -79,6 +81,8 @@ gcmode = "archive" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" [jsonrpc.ws] enabled = true port = 8546 @@ -86,6 +90,8 @@ gcmode = "archive" # host = "localhost" # api = ["web3", "net"] origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -99,7 +105,7 @@ gcmode = "archive" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml index 37845b734063..b9632fe33633 100644 --- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml @@ -5,6 +5,8 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true @@ -79,6 +81,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -86,6 +90,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -99,7 +105,7 @@ syncmode = "full" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml index b75f47b35d32..8dc6daa5ecaf 100644 --- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml +++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml @@ -7,6 +7,8 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "$BOR_DIR/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true @@ -81,6 +83,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = 
["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -88,6 +92,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -101,7 +107,7 @@ syncmode = "full" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" @@ -159,7 +165,6 @@ syncmode = "full" # period = 0 # gaslimit = 11500000 -# [pprof] # pprof = false # port = 6060 # addr = "127.0.0.1" diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml index d3e6cf2c7413..97a9162e0905 100644 --- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml +++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml @@ -7,6 +7,8 @@ chain = "mumbai" datadir = "/var/lib/bor/data" # ancient = "" # keystore = "$BOR_DIR/keystore" +# "rpc.batchlimit" = 100 +# "rpc.returndatalimit" = 100000 syncmode = "full" # gcmode = "full" # snapshot = true @@ -81,6 +83,8 @@ syncmode = "full" vhosts = ["*"] corsdomain = ["*"] # prefix = "" + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.ws] # enabled = false # port = 8546 @@ -88,6 +92,8 @@ syncmode = "full" # host = "localhost" # api = ["web3", "net"] # origins = ["*"] + # ep-size = 40 + # ep-requesttimeout = "0s" # [jsonrpc.graphql] # enabled = false # port = 0 @@ -101,7 +107,7 @@ syncmode = "full" # port = 8551 # vhosts = ["localhost"] # [jsonrpc.timeouts] - # read = "30s" + # read = "10s" # write = "30s" # idle = "2m0s" diff --git a/params/config.go b/params/config.go index 94729224bb49..9833c9eac5ca 100644 --- a/params/config.go +++ b/params/config.go @@ -617,6 +617,10 @@ func (c *BorConfig) IsDelhi(number *big.Int) bool { return isForked(c.DelhiBlock, number) } +func (c *BorConfig) IsSprintStart(number uint64) bool { + return number%c.CalculateSprint(number) == 0 +} + func (c *BorConfig) calculateBorConfigHelper(field map[string]uint64, number uint64) uint64 { keys := make([]string, 0, len(field)) for k := range field { diff --git a/params/protocol_params.go b/params/protocol_params.go index d468af5d3c96..103266caff08 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -125,7 +125,8 @@ const ( ElasticityMultiplier = 2 // Bounds the maximum gas limit an EIP-1559 block may have. InitialBaseFee = 1000000000 // Initial base fee for EIP-1559 blocks. 
- MaxCodeSize = 24576 // Maximum bytecode to permit for a contract + MaxCodeSize = 24576 // Maximum bytecode to permit for a contract + MaxInitCodeSize = 2 * MaxCodeSize // Maximum initcode to permit in a creation transaction and create instructions // Precompiled contract gas prices diff --git a/params/version.go b/params/version.go index 0e4afc7bcb98..28c32e9df3ef 100644 --- a/params/version.go +++ b/params/version.go @@ -21,14 +21,11 @@ import ( ) const ( - VersionMajor = 0 // Major version component of the current release - VersionMinor = 3 // Minor version component of the current release - VersionPatch = 3 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string -) - -var ( - GitCommit = "" + GitCommit = "" + VersionMajor = 0 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 8 // Patch version component of the current release + VersionMeta = "beta" // Version metadata to append to the version string ) // Version holds the textual version string. diff --git a/rpc/client.go b/rpc/client.go index d3ce0297754c..fc286fe8dc74 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -112,7 +112,7 @@ func (c *Client) newClientConn(conn ServerCodec) *clientConn { ctx := context.Background() ctx = context.WithValue(ctx, clientContextKey{}, c) ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo()) - handler := newHandler(ctx, conn, c.idgen, c.services) + handler := newHandler(ctx, conn, c.idgen, c.services, NewExecutionPool(100, 0)) return &clientConn{conn, handler} } diff --git a/rpc/client_test.go b/rpc/client_test.go index fa6010bb199c..1bebd2767774 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -33,12 +33,14 @@ import ( "time" "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/log" ) func TestClientRequest(t *testing.T) { server := newTestServer() defer server.Stop() + client := DialInProc(server) defer client.Close() @@ -46,6 +48,7 @@ func TestClientRequest(t *testing.T) { if err := client.Call(&resp, "test_echo", "hello", 10, &echoArgs{"world"}); err != nil { t.Fatal(err) } + if !reflect.DeepEqual(resp, echoResult{"hello", 10, &echoArgs{"world"}}) { t.Errorf("incorrect result %#v", resp) } @@ -407,7 +410,7 @@ func TestClientSubscriptionUnsubscribeServer(t *testing.T) { t.Parallel() // Create the server. - srv := NewServer() + srv := NewServer(0, 0) srv.RegisterName("nftest", new(notificationTestService)) p1, p2 := net.Pipe() recorder := &unsubscribeRecorder{ServerCodec: NewCodec(p1)} @@ -443,7 +446,7 @@ func TestClientSubscriptionChannelClose(t *testing.T) { t.Parallel() var ( - srv = NewServer() + srv = NewServer(0, 0) httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") ) diff --git a/rpc/endpoints.go b/rpc/endpoints.go index d78ebe2858bc..2a539d4fc51f 100644 --- a/rpc/endpoints.go +++ b/rpc/endpoints.go @@ -27,7 +27,7 @@ import ( func StartIPCEndpoint(ipcEndpoint string, apis []API) (net.Listener, *Server, error) { // Register all the APIs exposed by the services. 
var ( - handler = NewServer() + handler = NewServer(0, 0) regMap = make(map[string]struct{}) registered []string ) diff --git a/rpc/execution_pool.go b/rpc/execution_pool.go new file mode 100644 index 000000000000..d0f5ab5daa59 --- /dev/null +++ b/rpc/execution_pool.go @@ -0,0 +1,99 @@ +package rpc + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/JekaMas/workerpool" +) + +type SafePool struct { + executionPool *atomic.Pointer[workerpool.WorkerPool] + + sync.RWMutex + + timeout time.Duration + size int + + // Skip sending task to execution pool + fastPath bool +} + +func NewExecutionPool(initialSize int, timeout time.Duration) *SafePool { + sp := &SafePool{ + size: initialSize, + timeout: timeout, + } + + if initialSize == 0 { + sp.fastPath = true + + return sp + } + + var ptr atomic.Pointer[workerpool.WorkerPool] + + p := workerpool.New(initialSize) + ptr.Store(p) + sp.executionPool = &ptr + + return sp +} + +func (s *SafePool) Submit(ctx context.Context, fn func() error) (<-chan error, bool) { + if s.fastPath { + go func() { + _ = fn() + }() + + return nil, true + } + + if s.executionPool == nil { + return nil, false + } + + pool := s.executionPool.Load() + if pool == nil { + return nil, false + } + + return pool.Submit(ctx, fn, s.Timeout()), true +} + +func (s *SafePool) ChangeSize(n int) { + oldPool := s.executionPool.Swap(workerpool.New(n)) + + if oldPool != nil { + go func() { + oldPool.StopWait() + }() + } + + s.Lock() + s.size = n + s.Unlock() +} + +func (s *SafePool) ChangeTimeout(n time.Duration) { + s.Lock() + defer s.Unlock() + + s.timeout = n +} + +func (s *SafePool) Timeout() time.Duration { + s.RLock() + defer s.RUnlock() + + return s.timeout +} + +func (s *SafePool) Size() int { + s.RLock() + defer s.RUnlock() + + return s.size +} diff --git a/rpc/handler.go b/rpc/handler.go index e3c72c66b1f2..8b219ce792cb 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -64,6 +64,8 @@ type handler struct { subLock sync.Mutex serverSubs map[ID]*Subscription + + executionPool *SafePool } type callProc struct { @@ -71,7 +73,7 @@ type callProc struct { notifiers []*Notifier } -func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry) *handler { +func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, pool *SafePool) *handler { rootCtx, cancelRoot := context.WithCancel(connCtx) h := &handler{ reg: reg, @@ -84,11 +86,13 @@ func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg * allowSubscribe: true, serverSubs: make(map[ID]*Subscription), log: log.Root(), + executionPool: pool, } if conn.remoteAddr() != "" { h.log = h.log.New("conn", conn.remoteAddr()) } h.unsubscribeCb = newCallback(reflect.Value{}, reflect.ValueOf(h.unsubscribe)) + return h } @@ -220,12 +224,15 @@ func (h *handler) cancelServerSubscriptions(err error) { func (h *handler) startCallProc(fn func(*callProc)) { h.callWG.Add(1) - go func() { - ctx, cancel := context.WithCancel(h.rootCtx) + ctx, cancel := context.WithCancel(h.rootCtx) + + h.executionPool.Submit(context.Background(), func() error { defer h.callWG.Done() defer cancel() fn(&callProc{ctx: ctx}) - }() + + return nil + }) } // handleImmediate executes non-call messages. It returns false if the message is a @@ -262,6 +269,7 @@ func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) { // handleResponse processes method call responses. 
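// A minimal sketch of the SafePool added in rpc/execution_pool.go above (it assumes the
// github.com/JekaMas/workerpool Submit shown there delivers the task's result, or a timeout
// error, on the returned channel):
//
//	pool := NewExecutionPool(4, 5*time.Second)
//	res, ok := pool.Submit(context.Background(), func() error {
//		// RPC work goes here
//		return nil
//	})
//	if ok && res != nil {
//		if err := <-res; err != nil {
//			// handle failure or timeout
//		}
//	}
//
// With an initial size of 0 the pool takes the fast path: Submit runs fn in a plain goroutine and
// returns (nil, true), which preserves the pre-patch one-goroutine-per-request behaviour.
// ChangeSize swaps in a fresh worker pool and drains the old one in the background; ChangeTimeout
// only affects tasks submitted afterwards.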
func (h *handler) handleResponse(msg *jsonrpcMessage) { + op := h.respWait[string(msg.ID)] if op == nil { h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID}) @@ -282,7 +290,11 @@ func (h *handler) handleResponse(msg *jsonrpcMessage) { return } if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil { - go op.sub.run() + h.executionPool.Submit(context.Background(), func() error { + op.sub.run() + return nil + }) + h.clientSubs[op.sub.subid] = op.sub } } diff --git a/rpc/http.go b/rpc/http.go index 18404c060a86..09594d028050 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -104,7 +104,7 @@ type HTTPTimeouts struct { // DefaultHTTPTimeouts represents the default timeout values used if further // configuration is not provided. var DefaultHTTPTimeouts = HTTPTimeouts{ - ReadTimeout: 30 * time.Second, + ReadTimeout: 10 * time.Second, WriteTimeout: 30 * time.Second, IdleTimeout: 120 * time.Second, } diff --git a/rpc/http_test.go b/rpc/http_test.go index c84d7705f205..9737e64e918c 100644 --- a/rpc/http_test.go +++ b/rpc/http_test.go @@ -103,7 +103,7 @@ func TestHTTPResponseWithEmptyGet(t *testing.T) { func TestHTTPRespBodyUnlimited(t *testing.T) { const respLength = maxRequestContentLength * 3 - s := NewServer() + s := NewServer(0, 0) defer s.Stop() s.RegisterName("test", largeRespService{respLength}) ts := httptest.NewServer(s) diff --git a/rpc/inproc.go b/rpc/inproc.go index 3c6002e56f08..29af5507b9be 100644 --- a/rpc/inproc.go +++ b/rpc/inproc.go @@ -27,8 +27,12 @@ func DialInProc(handler *Server) *Client { c, _ := newClient(initctx, func(context.Context) (ServerCodec, error) { p1, p2 := net.Pipe() - // nolint: contextcheck - go handler.ServeCodec(NewCodec(p1), 0) + //nolint:contextcheck + handler.executionPool.Submit(initctx, func() error { + handler.ServeCodec(NewCodec(p1), 0) + return nil + }) + return NewCodec(p2), nil }) return c diff --git a/rpc/ipc.go b/rpc/ipc.go index 5e782454f820..76fbd13f92fb 100644 --- a/rpc/ipc.go +++ b/rpc/ipc.go @@ -36,7 +36,10 @@ func (s *Server) ServeListener(l net.Listener) error { } log.Trace("Accepted RPC connection", "conn", conn.RemoteAddr()) - go s.ServeCodec(NewCodec(conn), 0) + s.executionPool.Submit(context.Background(), func() error { + s.ServeCodec(NewCodec(conn), 0) + return nil + }) } } diff --git a/rpc/server.go b/rpc/server.go index 50282c921233..04ee2dc87b71 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -18,10 +18,13 @@ package rpc import ( "context" + "fmt" "io" "sync/atomic" + "time" mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/log" ) @@ -47,11 +50,20 @@ type Server struct { idgen func() ID run int32 codecs mapset.Set + + BatchLimit uint64 + executionPool *SafePool } // NewServer creates a new server instance with no registered handlers. -func NewServer() *Server { - server := &Server{idgen: randomIDGenerator(), codecs: mapset.NewSet(), run: 1} +func NewServer(executionPoolSize uint64, executionPoolRequesttimeout time.Duration) *Server { + server := &Server{ + idgen: randomIDGenerator(), + codecs: mapset.NewSet(), + run: 1, + executionPool: NewExecutionPool(int(executionPoolSize), executionPoolRequesttimeout), + } + // Register the default service providing meta information about the RPC service such // as the services and methods it offers. 
rpcService := &RPCService{server} @@ -59,6 +71,26 @@ func NewServer() *Server { return server } +func (s *Server) SetRPCBatchLimit(batchLimit uint64) { + s.BatchLimit = batchLimit +} + +func (s *Server) SetExecutionPoolSize(n int) { + s.executionPool.ChangeSize(n) +} + +func (s *Server) SetExecutionPoolRequestTimeout(n time.Duration) { + s.executionPool.ChangeTimeout(n) +} + +func (s *Server) GetExecutionPoolRequestTimeout() time.Duration { + return s.executionPool.Timeout() +} + +func (s *Server) GetExecutionPoolSize() int { + return s.executionPool.Size() +} + // RegisterName creates a service for the given receiver type under the given name. When no // methods on the given receiver match the criteria to be either a RPC method or a // subscription an error is returned. Otherwise a new service is created and added to the @@ -98,24 +130,34 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { return } - h := newHandler(ctx, codec, s.idgen, &s.services) + h := newHandler(ctx, codec, s.idgen, &s.services, s.executionPool) + h.allowSubscribe = false defer h.close(io.EOF, nil) reqs, batch, err := codec.readBatch() if err != nil { if err != io.EOF { - // nolint: errcheck - codec.writeJSON(ctx, errorMessage(&invalidMessageError{"parse error"})) + if err1 := codec.writeJSON(ctx, err); err1 != nil { + log.Warn("WARNING - error in reading batch", "err", err1) + return + } } return } - // nolint: contextcheck if batch { - // nolint: contextcheck - h.handleBatch(reqs) + if s.BatchLimit > 0 && len(reqs) > int(s.BatchLimit) { + if err1 := codec.writeJSON(ctx, errorMessage(fmt.Errorf("batch limit %d exceeded: %d requests given", s.BatchLimit, len(reqs)))); err1 != nil { + log.Warn("WARNING - requests given exceeds the batch limit", "err", err1) + log.Debug("batch limit %d exceeded: %d requests given", s.BatchLimit, len(reqs)) + } + } else { + //nolint:contextcheck + h.handleBatch(reqs) + } } else { + //nolint:contextcheck h.handleMsg(reqs[0]) } } diff --git a/rpc/server_test.go b/rpc/server_test.go index e67893710dc2..166956681bdc 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -29,7 +29,7 @@ import ( ) func TestServerRegisterName(t *testing.T) { - server := NewServer() + server := NewServer(0, 0) service := new(testService) if err := server.RegisterName("test", service); err != nil { diff --git a/rpc/subscription_test.go b/rpc/subscription_test.go index 54a053dba805..cfca1b24b92e 100644 --- a/rpc/subscription_test.go +++ b/rpc/subscription_test.go @@ -53,7 +53,7 @@ func TestSubscriptions(t *testing.T) { subCount = len(namespaces) notificationCount = 3 - server = NewServer() + server = NewServer(0, 0) clientConn, serverConn = net.Pipe() out = json.NewEncoder(clientConn) in = json.NewDecoder(clientConn) diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index 253e26328900..228582177904 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -26,7 +26,7 @@ import ( ) func newTestServer() *Server { - server := NewServer() + server := NewServer(0, 0) server.idgen = sequentialIDGenerator() if err := server.RegisterName("test", new(testService)); err != nil { panic(err) diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index f74b7fd08bb4..b805ed20234f 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -203,7 +203,7 @@ func TestClientWebsocketPing(t *testing.T) { // This checks that the websocket transport can deal with large messages. 
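// A minimal sketch of the new constructor and limits in rpc/server.go above (the values are
// illustrative; passing 0/0 and leaving the batch limit unset keeps the previous behaviour of one
// goroutine per request and unbounded batches):
//
//	srv := NewServer(40, 30*time.Second) // 40 execution-pool workers, 30s per-task timeout
//	srv.SetRPCBatchLimit(100)            // batches larger than 100 requests are rejected
//	defer srv.Stop()
//	_ = srv.RegisterName("test", new(testService))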
func TestClientWebsocketLargeMessage(t *testing.T) { var ( - srv = NewServer() + srv = NewServer(0, 0) httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") ) diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index d059956e6a4d..e6e8188ce001 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -7,6 +7,7 @@ import ( "context" "crypto/ecdsa" "encoding/hex" + "fmt" "io" "math/big" "os" @@ -391,12 +392,18 @@ func TestInsertingSpanSizeBlocks(t *testing.T) { currentValidators := []*valset.Validator{valset.NewValidator(addr, 10)} + spanner := getMockedSpanner(t, currentValidators) + _bor.SetSpanner(spanner) + // Insert sprintSize # of blocks so that span is fetched at the start of a new sprint for i := uint64(1); i <= spanSize; i++ { block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, currentValidators) insertNewBlock(t, chain, block) } + spanner = getMockedSpanner(t, currentSpan.ValidatorSet.Validators) + _bor.SetSpanner(spanner) + validators, err := _bor.GetCurrentValidators(context.Background(), block.Hash(), spanSize) // check validator set at the first block of new span if err != nil { t.Fatalf("%s", err) @@ -426,6 +433,9 @@ func TestFetchStateSyncEvents(t *testing.T) { currentValidators := []*valset.Validator{valset.NewValidator(addr, 10)} + spanner := getMockedSpanner(t, currentValidators) + _bor.SetSpanner(spanner) + // Insert sprintSize # of blocks so that span is fetched at the start of a new sprint for i := uint64(1); i < sprintSize; i++ { if IsSpanEnd(i) { @@ -458,9 +468,21 @@ func TestFetchStateSyncEvents(t *testing.T) { _bor.SetHeimdallClient(h) block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, res.Result.ValidatorSet.Validators) + + // Validate the state sync transactions set by consensus + validateStateSyncEvents(t, eventRecords, chain.GetStateSync()) + insertNewBlock(t, chain, block) } +func validateStateSyncEvents(t *testing.T, expected []*clerk.EventRecordWithTime, got []*types.StateSyncData) { + require.Equal(t, len(expected), len(got), "number of state sync events should be equal") + + for i := 0; i < len(expected); i++ { + require.Equal(t, expected[i].ID, got[i].ID, fmt.Sprintf("state sync ids should be equal - index: %d, expected: %d, got: %d", i, expected[i].ID, got[i].ID)) + } +} + func TestFetchStateSyncEvents_2(t *testing.T) { init := buildEthereumInstance(t, rawdb.NewMemoryDatabase()) chain := init.ethereum.BlockChain() @@ -515,6 +537,9 @@ func TestFetchStateSyncEvents_2(t *testing.T) { currentValidators = []*valset.Validator{valset.NewValidator(addr, 10)} } + spanner := getMockedSpanner(t, currentValidators) + _bor.SetSpanner(spanner) + block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, currentValidators) insertNewBlock(t, chain, block) } @@ -541,6 +566,9 @@ func TestFetchStateSyncEvents_2(t *testing.T) { currentValidators = []*valset.Validator{valset.NewValidator(addr, 10)} } + spanner := getMockedSpanner(t, currentValidators) + _bor.SetSpanner(spanner) + block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, res.Result.ValidatorSet.Validators) insertNewBlock(t, chain, block) } @@ -567,6 +595,8 @@ func TestOutOfTurnSigning(t *testing.T) { h.EXPECT().Close().AnyTimes() + spanner := getMockedSpanner(t, heimdallSpan.ValidatorSet.Validators) + _bor.SetSpanner(spanner) _bor.SetHeimdallClient(h) db := init.ethereum.ChainDb() @@ -1069,6 +1099,9 @@ func TestJaipurFork(t *testing.T) 
{ res, _ := loadSpanFromFile(t) + spanner := getMockedSpanner(t, res.Result.ValidatorSet.Validators) + _bor.SetSpanner(spanner) + for i := uint64(1); i < sprintSize; i++ { block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, res.Result.ValidatorSet.Validators) insertNewBlock(t, chain, block) diff --git a/tests/bor/helper.go b/tests/bor/helper.go index 64d5c299ac6e..c4b45f970de0 100644 --- a/tests/bor/helper.go +++ b/tests/bor/helper.go @@ -352,6 +352,17 @@ func getMockedHeimdallClient(t *testing.T, heimdallSpan *span.HeimdallSpan) (*mo return h, ctrl } +func getMockedSpanner(t *testing.T, validators []*valset.Validator) *bor.MockSpanner { + t.Helper() + + spanner := bor.NewMockSpanner(gomock.NewController(t)) + spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return(validators, nil).AnyTimes() + spanner.EXPECT().GetCurrentValidatorsByBlockNrOrHash(gomock.Any(), gomock.Any(), gomock.Any()).Return(validators, nil).AnyTimes() + spanner.EXPECT().GetCurrentSpan(gomock.Any(), gomock.Any()).Return(&span.Span{0, 0, 0}, nil).AnyTimes() + spanner.EXPECT().CommitSpan(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + return spanner +} + func generateFakeStateSyncEvents(sample *clerk.EventRecordWithTime, count int) []*clerk.EventRecordWithTime { events := make([]*clerk.EventRecordWithTime, count) event := *sample @@ -360,7 +371,7 @@ func generateFakeStateSyncEvents(sample *clerk.EventRecordWithTime, count int) [ *events[0] = event for i := 1; i < count; i++ { - event.ID = uint64(i) + event.ID = uint64(i + 1) event.Time = event.Time.Add(1 * time.Second) events[i] = &clerk.EventRecordWithTime{} *events[i] = event
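// Note on the ID change above (descriptive; it assumes the sample record passed in carries ID 1):
// generating IDs as i+1 makes the fake records a contiguous 1-based sequence (1, 2, ..., count)
// instead of repeating ID 1 at index 1, which lines up with what validateStateSyncEvents in
// bor_test.go above compares against chain.GetStateSync().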