diff --git a/.github/matic-cli-config.yml b/.github/matic-cli-config.yml
index 7233eb0497..8c31e17dc6 100644
--- a/.github/matic-cli-config.yml
+++ b/.github/matic-cli-config.yml
@@ -10,8 +10,12 @@ blockNumber:
- '0'
blockTime:
- '2'
-numOfValidators: 3
-numOfNonValidators: 0
+numOfBorValidators: 3
+numOfBorSentries: 0
+numOfBorArchiveNodes: 0
+numOfErigonValidators: 0
+numOfErigonSentries: 0
+numOfErigonArchiveNodes: 0
ethURL: http://ganache:9545
ethHostUser: ubuntu
devnetType: docker
@@ -19,4 +23,3 @@ borDockerBuildContext: "../../bor"
heimdallDockerBuildContext: "https://github.com/maticnetwork/heimdall.git#develop"
sprintSizeBlockNumber:
- '0'
-numOfArchiveNodes: 0
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index c467800e58..7b58af0bc0 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -23,6 +23,8 @@ In case this PR includes changes that must be applied only to a subset of nodes,
- [ ] I have added at least 2 reviewer or the whole pos-v1 team
- [ ] I have added sufficient documentation in code
- [ ] I will be resolving comments - if any - by pushing each fix in a separate commit and linking the commit hash in the comment reply
+- [ ] Created a task in Jira and informed the team about the required implementation in the Erigon client (if applicable)
+- [ ] If this PR includes RPC method changes, the Notion documentation has been updated
# Cross repository changes
diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml
index f2aef42485..bad6be092c 100644
--- a/.github/workflows/packager.yml
+++ b/.github/workflows/packager.yml
@@ -61,6 +61,116 @@ jobs:
- name: Copying systemd file
run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor/lib/systemd/system/bor.service
+ - name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/systemd/bor_bootnode.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mumbai
+
+ - name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/systemd/bor_bootnode.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.amd64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: amd64
+ NODE: bootnode
+ NETWORK: mainnet
+
- name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }}
run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
env:
@@ -416,6 +526,116 @@ jobs:
- name: Updating the control file to use with the arm64 profile
run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor/DEBIAN/control
+ - name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/testnet-v4/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/systemd/bor_bootnode.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+ - name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mumbai
+
+ - name: Prepping ${{ env.NETWORK }} ${{ env.NODE }} node for ${{ env.ARCH }}
+ run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Putting toml for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/var/lib/bor/
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying the preinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/preinst packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/preinst
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying the postinst for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postinst.profile packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postinst
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying the prerm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/prerm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/prerm
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying the postrm for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/postrm packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/postrm
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/systemd/bor_bootnode.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Copying profile control file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: cp -rp packaging/templates/package_scripts/control.profile.arm64 packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/DEBIAN/control
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+ - name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
+ run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
+ env:
+ ARCH: arm64
+ NODE: bootnode
+ NETWORK: mainnet
+
- name: Setting up bor for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
run: cp -rp packaging/deb/bor packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
env:
diff --git a/Makefile b/Makefile
index 0f4d4afdd9..eb9c734af0 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,7 @@ GIT_COMMIT ?= $(shell git rev-list -1 HEAD)
PACKAGE = github.com/ethereum/go-ethereum
GO_FLAGS += -buildvcs=false
-GO_LDFLAGS += -ldflags "-X ${PACKAGE}/params.GitCommit=${GIT_COMMIT} "
+GO_LDFLAGS += -ldflags "-X ${PACKAGE}/params.GitCommit=${GIT_COMMIT}"
TESTALL = $$(go list ./... | grep -v go-ethereum/cmd/)
TESTE2E = ./tests/...
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 2431b5644a..57cfd5a438 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -637,8 +637,8 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// about the transaction and calling mechanisms.
vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true})
gasPool := new(core.GasPool).AddGas(math.MaxUint64)
-
- return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb()
+ //nolint:contextcheck
+ return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb(context.Background())
}
// SendTransaction updates the pending block to include the given transaction.
diff --git a/builder/files/config.toml b/builder/files/config.toml
index f2ed492279..2e758bc8ee 100644
--- a/builder/files/config.toml
+++ b/builder/files/config.toml
@@ -71,6 +71,7 @@ syncmode = "full"
# etherbase = "VALIDATOR ADDRESS"
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
# [jsonrpc]
@@ -128,7 +129,7 @@ syncmode = "full"
metrics = true
# expensive = false
# prometheus-addr = "127.0.0.1:7071"
- # opencollector-endpoint = "127.0.0.1:4317"
+ # opencollector-endpoint = ""
# [telemetry.influx]
# influxdb = false
# endpoint = ""
@@ -171,6 +172,10 @@ syncmode = "full"
# period = 0
# gaslimit = 11500000
+# [parallelevm]
+ # enable = true
+ # procs = 8
+
# [pprof]
# pprof = false
# port = 6060
diff --git a/builder/files/genesis-mainnet-v1.json b/builder/files/genesis-mainnet-v1.json
index b01313bd57..d4c89c67f0 100644
--- a/builder/files/genesis-mainnet-v1.json
+++ b/builder/files/genesis-mainnet-v1.json
@@ -16,6 +16,11 @@
"bor": {
"jaipurBlock": 23850000,
"delhiBlock": 38189056,
+ "parallelUniverseBlock": 0,
+ "indoreBlock": 44934656,
+ "stateSyncConfirmationDelay": {
+ "44934656": 128
+ },
"period": {
"0": 2
},
diff --git a/builder/files/genesis-testnet-v4.json b/builder/files/genesis-testnet-v4.json
index fe066411a3..34407a391a 100644
--- a/builder/files/genesis-testnet-v4.json
+++ b/builder/files/genesis-testnet-v4.json
@@ -16,6 +16,11 @@
"bor": {
"jaipurBlock": 22770000,
"delhiBlock": 29638656,
+ "parallelUniverseBlock": 0,
+ "indoreBlock": 37075456,
+ "stateSyncConfirmationDelay": {
+ "37075456": 128
+ },
"period": {
"0": 2,
"25275000": 5,
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index c848b953f8..ca0dd5b0d4 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -173,7 +173,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig)
// (ret []byte, usedGas uint64, failed bool, err error)
- msgResult, err := core.ApplyMessage(evm, msg, gaspool)
+ msgResult, err := core.ApplyMessage(evm, msg, gaspool, nil)
if err != nil {
statedb.RevertToSnapshot(snapshot)
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err)
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 1772913c0e..fc6848c9ee 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -754,6 +754,13 @@ var (
Usage: "Gas price below which gpo will ignore transactions",
Value: ethconfig.Defaults.GPO.IgnorePrice.Int64(),
}
+ // flag to set the transaction fetcher's txArrivalWait value, which is the maximum waiting
+ // period the fetcher will wait to receive an announced tx before explicitly requesting it
+ TxArrivalWaitFlag = cli.DurationFlag{
+ Name: "txarrivalwait",
+ Usage: "Maximum duration to wait for a transaction before requesting it (defaults to 500ms)",
+ Value: node.DefaultConfig.P2P.TxArrivalWait,
+ }
// Metrics flags
MetricsEnabledFlag = cli.BoolFlag{
@@ -1288,6 +1295,10 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
cfg.NoDiscovery = true
cfg.DiscoveryV5 = false
}
+
+ if ctx.GlobalIsSet(TxArrivalWaitFlag.Name) {
+ cfg.TxArrivalWait = TxArrivalWaitFlag.Value
+ }
}
// SetNodeConfig applies node-related command line flags to the config.
diff --git a/consensus/bor/api.go b/consensus/bor/api.go
index 26d1efdaf1..6d72e309e3 100644
--- a/consensus/bor/api.go
+++ b/consensus/bor/api.go
@@ -75,13 +75,34 @@ func rankMapDifficulties(values map[common.Address]uint64) []difficultiesKV {
}
// GetSnapshotProposerSequence retrieves the in-turn signers of all sprints in a span
-func (api *API) GetSnapshotProposerSequence(number *rpc.BlockNumber) (BlockSigners, error) {
- snapNumber := *number - 1
+func (api *API) GetSnapshotProposerSequence(blockNrOrHash *rpc.BlockNumberOrHash) (BlockSigners, error) {
+ var header *types.Header
+ //nolint:nestif
+ if blockNrOrHash == nil {
+ header = api.chain.CurrentHeader()
+ } else {
+ if blockNr, ok := blockNrOrHash.Number(); ok {
+ if blockNr == rpc.LatestBlockNumber {
+ header = api.chain.CurrentHeader()
+ } else {
+ header = api.chain.GetHeaderByNumber(uint64(blockNr))
+ }
+ } else {
+ if blockHash, ok := blockNrOrHash.Hash(); ok {
+ header = api.chain.GetHeaderByHash(blockHash)
+ }
+ }
+ }
- var difficulties = make(map[common.Address]uint64)
+ if header == nil {
+ return BlockSigners{}, errUnknownBlock
+ }
+ snapNumber := rpc.BlockNumber(header.Number.Int64() - 1)
snap, err := api.GetSnapshot(&snapNumber)
+ var difficulties = make(map[common.Address]uint64)
+
if err != nil {
return BlockSigners{}, err
}
@@ -101,7 +122,7 @@ func (api *API) GetSnapshotProposerSequence(number *rpc.BlockNumber) (BlockSigne
rankedDifficulties := rankMapDifficulties(difficulties)
- author, err := api.GetAuthor(number)
+ author, err := api.GetAuthor(blockNrOrHash)
if err != nil {
return BlockSigners{}, err
}
@@ -117,9 +138,31 @@ func (api *API) GetSnapshotProposerSequence(number *rpc.BlockNumber) (BlockSigne
}
// GetSnapshotProposer retrieves the in-turn signer at a given block.
-func (api *API) GetSnapshotProposer(number *rpc.BlockNumber) (common.Address, error) {
- *number -= 1
- snap, err := api.GetSnapshot(number)
+func (api *API) GetSnapshotProposer(blockNrOrHash *rpc.BlockNumberOrHash) (common.Address, error) {
+ var header *types.Header
+ //nolint:nestif
+ if blockNrOrHash == nil {
+ header = api.chain.CurrentHeader()
+ } else {
+ if blockNr, ok := blockNrOrHash.Number(); ok {
+ if blockNr == rpc.LatestBlockNumber {
+ header = api.chain.CurrentHeader()
+ } else {
+ header = api.chain.GetHeaderByNumber(uint64(blockNr))
+ }
+ } else {
+ if blockHash, ok := blockNrOrHash.Hash(); ok {
+ header = api.chain.GetHeaderByHash(blockHash)
+ }
+ }
+ }
+
+ if header == nil {
+ return common.Address{}, errUnknownBlock
+ }
+
+ snapNumber := rpc.BlockNumber(header.Number.Int64() - 1)
+ snap, err := api.GetSnapshot(&snapNumber)
if err != nil {
return common.Address{}, err
@@ -129,14 +172,26 @@ func (api *API) GetSnapshotProposer(number *rpc.BlockNumber) (common.Address, er
}
// GetAuthor retrieves the author a block.
-func (api *API) GetAuthor(number *rpc.BlockNumber) (*common.Address, error) {
+func (api *API) GetAuthor(blockNrOrHash *rpc.BlockNumberOrHash) (*common.Address, error) {
// Retrieve the requested block number (or current if none requested)
var header *types.Header
- if number == nil || *number == rpc.LatestBlockNumber {
+
+ //nolint:nestif
+ if blockNrOrHash == nil {
header = api.chain.CurrentHeader()
} else {
- header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
+ if blockNr, ok := blockNrOrHash.Number(); ok {
+ header = api.chain.GetHeaderByNumber(uint64(blockNr))
+ if blockNr == rpc.LatestBlockNumber {
+ header = api.chain.CurrentHeader()
+ }
+ } else {
+ if blockHash, ok := blockNrOrHash.Hash(); ok {
+ header = api.chain.GetHeaderByHash(blockHash)
+ }
+ }
}
+
// Ensure we have an actually valid block and return its snapshot
if header == nil {
return nil, errUnknownBlock
diff --git a/consensus/bor/api/caller.go b/consensus/bor/api/caller.go
index d5fe259c97..49c06b3f73 100644
--- a/consensus/bor/api/caller.go
+++ b/consensus/bor/api/caller.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/rpc"
)
@@ -11,4 +12,5 @@ import (
//go:generate mockgen -destination=./caller_mock.go -package=api . Caller
type Caller interface {
Call(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *ethapi.StateOverride) (hexutil.Bytes, error)
+ CallWithState(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, state *state.StateDB, overrides *ethapi.StateOverride) (hexutil.Bytes, error)
}
diff --git a/consensus/bor/api/caller_mock.go b/consensus/bor/api/caller_mock.go
index 940c99d178..e734d6c899 100644
--- a/consensus/bor/api/caller_mock.go
+++ b/consensus/bor/api/caller_mock.go
@@ -9,6 +9,7 @@ import (
reflect "reflect"
hexutil "github.com/ethereum/go-ethereum/common/hexutil"
+ state "github.com/ethereum/go-ethereum/core/state"
ethapi "github.com/ethereum/go-ethereum/internal/ethapi"
rpc "github.com/ethereum/go-ethereum/rpc"
gomock "github.com/golang/mock/gomock"
@@ -51,3 +52,18 @@ func (mr *MockCallerMockRecorder) Call(arg0, arg1, arg2, arg3 interface{}) *gomo
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Call", reflect.TypeOf((*MockCaller)(nil).Call), arg0, arg1, arg2, arg3)
}
+
+// CallWithState mocks base method.
+func (m *MockCaller) CallWithState(arg0 context.Context, arg1 ethapi.TransactionArgs, arg2 rpc.BlockNumberOrHash, arg3 *state.StateDB, arg4 *ethapi.StateOverride) (hexutil.Bytes, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CallWithState", arg0, arg1, arg2, arg3, arg4)
+ ret0, _ := ret[0].(hexutil.Bytes)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CallWithState indicates an expected call of CallWithState.
+func (mr *MockCallerMockRecorder) CallWithState(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallWithState", reflect.TypeOf((*MockCaller)(nil).CallWithState), arg0, arg1, arg2, arg3, arg4)
+}
diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go
index 04953bbc3c..44dd48f867 100644
--- a/consensus/bor/bor.go
+++ b/consensus/bor/bor.go
@@ -1184,22 +1184,44 @@ func (c *Bor) CommitStates(
fetchStart := time.Now()
number := header.Number.Uint64()
- _lastStateID, err := c.GenesisContractsClient.LastStateId(number - 1)
- if err != nil {
- return nil, err
+ var (
+ lastStateIDBig *big.Int
+ from uint64
+ to time.Time
+ err error
+ )
+
+ if c.config.IsIndore(header.Number) {
+ // Fetch the LastStateId from contract via current state instance
+ lastStateIDBig, err = c.GenesisContractsClient.LastStateId(state.Copy(), number-1, header.ParentHash)
+ if err != nil {
+ return nil, err
+ }
+
+ stateSyncDelay := c.config.CalculateStateSyncDelay(number)
+ to = time.Unix(int64(header.Time-stateSyncDelay), 0)
+ log.Debug("Post Indore", "lastStateIDBig", lastStateIDBig, "to", to, "stateSyncDelay", stateSyncDelay)
+ } else {
+ lastStateIDBig, err = c.GenesisContractsClient.LastStateId(nil, number-1, header.ParentHash)
+ if err != nil {
+ return nil, err
+ }
+
+ to = time.Unix(int64(chain.Chain.GetHeaderByNumber(number-c.config.CalculateSprint(number)).Time), 0)
+ log.Debug("Pre Indore", "lastStateIDBig", lastStateIDBig, "to", to)
}
- to := time.Unix(int64(chain.Chain.GetHeaderByNumber(number-c.config.CalculateSprint(number)).Time), 0)
- lastStateID := _lastStateID.Uint64()
+ lastStateID := lastStateIDBig.Uint64()
+ from = lastStateID + 1
log.Info(
"Fetching state updates from Heimdall",
- "fromID", lastStateID+1,
+ "fromID", from,
"to", to.Format(time.RFC3339))
- eventRecords, err := c.HeimdallClient.StateSyncEvents(ctx, lastStateID+1, to.Unix())
+ eventRecords, err := c.HeimdallClient.StateSyncEvents(ctx, from, to.Unix())
if err != nil {
- log.Error("Error occurred when fetching state sync events", "stateID", lastStateID+1, "error", err)
+ log.Error("Error occurred when fetching state sync events", "fromID", from, "to", to.Unix(), "err", err)
}
if c.config.OverrideStateSyncRecords != nil {
@@ -1222,7 +1244,7 @@ func (c *Bor) CommitStates(
}
if err = validateEventRecord(eventRecord, number, to, lastStateID, chainID); err != nil {
- log.Error("while validating event record", "block", number, "to", to, "stateID", lastStateID, "error", err.Error())
+ log.Error("while validating event record", "block", number, "to", to, "stateID", lastStateID+1, "error", err.Error())
break
}
diff --git a/consensus/bor/contract/client.go b/consensus/bor/contract/client.go
index 9e9e1392dd..fa8ad215f2 100644
--- a/consensus/bor/contract/client.go
+++ b/consensus/bor/contract/client.go
@@ -100,8 +100,8 @@ func (gc *GenesisContractsClient) CommitState(
return gasUsed, nil
}
-func (gc *GenesisContractsClient) LastStateId(snapshotNumber uint64) (*big.Int, error) {
- blockNr := rpc.BlockNumber(snapshotNumber)
+func (gc *GenesisContractsClient) LastStateId(state *state.StateDB, number uint64, hash common.Hash) (*big.Int, error) {
+ blockNr := rpc.BlockNumber(number)
const method = "lastStateId"
@@ -116,11 +116,13 @@ func (gc *GenesisContractsClient) LastStateId(snapshotNumber uint64) (*big.Int,
toAddress := common.HexToAddress(gc.StateReceiverContract)
gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
- result, err := gc.ethAPI.Call(context.Background(), ethapi.TransactionArgs{
+ // Do a call with state so that we can fetch the last state ID from a given (incoming)
+ // state instead of local(canonical) chain.
+ result, err := gc.ethAPI.CallWithState(context.Background(), ethapi.TransactionArgs{
Gas: &gas,
To: &toAddress,
Data: &msgData,
- }, rpc.BlockNumberOrHash{BlockNumber: &blockNr}, nil)
+ }, rpc.BlockNumberOrHash{BlockNumber: &blockNr, BlockHash: &hash}, state, nil)
if err != nil {
return nil, err
}
diff --git a/consensus/bor/genesis.go b/consensus/bor/genesis.go
index 33de53f9ba..9519b18847 100644
--- a/consensus/bor/genesis.go
+++ b/consensus/bor/genesis.go
@@ -3,6 +3,7 @@ package bor
import (
"math/big"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/bor/clerk"
"github.com/ethereum/go-ethereum/consensus/bor/statefull"
"github.com/ethereum/go-ethereum/core/state"
@@ -12,5 +13,5 @@ import (
//go:generate mockgen -destination=./genesis_contract_mock.go -package=bor . GenesisContract
type GenesisContract interface {
CommitState(event *clerk.EventRecordWithTime, state *state.StateDB, header *types.Header, chCtx statefull.ChainContext) (uint64, error)
- LastStateId(snapshotNumber uint64) (*big.Int, error)
+ LastStateId(state *state.StateDB, number uint64, hash common.Hash) (*big.Int, error)
}
diff --git a/consensus/bor/genesis_contract_mock.go b/consensus/bor/genesis_contract_mock.go
index dfe9390509..0296cbe905 100644
--- a/consensus/bor/genesis_contract_mock.go
+++ b/consensus/bor/genesis_contract_mock.go
@@ -8,6 +8,7 @@ import (
big "math/big"
reflect "reflect"
+ common "github.com/ethereum/go-ethereum/common"
clerk "github.com/ethereum/go-ethereum/consensus/bor/clerk"
statefull "github.com/ethereum/go-ethereum/consensus/bor/statefull"
state "github.com/ethereum/go-ethereum/core/state"
@@ -54,16 +55,16 @@ func (mr *MockGenesisContractMockRecorder) CommitState(arg0, arg1, arg2, arg3 in
}
// LastStateId mocks base method.
-func (m *MockGenesisContract) LastStateId(arg0 uint64) (*big.Int, error) {
+func (m *MockGenesisContract) LastStateId(arg0 *state.StateDB, arg1 uint64, arg2 common.Hash) (*big.Int, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "LastStateId", arg0)
+ ret := m.ctrl.Call(m, "LastStateId", arg0, arg1, arg2)
ret0, _ := ret[0].(*big.Int)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LastStateId indicates an expected call of LastStateId.
-func (mr *MockGenesisContractMockRecorder) LastStateId(arg0 interface{}) *gomock.Call {
+func (mr *MockGenesisContractMockRecorder) LastStateId(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastStateId", reflect.TypeOf((*MockGenesisContract)(nil).LastStateId), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastStateId", reflect.TypeOf((*MockGenesisContract)(nil).LastStateId), arg0, arg1, arg2)
}
diff --git a/consensus/bor/span_mock.go b/consensus/bor/span_mock.go
index 910e81716c..099807161c 100644
--- a/consensus/bor/span_mock.go
+++ b/consensus/bor/span_mock.go
@@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: consensus/bor/span.go
+// Source: github.com/ethereum/go-ethereum/consensus/bor (interfaces: Spanner)
// Package bor is a generated GoMock package.
package bor
@@ -42,60 +42,60 @@ func (m *MockSpanner) EXPECT() *MockSpannerMockRecorder {
}
// CommitSpan mocks base method.
-func (m *MockSpanner) CommitSpan(ctx context.Context, heimdallSpan span.HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error {
+func (m *MockSpanner) CommitSpan(arg0 context.Context, arg1 span.HeimdallSpan, arg2 *state.StateDB, arg3 *types.Header, arg4 core.ChainContext) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CommitSpan", ctx, heimdallSpan, state, header, chainContext)
+ ret := m.ctrl.Call(m, "CommitSpan", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// CommitSpan indicates an expected call of CommitSpan.
-func (mr *MockSpannerMockRecorder) CommitSpan(ctx, heimdallSpan, state, header, chainContext interface{}) *gomock.Call {
+func (mr *MockSpannerMockRecorder) CommitSpan(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), ctx, heimdallSpan, state, header, chainContext)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), arg0, arg1, arg2, arg3, arg4)
}
// GetCurrentSpan mocks base method.
-func (m *MockSpanner) GetCurrentSpan(ctx context.Context, headerHash common.Hash) (*span.Span, error) {
+func (m *MockSpanner) GetCurrentSpan(arg0 context.Context, arg1 common.Hash) (*span.Span, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetCurrentSpan", ctx, headerHash)
+ ret := m.ctrl.Call(m, "GetCurrentSpan", arg0, arg1)
ret0, _ := ret[0].(*span.Span)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetCurrentSpan indicates an expected call of GetCurrentSpan.
-func (mr *MockSpannerMockRecorder) GetCurrentSpan(ctx, headerHash interface{}) *gomock.Call {
+func (mr *MockSpannerMockRecorder) GetCurrentSpan(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), ctx, headerHash)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), arg0, arg1)
}
// GetCurrentValidatorsByBlockNrOrHash mocks base method.
-func (m *MockSpanner) GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) {
+func (m *MockSpanner) GetCurrentValidatorsByBlockNrOrHash(arg0 context.Context, arg1 rpc.BlockNumberOrHash, arg2 uint64) ([]*valset.Validator, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetCurrentValidatorsByBlockNrOrHash", ctx, blockNrOrHash, blockNumber)
+ ret := m.ctrl.Call(m, "GetCurrentValidatorsByBlockNrOrHash", arg0, arg1, arg2)
ret0, _ := ret[0].([]*valset.Validator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetCurrentValidatorsByBlockNrOrHash indicates an expected call of GetCurrentValidatorsByBlockNrOrHash.
-func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByBlockNrOrHash(ctx, blockNrOrHash, blockNumber interface{}) *gomock.Call {
+func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByBlockNrOrHash(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByBlockNrOrHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByBlockNrOrHash), ctx, blockNrOrHash, blockNumber)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByBlockNrOrHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByBlockNrOrHash), arg0, arg1, arg2)
}
// GetCurrentValidatorsByHash mocks base method.
-func (m *MockSpanner) GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) {
+func (m *MockSpanner) GetCurrentValidatorsByHash(arg0 context.Context, arg1 common.Hash, arg2 uint64) ([]*valset.Validator, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetCurrentValidatorsByHash", ctx, headerHash, blockNumber)
+ ret := m.ctrl.Call(m, "GetCurrentValidatorsByHash", arg0, arg1, arg2)
ret0, _ := ret[0].([]*valset.Validator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetCurrentValidatorsByHash indicates an expected call of GetCurrentValidatorsByHash.
-func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByHash(ctx, headerHash, blockNumber interface{}) *gomock.Call {
+func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByHash(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByHash), ctx, headerHash, blockNumber)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByHash), arg0, arg1, arg2)
}
diff --git a/consensus/bor/statefull/processor.go b/consensus/bor/statefull/processor.go
index 0fe9baeeba..a78359a309 100644
--- a/consensus/bor/statefull/processor.go
+++ b/consensus/bor/statefull/processor.go
@@ -83,6 +83,7 @@ func ApplyMessage(
msg.Data(),
msg.Gas(),
msg.Value(),
+ nil,
)
// Update the state with pending changes
if err != nil {
@@ -104,6 +105,7 @@ func ApplyBorMessage(vmenv vm.EVM, msg Callmsg) (*core.ExecutionResult, error) {
msg.Data(),
msg.Gas(),
msg.Value(),
+ nil,
)
// Update the state with pending changes
if err != nil {
diff --git a/core/blockchain.go b/core/blockchain.go
index 680cb7dce6..6a44a3acff 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -42,6 +42,7 @@ import (
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/common/tracing"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/core/blockstm"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
@@ -76,11 +77,13 @@ var (
snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
snapshotCommitTimer = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
- blockImportTimer = metrics.NewRegisteredMeter("chain/imports", nil)
- blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
- blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
- blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
- blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
+ blockImportTimer = metrics.NewRegisteredMeter("chain/imports", nil)
+ blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+ blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
+ blockExecutionParallelCounter = metrics.NewRegisteredCounter("chain/execution/parallel", nil)
+ blockExecutionSerialCounter = metrics.NewRegisteredCounter("chain/execution/serial", nil)
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
@@ -216,12 +219,13 @@ type BlockChain struct {
running int32 // 0 if chain is running, 1 when stopped
procInterrupt int32 // interrupt signaler for block processing
- engine consensus.Engine
- validator Validator // Block and state validator interface
- prefetcher Prefetcher
- processor Processor // Block transaction processor interface
- forker *ForkChoice
- vmConfig vm.Config
+ engine consensus.Engine
+ validator Validator // Block and state validator interface
+ prefetcher Prefetcher
+ processor Processor // Block transaction processor interface
+ parallelProcessor Processor // Parallel block transaction processor interface
+ forker *ForkChoice
+ vmConfig vm.Config
// Bor related changes
borReceiptsCache *lru.Cache // Cache for the most recent bor receipt receipts per block
@@ -435,6 +439,93 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
return bc, nil
}
+// Similar to NewBlockChain, this function creates a new blockchain object, but with a parallel state processor
+func NewParallelBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64, checker ethereum.ChainValidator) (*BlockChain, error) {
+ bc, err := NewBlockChain(db, cacheConfig, chainConfig, engine, vmConfig, shouldPreserve, txLookupLimit, checker)
+
+ if err != nil {
+ return nil, err
+ }
+
+ bc.parallelProcessor = NewParallelStateProcessor(chainConfig, bc, engine)
+
+ return bc, nil
+}
+
+func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header) (types.Receipts, []*types.Log, uint64, *state.StateDB, error) {
+ // Process the block using processor and parallelProcessor at the same time, take the one which finishes first, cancel the other, and return the result
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ type Result struct {
+ receipts types.Receipts
+ logs []*types.Log
+ usedGas uint64
+ err error
+ statedb *state.StateDB
+ counter metrics.Counter
+ }
+
+ resultChan := make(chan Result, 2)
+
+ processorCount := 0
+
+ if bc.parallelProcessor != nil {
+ parallelStatedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
+ if err != nil {
+ return nil, nil, 0, nil, err
+ }
+
+ processorCount++
+
+ go func() {
+ parallelStatedb.StartPrefetcher("chain")
+ receipts, logs, usedGas, err := bc.parallelProcessor.Process(block, parallelStatedb, bc.vmConfig, ctx)
+ resultChan <- Result{receipts, logs, usedGas, err, parallelStatedb, blockExecutionParallelCounter}
+ }()
+ }
+
+ if bc.processor != nil {
+ statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
+ if err != nil {
+ return nil, nil, 0, nil, err
+ }
+
+ processorCount++
+
+ go func() {
+ statedb.StartPrefetcher("chain")
+ receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig, ctx)
+ resultChan <- Result{receipts, logs, usedGas, err, statedb, blockExecutionSerialCounter}
+ }()
+ }
+
+ result := <-resultChan
+
+ if _, ok := result.err.(blockstm.ParallelExecFailedError); ok {
+ log.Warn("Parallel state processor failed", "err", result.err)
+
+ // If the parallel processor failed, we will fallback to the serial processor if enabled
+ if processorCount == 2 {
+ result.statedb.StopPrefetcher()
+ result = <-resultChan
+ processorCount--
+ }
+ }
+
+ result.counter.Inc(1)
+
+ // Make sure we are not leaking any prefetchers
+ if processorCount == 2 {
+ go func() {
+ second_result := <-resultChan
+ second_result.statedb.StopPrefetcher()
+ }()
+ }
+
+ return result.receipts, result.logs, result.usedGas, result.statedb, result.err
+}
+
// empty returns an indicator whether the blockchain is empty.
// Note, it's a special case that we connect a non-empty ancient
// database with an empty node, so that we can plugin the ancient
@@ -1761,14 +1852,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
if parent == nil {
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}
- statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
- if err != nil {
- return it.index, err
- }
-
- // Enable prefetching to pull in trie node paths while processing transactions
- statedb.StartPrefetcher("chain")
- activeState = statedb
// If we have a followup block, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes.
@@ -1790,7 +1873,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
// Process block using the parent state as reference point
substart := time.Now()
- receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
+ receipts, logs, usedGas, statedb, err := bc.ProcessBlock(block, parent)
+ activeState = statedb
if err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index fa6b61225e..88093accab 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -33,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
@@ -123,9 +122,11 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
if full {
cur := blockchain.CurrentBlock()
tdPre = blockchain.GetTd(cur.Hash(), cur.NumberU64())
+
if err := testBlockChainImport(blockChainB, blockchain); err != nil {
t.Fatalf("failed to import forked block chain: %v", err)
}
+
last := blockChainB[len(blockChainB)-1]
tdPost = blockchain.GetTd(last.Hash(), last.NumberU64())
} else {
@@ -156,11 +157,9 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
}
return err
}
- statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache, nil)
- if err != nil {
- return err
- }
- receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
+
+ receipts, _, usedGas, statedb, err := blockchain.ProcessBlock(block, blockchain.GetBlockByHash(block.ParentHash()).Header())
+
if err != nil {
blockchain.reportBlock(block, receipts, err)
return err
@@ -180,6 +179,25 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
return nil
}
+func TestParallelBlockChainImport(t *testing.T) {
+ t.Parallel()
+
+ db, blockchain, err := newCanonical(ethash.NewFaker(), 10, true)
+ blockchain.parallelProcessor = NewParallelStateProcessor(blockchain.chainConfig, blockchain, blockchain.engine)
+
+ if err != nil {
+ t.Fatalf("failed to make new canonical chain: %v", err)
+ }
+
+ defer blockchain.Stop()
+
+ blockChainB := makeFakeNonEmptyBlockChain(blockchain.CurrentBlock(), 5, ethash.NewFaker(), db, forkSeed, 5)
+
+ if err := testBlockChainImport(blockChainB, blockchain); err == nil {
+ t.Fatalf("expected error for bad tx")
+ }
+}
+
// testHeaderChainImport tries to process a chain of header, writing them into
// the database if successful.
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
diff --git a/core/blockstm/dag.go b/core/blockstm/dag.go
new file mode 100644
index 0000000000..47bd0685a3
--- /dev/null
+++ b/core/blockstm/dag.go
@@ -0,0 +1,201 @@
+package blockstm
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/heimdalr/dag"
+
+ "github.com/ethereum/go-ethereum/log"
+)
+
+type DAG struct {
+ *dag.DAG
+}
+
+type TxDep struct {
+ Index int
+ ReadList []ReadDescriptor
+ FullWriteList [][]WriteDescriptor
+}
+
+func HasReadDep(txFrom TxnOutput, txTo TxnInput) bool {
+ reads := make(map[Key]bool)
+
+ for _, v := range txTo {
+ reads[v.Path] = true
+ }
+
+ for _, rd := range txFrom {
+ if _, ok := reads[rd.Path]; ok {
+ return true
+ }
+ }
+
+ return false
+}
+
+func BuildDAG(deps TxnInputOutput) (d DAG) {
+ d = DAG{dag.NewDAG()}
+ ids := make(map[int]string)
+
+ for i := len(deps.inputs) - 1; i > 0; i-- {
+ txTo := deps.inputs[i]
+
+ var txToId string
+
+ if _, ok := ids[i]; ok {
+ txToId = ids[i]
+ } else {
+ txToId, _ = d.AddVertex(i)
+ ids[i] = txToId
+ }
+
+ for j := i - 1; j >= 0; j-- {
+ txFrom := deps.allOutputs[j]
+
+ if HasReadDep(txFrom, txTo) {
+ var txFromId string
+ if _, ok := ids[j]; ok {
+ txFromId = ids[j]
+ } else {
+ txFromId, _ = d.AddVertex(j)
+ ids[j] = txFromId
+ }
+
+ err := d.AddEdge(txFromId, txToId)
+ if err != nil {
+ log.Warn("Failed to add edge", "from", txFromId, "to", txToId, "err", err)
+ }
+ }
+ }
+ }
+
+ return
+}
+
+func depsHelper(dependencies map[int]map[int]bool, txFrom TxnOutput, txTo TxnInput, i int, j int) map[int]map[int]bool {
+ if HasReadDep(txFrom, txTo) {
+ dependencies[i][j] = true
+
+ for k := range dependencies[i] {
+ _, foundDep := dependencies[j][k]
+
+ if foundDep {
+ delete(dependencies[i], k)
+ }
+ }
+ }
+
+ return dependencies
+}
+
+func UpdateDeps(deps map[int]map[int]bool, t TxDep) map[int]map[int]bool {
+ txTo := t.ReadList
+
+ deps[t.Index] = map[int]bool{}
+
+ for j := 0; j <= t.Index-1; j++ {
+ txFrom := t.FullWriteList[j]
+
+ deps = depsHelper(deps, txFrom, txTo, t.Index, j)
+ }
+
+ return deps
+}
+
+func GetDep(deps TxnInputOutput) map[int]map[int]bool {
+ newDependencies := map[int]map[int]bool{}
+
+ for i := 1; i < len(deps.inputs); i++ {
+ txTo := deps.inputs[i]
+
+ newDependencies[i] = map[int]bool{}
+
+ for j := 0; j <= i-1; j++ {
+ txFrom := deps.allOutputs[j]
+
+ newDependencies = depsHelper(newDependencies, txFrom, txTo, i, j)
+ }
+ }
+
+ return newDependencies
+}
+
+// Find the longest execution path in the DAG
+func (d DAG) LongestPath(stats map[int]ExecutionStat) ([]int, uint64) {
+ prev := make(map[int]int, len(d.GetVertices()))
+
+ for i := 0; i < len(d.GetVertices()); i++ {
+ prev[i] = -1
+ }
+
+ pathWeights := make(map[int]uint64, len(d.GetVertices()))
+
+ maxPath := 0
+ maxPathWeight := uint64(0)
+
+ idxToId := make(map[int]string, len(d.GetVertices()))
+
+ for k, i := range d.GetVertices() {
+ idxToId[i.(int)] = k
+ }
+
+ for i := 0; i < len(idxToId); i++ {
+ parents, _ := d.GetParents(idxToId[i])
+
+ if len(parents) > 0 {
+ for _, p := range parents {
+ weight := pathWeights[p.(int)] + stats[i].End - stats[i].Start
+ if weight > pathWeights[i] {
+ pathWeights[i] = weight
+ prev[i] = p.(int)
+ }
+ }
+ } else {
+ pathWeights[i] = stats[i].End - stats[i].Start
+ }
+
+ if pathWeights[i] > maxPathWeight {
+ maxPath = i
+ maxPathWeight = pathWeights[i]
+ }
+ }
+
+ path := make([]int, 0)
+ for i := maxPath; i != -1; i = prev[i] {
+ path = append(path, i)
+ }
+
+ // Reverse the path so the transactions are in the ascending order
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+ path[i], path[j] = path[j], path[i]
+ }
+
+ return path, maxPathWeight
+}
+
+func (d DAG) Report(stats map[int]ExecutionStat, out func(string)) {
+ longestPath, weight := d.LongestPath(stats)
+
+ serialWeight := uint64(0)
+
+ for i := 0; i < len(d.GetVertices()); i++ {
+ serialWeight += stats[i].End - stats[i].Start
+ }
+
+ makeStrs := func(ints []int) (ret []string) {
+ for _, v := range ints {
+ ret = append(ret, fmt.Sprint(v))
+ }
+
+ return
+ }
+
+ out("Longest execution path:")
+ out(fmt.Sprintf("(%v) %v", len(longestPath), strings.Join(makeStrs(longestPath), "->")))
+
+ out(fmt.Sprintf("Longest path ideal execution time: %v of %v (serial total), %v%%", time.Duration(weight),
+ time.Duration(serialWeight), fmt.Sprintf("%.1f", float64(weight)*100.0/float64(serialWeight))))
+}
diff --git a/core/blockstm/executor.go b/core/blockstm/executor.go
new file mode 100644
index 0000000000..7ce99f9492
--- /dev/null
+++ b/core/blockstm/executor.go
@@ -0,0 +1,641 @@
+package blockstm
+
+import (
+ "container/heap"
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+type ExecResult struct {
+ err error
+ ver Version
+ txIn TxnInput
+ txOut TxnOutput
+ txAllOut TxnOutput
+}
+
+type ExecTask interface {
+ Execute(mvh *MVHashMap, incarnation int) error
+ MVReadList() []ReadDescriptor
+ MVWriteList() []WriteDescriptor
+ MVFullWriteList() []WriteDescriptor
+ Hash() common.Hash
+ Sender() common.Address
+ Settle()
+ Dependencies() []int
+}
+
+type ExecVersionView struct {
+ ver Version
+ et ExecTask
+ mvh *MVHashMap
+ sender common.Address
+}
+
+var NumSpeculativeProcs int = 8
+
+func SetProcs(specProcs int) {
+ NumSpeculativeProcs = specProcs
+}
+
+func (ev *ExecVersionView) Execute() (er ExecResult) {
+ er.ver = ev.ver
+ if er.err = ev.et.Execute(ev.mvh, ev.ver.Incarnation); er.err != nil {
+ return
+ }
+
+ er.txIn = ev.et.MVReadList()
+ er.txOut = ev.et.MVWriteList()
+ er.txAllOut = ev.et.MVFullWriteList()
+
+ return
+}
+
+type ErrExecAbortError struct {
+ Dependency int
+ OriginError error
+}
+
+func (e ErrExecAbortError) Error() string {
+ if e.Dependency >= 0 {
+ return fmt.Sprintf("Execution aborted due to dependency %d", e.Dependency)
+ } else {
+ return "Execution aborted"
+ }
+}
+
+type ParallelExecFailedError struct {
+ Msg string
+}
+
+func (e ParallelExecFailedError) Error() string {
+ return e.Msg
+}
+
+type IntHeap []int
+
+func (h IntHeap) Len() int { return len(h) }
+func (h IntHeap) Less(i, j int) bool { return h[i] < h[j] }
+func (h IntHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
+
+func (h *IntHeap) Push(x any) {
+ // Push and Pop use pointer receivers because they modify the slice's length,
+ // not just its contents.
+ *h = append(*h, x.(int))
+}
+
+func (h *IntHeap) Pop() any {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+
+ return x
+}
+
+type SafeQueue interface {
+ Push(v int, d interface{})
+ Pop() interface{}
+ Len() int
+}
+
+type SafeFIFOQueue struct {
+ c chan interface{}
+}
+
+func NewSafeFIFOQueue(capacity int) *SafeFIFOQueue {
+ return &SafeFIFOQueue{
+ c: make(chan interface{}, capacity),
+ }
+}
+
+func (q *SafeFIFOQueue) Push(v int, d interface{}) {
+ q.c <- d
+}
+
+func (q *SafeFIFOQueue) Pop() interface{} {
+ return <-q.c
+}
+
+func (q *SafeFIFOQueue) Len() int {
+ return len(q.c)
+}
+
+// A thread safe priority queue
+type SafePriorityQueue struct {
+ m sync.Mutex
+ queue *IntHeap
+ data map[int]interface{}
+}
+
+func NewSafePriorityQueue(capacity int) *SafePriorityQueue {
+ q := make(IntHeap, 0, capacity)
+
+ return &SafePriorityQueue{
+ m: sync.Mutex{},
+ queue: &q,
+ data: make(map[int]interface{}, capacity),
+ }
+}
+
+func (pq *SafePriorityQueue) Push(v int, d interface{}) {
+ pq.m.Lock()
+
+ heap.Push(pq.queue, v)
+ pq.data[v] = d
+
+ pq.m.Unlock()
+}
+
+func (pq *SafePriorityQueue) Pop() interface{} {
+ pq.m.Lock()
+ defer pq.m.Unlock()
+
+ v := heap.Pop(pq.queue).(int)
+
+ return pq.data[v]
+}
+
+func (pq *SafePriorityQueue) Len() int {
+ return pq.queue.Len()
+}
+
+type ParallelExecutionResult struct {
+ TxIO *TxnInputOutput
+ Stats *map[int]ExecutionStat
+ Deps *DAG
+ AllDeps map[int]map[int]bool
+}
+
+const numGoProcs = 1
+
+type ParallelExecutor struct {
+ tasks []ExecTask
+
+ // Stores the execution statistics for the last incarnation of each task
+ stats map[int]ExecutionStat
+
+ statsMutex sync.Mutex
+
+ // Channel for tasks that should be prioritized
+ chTasks chan ExecVersionView
+
+ // Channel for speculative tasks
+ chSpeculativeTasks chan struct{}
+
+ // A priority queue that stores speculative tasks
+ specTaskQueue SafeQueue
+
+ // Channel to signal that the result of a transaction could be written to storage
+ chSettle chan int
+
+ // Channel to signal that a transaction has finished executing
+ chResults chan struct{}
+
+ // A priority queue that stores the transaction index of results, so we can validate the results in order
+ resultQueue SafeQueue
+
+ // A wait group to wait for all settling tasks to finish
+ settleWg sync.WaitGroup
+
+ // An integer that tracks the index of last settled transaction
+ lastSettled int
+
+ // For a task that runs only after all of its preceding tasks have finished and passed validation,
+ // its result will be absolutely valid and therefore its validation could be skipped.
+ // This map stores the boolean value indicating whether a task satisfies this condition (absolutely valid).
+ skipCheck map[int]bool
+
+ // Execution tasks stores the state of each execution task
+ execTasks taskStatusManager
+
+ // Validate tasks stores the state of each validation task
+ validateTasks taskStatusManager
+
+ // Stats for debugging purposes
+ cntExec, cntSuccess, cntAbort, cntTotalValidations, cntValidationFail int
+
+ diagExecSuccess, diagExecAbort []int
+
+ // Multi-version hash map
+ mvh *MVHashMap
+
+ // Stores the inputs and outputs of the last incarnation of all transactions
+ lastTxIO *TxnInputOutput
+
+ // Tracks the incarnation number of each transaction
+ txIncarnations []int
+
+ // A map that stores the estimated dependency of a transaction if it is aborted without any known dependency
+ estimateDeps map[int][]int
+
+ // A map that records whether a transaction result has been speculatively validated
+ preValidated map[int]bool
+
+ // Time records when the parallel execution starts
+ begin time.Time
+
+ // Enable profiling
+ profile bool
+
+ // Worker wait group
+ workerWg sync.WaitGroup
+}
+
+type ExecutionStat struct {
+ TxIdx int
+ Incarnation int
+ Start uint64
+ End uint64
+ Worker int
+}
+
+func NewParallelExecutor(tasks []ExecTask, profile bool, metadata bool) *ParallelExecutor {
+ numTasks := len(tasks)
+
+ var resultQueue SafeQueue
+
+ var specTaskQueue SafeQueue
+
+ if metadata {
+ resultQueue = NewSafeFIFOQueue(numTasks)
+ specTaskQueue = NewSafeFIFOQueue(numTasks)
+ } else {
+ resultQueue = NewSafePriorityQueue(numTasks)
+ specTaskQueue = NewSafePriorityQueue(numTasks)
+ }
+
+ pe := &ParallelExecutor{
+ tasks: tasks,
+ stats: make(map[int]ExecutionStat, numTasks),
+ chTasks: make(chan ExecVersionView, numTasks),
+ chSpeculativeTasks: make(chan struct{}, numTasks),
+ chSettle: make(chan int, numTasks),
+ chResults: make(chan struct{}, numTasks),
+ specTaskQueue: specTaskQueue,
+ resultQueue: resultQueue,
+ lastSettled: -1,
+ skipCheck: make(map[int]bool),
+ execTasks: makeStatusManager(numTasks),
+ validateTasks: makeStatusManager(0),
+ diagExecSuccess: make([]int, numTasks),
+ diagExecAbort: make([]int, numTasks),
+ mvh: MakeMVHashMap(),
+ lastTxIO: MakeTxnInputOutput(numTasks),
+ txIncarnations: make([]int, numTasks),
+ estimateDeps: make(map[int][]int),
+ preValidated: make(map[int]bool),
+ begin: time.Now(),
+ profile: profile,
+ }
+
+ return pe
+}
+
+// nolint: gocognit
+func (pe *ParallelExecutor) Prepare() error {
+ prevSenderTx := make(map[common.Address]int)
+
+ for i, t := range pe.tasks {
+ clearPendingFlag := false
+
+ pe.skipCheck[i] = false
+ pe.estimateDeps[i] = make([]int, 0)
+
+ if len(t.Dependencies()) > 0 {
+ for _, val := range t.Dependencies() {
+ clearPendingFlag = true
+
+ pe.execTasks.addDependencies(val, i)
+ }
+
+ if clearPendingFlag {
+ pe.execTasks.clearPending(i)
+
+ clearPendingFlag = false
+ }
+ } else {
+ if tx, ok := prevSenderTx[t.Sender()]; ok {
+ pe.execTasks.addDependencies(tx, i)
+ pe.execTasks.clearPending(i)
+ }
+
+ prevSenderTx[t.Sender()] = i
+ }
+ }
+
+ pe.workerWg.Add(NumSpeculativeProcs + numGoProcs)
+
+ // Launch workers that execute transactions
+ for i := 0; i < NumSpeculativeProcs+numGoProcs; i++ {
+ go func(procNum int) {
+ defer pe.workerWg.Done()
+
+ doWork := func(task ExecVersionView) {
+ start := time.Duration(0)
+ if pe.profile {
+ start = time.Since(pe.begin)
+ }
+
+ res := task.Execute()
+
+ if res.err == nil {
+ pe.mvh.FlushMVWriteSet(res.txAllOut)
+ }
+
+ pe.resultQueue.Push(res.ver.TxnIndex, res)
+ pe.chResults <- struct{}{}
+
+ if pe.profile {
+ end := time.Since(pe.begin)
+
+ pe.statsMutex.Lock()
+ pe.stats[res.ver.TxnIndex] = ExecutionStat{
+ TxIdx: res.ver.TxnIndex,
+ Incarnation: res.ver.Incarnation,
+ Start: uint64(start),
+ End: uint64(end),
+ Worker: procNum,
+ }
+ pe.statsMutex.Unlock()
+ }
+ }
+
+ if procNum < NumSpeculativeProcs {
+ for range pe.chSpeculativeTasks {
+ doWork(pe.specTaskQueue.Pop().(ExecVersionView))
+ }
+ } else {
+ for task := range pe.chTasks {
+ doWork(task)
+ }
+ }
+ }(i)
+ }
+
+ pe.settleWg.Add(1)
+
+ go func() {
+ for t := range pe.chSettle {
+ pe.tasks[t].Settle()
+ }
+
+ pe.settleWg.Done()
+ }()
+
+ // bootstrap first execution
+ tx := pe.execTasks.takeNextPending()
+
+ if tx == -1 {
+ return ParallelExecFailedError{"no executable transactions due to bad dependency"}
+ }
+
+ pe.cntExec++
+
+ pe.chTasks <- ExecVersionView{ver: Version{tx, 0}, et: pe.tasks[tx], mvh: pe.mvh, sender: pe.tasks[tx].Sender()}
+
+ return nil
+}
+
+func (pe *ParallelExecutor) Close(wait bool) {
+ close(pe.chTasks)
+ close(pe.chSpeculativeTasks)
+ close(pe.chSettle)
+
+ if wait {
+ pe.workerWg.Wait()
+ }
+
+ if wait {
+ pe.settleWg.Wait()
+ }
+}
+
+// nolint: gocognit
+func (pe *ParallelExecutor) Step(res *ExecResult) (result ParallelExecutionResult, err error) {
+ tx := res.ver.TxnIndex
+
+ if abortErr, ok := res.err.(ErrExecAbortError); ok && abortErr.OriginError != nil && pe.skipCheck[tx] {
+ // If the transaction failed when we know it should not fail, this means the transaction itself is
+ // bad (e.g. wrong nonce), and we should exit the execution immediately
+ err = fmt.Errorf("could not apply tx %d [%v]: %w", tx, pe.tasks[tx].Hash(), abortErr.OriginError)
+ pe.Close(true)
+
+ return
+ }
+
+ // nolint: nestif
+ if execErr, ok := res.err.(ErrExecAbortError); ok {
+ addedDependencies := false
+
+ if execErr.Dependency >= 0 {
+ l := len(pe.estimateDeps[tx])
+ for l > 0 && pe.estimateDeps[tx][l-1] > execErr.Dependency {
+ pe.execTasks.removeDependency(pe.estimateDeps[tx][l-1])
+ pe.estimateDeps[tx] = pe.estimateDeps[tx][:l-1]
+ l--
+ }
+
+ addedDependencies = pe.execTasks.addDependencies(execErr.Dependency, tx)
+ } else {
+ estimate := 0
+
+ if len(pe.estimateDeps[tx]) > 0 {
+ estimate = pe.estimateDeps[tx][len(pe.estimateDeps[tx])-1]
+ }
+ addedDependencies = pe.execTasks.addDependencies(estimate, tx)
+ newEstimate := estimate + (estimate+tx)/2
+ if newEstimate >= tx {
+ newEstimate = tx - 1
+ }
+ pe.estimateDeps[tx] = append(pe.estimateDeps[tx], newEstimate)
+ }
+
+ pe.execTasks.clearInProgress(tx)
+
+ if !addedDependencies {
+ pe.execTasks.pushPending(tx)
+ }
+ pe.txIncarnations[tx]++
+ pe.diagExecAbort[tx]++
+ pe.cntAbort++
+ } else {
+ pe.lastTxIO.recordRead(tx, res.txIn)
+
+ if res.ver.Incarnation == 0 {
+ pe.lastTxIO.recordWrite(tx, res.txOut)
+ pe.lastTxIO.recordAllWrite(tx, res.txAllOut)
+ } else {
+ if res.txAllOut.hasNewWrite(pe.lastTxIO.AllWriteSet(tx)) {
+ pe.validateTasks.pushPendingSet(pe.execTasks.getRevalidationRange(tx + 1))
+ }
+
+ prevWrite := pe.lastTxIO.AllWriteSet(tx)
+
+ // Remove entries that were previously written but are no longer written
+
+ cmpMap := make(map[Key]bool)
+
+ for _, w := range res.txAllOut {
+ cmpMap[w.Path] = true
+ }
+
+ for _, v := range prevWrite {
+ if _, ok := cmpMap[v.Path]; !ok {
+ pe.mvh.Delete(v.Path, tx)
+ }
+ }
+
+ pe.lastTxIO.recordWrite(tx, res.txOut)
+ pe.lastTxIO.recordAllWrite(tx, res.txAllOut)
+ }
+
+ pe.validateTasks.pushPending(tx)
+ pe.execTasks.markComplete(tx)
+ pe.diagExecSuccess[tx]++
+ pe.cntSuccess++
+
+ pe.execTasks.removeDependency(tx)
+ }
+
+ // do validations ...
+ maxComplete := pe.execTasks.maxAllComplete()
+
+ toValidate := make([]int, 0, 2)
+
+ for pe.validateTasks.minPending() <= maxComplete && pe.validateTasks.minPending() >= 0 {
+ toValidate = append(toValidate, pe.validateTasks.takeNextPending())
+ }
+
+ for i := 0; i < len(toValidate); i++ {
+ pe.cntTotalValidations++
+
+ tx := toValidate[i]
+
+ if pe.skipCheck[tx] || ValidateVersion(tx, pe.lastTxIO, pe.mvh) {
+ pe.validateTasks.markComplete(tx)
+ } else {
+ pe.cntValidationFail++
+ pe.diagExecAbort[tx]++
+ for _, v := range pe.lastTxIO.AllWriteSet(tx) {
+ pe.mvh.MarkEstimate(v.Path, tx)
+ }
+ // 'create validation tasks for all transactions > tx ...'
+ pe.validateTasks.pushPendingSet(pe.execTasks.getRevalidationRange(tx + 1))
+ pe.validateTasks.clearInProgress(tx) // clear in progress - pending will be added again once new incarnation executes
+
+ pe.execTasks.clearComplete(tx)
+ pe.execTasks.pushPending(tx)
+
+ pe.preValidated[tx] = false
+ pe.txIncarnations[tx]++
+ }
+ }
+
+ // Settle transactions that have been validated to be correct and that won't be re-executed again
+ maxValidated := pe.validateTasks.maxAllComplete()
+
+ for pe.lastSettled < maxValidated {
+ pe.lastSettled++
+ if pe.execTasks.checkInProgress(pe.lastSettled) || pe.execTasks.checkPending(pe.lastSettled) || pe.execTasks.isBlocked(pe.lastSettled) {
+ pe.lastSettled--
+ break
+ }
+ pe.chSettle <- pe.lastSettled
+ }
+
+ if pe.validateTasks.countComplete() == len(pe.tasks) && pe.execTasks.countComplete() == len(pe.tasks) {
+ log.Debug("blockstm exec summary", "execs", pe.cntExec, "success", pe.cntSuccess, "aborts", pe.cntAbort, "validations", pe.cntTotalValidations, "failures", pe.cntValidationFail, "#tasks/#execs", fmt.Sprintf("%.2f%%", float64(len(pe.tasks))/float64(pe.cntExec)*100))
+
+ pe.Close(true)
+
+ var allDeps map[int]map[int]bool
+
+ var deps DAG
+
+ if pe.profile {
+ allDeps = GetDep(*pe.lastTxIO)
+ deps = BuildDAG(*pe.lastTxIO)
+ }
+
+ return ParallelExecutionResult{pe.lastTxIO, &pe.stats, &deps, allDeps}, err
+ }
+
+ // Send the next immediate pending transaction to be executed
+ if pe.execTasks.minPending() != -1 && pe.execTasks.minPending() == maxValidated+1 {
+ nextTx := pe.execTasks.takeNextPending()
+ if nextTx != -1 {
+ pe.cntExec++
+
+ pe.skipCheck[nextTx] = true
+
+ pe.chTasks <- ExecVersionView{ver: Version{nextTx, pe.txIncarnations[nextTx]}, et: pe.tasks[nextTx], mvh: pe.mvh, sender: pe.tasks[nextTx].Sender()}
+ }
+ }
+
+ // Send speculative tasks
+ for pe.execTasks.minPending() != -1 {
+ nextTx := pe.execTasks.takeNextPending()
+
+ if nextTx != -1 {
+ pe.cntExec++
+
+ task := ExecVersionView{ver: Version{nextTx, pe.txIncarnations[nextTx]}, et: pe.tasks[nextTx], mvh: pe.mvh, sender: pe.tasks[nextTx].Sender()}
+
+ pe.specTaskQueue.Push(nextTx, task)
+ pe.chSpeculativeTasks <- struct{}{}
+ }
+ }
+
+ return
+}
+
+type PropertyCheck func(*ParallelExecutor) error
+
+func executeParallelWithCheck(tasks []ExecTask, profile bool, check PropertyCheck, metadata bool, interruptCtx context.Context) (result ParallelExecutionResult, err error) {
+ if len(tasks) == 0 {
+ return ParallelExecutionResult{MakeTxnInputOutput(len(tasks)), nil, nil, nil}, nil
+ }
+
+ pe := NewParallelExecutor(tasks, profile, metadata)
+ err = pe.Prepare()
+
+ if err != nil {
+ pe.Close(true)
+ return
+ }
+
+ for range pe.chResults {
+ if interruptCtx != nil && interruptCtx.Err() != nil {
+ pe.Close(true)
+ return result, interruptCtx.Err()
+ }
+
+ res := pe.resultQueue.Pop().(ExecResult)
+
+ result, err = pe.Step(&res)
+
+ if err != nil {
+ return result, err
+ }
+
+ if check != nil {
+ err = check(pe)
+ }
+
+ if result.TxIO != nil || err != nil {
+ return result, err
+ }
+ }
+
+ return
+}
+
+func ExecuteParallel(tasks []ExecTask, profile bool, metadata bool, interruptCtx context.Context) (result ParallelExecutionResult, err error) {
+ return executeParallelWithCheck(tasks, profile, nil, metadata, interruptCtx)
+}
diff --git a/core/blockstm/executor_test.go b/core/blockstm/executor_test.go
new file mode 100644
index 0000000000..511d9c774a
--- /dev/null
+++ b/core/blockstm/executor_test.go
@@ -0,0 +1,984 @@
+package blockstm
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
// OpType discriminates the kinds of operations a synthetic test task performs.
type OpType int

// Operation kinds. Declared as typed constants via iota (instead of the
// original untyped ints) so misuse is caught by the compiler; values are
// unchanged: read=0, write=1, other=2.
const (
	readType OpType = iota // read a key through the multi-version hash map
	writeType              // record a key in the task's write set
	otherType              // pure computation, no I/O
)

// Markers used in benchmark-style result printouts.
const (
	greenTick    = "✅"
	redCross     = "❌"
	threeRockets = "🚀🚀🚀"
)
+
// Op is one simulated operation inside a test transaction: a read or write of
// a Key, or a pure-compute step, each with a synthetic duration.
type Op struct {
	key      Key
	duration time.Duration
	opType   OpType
	val      int
}

// testExecTask is a synthetic ExecTask implementation used to exercise the
// parallel executor without running real transactions.
type testExecTask struct {
	txIdx        int                     // position of this tx within the block
	ops          []Op                    // scripted operations, executed in order
	readMap      map[Key]ReadDescriptor  // reads recorded by the last Execute
	writeMap     map[Key]WriteDescriptor // writes recorded by the last Execute
	sender       common.Address          // tx sender address
	nonce        int                     // expected sender nonce at execution time
	dependencies []int                   // pre-computed tx dependencies (metadata runs)
}

// PathGenerator picks the storage Key for op j of tx i from the given sender.
type PathGenerator func(addr common.Address, i int, j int, total int) Key

// TaskRunner executes one generated block and returns (parallel, serial) durations.
type TaskRunner func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration)

// TaskRunnerWithMetadata additionally reports the metadata-assisted run:
// (parallel, parallel-with-metadata, serial) durations.
type TaskRunnerWithMetadata func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration)

// Timer yields the synthetic duration of op opIdx within tx txIdx.
type Timer func(txIdx int, opIdx int) time.Duration

// Sender maps a tx index to its sender address.
type Sender func(int) common.Address
+
+func NewTestExecTask(txIdx int, ops []Op, sender common.Address, nonce int) *testExecTask {
+ return &testExecTask{
+ txIdx: txIdx,
+ ops: ops,
+ readMap: make(map[Key]ReadDescriptor),
+ writeMap: make(map[Key]WriteDescriptor),
+ sender: sender,
+ nonce: nonce,
+ dependencies: []int{},
+ }
+}
+
+func sleep(i time.Duration) {
+ start := time.Now()
+ for time.Since(start) < i {
+ }
+}
+
// Execute replays the task's scripted ops against the multi-version hash map,
// recording reads and buffering writes. It returns ErrExecAbortError when a
// read resolves to a dependency (ESTIMATE) entry or the observed nonce does
// not match, signalling the scheduler to re-execute this tx later.
func (t *testExecTask) Execute(mvh *MVHashMap, incarnation int) error {
	// Sleep for 50 microsecond to simulate setup time
	sleep(time.Microsecond * 50)

	version := Version{TxnIndex: t.txIdx, Incarnation: incarnation}

	// Each incarnation starts from clean read/write sets.
	t.readMap = make(map[Key]ReadDescriptor)
	t.writeMap = make(map[Key]WriteDescriptor)

	// Highest tx index this execution was found to depend on, -1 for none.
	deps := -1

	for i, op := range t.ops {
		k := op.key

		switch op.opType {
		case readType:
			// Reads of keys this tx already wrote are served from the local
			// write buffer and are not recorded as MVHashMap reads.
			if _, ok := t.writeMap[k]; ok {
				sleep(op.duration)
				continue
			}

			result := mvh.Read(k, t.txIdx)

			val := result.Value()

			// By construction the first op reads the sender nonce; abort if
			// the observed value disagrees with the expected nonce.
			if i == 0 && val != nil && (val.(int) != t.nonce) {
				return ErrExecAbortError{}
			}

			if result.Status() == MVReadResultDependency {
				if result.depIdx > deps {
					deps = result.depIdx
				}
			}

			var readKind int

			if result.Status() == MVReadResultDone {
				readKind = ReadKindMap
			} else if result.Status() == MVReadResultNone {
				readKind = ReadKindStorage
			}

			sleep(op.duration)

			t.readMap[k] = ReadDescriptor{k, readKind, Version{TxnIndex: result.depIdx, Incarnation: result.incarnation}}
		case writeType:
			t.writeMap[k] = WriteDescriptor{k, version, op.val}
		case otherType:
			// Pure computation: just burn the scripted amount of time.
			sleep(op.duration)
		default:
			panic(fmt.Sprintf("Unknown op type: %d", op.opType))
		}
	}

	if deps != -1 {
		// At least one read hit an in-progress tx: report the blocking index.
		return ErrExecAbortError{deps, fmt.Errorf("Dependency error")}
	}

	return nil
}
+
// MVWriteList returns the writes to publish to the multi-version hash map;
// for the test task this is identical to the full write list.
func (t *testExecTask) MVWriteList() []WriteDescriptor {
	return t.MVFullWriteList()
}
+
+func (t *testExecTask) MVFullWriteList() []WriteDescriptor {
+ writes := make([]WriteDescriptor, 0, len(t.writeMap))
+
+ for _, v := range t.writeMap {
+ writes = append(writes, v)
+ }
+
+ return writes
+}
+
+func (t *testExecTask) MVReadList() []ReadDescriptor {
+ reads := make([]ReadDescriptor, 0, len(t.readMap))
+
+ for _, v := range t.readMap {
+ reads = append(reads, v)
+ }
+
+ return reads
+}
+
// Settle is a no-op for the synthetic task (no post-commit bookkeeping).
func (t *testExecTask) Settle() {}

// Sender returns the address this test transaction originates from.
func (t *testExecTask) Sender() common.Address {
	return t.sender
}

// Hash derives a unique per-tx hash from the decimal form of the tx index.
func (t *testExecTask) Hash() common.Hash {
	return common.BytesToHash([]byte(fmt.Sprintf("%d", t.txIdx)))
}

// Dependencies returns the pre-computed dependency list (metadata runs only).
func (t *testExecTask) Dependencies() []int {
	return t.dependencies
}
+
+func randTimeGenerator(min time.Duration, max time.Duration) func(txIdx int, opIdx int) time.Duration {
+ return func(txIdx int, opIdx int) time.Duration {
+ return time.Duration(rand.Int63n(int64(max-min))) + min
+ }
+}
+
+func longTailTimeGenerator(min time.Duration, max time.Duration, i int, j int) func(txIdx int, opIdx int) time.Duration {
+ return func(txIdx int, opIdx int) time.Duration {
+ if txIdx%i == 0 && opIdx == j {
+ return max * 100
+ } else {
+ return time.Duration(rand.Int63n(int64(max-min))) + min
+ }
+ }
+}
+
// randomPathGenerator spreads accesses across 10 contract addresses (keyed by
// tx index mod 10) with the slot hash derived from the op count, so unrelated
// txs still occasionally collide.
var randomPathGenerator = func(sender common.Address, i int, j int, total int) Key {
	return NewStateKey(common.BigToAddress((big.NewInt(int64(i % 10)))), common.BigToHash((big.NewInt(int64(total)))))
}

// dexPathGenerator models a DEX-like hotspot: the last op and op 2 of every
// tx touch one shared key (address 0), forcing a serial dependency chain.
var dexPathGenerator = func(sender common.Address, i int, j int, total int) Key {
	if j == total-1 || j == 2 {
		return NewSubpathKey(common.BigToAddress(big.NewInt(int64(0))), 1)
	} else {
		return NewSubpathKey(common.BigToAddress(big.NewInt(int64(j))), 1)
	}
}
+
// Default synthetic latencies: reads 4-12µs, writes 2-6µs, compute 1-2µs.
var readTime = randTimeGenerator(4*time.Microsecond, 12*time.Microsecond)
var writeTime = randTimeGenerator(2*time.Microsecond, 6*time.Microsecond)
var nonIOTime = randTimeGenerator(1*time.Microsecond, 2*time.Microsecond)
+
// taskFactory builds numTask synthetic ExecTasks. Every task starts with a
// read and a write of its sender's nonce subpath (chaining txs of the same
// sender together), followed by a randomized mix of reads, writes and compute
// ops whose keys and durations come from the supplied generators. It also
// returns the summed duration of all generated ops, i.e. the expected serial
// execution time (excluding per-task setup).
func taskFactory(numTask int, sender Sender, readsPerT int, writesPerT int, nonIOPerT int, pathGenerator PathGenerator, readTime Timer, writeTime Timer, nonIOTime Timer) ([]ExecTask, time.Duration) {
	exec := make([]ExecTask, 0, numTask)

	var serialDuration time.Duration

	// Next nonce per sender, so a sender's txs form a dependency chain.
	senderNonces := make(map[common.Address]int)

	for i := 0; i < numTask; i++ {
		s := sender(i)

		// Set first two ops to always read and write nonce
		ops := make([]Op, 0, readsPerT+writesPerT+nonIOPerT)

		ops = append(ops, Op{opType: readType, key: NewSubpathKey(s, 2), duration: readTime(i, 0), val: senderNonces[s]})

		senderNonces[s]++

		ops = append(ops, Op{opType: writeType, key: NewSubpathKey(s, 2), duration: writeTime(i, 1), val: senderNonces[s]})

		for j := 0; j < readsPerT-1; j++ {
			ops = append(ops, Op{opType: readType})
		}

		for j := 0; j < nonIOPerT; j++ {
			ops = append(ops, Op{opType: otherType})
		}

		for j := 0; j < writesPerT-1; j++ {
			ops = append(ops, Op{opType: writeType})
		}

		// shuffle ops except for the first three (read nonce, write nonce, another read) ops and last write op.
		// This enables random path generator to generate deterministic paths for these "special" ops.
		for j := 3; j < len(ops)-1; j++ {
			// k ranges over [j, len(ops)-2]: the final op is never swapped.
			k := rand.Intn(len(ops)-j-1) + j
			ops[j], ops[k] = ops[k], ops[j]
		}

		// Generate time and key path for each op except first two that are always read and write nonce
		for j := 2; j < len(ops); j++ {
			if ops[j].opType == readType {
				ops[j].key = pathGenerator(s, i, j, len(ops))
				ops[j].duration = readTime(i, j)
			} else if ops[j].opType == writeType {
				ops[j].key = pathGenerator(s, i, j, len(ops))
				ops[j].duration = writeTime(i, j)
			} else {
				ops[j].duration = nonIOTime(i, j)
			}

			serialDuration += ops[j].duration
		}

		// Guards the construction invariant: the writes appended last were
		// never moved by the shuffle above.
		if ops[len(ops)-1].opType != writeType {
			panic("Last op must be a write")
		}

		t := NewTestExecTask(i, ops, s, senderNonces[s]-1)
		exec = append(exec, t)
	}

	return exec, serialDuration
}
+
+func testExecutorComb(t *testing.T, totalTxs []int, numReads []int, numWrites []int, numNonIO []int, taskRunner TaskRunner) {
+ t.Helper()
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
+
+ improved := 0
+ total := 0
+
+ totalExecDuration := time.Duration(0)
+ totalSerialDuration := time.Duration(0)
+
+ for _, numTx := range totalTxs {
+ for _, numRead := range numReads {
+ for _, numWrite := range numWrites {
+ for _, numNonIO := range numNonIO {
+ log.Info("Executing block", "numTx", numTx, "numRead", numRead, "numWrite", numWrite, "numNonIO", numNonIO)
+ execDuration, expectedSerialDuration := taskRunner(numTx, numRead, numWrite, numNonIO)
+
+ if execDuration < expectedSerialDuration {
+ improved++
+ }
+ total++
+
+ performance := greenTick
+
+ if execDuration >= expectedSerialDuration {
+ performance = redCross
+ }
+
+ fmt.Printf("exec duration %v, serial duration %v, time reduced %v %.2f%%, %v \n", execDuration, expectedSerialDuration, expectedSerialDuration-execDuration, float64(expectedSerialDuration-execDuration)/float64(expectedSerialDuration)*100, performance)
+
+ totalExecDuration += execDuration
+ totalSerialDuration += expectedSerialDuration
+ }
+ }
+ }
+ }
+
+ fmt.Println("Improved: ", improved, "Total: ", total, "success rate: ", float64(improved)/float64(total)*100)
+ fmt.Printf("Total exec duration: %v, total serial duration: %v, time reduced: %v, time reduced percent: %.2f%%\n", totalExecDuration, totalSerialDuration, totalSerialDuration-totalExecDuration, float64(totalSerialDuration-totalExecDuration)/float64(totalSerialDuration)*100)
+}
+
// nolint: gocognit
// testExecutorCombWithMetadata runs taskRunner over the cartesian product of
// block shapes and compares three timings per combination: plain parallel,
// parallel with dependency metadata, and the serial baseline.
func testExecutorCombWithMetadata(t *testing.T, totalTxs []int, numReads []int, numWrites []int, numNonIOs []int, taskRunner TaskRunnerWithMetadata) {
	t.Helper()
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))

	improved := 0
	improvedMetadata := 0
	rocket := 0
	total := 0

	totalExecDuration := time.Duration(0)
	totalExecDurationMetadata := time.Duration(0)
	totalSerialDuration := time.Duration(0)

	for _, numTx := range totalTxs {
		for _, numRead := range numReads {
			for _, numWrite := range numWrites {
				for _, numNonIO := range numNonIOs {
					log.Info("Executing block", "numTx", numTx, "numRead", numRead, "numWrite", numWrite, "numNonIO", numNonIO)
					execDuration, execDurationMetadata, expectedSerialDuration := taskRunner(numTx, numRead, numWrite, numNonIO)

					if execDuration < expectedSerialDuration {
						improved++
					}
					total++

					performance := greenTick

					if execDuration >= expectedSerialDuration {
						performance = redCross

						// "Rocket": metadata rescued a combination that plain
						// parallel execution lost to serial.
						if execDurationMetadata <= expectedSerialDuration {
							performance = threeRockets
							rocket++
						}
					}

					// NOTE(review): >= counts exact ties as "metadata better".
					if execDuration >= execDurationMetadata {
						improvedMetadata++
					}

					fmt.Printf("WITHOUT METADATA: exec duration %v, serial duration %v, time reduced %v %.2f%%, %v \n", execDuration, expectedSerialDuration, expectedSerialDuration-execDuration, float64(expectedSerialDuration-execDuration)/float64(expectedSerialDuration)*100, performance)
					fmt.Printf("WITH METADATA: exec duration %v, exec duration with metadata %v, time reduced %v %.2f%%\n", execDuration, execDurationMetadata, execDuration-execDurationMetadata, float64(execDuration-execDurationMetadata)/float64(execDuration)*100)

					totalExecDuration += execDuration
					totalExecDurationMetadata += execDurationMetadata
					totalSerialDuration += expectedSerialDuration
				}
			}
		}
	}

	fmt.Println("\nImproved: ", improved, "Total: ", total, "success rate: ", float64(improved)/float64(total)*100)
	fmt.Println("Metadata Better: ", improvedMetadata, "out of: ", total, "success rate: ", float64(improvedMetadata)/float64(total)*100)
	fmt.Println("Rockets (Time of: metadata < serial < without metadata): ", rocket)
	fmt.Printf("\nWithout metadata <> serial: Total exec duration: %v, total serial duration : %v, time reduced: %v, time reduced percent: %.2f%%\n", totalExecDuration, totalSerialDuration, totalSerialDuration-totalExecDuration, float64(totalSerialDuration-totalExecDuration)/float64(totalSerialDuration)*100)
	fmt.Printf("With metadata <> serial: Total exec duration metadata: %v, total serial duration : %v, time reduced: %v, time reduced percent: %.2f%%\n", totalExecDurationMetadata, totalSerialDuration, totalSerialDuration-totalExecDurationMetadata, float64(totalSerialDuration-totalExecDurationMetadata)/float64(totalSerialDuration)*100)
	fmt.Printf("Without metadata <> with metadata: Total exec duration: %v, total exec duration metadata: %v, time reduced: %v, time reduced percent: %.2f%%\n", totalExecDuration, totalExecDurationMetadata, totalExecDuration-totalExecDurationMetadata, float64(totalExecDuration-totalExecDurationMetadata)/float64(totalExecDuration)*100)
}
+
+func composeValidations(checks []PropertyCheck) PropertyCheck {
+ return func(pe *ParallelExecutor) error {
+ for _, check := range checks {
+ err := check(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+}
+
+func checkNoStatusOverlap(pe *ParallelExecutor) error {
+ seen := make(map[int]string)
+
+ for _, tx := range pe.execTasks.complete {
+ seen[tx] = "complete"
+ }
+
+ for _, tx := range pe.execTasks.inProgress {
+ if v, ok := seen[tx]; ok {
+ return fmt.Errorf("tx %v is in both %v and inProgress", v, tx)
+ }
+
+ seen[tx] = "inProgress"
+ }
+
+ for _, tx := range pe.execTasks.pending {
+ if v, ok := seen[tx]; ok {
+ return fmt.Errorf("tx %v is in both %v complete and pending", v, tx)
+ }
+
+ seen[tx] = "pending"
+ }
+
+ return nil
+}
+
+func checkNoDroppedTx(pe *ParallelExecutor) error {
+ for i := 0; i < len(pe.tasks); i++ {
+ if !pe.execTasks.checkComplete(i) && !pe.execTasks.checkInProgress(i) && !pe.execTasks.checkPending(i) {
+ if !pe.execTasks.isBlocked(i) {
+ return fmt.Errorf("tx %v is not in any status and is not blocked by any other tx", i)
+ }
+ }
+ }
+
+ return nil
+}
+
+// nolint: unparam
+func runParallel(t *testing.T, tasks []ExecTask, validation PropertyCheck, metadata bool) time.Duration {
+ t.Helper()
+
+ profile := false
+
+ start := time.Now()
+ result, err := executeParallelWithCheck(tasks, false, validation, metadata, nil)
+
+ if result.Deps != nil && profile {
+ result.Deps.Report(*result.Stats, func(str string) { fmt.Println(str) })
+ }
+
+ assert.NoError(t, err, "error occur during parallel execution")
+
+ // Need to apply the final write set to storage
+
+ finalWriteSet := make(map[Key]time.Duration)
+
+ for _, task := range tasks {
+ task := task.(*testExecTask)
+ for _, op := range task.ops {
+ if op.opType == writeType {
+ finalWriteSet[op.key] = op.duration
+ }
+ }
+ }
+
+ for _, v := range finalWriteSet {
+ sleep(v)
+ }
+
+ duration := time.Since(start)
+
+ return duration
+}
+
+func runParallelGetMetadata(t *testing.T, tasks []ExecTask, validation PropertyCheck) map[int]map[int]bool {
+ t.Helper()
+
+ res, err := executeParallelWithCheck(tasks, true, validation, false, nil)
+
+ assert.NoError(t, err, "error occur during parallel execution")
+
+ return res.AllDeps
+}
+
+func TestLessConflicts(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{10, 50, 100, 200, 300}
+ numReads := []int{20, 100, 200}
+ numWrites := []int{20, 100, 200}
+ numNonIO := []int{100, 500}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
+ sender := func(i int) common.Address {
+ randomness := rand.Intn(10) + 10
+ return common.BigToAddress(big.NewInt(int64(i % randomness)))
+ }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ return runParallel(t, tasks, checks, false), serialDuration
+ }
+
+ testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestLessConflictsWithMetadata(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{300}
+ numReads := []int{100, 200}
+ numWrites := []int{100, 200}
+ numNonIOs := []int{100, 500}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
+ sender := func(i int) common.Address {
+ randomness := rand.Intn(10) + 10
+ return common.BigToAddress(big.NewInt(int64(i % randomness)))
+ }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ parallelDuration := runParallel(t, tasks, checks, false)
+
+ allDeps := runParallelGetMetadata(t, tasks, checks)
+
+ newTasks := make([]ExecTask, 0, len(tasks))
+
+ for _, t := range tasks {
+ temp := t.(*testExecTask)
+
+ keys := make([]int, len(allDeps[temp.txIdx]))
+
+ i := 0
+
+ for k := range allDeps[temp.txIdx] {
+ keys[i] = k
+ i++
+ }
+
+ temp.dependencies = keys
+ newTasks = append(newTasks, temp)
+ }
+
+ return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
+ }
+
+ testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIOs, taskRunner)
+}
+
+func TestZeroTx(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{0}
+ numReads := []int{20}
+ numWrites := []int{20}
+ numNonIO := []int{100}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
+ sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(1))) }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ return runParallel(t, tasks, checks, false), serialDuration
+ }
+
+ testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestAlternatingTx(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{200}
+ numReads := []int{20}
+ numWrites := []int{20}
+ numNonIO := []int{100}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
+ sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(i % 2))) }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ return runParallel(t, tasks, checks, false), serialDuration
+ }
+
+ testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestAlternatingTxWithMetadata(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{200}
+ numReads := []int{20}
+ numWrites := []int{20}
+ numNonIO := []int{100}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
+ sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(i % 2))) }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ parallelDuration := runParallel(t, tasks, checks, false)
+
+ allDeps := runParallelGetMetadata(t, tasks, checks)
+
+ newTasks := make([]ExecTask, 0, len(tasks))
+
+ for _, t := range tasks {
+ temp := t.(*testExecTask)
+
+ keys := make([]int, len(allDeps[temp.txIdx]))
+
+ i := 0
+
+ for k := range allDeps[temp.txIdx] {
+ keys[i] = k
+ i++
+ }
+
+ temp.dependencies = keys
+ newTasks = append(newTasks, temp)
+ }
+
+ return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
+ }
+
+ testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestMoreConflicts(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{10, 50, 100, 200, 300}
+ numReads := []int{20, 100, 200}
+ numWrites := []int{20, 100, 200}
+ numNonIO := []int{100, 500}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
+ sender := func(i int) common.Address {
+ randomness := rand.Intn(10) + 10
+ return common.BigToAddress(big.NewInt(int64(i / randomness)))
+ }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ return runParallel(t, tasks, checks, false), serialDuration
+ }
+
+ testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestMoreConflictsWithMetadata(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{300}
+ numReads := []int{100, 200}
+ numWrites := []int{100, 200}
+ numNonIO := []int{100, 500}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
+ sender := func(i int) common.Address {
+ randomness := rand.Intn(10) + 10
+ return common.BigToAddress(big.NewInt(int64(i / randomness)))
+ }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ parallelDuration := runParallel(t, tasks, checks, false)
+
+ allDeps := runParallelGetMetadata(t, tasks, checks)
+
+ newTasks := make([]ExecTask, 0, len(tasks))
+
+ for _, t := range tasks {
+ temp := t.(*testExecTask)
+
+ keys := make([]int, len(allDeps[temp.txIdx]))
+
+ i := 0
+
+ for k := range allDeps[temp.txIdx] {
+ keys[i] = k
+ i++
+ }
+
+ temp.dependencies = keys
+ newTasks = append(newTasks, temp)
+ }
+
+ return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
+ }
+
+ testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestRandomTx(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{10, 50, 100, 200, 300}
+ numReads := []int{20, 100, 200}
+ numWrites := []int{20, 100, 200}
+ numNonIO := []int{100, 500}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
+ // Randomly assign this tx to one of 10 senders
+ sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(rand.Intn(10)))) }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ return runParallel(t, tasks, checks, false), serialDuration
+ }
+
+ testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestRandomTxWithMetadata(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{300}
+ numReads := []int{100, 200}
+ numWrites := []int{100, 200}
+ numNonIO := []int{100, 500}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
+ // Randomly assign this tx to one of 10 senders
+ sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(rand.Intn(10)))) }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, readTime, writeTime, nonIOTime)
+
+ parallelDuration := runParallel(t, tasks, checks, false)
+
+ allDeps := runParallelGetMetadata(t, tasks, checks)
+
+ newTasks := make([]ExecTask, 0, len(tasks))
+
+ for _, t := range tasks {
+ temp := t.(*testExecTask)
+
+ keys := make([]int, len(allDeps[temp.txIdx]))
+
+ i := 0
+
+ for k := range allDeps[temp.txIdx] {
+ keys[i] = k
+ i++
+ }
+
+ temp.dependencies = keys
+ newTasks = append(newTasks, temp)
+ }
+
+ return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
+ }
+
+ testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestTxWithLongTailRead(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{10, 50, 100, 200, 300}
+ numReads := []int{20, 100, 200}
+ numWrites := []int{20, 100, 200}
+ numNonIO := []int{100, 500}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
+ sender := func(i int) common.Address {
+ randomness := rand.Intn(10) + 10
+ return common.BigToAddress(big.NewInt(int64(i / randomness)))
+ }
+
+ longTailReadTimer := longTailTimeGenerator(4*time.Microsecond, 12*time.Microsecond, 7, 10)
+
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, longTailReadTimer, writeTime, nonIOTime)
+
+ return runParallel(t, tasks, checks, false), serialDuration
+ }
+
+ testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestTxWithLongTailReadWithMetadata(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{300}
+ numReads := []int{100, 200}
+ numWrites := []int{100, 200}
+ numNonIO := []int{100, 500}
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
+ sender := func(i int) common.Address {
+ randomness := rand.Intn(10) + 10
+ return common.BigToAddress(big.NewInt(int64(i / randomness)))
+ }
+
+ longTailReadTimer := longTailTimeGenerator(4*time.Microsecond, 12*time.Microsecond, 7, 10)
+
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, randomPathGenerator, longTailReadTimer, writeTime, nonIOTime)
+
+ parallelDuration := runParallel(t, tasks, checks, false)
+
+ allDeps := runParallelGetMetadata(t, tasks, checks)
+
+ newTasks := make([]ExecTask, 0, len(tasks))
+
+ for _, t := range tasks {
+ temp := t.(*testExecTask)
+
+ keys := make([]int, len(allDeps[temp.txIdx]))
+
+ i := 0
+
+ for k := range allDeps[temp.txIdx] {
+ keys[i] = k
+ i++
+ }
+
+ temp.dependencies = keys
+ newTasks = append(newTasks, temp)
+ }
+
+ return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
+ }
+
+ testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestDexScenario(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{10, 50, 100, 200, 300}
+ numReads := []int{20, 100, 200}
+ numWrites := []int{20, 100, 200}
+ numNonIO := []int{100, 500}
+
+ postValidation := func(pe *ParallelExecutor) error {
+ if pe.lastSettled == len(pe.tasks) {
+ for i, inputs := range pe.lastTxIO.inputs {
+ for _, input := range inputs {
+ if input.V.TxnIndex != i-1 {
+ return fmt.Errorf("Tx %d should depend on tx %d, but it actually depends on %d", i, i-1, input.V.TxnIndex)
+ }
+ }
+ }
+ }
+
+ return nil
+ }
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, postValidation, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration) {
+ sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(i))) }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, dexPathGenerator, readTime, writeTime, nonIOTime)
+
+ return runParallel(t, tasks, checks, false), serialDuration
+ }
+
+ testExecutorComb(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestDexScenarioWithMetadata(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ totalTxs := []int{300}
+ numReads := []int{100, 200}
+ numWrites := []int{100, 200}
+ numNonIO := []int{100, 500}
+
+ postValidation := func(pe *ParallelExecutor) error {
+ if pe.lastSettled == len(pe.tasks) {
+ for i, inputs := range pe.lastTxIO.inputs {
+ for _, input := range inputs {
+ if input.V.TxnIndex != i-1 {
+ return fmt.Errorf("Tx %d should depend on tx %d, but it actually depends on %d", i, i-1, input.V.TxnIndex)
+ }
+ }
+ }
+ }
+
+ return nil
+ }
+
+ checks := composeValidations([]PropertyCheck{checkNoStatusOverlap, postValidation, checkNoDroppedTx})
+
+ taskRunner := func(numTx int, numRead int, numWrite int, numNonIO int) (time.Duration, time.Duration, time.Duration) {
+ sender := func(i int) common.Address { return common.BigToAddress(big.NewInt(int64(i))) }
+ tasks, serialDuration := taskFactory(numTx, sender, numRead, numWrite, numNonIO, dexPathGenerator, readTime, writeTime, nonIOTime)
+
+ parallelDuration := runParallel(t, tasks, checks, false)
+
+ allDeps := runParallelGetMetadata(t, tasks, checks)
+
+ newTasks := make([]ExecTask, 0, len(tasks))
+
+ for _, t := range tasks {
+ temp := t.(*testExecTask)
+
+ keys := make([]int, len(allDeps[temp.txIdx]))
+
+ i := 0
+
+ for k := range allDeps[temp.txIdx] {
+ keys[i] = k
+ i++
+ }
+
+ temp.dependencies = keys
+ newTasks = append(newTasks, temp)
+ }
+
+ return parallelDuration, runParallel(t, newTasks, checks, true), serialDuration
+ }
+
+ testExecutorCombWithMetadata(t, totalTxs, numReads, numWrites, numNonIO, taskRunner)
+}
+
+func TestBreakFromCircularDependency(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ tasks := make([]ExecTask, 5)
+
+ for i := range tasks {
+ tasks[i] = &testExecTask{
+ txIdx: i,
+ dependencies: []int{
+ (i + len(tasks) - 1) % len(tasks),
+ },
+ }
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ // This should not hang
+ _, err := ExecuteParallel(tasks, false, true, ctx)
+
+ if err == nil {
+ t.Error("Expected cancel error")
+ }
+}
+
+func TestBreakFromPartialCircularDependency(t *testing.T) {
+ t.Parallel()
+ rand.Seed(0)
+
+ tasks := make([]ExecTask, 5)
+
+ for i := range tasks {
+ if i < 3 {
+ tasks[i] = &testExecTask{
+ txIdx: i,
+ dependencies: []int{
+ (i + 2) % 3,
+ },
+ }
+ } else {
+ tasks[i] = &testExecTask{
+ txIdx: i,
+ dependencies: []int{},
+ }
+ }
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ // This should not hang
+ _, err := ExecuteParallel(tasks, false, true, ctx)
+
+ if err == nil {
+ t.Error("Expected cancel error")
+ }
+}
diff --git a/core/blockstm/mvhashmap.go b/core/blockstm/mvhashmap.go
new file mode 100644
index 0000000000..2a517bcc84
--- /dev/null
+++ b/core/blockstm/mvhashmap.go
@@ -0,0 +1,290 @@
+package blockstm
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/emirpasic/gods/maps/treemap"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+const FlagDone = 0
+const FlagEstimate = 1
+
+const addressType = 1
+const stateType = 2
+const subpathType = 3
+
+const KeyLength = common.AddressLength + common.HashLength + 2
+
+type Key [KeyLength]byte
+
+func (k Key) IsAddress() bool {
+ return k[KeyLength-1] == addressType
+}
+
+func (k Key) IsState() bool {
+ return k[KeyLength-1] == stateType
+}
+
+func (k Key) IsSubpath() bool {
+ return k[KeyLength-1] == subpathType
+}
+
+func (k Key) GetAddress() common.Address {
+ return common.BytesToAddress(k[:common.AddressLength])
+}
+
+func (k Key) GetStateKey() common.Hash {
+ return common.BytesToHash(k[common.AddressLength : KeyLength-2])
+}
+
+func (k Key) GetSubpath() byte {
+ return k[KeyLength-2]
+}
+
+func newKey(addr common.Address, hash common.Hash, subpath byte, keyType byte) Key {
+ var k Key
+
+ copy(k[:common.AddressLength], addr.Bytes())
+ copy(k[common.AddressLength:KeyLength-2], hash.Bytes())
+ k[KeyLength-2] = subpath
+ k[KeyLength-1] = keyType
+
+ return k
+}
+
+func NewAddressKey(addr common.Address) Key {
+ return newKey(addr, common.Hash{}, 0, addressType)
+}
+
+func NewStateKey(addr common.Address, hash common.Hash) Key {
+ k := newKey(addr, hash, 0, stateType)
+ if !k.IsState() {
+ panic(fmt.Errorf("key is not a state key"))
+ }
+
+ return k
+}
+
+func NewSubpathKey(addr common.Address, subpath byte) Key {
+ return newKey(addr, common.Hash{}, subpath, subpathType)
+}
+
+type MVHashMap struct {
+ m sync.Map
+ s sync.Map
+}
+
+func MakeMVHashMap() *MVHashMap {
+ return &MVHashMap{}
+}
+
+type WriteCell struct {
+ flag uint
+ incarnation int
+ data interface{}
+}
+
+type TxnIndexCells struct {
+ rw sync.RWMutex
+ tm *treemap.Map
+}
+
+type Version struct {
+ TxnIndex int
+ Incarnation int
+}
+
+func (mv *MVHashMap) getKeyCells(k Key, fNoKey func(kenc Key) *TxnIndexCells) (cells *TxnIndexCells) {
+ val, ok := mv.m.Load(k)
+
+ if !ok {
+ cells = fNoKey(k)
+ } else {
+ cells = val.(*TxnIndexCells)
+ }
+
+ return
+}
+
+func (mv *MVHashMap) Write(k Key, v Version, data interface{}) {
+ cells := mv.getKeyCells(k, func(kenc Key) (cells *TxnIndexCells) {
+ n := &TxnIndexCells{
+ rw: sync.RWMutex{},
+ tm: treemap.NewWithIntComparator(),
+ }
+ cells = n
+ val, _ := mv.m.LoadOrStore(kenc, n)
+ cells = val.(*TxnIndexCells)
+ return
+ })
+
+ cells.rw.RLock()
+ ci, ok := cells.tm.Get(v.TxnIndex)
+ cells.rw.RUnlock()
+
+ if ok {
+ if ci.(*WriteCell).incarnation > v.Incarnation {
+ panic(fmt.Errorf("existing transaction value does not have lower incarnation: %v, %v",
+ k, v.TxnIndex))
+ }
+
+ ci.(*WriteCell).flag = FlagDone
+ ci.(*WriteCell).incarnation = v.Incarnation
+ ci.(*WriteCell).data = data
+ } else {
+ cells.rw.Lock()
+ if ci, ok = cells.tm.Get(v.TxnIndex); !ok {
+ cells.tm.Put(v.TxnIndex, &WriteCell{
+ flag: FlagDone,
+ incarnation: v.Incarnation,
+ data: data,
+ })
+ } else {
+ ci.(*WriteCell).flag = FlagDone
+ ci.(*WriteCell).incarnation = v.Incarnation
+ ci.(*WriteCell).data = data
+ }
+ cells.rw.Unlock()
+ }
+}
+
+func (mv *MVHashMap) ReadStorage(k Key, fallBack func() any) any {
+ data, ok := mv.s.Load(string(k[:]))
+ if !ok {
+ data = fallBack()
+ data, _ = mv.s.LoadOrStore(string(k[:]), data)
+ }
+
+ return data
+}
+
+func (mv *MVHashMap) MarkEstimate(k Key, txIdx int) {
+ cells := mv.getKeyCells(k, func(_ Key) *TxnIndexCells {
+ panic(fmt.Errorf("path must already exist"))
+ })
+
+ cells.rw.RLock()
+ if ci, ok := cells.tm.Get(txIdx); !ok {
+ panic(fmt.Sprintf("should not happen - cell should be present for path. TxIdx: %v, path, %x, cells keys: %v", txIdx, k, cells.tm.Keys()))
+ } else {
+ ci.(*WriteCell).flag = FlagEstimate
+ }
+ cells.rw.RUnlock()
+}
+
+func (mv *MVHashMap) Delete(k Key, txIdx int) {
+ cells := mv.getKeyCells(k, func(_ Key) *TxnIndexCells {
+ panic(fmt.Errorf("path must already exist"))
+ })
+
+ cells.rw.Lock()
+ defer cells.rw.Unlock()
+ cells.tm.Remove(txIdx)
+}
+
+const (
+ MVReadResultDone = 0
+ MVReadResultDependency = 1
+ MVReadResultNone = 2
+)
+
+type MVReadResult struct {
+ depIdx int
+ incarnation int
+ value interface{}
+}
+
+func (res *MVReadResult) DepIdx() int {
+ return res.depIdx
+}
+
+func (res *MVReadResult) Incarnation() int {
+ return res.incarnation
+}
+
+func (res *MVReadResult) Value() interface{} {
+ return res.value
+}
+
+func (mvr MVReadResult) Status() int {
+ if mvr.depIdx != -1 {
+ if mvr.incarnation == -1 {
+ return MVReadResultDependency
+ } else {
+ return MVReadResultDone
+ }
+ }
+
+ return MVReadResultNone
+}
+
+func (mv *MVHashMap) Read(k Key, txIdx int) (res MVReadResult) {
+ res.depIdx = -1
+ res.incarnation = -1
+
+ cells := mv.getKeyCells(k, func(_ Key) *TxnIndexCells {
+ return nil
+ })
+ if cells == nil {
+ return
+ }
+
+ cells.rw.RLock()
+ fk, fv := cells.tm.Floor(txIdx - 1)
+ cells.rw.RUnlock()
+
+ if fk != nil && fv != nil {
+ c := fv.(*WriteCell)
+ switch c.flag {
+ case FlagEstimate:
+ res.depIdx = fk.(int)
+ res.value = c.data
+ case FlagDone:
+ {
+ res.depIdx = fk.(int)
+ res.incarnation = c.incarnation
+ res.value = c.data
+ }
+ default:
+ panic(fmt.Errorf("should not happen - unknown flag value"))
+ }
+ }
+
+ return
+}
+
+func (mv *MVHashMap) FlushMVWriteSet(writes []WriteDescriptor) {
+ for _, v := range writes {
+ mv.Write(v.Path, v.V, v.Val)
+ }
+}
+
+func ValidateVersion(txIdx int, lastInputOutput *TxnInputOutput, versionedData *MVHashMap) (valid bool) {
+	valid = true
+
+	for _, rd := range lastInputOutput.ReadSet(txIdx) {
+		mvResult := versionedData.Read(rd.Path, txIdx)
+		switch mvResult.Status() {
+		case MVReadResultDone:
+			valid = rd.Kind == ReadKindMap && rd.V == Version{
+				TxnIndex:    mvResult.depIdx,
+				Incarnation: mvResult.incarnation,
+			}
+		case MVReadResultDependency:
+			valid = false
+		case MVReadResultNone:
+			valid = rd.Kind == ReadKindStorage // feels like an assertion?
+		default:
+			panic(fmt.Errorf("should not happen - undefined mv read status: %v", mvResult.Status()))
+		}
+
+		if !valid {
+			break
+		}
+	}
+
+	return
+}
diff --git a/core/blockstm/mvhashmap_test.go b/core/blockstm/mvhashmap_test.go
new file mode 100644
index 0000000000..7ed728426c
--- /dev/null
+++ b/core/blockstm/mvhashmap_test.go
@@ -0,0 +1,344 @@
+package blockstm
+
+import (
+ "fmt"
+ "math/big"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var randomness = rand.Intn(10) + 10
+
+// create test data for a given txIdx and incarnation
+func valueFor(txIdx, inc int) []byte {
+ return []byte(fmt.Sprintf("%ver:%ver:%ver", txIdx*5, txIdx+inc, inc*5))
+}
+
+func getCommonAddress(i int) common.Address {
+ return common.BigToAddress(big.NewInt(int64(i % randomness)))
+}
+
+func TestHelperFunctions(t *testing.T) {
+ t.Parallel()
+
+ ap1 := NewAddressKey(getCommonAddress(1))
+ ap2 := NewAddressKey(getCommonAddress(2))
+
+ mvh := MakeMVHashMap()
+
+ mvh.Write(ap1, Version{0, 1}, valueFor(0, 1))
+ mvh.Write(ap1, Version{0, 2}, valueFor(0, 2))
+ res := mvh.Read(ap1, 0)
+ require.Equal(t, -1, res.DepIdx())
+ require.Equal(t, -1, res.Incarnation())
+ require.Equal(t, 2, res.Status())
+
+ mvh.Write(ap2, Version{1, 1}, valueFor(1, 1))
+ mvh.Write(ap2, Version{1, 2}, valueFor(1, 2))
+ res = mvh.Read(ap2, 1)
+ require.Equal(t, -1, res.DepIdx())
+ require.Equal(t, -1, res.Incarnation())
+ require.Equal(t, 2, res.Status())
+
+ mvh.Write(ap1, Version{2, 1}, valueFor(2, 1))
+ mvh.Write(ap1, Version{2, 2}, valueFor(2, 2))
+ res = mvh.Read(ap1, 2)
+ require.Equal(t, 0, res.DepIdx())
+ require.Equal(t, 2, res.Incarnation())
+ require.Equal(t, valueFor(0, 2), res.Value().([]byte))
+ require.Equal(t, 0, res.Status())
+}
+
+func TestFlushMVWrite(t *testing.T) {
+ t.Parallel()
+
+ ap1 := NewAddressKey(getCommonAddress(1))
+ ap2 := NewAddressKey(getCommonAddress(2))
+
+ mvh := MakeMVHashMap()
+
+ var res MVReadResult
+
+ wd := []WriteDescriptor{}
+
+ wd = append(wd, WriteDescriptor{
+ Path: ap1,
+ V: Version{0, 1},
+ Val: valueFor(0, 1),
+ })
+ wd = append(wd, WriteDescriptor{
+ Path: ap1,
+ V: Version{0, 2},
+ Val: valueFor(0, 2),
+ })
+ wd = append(wd, WriteDescriptor{
+ Path: ap2,
+ V: Version{1, 1},
+ Val: valueFor(1, 1),
+ })
+ wd = append(wd, WriteDescriptor{
+ Path: ap2,
+ V: Version{1, 2},
+ Val: valueFor(1, 2),
+ })
+ wd = append(wd, WriteDescriptor{
+ Path: ap1,
+ V: Version{2, 1},
+ Val: valueFor(2, 1),
+ })
+ wd = append(wd, WriteDescriptor{
+ Path: ap1,
+ V: Version{2, 2},
+ Val: valueFor(2, 2),
+ })
+
+ mvh.FlushMVWriteSet(wd)
+
+ res = mvh.Read(ap1, 0)
+ require.Equal(t, -1, res.DepIdx())
+ require.Equal(t, -1, res.Incarnation())
+ require.Equal(t, 2, res.Status())
+
+ res = mvh.Read(ap2, 1)
+ require.Equal(t, -1, res.DepIdx())
+ require.Equal(t, -1, res.Incarnation())
+ require.Equal(t, 2, res.Status())
+
+ res = mvh.Read(ap1, 2)
+ require.Equal(t, 0, res.DepIdx())
+ require.Equal(t, 2, res.Incarnation())
+ require.Equal(t, valueFor(0, 2), res.Value().([]byte))
+ require.Equal(t, 0, res.Status())
+}
+
+// TODO - handle panic
+func TestLowerIncarnation(t *testing.T) {
+ t.Parallel()
+
+ ap1 := NewAddressKey(getCommonAddress(1))
+
+ mvh := MakeMVHashMap()
+
+ mvh.Write(ap1, Version{0, 2}, valueFor(0, 2))
+ mvh.Read(ap1, 0)
+ mvh.Write(ap1, Version{1, 2}, valueFor(1, 2))
+ mvh.Write(ap1, Version{0, 5}, valueFor(0, 5))
+ mvh.Write(ap1, Version{1, 5}, valueFor(1, 5))
+}
+
+func TestMarkEstimate(t *testing.T) {
+ t.Parallel()
+
+ ap1 := NewAddressKey(getCommonAddress(1))
+
+ mvh := MakeMVHashMap()
+
+ mvh.Write(ap1, Version{7, 2}, valueFor(7, 2))
+ mvh.MarkEstimate(ap1, 7)
+ mvh.Write(ap1, Version{7, 4}, valueFor(7, 4))
+}
+
+func TestMVHashMapBasics(t *testing.T) {
+ t.Parallel()
+
+ // memory locations
+ ap1 := NewAddressKey(getCommonAddress(1))
+ ap2 := NewAddressKey(getCommonAddress(2))
+ ap3 := NewAddressKey(getCommonAddress(3))
+
+ mvh := MakeMVHashMap()
+
+ res := mvh.Read(ap1, 5)
+ require.Equal(t, -1, res.depIdx)
+
+ mvh.Write(ap1, Version{10, 1}, valueFor(10, 1))
+
+ res = mvh.Read(ap1, 9)
+ require.Equal(t, -1, res.depIdx, "reads that should go the the DB return dependency -1")
+ res = mvh.Read(ap1, 10)
+ require.Equal(t, -1, res.depIdx, "Read returns entries from smaller txns, not txn 10")
+
+ // Reads for a higher txn return the entry written by txn 10.
+ res = mvh.Read(ap1, 15)
+ require.Equal(t, 10, res.depIdx, "reads for a higher txn return the entry written by txn 10.")
+ require.Equal(t, 1, res.incarnation)
+ require.Equal(t, valueFor(10, 1), res.value)
+
+ // More writes.
+ mvh.Write(ap1, Version{12, 0}, valueFor(12, 0))
+ mvh.Write(ap1, Version{8, 3}, valueFor(8, 3))
+
+ // Verify reads.
+ res = mvh.Read(ap1, 15)
+ require.Equal(t, 12, res.depIdx)
+ require.Equal(t, 0, res.incarnation)
+ require.Equal(t, valueFor(12, 0), res.value)
+
+ res = mvh.Read(ap1, 11)
+ require.Equal(t, 10, res.depIdx)
+ require.Equal(t, 1, res.incarnation)
+ require.Equal(t, valueFor(10, 1), res.value)
+
+ res = mvh.Read(ap1, 10)
+ require.Equal(t, 8, res.depIdx)
+ require.Equal(t, 3, res.incarnation)
+ require.Equal(t, valueFor(8, 3), res.value)
+
+ // Mark the entry written by 10 as an estimate.
+ mvh.MarkEstimate(ap1, 10)
+
+ res = mvh.Read(ap1, 11)
+ require.Equal(t, 10, res.depIdx)
+ require.Equal(t, -1, res.incarnation, "dep at tx 10 is now an estimate")
+
+ // Delete the entry written by 10, write to a different ap.
+ mvh.Delete(ap1, 10)
+ mvh.Write(ap2, Version{10, 2}, valueFor(10, 2))
+
+ // Read by txn 11 no longer observes entry from txn 10.
+ res = mvh.Read(ap1, 11)
+ require.Equal(t, 8, res.depIdx)
+ require.Equal(t, 3, res.incarnation)
+ require.Equal(t, valueFor(8, 3), res.value)
+
+ // Reads, writes for ap2 and ap3.
+ mvh.Write(ap2, Version{5, 0}, valueFor(5, 0))
+ mvh.Write(ap3, Version{20, 4}, valueFor(20, 4))
+
+ res = mvh.Read(ap2, 10)
+ require.Equal(t, 5, res.depIdx)
+ require.Equal(t, 0, res.incarnation)
+ require.Equal(t, valueFor(5, 0), res.value)
+
+ res = mvh.Read(ap3, 21)
+ require.Equal(t, 20, res.depIdx)
+ require.Equal(t, 4, res.incarnation)
+ require.Equal(t, valueFor(20, 4), res.value)
+
+ // Clear ap1 and ap3.
+ mvh.Delete(ap1, 12)
+ mvh.Delete(ap1, 8)
+ mvh.Delete(ap3, 20)
+
+ // Reads from ap1 and ap3 go to db.
+ res = mvh.Read(ap1, 30)
+ require.Equal(t, -1, res.depIdx)
+
+ res = mvh.Read(ap3, 30)
+ require.Equal(t, -1, res.depIdx)
+
+ // No-op delete at ap2 - doesn't panic because ap2 does exist
+ mvh.Delete(ap2, 11)
+
+ // Read entry by txn 10 at ap2.
+ res = mvh.Read(ap2, 15)
+ require.Equal(t, 10, res.depIdx)
+ require.Equal(t, 2, res.incarnation)
+ require.Equal(t, valueFor(10, 2), res.value)
+}
+
+func BenchmarkWriteTimeSameLocationDifferentTxIdx(b *testing.B) {
+ mvh2 := MakeMVHashMap()
+ ap2 := NewAddressKey(getCommonAddress(2))
+
+ randInts := []int{}
+ for i := 0; i < b.N; i++ {
+ randInts = append(randInts, rand.Intn(1000000000000000))
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ mvh2.Write(ap2, Version{randInts[i], 1}, valueFor(randInts[i], 1))
+ }
+}
+
+func BenchmarkReadTimeSameLocationDifferentTxIdx(b *testing.B) {
+ mvh2 := MakeMVHashMap()
+ ap2 := NewAddressKey(getCommonAddress(2))
+ txIdxSlice := []int{}
+
+ for i := 0; i < b.N; i++ {
+ txIdx := rand.Intn(1000000000000000)
+ txIdxSlice = append(txIdxSlice, txIdx)
+ mvh2.Write(ap2, Version{txIdx, 1}, valueFor(txIdx, 1))
+ }
+
+ b.ResetTimer()
+
+ for _, value := range txIdxSlice {
+ mvh2.Read(ap2, value)
+ }
+}
+
+func TestTimeComplexity(t *testing.T) {
+ t.Parallel()
+
+ // for 1000000 read and write with no dependency at different memory location
+ mvh1 := MakeMVHashMap()
+
+ for i := 0; i < 1000000; i++ {
+ ap1 := NewAddressKey(getCommonAddress(i))
+ mvh1.Write(ap1, Version{i, 1}, valueFor(i, 1))
+ mvh1.Read(ap1, i)
+ }
+
+ // for 1000000 read and write with dependency at same memory location
+ mvh2 := MakeMVHashMap()
+ ap2 := NewAddressKey(getCommonAddress(2))
+
+ for i := 0; i < 1000000; i++ {
+ mvh2.Write(ap2, Version{i, 1}, valueFor(i, 1))
+ mvh2.Read(ap2, i)
+ }
+}
+
+func TestWriteTimeSameLocationDifferentTxnIdx(t *testing.T) {
+ t.Parallel()
+
+ mvh1 := MakeMVHashMap()
+ ap1 := NewAddressKey(getCommonAddress(1))
+
+ for i := 0; i < 1000000; i++ {
+ mvh1.Write(ap1, Version{i, 1}, valueFor(i, 1))
+ }
+}
+
+func TestWriteTimeSameLocationSameTxnIdx(t *testing.T) {
+ t.Parallel()
+
+ mvh1 := MakeMVHashMap()
+ ap1 := NewAddressKey(getCommonAddress(1))
+
+ for i := 0; i < 1000000; i++ {
+ mvh1.Write(ap1, Version{1, i}, valueFor(i, 1))
+ }
+}
+
+func TestWriteTimeDifferentLocation(t *testing.T) {
+ t.Parallel()
+
+ mvh1 := MakeMVHashMap()
+
+ for i := 0; i < 1000000; i++ {
+ ap1 := NewAddressKey(getCommonAddress(i))
+ mvh1.Write(ap1, Version{i, 1}, valueFor(i, 1))
+ }
+}
+
+func TestReadTimeSameLocation(t *testing.T) {
+ t.Parallel()
+
+ mvh1 := MakeMVHashMap()
+ ap1 := NewAddressKey(getCommonAddress(1))
+
+ mvh1.Write(ap1, Version{1, 1}, valueFor(1, 1))
+
+ for i := 0; i < 1000000; i++ {
+ mvh1.Read(ap1, 2)
+ }
+}
diff --git a/core/blockstm/status.go b/core/blockstm/status.go
new file mode 100644
index 0000000000..3025cf6c3e
--- /dev/null
+++ b/core/blockstm/status.go
@@ -0,0 +1,225 @@
+package blockstm
+
+import (
+ "fmt"
+ "sort"
+)
+
+func makeStatusManager(numTasks int) (t taskStatusManager) {
+ t.pending = make([]int, numTasks)
+ for i := 0; i < numTasks; i++ {
+ t.pending[i] = i
+ }
+
+ t.dependency = make(map[int]map[int]bool, numTasks)
+ t.blocker = make(map[int]map[int]bool, numTasks)
+
+ for i := 0; i < numTasks; i++ {
+ t.blocker[i] = make(map[int]bool)
+ }
+
+ return
+}
+
+type taskStatusManager struct {
+ pending []int
+ inProgress []int
+ complete []int
+ dependency map[int]map[int]bool
+ blocker map[int]map[int]bool
+}
+
+func insertInList(l []int, v int) []int {
+ if len(l) == 0 || v > l[len(l)-1] {
+ return append(l, v)
+ } else {
+ x := sort.SearchInts(l, v)
+ if x < len(l) && l[x] == v {
+ // already in list
+ return l
+ }
+ a := append(l[:x+1], l[x:]...)
+ a[x] = v
+ return a
+ }
+}
+
+func (m *taskStatusManager) takeNextPending() int {
+ if len(m.pending) == 0 {
+ return -1
+ }
+
+ x := m.pending[0]
+ m.pending = m.pending[1:]
+ m.inProgress = insertInList(m.inProgress, x)
+
+ return x
+}
+
+func hasNoGap(l []int) bool {
+ return l[0]+len(l) == l[len(l)-1]+1
+}
+
+func (m taskStatusManager) maxAllComplete() int {
+ if len(m.complete) == 0 || m.complete[0] != 0 {
+ return -1
+ } else if m.complete[len(m.complete)-1] == len(m.complete)-1 {
+ return m.complete[len(m.complete)-1]
+ } else {
+ for i := len(m.complete) - 2; i >= 0; i-- {
+ if hasNoGap(m.complete[:i+1]) {
+ return m.complete[i]
+ }
+ }
+ }
+
+ return -1
+}
+
+func (m *taskStatusManager) pushPending(tx int) {
+ m.pending = insertInList(m.pending, tx)
+}
+
+func removeFromList(l []int, v int, expect bool) []int {
+	x := sort.SearchInts(l, v)
+	if x == len(l) || l[x] != v {
+		if expect {
+			panic(fmt.Errorf("should not happen - element expected in list"))
+		}
+
+		return l
+	}
+
+	switch x {
+	case 0:
+		return l[1:]
+	case len(l) - 1:
+		return l[:len(l)-1]
+	default:
+		return append(l[:x], l[x+1:]...)
+	}
+}
+
+func (m *taskStatusManager) markComplete(tx int) {
+ m.inProgress = removeFromList(m.inProgress, tx, true)
+ m.complete = insertInList(m.complete, tx)
+}
+
+func (m *taskStatusManager) minPending() int {
+ if len(m.pending) == 0 {
+ return -1
+ } else {
+ return m.pending[0]
+ }
+}
+
+func (m *taskStatusManager) countComplete() int {
+ return len(m.complete)
+}
+
+func (m *taskStatusManager) addDependencies(blocker int, dependent int) bool {
+ if blocker < 0 || blocker >= dependent {
+ return false
+ }
+
+ curblockers := m.blocker[dependent]
+
+ if m.checkComplete(blocker) {
+ // Blocker has already completed
+ delete(curblockers, blocker)
+
+ return len(curblockers) > 0
+ }
+
+ if _, ok := m.dependency[blocker]; !ok {
+ m.dependency[blocker] = make(map[int]bool)
+ }
+
+ m.dependency[blocker][dependent] = true
+ curblockers[blocker] = true
+
+ return true
+}
+
+func (m *taskStatusManager) isBlocked(tx int) bool {
+ return len(m.blocker[tx]) > 0
+}
+
+func (m *taskStatusManager) removeDependency(tx int) {
+ if deps, ok := m.dependency[tx]; ok && len(deps) > 0 {
+ for k := range deps {
+ delete(m.blocker[k], tx)
+
+ if len(m.blocker[k]) == 0 {
+ if !m.checkComplete(k) && !m.checkPending(k) && !m.checkInProgress(k) {
+ m.pushPending(k)
+ }
+ }
+ }
+
+ delete(m.dependency, tx)
+ }
+}
+
+func (m *taskStatusManager) clearInProgress(tx int) {
+ m.inProgress = removeFromList(m.inProgress, tx, true)
+}
+
+func (m *taskStatusManager) checkInProgress(tx int) bool {
+ x := sort.SearchInts(m.inProgress, tx)
+ if x < len(m.inProgress) && m.inProgress[x] == tx {
+ return true
+ }
+
+ return false
+}
+
+func (m *taskStatusManager) checkPending(tx int) bool {
+ x := sort.SearchInts(m.pending, tx)
+ if x < len(m.pending) && m.pending[x] == tx {
+ return true
+ }
+
+ return false
+}
+
+func (m *taskStatusManager) checkComplete(tx int) bool {
+ x := sort.SearchInts(m.complete, tx)
+ if x < len(m.complete) && m.complete[x] == tx {
+ return true
+ }
+
+ return false
+}
+
+// getRevalidationRange: this range will be all tasks from tx (inclusive) that are not currently in progress up to the
+//
+// 'all complete' limit
+func (m *taskStatusManager) getRevalidationRange(txFrom int) (ret []int) {
+ max := m.maxAllComplete() // haven't learned to trust compilers :)
+ for x := txFrom; x <= max; x++ {
+ if !m.checkInProgress(x) {
+ ret = append(ret, x)
+ }
+ }
+
+ return
+}
+
+func (m *taskStatusManager) pushPendingSet(set []int) {
+ for _, v := range set {
+ if m.checkComplete(v) {
+ m.clearComplete(v)
+ }
+
+ m.pushPending(v)
+ }
+}
+
+func (m *taskStatusManager) clearComplete(tx int) {
+ m.complete = removeFromList(m.complete, tx, false)
+}
+
+func (m *taskStatusManager) clearPending(tx int) {
+ m.pending = removeFromList(m.pending, tx, false)
+}
diff --git a/core/blockstm/status_test.go b/core/blockstm/status_test.go
new file mode 100644
index 0000000000..aff00d9a2f
--- /dev/null
+++ b/core/blockstm/status_test.go
@@ -0,0 +1,82 @@
+package blockstm
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestStatusBasics(t *testing.T) {
+ t.Parallel()
+
+ s := makeStatusManager(10)
+
+ x := s.takeNextPending()
+ require.Equal(t, 0, x)
+ require.True(t, s.checkInProgress(x))
+
+ x = s.takeNextPending()
+ require.Equal(t, 1, x)
+ require.True(t, s.checkInProgress(x))
+
+ x = s.takeNextPending()
+ require.Equal(t, 2, x)
+ require.True(t, s.checkInProgress(x))
+
+ s.markComplete(0)
+ require.False(t, s.checkInProgress(0))
+ s.markComplete(1)
+ s.markComplete(2)
+ require.False(t, s.checkInProgress(1))
+ require.False(t, s.checkInProgress(2))
+ require.Equal(t, 2, s.maxAllComplete())
+
+ x = s.takeNextPending()
+ require.Equal(t, 3, x)
+
+ x = s.takeNextPending()
+ require.Equal(t, 4, x)
+
+ s.markComplete(x)
+ require.False(t, s.checkInProgress(4))
+ require.Equal(t, 2, s.maxAllComplete(), "zero should still be min complete")
+
+ exp := []int{1, 2}
+ require.Equal(t, exp, s.getRevalidationRange(1))
+}
+
+func TestMaxComplete(t *testing.T) {
+ t.Parallel()
+
+ s := makeStatusManager(10)
+
+ for {
+ tx := s.takeNextPending()
+
+ if tx == -1 {
+ break
+ }
+
+ if tx != 7 {
+ s.markComplete(tx)
+ }
+ }
+
+ require.Equal(t, 6, s.maxAllComplete())
+
+ s2 := makeStatusManager(10)
+
+ for {
+ tx := s2.takeNextPending()
+
+ if tx == -1 {
+ break
+ }
+ }
+ s2.markComplete(2)
+ s2.markComplete(4)
+ require.Equal(t, -1, s2.maxAllComplete())
+
+ s2.complete = insertInList(s2.complete, 4)
+ require.Equal(t, 2, s2.countComplete())
+}
diff --git a/core/blockstm/txio.go b/core/blockstm/txio.go
new file mode 100644
index 0000000000..19955fb152
--- /dev/null
+++ b/core/blockstm/txio.go
@@ -0,0 +1,106 @@
+package blockstm
+
+const (
+ ReadKindMap = 0
+ ReadKindStorage = 1
+)
+
+type ReadDescriptor struct {
+ Path Key
+ Kind int
+ V Version
+}
+
+type WriteDescriptor struct {
+ Path Key
+ V Version
+ Val interface{}
+}
+
+type TxnInput []ReadDescriptor
+type TxnOutput []WriteDescriptor
+
+// hasNewWrite: returns true if the current set has a new write compared to the input
+func (txo TxnOutput) hasNewWrite(cmpSet []WriteDescriptor) bool {
+ if len(txo) == 0 {
+ return false
+ } else if len(cmpSet) == 0 || len(txo) > len(cmpSet) {
+ return true
+ }
+
+ cmpMap := map[Key]bool{cmpSet[0].Path: true}
+
+ for i := 1; i < len(cmpSet); i++ {
+ cmpMap[cmpSet[i].Path] = true
+ }
+
+ for _, v := range txo {
+ if !cmpMap[v.Path] {
+ return true
+ }
+ }
+
+ return false
+}
+
+type TxnInputOutput struct {
+ inputs []TxnInput
+ outputs []TxnOutput // write sets that should be checked during validation
+ outputsSet []map[Key]struct{}
+ allOutputs []TxnOutput // entire write sets in MVHashMap. allOutputs should always be a parent set of outputs
+}
+
+func (io *TxnInputOutput) ReadSet(txnIdx int) []ReadDescriptor {
+ return io.inputs[txnIdx]
+}
+
+func (io *TxnInputOutput) WriteSet(txnIdx int) []WriteDescriptor {
+ return io.outputs[txnIdx]
+}
+
+func (io *TxnInputOutput) AllWriteSet(txnIdx int) []WriteDescriptor {
+ return io.allOutputs[txnIdx]
+}
+
+func (io *TxnInputOutput) HasWritten(txnIdx int, k Key) bool {
+ _, ok := io.outputsSet[txnIdx][k]
+ return ok
+}
+
+func MakeTxnInputOutput(numTx int) *TxnInputOutput {
+ return &TxnInputOutput{
+ inputs: make([]TxnInput, numTx),
+ outputs: make([]TxnOutput, numTx),
+ outputsSet: make([]map[Key]struct{}, numTx),
+ allOutputs: make([]TxnOutput, numTx),
+ }
+}
+
+func (io *TxnInputOutput) recordRead(txId int, input []ReadDescriptor) {
+ io.inputs[txId] = input
+}
+
+func (io *TxnInputOutput) recordWrite(txId int, output []WriteDescriptor) {
+ io.outputs[txId] = output
+ io.outputsSet[txId] = make(map[Key]struct{}, len(output))
+
+ for _, v := range output {
+ io.outputsSet[txId][v.Path] = struct{}{}
+ }
+}
+
+func (io *TxnInputOutput) recordAllWrite(txId int, output []WriteDescriptor) {
+ io.allOutputs[txId] = output
+}
+
+func (io *TxnInputOutput) RecordReadAtOnce(inputs [][]ReadDescriptor) {
+ for ind, val := range inputs {
+ io.inputs[ind] = val
+ }
+}
+
+func (io *TxnInputOutput) RecordAllWriteAtOnce(outputs [][]WriteDescriptor) {
+ for ind, val := range outputs {
+ io.allOutputs[ind] = val
+ }
+}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index e9944e4744..e4bcffd5c1 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -105,7 +105,7 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) {
b.SetCoinbase(common.Address{})
}
b.statedb.Prepare(tx.Hash(), len(b.txs))
- receipt, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{})
+ receipt, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{}, nil)
if err != nil {
panic(err)
}
@@ -335,6 +335,19 @@ func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethd
return blocks
}
+// makeFakeNonEmptyBlockChain creates a deterministic chain of blocks rooted at parent with fake invalid transactions.
+func makeFakeNonEmptyBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethdb.Database, seed int, numTx int) []*types.Block {
+ blocks, _ := GenerateChain(params.TestChainConfig, parent, engine, db, n, func(i int, b *BlockGen) {
+ addr := common.Address{0: byte(seed), 19: byte(i)}
+ b.SetCoinbase(addr)
+ for j := 0; j < numTx; j++ {
+ b.txs = append(b.txs, types.NewTransaction(0, addr, big.NewInt(1000), params.TxGas, nil, nil))
+ }
+ })
+
+ return blocks
+}
+
type fakeChainReader struct {
config *params.ChainConfig
stateSyncData []*types.StateSyncData
diff --git a/core/evm.go b/core/evm.go
index bb3afc0006..0adc0ac27e 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -18,6 +18,7 @@ package core
import (
"math/big"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -83,7 +84,12 @@ func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash
// Then fill up with [refHash.p, refHash.pp, refHash.ppp, ...]
var cache []common.Hash
+ cacheMutex := &sync.Mutex{}
+
return func(n uint64) common.Hash {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+
// If there's no hash cache yet, make one
if len(cache) == 0 {
cache = append(cache, ref.ParentHash)
diff --git a/core/parallel_state_processor.go b/core/parallel_state_processor.go
new file mode 100644
index 0000000000..8956f6f4d2
--- /dev/null
+++ b/core/parallel_state_processor.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc"
+ "github.com/ethereum/go-ethereum/core/blockstm"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+type ParallelEVMConfig struct {
+ Enable bool
+ SpeculativeProcesses int
+}
+
+// ParallelStateProcessor is a parallel Processor, which takes care of
+// transitioning state from one point to another using block-stm.
+//
+// ParallelStateProcessor implements Processor.
+type ParallelStateProcessor struct {
+ config *params.ChainConfig // Chain configuration options
+ bc *BlockChain // Canonical block chain
+ engine consensus.Engine // Consensus engine used for block rewards
+}
+
+// NewParallelStateProcessor initialises a new ParallelStateProcessor.
+func NewParallelStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *ParallelStateProcessor {
+ return &ParallelStateProcessor{
+ config: config,
+ bc: bc,
+ engine: engine,
+ }
+}
+
+type ExecutionTask struct {
+ msg types.Message
+ config *params.ChainConfig
+
+ gasLimit uint64
+ blockNumber *big.Int
+ blockHash common.Hash
+ tx *types.Transaction
+ index int
+ statedb *state.StateDB // State database that stores the modified values after tx execution.
+ cleanStateDB *state.StateDB // A clean copy of the initial statedb. It should not be modified.
+ finalStateDB *state.StateDB // The final statedb.
+ header *types.Header
+ blockChain *BlockChain
+ evmConfig vm.Config
+ result *ExecutionResult
+ shouldDelayFeeCal *bool
+ shouldRerunWithoutFeeDelay bool
+ sender common.Address
+ totalUsedGas *uint64
+ receipts *types.Receipts
+ allLogs *[]*types.Log
+
+ // length of dependencies -> 2 + k (k = a whole number)
+ // first 2 element in dependencies -> transaction index, and flag representing if delay is allowed or not
+ // (0 -> delay is not allowed, 1 -> delay is allowed)
+ // next k elements in dependencies -> transaction indexes on which transaction i is dependent on
+ dependencies []int
+ coinbase common.Address
+ blockContext vm.BlockContext
+}
+
+func (task *ExecutionTask) Execute(mvh *blockstm.MVHashMap, incarnation int) (err error) {
+ task.statedb = task.cleanStateDB.Copy()
+ task.statedb.Prepare(task.tx.Hash(), task.index)
+ task.statedb.SetMVHashmap(mvh)
+ task.statedb.SetIncarnation(incarnation)
+
+ evm := vm.NewEVM(task.blockContext, vm.TxContext{}, task.statedb, task.config, task.evmConfig)
+
+ // Create a new context to be used in the EVM environment.
+ txContext := NewEVMTxContext(task.msg)
+ evm.Reset(txContext, task.statedb)
+
+ defer func() {
+ if r := recover(); r != nil {
+			// In some premature executions, EVM will panic. Recover from panic and retry the execution.
+ log.Debug("Recovered from EVM failure.", "Error:", r)
+
+ err = blockstm.ErrExecAbortError{Dependency: task.statedb.DepTxIndex()}
+
+ return
+ }
+ }()
+
+ // Apply the transaction to the current state (included in the env).
+ if *task.shouldDelayFeeCal {
+ task.result, err = ApplyMessageNoFeeBurnOrTip(evm, task.msg, new(GasPool).AddGas(task.gasLimit), nil)
+
+ if task.result == nil || err != nil {
+ return blockstm.ErrExecAbortError{Dependency: task.statedb.DepTxIndex(), OriginError: err}
+ }
+
+ reads := task.statedb.MVReadMap()
+
+ if _, ok := reads[blockstm.NewSubpathKey(task.blockContext.Coinbase, state.BalancePath)]; ok {
+ log.Info("Coinbase is in MVReadMap", "address", task.blockContext.Coinbase)
+
+ task.shouldRerunWithoutFeeDelay = true
+ }
+
+ if _, ok := reads[blockstm.NewSubpathKey(task.result.BurntContractAddress, state.BalancePath)]; ok {
+ log.Info("BurntContractAddress is in MVReadMap", "address", task.result.BurntContractAddress)
+
+ task.shouldRerunWithoutFeeDelay = true
+ }
+ } else {
+ task.result, err = ApplyMessage(evm, task.msg, new(GasPool).AddGas(task.gasLimit), nil)
+ }
+
+ if task.statedb.HadInvalidRead() || err != nil {
+ err = blockstm.ErrExecAbortError{Dependency: task.statedb.DepTxIndex(), OriginError: err}
+ return
+ }
+
+ task.statedb.Finalise(task.config.IsEIP158(task.blockNumber))
+
+ return
+}
+
+func (task *ExecutionTask) MVReadList() []blockstm.ReadDescriptor {
+ return task.statedb.MVReadList()
+}
+
+func (task *ExecutionTask) MVWriteList() []blockstm.WriteDescriptor {
+ return task.statedb.MVWriteList()
+}
+
+func (task *ExecutionTask) MVFullWriteList() []blockstm.WriteDescriptor {
+ return task.statedb.MVFullWriteList()
+}
+
+func (task *ExecutionTask) Sender() common.Address {
+ return task.sender
+}
+
+func (task *ExecutionTask) Hash() common.Hash {
+ return task.tx.Hash()
+}
+
+func (task *ExecutionTask) Dependencies() []int {
+ return task.dependencies
+}
+
+func (task *ExecutionTask) Settle() {
+ task.finalStateDB.Prepare(task.tx.Hash(), task.index)
+
+ coinbaseBalance := task.finalStateDB.GetBalance(task.coinbase)
+
+ task.finalStateDB.ApplyMVWriteSet(task.statedb.MVFullWriteList())
+
+ for _, l := range task.statedb.GetLogs(task.tx.Hash(), task.blockHash) {
+ task.finalStateDB.AddLog(l)
+ }
+
+ if *task.shouldDelayFeeCal {
+ if task.config.IsLondon(task.blockNumber) {
+ task.finalStateDB.AddBalance(task.result.BurntContractAddress, task.result.FeeBurnt)
+ }
+
+ task.finalStateDB.AddBalance(task.coinbase, task.result.FeeTipped)
+ output1 := new(big.Int).SetBytes(task.result.SenderInitBalance.Bytes())
+ output2 := new(big.Int).SetBytes(coinbaseBalance.Bytes())
+
+ // Deprecating transfer log and will be removed in future fork. PLEASE DO NOT USE this transfer log going forward. Parameters won't get updated as expected going forward with EIP1559
+ // add transfer log
+ AddFeeTransferLog(
+ task.finalStateDB,
+
+ task.msg.From(),
+ task.coinbase,
+
+ task.result.FeeTipped,
+ task.result.SenderInitBalance,
+ coinbaseBalance,
+ output1.Sub(output1, task.result.FeeTipped),
+ output2.Add(output2, task.result.FeeTipped),
+ )
+ }
+
+ for k, v := range task.statedb.Preimages() {
+ task.finalStateDB.AddPreimage(k, v)
+ }
+
+ // Update the state with pending changes.
+ var root []byte
+
+ if task.config.IsByzantium(task.blockNumber) {
+ task.finalStateDB.Finalise(true)
+ } else {
+ root = task.finalStateDB.IntermediateRoot(task.config.IsEIP158(task.blockNumber)).Bytes()
+ }
+
+ *task.totalUsedGas += task.result.UsedGas
+
+ // Create a new receipt for the transaction, storing the intermediate root and gas used
+ // by the tx.
+ receipt := &types.Receipt{Type: task.tx.Type(), PostState: root, CumulativeGasUsed: *task.totalUsedGas}
+ if task.result.Failed() {
+ receipt.Status = types.ReceiptStatusFailed
+ } else {
+ receipt.Status = types.ReceiptStatusSuccessful
+ }
+
+ receipt.TxHash = task.tx.Hash()
+ receipt.GasUsed = task.result.UsedGas
+
+ // If the transaction created a contract, store the creation address in the receipt.
+ if task.msg.To() == nil {
+ receipt.ContractAddress = crypto.CreateAddress(task.msg.From(), task.tx.Nonce())
+ }
+
+ // Set the receipt logs and create the bloom filter.
+ receipt.Logs = task.finalStateDB.GetLogs(task.tx.Hash(), task.blockHash)
+ receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
+ receipt.BlockHash = task.blockHash
+ receipt.BlockNumber = task.blockNumber
+ receipt.TransactionIndex = uint(task.finalStateDB.TxIndex())
+
+ *task.receipts = append(*task.receipts, receipt)
+ *task.allLogs = append(*task.allLogs, receipt.Logs...)
+}
+
+var parallelizabilityTimer = metrics.NewRegisteredTimer("block/parallelizability", nil)
+
+// Process processes the state changes according to the Ethereum rules by running
+// the transaction messages using the statedb and applying any rewards to both
+// the processor (coinbase) and any included uncles.
+//
+// Process returns the receipts and logs accumulated during the process and
+// returns the amount of gas that was used in the process. If any of the
+// transactions failed to execute due to insufficient gas it will return an error.
+// nolint:gocognit
+func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config, interruptCtx context.Context) (types.Receipts, []*types.Log, uint64, error) {
+ blockstm.SetProcs(cfg.ParallelSpeculativeProcesses)
+
+ var (
+ receipts types.Receipts
+ header = block.Header()
+ blockHash = block.Hash()
+ blockNumber = block.Number()
+ allLogs []*types.Log
+ usedGas = new(uint64)
+ metadata bool
+ )
+
+ // Mutate the block and state according to any hard-fork specs
+ if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
+ misc.ApplyDAOHardFork(statedb)
+ }
+
+ tasks := make([]blockstm.ExecTask, 0, len(block.Transactions()))
+
+ shouldDelayFeeCal := true
+
+ coinbase, _ := p.bc.Engine().Author(header)
+
+ deps := GetDeps(block.Header().TxDependency)
+
+ if block.Header().TxDependency != nil {
+ metadata = true
+ }
+
+ blockContext := NewEVMBlockContext(header, p.bc, nil)
+
+ // Iterate over and process the individual transactions
+ for i, tx := range block.Transactions() {
+ msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number), header.BaseFee)
+ if err != nil {
+ log.Error("error creating message", "err", err)
+ return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
+ }
+
+ cleansdb := statedb.Copy()
+
+ if msg.From() == coinbase {
+ shouldDelayFeeCal = false
+ }
+
+ if len(header.TxDependency) != len(block.Transactions()) {
+ task := &ExecutionTask{
+ msg: msg,
+ config: p.config,
+ gasLimit: block.GasLimit(),
+ blockNumber: blockNumber,
+ blockHash: blockHash,
+ tx: tx,
+ index: i,
+ cleanStateDB: cleansdb,
+ finalStateDB: statedb,
+ blockChain: p.bc,
+ header: header,
+ evmConfig: cfg,
+ shouldDelayFeeCal: &shouldDelayFeeCal,
+ sender: msg.From(),
+ totalUsedGas: usedGas,
+ receipts: &receipts,
+ allLogs: &allLogs,
+ dependencies: deps[i],
+ coinbase: coinbase,
+ blockContext: blockContext,
+ }
+
+ tasks = append(tasks, task)
+ } else {
+ task := &ExecutionTask{
+ msg: msg,
+ config: p.config,
+ gasLimit: block.GasLimit(),
+ blockNumber: blockNumber,
+ blockHash: blockHash,
+ tx: tx,
+ index: i,
+ cleanStateDB: cleansdb,
+ finalStateDB: statedb,
+ blockChain: p.bc,
+ header: header,
+ evmConfig: cfg,
+ shouldDelayFeeCal: &shouldDelayFeeCal,
+ sender: msg.From(),
+ totalUsedGas: usedGas,
+ receipts: &receipts,
+ allLogs: &allLogs,
+ dependencies: nil,
+ coinbase: coinbase,
+ blockContext: blockContext,
+ }
+
+ tasks = append(tasks, task)
+ }
+ }
+
+ backupStateDB := statedb.Copy()
+
+ profile := false
+ result, err := blockstm.ExecuteParallel(tasks, profile, metadata, interruptCtx)
+
+ if err == nil && profile && result.Deps != nil {
+ _, weight := result.Deps.LongestPath(*result.Stats)
+
+ serialWeight := uint64(0)
+
+ for i := 0; i < len(result.Deps.GetVertices()); i++ {
+ serialWeight += (*result.Stats)[i].End - (*result.Stats)[i].Start
+ }
+
+ parallelizabilityTimer.Update(time.Duration(serialWeight * 100 / weight))
+ }
+
+ for _, task := range tasks {
+ task := task.(*ExecutionTask)
+ if task.shouldRerunWithoutFeeDelay {
+ shouldDelayFeeCal = false
+
+ statedb.StopPrefetcher()
+ *statedb = *backupStateDB
+
+ allLogs = []*types.Log{}
+ receipts = types.Receipts{}
+ usedGas = new(uint64)
+
+ for _, t := range tasks {
+ t := t.(*ExecutionTask)
+ t.finalStateDB = backupStateDB
+ t.allLogs = &allLogs
+ t.receipts = &receipts
+ t.totalUsedGas = usedGas
+ }
+
+ _, err = blockstm.ExecuteParallel(tasks, false, metadata, interruptCtx)
+
+ break
+ }
+ }
+
+ if err != nil {
+ return nil, nil, 0, err
+ }
+
+ // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
+ p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles())
+
+ return receipts, allLogs, *usedGas, nil
+}
+
+func GetDeps(txDependency [][]uint64) map[int][]int {
+ deps := make(map[int][]int)
+
+ for i := 0; i <= len(txDependency)-1; i++ {
+ deps[i] = []int{}
+
+ for j := 0; j <= len(txDependency[i])-1; j++ {
+ deps[i] = append(deps[i], int(txDependency[i][j]))
+ }
+ }
+
+ return deps
+}
diff --git a/core/state/journal.go b/core/state/journal.go
index 57a692dc7f..8d55e75b90 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -20,6 +20,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/blockstm"
)
// journalEntry is a modification entry in the state change journal that can be
@@ -143,6 +144,7 @@ type (
func (ch createObjectChange) revert(s *StateDB) {
delete(s.stateObjects, *ch.account)
delete(s.stateObjectsDirty, *ch.account)
+ RevertWrite(s, blockstm.NewAddressKey(*ch.account))
}
func (ch createObjectChange) dirtied() *common.Address {
@@ -151,6 +153,7 @@ func (ch createObjectChange) dirtied() *common.Address {
func (ch resetObjectChange) revert(s *StateDB) {
s.setStateObject(ch.prev)
+ RevertWrite(s, blockstm.NewAddressKey(ch.prev.address))
if !ch.prevdestruct && s.snap != nil {
delete(s.snapDestructs, ch.prev.addrHash)
}
@@ -165,6 +168,7 @@ func (ch suicideChange) revert(s *StateDB) {
if obj != nil {
obj.suicided = ch.prev
obj.setBalance(ch.prevbalance)
+ RevertWrite(s, blockstm.NewSubpathKey(*ch.account, SuicidePath))
}
}
@@ -199,6 +203,7 @@ func (ch nonceChange) dirtied() *common.Address {
func (ch codeChange) revert(s *StateDB) {
s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode)
+ RevertWrite(s, blockstm.NewSubpathKey(*ch.account, CodePath))
}
func (ch codeChange) dirtied() *common.Address {
@@ -207,6 +212,7 @@ func (ch codeChange) dirtied() *common.Address {
func (ch storageChange) revert(s *StateDB) {
s.getStateObject(*ch.account).setState(ch.key, ch.prevalue)
+ RevertWrite(s, blockstm.NewStateKey(*ch.account, ch.key))
}
func (ch storageChange) dirtied() *common.Address {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index c236a79b5a..881ea8c110 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/blockstm"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
@@ -79,6 +80,14 @@ type StateDB struct {
stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ // Block-stm related fields
+ mvHashmap *blockstm.MVHashMap
+ incarnation int
+ readMap map[blockstm.Key]blockstm.ReadDescriptor
+ writeMap map[blockstm.Key]blockstm.WriteDescriptor
+ revertedKeys map[blockstm.Key]struct{}
+ dep int
+
// DB error.
// State objects are used by the consensus core and VM which are
// unable to deal with database-level errors. Any error that occurs
@@ -138,6 +147,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
stateObjects: make(map[common.Address]*stateObject),
stateObjectsPending: make(map[common.Address]struct{}),
stateObjectsDirty: make(map[common.Address]struct{}),
+ revertedKeys: make(map[blockstm.Key]struct{}),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
journal: newJournal(),
@@ -154,6 +164,281 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
return sdb, nil
}
+func NewWithMVHashmap(root common.Hash, db Database, snaps *snapshot.Tree, mvhm *blockstm.MVHashMap) (*StateDB, error) {
+ if sdb, err := New(root, db, snaps); err != nil {
+ return nil, err
+ } else {
+ sdb.mvHashmap = mvhm
+ sdb.dep = -1
+ return sdb, nil
+ }
+}
+
+func (sdb *StateDB) SetMVHashmap(mvhm *blockstm.MVHashMap) {
+ sdb.mvHashmap = mvhm
+ sdb.dep = -1
+}
+
+func (sdb *StateDB) GetMVHashmap() *blockstm.MVHashMap {
+ return sdb.mvHashmap
+}
+
+func (s *StateDB) MVWriteList() []blockstm.WriteDescriptor {
+ writes := make([]blockstm.WriteDescriptor, 0, len(s.writeMap))
+
+ for _, v := range s.writeMap {
+ if _, ok := s.revertedKeys[v.Path]; !ok {
+ writes = append(writes, v)
+ }
+ }
+
+ return writes
+}
+
+func (s *StateDB) MVFullWriteList() []blockstm.WriteDescriptor {
+ writes := make([]blockstm.WriteDescriptor, 0, len(s.writeMap))
+
+ for _, v := range s.writeMap {
+ writes = append(writes, v)
+ }
+
+ return writes
+}
+
+func (s *StateDB) MVReadMap() map[blockstm.Key]blockstm.ReadDescriptor {
+ return s.readMap
+}
+
+func (s *StateDB) MVReadList() []blockstm.ReadDescriptor {
+ reads := make([]blockstm.ReadDescriptor, 0, len(s.readMap))
+
+ for _, v := range s.MVReadMap() {
+ reads = append(reads, v)
+ }
+
+ return reads
+}
+
+func (s *StateDB) ensureReadMap() {
+ if s.readMap == nil {
+ s.readMap = make(map[blockstm.Key]blockstm.ReadDescriptor)
+ }
+}
+
+func (s *StateDB) ensureWriteMap() {
+ if s.writeMap == nil {
+ s.writeMap = make(map[blockstm.Key]blockstm.WriteDescriptor)
+ }
+}
+
+func (s *StateDB) ClearReadMap() {
+ s.readMap = make(map[blockstm.Key]blockstm.ReadDescriptor)
+}
+
+func (s *StateDB) ClearWriteMap() {
+ s.writeMap = make(map[blockstm.Key]blockstm.WriteDescriptor)
+}
+
+func (s *StateDB) HadInvalidRead() bool {
+ return s.dep >= 0
+}
+
+func (s *StateDB) DepTxIndex() int {
+ return s.dep
+}
+
+func (s *StateDB) SetIncarnation(inc int) {
+ s.incarnation = inc
+}
+
+type StorageVal[T any] struct {
+ Value *T
+}
+
+func MVRead[T any](s *StateDB, k blockstm.Key, defaultV T, readStorage func(s *StateDB) T) (v T) {
+ if s.mvHashmap == nil {
+ return readStorage(s)
+ }
+
+ s.ensureReadMap()
+
+ if s.writeMap != nil {
+ if _, ok := s.writeMap[k]; ok {
+ return readStorage(s)
+ }
+ }
+
+ if !k.IsAddress() {
+ // If we are reading subpath from a deleted account, return default value instead of reading from MVHashmap
+ addr := k.GetAddress()
+ if s.getStateObject(addr) == nil {
+ return defaultV
+ }
+ }
+
+ res := s.mvHashmap.Read(k, s.txIndex)
+
+ var rd blockstm.ReadDescriptor
+
+ rd.V = blockstm.Version{
+ TxnIndex: res.DepIdx(),
+ Incarnation: res.Incarnation(),
+ }
+
+ rd.Path = k
+
+ switch res.Status() {
+ case blockstm.MVReadResultDone:
+ {
+ v = readStorage(res.Value().(*StateDB))
+ rd.Kind = blockstm.ReadKindMap
+ }
+ case blockstm.MVReadResultDependency:
+ {
+ s.dep = res.DepIdx()
+
+ panic("Found dependency")
+ }
+ case blockstm.MVReadResultNone:
+ {
+ v = readStorage(s)
+ rd.Kind = blockstm.ReadKindStorage
+ }
+ default:
+ return defaultV
+ }
+
+ // TODO: I assume we don't want to overwrite an existing read because this could - for example - change a storage
+ // read to map if the same value is read multiple times.
+ if _, ok := s.readMap[k]; !ok {
+ s.readMap[k] = rd
+ }
+
+ return
+}
+
+func MVWrite(s *StateDB, k blockstm.Key) {
+ if s.mvHashmap != nil {
+ s.ensureWriteMap()
+ s.writeMap[k] = blockstm.WriteDescriptor{
+ Path: k,
+ V: s.Version(),
+ Val: s,
+ }
+ }
+}
+
+func RevertWrite(s *StateDB, k blockstm.Key) {
+ s.revertedKeys[k] = struct{}{}
+}
+
+func MVWritten(s *StateDB, k blockstm.Key) bool {
+ if s.mvHashmap == nil || s.writeMap == nil {
+ return false
+ }
+
+ _, ok := s.writeMap[k]
+
+ return ok
+}
+
+// Apply entries in the write set to MVHashMap. Note that this function does not clear the write set.
+func (s *StateDB) FlushMVWriteSet() {
+ if s.mvHashmap != nil && s.writeMap != nil {
+ s.mvHashmap.FlushMVWriteSet(s.MVFullWriteList())
+ }
+}
+
+// Apply entries in a given write set to StateDB. Note that this function does not change MVHashMap nor write set
+// of the current StateDB.
+func (sw *StateDB) ApplyMVWriteSet(writes []blockstm.WriteDescriptor) {
+ for i := range writes {
+ path := writes[i].Path
+ sr := writes[i].Val.(*StateDB)
+
+ if path.IsState() {
+ addr := path.GetAddress()
+ stateKey := path.GetStateKey()
+ state := sr.GetState(addr, stateKey)
+ sw.SetState(addr, stateKey, state)
+ } else if path.IsAddress() {
+ continue
+ } else {
+ addr := path.GetAddress()
+ switch path.GetSubpath() {
+ case BalancePath:
+ sw.SetBalance(addr, sr.GetBalance(addr))
+ case NoncePath:
+ sw.SetNonce(addr, sr.GetNonce(addr))
+ case CodePath:
+ sw.SetCode(addr, sr.GetCode(addr))
+ case SuicidePath:
+ stateObject := sr.getDeletedStateObject(addr)
+ if stateObject != nil && stateObject.deleted {
+ sw.Suicide(addr)
+ }
+ default:
+ panic(fmt.Errorf("unknown key type: %d", path.GetSubpath()))
+ }
+ }
+ }
+}
+
+type DumpStruct struct {
+ TxIdx int
+ TxInc int
+ VerIdx int
+ VerInc int
+ Path []byte
+ Op string
+}
+
+// get readMap Dump of format: "TxIdx, Inc, Path, Read"
+func (s *StateDB) GetReadMapDump() []DumpStruct {
+ readList := s.MVReadList()
+ res := make([]DumpStruct, 0, len(readList))
+
+ for _, val := range readList {
+ temp := &DumpStruct{
+ TxIdx: s.txIndex,
+ TxInc: s.incarnation,
+ VerIdx: val.V.TxnIndex,
+ VerInc: val.V.Incarnation,
+ Path: val.Path[:],
+ Op: "Read\n",
+ }
+ res = append(res, *temp)
+ }
+
+ return res
+}
+
+// GetWriteMapDump returns the write map as a list of DumpStruct entries
+func (s *StateDB) GetWriteMapDump() []DumpStruct {
+	writeList := s.MVWriteList()
+	res := make([]DumpStruct, 0, len(writeList))
+
+	for _, val := range writeList {
+		temp := &DumpStruct{
+			TxIdx:  s.txIndex,
+			TxInc:  s.incarnation,
+			VerIdx: val.V.TxnIndex,
+			VerInc: val.V.Incarnation,
+			Path:   val.Path[:],
+			Op:     "Write\n",
+		}
+		res = append(res, *temp)
+	}
+
+	return res
+}
+
+// add empty MVHashMap to StateDB
+func (s *StateDB) AddEmptyMVHashMap() {
+ mvh := blockstm.MakeMVHashMap()
+ s.mvHashmap = mvh
+}
+
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
@@ -257,22 +542,40 @@ func (s *StateDB) Empty(addr common.Address) bool {
return so == nil || so.empty()
}
+// Create a unique path for special fields (e.g. balance, code) in a state object.
+// func subPath(prefix []byte, s uint8) [blockstm.KeyLength]byte {
+// path := append(prefix, common.Hash{}.Bytes()...) // append a full empty hash to avoid collision with storage state
+// path = append(path, s) // append the special field identifier
+
+// return path
+// }
+
+const BalancePath = 1
+const NoncePath = 2
+const CodePath = 3
+const SuicidePath = 4
+
// GetBalance retrieves the balance from the given address or 0 if object not found
func (s *StateDB) GetBalance(addr common.Address) *big.Int {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Balance()
- }
- return common.Big0
+ return MVRead(s, blockstm.NewSubpathKey(addr, BalancePath), common.Big0, func(s *StateDB) *big.Int {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.Balance()
+ }
+
+ return common.Big0
+ })
}
func (s *StateDB) GetNonce(addr common.Address) uint64 {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Nonce()
- }
+ return MVRead(s, blockstm.NewSubpathKey(addr, NoncePath), 0, func(s *StateDB) uint64 {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.Nonce()
+ }
- return 0
+ return 0
+ })
}
// TxIndex returns the current transaction index set by Prepare.
@@ -280,37 +583,52 @@ func (s *StateDB) TxIndex() int {
return s.txIndex
}
-func (s *StateDB) GetCode(addr common.Address) []byte {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.Code(s.db)
+func (s *StateDB) Version() blockstm.Version {
+ return blockstm.Version{
+ TxnIndex: s.txIndex,
+ Incarnation: s.incarnation,
}
- return nil
+}
+
+func (s *StateDB) GetCode(addr common.Address) []byte {
+ return MVRead(s, blockstm.NewSubpathKey(addr, CodePath), nil, func(s *StateDB) []byte {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.Code(s.db)
+ }
+ return nil
+ })
}
func (s *StateDB) GetCodeSize(addr common.Address) int {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.CodeSize(s.db)
- }
- return 0
+ return MVRead(s, blockstm.NewSubpathKey(addr, CodePath), 0, func(s *StateDB) int {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.CodeSize(s.db)
+ }
+ return 0
+ })
}
func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
- stateObject := s.getStateObject(addr)
- if stateObject == nil {
- return common.Hash{}
- }
- return common.BytesToHash(stateObject.CodeHash())
+ return MVRead(s, blockstm.NewSubpathKey(addr, CodePath), common.Hash{}, func(s *StateDB) common.Hash {
+ stateObject := s.getStateObject(addr)
+ if stateObject == nil {
+ return common.Hash{}
+ }
+ return common.BytesToHash(stateObject.CodeHash())
+ })
}
// GetState retrieves a value from the given account's storage trie.
func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.GetState(s.db, hash)
- }
- return common.Hash{}
+ return MVRead(s, blockstm.NewStateKey(addr, hash), common.Hash{}, func(s *StateDB) common.Hash {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.GetState(s.db, hash)
+ }
+ return common.Hash{}
+ })
}
// GetProof returns the Merkle proof for a given account.
@@ -338,11 +656,13 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte,
// GetCommittedState retrieves a value from the given account's committed storage trie.
func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.GetCommittedState(s.db, hash)
- }
- return common.Hash{}
+ return MVRead(s, blockstm.NewStateKey(addr, hash), common.Hash{}, func(s *StateDB) common.Hash {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.GetCommittedState(s.db, hash)
+ }
+ return common.Hash{}
+ })
}
// Database retrieves the low level database supporting the lower level trie ops.
@@ -363,11 +683,13 @@ func (s *StateDB) StorageTrie(addr common.Address) Trie {
}
func (s *StateDB) HasSuicided(addr common.Address) bool {
- stateObject := s.getStateObject(addr)
- if stateObject != nil {
- return stateObject.suicided
- }
- return false
+ return MVRead(s, blockstm.NewSubpathKey(addr, SuicidePath), false, func(s *StateDB) bool {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.suicided
+ }
+ return false
+ })
}
/*
@@ -377,44 +699,68 @@ func (s *StateDB) HasSuicided(addr common.Address) bool {
// AddBalance adds amount to the account associated with addr.
func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) {
stateObject := s.GetOrNewStateObject(addr)
+
+ if s.mvHashmap != nil {
+ // ensure a read balance operation is recorded in mvHashmap
+ s.GetBalance(addr)
+ }
+
if stateObject != nil {
+ stateObject = s.mvRecordWritten(stateObject)
stateObject.AddBalance(amount)
+ MVWrite(s, blockstm.NewSubpathKey(addr, BalancePath))
}
}
// SubBalance subtracts amount from the account associated with addr.
func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) {
stateObject := s.GetOrNewStateObject(addr)
+
+ if s.mvHashmap != nil {
+ // ensure a read balance operation is recorded in mvHashmap
+ s.GetBalance(addr)
+ }
+
if stateObject != nil {
+ stateObject = s.mvRecordWritten(stateObject)
stateObject.SubBalance(amount)
+ MVWrite(s, blockstm.NewSubpathKey(addr, BalancePath))
}
}
func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
+ stateObject = s.mvRecordWritten(stateObject)
stateObject.SetBalance(amount)
+ MVWrite(s, blockstm.NewSubpathKey(addr, BalancePath))
}
}
func (s *StateDB) SetNonce(addr common.Address, nonce uint64) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
+ stateObject = s.mvRecordWritten(stateObject)
stateObject.SetNonce(nonce)
+ MVWrite(s, blockstm.NewSubpathKey(addr, NoncePath))
}
}
func (s *StateDB) SetCode(addr common.Address, code []byte) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
+ stateObject = s.mvRecordWritten(stateObject)
stateObject.SetCode(crypto.Keccak256Hash(code), code)
+ MVWrite(s, blockstm.NewSubpathKey(addr, CodePath))
}
}
func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
+ stateObject = s.mvRecordWritten(stateObject)
stateObject.SetState(s.db, key, value)
+ MVWrite(s, blockstm.NewStateKey(addr, key))
}
}
@@ -437,6 +783,8 @@ func (s *StateDB) Suicide(addr common.Address) bool {
if stateObject == nil {
return false
}
+
+ stateObject = s.mvRecordWritten(stateObject)
s.journal.append(suicideChange{
account: &addr,
prev: stateObject.suicided,
@@ -445,6 +793,9 @@ func (s *StateDB) Suicide(addr common.Address) bool {
stateObject.markSuicided()
stateObject.data.Balance = new(big.Int)
+ MVWrite(s, blockstm.NewSubpathKey(addr, SuicidePath))
+ MVWrite(s, blockstm.NewSubpathKey(addr, BalancePath))
+
return true
}
@@ -501,60 +852,62 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
// flag set. This is needed by the state journal to revert to the correct s-
// destructed object instead of wiping all knowledge about the state object.
func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
- // Prefer live objects if any is available
- if obj := s.stateObjects[addr]; obj != nil {
- return obj
- }
- // If no live objects are available, attempt to use snapshots
- var data *types.StateAccount
- if s.snap != nil {
- start := time.Now()
- acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
- if metrics.EnabledExpensive {
- s.SnapshotAccountReads += time.Since(start)
+ return MVRead(s, blockstm.NewAddressKey(addr), nil, func(s *StateDB) *stateObject {
+ // Prefer live objects if any is available
+ if obj := s.stateObjects[addr]; obj != nil {
+ return obj
}
- if err == nil {
- if acc == nil {
- return nil
+ // If no live objects are available, attempt to use snapshots
+ var data *types.StateAccount
+ if s.snap != nil { // nolint
+ start := time.Now()
+ acc, err := s.snap.Account(crypto.HashData(crypto.NewKeccakState(), addr.Bytes()))
+ if metrics.EnabledExpensive {
+ s.SnapshotAccountReads += time.Since(start)
}
- data = &types.StateAccount{
- Nonce: acc.Nonce,
- Balance: acc.Balance,
- CodeHash: acc.CodeHash,
- Root: common.BytesToHash(acc.Root),
+ if err == nil {
+ if acc == nil {
+ return nil
+ }
+ data = &types.StateAccount{
+ Nonce: acc.Nonce,
+ Balance: acc.Balance,
+ CodeHash: acc.CodeHash,
+ Root: common.BytesToHash(acc.Root),
+ }
+ if len(data.CodeHash) == 0 {
+ data.CodeHash = emptyCodeHash
+ }
+ if data.Root == (common.Hash{}) {
+ data.Root = emptyRoot
+ }
}
- if len(data.CodeHash) == 0 {
- data.CodeHash = emptyCodeHash
+ }
+ // If snapshot unavailable or reading from it failed, load from the database
+ if data == nil {
+ start := time.Now()
+ enc, err := s.trie.TryGet(addr.Bytes())
+ if metrics.EnabledExpensive {
+ s.AccountReads += time.Since(start)
}
- if data.Root == (common.Hash{}) {
- data.Root = emptyRoot
+ if err != nil {
+ s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
+ return nil
+ }
+ if len(enc) == 0 {
+ return nil
+ }
+ data = new(types.StateAccount)
+ if err := rlp.DecodeBytes(enc, data); err != nil {
+ log.Error("Failed to decode state object", "addr", addr, "err", err)
+ return nil
}
}
- }
- // If snapshot unavailable or reading from it failed, load from the database
- if data == nil {
- start := time.Now()
- enc, err := s.trie.TryGet(addr.Bytes())
- if metrics.EnabledExpensive {
- s.AccountReads += time.Since(start)
- }
- if err != nil {
- s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
- return nil
- }
- if len(enc) == 0 {
- return nil
- }
- data = new(types.StateAccount)
- if err := rlp.DecodeBytes(enc, data); err != nil {
- log.Error("Failed to decode state object", "addr", addr, "err", err)
- return nil
- }
- }
- // Insert into the live set
- obj := newObject(s, addr, *data)
- s.setStateObject(obj)
- return obj
+ // Insert into the live set
+ obj := newObject(s, addr, *data)
+ s.setStateObject(obj)
+ return obj
+ })
}
func (s *StateDB) setStateObject(object *stateObject) {
@@ -570,6 +923,28 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
return stateObject
}
+// mvRecordWritten checks whether a state object is already present in the current MV writeMap.
+// If yes, it returns the object directly.
+// If not, it clones the object and inserts it into the writeMap before returning it.
+func (s *StateDB) mvRecordWritten(object *stateObject) *stateObject {
+ if s.mvHashmap == nil {
+ return object
+ }
+
+ addrKey := blockstm.NewAddressKey(object.Address())
+
+ if MVWritten(s, addrKey) {
+ return object
+ }
+
+ // Deepcopy is needed to ensure that objects are not written by multiple transactions at the same time, because
+ // the input state object can come from a different transaction.
+ s.setStateObject(object.deepCopy(s))
+ MVWrite(s, addrKey)
+
+ return s.stateObjects[object.Address()]
+}
+
// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
@@ -589,6 +964,8 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject)
s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
}
s.setStateObject(newobj)
+
+ MVWrite(s, blockstm.NewAddressKey(addr))
if prev != nil && !prev.deleted {
return newobj, prev
}
@@ -609,6 +986,7 @@ func (s *StateDB) CreateAccount(addr common.Address) {
newObj, prev := s.createObject(addr)
if prev != nil {
newObj.setBalance(prev.data.Balance)
+ MVWrite(s, blockstm.NewSubpathKey(addr, BalancePath))
}
}
@@ -651,6 +1029,7 @@ func (s *StateDB) Copy() *StateDB {
stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
+ revertedKeys: make(map[blockstm.Key]struct{}),
refund: s.refund,
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
@@ -738,6 +1117,10 @@ func (s *StateDB) Copy() *StateDB {
state.snapStorage[k] = temp
}
}
+
+ if s.mvHashmap != nil {
+ state.mvHashmap = s.mvHashmap
+ }
return state
}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index e9576d4dc4..053d57470a 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -29,7 +29,10 @@ import (
"testing"
"testing/quick"
+ "github.com/stretchr/testify/assert"
+
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/blockstm"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
)
@@ -488,6 +491,457 @@ func TestTouchDelete(t *testing.T) {
}
}
+func TestMVHashMapReadWriteDelete(t *testing.T) {
+ t.Parallel()
+
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ mvhm := blockstm.MakeMVHashMap()
+ s, _ := NewWithMVHashmap(common.Hash{}, db, nil, mvhm)
+
+ states := []*StateDB{s}
+
+ // Create copies of the original state for each transition
+ for i := 1; i <= 4; i++ {
+ sCopy := s.Copy()
+ sCopy.txIndex = i
+ states = append(states, sCopy)
+ }
+
+ addr := common.HexToAddress("0x01")
+ key := common.HexToHash("0x01")
+ val := common.HexToHash("0x01")
+ balance := new(big.Int).SetUint64(uint64(100))
+
+ // Tx0 read
+ v := states[0].GetState(addr, key)
+
+ assert.Equal(t, common.Hash{}, v)
+
+ // Tx1 write
+ states[1].GetOrNewStateObject(addr)
+ states[1].SetState(addr, key, val)
+ states[1].SetBalance(addr, balance)
+ states[1].FlushMVWriteSet()
+
+ // Tx1 read
+ v = states[1].GetState(addr, key)
+ b := states[1].GetBalance(addr)
+
+ assert.Equal(t, val, v)
+ assert.Equal(t, balance, b)
+
+ // Tx2 read
+ v = states[2].GetState(addr, key)
+ b = states[2].GetBalance(addr)
+
+ assert.Equal(t, val, v)
+ assert.Equal(t, balance, b)
+
+ // Tx3 delete
+ states[3].Suicide(addr)
+
+ // Within Tx 3, the state should not change before finalize
+ v = states[3].GetState(addr, key)
+ assert.Equal(t, val, v)
+
+ // After finalizing Tx 3, the state will change
+ states[3].Finalise(false)
+ v = states[3].GetState(addr, key)
+ assert.Equal(t, common.Hash{}, v)
+ states[3].FlushMVWriteSet()
+
+ // Tx4 read
+ v = states[4].GetState(addr, key)
+ b = states[4].GetBalance(addr)
+
+ assert.Equal(t, common.Hash{}, v)
+ assert.Equal(t, common.Big0, b)
+}
+
+func TestMVHashMapRevert(t *testing.T) {
+ t.Parallel()
+
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ mvhm := blockstm.MakeMVHashMap()
+ s, _ := NewWithMVHashmap(common.Hash{}, db, nil, mvhm)
+
+ states := []*StateDB{s}
+
+ // Create copies of the original state for each transition
+ for i := 1; i <= 4; i++ {
+ sCopy := s.Copy()
+ sCopy.txIndex = i
+ states = append(states, sCopy)
+ }
+
+ addr := common.HexToAddress("0x01")
+ key := common.HexToHash("0x01")
+ val := common.HexToHash("0x01")
+ balance := new(big.Int).SetUint64(uint64(100))
+
+ // Tx0 write
+ states[0].GetOrNewStateObject(addr)
+ states[0].SetState(addr, key, val)
+ states[0].SetBalance(addr, balance)
+ states[0].FlushMVWriteSet()
+
+ // Tx1 perform some ops and then revert
+ snapshot := states[1].Snapshot()
+ states[1].AddBalance(addr, new(big.Int).SetUint64(uint64(100)))
+ states[1].SetState(addr, key, common.HexToHash("0x02"))
+ v := states[1].GetState(addr, key)
+ b := states[1].GetBalance(addr)
+ assert.Equal(t, new(big.Int).SetUint64(uint64(200)), b)
+ assert.Equal(t, common.HexToHash("0x02"), v)
+
+ states[1].Suicide(addr)
+
+ states[1].RevertToSnapshot(snapshot)
+
+ v = states[1].GetState(addr, key)
+ b = states[1].GetBalance(addr)
+
+ assert.Equal(t, val, v)
+ assert.Equal(t, balance, b)
+ states[1].Finalise(false)
+ states[1].FlushMVWriteSet()
+
+ // Tx2 check the state and balance
+ v = states[2].GetState(addr, key)
+ b = states[2].GetBalance(addr)
+
+ assert.Equal(t, val, v)
+ assert.Equal(t, balance, b)
+}
+
+func TestMVHashMapMarkEstimate(t *testing.T) {
+ t.Parallel()
+
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ mvhm := blockstm.MakeMVHashMap()
+ s, _ := NewWithMVHashmap(common.Hash{}, db, nil, mvhm)
+
+ states := []*StateDB{s}
+
+ // Create copies of the original state for each transition
+ for i := 1; i <= 4; i++ {
+ sCopy := s.Copy()
+ sCopy.txIndex = i
+ states = append(states, sCopy)
+ }
+
+ addr := common.HexToAddress("0x01")
+ key := common.HexToHash("0x01")
+ val := common.HexToHash("0x01")
+ balance := new(big.Int).SetUint64(uint64(100))
+
+ // Tx0 read
+ v := states[0].GetState(addr, key)
+ assert.Equal(t, common.Hash{}, v)
+
+ // Tx0 write
+ states[0].SetState(addr, key, val)
+ v = states[0].GetState(addr, key)
+ assert.Equal(t, val, v)
+ states[0].FlushMVWriteSet()
+
+ // Tx1 write
+ states[1].GetOrNewStateObject(addr)
+ states[1].SetState(addr, key, val)
+ states[1].SetBalance(addr, balance)
+ states[1].FlushMVWriteSet()
+
+ // Tx2 read
+ v = states[2].GetState(addr, key)
+ b := states[2].GetBalance(addr)
+
+ assert.Equal(t, val, v)
+ assert.Equal(t, balance, b)
+
+ // Tx1 mark estimate
+ for _, v := range states[1].MVWriteList() {
+ mvhm.MarkEstimate(v.Path, 1)
+ }
+
+ defer func() {
+ if r := recover(); r == nil {
+ t.Errorf("The code did not panic")
+ } else {
+ t.Log("Recovered in f", r)
+ }
+ }()
+
+ // Tx2 read again should get default (empty) vals because its dependency Tx1 is marked as estimate
+ states[2].GetState(addr, key)
+ states[2].GetBalance(addr)
+
+ // Tx1 read again should get Tx0 vals
+ v = states[1].GetState(addr, key)
+ assert.Equal(t, val, v)
+}
+
+func TestMVHashMapOverwrite(t *testing.T) {
+ t.Parallel()
+
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ mvhm := blockstm.MakeMVHashMap()
+ s, _ := NewWithMVHashmap(common.Hash{}, db, nil, mvhm)
+
+ states := []*StateDB{s}
+
+ // Create copies of the original state for each transition
+ for i := 1; i <= 4; i++ {
+ sCopy := s.Copy()
+ sCopy.txIndex = i
+ states = append(states, sCopy)
+ }
+
+ addr := common.HexToAddress("0x01")
+ key := common.HexToHash("0x01")
+ val1 := common.HexToHash("0x01")
+ balance1 := new(big.Int).SetUint64(uint64(100))
+ val2 := common.HexToHash("0x02")
+ balance2 := new(big.Int).SetUint64(uint64(200))
+
+ // Tx0 write
+ states[0].GetOrNewStateObject(addr)
+ states[0].SetState(addr, key, val1)
+ states[0].SetBalance(addr, balance1)
+ states[0].FlushMVWriteSet()
+
+ // Tx1 write
+ states[1].SetState(addr, key, val2)
+ states[1].SetBalance(addr, balance2)
+ v := states[1].GetState(addr, key)
+ b := states[1].GetBalance(addr)
+ states[1].FlushMVWriteSet()
+
+ assert.Equal(t, val2, v)
+ assert.Equal(t, balance2, b)
+
+ // Tx2 read should get Tx1's value
+ v = states[2].GetState(addr, key)
+ b = states[2].GetBalance(addr)
+
+ assert.Equal(t, val2, v)
+ assert.Equal(t, balance2, b)
+
+ // Tx1 delete
+ for _, v := range states[1].writeMap {
+ mvhm.Delete(v.Path, 1)
+
+ states[1].writeMap = nil
+ }
+
+ // Tx2 read should get Tx0's value
+ v = states[2].GetState(addr, key)
+ b = states[2].GetBalance(addr)
+
+ assert.Equal(t, val1, v)
+ assert.Equal(t, balance1, b)
+
+ // Tx1 read should get Tx0's value
+ v = states[1].GetState(addr, key)
+ b = states[1].GetBalance(addr)
+
+ assert.Equal(t, val1, v)
+ assert.Equal(t, balance1, b)
+
+ // Tx0 delete
+ for _, v := range states[0].writeMap {
+ mvhm.Delete(v.Path, 0)
+
+ states[0].writeMap = nil
+ }
+
+ // Tx2 read again should get default vals
+ v = states[2].GetState(addr, key)
+ b = states[2].GetBalance(addr)
+
+ assert.Equal(t, common.Hash{}, v)
+ assert.Equal(t, common.Big0, b)
+}
+
+func TestMVHashMapWriteNoConflict(t *testing.T) {
+ t.Parallel()
+
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ mvhm := blockstm.MakeMVHashMap()
+ s, _ := NewWithMVHashmap(common.Hash{}, db, nil, mvhm)
+
+ states := []*StateDB{s}
+
+ // Create copies of the original state for each transition
+ for i := 1; i <= 4; i++ {
+ sCopy := s.Copy()
+ sCopy.txIndex = i
+ states = append(states, sCopy)
+ }
+
+ addr := common.HexToAddress("0x01")
+ key1 := common.HexToHash("0x01")
+ key2 := common.HexToHash("0x02")
+ val1 := common.HexToHash("0x01")
+ balance1 := new(big.Int).SetUint64(uint64(100))
+ val2 := common.HexToHash("0x02")
+
+ // Tx0 write
+ states[0].GetOrNewStateObject(addr)
+ states[0].FlushMVWriteSet()
+
+ // Tx2 write
+ states[2].SetState(addr, key2, val2)
+ states[2].FlushMVWriteSet()
+
+ // Tx1 write
+ tx1Snapshot := states[1].Snapshot()
+ states[1].SetState(addr, key1, val1)
+ states[1].SetBalance(addr, balance1)
+ states[1].FlushMVWriteSet()
+
+ // Tx1 read
+ assert.Equal(t, val1, states[1].GetState(addr, key1))
+ assert.Equal(t, balance1, states[1].GetBalance(addr))
+ // Tx1 should see empty value in key2
+ assert.Equal(t, common.Hash{}, states[1].GetState(addr, key2))
+
+ // Tx2 read
+ assert.Equal(t, val2, states[2].GetState(addr, key2))
+ // Tx2 should see values written by Tx1
+ assert.Equal(t, val1, states[2].GetState(addr, key1))
+ assert.Equal(t, balance1, states[2].GetBalance(addr))
+
+ // Tx3 read
+ assert.Equal(t, val1, states[3].GetState(addr, key1))
+ assert.Equal(t, val2, states[3].GetState(addr, key2))
+ assert.Equal(t, balance1, states[3].GetBalance(addr))
+
+ // Tx2 delete
+ for _, v := range states[2].writeMap {
+ mvhm.Delete(v.Path, 2)
+
+ states[2].writeMap = nil
+ }
+
+ assert.Equal(t, val1, states[3].GetState(addr, key1))
+ assert.Equal(t, balance1, states[3].GetBalance(addr))
+ assert.Equal(t, common.Hash{}, states[3].GetState(addr, key2))
+
+ // Tx1 revert
+ states[1].RevertToSnapshot(tx1Snapshot)
+ states[1].FlushMVWriteSet()
+
+ assert.Equal(t, common.Hash{}, states[3].GetState(addr, key1))
+ assert.Equal(t, common.Hash{}, states[3].GetState(addr, key2))
+ assert.Equal(t, common.Big0, states[3].GetBalance(addr))
+
+ // Tx1 delete
+ for _, v := range states[1].writeMap {
+ mvhm.Delete(v.Path, 1)
+
+ states[1].writeMap = nil
+ }
+
+ assert.Equal(t, common.Hash{}, states[3].GetState(addr, key1))
+ assert.Equal(t, common.Hash{}, states[3].GetState(addr, key2))
+ assert.Equal(t, common.Big0, states[3].GetBalance(addr))
+}
+
+func TestApplyMVWriteSet(t *testing.T) {
+ t.Parallel()
+
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ mvhm := blockstm.MakeMVHashMap()
+ s, _ := NewWithMVHashmap(common.Hash{}, db, nil, mvhm)
+
+ sClean := s.Copy()
+ sClean.mvHashmap = nil
+
+ sSingleProcess := sClean.Copy()
+
+ states := []*StateDB{s}
+
+ // Create copies of the original state for each transition
+ for i := 1; i <= 4; i++ {
+ sCopy := s.Copy()
+ sCopy.txIndex = i
+ states = append(states, sCopy)
+ }
+
+ addr1 := common.HexToAddress("0x01")
+ addr2 := common.HexToAddress("0x02")
+ addr3 := common.HexToAddress("0x03")
+ key1 := common.HexToHash("0x01")
+ key2 := common.HexToHash("0x02")
+ val1 := common.HexToHash("0x01")
+ balance1 := new(big.Int).SetUint64(uint64(100))
+ val2 := common.HexToHash("0x02")
+ balance2 := new(big.Int).SetUint64(uint64(200))
+ code := []byte{1, 2, 3}
+
+ // Tx0 write
+ states[0].GetOrNewStateObject(addr1)
+ states[0].SetState(addr1, key1, val1)
+ states[0].SetBalance(addr1, balance1)
+ states[0].SetState(addr2, key2, val2)
+ states[0].GetOrNewStateObject(addr3)
+ states[0].Finalise(true)
+ states[0].FlushMVWriteSet()
+
+ sSingleProcess.GetOrNewStateObject(addr1)
+ sSingleProcess.SetState(addr1, key1, val1)
+ sSingleProcess.SetBalance(addr1, balance1)
+ sSingleProcess.SetState(addr2, key2, val2)
+ sSingleProcess.GetOrNewStateObject(addr3)
+
+ sClean.ApplyMVWriteSet(states[0].MVWriteList())
+
+ assert.Equal(t, sSingleProcess.IntermediateRoot(true), sClean.IntermediateRoot(true))
+
+ // Tx1 write
+ states[1].SetState(addr1, key2, val2)
+ states[1].SetBalance(addr1, balance2)
+ states[1].SetNonce(addr1, 1)
+ states[1].Finalise(true)
+ states[1].FlushMVWriteSet()
+
+ sSingleProcess.SetState(addr1, key2, val2)
+ sSingleProcess.SetBalance(addr1, balance2)
+ sSingleProcess.SetNonce(addr1, 1)
+
+ sClean.ApplyMVWriteSet(states[1].MVWriteList())
+
+ assert.Equal(t, sSingleProcess.IntermediateRoot(true), sClean.IntermediateRoot(true))
+
+ // Tx2 write
+ states[2].SetState(addr1, key1, val2)
+ states[2].SetBalance(addr1, balance2)
+ states[2].SetNonce(addr1, 2)
+ states[2].Finalise(true)
+ states[2].FlushMVWriteSet()
+
+ sSingleProcess.SetState(addr1, key1, val2)
+ sSingleProcess.SetBalance(addr1, balance2)
+ sSingleProcess.SetNonce(addr1, 2)
+
+ sClean.ApplyMVWriteSet(states[2].MVWriteList())
+
+ assert.Equal(t, sSingleProcess.IntermediateRoot(true), sClean.IntermediateRoot(true))
+
+ // Tx3 write
+ states[3].Suicide(addr2)
+ states[3].SetCode(addr1, code)
+ states[3].Finalise(true)
+ states[3].FlushMVWriteSet()
+
+ sSingleProcess.Suicide(addr2)
+ sSingleProcess.SetCode(addr1, code)
+
+ sClean.ApplyMVWriteSet(states[3].MVWriteList())
+
+ assert.Equal(t, sSingleProcess.IntermediateRoot(true), sClean.IntermediateRoot(true))
+}
+
// TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
// See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
func TestCopyOfCopy(t *testing.T) {
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index 10a1722940..215734a590 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -17,6 +17,7 @@
package core
import (
+ "context"
"sync/atomic"
"github.com/ethereum/go-ethereum/consensus"
@@ -89,6 +90,6 @@ func precacheTransaction(msg types.Message, config *params.ChainConfig, gaspool
// Update the evm with the new transaction context.
evm.Reset(NewEVMTxContext(msg), statedb)
// Add addresses to access list if applicable
- _, err := ApplyMessage(evm, msg, gaspool)
+ _, err := ApplyMessage(evm, msg, gaspool, context.Background())
return err
}
diff --git a/core/state_processor.go b/core/state_processor.go
index d4c77ae410..fac921cd2f 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -17,6 +17,7 @@
package core
import (
+ "context"
"fmt"
"math/big"
@@ -56,7 +57,7 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
-func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
+func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config, interruptCtx context.Context) (types.Receipts, []*types.Log, uint64, error) {
var (
receipts types.Receipts
usedGas = new(uint64)
@@ -74,12 +75,20 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
+ if interruptCtx != nil {
+ select {
+ case <-interruptCtx.Done():
+ return nil, nil, 0, interruptCtx.Err()
+ default:
+ }
+ }
+
msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number), header.BaseFee)
if err != nil {
return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
}
statedb.Prepare(tx.Hash(), i)
- receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv)
+ receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, interruptCtx)
if err != nil {
return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
}
@@ -92,17 +101,61 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
return receipts, allLogs, *usedGas, nil
}
-func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) {
+//nolint:unparam
+func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM, interruptCtx context.Context) (*types.Receipt, error) {
// Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg)
evm.Reset(txContext, statedb)
- // Apply the transaction to the current state (included in the env).
- result, err := ApplyMessage(evm, msg, gp)
+ var result *ExecutionResult
+
+ var err error
+
+ backupMVHashMap := statedb.GetMVHashmap()
+
+ // pause recording read and write
+ statedb.SetMVHashmap(nil)
+
+ coinbaseBalance := statedb.GetBalance(evm.Context.Coinbase)
+
+ // resume recording read and write
+ statedb.SetMVHashmap(backupMVHashMap)
+
+ result, err = ApplyMessageNoFeeBurnOrTip(evm, msg, gp, interruptCtx)
if err != nil {
return nil, err
}
+ // stop recording read and write
+ statedb.SetMVHashmap(nil)
+
+ if evm.ChainConfig().IsLondon(blockNumber) {
+ statedb.AddBalance(result.BurntContractAddress, result.FeeBurnt)
+ }
+
+ statedb.AddBalance(evm.Context.Coinbase, result.FeeTipped)
+ output1 := new(big.Int).SetBytes(result.SenderInitBalance.Bytes())
+ output2 := new(big.Int).SetBytes(coinbaseBalance.Bytes())
+
+	// This transfer log is deprecated and will be removed in a future fork. PLEASE DO NOT USE this transfer log going forward. Parameters won't get updated as expected going forward with EIP1559
+ // add transfer log
+ AddFeeTransferLog(
+ statedb,
+
+ msg.From(),
+ evm.Context.Coinbase,
+
+ result.FeeTipped,
+ result.SenderInitBalance,
+ coinbaseBalance,
+ output1.Sub(output1, result.FeeTipped),
+ output2.Add(output2, result.FeeTipped),
+ )
+
+ if result.Err == vm.ErrInterrupt {
+ return nil, result.Err
+ }
+
// Update the state with pending changes.
var root []byte
if config.IsByzantium(blockNumber) {
@@ -141,7 +194,7 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon
// and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid.
-func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) {
+func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config, interruptCtx context.Context) (*types.Receipt, error) {
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number), header.BaseFee)
if err != nil {
return nil, err
@@ -149,5 +202,6 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// Create a new context to be used in the EVM environment
blockContext := NewEVMBlockContext(header, bc, author)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg)
- return applyTransaction(msg, config, bc, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
+
+ return applyTransaction(msg, config, bc, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv, interruptCtx)
}
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index 5dc076a11c..35e53a2e54 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -234,28 +234,33 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil, nil)
+ genesis = gspec.MustCommit(db)
+ blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil, nil)
+ parallelBlockchain, _ = NewParallelBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{ParallelEnable: true, ParallelSpeculativeProcesses: 8}, nil, nil, nil)
)
defer blockchain.Stop()
- for i, tt := range []struct {
- txs []*types.Transaction
- want string
- }{
- { // ErrTxTypeNotSupported
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)),
+ defer parallelBlockchain.Stop()
+
+ for _, bc := range []*BlockChain{blockchain, parallelBlockchain} {
+ for i, tt := range []struct {
+ txs []*types.Transaction
+ want string
+ }{
+ { // ErrTxTypeNotSupported
+ txs: []*types.Transaction{
+ mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)),
+ },
+ want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: transaction type not supported",
},
- want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: transaction type not supported",
- },
- } {
- block := GenerateBadBlock(genesis, ethash.NewFaker(), tt.txs, gspec.Config)
- _, err := blockchain.InsertChain(types.Blocks{block})
- if err == nil {
- t.Fatal("block imported without errors")
- }
- if have, want := err.Error(), tt.want; have != want {
- t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
+ } {
+ block := GenerateBadBlock(genesis, ethash.NewFaker(), tt.txs, gspec.Config)
+ _, err := bc.InsertChain(types.Blocks{block})
+ if err == nil {
+ t.Fatal("block imported without errors")
+ }
+ if have, want := err.Error(), tt.want; have != want {
+ t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
+ }
}
}
}
@@ -274,28 +279,33 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil, nil)
+ genesis = gspec.MustCommit(db)
+ blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil, nil)
+ parallelBlockchain, _ = NewParallelBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{ParallelEnable: true, ParallelSpeculativeProcesses: 8}, nil, nil, nil)
)
defer blockchain.Stop()
- for i, tt := range []struct {
- txs []*types.Transaction
- want string
- }{
- { // ErrSenderNoEOA
- txs: []*types.Transaction{
- mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)),
+ defer parallelBlockchain.Stop()
+
+ for _, bc := range []*BlockChain{blockchain, parallelBlockchain} {
+ for i, tt := range []struct {
+ txs []*types.Transaction
+ want string
+ }{
+ { // ErrSenderNoEOA
+ txs: []*types.Transaction{
+ mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)),
+ },
+ want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1",
},
- want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1",
- },
- } {
- block := GenerateBadBlock(genesis, ethash.NewFaker(), tt.txs, gspec.Config)
- _, err := blockchain.InsertChain(types.Blocks{block})
- if err == nil {
- t.Fatal("block imported without errors")
- }
- if have, want := err.Error(), tt.want; have != want {
- t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
+ } {
+ block := GenerateBadBlock(genesis, ethash.NewFaker(), tt.txs, gspec.Config)
+ _, err := bc.InsertChain(types.Blocks{block})
+ if err == nil {
+ t.Fatal("block imported without errors")
+ }
+ if have, want := err.Error(), tt.want; have != want {
+ t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)
+ }
}
}
}
diff --git a/core/state_transition.go b/core/state_transition.go
index 3fc5a635e9..39e23f3ecb 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -17,6 +17,7 @@
package core
import (
+ "context"
"fmt"
"math"
"math/big"
@@ -62,6 +63,11 @@ type StateTransition struct {
data []byte
state vm.StateDB
evm *vm.EVM
+
+ // If true, fee burning and tipping won't happen during transition. Instead, their values will be included in the
+ // ExecutionResult, which caller can use the values to update the balance of burner and coinbase account.
+ // This is useful during parallel state transition, where the common account read/write should be minimized.
+ noFeeBurnAndTip bool
}
// Message represents a message sent to a contract.
@@ -84,9 +90,13 @@ type Message interface {
// ExecutionResult includes all output after executing given evm
// message no matter the execution itself is successful or not.
type ExecutionResult struct {
- UsedGas uint64 // Total used gas but include the refunded gas
- Err error // Any error encountered during the execution(listed in core/vm/errors.go)
- ReturnData []byte // Returned data from evm(function result or data supplied with revert opcode)
+ UsedGas uint64 // Total used gas but include the refunded gas
+ Err error // Any error encountered during the execution(listed in core/vm/errors.go)
+ ReturnData []byte // Returned data from evm(function result or data supplied with revert opcode)
+ SenderInitBalance *big.Int
+ FeeBurnt *big.Int
+ BurntContractAddress common.Address
+ FeeTipped *big.Int
}
// Unwrap returns the internal evm error which allows us for further
@@ -179,8 +189,15 @@ func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition
// the gas used (which includes gas refunds) and an error if it failed. An error always
// indicates a core error meaning that the message would always fail for that particular
// state and would never be accepted within a block.
-func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) (*ExecutionResult, error) {
- return NewStateTransition(evm, msg, gp).TransitionDb()
+func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool, interruptCtx context.Context) (*ExecutionResult, error) {
+ return NewStateTransition(evm, msg, gp).TransitionDb(interruptCtx)
+}
+
+func ApplyMessageNoFeeBurnOrTip(evm *vm.EVM, msg Message, gp *GasPool, interruptCtx context.Context) (*ExecutionResult, error) {
+ st := NewStateTransition(evm, msg, gp)
+ st.noFeeBurnAndTip = true
+
+ return st.TransitionDb(interruptCtx)
}
// to returns the recipient of the message.
@@ -274,9 +291,14 @@ func (st *StateTransition) preCheck() error {
//
// However if any consensus issue encountered, return the error directly with
// nil evm execution result.
-func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
+func (st *StateTransition) TransitionDb(interruptCtx context.Context) (*ExecutionResult, error) {
input1 := st.state.GetBalance(st.msg.From())
- input2 := st.state.GetBalance(st.evm.Context.Coinbase)
+
+ var input2 *big.Int
+
+ if !st.noFeeBurnAndTip {
+ input2 = st.state.GetBalance(st.evm.Context.Coinbase)
+ }
// First check this message satisfies all consensus rules before
// applying the message. The rules include these clauses
@@ -327,7 +349,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
} else {
// Increment the nonce for the next transaction
st.state.SetNonce(msg.From(), st.state.GetNonce(sender.Address())+1)
- ret, st.gas, vmerr = st.evm.Call(sender, st.to(), st.data, st.gas, st.value)
+ ret, st.gas, vmerr = st.evm.Call(sender, st.to(), st.data, st.gas, st.value, interruptCtx)
}
if !london {
@@ -342,34 +364,50 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
effectiveTip = cmath.BigMin(st.gasTipCap, new(big.Int).Sub(st.gasFeeCap, st.evm.Context.BaseFee))
}
amount := new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), effectiveTip)
+
+ var burnAmount *big.Int
+
+ var burntContractAddress common.Address
+
if london {
- burntContractAddress := common.HexToAddress(st.evm.ChainConfig().Bor.CalculateBurntContract(st.evm.Context.BlockNumber.Uint64()))
- burnAmount := new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.evm.Context.BaseFee)
- st.state.AddBalance(burntContractAddress, burnAmount)
+ burntContractAddress = common.HexToAddress(st.evm.ChainConfig().Bor.CalculateBurntContract(st.evm.Context.BlockNumber.Uint64()))
+ burnAmount = new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.evm.Context.BaseFee)
+
+ if !st.noFeeBurnAndTip {
+ st.state.AddBalance(burntContractAddress, burnAmount)
+ }
+ }
+
+ if !st.noFeeBurnAndTip {
+ st.state.AddBalance(st.evm.Context.Coinbase, amount)
+
+ output1 := new(big.Int).SetBytes(input1.Bytes())
+ output2 := new(big.Int).SetBytes(input2.Bytes())
+
+	// This transfer log is deprecated and will be removed in a future fork. PLEASE DO NOT USE this transfer log going forward. Parameters won't get updated as expected going forward with EIP1559
+ // add transfer log
+ AddFeeTransferLog(
+ st.state,
+
+ msg.From(),
+ st.evm.Context.Coinbase,
+
+ amount,
+ input1,
+ input2,
+ output1.Sub(output1, amount),
+ output2.Add(output2, amount),
+ )
}
- st.state.AddBalance(st.evm.Context.Coinbase, amount)
- output1 := new(big.Int).SetBytes(input1.Bytes())
- output2 := new(big.Int).SetBytes(input2.Bytes())
-
- // Deprecating transfer log and will be removed in future fork. PLEASE DO NOT USE this transfer log going forward. Parameters won't get updated as expected going forward with EIP1559
- // add transfer log
- AddFeeTransferLog(
- st.state,
-
- msg.From(),
- st.evm.Context.Coinbase,
-
- amount,
- input1,
- input2,
- output1.Sub(output1, amount),
- output2.Add(output2, amount),
- )
return &ExecutionResult{
- UsedGas: st.gasUsed(),
- Err: vmerr,
- ReturnData: ret,
+ UsedGas: st.gasUsed(),
+ Err: vmerr,
+ ReturnData: ret,
+ SenderInitBalance: input1,
+ FeeBurnt: burnAmount,
+ BurntContractAddress: burntContractAddress,
+ FeeTipped: amount,
}, nil
}
diff --git a/core/tests/blockchain_repair_test.go b/core/tests/blockchain_repair_test.go
index 0d4a86b069..9cb6b8f899 100644
--- a/core/tests/blockchain_repair_test.go
+++ b/core/tests/blockchain_repair_test.go
@@ -1815,7 +1815,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
chainConfig.LondonBlock = big.NewInt(0)
- _, back, closeFn := miner.NewTestWorker(t, chainConfig, engine, db, 0, 0, 0)
+ _, back, closeFn := miner.NewTestWorker(t, chainConfig, engine, db, 0, 0, 0, 0)
defer closeFn()
genesis := back.BlockChain().Genesis()
diff --git a/core/types.go b/core/types.go
index 4c5b74a498..9cdab38483 100644
--- a/core/types.go
+++ b/core/types.go
@@ -17,6 +17,8 @@
package core
import (
+ "context"
+
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -47,5 +49,5 @@ type Processor interface {
// Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both
// the processor (coinbase) and any included uncles.
- Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error)
+ Process(block *types.Block, statedb *state.StateDB, cfg vm.Config, interruptCtx context.Context) (types.Receipts, []*types.Log, uint64, error)
}
diff --git a/core/types/block.go b/core/types/block.go
index 314990dc99..0af6a35501 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -87,6 +87,11 @@ type Header struct {
// BaseFee was added by EIP-1559 and is ignored in legacy headers.
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
+ // length of TxDependency -> n (n = number of transactions in the block)
+ // length of TxDependency[i] -> k (k = a whole number)
+	// k elements in TxDependency[i] -> transaction indexes that transaction i depends on
+ TxDependency [][]uint64 `json:"txDependency" rlp:"optional"`
+
/*
TODO (MariusVanDerWijden) Add this field once needed
// Random was added during the merge and contains the BeaconState randomness
@@ -252,6 +257,15 @@ func CopyHeader(h *Header) *Header {
cpy.Extra = make([]byte, len(h.Extra))
copy(cpy.Extra, h.Extra)
}
+
+ if len(h.TxDependency) > 0 {
+ cpy.TxDependency = make([][]uint64, len(h.TxDependency))
+
+ for i, dep := range h.TxDependency {
+ cpy.TxDependency[i] = make([]uint64, len(dep))
+ copy(cpy.TxDependency[i], dep)
+ }
+ }
return &cpy
}
@@ -307,6 +321,7 @@ func (b *Block) TxHash() common.Hash { return b.header.TxHash }
func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
func (b *Block) UncleHash() common.Hash { return b.header.UncleHash }
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
+func (b *Block) TxDependency() [][]uint64 { return b.header.TxDependency }
func (b *Block) BaseFee() *big.Int {
if b.header.BaseFee == nil {
diff --git a/core/types/block_test.go b/core/types/block_test.go
index aa1db2f4fa..dede213bf6 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -68,6 +68,51 @@ func TestBlockEncoding(t *testing.T) {
}
}
+func TestTxDependencyBlockEncoding(t *testing.T) {
+ t.Parallel()
+
+ blockEnc := common.FromHex("f90268f90201a083cafc574e1f51ba9dc0568fc617a08ea2429fb384059c972f13b19fa1c8dd55a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a05fe50b260da6308036625b850b5d6ced6d0a9f814c0688bc91ffb7b7a3a54b67a0bc37d79753ad738a6dac4921e57392f145d8887476de3f783dfa7edae9283e52b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd8825208845506eb0780a0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c480c6c20201c20180f861f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1c0")
+
+ var block Block
+
+ if err := rlp.DecodeBytes(blockEnc, &block); err != nil {
+ t.Fatal("decode error: ", err)
+ }
+
+ check := func(f string, got, want interface{}) {
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%s mismatch: got %v, want %v", f, got, want)
+ }
+ }
+
+ check("Difficulty", block.Difficulty(), big.NewInt(131072))
+ check("GasLimit", block.GasLimit(), uint64(3141592))
+ check("GasUsed", block.GasUsed(), uint64(21000))
+ check("Coinbase", block.Coinbase(), common.HexToAddress("8888f1f195afa192cfee860698584c030f4c9db1"))
+ check("MixDigest", block.MixDigest(), common.HexToHash("bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff498"))
+ check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017"))
+ check("Hash", block.Hash(), common.HexToHash("0xc6d8dc8995c0a4374bb9f87bd0dd8c0761e6e026a71edbfed5e961c9e55dbd6a"))
+ check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4))
+ check("Time", block.Time(), uint64(1426516743))
+ check("Size", block.Size(), common.StorageSize(len(blockEnc)))
+ check("TxDependency", block.TxDependency(), [][]uint64{{2, 1}, {1, 0}})
+
+ tx1 := NewTransaction(0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), big.NewInt(10), 50000, big.NewInt(10), nil)
+ tx1, _ = tx1.WithSignature(HomesteadSigner{}, common.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100"))
+
+ check("len(Transactions)", len(block.Transactions()), 1)
+ check("Transactions[0].Hash", block.Transactions()[0].Hash(), tx1.Hash())
+ ourBlockEnc, err := rlp.EncodeToBytes(&block)
+
+ if err != nil {
+ t.Fatal("encode error: ", err)
+ }
+
+ if !bytes.Equal(ourBlockEnc, blockEnc) {
+ t.Errorf("encoded block mismatch:\ngot: %x\nwant: %x", ourBlockEnc, blockEnc)
+ }
+}
+
func TestEIP1559BlockEncoding(t *testing.T) {
blockEnc := common.FromHex("f9030bf901fea083cafc574e1f51ba9dc0568fc617a08ea2429fb384059c972f13b19fa1c8dd55a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a05fe50b260da6308036625b850b5d6ced6d0a9f814c0688bc91ffb7b7a3a54b67a0bc37d79753ad738a6dac4921e57392f145d8887476de3f783dfa7edae9283e52b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd8825208845506eb0780a0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c4843b9aca00f90106f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1b8a302f8a0018080843b9aca008301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8c0")
var block Block
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index 75e24b34d6..10f7156749 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -16,23 +16,24 @@ var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
- Coinbase common.Address `json:"miner" gencodec:"required"`
- Root common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
- ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
- Bloom Bloom `json:"logsBloom" gencodec:"required"`
- Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
- Number *hexutil.Big `json:"number" gencodec:"required"`
- GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash"`
- Nonce BlockNonce `json:"nonce"`
- BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
- Hash common.Hash `json:"hash"`
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
+ Coinbase common.Address `json:"miner" gencodec:"required"`
+ Root common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
+ ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
+ Bloom Bloom `json:"logsBloom" gencodec:"required"`
+ Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
+ Number *hexutil.Big `json:"number" gencodec:"required"`
+ GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+ Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+ Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
+ BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
+ TxDependency [][]uint64 `json:"txDependency" rlp:"optional"`
+ Hash common.Hash `json:"hash"`
}
var enc Header
enc.ParentHash = h.ParentHash
@@ -51,6 +52,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.MixDigest = h.MixDigest
enc.Nonce = h.Nonce
enc.BaseFee = (*hexutil.Big)(h.BaseFee)
+ enc.TxDependency = h.TxDependency
enc.Hash = h.Hash()
return json.Marshal(&enc)
}
@@ -58,22 +60,23 @@ func (h Header) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (h *Header) UnmarshalJSON(input []byte) error {
type Header struct {
- ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
- UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"`
- Coinbase *common.Address `json:"miner" gencodec:"required"`
- Root *common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"`
- ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"`
- Bloom *Bloom `json:"logsBloom" gencodec:"required"`
- Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
- Number *hexutil.Big `json:"number" gencodec:"required"`
- GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
- Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest *common.Hash `json:"mixHash"`
- Nonce *BlockNonce `json:"nonce"`
- BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
+ ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
+ UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"`
+ Coinbase *common.Address `json:"miner" gencodec:"required"`
+ Root *common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"`
+ ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"`
+ Bloom *Bloom `json:"logsBloom" gencodec:"required"`
+ Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"`
+ Number *hexutil.Big `json:"number" gencodec:"required"`
+ GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+ Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+ Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
+ MixDigest *common.Hash `json:"mixHash"`
+ Nonce *BlockNonce `json:"nonce"`
+ BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"`
+ TxDependency [][]uint64 `json:"txDependency" rlp:"optional"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@@ -140,5 +143,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.BaseFee != nil {
h.BaseFee = (*big.Int)(dec.BaseFee)
}
+ if dec.TxDependency != nil {
+ h.TxDependency = dec.TxDependency
+ }
return nil
}
diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go
index e1a6873318..10377e2ad2 100644
--- a/core/types/gen_header_rlp.go
+++ b/core/types/gen_header_rlp.go
@@ -41,7 +41,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBytes(obj.MixDigest[:])
w.WriteBytes(obj.Nonce[:])
_tmp1 := obj.BaseFee != nil
- if _tmp1 {
+ _tmp2 := len(obj.TxDependency) > 0
+ if _tmp1 || _tmp2 {
if obj.BaseFee == nil {
w.Write(rlp.EmptyString)
} else {
@@ -51,6 +52,17 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee)
}
}
+ if _tmp2 {
+ _tmp3 := w.List()
+ for _, _tmp4 := range obj.TxDependency {
+ _tmp5 := w.List()
+ for _, _tmp6 := range _tmp4 {
+ w.WriteUint64(_tmp6)
+ }
+ w.ListEnd(_tmp5)
+ }
+ w.ListEnd(_tmp3)
+ }
w.ListEnd(_tmp0)
return w.Flush()
}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index dd55618bf8..31689fa506 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -17,6 +17,7 @@
package vm
import (
+ "context"
"math/big"
"sync/atomic"
"time"
@@ -165,7 +166,7 @@ func (evm *EVM) Interpreter() *EVMInterpreter {
// parameters. It also handles any necessary value transfer required and takes
// the necessary steps to create accounts and reverses the state in case of an
// execution error or failed value transfer.
-func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
+func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int, interruptCtx context.Context) (ret []byte, leftOverGas uint64, err error) {
// Fail if we're trying to execute above the call depth limit
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
@@ -225,7 +226,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
// The depth-check is already done, and precompiles handled above
contract := NewContract(caller, AccountRef(addrCopy), value, gas)
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code)
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.interpreter.PreRun(contract, input, false, interruptCtx)
gas = contract.Gas
}
}
@@ -282,7 +283,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, AccountRef(caller.Address()), value, gas)
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), evm.StateDB.GetCode(addrCopy))
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.interpreter.PreRun(contract, input, false, nil)
gas = contract.Gas
}
if err != nil {
@@ -322,7 +323,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
// Initialise a new contract and make initialise the delegate values
contract := NewContract(caller, AccountRef(caller.Address()), nil, gas).AsDelegate()
contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), evm.StateDB.GetCode(addrCopy))
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.interpreter.PreRun(contract, input, false, nil)
gas = contract.Gas
}
if err != nil {
@@ -378,7 +379,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in Homestead this also counts for code storage gas errors.
- ret, err = evm.interpreter.Run(contract, input, true)
+ ret, err = evm.interpreter.PreRun(contract, input, true, nil)
gas = contract.Gas
}
if err != nil {
@@ -450,7 +451,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
start := time.Now()
- ret, err := evm.interpreter.Run(contract, nil, false)
+ ret, err := evm.interpreter.PreRun(contract, nil, false, nil)
// Check whether the max code size has been exceeded, assign err if the case.
if err == nil && evm.chainRules.IsEIP158 && len(ret) > params.MaxCodeSize {
diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go
index 6cd126c9b4..f9aec3b40d 100644
--- a/core/vm/gas_table_test.go
+++ b/core/vm/gas_table_test.go
@@ -93,7 +93,7 @@ func TestEIP2200(t *testing.T) {
}
vmenv := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, Config{ExtraEips: []int{2200}})
- _, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, tt.gaspool, new(big.Int))
+ _, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, tt.gaspool, new(big.Int), nil)
if err != tt.failure {
t.Errorf("test %d: failure mismatch: have %v, want %v", i, err, tt.failure)
}
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index db507c4811..2f3608f2b2 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -392,16 +392,21 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
// opExtCodeHash returns the code hash of a specified account.
// There are several cases when the function is called, while we can relay everything
// to `state.GetCodeHash` function to ensure the correctness.
-// (1) Caller tries to get the code hash of a normal contract account, state
+//
+// (1) Caller tries to get the code hash of a normal contract account, state
+//
// should return the relative code hash and set it as the result.
//
-// (2) Caller tries to get the code hash of a non-existent account, state should
+// (2) Caller tries to get the code hash of a non-existent account, state should
+//
// return common.Hash{} and zero will be set as the result.
//
-// (3) Caller tries to get the code hash for an account without contract code,
+// (3) Caller tries to get the code hash for an account without contract code,
+//
// state should return emptyCodeHash(0xc5d246...) as the result.
//
-// (4) Caller tries to get the code hash of a precompiled account, the result
+// (4) Caller tries to get the code hash of a precompiled account, the result
+//
// should be zero or emptyCodeHash.
//
// It is worth noting that in order to avoid unnecessary create and clean,
@@ -410,10 +415,12 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
// If the precompile account is not transferred any amount on a private or
// customized chain, the return value will be zero.
//
-// (5) Caller tries to get the code hash for an account which is marked as suicided
+// (5) Caller tries to get the code hash for an account which is marked as suicided
+//
// in the current transaction, the code hash of this account should be returned.
//
-// (6) Caller tries to get the code hash for an account which is marked as deleted,
+// (6) Caller tries to get the code hash for an account which is marked as deleted,
+//
// this account should be regarded as a non-existent account and zero should be returned.
func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
@@ -688,7 +695,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
bigVal = value.ToBig()
}
- ret, returnGas, err := interpreter.evm.Call(scope.Contract, toAddr, args, gas, bigVal)
+ ret, returnGas, err := interpreter.evm.Call(scope.Contract, toAddr, args, gas, bigVal, nil)
if err != nil {
temp.Clear()
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 21e3c914e1..04184b95f7 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -17,11 +17,33 @@
package vm
import (
+ "context"
+ "errors"
"hash"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+
+ lru "github.com/hashicorp/golang-lru"
+)
+
+var (
+ opcodeCommitInterruptCounter = metrics.NewRegisteredCounter("worker/opcodeCommitInterrupt", nil)
+ ErrInterrupt = errors.New("EVM execution interrupted")
+ ErrNoCache = errors.New("no tx cache found")
+ ErrNoCurrentTx = errors.New("no current tx found in interruptCtx")
+)
+
+const (
+ // These are keys for the interruptCtx
+ InterruptCtxDelayKey = "delay"
+ InterruptCtxOpcodeDelayKey = "opcodeDelay"
+
+	// InterruptedTxCacheSize is the size of the LRU cache for interrupted txs
+ InterruptedTxCacheSize = 90000
)
// Config are the configuration options for the Interpreter
@@ -34,6 +56,10 @@ type Config struct {
JumpTable *JumpTable // EVM instruction table, automatically populated if unset
ExtraEips []int // Additional EIPS that are to be enabled
+
+ // parallel EVM configs
+ ParallelEnable bool
+ ParallelSpeculativeProcesses int
}
// ScopeContext contains the things that are per-call, such as stack and memory,
@@ -64,6 +90,54 @@ type EVMInterpreter struct {
returnData []byte // Last CALL's return data for subsequent reuse
}
+// TxCache is a wrapper around lru.Cache for caching transactions that get interrupted
+type TxCache struct {
+ Cache *lru.Cache
+}
+
+type txCacheKey struct{}
+type InterruptedTxContext_currenttxKey struct{}
+
+// SetCurrentTxOnContext sets the current tx on the context
+func SetCurrentTxOnContext(ctx context.Context, txHash common.Hash) context.Context {
+ return context.WithValue(ctx, InterruptedTxContext_currenttxKey{}, txHash)
+}
+
+// GetCurrentTxFromContext gets the current tx from the context
+func GetCurrentTxFromContext(ctx context.Context) (common.Hash, error) {
+ val := ctx.Value(InterruptedTxContext_currenttxKey{})
+ if val == nil {
+ return common.Hash{}, ErrNoCurrentTx
+ }
+
+ c, ok := val.(common.Hash)
+ if !ok {
+ return common.Hash{}, ErrNoCurrentTx
+ }
+
+ return c, nil
+}
+
+// GetCache returns the txCache from the context
+func GetCache(ctx context.Context) (*TxCache, error) {
+ val := ctx.Value(txCacheKey{})
+ if val == nil {
+ return nil, ErrNoCache
+ }
+
+ c, ok := val.(*TxCache)
+ if !ok {
+ return nil, ErrNoCache
+ }
+
+ return c, nil
+}
+
+// PutCache puts the txCache into the context
+func PutCache(ctx context.Context, cache *TxCache) context.Context {
+ return context.WithValue(ctx, txCacheKey{}, cache)
+}
+
// NewEVMInterpreter returns a new instance of the Interpreter.
func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
// If jump table was not initialised we set the default one.
@@ -107,14 +181,196 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
}
}
+// PreRun is a wrapper around Run that injects a delay before each opcode when induced by tests; otherwise it calls the legacy Run() method
+func (in *EVMInterpreter) PreRun(contract *Contract, input []byte, readOnly bool, interruptCtx context.Context) (ret []byte, err error) {
+ var opcodeDelay interface{}
+
+ if interruptCtx != nil {
+ if interruptCtx.Value(InterruptCtxOpcodeDelayKey) != nil {
+ opcodeDelay = interruptCtx.Value(InterruptCtxOpcodeDelayKey)
+ }
+ }
+
+ if opcodeDelay != nil {
+ return in.RunWithDelay(contract, input, readOnly, interruptCtx, opcodeDelay.(uint))
+ }
+
+ return in.Run(contract, input, readOnly, interruptCtx)
+}
+
// Run loops and evaluates the contract's code with the given input data and returns
// the return byte-slice and an error if one occurred.
//
// It's important to note that any errors returned by the interpreter should be
// considered a revert-and-consume-all-gas operation except for
// ErrExecutionReverted which means revert-and-keep-gas-left.
-func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) {
+// nolint: gocognit
+func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool, interruptCtx context.Context) (ret []byte, err error) {
+ // Increment the call depth which is restricted to 1024
+ in.evm.depth++
+ defer func() { in.evm.depth-- }()
+
+ // Make sure the readOnly is only set if we aren't in readOnly yet.
+ // This also makes sure that the readOnly flag isn't removed for child calls.
+ if readOnly && !in.readOnly {
+ in.readOnly = true
+ defer func() { in.readOnly = false }()
+ }
+
+ // Reset the previous call's return data. It's unimportant to preserve the old buffer
+ // as every returning call will return new data anyway.
+ in.returnData = nil
+
+ // Don't bother with the execution if there's no code.
+ if len(contract.Code) == 0 {
+ return nil, nil
+ }
+
+ var (
+ op OpCode // current opcode
+ mem = NewMemory() // bound memory
+ stack = newstack() // local stack
+ callContext = &ScopeContext{
+ Memory: mem,
+ Stack: stack,
+ Contract: contract,
+ }
+ // For optimisation reason we're using uint64 as the program counter.
+ // It's theoretically possible to go above 2^64. The YP defines the PC
+ // to be uint256. Practically much less so feasible.
+ pc = uint64(0) // program counter
+ cost uint64
+ // copies used by tracer
+ pcCopy uint64 // needed for the deferred EVMLogger
+ gasCopy uint64 // for EVMLogger to log gas remaining before execution
+ logged bool // deferred EVMLogger should ignore already logged steps
+ res []byte // result of the opcode execution function
+ )
+	// Don't move this deferred function, it's placed before the capturestate-deferred method,
+	// so that it gets executed _after_: the capturestate needs the stacks before
+ // they are returned to the pools
+ defer func() {
+ returnStack(stack)
+ }()
+
+ contract.Input = input
+
+ if in.cfg.Debug {
+ defer func() {
+ if err != nil {
+ if !logged {
+ in.cfg.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
+ } else {
+ in.cfg.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err)
+ }
+ }
+ }()
+ }
+ // The Interpreter main run loop (contextual). This loop runs until either an
+ // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
+ // the execution of one of the operations or until the done flag is set by the
+ // parent context.
+ for {
+ if interruptCtx != nil {
+ // case of interrupting by timeout
+ select {
+ case <-interruptCtx.Done():
+ txHash, _ := GetCurrentTxFromContext(interruptCtx)
+ interruptedTxCache, _ := GetCache(interruptCtx)
+
+ if interruptedTxCache == nil {
+ break
+ }
+
+ // if the tx is already in the cache, it means that it has been interrupted before and we will not interrupt it again
+ found, _ := interruptedTxCache.Cache.ContainsOrAdd(txHash, true)
+ if found {
+ interruptedTxCache.Cache.Remove(txHash)
+ } else {
+ // if the tx is not in the cache, it means that it has not been interrupted before and we will interrupt it
+ opcodeCommitInterruptCounter.Inc(1)
+ log.Warn("OPCODE Level interrupt")
+
+ return nil, ErrInterrupt
+ }
+ default:
+ }
+ }
+
+ if in.cfg.Debug {
+ // Capture pre-execution values for tracing.
+ logged, pcCopy, gasCopy = false, pc, contract.Gas
+ }
+ // Get the operation from the jump table and validate the stack to ensure there are
+ // enough stack items available to perform the operation.
+ op = contract.GetOp(pc)
+ operation := in.cfg.JumpTable[op]
+ cost = operation.constantGas // For tracing
+ // Validate stack
+ if sLen := stack.len(); sLen < operation.minStack {
+ return nil, &ErrStackUnderflow{stackLen: sLen, required: operation.minStack}
+ } else if sLen > operation.maxStack {
+ return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack}
+ }
+
+ if !contract.UseGas(cost) {
+ return nil, ErrOutOfGas
+ }
+ // nolint : nestif
+ if operation.dynamicGas != nil {
+ // All ops with a dynamic memory usage also has a dynamic gas cost.
+ var memorySize uint64
+ // calculate the new memory size and expand the memory to fit
+ // the operation
+ // Memory check needs to be done prior to evaluating the dynamic gas portion,
+ // to detect calculation overflows
+ if operation.memorySize != nil {
+ memSize, overflow := operation.memorySize(stack)
+ if overflow {
+ return nil, ErrGasUintOverflow
+ }
+ // memory is expanded in words of 32 bytes. Gas
+ // is also calculated in words.
+ if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
+ return nil, ErrGasUintOverflow
+ }
+ }
+ // Consume the gas and return an error if not enough gas is available.
+ // cost is explicitly set so that the capture state defer method can get the proper cost
+ var dynamicCost uint64
+ dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize)
+ cost += dynamicCost // for tracing
+ if err != nil || !contract.UseGas(dynamicCost) {
+ return nil, ErrOutOfGas
+ }
+ if memorySize > 0 {
+ mem.Resize(memorySize)
+ }
+ }
+
+ if in.cfg.Debug {
+ in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
+ logged = true
+ }
+ // execute the operation
+ res, err = operation.execute(&pc, in, callContext)
+ if err != nil {
+ break
+ }
+ pc++
+ }
+
+ if err == errStopToken {
+ err = nil // clear stop token error
+ }
+
+ return res, err
+}
+
+// nolint: gocognit
+// RunWithDelay is Run() with a delay between each opcode. Only used by test cases.
+func (in *EVMInterpreter) RunWithDelay(contract *Contract, input []byte, readOnly bool, interruptCtx context.Context, opcodeDelay uint) (ret []byte, err error) {
// Increment the call depth which is restricted to 1024
in.evm.depth++
defer func() { in.evm.depth-- }()
@@ -179,6 +435,36 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// the execution of one of the operations or until the done flag is set by the
// parent context.
for {
+ if interruptCtx != nil {
+ // case of interrupting by timeout
+ select {
+ case <-interruptCtx.Done():
+ txHash, _ := GetCurrentTxFromContext(interruptCtx)
+ interruptedTxCache, _ := GetCache(interruptCtx)
+
+ if interruptedTxCache == nil {
+ break
+ }
+
+ // if the tx is already in the cache, it means that it has been interrupted before and we will not interrupt it again
+ found, _ := interruptedTxCache.Cache.ContainsOrAdd(txHash, true)
+ log.Info("FOUND", "found", found, "txHash", txHash)
+
+ if found {
+ interruptedTxCache.Cache.Remove(txHash)
+ } else {
+ // if the tx is not in the cache, it means that it has not been interrupted before and we will interrupt it
+ opcodeCommitInterruptCounter.Inc(1)
+ log.Warn("OPCODE Level interrupt")
+
+ return nil, ErrInterrupt
+ }
+ default:
+ }
+ }
+
+ time.Sleep(time.Duration(opcodeDelay) * time.Millisecond)
+
if in.cfg.Debug {
// Capture pre-execution values for tracing.
logged, pcCopy, gasCopy = false, pc, contract.Gas
diff --git a/core/vm/interpreter_test.go b/core/vm/interpreter_test.go
index dfae0f2e2a..9a7affb53d 100644
--- a/core/vm/interpreter_test.go
+++ b/core/vm/interpreter_test.go
@@ -53,7 +53,7 @@ func TestLoopInterrupt(t *testing.T) {
timeout := make(chan bool)
go func(evm *EVM) {
- _, _, err := evm.Call(AccountRef(common.Address{}), address, nil, math.MaxUint64, new(big.Int))
+ _, _, err := evm.Call(AccountRef(common.Address{}), address, nil, math.MaxUint64, new(big.Int), nil)
errChannel <- err
}(evm)
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 7861fb92db..2c5505ec85 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -131,6 +131,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
input,
cfg.GasLimit,
cfg.Value,
+ nil,
)
return ret, cfg.State, err
@@ -186,6 +187,7 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
input,
cfg.GasLimit,
cfg.Value,
+ nil,
)
return ret, leftOverGas, err
}
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 97673b4906..cf744e3f29 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -386,12 +386,15 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
//cfg.State.CreateAccount(cfg.Origin)
// set the receiver's (the executing contract) code for execution.
cfg.State.SetCode(destination, code)
- vmenv.Call(sender, destination, nil, gas, cfg.Value)
+
+ // nolint: errcheck
+ vmenv.Call(sender, destination, nil, gas, cfg.Value, nil)
b.Run(name, func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- vmenv.Call(sender, destination, nil, gas, cfg.Value)
+ // nolint: errcheck
+ vmenv.Call(sender, destination, nil, gas, cfg.Value, nil)
}
})
}
diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml
index d202a1cf82..f62fcfebdb 100644
--- a/docs/cli/example_config.toml
+++ b/docs/cli/example_config.toml
@@ -29,15 +29,16 @@ devfakeauthor = false # Run miner without validator set authorization
debug = true # Prepends log messages with call-site location (file and line number) - {requires some effort}
[p2p]
- maxpeers = 50 # Maximum number of network peers (network disabled if set to 0)
- maxpendpeers = 50 # Maximum number of pending connection attempts
- bind = "0.0.0.0" # Network binding address
- port = 30303 # Network listening port
- nodiscover = false # Disables the peer discovery mechanism (manual peer addition)
- nat = "any" # NAT port mapping mechanism (any|none|upnp|pmp|extip:)
- netrestrict = "" # Restricts network communication to the given IP networks (CIDR masks)
- nodekey = "" # P2P node key file
- nodekeyhex = "" # P2P node key as hex
+ maxpeers = 50 # Maximum number of network peers (network disabled if set to 0)
+ maxpendpeers = 50 # Maximum number of pending connection attempts
+ bind = "0.0.0.0" # Network binding address
+ port = 30303 # Network listening port
+ nodiscover = false # Disables the peer discovery mechanism (manual peer addition)
+ nat = "any" # NAT port mapping mechanism (any|none|upnp|pmp|extip:)
+ netrestrict = "" # Restricts network communication to the given IP networks (CIDR masks)
+ nodekey = "" # P2P node key file
+ nodekeyhex = "" # P2P node key as hex
+ txarrivalwait = "500ms" # Maximum duration to wait before requesting an announced transaction
[p2p.discovery]
v5disc = false # Enables the experimental RLPx V5 (Topic Discovery) mechanism
bootnodes = [] # Comma separated enode URLs for P2P discovery bootstrap
@@ -72,6 +73,7 @@ devfakeauthor = false # Run miner without validator set authorization
gaslimit = 30000000 # Target gas ceiling for mined blocks
gasprice = "1000000000" # Minimum gas price for mining a transaction (recommended for mainnet = 30000000000, default suitable for mumbai/devnet)
recommit = "2m5s" # The time interval for miner to re-create mining work
+ commitinterrupt = true # Interrupt the current mining work when time is exceeded and create partial blocks
[jsonrpc]
ipcdisable = false # Disable the IPC-RPC server
@@ -128,7 +130,7 @@ devfakeauthor = false # Run miner without validator set authorization
metrics = false # Enable metrics collection and reporting
expensive = false # Enable expensive metrics collection and reporting
prometheus-addr = "127.0.0.1:7071" # Address for Prometheus Server
- opencollector-endpoint = "127.0.0.1:4317" # OpenCollector Endpoint (host:port)
+ opencollector-endpoint = "" # OpenCollector Endpoint (host:port)
[telemetry.influx]
influxdb = false # Enable metrics export/push to an external InfluxDB database (v1)
endpoint = "" # InfluxDB API endpoint to report metrics to
diff --git a/docs/cli/server.md b/docs/cli/server.md
index 19deb5ca27..d7a9a1296e 100644
--- a/docs/cli/server.md
+++ b/docs/cli/server.md
@@ -72,6 +72,10 @@ The ```bor server``` command runs the Bor client.
- ```dev.period```: Block period to use in developer mode (0 = mine only if transaction pending) (default: 0)
+- ```parallelevm.enable```: Enable Block STM (default: true)
+
+- ```parallelevm.procs```: Number of speculative processes (cores) in Block STM (default: 8)
+
- ```dev.gaslimit```: Initial block gas limit (default: 11500000)
- ```pprof```: Enable the pprof HTTP server (default: false)
@@ -216,6 +220,8 @@ The ```bor server``` command runs the Bor client.
- ```v5disc```: Enables the experimental RLPx V5 (Topic Discovery) mechanism (default: false)
+- ```txarrivalwait```: Maximum duration to wait for a transaction before explicitly requesting it (default: 500ms)
+
### Sealer Options
- ```mine```: Enable mining (default: false)
@@ -230,6 +236,8 @@ The ```bor server``` command runs the Bor client.
- ```miner.recommit```: The time interval for miner to re-create mining work (default: 2m5s)
+- ```miner.interruptcommit```: Interrupt block commit when block creation time is exceeded (default: true)
+
### Telemetry Options
- ```metrics```: Enable metrics collection and reporting (default: false)
diff --git a/eth/backend.go b/eth/backend.go
index 869566a7ac..0c5648cf46 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -208,7 +208,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
var (
vmConfig = vm.Config{
- EnablePreimageRecording: config.EnablePreimageRecording,
+ EnablePreimageRecording: config.EnablePreimageRecording,
+ ParallelEnable: config.ParallelEVM.Enable,
+ ParallelSpeculativeProcesses: config.ParallelEVM.SpeculativeProcesses,
}
cacheConfig = &core.CacheConfig{
TrieCleanLimit: config.TrieCleanCache,
@@ -226,7 +228,14 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
checker := whitelist.NewService(10)
- eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit, checker)
+ // check if Parallel EVM is enabled
+ // if enabled, use parallel state processor
+ if config.ParallelEVM.Enable {
+ eth.blockchain, err = core.NewParallelBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit, checker)
+ } else {
+ eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit, checker)
+ }
+
if err != nil {
return nil, err
}
@@ -268,6 +277,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
EthAPI: ethAPI,
PeerRequiredBlocks: config.PeerRequiredBlocks,
checker: checker,
+ txArrivalWait: eth.p2pServer.TxArrivalWait,
}); err != nil {
return nil, err
}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index a9242fba5b..ba3d2c98c5 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -1500,9 +1500,9 @@ func TestFakedSyncProgress66NoRemoteCheckpoint(t *testing.T) {
tester := newTester()
validate := func(count int) (bool, error) {
- // only return the `ErrNoRemoteCheckoint` error for the first call
+ // only return the `ErrNoRemoteCheckpoint` error for the first call
if count == 0 {
- return false, whitelist.ErrNoRemoteCheckoint
+ return false, whitelist.ErrNoRemoteCheckpoint
}
return true, nil
@@ -1518,7 +1518,7 @@ func TestFakedSyncProgress66NoRemoteCheckpoint(t *testing.T) {
// Synchronise with the peer and make sure all blocks were retrieved
// Should fail in first attempt
if err := tester.sync("light", nil, mode); err != nil {
- assert.Equal(t, whitelist.ErrNoRemoteCheckoint, err, "failed synchronisation")
+ assert.Equal(t, whitelist.ErrNoRemoteCheckpoint, err, "failed synchronisation")
}
// Try syncing again, should succeed
diff --git a/eth/downloader/whitelist/service.go b/eth/downloader/whitelist/service.go
index a2edcfb797..5d3fe477d3 100644
--- a/eth/downloader/whitelist/service.go
+++ b/eth/downloader/whitelist/service.go
@@ -31,7 +31,7 @@ func NewService(maxCapacity uint) *Service {
var (
ErrCheckpointMismatch = errors.New("checkpoint mismatch")
ErrLongFutureChain = errors.New("received future chain of unacceptable length")
- ErrNoRemoteCheckoint = errors.New("remote peer doesn't have a checkoint")
+ ErrNoRemoteCheckpoint = errors.New("remote peer doesn't have a checkpoint")
)
// IsValidPeer checks if the chain we're about to receive from a peer is valid or not
@@ -55,11 +55,11 @@ func (w *Service) IsValidPeer(remoteHeader *types.Header, fetchHeadersByNumber f
// todo: we can extract this as an interface and mock as well or just test IsValidChain in isolation from downloader passing fake fetchHeadersByNumber functions
headers, hashes, err := fetchHeadersByNumber(lastCheckpointBlockNum, 1, 0, false)
if err != nil {
- return false, fmt.Errorf("%w: last checkpoint %d, err %v", ErrNoRemoteCheckoint, lastCheckpointBlockNum, err)
+ return false, fmt.Errorf("%w: last checkpoint %d, err %v", ErrNoRemoteCheckpoint, lastCheckpointBlockNum, err)
}
if len(headers) == 0 {
- return false, fmt.Errorf("%w: last checkpoint %d", ErrNoRemoteCheckoint, lastCheckpointBlockNum)
+ return false, fmt.Errorf("%w: last checkpoint %d", ErrNoRemoteCheckpoint, lastCheckpointBlockNum)
}
reqBlockNum := headers[0].Number.Uint64()
diff --git a/eth/downloader/whitelist/service_test.go b/eth/downloader/whitelist/service_test.go
index a91148c761..cf0b076cfd 100644
--- a/eth/downloader/whitelist/service_test.go
+++ b/eth/downloader/whitelist/service_test.go
@@ -64,14 +64,14 @@ func TestIsValidPeer(t *testing.T) {
}
// case2: false fetchHeadersByNumber function provided, should consider the chain as invalid
- // and throw `ErrNoRemoteCheckoint` error
+ // and throw `ErrNoRemoteCheckpoint` error
res, err = s.IsValidPeer(nil, falseFetchHeadersByNumber)
if err == nil {
t.Fatal("expected error, got nil")
}
- if !errors.Is(err, ErrNoRemoteCheckoint) {
- t.Fatalf("expected error ErrNoRemoteCheckoint, got %v", err)
+ if !errors.Is(err, ErrNoRemoteCheckpoint) {
+ t.Fatalf("expected error ErrNoRemoteCheckpoint, got %v", err)
}
require.Equal(t, res, false, "expected chain to be invalid")
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 7581e16761..5d58c70fe4 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -238,6 +238,9 @@ type Config struct {
// Bor logs flag
BorLogs bool
+ // Parallel EVM (Block-STM) related config
+ ParallelEVM core.ParallelEVMConfig `toml:",omitempty"`
+
// Arrow Glacier block override (TODO: remove after the fork)
OverrideArrowGlacier *big.Int `toml:",omitempty"`
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
index b10c0db9ee..8bdb1547e8 100644
--- a/eth/fetcher/tx_fetcher.go
+++ b/eth/fetcher/tx_fetcher.go
@@ -53,13 +53,13 @@ const (
// re-request them.
maxTxUnderpricedSetSize = 32768
- // txArriveTimeout is the time allowance before an announced transaction is
- // explicitly requested.
- txArriveTimeout = 500 * time.Millisecond
-
// txGatherSlack is the interval used to collate almost-expired announces
// with network fetches.
txGatherSlack = 100 * time.Millisecond
+
+ // maxTxArrivalWait is the longest acceptable duration for the txArrivalWait
+ // configuration value. Longer config values will default to this.
+ maxTxArrivalWait = 500 * time.Millisecond
)
var (
@@ -176,38 +176,41 @@ type TxFetcher struct {
step chan struct{} // Notification channel when the fetcher loop iterates
clock mclock.Clock // Time wrapper to simulate in tests
rand *mrand.Rand // Randomizer to use in tests instead of map range loops (soft-random)
+
+ txArrivalWait time.Duration // txArrivalWait is the time allowance before an announced transaction is explicitly requested.
}
// NewTxFetcher creates a transaction fetcher to retrieve transaction
// based on hash announcements.
-func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
- return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
+func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, txArrivalWait time.Duration) *TxFetcher {
+ return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil, txArrivalWait)
}
// NewTxFetcherForTests is a testing method to mock out the realtime clock with
// a simulated version and the internal randomness with a deterministic one.
func NewTxFetcherForTests(
hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
- clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
+ clock mclock.Clock, rand *mrand.Rand, txArrivalWait time.Duration) *TxFetcher {
return &TxFetcher{
- notify: make(chan *txAnnounce),
- cleanup: make(chan *txDelivery),
- drop: make(chan *txDrop),
- quit: make(chan struct{}),
- waitlist: make(map[common.Hash]map[string]struct{}),
- waittime: make(map[common.Hash]mclock.AbsTime),
- waitslots: make(map[string]map[common.Hash]struct{}),
- announces: make(map[string]map[common.Hash]struct{}),
- announced: make(map[common.Hash]map[string]struct{}),
- fetching: make(map[common.Hash]string),
- requests: make(map[string]*txRequest),
- alternates: make(map[common.Hash]map[string]struct{}),
- underpriced: mapset.NewSet(),
- hasTx: hasTx,
- addTxs: addTxs,
- fetchTxs: fetchTxs,
- clock: clock,
- rand: rand,
+ notify: make(chan *txAnnounce),
+ cleanup: make(chan *txDelivery),
+ drop: make(chan *txDrop),
+ quit: make(chan struct{}),
+ waitlist: make(map[common.Hash]map[string]struct{}),
+ waittime: make(map[common.Hash]mclock.AbsTime),
+ waitslots: make(map[string]map[common.Hash]struct{}),
+ announces: make(map[string]map[common.Hash]struct{}),
+ announced: make(map[common.Hash]map[string]struct{}),
+ fetching: make(map[common.Hash]string),
+ requests: make(map[string]*txRequest),
+ alternates: make(map[common.Hash]map[string]struct{}),
+ underpriced: mapset.NewSet(),
+ hasTx: hasTx,
+ addTxs: addTxs,
+ fetchTxs: fetchTxs,
+ clock: clock,
+ rand: rand,
+ txArrivalWait: txArrivalWait,
}
}
@@ -333,6 +336,16 @@ func (f *TxFetcher) Drop(peer string) error {
// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
func (f *TxFetcher) Start() {
+ // the txArrivalWait duration should not be less than the txGatherSlack duration
+ if f.txArrivalWait < txGatherSlack {
+ f.txArrivalWait = txGatherSlack
+ }
+
+ // the txArrivalWait duration should not be greater than the maxTxArrivalWait duration
+ if f.txArrivalWait > maxTxArrivalWait {
+ f.txArrivalWait = maxTxArrivalWait
+ }
+
go f.loop()
}
@@ -350,6 +363,9 @@ func (f *TxFetcher) loop() {
waitTrigger = make(chan struct{}, 1)
timeoutTrigger = make(chan struct{}, 1)
)
+
+ log.Info("TxFetcher", "txArrivalWait", f.txArrivalWait.String())
+
for {
select {
case ann := <-f.notify:
@@ -441,7 +457,7 @@ func (f *TxFetcher) loop() {
// ones into the retrieval queues
actives := make(map[string]struct{})
for hash, instance := range f.waittime {
- if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
+ if time.Duration(f.clock.Now()-instance)+txGatherSlack > f.txArrivalWait {
// Transaction expired without propagation, schedule for retrieval
if f.announced[hash] != nil {
panic("announce tracker already contains waitlist item")
@@ -698,14 +714,16 @@ func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
for _, instance := range f.waittime {
if earliest > instance {
earliest = instance
- if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
+ if f.txArrivalWait-time.Duration(now-earliest) < gatherSlack {
break
}
}
}
- *timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
- trigger <- struct{}{}
- })
+
+ *timer = f.clock.AfterFunc(
+ f.txArrivalWait-time.Duration(now-earliest),
+ func() { trigger <- struct{}{} },
+ )
}
// rescheduleTimeout iterates over all the transactions currently in flight and
diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go
index 796d4caf0f..b3c3ee3bb7 100644
--- a/eth/fetcher/tx_fetcher_test.go
+++ b/eth/fetcher/tx_fetcher_test.go
@@ -38,7 +38,8 @@ var (
types.NewTransaction(9828766684487745566, common.Address{0xac}, new(big.Int), 0, new(big.Int), nil),
}
// testTxsHashes is the hashes of the test transactions above
- testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
+ testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
+ testTxArrivalWait = 500 * time.Millisecond
)
type doTxNotify struct {
@@ -81,6 +82,7 @@ func TestTransactionFetcherWaiting(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -113,7 +115,7 @@ func TestTransactionFetcherWaiting(t *testing.T) {
// Wait for the arrival timeout which should move all expired items
// from the wait list to the scheduler
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -132,7 +134,7 @@ func TestTransactionFetcherWaiting(t *testing.T) {
isWaiting(map[string][]common.Hash{
"C": {{0x06}, {0x07}},
}),
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isScheduled{
tracking: map[string][]common.Hash{
"A": {{0x01}, {0x02}, {0x03}, {0x05}},
@@ -171,6 +173,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -181,7 +184,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) {
}),
isScheduled{tracking: nil, fetching: nil},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -234,6 +237,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -244,7 +248,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) {
}),
isScheduled{tracking: nil, fetching: nil},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -268,7 +272,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) {
"A": {{0x01}, {0x02}},
},
},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -314,6 +318,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) {
<-proceed
return errors.New("peer disconnected")
},
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -324,7 +329,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) {
}),
isScheduled{tracking: nil, fetching: nil},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -383,6 +388,7 @@ func TestTransactionFetcherCleanup(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -393,7 +399,7 @@ func TestTransactionFetcherCleanup(t *testing.T) {
}),
isScheduled{tracking: nil, fetching: nil},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -422,6 +428,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -432,7 +439,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) {
}),
isScheduled{tracking: nil, fetching: nil},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -460,6 +467,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -470,7 +478,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) {
}),
isScheduled{tracking: nil, fetching: nil},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -506,6 +514,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -516,7 +525,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) {
}),
isScheduled{tracking: nil, fetching: nil},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -544,14 +553,15 @@ func TestTransactionFetcherBroadcasts(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Set up three transactions to be in different stats, waiting, queued and fetching
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
isWaiting(map[string][]common.Hash{
@@ -592,6 +602,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -600,7 +611,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) {
"A": {{0x01}},
}),
isScheduled{nil, nil, nil},
- doWait{time: txArriveTimeout / 2, step: false},
+ doWait{time: testTxArrivalWait / 2, step: false},
isWaiting(map[string][]common.Hash{
"A": {{0x01}},
}),
@@ -611,7 +622,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) {
"A": {{0x01}, {0x02}},
}),
isScheduled{nil, nil, nil},
- doWait{time: txArriveTimeout / 2, step: true},
+ doWait{time: testTxArrivalWait / 2, step: true},
isWaiting(map[string][]common.Hash{
"A": {{0x02}},
}),
@@ -624,7 +635,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) {
},
},
- doWait{time: txArriveTimeout / 2, step: true},
+ doWait{time: testTxArrivalWait / 2, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -649,6 +660,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -659,7 +671,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
}),
isScheduled{tracking: nil, fetching: nil},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -681,7 +693,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
},
// Ensure that followup announcements don't get scheduled
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isScheduled{
tracking: map[string][]common.Hash{
"A": {testTxsHashes[1]},
@@ -714,13 +726,14 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
@@ -733,7 +746,7 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
"B": {{0x02}},
},
},
- doWait{time: txFetchTimeout - txArriveTimeout, step: true},
+ doWait{time: txFetchTimeout - testTxArrivalWait, step: true},
isScheduled{
tracking: map[string][]common.Hash{
"B": {{0x02}},
@@ -745,7 +758,7 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
"A": {},
},
},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isScheduled{
tracking: nil,
fetching: nil,
@@ -773,13 +786,14 @@ func TestTransactionFetcherRateLimiting(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Announce all the transactions, wait a bit and ensure only a small
// percentage gets requested
doTxNotify{peer: "A", hashes: hashes},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -811,13 +825,14 @@ func TestTransactionFetcherDoSProtection(t *testing.T) {
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Announce half of the transaction and wait for them to be scheduled
doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2]},
doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1]},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
// Announce the second half and keep them in the wait list
doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},
@@ -878,12 +893,13 @@ func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
return errs
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Deliver a transaction through the fetcher, but reject as underpriced
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},
isScheduled{nil, nil, nil},
@@ -921,7 +937,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
steps = append(steps, isWaiting(map[string][]common.Hash{
"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
}))
- steps = append(steps, doWait{time: txArriveTimeout, step: true})
+ steps = append(steps, doWait{time: testTxArrivalWait, step: true})
steps = append(steps, isScheduled{
tracking: map[string][]common.Hash{
"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
@@ -947,12 +963,13 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
return errs
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: append(steps, []interface{}{
// The preparation of the test has already been done in `steps`, add the last check
doTxNotify{peer: "A", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},
isUnderpriced(maxTxUnderpricedSetSize),
}...),
@@ -969,6 +986,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -981,9 +999,9 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
// Set up a few hashes into various stages
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
isWaiting(map[string][]common.Hash{
@@ -1022,14 +1040,15 @@ func TestTransactionFetcherDrop(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Set up a few hashes into various stages
doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}},
isWaiting(map[string][]common.Hash{
@@ -1050,7 +1069,7 @@ func TestTransactionFetcherDrop(t *testing.T) {
// Push the node into a dangling (timeout) state
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
isWaiting(nil),
isScheduled{
tracking: map[string][]common.Hash{
@@ -1088,12 +1107,13 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Set up a few hashes into various stages
doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}},
isWaiting(nil),
@@ -1133,12 +1153,13 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Get a transaction into fetching mode and make it dangling with a broadcast
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
// Notify the dangling transaction once more and crash via a timeout
@@ -1160,17 +1181,18 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Get a transaction into fetching mode and make it dangling with a broadcast
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
// Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout
doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doDrop("A"),
doWait{time: txFetchTimeout, step: true},
},
@@ -1189,6 +1211,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ testTxArrivalWait,
)
},
steps: []interface{}{
@@ -1199,7 +1222,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) {
// Notify the dangling transaction once more, partially deliver, clash&crash with a timeout
doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
doWait{time: txFetchTimeout, step: true},
@@ -1225,17 +1248,18 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) {
<-proceed
return errors.New("peer disconnected")
},
+ testTxArrivalWait,
)
},
steps: []interface{}{
// Get a transaction into fetching mode and make it dangling with a broadcast
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
// Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect
doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
- doWait{time: txArriveTimeout, step: true},
+ doWait{time: testTxArrivalWait, step: true},
doFunc(func() {
proceed <- struct{}{} // Allow peer A to return the failure
}),
diff --git a/eth/handler.go b/eth/handler.go
index 48bdf8eb15..b58fab1773 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -93,6 +93,7 @@ type handlerConfig struct {
PeerRequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
checker ethereum.ChainValidator
+ txArrivalWait time.Duration // Maximum duration to wait for an announced tx before requesting it
}
type handler struct {
@@ -307,7 +308,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
}
return p.RequestTxs(hashes)
}
- h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, h.txpool.AddRemotes, fetchTx)
+ h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, h.txpool.AddRemotes, fetchTx, config.txArrivalWait)
h.chainSync = newChainSyncer(h)
return h, nil
}
diff --git a/eth/protocols/eth/dispatcher.go b/eth/protocols/eth/dispatcher.go
index bf88d400d4..8353515193 100644
--- a/eth/protocols/eth/dispatcher.go
+++ b/eth/protocols/eth/dispatcher.go
@@ -212,6 +212,7 @@ func (p *Peer) dispatcher() {
}
// Stop tracking the request
delete(pending, cancelOp.id)
+ requestTracker.Fulfil(p.id, p.version, req.code, cancelOp.id)
cancelOp.fail <- nil
case resOp := <-p.resDispatch:
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index f01db93a67..b8672e94f8 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -36,15 +36,15 @@ import (
// base layer statedb can be passed then it's regarded as the statedb of the
// parent block.
// Parameters:
-// - block: The block for which we want the state (== state at the stateRoot of the parent)
-// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state
-// - base: If the caller is tracing multiple blocks, the caller can provide the parent state
-// continuously from the callsite.
-// - checklive: if true, then the live 'blockchain' state database is used. If the caller want to
-// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid
-// storing trash persistently
-// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided,
-// it would be preferrable to start from a fresh state, if we have it on disk.
+// - block: The block for which we want the state (== state at the stateRoot of the parent)
+// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state
+// - base: If the caller is tracing multiple blocks, the caller can provide the parent state
+// continuously from the callsite.
+// - checklive: if true, then the live 'blockchain' state database is used. If the caller want to
+// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid
+// storing trash persistently
+// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided,
+// it would be preferable to start from a fresh state, if we have it on disk.
func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
var (
current *types.Block
@@ -131,7 +131,9 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state
if current = eth.blockchain.GetBlockByNumber(next); current == nil {
return nil, fmt.Errorf("block #%d not found", next)
}
- _, _, _, err := eth.blockchain.Processor().Process(current, statedb, vm.Config{})
+
+ _, _, _, err := eth.blockchain.Processor().Process(current, statedb, vm.Config{}, nil)
+
if err != nil {
return nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
@@ -191,9 +193,11 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{})
statedb.Prepare(tx.Hash(), idx)
- if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
+
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()), nil); err != nil {
return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
+
// Ensure any modifications are committed to the state
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 13f5c627cd..54f624b0a8 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -20,10 +20,13 @@ import (
"bufio"
"bytes"
"context"
+ "encoding/hex"
"errors"
"fmt"
"io/ioutil"
+ "math/big"
"os"
+ "path/filepath"
"runtime"
"sync"
"time"
@@ -63,10 +66,16 @@ const (
// For non-archive nodes, this limit _will_ be overblown, as disk-backed tries
// will only be found every ~15K blocks or so.
defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024)
+
+ defaultPath = string(".")
+
+ defaultIOFlag = false
)
var defaultBorTraceEnabled = newBoolPtr(false)
+var allowIOTracing = false // Change this to true to enable IO tracing for debugging
+
// Backend interface provides the common API services (that are provided by
// both full and light clients) with access to necessary functions.
type Backend interface {
@@ -196,6 +205,8 @@ type TraceConfig struct {
Tracer *string
Timeout *string
Reexec *uint64
+ Path *string
+ IOFlag *bool
BorTraceEnabled *bool
BorTx *bool
}
@@ -643,7 +654,8 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config
break
}
} else {
- if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
+ //nolint:contextcheck
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()), context.Background()); err != nil {
log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
// We intentionally don't return the error here: if we do, then the RPC server will not
// return the roots. Most likely, the caller already knows that a certain transaction fails to
@@ -693,18 +705,37 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
if block.NumberU64() == 0 {
return nil, errors.New("genesis is not traceable")
}
+
parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
if err != nil {
return nil, err
}
+
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
+
+ path := defaultPath
+ if config != nil && config.Path != nil {
+ path = *config.Path
+ }
+
+ ioflag := defaultIOFlag
+ if allowIOTracing && config != nil && config.IOFlag != nil {
+ ioflag = *config.IOFlag
+ }
+
statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
if err != nil {
return nil, err
}
+
+ // Create and add an empty mvHashMap in statedb, as StateAtBlock does not include an mvHashMap.
+ if ioflag {
+ statedb.AddEmptyMVHashMap()
+ }
+
// Execute all the transaction contained within the block concurrently
var (
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
@@ -755,10 +786,31 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
}
}()
}
+
+ var IOdump string
+
+ var RWstruct []state.DumpStruct
+
+ var london bool
+
+ if ioflag {
+ IOdump = "TransactionIndex, Incarnation, VersionTxIdx, VersionInc, Path, Operation\n"
+ RWstruct = []state.DumpStruct{}
+ }
// Feed the transactions into the tracers and return
var failed error
blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
+
+ if ioflag {
+ london = api.backend.ChainConfig().IsLondon(block.Number())
+ }
+
for i, tx := range txs {
+ if ioflag {
+ // copy of statedb
+ statedb = statedb.Copy()
+ }
+
// Send the trace task over for execution
jobs <- &txTraceTask{statedb: statedb.Copy(), index: i}
@@ -767,28 +819,89 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
statedb.Prepare(tx.Hash(), i)
vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{})
- //nolint: nestif
- if stateSyncPresent && i == len(txs)-1 {
- if *config.BorTraceEnabled {
- callmsg := prepareCallMessage(msg)
- if _, err := statefull.ApplyBorMessage(*vmenv, callmsg); err != nil {
- failed = err
+
+ //nolint:nestif
+ if !ioflag {
+ //nolint:nestif
+ if stateSyncPresent && i == len(txs)-1 {
+ if *config.BorTraceEnabled {
+ callmsg := prepareCallMessage(msg)
+ //nolint:contextcheck
+ if _, err := statefull.ApplyBorMessage(*vmenv, callmsg); err != nil {
+ failed = err
+ break
+ }
+ } else {
break
}
} else {
- break
+ //nolint:contextcheck
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()), context.Background()); err != nil {
+ failed = err
+ break
+ }
+ // Finalize the state so any modifications are written to the trie
+ // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
+ statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
}
} else {
- if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
+ coinbaseBalance := statedb.GetBalance(blockCtx.Coinbase)
+ //nolint:contextcheck
+ result, err := core.ApplyMessageNoFeeBurnOrTip(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()), context.Background())
+
+ if err != nil {
failed = err
break
}
+
+ if london {
+ statedb.AddBalance(result.BurntContractAddress, result.FeeBurnt)
+ }
+
+ statedb.AddBalance(blockCtx.Coinbase, result.FeeTipped)
+ output1 := new(big.Int).SetBytes(result.SenderInitBalance.Bytes())
+ output2 := new(big.Int).SetBytes(coinbaseBalance.Bytes())
+
+ // The transfer log is deprecated and will be removed in a future fork. PLEASE DO NOT USE this transfer log going forward. Parameters won't get updated as expected with EIP1559.
+ // add transfer log
+ core.AddFeeTransferLog(
+ statedb,
+
+ msg.From(),
+ blockCtx.Coinbase,
+
+ result.FeeTipped,
+ result.SenderInitBalance,
+ coinbaseBalance,
+ output1.Sub(output1, result.FeeTipped),
+ output2.Add(output2, result.FeeTipped),
+ )
+
+ // Finalize the state so any modifications are written to the trie
+ // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
+ statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
+ statedb.FlushMVWriteSet()
+
+ structRead := statedb.GetReadMapDump()
+ structWrite := statedb.GetWriteMapDump()
+
+ RWstruct = append(RWstruct, structRead...)
+ RWstruct = append(RWstruct, structWrite...)
}
+ }
- // Finalize the state so any modifications are written to the trie
- // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
- statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
+ if ioflag {
+ for _, val := range RWstruct {
+ IOdump += fmt.Sprintf("%v , %v, %v , %v, ", val.TxIdx, val.TxInc, val.VerIdx, val.VerInc) + hex.EncodeToString(val.Path) + ", " + val.Op
+ }
+
+ // Write IOdump to data.csv under path (ioutil.WriteFile creates the file or truncates an existing one).
+ err = ioutil.WriteFile(filepath.Join(path, "data.csv"), []byte(fmt.Sprint(IOdump)), 0600)
+ if err != nil {
+ return nil, err
+ }
}
+
close(jobs)
pend.Wait()
@@ -926,7 +1039,8 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
}
}
} else {
- _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
+ //nolint:contextcheck
+ _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()), context.Background())
if writer != nil {
writer.Flush()
}
@@ -1138,7 +1252,8 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex
return nil, fmt.Errorf("tracing failed: %w", err)
}
} else {
- result, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
+ //nolint:contextcheck
+ result, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()), context.Background())
if err != nil {
return nil, fmt.Errorf("tracing failed: %w", err)
}
diff --git a/eth/tracers/api_bor.go b/eth/tracers/api_bor.go
index b93baae432..2351aed2a9 100644
--- a/eth/tracers/api_bor.go
+++ b/eth/tracers/api_bor.go
@@ -95,7 +95,7 @@ func (api *API) traceBorBlock(ctx context.Context, block *types.Block, config *T
callmsg := prepareCallMessage(message)
execRes, err = statefull.ApplyBorMessage(*vmenv, callmsg)
} else {
- execRes, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
+ execRes, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()), nil)
}
if err != nil {
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index d394e4fbe3..897e412f2c 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -167,14 +167,17 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block
for idx, tx := range block.Transactions() {
msg, _ := tx.AsMessage(signer, block.BaseFee())
txContext := core.NewEVMTxContext(msg)
- context := core.NewEVMBlockContext(block.Header(), b.chain, nil)
+ blockContext := core.NewEVMBlockContext(block.Header(), b.chain, nil)
if idx == txIndex {
- return msg, context, statedb, nil
+ return msg, blockContext, statedb, nil
}
- vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{})
- if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
+
+ vmenv := vm.NewEVM(blockContext, txContext, statedb, b.chainConfig, vm.Config{})
+ //nolint:contextcheck
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()), context.Background()); err != nil {
return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
+
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
}
return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
@@ -424,6 +427,85 @@ func TestTraceBlock(t *testing.T) {
}
}
+func TestIOdump(t *testing.T) {
+ t.Parallel()
+
+ // Initialize test accounts
+ accounts := newAccounts(5)
+ genesis := &core.Genesis{Alloc: core.GenesisAlloc{
+ accounts[0].addr: {Balance: big.NewInt(params.Ether)},
+ accounts[1].addr: {Balance: big.NewInt(params.Ether)},
+ accounts[2].addr: {Balance: big.NewInt(params.Ether)},
+ accounts[3].addr: {Balance: big.NewInt(params.Ether)},
+ accounts[4].addr: {Balance: big.NewInt(params.Ether)},
+ }}
+ genBlocks := 1
+ signer := types.HomesteadSigner{}
+ api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+ // Transfer from account[0] to account[1], account[1] to account[2], account[2] to account[3], account[3] to account[4], account[4] to account[0]
+ // value: 1000 wei
+ // fee: 0 wei
+
+ for j := 0; j < 5; j++ {
+ tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[(j+1)%5].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[j].key)
+ b.AddTx(tx)
+ }
+ }))
+
+ allowIOTracing = true
+
+ ioflag := new(bool)
+
+ *ioflag = true
+
+ var testSuite = []struct {
+ blockNumber rpc.BlockNumber
+ config *TraceConfig
+ want string
+ expectErr error
+ }{
+ // Trace head block
+ {
+ config: &TraceConfig{
+ IOFlag: ioflag,
+ },
+ blockNumber: rpc.BlockNumber(genBlocks),
+ want: `[{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}},{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}},{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}},{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}},{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`,
+ },
+ }
+
+ for i, tc := range testSuite {
+ result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config)
+ if tc.expectErr != nil {
+ if err == nil {
+ t.Errorf("test %d, want error %v", i, tc.expectErr)
+ continue
+ }
+
+ if !reflect.DeepEqual(err, tc.expectErr) {
+ t.Errorf("test %d: error mismatch, want %v, get %v", i, tc.expectErr, err)
+ }
+
+ continue
+ }
+
+ if err != nil {
+ t.Errorf("test %d, want no error, have %v", i, err)
+ continue
+ }
+
+ have, err := json.Marshal(result)
+ if err != nil {
+ t.Errorf("Error in Marshal: %v", err)
+ }
+
+ want := tc.want
+ if string(have) != want {
+ t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(have), want)
+ }
+ }
+}
+
func TestTracingWithOverrides(t *testing.T) {
t.Parallel()
// Initialize test accounts
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index cf7c1e6c0d..938edccbd8 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -17,6 +17,7 @@
package tracetest
import (
+ "context"
"encoding/json"
"io/ioutil"
"math/big"
@@ -168,7 +169,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
Origin: origin,
GasPrice: tx.GasPrice(),
}
- context = vm.BlockContext{
+ blockContext = vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
@@ -183,13 +184,13 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
- evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+ evm := vm.NewEVM(blockContext, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
- if _, err = st.TransitionDb(); err != nil {
+ if _, err = st.TransitionDb(context.Background()); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
@@ -279,7 +280,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
Origin: origin,
GasPrice: tx.GasPrice(),
}
- context := vm.BlockContext{
+ blockContext := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
@@ -297,15 +298,19 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
if err != nil {
b.Fatalf("failed to create call tracer: %v", err)
}
- evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+
+ evm := vm.NewEVM(blockContext, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
snap := statedb.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
- if _, err = st.TransitionDb(); err != nil {
+
+ if _, err = st.TransitionDb(context.Background()); err != nil {
b.Fatalf("failed to execute transaction: %v", err)
}
+
if _, err = tracer.GetResult(); err != nil {
b.Fatal(err)
}
+
statedb.RevertToSnapshot(snap)
}
}
@@ -333,7 +338,7 @@ func TestZeroValueToNotExitCall(t *testing.T) {
Origin: origin,
GasPrice: big.NewInt(1),
}
- context := vm.BlockContext{
+ blockContext := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: common.Address{},
@@ -363,15 +368,18 @@ func TestZeroValueToNotExitCall(t *testing.T) {
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
- evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
+
+ evm := vm.NewEVM(blockContext, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
- if _, err = st.TransitionDb(); err != nil {
+
+ if _, err = st.TransitionDb(context.Background()); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
+
// Retrieve the trace result and compare against the etalon
res, err := tracer.GetResult()
if err != nil {
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index cf0a4aa828..a17e5f3150 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -69,7 +69,7 @@ func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCon
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0}
tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, value)
- ret, err := env.Interpreter().Run(contract, []byte{}, false)
+ ret, err := env.Interpreter().Run(contract, []byte{}, false, nil)
tracer.CaptureEnd(ret, startGas-contract.Gas, 1, err)
if err != nil {
return nil, err
diff --git a/eth/tracers/logger/logger_test.go b/eth/tracers/logger/logger_test.go
index 205ee31120..546a3df87c 100644
--- a/eth/tracers/logger/logger_test.go
+++ b/eth/tracers/logger/logger_test.go
@@ -59,7 +59,7 @@ func TestStoreCapture(t *testing.T) {
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)}
var index common.Hash
logger.CaptureStart(env, common.Address{}, contract.Address(), false, nil, 0, nil)
- _, err := env.Interpreter().Run(contract, []byte{}, false)
+ _, err := env.Interpreter().PreRun(contract, []byte{}, false, nil)
if err != nil {
t.Fatal(err)
}
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index ce9289dd75..85cb16a985 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -17,6 +17,7 @@
package tracers
import (
+ "context"
"math/big"
"testing"
@@ -66,7 +67,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
Origin: from,
GasPrice: tx.GasPrice(),
}
- context := vm.BlockContext{
+ blockContext := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: common.Address{},
@@ -102,7 +103,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
//EnableMemory: false,
//EnableReturnData: false,
})
- evm := vm.NewEVM(context, txContext, statedb, params.AllEthashProtocolChanges, vm.Config{Debug: true, Tracer: tracer})
+ evm := vm.NewEVM(blockContext, txContext, statedb, params.AllEthashProtocolChanges, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)
@@ -113,7 +114,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
for i := 0; i < b.N; i++ {
snap := statedb.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
- _, err = st.TransitionDb()
+ _, err = st.TransitionDb(context.Background())
if err != nil {
b.Fatal(err)
}
diff --git a/go.mod b/go.mod
index ddfa2e0feb..c5fef38ed6 100644
--- a/go.mod
+++ b/go.mod
@@ -38,6 +38,7 @@ require (
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/hashicorp/hcl/v2 v2.10.1
+ github.com/heimdalr/dag v1.2.1
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.0
@@ -131,6 +132,7 @@ require (
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect
+ github.com/emirpasic/gods v1.18.1
github.com/etcd-io/bbolt v1.3.3 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-kit/kit v0.10.0 // indirect
diff --git a/go.sum b/go.sum
index 844cb0e86c..99084176ed 100644
--- a/go.sum
+++ b/go.sum
@@ -254,6 +254,8 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -324,6 +326,8 @@ github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M=
+github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@@ -498,6 +502,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/heimdalr/dag v1.2.1 h1:XJOMaoWqJK1UKdp+4zaO2uwav9GFbHMGCirdViKMRIQ=
+github.com/heimdalr/dag v1.2.1/go.mod h1:Of/wUB7Yoj4dwiOcGOOYIq6MHlPF/8/QMBKFJpwg+yc=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
diff --git a/graphql/graphql.go b/graphql/graphql.go
index cbd76465d6..e17ad65d6d 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -1006,7 +1006,8 @@ func (b *Block) Call(ctx context.Context, args struct {
return nil, err
}
}
- result, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, b.backend.RPCEVMTimeout(), b.backend.RPCGasCap())
+
+ result, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, nil, b.backend.RPCEVMTimeout(), b.backend.RPCGasCap())
if err != nil {
return nil, err
}
@@ -1076,7 +1077,7 @@ func (p *Pending) Call(ctx context.Context, args struct {
Data ethapi.TransactionArgs
}) (*CallResult, error) {
pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
- result, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, p.backend.RPCEVMTimeout(), p.backend.RPCGasCap())
+ result, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, nil, p.backend.RPCEVMTimeout(), p.backend.RPCGasCap())
if err != nil {
return nil, err
}
diff --git a/internal/cli/dumpconfig.go b/internal/cli/dumpconfig.go
index 55495bf22a..0cd0958ae9 100644
--- a/internal/cli/dumpconfig.go
+++ b/internal/cli/dumpconfig.go
@@ -66,6 +66,7 @@ func (c *DumpconfigCommand) Run(args []string) int {
userConfig.Gpo.IgnorePriceRaw = userConfig.Gpo.IgnorePrice.String()
userConfig.Cache.RejournalRaw = userConfig.Cache.Rejournal.String()
userConfig.Cache.TrieTimeoutRaw = userConfig.Cache.TrieTimeout.String()
+ userConfig.P2P.TxArrivalWaitRaw = userConfig.P2P.TxArrivalWait.String()
if err := toml.NewEncoder(os.Stdout).Encode(userConfig); err != nil {
c.UI.Error(err.Error())
diff --git a/internal/cli/server/chains/mainnet.go b/internal/cli/server/chains/mainnet.go
index b2570e9b2f..3d10dfb3b5 100644
--- a/internal/cli/server/chains/mainnet.go
+++ b/internal/cli/server/chains/mainnet.go
@@ -29,8 +29,13 @@ var mainnetBor = &Chain{
BerlinBlock: big.NewInt(14750000),
LondonBlock: big.NewInt(23850000),
Bor: ¶ms.BorConfig{
- JaipurBlock: big.NewInt(23850000),
- DelhiBlock: big.NewInt(38189056),
+ JaipurBlock: big.NewInt(23850000),
+ DelhiBlock: big.NewInt(38189056),
+ ParallelUniverseBlock: big.NewInt(0),
+ IndoreBlock: big.NewInt(44934656),
+ StateSyncConfirmationDelay: map[string]uint64{
+ "44934656": 128,
+ },
Period: map[string]uint64{
"0": 2,
},
diff --git a/internal/cli/server/chains/mumbai.go b/internal/cli/server/chains/mumbai.go
index 64a5b80060..693105b35e 100644
--- a/internal/cli/server/chains/mumbai.go
+++ b/internal/cli/server/chains/mumbai.go
@@ -29,8 +29,13 @@ var mumbaiTestnet = &Chain{
BerlinBlock: big.NewInt(13996000),
LondonBlock: big.NewInt(22640000),
Bor: ¶ms.BorConfig{
- JaipurBlock: big.NewInt(22770000),
- DelhiBlock: big.NewInt(29638656),
+ JaipurBlock: big.NewInt(22770000),
+ DelhiBlock: big.NewInt(29638656),
+ ParallelUniverseBlock: big.NewInt(0),
+ IndoreBlock: big.NewInt(37075456),
+ StateSyncConfirmationDelay: map[string]uint64{
+ "37075456": 128,
+ },
Period: map[string]uint64{
"0": 2,
"25275000": 5,
diff --git a/internal/cli/server/chains/test_files/chain_legacy_test.json b/internal/cli/server/chains/test_files/chain_legacy_test.json
index b97c8b1f8e..a9f6a0923a 100644
--- a/internal/cli/server/chains/test_files/chain_legacy_test.json
+++ b/internal/cli/server/chains/test_files/chain_legacy_test.json
@@ -48,7 +48,12 @@
"22640000": "0x70bcA57F4579f58670aB2d18Ef16e02C17553C38"
},
"jaipurBlock": 22770000,
- "delhiBlock": 29638656
+ "delhiBlock": 29638656,
+ "parallelUniverseBlock": 0,
+ "indoreBlock": 37075456,
+ "stateSyncConfirmationDelay": {
+ "37075456": 128
+ }
}
},
"nonce": "0x0",
diff --git a/internal/cli/server/chains/test_files/chain_test.json b/internal/cli/server/chains/test_files/chain_test.json
index 7907adfcfa..e367547b6a 100644
--- a/internal/cli/server/chains/test_files/chain_test.json
+++ b/internal/cli/server/chains/test_files/chain_test.json
@@ -50,7 +50,12 @@
"22640000":"0x70bcA57F4579f58670aB2d18Ef16e02C17553C38"
},
"jaipurBlock":22770000,
- "delhiBlock": 29638656
+ "delhiBlock": 29638656,
+ "parallelUniverseBlock": 0,
+ "indoreBlock": 37075456,
+ "stateSyncConfirmationDelay": {
+ "37075456": 128
+ }
}
},
"nonce":"0x0",
diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go
index 47cb9a7848..5cf3d3064b 100644
--- a/internal/cli/server/config.go
+++ b/internal/cli/server/config.go
@@ -127,6 +127,9 @@ type Config struct {
// Developer has the developer mode related settings
Developer *DeveloperConfig `hcl:"developer,block" toml:"developer,block"`
+ // ParallelEVM has the parallel evm related settings
+ ParallelEVM *ParallelEVMConfig `hcl:"parallelevm,block" toml:"parallelevm,block"`
+
// Develop Fake Author mode to produce blocks without authorisation
DevFakeAuthor bool `hcl:"devfakeauthor,optional" toml:"devfakeauthor,optional"`
@@ -204,6 +207,11 @@ type P2PConfig struct {
// Discovery has the p2p discovery related settings
Discovery *P2PDiscovery `hcl:"discovery,block" toml:"discovery,block"`
+
+ // TxArrivalWait sets the maximum duration the transaction fetcher will wait for
+ // an announced transaction to arrive before explicitly requesting it
+ TxArrivalWait time.Duration `hcl:"-,optional" toml:"-"`
+ TxArrivalWaitRaw string `hcl:"txarrivalwait,optional" toml:"txarrivalwait,optional"`
}
type P2PDiscovery struct {
@@ -306,6 +314,8 @@ type SealerConfig struct {
// The time interval for miner to re-create mining work.
Recommit time.Duration `hcl:"-,optional" toml:"-"`
RecommitRaw string `hcl:"recommit,optional" toml:"recommit,optional"`
+
+ CommitInterruptFlag bool `hcl:"commitinterrupt,optional" toml:"commitinterrupt,optional"`
}
type JsonRPCConfig struct {
@@ -563,6 +573,12 @@ type DeveloperConfig struct {
GasLimit uint64 `hcl:"gaslimit,optional" toml:"gaslimit,optional"`
}
+type ParallelEVMConfig struct {
+ Enable bool `hcl:"enable,optional" toml:"enable,optional"`
+
+ SpeculativeProcesses int `hcl:"procs,optional" toml:"procs,optional"`
+}
+
func DefaultConfig() *Config {
return &Config{
Chain: "mainnet",
@@ -582,13 +598,14 @@ func DefaultConfig() *Config {
RPCBatchLimit: 100,
RPCReturnDataLimit: 100000,
P2P: &P2PConfig{
- MaxPeers: 50,
- MaxPendPeers: 50,
- Bind: "0.0.0.0",
- Port: 30303,
- NoDiscover: false,
- NAT: "any",
- NetRestrict: "",
+ MaxPeers: 50,
+ MaxPendPeers: 50,
+ Bind: "0.0.0.0",
+ Port: 30303,
+ NoDiscover: false,
+ NAT: "any",
+ NetRestrict: "",
+ TxArrivalWait: 500 * time.Millisecond,
Discovery: &P2PDiscovery{
V5Enabled: false,
Bootnodes: []string{},
@@ -622,12 +639,13 @@ func DefaultConfig() *Config {
LifeTime: 3 * time.Hour,
},
Sealer: &SealerConfig{
- Enabled: false,
- Etherbase: "",
- GasCeil: 30_000_000, // geth's default
- GasPrice: big.NewInt(1 * params.GWei), // geth's default
- ExtraData: "",
- Recommit: 125 * time.Second,
+ Enabled: false,
+ Etherbase: "",
+ GasCeil: 30_000_000, // geth's default
+ GasPrice: big.NewInt(1 * params.GWei), // geth's default
+ ExtraData: "",
+ Recommit: 125 * time.Second,
+ CommitInterruptFlag: true,
},
Gpo: &GpoConfig{
Blocks: 20,
@@ -687,7 +705,7 @@ func DefaultConfig() *Config {
Enabled: false,
Expensive: false,
PrometheusAddr: "127.0.0.1:7071",
- OpenCollectorEndpoint: "127.0.0.1:4317",
+ OpenCollectorEndpoint: "",
InfluxDB: &InfluxDBConfig{
V1Enabled: false,
Endpoint: "",
@@ -740,6 +758,10 @@ func DefaultConfig() *Config {
BlockProfileRate: 0,
// CPUProfile: "",
},
+ ParallelEVM: &ParallelEVMConfig{
+ Enable: true,
+ SpeculativeProcesses: 8,
+ },
}
}
@@ -795,6 +817,7 @@ func (c *Config) fillTimeDurations() error {
{"txpool.rejournal", &c.TxPool.Rejournal, &c.TxPool.RejournalRaw},
{"cache.rejournal", &c.Cache.Rejournal, &c.Cache.RejournalRaw},
{"cache.timeout", &c.Cache.TrieTimeout, &c.Cache.TrieTimeoutRaw},
+ {"p2p.txarrivalwait", &c.P2P.TxArrivalWait, &c.P2P.TxArrivalWaitRaw},
}
for _, x := range tds {
@@ -916,6 +939,7 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (*
n.Miner.GasPrice = c.Sealer.GasPrice
n.Miner.GasCeil = c.Sealer.GasCeil
n.Miner.ExtraData = []byte(c.Sealer.ExtraData)
+ n.Miner.CommitInterruptFlag = c.Sealer.CommitInterruptFlag
if etherbase := c.Sealer.Etherbase; etherbase != "" {
if !common.IsHexAddress(etherbase) {
@@ -1121,6 +1145,8 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (*
n.BorLogs = c.BorLogs
n.DatabaseHandles = dbHandles
+ n.ParallelEVM.Enable = c.ParallelEVM.Enable
+ n.ParallelEVM.SpeculativeProcesses = c.ParallelEVM.SpeculativeProcesses
n.RPCReturnDataLimit = c.RPCReturnDataLimit
if c.Ancient != "" {
@@ -1257,6 +1283,7 @@ func (c *Config) buildNode() (*node.Config, error) {
MaxPendingPeers: int(c.P2P.MaxPendPeers),
ListenAddr: c.P2P.Bind + ":" + strconv.Itoa(int(c.P2P.Port)),
DiscoveryV5: c.P2P.Discovery.V5Enabled,
+ TxArrivalWait: c.P2P.TxArrivalWait,
},
HTTPModules: c.JsonRPC.Http.API,
HTTPCors: c.JsonRPC.Http.Cors,
diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go
index 82b99090d4..470a377012 100644
--- a/internal/cli/server/flags.go
+++ b/internal/cli/server/flags.go
@@ -302,6 +302,13 @@ func (c *Command) Flags() *flagset.Flagset {
Default: c.cliConfig.Sealer.Recommit,
Group: "Sealer",
})
+ f.BoolFlag(&flagset.BoolFlag{
+ Name: "miner.interruptcommit",
+ Usage: "Interrupt block commit when block creation time is passed",
+ Value: &c.cliConfig.Sealer.CommitInterruptFlag,
+ Default: c.cliConfig.Sealer.CommitInterruptFlag,
+ Group: "Sealer",
+ })
// ethstats
f.StringFlag(&flagset.StringFlag{
@@ -731,6 +738,13 @@ func (c *Command) Flags() *flagset.Flagset {
Default: c.cliConfig.P2P.Discovery.V5Enabled,
Group: "P2P",
})
+ f.DurationFlag(&flagset.DurationFlag{
+ Name: "txarrivalwait",
+ Usage: "Maximum duration to wait for a transaction before explicitly requesting it (defaults to 500ms)",
+ Value: &c.cliConfig.P2P.TxArrivalWait,
+ Default: c.cliConfig.P2P.TxArrivalWait,
+ Group: "P2P",
+ })
// metrics
f.BoolFlag(&flagset.BoolFlag{
@@ -890,6 +904,20 @@ func (c *Command) Flags() *flagset.Flagset {
Value: &c.cliConfig.Developer.Period,
Default: c.cliConfig.Developer.Period,
})
+
+ // parallelevm
+ f.BoolFlag(&flagset.BoolFlag{
+ Name: "parallelevm.enable",
+ Usage: "Enable Block STM",
+ Value: &c.cliConfig.ParallelEVM.Enable,
+ Default: c.cliConfig.ParallelEVM.Enable,
+ })
+ f.IntFlag(&flagset.IntFlag{
+ Name: "parallelevm.procs",
+ Usage: "Number of speculative processes (cores) in Block STM",
+ Value: &c.cliConfig.ParallelEVM.SpeculativeProcesses,
+ Default: c.cliConfig.ParallelEVM.SpeculativeProcesses,
+ })
f.Uint64Flag(&flagset.Uint64Flag{
Name: "dev.gaslimit",
Usage: "Initial block gas limit",
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 3ce2c6552b..6295c82e70 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -987,16 +987,40 @@ func (diff *StateOverride) Apply(state *state.StateDB) error {
return nil
}
-func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
+func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, state *state.StateDB, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
- state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
- if state == nil || err != nil {
- return nil, err
+ var (
+ header *types.Header
+ err error
+ )
+
+ // Fetch the state and header from blockNumberOrHash if it's coming from normal eth_call path.
+ if state == nil {
+ state, header, err = b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+ if state == nil || err != nil {
+ return nil, err
+ }
+ } else {
+ // Fetch the header from the given blockNumberOrHash. Note that this path is only taken
+ // when we're doing a call from bor consensus to fetch data from genesis contracts. It's
+ // necessary to fetch header using header hash as we might be experiencing a reorg and there
+ // can be multiple headers with same number.
+ header, err = b.HeaderByHash(ctx, *blockNrOrHash.BlockHash)
+ if header == nil || err != nil {
+ log.Warn("Error fetching header on CallWithState", "err", err)
+ return nil, err
+ }
}
+
if err := overrides.Apply(state); err != nil {
return nil, err
}
+
+ return doCallWithState(ctx, b, args, header, state, timeout, globalGasCap)
+}
+
+func doCallWithState(ctx context.Context, b Backend, args TransactionArgs, header *types.Header, state *state.StateDB, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
// Setup context so it may be cancelled the call has completed
// or, in case of unmetered gas, setup a context with a timeout.
var cancel context.CancelFunc
@@ -1027,7 +1051,8 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash
// Execute the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
- result, err := core.ApplyMessage(evm, msg, gp)
+ // nolint : contextcheck
+ result, err := core.ApplyMessage(evm, msg, gp, context.Background())
if err := vmError(); err != nil {
return nil, err
}
@@ -1079,7 +1104,20 @@ func (e *revertError) ErrorData() interface{} {
// Note, this function doesn't make and changes in the state/blockchain and is
// useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) {
- result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap())
+ return s.CallWithState(ctx, args, blockNrOrHash, nil, overrides)
+}
+
+// CallWithState executes the given transaction on the given state for
+// the given block number. Note that as it does an EVM call, fields in
+// the underlying state will change. Make sure to handle it outside of
+// this function (ideally by sending a copy of state).
+//
+// Additionally, the caller can specify a batch of contracts for field overriding.
+//
+// Note, this function doesn't make any changes in the state/blockchain and is
+// useful to execute and retrieve values.
+func (s *PublicBlockChainAPI) CallWithState(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, state *state.StateDB, overrides *StateOverride) (hexutil.Bytes, error) {
+ result, err := DoCall(ctx, s.b, args, blockNrOrHash, state, overrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap())
if err != nil {
return nil, err
}
@@ -1092,6 +1130,7 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args TransactionArgs, bl
if len(result.Revert()) > 0 {
return nil, newRevertError(result)
}
+
return result.Return(), result.Err
}
@@ -1169,7 +1208,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
args.Gas = (*hexutil.Uint64)(&gas)
- result, err := DoCall(ctx, b, args, blockNrOrHash, nil, 0, gasCap)
+ result, err := DoCall(ctx, b, args, blockNrOrHash, nil, nil, 0, gasCap)
if err != nil {
if errors.Is(err, core.ErrIntrinsicGas) {
return true, nil, nil // Special case, raise gas limit
@@ -1595,13 +1634,16 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
if err != nil {
return nil, 0, nil, err
}
- res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
+ // nolint : contextcheck
+ res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()), context.Background())
if err != nil {
return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.toTransaction().Hash(), err)
}
+
if tracer.Equal(prevTracer) {
return accessList, res.UsedGas, res.Err, nil
}
+
prevTracer = tracer
}
}
diff --git a/les/odr_test.go b/les/odr_test.go
index ad77abf5b9..291755cfd3 100644
--- a/les/odr_test.go
+++ b/les/odr_test.go
@@ -137,13 +137,14 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai
msg := callmsg{types.NewMessage(from.Address(), &testContractAddr, 0, new(big.Int), 100000, big.NewInt(params.InitialBaseFee), big.NewInt(params.InitialBaseFee), new(big.Int), data, nil, true)}
- context := core.NewEVMBlockContext(header, bc, nil)
+ blockContext := core.NewEVMBlockContext(header, bc, nil)
txContext := core.NewEVMTxContext(msg)
- vmenv := vm.NewEVM(context, txContext, statedb, config, vm.Config{NoBaseFee: true})
+ vmenv := vm.NewEVM(blockContext, txContext, statedb, config, vm.Config{NoBaseFee: true})
//vmenv := core.NewEnv(statedb, config, bc, msg, header, vm.Config{})
gp := new(core.GasPool).AddGas(math.MaxUint64)
- result, _ := core.ApplyMessage(vmenv, msg, gp)
+ // nolint : contextcheck
+ result, _ := core.ApplyMessage(vmenv, msg, gp, context.Background())
res = append(res, result.Return()...)
}
} else {
@@ -151,11 +152,12 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai
state := light.NewState(ctx, header, lc.Odr())
state.SetBalance(bankAddr, math.MaxBig256)
msg := callmsg{types.NewMessage(bankAddr, &testContractAddr, 0, new(big.Int), 100000, big.NewInt(params.InitialBaseFee), big.NewInt(params.InitialBaseFee), new(big.Int), data, nil, true)}
- context := core.NewEVMBlockContext(header, lc, nil)
+ blockContext := core.NewEVMBlockContext(header, lc, nil)
txContext := core.NewEVMTxContext(msg)
- vmenv := vm.NewEVM(context, txContext, state, config, vm.Config{NoBaseFee: true})
+ vmenv := vm.NewEVM(blockContext, txContext, state, config, vm.Config{NoBaseFee: true})
gp := new(core.GasPool).AddGas(math.MaxUint64)
- result, _ := core.ApplyMessage(vmenv, msg, gp)
+ // nolint : contextcheck
+ result, _ := core.ApplyMessage(vmenv, msg, gp, context.Background())
if state.Error() == nil {
res = append(res, result.Return()...)
}
diff --git a/les/state_accessor.go b/les/state_accessor.go
index 112e6fd44d..de881032f4 100644
--- a/les/state_accessor.go
+++ b/les/state_accessor.go
@@ -57,14 +57,15 @@ func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types.
// Assemble the transaction call message and return if the requested offset
msg, _ := tx.AsMessage(signer, block.BaseFee())
txContext := core.NewEVMTxContext(msg)
- context := core.NewEVMBlockContext(block.Header(), leth.blockchain, nil)
+ blockContext := core.NewEVMBlockContext(block.Header(), leth.blockchain, nil)
statedb.Prepare(tx.Hash(), idx)
if idx == txIndex {
- return msg, context, statedb, nil
+ return msg, blockContext, statedb, nil
}
// Not yet the searched for transaction, execute on top of the current state
- vmenv := vm.NewEVM(context, txContext, statedb, leth.blockchain.Config(), vm.Config{})
- if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
+ vmenv := vm.NewEVM(blockContext, txContext, statedb, leth.blockchain.Config(), vm.Config{})
+ // nolint : contextcheck
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()), context.Background()); err != nil {
return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
diff --git a/light/odr_test.go b/light/odr_test.go
index 9f4b42e675..c8e5c27ed9 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -196,10 +196,11 @@ func odrContractCall(ctx context.Context, db ethdb.Database, bc *core.BlockChain
st.SetBalance(testBankAddress, math.MaxBig256)
msg := callmsg{types.NewMessage(testBankAddress, &testContractAddr, 0, new(big.Int), 1000000, big.NewInt(params.InitialBaseFee), big.NewInt(params.InitialBaseFee), new(big.Int), data, nil, true)}
txContext := core.NewEVMTxContext(msg)
- context := core.NewEVMBlockContext(header, chain, nil)
- vmenv := vm.NewEVM(context, txContext, st, config, vm.Config{NoBaseFee: true})
+ blockContext := core.NewEVMBlockContext(header, chain, nil)
+ vmenv := vm.NewEVM(blockContext, txContext, st, config, vm.Config{NoBaseFee: true})
gp := new(core.GasPool).AddGas(math.MaxUint64)
- result, _ := core.ApplyMessage(vmenv, msg, gp)
+ // nolint : contextcheck
+ result, _ := core.ApplyMessage(vmenv, msg, gp, context.Background())
res = append(res, result.Return()...)
if st.Error() != nil {
return res, st.Error()
diff --git a/log/logger.go b/log/logger.go
index c2678259bf..e5eb34cfb9 100644
--- a/log/logger.go
+++ b/log/logger.go
@@ -18,13 +18,13 @@ const skipLevel = 2
type Lvl int
const (
- LvlDiscard Lvl = -1
- LvlCrit Lvl = iota
+ LvlCrit Lvl = iota
LvlError
LvlWarn
LvlInfo
LvlDebug
LvlTrace
+ LvlDiscard Lvl = -1
)
// AlignedString returns a 5-character string containing the name of a Lvl.
diff --git a/metrics/prometheus/collector.go b/metrics/prometheus/collector.go
index 3959cbf5e1..160b72bde2 100644
--- a/metrics/prometheus/collector.go
+++ b/metrics/prometheus/collector.go
@@ -27,9 +27,9 @@ import (
var (
typeGaugeTpl = "# TYPE %s gauge\n"
- typeCounterTpl = "# TYPE %s counter\n"
typeSummaryTpl = "# TYPE %s summary\n"
keyValueTpl = "%s %v\n\n"
+ keyCounterTpl = "%s %v\n"
keyQuantileTagValueTpl = "%s {quantile=\"%s\"} %v\n"
)
@@ -61,11 +61,16 @@ func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64) {
func (c *collector) addHistogram(name string, m metrics.Histogram) {
pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
ps := m.Percentiles(pv)
- c.writeSummaryCounter(name, m.Count())
+
+ var sum float64 = 0
c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
for i := range pv {
c.writeSummaryPercentile(name, strconv.FormatFloat(pv[i], 'f', -1, 64), ps[i])
+ sum += ps[i]
}
+
+ c.writeSummarySum(name, fmt.Sprintf("%f", sum))
+ c.writeSummaryCounter(name, len(ps))
c.buff.WriteRune('\n')
}
@@ -76,11 +81,16 @@ func (c *collector) addMeter(name string, m metrics.Meter) {
func (c *collector) addTimer(name string, m metrics.Timer) {
pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
ps := m.Percentiles(pv)
- c.writeSummaryCounter(name, m.Count())
+
+ var sum float64 = 0
c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
for i := range pv {
c.writeSummaryPercentile(name, strconv.FormatFloat(pv[i], 'f', -1, 64), ps[i])
+ sum += ps[i]
}
+
+ c.writeSummarySum(name, fmt.Sprintf("%f", sum))
+ c.writeSummaryCounter(name, len(ps))
c.buff.WriteRune('\n')
}
@@ -90,11 +100,19 @@ func (c *collector) addResettingTimer(name string, m metrics.ResettingTimer) {
}
ps := m.Percentiles([]float64{50, 95, 99})
val := m.Values()
- c.writeSummaryCounter(name, len(val))
c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
c.writeSummaryPercentile(name, "0.50", ps[0])
c.writeSummaryPercentile(name, "0.95", ps[1])
c.writeSummaryPercentile(name, "0.99", ps[2])
+
+ var sum int64 = 0
+
+ for _, v := range val {
+ sum += v
+ }
+
+ c.writeSummarySum(name, fmt.Sprintf("%d", sum))
+ c.writeSummaryCounter(name, len(val))
c.buff.WriteRune('\n')
}
@@ -106,8 +124,7 @@ func (c *collector) writeGaugeCounter(name string, value interface{}) {
func (c *collector) writeSummaryCounter(name string, value interface{}) {
name = mutateKey(name + "_count")
- c.buff.WriteString(fmt.Sprintf(typeCounterTpl, name))
- c.buff.WriteString(fmt.Sprintf(keyValueTpl, name, value))
+ c.buff.WriteString(fmt.Sprintf(keyCounterTpl, name, value))
}
func (c *collector) writeSummaryPercentile(name, p string, value interface{}) {
@@ -115,6 +132,11 @@ func (c *collector) writeSummaryPercentile(name, p string, value interface{}) {
c.buff.WriteString(fmt.Sprintf(keyQuantileTagValueTpl, name, p, value))
}
+func (c *collector) writeSummarySum(name string, value string) {
+ name = mutateKey(name + "_sum")
+ c.buff.WriteString(fmt.Sprintf(keyCounterTpl, name, value))
+}
+
func mutateKey(key string) string {
return strings.Replace(key, "/", "_", -1)
}
diff --git a/metrics/prometheus/collector_test.go b/metrics/prometheus/collector_test.go
index 43f2f804d3..979fdbc6a3 100644
--- a/metrics/prometheus/collector_test.go
+++ b/metrics/prometheus/collector_test.go
@@ -67,9 +67,6 @@ test_gauge 23456
# TYPE test_gauge_float64 gauge
test_gauge_float64 34567.89
-# TYPE test_histogram_count counter
-test_histogram_count 0
-
# TYPE test_histogram summary
test_histogram {quantile="0.5"} 0
test_histogram {quantile="0.75"} 0
@@ -77,13 +74,12 @@ test_histogram {quantile="0.95"} 0
test_histogram {quantile="0.99"} 0
test_histogram {quantile="0.999"} 0
test_histogram {quantile="0.9999"} 0
+test_histogram_sum 0.000000
+test_histogram_count 6
# TYPE test_meter gauge
test_meter 9999999
-# TYPE test_timer_count counter
-test_timer_count 6
-
# TYPE test_timer summary
test_timer {quantile="0.5"} 2.25e+07
test_timer {quantile="0.75"} 4.8e+07
@@ -91,16 +87,20 @@ test_timer {quantile="0.95"} 1.2e+08
test_timer {quantile="0.99"} 1.2e+08
test_timer {quantile="0.999"} 1.2e+08
test_timer {quantile="0.9999"} 1.2e+08
-
-# TYPE test_resetting_timer_count counter
-test_resetting_timer_count 6
+test_timer_sum 550500000.000000
+test_timer_count 6
# TYPE test_resetting_timer summary
test_resetting_timer {quantile="0.50"} 12000000
test_resetting_timer {quantile="0.95"} 120000000
test_resetting_timer {quantile="0.99"} 120000000
+test_resetting_timer_sum 180000000
+test_resetting_timer_count 6
`
+
+ c.addResettingTimer("test/empty_resetting_timer", emptyResettingTimer)
+
exp := c.buff.String()
if exp != expectedOutput {
t.Log("Expected Output:\n", expectedOutput)
diff --git a/miner/fake_miner.go b/miner/fake_miner.go
index bd632d32c9..3176994a4e 100644
--- a/miner/fake_miner.go
+++ b/miner/fake_miner.go
@@ -222,8 +222,9 @@ var (
newTxs []*types.Transaction
testConfig = &Config{
- Recommit: time.Second,
- GasCeil: params.GenesisGasLimit,
+ Recommit: time.Second,
+ GasCeil: params.GenesisGasLimit,
+ CommitInterruptFlag: true,
}
)
diff --git a/miner/miner.go b/miner/miner.go
index 20e12c240e..14a6de7c14 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -45,15 +45,16 @@ type Backend interface {
// Config is the configuration parameters of mining.
type Config struct {
- Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account)
- Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash).
- NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages
- ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner
- GasFloor uint64 // Target gas floor for mined blocks.
- GasCeil uint64 // Target gas ceiling for mined blocks.
- GasPrice *big.Int // Minimum gas price for mining a transaction
- Recommit time.Duration // The time interval for miner to re-create mining work.
- Noverify bool // Disable remote mining solution verification(only useful in ethash).
+ Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account)
+ Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash).
+ NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages
+ ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner
+ GasFloor uint64 // Target gas floor for mined blocks.
+ GasCeil uint64 // Target gas ceiling for mined blocks.
+ GasPrice *big.Int // Minimum gas price for mining a transaction
+ Recommit time.Duration // The time interval for miner to re-create mining work.
+ Noverify bool // Disable remote mining solution verification(only useful in ethash).
+ CommitInterruptFlag bool // Interrupt commit when time is up (default = true)
}
// Miner creates blocks and searches for proof-of-work values.
diff --git a/miner/test_backend.go b/miner/test_backend.go
index 4ba346d589..a0d04c7923 100644
--- a/miner/test_backend.go
+++ b/miner/test_backend.go
@@ -31,6 +31,8 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
+
+ lru "github.com/hashicorp/golang-lru"
)
const (
@@ -40,6 +42,10 @@ const (
// testGas is the gas required for contract deployment.
testGas = 144109
+
+ storageContractByteCode = "608060405234801561001057600080fd5b50610150806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80632e64cec11461003b5780636057361d14610059575b600080fd5b610043610075565b60405161005091906100a1565b60405180910390f35b610073600480360381019061006e91906100ed565b61007e565b005b60008054905090565b8060008190555050565b6000819050919050565b61009b81610088565b82525050565b60006020820190506100b66000830184610092565b92915050565b600080fd5b6100ca81610088565b81146100d557600080fd5b50565b6000813590506100e7816100c1565b92915050565b600060208284031215610103576101026100bc565b5b6000610111848285016100d8565b9150509291505056fea2646970667358221220322c78243e61b783558509c9cc22cb8493dde6925aa5e89a08cdf6e22f279ef164736f6c63430008120033"
+ storageContractTxCallData = "0x6057361d0000000000000000000000000000000000000000000000000000000000000001"
+ storageCallTxGas = 100000
)
func init() {
@@ -167,6 +173,7 @@ func (b *testWorkerBackend) newRandomUncle() (*types.Block, error) {
return blocks[0], err
}
+// newRandomTx creates a new transaction.
func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction {
var tx *types.Transaction
@@ -181,15 +188,54 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction {
return tx
}
-func NewTestWorker(t TensingObject, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int, noempty uint32, delay uint) (*worker, *testWorkerBackend, func()) {
+// newRandomTxWithNonce creates a new transaction with the given nonce.
+func (b *testWorkerBackend) newRandomTxWithNonce(creation bool, nonce uint64) *types.Transaction {
+ var tx *types.Transaction
+
+ gasPrice := big.NewInt(100 * params.InitialBaseFee)
+
+ if creation {
+ tx, _ = types.SignTx(types.NewContractCreation(b.txPool.Nonce(TestBankAddress), big.NewInt(0), testGas, gasPrice, common.FromHex(testCode)), types.HomesteadSigner{}, testBankKey)
+ } else {
+ tx, _ = types.SignTx(types.NewTransaction(nonce, testUserAddress, big.NewInt(1000), params.TxGas, gasPrice, nil), types.HomesteadSigner{}, testBankKey)
+ }
+
+ return tx
+}
+
+// newStorageCreateContractTx creates a new transaction to deploy a storage smart contract.
+func (b *testWorkerBackend) newStorageCreateContractTx() (*types.Transaction, common.Address) {
+ var tx *types.Transaction
+
+ gasPrice := big.NewInt(10 * params.InitialBaseFee)
+
+ tx, _ = types.SignTx(types.NewContractCreation(b.txPool.Nonce(TestBankAddress), big.NewInt(0), testGas, gasPrice, common.FromHex(storageContractByteCode)), types.HomesteadSigner{}, testBankKey)
+ contractAddr := crypto.CreateAddress(TestBankAddress, b.txPool.Nonce(TestBankAddress))
+
+ return tx, contractAddr
+}
+
+// newStorageContractCallTx creates a new transaction to call a storage smart contract.
+func (b *testWorkerBackend) newStorageContractCallTx(to common.Address, nonce uint64) *types.Transaction {
+ var tx *types.Transaction
+
+ gasPrice := big.NewInt(10 * params.InitialBaseFee)
+
+ tx, _ = types.SignTx(types.NewTransaction(nonce, to, nil, storageCallTxGas, gasPrice, common.FromHex(storageContractTxCallData)), types.HomesteadSigner{}, testBankKey)
+
+ return tx
+}
+
+// NewTestWorker creates a new test worker with the given parameters.
+func NewTestWorker(t TensingObject, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int, noempty uint32, delay uint, opcodeDelay uint) (*worker, *testWorkerBackend, func()) {
backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks)
backend.txPool.AddLocals(pendingTxs)
var w *worker
- if delay != 0 {
+ if delay != 0 || opcodeDelay != 0 {
//nolint:staticcheck
- w = newWorkerWithDelay(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, delay)
+ w = newWorkerWithDelay(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, delay, opcodeDelay)
} else {
//nolint:staticcheck
w = newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false)
@@ -203,32 +249,34 @@ func NewTestWorker(t TensingObject, chainConfig *params.ChainConfig, engine cons
return w, backend, w.close
}
-//nolint:staticcheck
-func newWorkerWithDelay(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool, delay uint) *worker {
+// newWorkerWithDelay is newWorker() with extra params to induce artificial delays for tests such as commit-interrupt.
+// nolint:staticcheck
+func newWorkerWithDelay(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool, delay uint, opcodeDelay uint) *worker {
worker := &worker{
- config: config,
- chainConfig: chainConfig,
- engine: engine,
- eth: eth,
- mux: mux,
- chain: eth.BlockChain(),
- isLocalBlock: isLocalBlock,
- localUncles: make(map[common.Hash]*types.Block),
- remoteUncles: make(map[common.Hash]*types.Block),
- unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
- pendingTasks: make(map[common.Hash]*task),
- txsCh: make(chan core.NewTxsEvent, txChanSize),
- chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
- chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
- newWorkCh: make(chan *newWorkReq),
- getWorkCh: make(chan *getWorkReq),
- taskCh: make(chan *task),
- resultCh: make(chan *types.Block, resultQueueSize),
- exitCh: make(chan struct{}),
- startCh: make(chan struct{}, 1),
- resubmitIntervalCh: make(chan time.Duration),
- resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize),
- noempty: 1,
+ config: config,
+ chainConfig: chainConfig,
+ engine: engine,
+ eth: eth,
+ mux: mux,
+ chain: eth.BlockChain(),
+ isLocalBlock: isLocalBlock,
+ localUncles: make(map[common.Hash]*types.Block),
+ remoteUncles: make(map[common.Hash]*types.Block),
+ unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
+ pendingTasks: make(map[common.Hash]*task),
+ txsCh: make(chan core.NewTxsEvent, txChanSize),
+ chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
+ chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
+ newWorkCh: make(chan *newWorkReq),
+ getWorkCh: make(chan *getWorkReq),
+ taskCh: make(chan *task),
+ resultCh: make(chan *types.Block, resultQueueSize),
+ exitCh: make(chan struct{}),
+ startCh: make(chan struct{}, 1),
+ resubmitIntervalCh: make(chan time.Duration),
+ resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize),
+ noempty: 1,
+ interruptCommitFlag: config.CommitInterruptFlag,
}
worker.profileCount = new(int32)
// Subscribe NewTxsEvent for tx pool
@@ -237,6 +285,19 @@ func newWorkerWithDelay(config *Config, chainConfig *params.ChainConfig, engine
worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)
+ interruptedTxCache, err := lru.New(vm.InterruptedTxCacheSize)
+ if err != nil {
+ log.Warn("Failed to create interrupted tx cache", "err", err)
+ }
+
+ worker.interruptedTxCache = &vm.TxCache{
+ Cache: interruptedTxCache,
+ }
+
+ if !worker.interruptCommitFlag {
+ worker.noempty = 0
+ }
+
// Sanitize recommit interval if the user-specified one is too short.
recommit := worker.config.Recommit
if recommit < minRecommitInterval {
@@ -248,7 +309,7 @@ func newWorkerWithDelay(config *Config, chainConfig *params.ChainConfig, engine
worker.wg.Add(4)
- go worker.mainLoopWithDelay(ctx, delay)
+ go worker.mainLoopWithDelay(ctx, delay, opcodeDelay)
go worker.newWorkLoop(ctx, recommit)
go worker.resultLoop()
go worker.taskLoop()
@@ -261,8 +322,9 @@ func newWorkerWithDelay(config *Config, chainConfig *params.ChainConfig, engine
return worker
}
+// mainLoopWithDelay is mainLoop() with extra params to induce artificial delays for tests such as commit-interrupt.
// nolint:gocognit
-func (w *worker) mainLoopWithDelay(ctx context.Context, delay uint) {
+func (w *worker) mainLoopWithDelay(ctx context.Context, delay uint, opcodeDelay uint) {
defer w.wg.Done()
defer w.txsSub.Unsubscribe()
defer w.chainHeadSub.Unsubscribe()
@@ -280,7 +342,7 @@ func (w *worker) mainLoopWithDelay(ctx context.Context, delay uint) {
select {
case req := <-w.newWorkCh:
//nolint:contextcheck
- w.commitWorkWithDelay(req.ctx, req.interrupt, req.noempty, req.timestamp, delay)
+ w.commitWorkWithDelay(req.ctx, req.interrupt, req.noempty, req.timestamp, delay, opcodeDelay)
case req := <-w.getWorkCh:
//nolint:contextcheck
@@ -342,6 +404,7 @@ func (w *worker) mainLoopWithDelay(ctx context.Context, delay uint) {
// Note all transactions received may not be continuous with transactions
// already included in the current sealing block. These transactions will
// be automatically eliminated.
+ // nolint : nestif
if !w.isRunning() && w.current != nil {
// If block is already full, abort
if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
@@ -358,7 +421,6 @@ func (w *worker) mainLoopWithDelay(ctx context.Context, delay uint) {
txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, cmath.FromBig(w.current.header.BaseFee))
tcount := w.current.tcount
- //nolint:contextcheck
w.commitTransactions(w.current, txset, nil, context.Background())
// Only update the snapshot if any new transactions were added
@@ -390,170 +452,8 @@ func (w *worker) mainLoopWithDelay(ctx context.Context, delay uint) {
}
}
-// nolint:gocognit
-func (w *worker) commitTransactionsWithDelay(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32, interruptCtx context.Context, delay uint) bool {
- gasLimit := env.header.GasLimit
- if env.gasPool == nil {
- env.gasPool = new(core.GasPool).AddGas(gasLimit)
- }
-
- var coalescedLogs []*types.Log
-
- initialGasLimit := env.gasPool.Gas()
- initialTxs := txs.GetTxs()
-
- var breakCause string
-
- defer func() {
- log.OnDebug(func(lg log.Logging) {
- lg("commitTransactions-stats",
- "initialTxsCount", initialTxs,
- "initialGasLimit", initialGasLimit,
- "resultTxsCount", txs.GetTxs(),
- "resultGapPool", env.gasPool.Gas(),
- "exitCause", breakCause)
- })
- }()
-
-mainloop:
- for {
- // case of interrupting by timeout
- if interruptCtx != nil {
- // case of interrupting by timeout
- select {
- case <-interruptCtx.Done():
- commitInterruptCounter.Inc(1)
- log.Warn("Tx Level Interrupt")
- break mainloop
- default:
- }
- }
- // In the following three cases, we will interrupt the execution of the transaction.
- // (1) new head block event arrival, the interrupt signal is 1
- // (2) worker start or restart, the interrupt signal is 1
- // (3) worker recreate the sealing block with any newly arrived transactions, the interrupt signal is 2.
- // For the first two cases, the semi-finished work will be discarded.
- // For the third case, the semi-finished work will be submitted to the consensus engine.
- if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
- // Notify resubmit loop to increase resubmitting interval due to too frequent commits.
- if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
- ratio := float64(gasLimit-env.gasPool.Gas()) / float64(gasLimit)
- if ratio < 0.1 {
- // nolint:goconst
- ratio = 0.1
- }
- w.resubmitAdjustCh <- &intervalAdjust{
- ratio: ratio,
- inc: true,
- }
- }
- // nolint:goconst
- breakCause = "interrupt"
- return atomic.LoadInt32(interrupt) == commitInterruptNewHead
- }
- // If we don't have enough gas for any further transactions then we're done
- if env.gasPool.Gas() < params.TxGas {
- log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
- // nolint:goconst
- breakCause = "Not enough gas for further transactions"
- break
- }
- // Retrieve the next transaction and abort if all done
- tx := txs.Peek()
- if tx == nil {
- // nolint:goconst
- breakCause = "all transactions has been included"
- break
- }
- // Error may be ignored here. The error has already been checked
- // during transaction acceptance is the transaction pool.
- //
- // We use the eip155 signer regardless of the current hf.
- from, _ := types.Sender(env.signer, tx)
- // Check whether the tx is replay protected. If we're not in the EIP155 hf
- // phase, start ignoring the sender until we do.
- if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
- log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
-
- txs.Pop()
- continue
- }
- // Start executing the transaction
- env.state.Prepare(tx.Hash(), env.tcount)
-
- var start time.Time
-
- log.OnDebug(func(log.Logging) {
- start = time.Now()
- })
-
- logs, err := w.commitTransaction(env, tx)
- time.Sleep(time.Duration(delay) * time.Millisecond)
-
- switch {
- case errors.Is(err, core.ErrGasLimitReached):
- // Pop the current out-of-gas transaction without shifting in the next from the account
- log.Trace("Gas limit exceeded for current block", "sender", from)
- txs.Pop()
-
- case errors.Is(err, core.ErrNonceTooLow):
- // New head notification data race between the transaction pool and miner, shift
- log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
- txs.Shift()
-
- case errors.Is(err, core.ErrNonceTooHigh):
- // Reorg notification data race between the transaction pool and miner, skip account =
- log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce())
- txs.Pop()
-
- case errors.Is(err, nil):
- // Everything ok, collect the logs and shift in the next transaction from the same account
- coalescedLogs = append(coalescedLogs, logs...)
- env.tcount++
- txs.Shift()
-
- log.OnDebug(func(lg log.Logging) {
- lg("Committed new tx", "tx hash", tx.Hash(), "from", from, "to", tx.To(), "nonce", tx.Nonce(), "gas", tx.Gas(), "gasPrice", tx.GasPrice(), "value", tx.Value(), "time spent", time.Since(start))
- })
-
- case errors.Is(err, core.ErrTxTypeNotSupported):
- // Pop the unsupported transaction without shifting in the next from the account
- log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
- txs.Pop()
-
- default:
- // Strange error, discard the transaction and get the next in line (note, the
- // nonce-too-high clause will prevent us from executing in vain).
- log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
- txs.Shift()
- }
- }
-
- if !w.isRunning() && len(coalescedLogs) > 0 {
- // We don't push the pendingLogsEvent while we are sealing. The reason is that
- // when we are sealing, the worker will regenerate a sealing block every 3 seconds.
- // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.
- // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
- // logs by filling in the block hash when the block was mined by the local miner. This can
- // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
- cpy := make([]*types.Log, len(coalescedLogs))
- for i, l := range coalescedLogs {
- cpy[i] = new(types.Log)
- *cpy[i] = *l
- }
-
- w.pendingLogsFeed.Send(cpy)
- }
- // Notify resubmit loop to decrease resubmitting interval if current interval is larger
- // than the user-specified one.
- if interrupt != nil {
- w.resubmitAdjustCh <- &intervalAdjust{inc: false}
- }
-
- return false
-}
-
-func (w *worker) commitWorkWithDelay(ctx context.Context, interrupt *int32, noempty bool, timestamp int64, delay uint) {
+// commitWorkWithDelay is commitWork() with extra params to induce artificial delays for tests such as commit-interrupt.
+func (w *worker) commitWorkWithDelay(ctx context.Context, interrupt *int32, noempty bool, timestamp int64, delay uint, opcodeDelay uint) {
start := time.Now()
var (
@@ -591,8 +491,14 @@ func (w *worker) commitWorkWithDelay(ctx context.Context, interrupt *int32, noem
stopFn()
}()
- if !noempty {
+ if !noempty && w.interruptCommitFlag {
interruptCtx, stopFn = getInterruptTimer(ctx, work, w.chain.CurrentBlock())
+ // nolint : staticcheck
+ interruptCtx = vm.PutCache(interruptCtx, w.interruptedTxCache)
+ // nolint : staticcheck
+ interruptCtx = context.WithValue(interruptCtx, vm.InterruptCtxDelayKey, delay)
+ // nolint : staticcheck
+ interruptCtx = context.WithValue(interruptCtx, vm.InterruptCtxOpcodeDelayKey, opcodeDelay)
}
ctx, span := tracing.StartSpan(ctx, "commitWork")
@@ -613,7 +519,7 @@ func (w *worker) commitWorkWithDelay(ctx context.Context, interrupt *int32, noem
}
// Fill pending transactions from the txpool
- w.fillTransactionsWithDelay(ctx, interrupt, work, interruptCtx, delay)
+ w.fillTransactionsWithDelay(ctx, interrupt, work, interruptCtx)
err = w.commit(ctx, work.copy(), w.fullTaskHook, true, start)
if err != nil {
@@ -629,8 +535,9 @@ func (w *worker) commitWorkWithDelay(ctx context.Context, interrupt *int32, noem
w.current = work
}
+// fillTransactionsWithDelay is fillTransactions() with extra params to induce artificial delays for tests such as commit-interrupt.
// nolint:gocognit
-func (w *worker) fillTransactionsWithDelay(ctx context.Context, interrupt *int32, env *environment, interruptCtx context.Context, delay uint) {
+func (w *worker) fillTransactionsWithDelay(ctx context.Context, interrupt *int32, env *environment, interruptCtx context.Context) {
ctx, span := tracing.StartSpan(ctx, "fillTransactions")
defer tracing.EndSpan(span)
@@ -754,7 +661,7 @@ func (w *worker) fillTransactionsWithDelay(ctx context.Context, interrupt *int32
})
tracing.Exec(ctx, "", "worker.LocalCommitTransactions", func(ctx context.Context, span trace.Span) {
- committed = w.commitTransactionsWithDelay(env, txs, interrupt, interruptCtx, delay)
+ committed = w.commitTransactionsWithDelay(env, txs, interrupt, interruptCtx)
})
if committed {
@@ -777,7 +684,7 @@ func (w *worker) fillTransactionsWithDelay(ctx context.Context, interrupt *int32
})
tracing.Exec(ctx, "", "worker.RemoteCommitTransactions", func(ctx context.Context, span trace.Span) {
- committed = w.commitTransactionsWithDelay(env, txs, interrupt, interruptCtx, delay)
+ committed = w.commitTransactionsWithDelay(env, txs, interrupt, interruptCtx)
})
if committed {
@@ -793,3 +700,176 @@ func (w *worker) fillTransactionsWithDelay(ctx context.Context, interrupt *int32
attribute.Int("len of final remote txs", remoteEnvTCount),
)
}
+
+// commitTransactionsWithDelay is commitTransactions() with extra params to induce artificial delays for tests such as commit-interrupt.
+// nolint:gocognit, unparam
+func (w *worker) commitTransactionsWithDelay(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32, interruptCtx context.Context) bool {
+ gasLimit := env.header.GasLimit
+ if env.gasPool == nil {
+ env.gasPool = new(core.GasPool).AddGas(gasLimit)
+ }
+
+ var coalescedLogs []*types.Log
+
+ initialGasLimit := env.gasPool.Gas()
+ initialTxs := txs.GetTxs()
+
+ var breakCause string
+
+ defer func() {
+ log.OnDebug(func(lg log.Logging) {
+ lg("commitTransactions-stats",
+ "initialTxsCount", initialTxs,
+ "initialGasLimit", initialGasLimit,
+ "resultTxsCount", txs.GetTxs(),
+ "resultGapPool", env.gasPool.Gas(),
+ "exitCause", breakCause)
+ })
+ }()
+
+mainloop:
+ for {
+ if interruptCtx != nil {
+ // case of interrupting by timeout
+ select {
+ case <-interruptCtx.Done():
+ log.Warn("Interrupt")
+ break mainloop
+ default:
+ }
+ }
+
+ // In the following three cases, we will interrupt the execution of the transaction.
+ // (1) new head block event arrival, the interrupt signal is 1
+ // (2) worker start or restart, the interrupt signal is 1
+ // (3) worker recreate the sealing block with any newly arrived transactions, the interrupt signal is 2.
+ // For the first two cases, the semi-finished work will be discarded.
+ // For the third case, the semi-finished work will be submitted to the consensus engine.
+ if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
+ // Notify resubmit loop to increase resubmitting interval due to too frequent commits.
+ if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
+ ratio := float64(gasLimit-env.gasPool.Gas()) / float64(gasLimit)
+ if ratio < 0.1 {
+ // nolint:goconst
+ ratio = 0.1
+ }
+ w.resubmitAdjustCh <- &intervalAdjust{
+ ratio: ratio,
+ inc: true,
+ }
+ }
+ // nolint:goconst
+ breakCause = "interrupt"
+
+ return atomic.LoadInt32(interrupt) == commitInterruptNewHead
+ }
+ // If we don't have enough gas for any further transactions then we're done
+ if env.gasPool.Gas() < params.TxGas {
+ log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
+ // nolint:goconst
+ breakCause = "Not enough gas for further transactions"
+
+ break
+ }
+ // Retrieve the next transaction and abort if all done
+ tx := txs.Peek()
+ if tx == nil {
+ // nolint:goconst
+ breakCause = "all transactions has been included"
+ break
+ }
+ // Error may be ignored here. The error has already been checked
+ // during transaction acceptance is the transaction pool.
+ //
+ // We use the eip155 signer regardless of the current hf.
+ from, _ := types.Sender(env.signer, tx)
+ // Check whether the tx is replay protected. If we're not in the EIP155 hf
+ // phase, start ignoring the sender until we do.
+ if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
+ log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
+
+ txs.Pop()
+
+ continue
+ }
+ // Start executing the transaction
+ env.state.Prepare(tx.Hash(), env.tcount)
+
+ var start time.Time
+
+ log.OnDebug(func(log.Logging) {
+ start = time.Now()
+ })
+
+ logs, err := w.commitTransaction(env, tx, interruptCtx)
+
+ if interruptCtx != nil {
+ if delay := interruptCtx.Value(vm.InterruptCtxDelayKey); delay != nil {
+ // nolint : durationcheck
+ time.Sleep(time.Duration(delay.(uint)) * time.Millisecond)
+ }
+ }
+
+ switch {
+ case errors.Is(err, core.ErrGasLimitReached):
+ // Pop the current out-of-gas transaction without shifting in the next from the account
+ log.Trace("Gas limit exceeded for current block", "sender", from)
+ txs.Pop()
+
+ case errors.Is(err, core.ErrNonceTooLow):
+ // New head notification data race between the transaction pool and miner, shift
+ log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+ txs.Shift()
+
+ case errors.Is(err, core.ErrNonceTooHigh):
+ // Reorg notification data race between the transaction pool and miner, skip account =
+ log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce())
+ txs.Pop()
+
+ case errors.Is(err, nil):
+ // Everything ok, collect the logs and shift in the next transaction from the same account
+ coalescedLogs = append(coalescedLogs, logs...)
+ env.tcount++
+
+ txs.Shift()
+
+ log.OnDebug(func(lg log.Logging) {
+ lg("Committed new tx", "tx hash", tx.Hash(), "from", from, "to", tx.To(), "nonce", tx.Nonce(), "gas", tx.Gas(), "gasPrice", tx.GasPrice(), "value", tx.Value(), "time spent", time.Since(start))
+ })
+
+ case errors.Is(err, core.ErrTxTypeNotSupported):
+ // Pop the unsupported transaction without shifting in the next from the account
+ log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
+ txs.Pop()
+
+ default:
+ // Strange error, discard the transaction and get the next in line (note, the
+ // nonce-too-high clause will prevent us from executing in vain).
+ log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+ txs.Shift()
+ }
+ }
+
+ if !w.isRunning() && len(coalescedLogs) > 0 {
+ // We don't push the pendingLogsEvent while we are sealing. The reason is that
+ // when we are sealing, the worker will regenerate a sealing block every 3 seconds.
+ // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.
+ // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
+ // logs by filling in the block hash when the block was mined by the local miner. This can
+ // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
+ cpy := make([]*types.Log, len(coalescedLogs))
+ for i, l := range coalescedLogs {
+ cpy[i] = new(types.Log)
+ *cpy[i] = *l
+ }
+
+ w.pendingLogsFeed.Send(cpy)
+ }
+ // Notify resubmit loop to decrease resubmitting interval if current interval is larger
+ // than the user-specified one.
+ if interrupt != nil {
+ w.resubmitAdjustCh <- &intervalAdjust{inc: false}
+ }
+
+ return false
+}
diff --git a/miner/worker.go b/miner/worker.go
index 8570bdde22..c46d0b2c8d 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -31,6 +31,7 @@ import (
"time"
mapset "github.com/deckarep/golang-set"
+ lru "github.com/hashicorp/golang-lru"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -42,8 +43,10 @@ import (
"github.com/ethereum/go-ethereum/consensus/bor"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/blockstm"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -95,7 +98,7 @@ const (
var (
sealedBlocksCounter = metrics.NewRegisteredCounter("worker/sealedBlocks", nil)
sealedEmptyBlocksCounter = metrics.NewRegisteredCounter("worker/sealedEmptyBlocks", nil)
- commitInterruptCounter = metrics.NewRegisteredCounter("worker/commitInterrupt", nil)
+ txCommitInterruptCounter = metrics.NewRegisteredCounter("worker/txCommitInterrupt", nil)
)
// environment is the worker's current environment and holds all
@@ -273,35 +276,38 @@ type worker struct {
fullTaskHook func() // Method to call before pushing the full sealing task.
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
- profileCount *int32 // Global count for profiling
+ profileCount *int32 // Global count for profiling
+ interruptCommitFlag bool // Interrupt commit ( Default true )
+ interruptedTxCache *vm.TxCache
}
//nolint:staticcheck
func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker {
worker := &worker{
- config: config,
- chainConfig: chainConfig,
- engine: engine,
- eth: eth,
- mux: mux,
- chain: eth.BlockChain(),
- isLocalBlock: isLocalBlock,
- localUncles: make(map[common.Hash]*types.Block),
- remoteUncles: make(map[common.Hash]*types.Block),
- unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
- pendingTasks: make(map[common.Hash]*task),
- txsCh: make(chan core.NewTxsEvent, txChanSize),
- chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
- chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
- newWorkCh: make(chan *newWorkReq),
- getWorkCh: make(chan *getWorkReq),
- taskCh: make(chan *task),
- resultCh: make(chan *types.Block, resultQueueSize),
- exitCh: make(chan struct{}),
- startCh: make(chan struct{}, 1),
- resubmitIntervalCh: make(chan time.Duration),
- resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize),
- noempty: 1,
+ config: config,
+ chainConfig: chainConfig,
+ engine: engine,
+ eth: eth,
+ mux: mux,
+ chain: eth.BlockChain(),
+ isLocalBlock: isLocalBlock,
+ localUncles: make(map[common.Hash]*types.Block),
+ remoteUncles: make(map[common.Hash]*types.Block),
+ unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
+ pendingTasks: make(map[common.Hash]*task),
+ txsCh: make(chan core.NewTxsEvent, txChanSize),
+ chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
+ chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
+ newWorkCh: make(chan *newWorkReq),
+ getWorkCh: make(chan *getWorkReq),
+ taskCh: make(chan *task),
+ resultCh: make(chan *types.Block, resultQueueSize),
+ exitCh: make(chan struct{}),
+ startCh: make(chan struct{}, 1),
+ resubmitIntervalCh: make(chan time.Duration),
+ resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize),
+ noempty: 1,
+ interruptCommitFlag: config.CommitInterruptFlag,
}
worker.profileCount = new(int32)
// Subscribe NewTxsEvent for tx pool
@@ -310,6 +316,19 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)
+ interruptedTxCache, err := lru.New(vm.InterruptedTxCacheSize)
+ if err != nil {
+ log.Warn("Failed to create interrupted tx cache", "err", err)
+ }
+
+ worker.interruptedTxCache = &vm.TxCache{
+ Cache: interruptedTxCache,
+ }
+
+ if !worker.interruptCommitFlag {
+ worker.noempty = 0
+ }
+
// Sanitize recommit interval if the user-specified one is too short.
recommit := worker.config.Recommit
if recommit < minRecommitInterval {
@@ -929,10 +948,12 @@ func (w *worker) updateSnapshot(env *environment) {
w.snapshotState = env.state.Copy()
}
-func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
+func (w *worker) commitTransaction(env *environment, tx *types.Transaction, interruptCtx context.Context) ([]*types.Log, error) {
snap := env.state.Snapshot()
- receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
+ // nolint : staticcheck
+ interruptCtx = vm.SetCurrentTxOnContext(interruptCtx, tx.Hash())
+ receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig(), interruptCtx)
if err != nil {
env.state.RevertToSnapshot(snap)
return nil, err
@@ -951,6 +972,47 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
}
var coalescedLogs []*types.Log
+ var depsMVReadList [][]blockstm.ReadDescriptor
+
+ var depsMVFullWriteList [][]blockstm.WriteDescriptor
+
+ var mvReadMapList []map[blockstm.Key]blockstm.ReadDescriptor
+
+ var deps map[int]map[int]bool
+
+ chDeps := make(chan blockstm.TxDep)
+
+ var count int
+
+ var depsWg sync.WaitGroup
+
+ EnableMVHashMap := false
+
+ // create and add empty mvHashMap in statedb
+ if EnableMVHashMap {
+ depsMVReadList = [][]blockstm.ReadDescriptor{}
+
+ depsMVFullWriteList = [][]blockstm.WriteDescriptor{}
+
+ mvReadMapList = []map[blockstm.Key]blockstm.ReadDescriptor{}
+
+ deps = map[int]map[int]bool{}
+
+ chDeps = make(chan blockstm.TxDep)
+
+ count = 0
+
+ depsWg.Add(1)
+
+ go func(chDeps chan blockstm.TxDep) {
+ for t := range chDeps {
+ deps = blockstm.UpdateDeps(deps, t)
+ }
+
+ depsWg.Done()
+ }(chDeps)
+ }
+
initialGasLimit := env.gasPool.Gas()
initialTxs := txs.GetTxs()
@@ -969,12 +1031,15 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
mainloop:
for {
- // case of interrupting by timeout
if interruptCtx != nil {
+ if EnableMVHashMap {
+ env.state.AddEmptyMVHashMap()
+ }
+
// case of interrupting by timeout
select {
case <-interruptCtx.Done():
- commitInterruptCounter.Inc(1)
+ txCommitInterruptCounter.Inc(1)
log.Warn("Tx Level Interrupt")
break mainloop
default:
@@ -1038,7 +1103,7 @@ mainloop:
start = time.Now()
})
- logs, err := w.commitTransaction(env, tx)
+ logs, err := w.commitTransaction(env, tx, interruptCtx)
switch {
case errors.Is(err, core.ErrGasLimitReached):
@@ -1060,6 +1125,22 @@ mainloop:
// Everything ok, collect the logs and shift in the next transaction from the same account
coalescedLogs = append(coalescedLogs, logs...)
env.tcount++
+
+ if EnableMVHashMap {
+ depsMVReadList = append(depsMVReadList, env.state.MVReadList())
+ depsMVFullWriteList = append(depsMVFullWriteList, env.state.MVFullWriteList())
+ mvReadMapList = append(mvReadMapList, env.state.MVReadMap())
+
+ temp := blockstm.TxDep{
+ Index: env.tcount - 1,
+ ReadList: depsMVReadList[count],
+ FullWriteList: depsMVFullWriteList,
+ }
+
+ chDeps <- temp
+ count++
+ }
+
txs.Shift()
log.OnDebug(func(lg log.Logging) {
@@ -1077,6 +1158,50 @@ mainloop:
log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
txs.Shift()
}
+
+ if EnableMVHashMap {
+ env.state.ClearReadMap()
+ env.state.ClearWriteMap()
+ }
+ }
+
+ // nolint:nestif
+ if EnableMVHashMap {
+ close(chDeps)
+ depsWg.Wait()
+
+ if len(mvReadMapList) > 0 {
+ tempDeps := make([][]uint64, len(mvReadMapList))
+
+ for j := range deps[0] {
+ tempDeps[0] = append(tempDeps[0], uint64(j))
+ }
+
+ delayFlag := true
+
+ for i := 1; i <= len(mvReadMapList)-1; i++ {
+ reads := mvReadMapList[i-1]
+
+ _, ok1 := reads[blockstm.NewSubpathKey(env.coinbase, state.BalancePath)]
+ _, ok2 := reads[blockstm.NewSubpathKey(common.HexToAddress(w.chainConfig.Bor.CalculateBurntContract(env.header.Number.Uint64())), state.BalancePath)]
+
+ if ok1 || ok2 {
+ delayFlag = false
+ }
+
+ for j := range deps[i] {
+ tempDeps[i] = append(tempDeps[i], uint64(j))
+ }
+ }
+
+ if delayFlag {
+ env.header.TxDependency = tempDeps
+ } else {
+ env.header.TxDependency = nil
+ }
+ } else {
+ env.header.TxDependency = nil
+ }
}
if !w.isRunning() && len(coalescedLogs) > 0 {
@@ -1441,8 +1566,20 @@ func (w *worker) generateWork(ctx context.Context, params *generateParams) (*typ
}
defer work.discard()
- interruptCtx, stopFn := getInterruptTimer(ctx, work, w.chain.CurrentBlock())
- defer stopFn()
+ // nolint : contextcheck
+ var interruptCtx = context.Background()
+
+ stopFn := func() {}
+
+ defer func() {
+ stopFn()
+ }()
+
+ if w.interruptCommitFlag {
+ interruptCtx, stopFn = getInterruptTimer(ctx, work, w.chain.CurrentBlock())
+ // nolint : staticcheck
+ interruptCtx = vm.PutCache(interruptCtx, w.interruptedTxCache)
+ }
w.fillTransactions(ctx, nil, work, interruptCtx)
@@ -1482,7 +1619,7 @@ func (w *worker) commitWork(ctx context.Context, interrupt *int32, noempty bool,
return
}
- //nolint:contextcheck
+ // nolint:contextcheck
var interruptCtx = context.Background()
stopFn := func() {}
@@ -1490,8 +1627,10 @@ func (w *worker) commitWork(ctx context.Context, interrupt *int32, noempty bool,
stopFn()
}()
- if !noempty {
+ if !noempty && w.interruptCommitFlag {
interruptCtx, stopFn = getInterruptTimer(ctx, work, w.chain.CurrentBlock())
+ // nolint : staticcheck
+ interruptCtx = vm.PutCache(interruptCtx, w.interruptedTxCache)
}
ctx, span := tracing.StartSpan(ctx, "commitWork")
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 7b975c7d5e..81743dd59d 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -19,7 +19,6 @@ package miner
import (
"math/big"
"os"
- "sync"
"sync/atomic"
"testing"
"time"
@@ -87,7 +86,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool, isBor bool) {
chainConfig.LondonBlock = big.NewInt(0)
- w, b, _ := NewTestWorker(t, chainConfig, engine, db, 0, 0, 0)
+ w, b, _ := NewTestWorker(t, chainConfig, engine, db, 0, 0, 0, 0)
defer w.close()
// This test chain imports the mined blocks.
@@ -193,7 +192,7 @@ func TestEmptyWorkClique(t *testing.T) {
func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
defer engine.Close()
- w, _, _ := NewTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, 0, 0)
+ w, _, _ := NewTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, 0, 0, 0)
defer w.close()
var (
@@ -247,7 +246,7 @@ func TestStreamUncleBlock(t *testing.T) {
ethash := ethash.NewFaker()
defer ethash.Close()
- w, b, _ := NewTestWorker(t, ethashChainConfig, ethash, rawdb.NewMemoryDatabase(), 1, 0, 0)
+ w, b, _ := NewTestWorker(t, ethashChainConfig, ethash, rawdb.NewMemoryDatabase(), 1, 0, 0, 0)
defer w.close()
var taskCh = make(chan struct{})
@@ -309,7 +308,7 @@ func TestRegenerateMiningBlockClique(t *testing.T) {
func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
defer engine.Close()
- w, b, _ := NewTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, 0, 0)
+ w, b, _ := NewTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, 0, 0, 0)
defer w.close()
var taskCh = make(chan struct{}, 3)
@@ -380,7 +379,7 @@ func TestAdjustIntervalClique(t *testing.T) {
func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
defer engine.Close()
- w, _, _ := NewTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, 0, 0)
+ w, _, _ := NewTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, 0, 0, 0)
defer w.close()
w.skipSealHook = func(task *task) bool {
@@ -488,7 +487,7 @@ func TestGetSealingWorkPostMerge(t *testing.T) {
func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, postMerge bool) {
defer engine.Close()
- w, b, _ := NewTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, 0, 0)
+ w, b, _ := NewTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0, 0, 0, 0)
defer w.close()
w.setExtra([]byte{0x01, 0x02})
@@ -624,23 +623,41 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co
}
}
-// nolint:paralleltest
+// nolint : paralleltest
+// TestCommitInterruptExperimentBor tests the commit interrupt experiment for bor consensus by inducing an artificial delay at transaction level.
func TestCommitInterruptExperimentBor(t *testing.T) {
// with 1 sec block time and 200 millisec tx delay we should get 5 txs per block
- testCommitInterruptExperimentBor(t, 200, 5)
+ testCommitInterruptExperimentBor(t, 200, 5, 0)
+
+ time.Sleep(2 * time.Second)
- time.Sleep(3 * time.Second)
// with 1 sec block time and 100 millisec tx delay we should get 10 txs per block
- testCommitInterruptExperimentBor(t, 100, 10)
+ testCommitInterruptExperimentBor(t, 100, 10, 0)
+}
+
+// nolint : paralleltest
+// TestCommitInterruptExperimentBorContract tests the commit interrupt experiment for bor consensus by inducing an artificial delay at OPCODE level.
+func TestCommitInterruptExperimentBorContract(t *testing.T) {
+ // pre-calculated number of OPCODES = 123. 7*123=861 < 1000, 1 tx is possible but 2 tx per block will not be possible.
+ testCommitInterruptExperimentBorContract(t, 0, 1, 7)
+ time.Sleep(2 * time.Second)
+	// pre-calculated number of OPCODES = 123. 2*123=246 < 1000, 4 tx is possible but 5 tx per block will not be possible. But only 3 happen due to other overheads.
+ testCommitInterruptExperimentBorContract(t, 0, 3, 2)
+ time.Sleep(2 * time.Second)
+ // pre-calculated number of OPCODES = 123. 3*123=369 < 1000, 2 tx is possible but 3 tx per block will not be possible.
+ testCommitInterruptExperimentBorContract(t, 0, 2, 3)
}
-// nolint:thelper
-func testCommitInterruptExperimentBor(t *testing.T, delay uint, txCount int) {
+// nolint : thelper
+// testCommitInterruptExperimentBorContract is a helper function for testing the commit interrupt experiment for bor consensus.
+func testCommitInterruptExperimentBorContract(t *testing.T, delay uint, txCount int, opcodeDelay uint) {
var (
engine consensus.Engine
chainConfig *params.ChainConfig
db = rawdb.NewMemoryDatabase()
ctrl *gomock.Controller
+ txInTxpool = 100
+ txs = make([]*types.Transaction, 0, txInTxpool)
)
chainConfig = params.BorUnittestChainConfig
@@ -648,38 +665,91 @@ func testCommitInterruptExperimentBor(t *testing.T, delay uint, txCount int) {
log.Root().SetHandler(log.LvlFilterHandler(4, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
engine, ctrl = getFakeBorFromConfig(t, chainConfig)
+
+ w, b, _ := NewTestWorker(t, chainConfig, engine, db, 0, 1, delay, opcodeDelay)
defer func() {
+ w.close()
engine.Close()
+ db.Close()
ctrl.Finish()
}()
- w, b, _ := NewTestWorker(t, chainConfig, engine, db, 0, 1, delay)
- defer w.close()
+ // nonce 0 tx
+ tx, addr := b.newStorageCreateContractTx()
+ if err := b.TxPool().AddRemote(tx); err != nil {
+ t.Fatal(err)
+ }
- wg := new(sync.WaitGroup)
- wg.Add(1)
+ time.Sleep(4 * time.Second)
- go func() {
- wg.Done()
+ // nonce starts from 1 because we already have one tx
+ initNonce := uint64(1)
- for {
- tx := b.newRandomTx(false)
- if err := b.TxPool().AddRemote(tx); err != nil {
- t.Log(err)
- }
+ for i := 0; i < txInTxpool; i++ {
+ tx := b.newStorageContractCallTx(addr, initNonce+uint64(i))
+ txs = append(txs, tx)
+ }
- time.Sleep(20 * time.Millisecond)
- }
+ if err := b.TxPool().AddRemotes(txs); err != nil {
+ t.Fatal(err)
+ }
+
+ // Start mining!
+ w.start()
+ time.Sleep(5 * time.Second)
+ w.stop()
+
+ currentBlockNumber := w.current.header.Number.Uint64()
+ assert.Check(t, txCount >= w.chain.GetBlockByNumber(currentBlockNumber-1).Transactions().Len())
+ assert.Check(t, 0 < w.chain.GetBlockByNumber(currentBlockNumber-1).Transactions().Len()+1)
+}
+
+// nolint : thelper
+// testCommitInterruptExperimentBor is a helper function for testing the commit interrupt experiment for bor consensus.
+func testCommitInterruptExperimentBor(t *testing.T, delay uint, txCount int, opcodeDelay uint) {
+ var (
+ engine consensus.Engine
+ chainConfig *params.ChainConfig
+ db = rawdb.NewMemoryDatabase()
+ ctrl *gomock.Controller
+ txInTxpool = 100
+ txs = make([]*types.Transaction, 0, txInTxpool)
+ )
+
+ chainConfig = params.BorUnittestChainConfig
+
+ log.Root().SetHandler(log.LvlFilterHandler(4, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ engine, ctrl = getFakeBorFromConfig(t, chainConfig)
+
+ w, b, _ := NewTestWorker(t, chainConfig, engine, db, 0, 1, delay, opcodeDelay)
+ defer func() {
+ w.close()
+ engine.Close()
+ db.Close()
+ ctrl.Finish()
}()
- wg.Wait()
+ // nonce starts from 0 because have no txs yet
+ initNonce := uint64(0)
+
+ for i := 0; i < txInTxpool; i++ {
+ tx := b.newRandomTxWithNonce(false, initNonce+uint64(i))
+ txs = append(txs, tx)
+ }
+
+ if err := b.TxPool().AddRemotes(txs); err != nil {
+ t.Fatal(err)
+ }
// Start mining!
w.start()
time.Sleep(5 * time.Second)
w.stop()
- assert.Equal(t, txCount, w.chain.CurrentBlock().Transactions().Len())
+ currentBlockNumber := w.current.header.Number.Uint64()
+ assert.Check(t, txCount >= w.chain.GetBlockByNumber(currentBlockNumber-1).Transactions().Len())
+ assert.Check(t, 0 < w.chain.GetBlockByNumber(currentBlockNumber-1).Transactions().Len())
}
func BenchmarkBorMining(b *testing.B) {
@@ -713,7 +783,7 @@ func BenchmarkBorMining(b *testing.B) {
chainConfig.LondonBlock = big.NewInt(0)
- w, back, _ := NewTestWorker(b, chainConfig, engine, db, 0, 0, 0)
+ w, back, _ := NewTestWorker(b, chainConfig, engine, db, 0, 0, 0, 0)
defer w.close()
// This test chain imports the mined blocks.
@@ -779,3 +849,126 @@ func BenchmarkBorMining(b *testing.B) {
}
}
}
+
+// uses core.NewParallelBlockChain to use the dependencies present in the block header
+// params.BorUnittestChainConfig contains the ParallelUniverseBlock as big.NewInt(5), so the first 4 blocks will not have metadata.
+//nolint:gocognit
+func BenchmarkBorMiningBlockSTMMetadata(b *testing.B) {
+ chainConfig := params.BorUnittestChainConfig
+
+ ctrl := gomock.NewController(b)
+ defer ctrl.Finish()
+
+ ethAPIMock := api.NewMockCaller(ctrl)
+ ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+ spanner := bor.NewMockSpanner(ctrl)
+ spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
+ {
+ ID: 0,
+ Address: TestBankAddress,
+ VotingPower: 100,
+ ProposerPriority: 0,
+ },
+ }, nil).AnyTimes()
+
+ heimdallClientMock := mocks.NewMockIHeimdallClient(ctrl)
+ heimdallClientMock.EXPECT().Close().Times(1)
+
+ contractMock := bor.NewMockGenesisContract(ctrl)
+
+ db, _, _ := NewDBForFakes(b)
+
+ engine := NewFakeBor(b, db, chainConfig, ethAPIMock, spanner, heimdallClientMock, contractMock)
+ defer engine.Close()
+
+ chainConfig.LondonBlock = big.NewInt(0)
+
+ w, back, _ := NewTestWorker(b, chainConfig, engine, db, 0, 0, 0, 0)
+ defer w.close()
+
+ // This test chain imports the mined blocks.
+ db2 := rawdb.NewMemoryDatabase()
+ back.Genesis.MustCommit(db2)
+
+ chain, _ := core.NewParallelBlockChain(db2, nil, back.chain.Config(), engine, vm.Config{ParallelEnable: true, ParallelSpeculativeProcesses: 8}, nil, nil, nil)
+ defer chain.Stop()
+
+ // Ignore empty commit here for less noise.
+ w.skipSealHook = func(task *task) bool {
+ return len(task.receipts) == 0
+ }
+
+ // fulfill tx pool
+ const (
+ totalGas = testGas + params.TxGas
+ totalBlocks = 10
+ )
+
+ var err error
+
+ txInBlock := int(back.Genesis.GasLimit/totalGas) + 1
+
+ // a bit risky
+ for i := 0; i < 2*totalBlocks*txInBlock; i++ {
+ err = back.txPool.AddLocal(back.newRandomTx(true))
+ if err != nil {
+ b.Fatal("while adding a local transaction", err)
+ }
+
+ err = back.txPool.AddLocal(back.newRandomTx(false))
+ if err != nil {
+ b.Fatal("while adding a remote transaction", err)
+ }
+ }
+
+ // Wait for mined blocks.
+ sub := w.mux.Subscribe(core.NewMinedBlockEvent{})
+ defer sub.Unsubscribe()
+
+ b.ResetTimer()
+
+ prev := uint64(time.Now().Unix())
+
+ // Start mining!
+ w.start()
+
+ blockPeriod, ok := back.Genesis.Config.Bor.Period["0"]
+ if !ok {
+ blockPeriod = 1
+ }
+
+ for i := 0; i < totalBlocks; i++ {
+ select {
+ case ev := <-sub.Chan():
+ block := ev.Data.(core.NewMinedBlockEvent).Block
+
+ if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
+ b.Fatalf("failed to insert new mined block %d: %v", block.NumberU64(), err)
+ }
+
+ // check for dependencies for block number > 4
+ if block.NumberU64() <= 4 {
+ if block.TxDependency() != nil {
+ b.Fatalf("dependency not nil")
+ }
+ } else {
+ deps := block.TxDependency()
+ if len(deps[0]) != 0 {
+ b.Fatalf("wrong dependency")
+ }
+ for i := 1; i < block.Transactions().Len(); i++ {
+ if deps[i][0] != uint64(i-1) || len(deps[i]) != 1 {
+ b.Fatalf("wrong dependency")
+ }
+ }
+ }
+
+ b.Log("block", block.NumberU64(), "time", block.Time()-prev, "txs", block.Transactions().Len(), "gasUsed", block.GasUsed(), "gasLimit", block.GasLimit())
+
+ prev = block.Time()
+ case <-time.After(time.Duration(blockPeriod) * time.Second):
+ b.Fatalf("timeout")
+ }
+ }
+}
diff --git a/node/defaults.go b/node/defaults.go
index fd0277e29d..a32fa868ef 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -21,6 +21,7 @@ import (
"os/user"
"path/filepath"
"runtime"
+ "time"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/nat"
@@ -60,9 +61,10 @@ var DefaultConfig = Config{
WSModules: []string{"net", "web3"},
GraphQLVirtualHosts: []string{"localhost"},
P2P: p2p.Config{
- ListenAddr: ":30303",
- MaxPeers: 50,
- NAT: nat.Any(),
+ ListenAddr: ":30303",
+ MaxPeers: 50,
+ NAT: nat.Any(),
+ TxArrivalWait: 500 * time.Millisecond,
},
}
diff --git a/p2p/server.go b/p2p/server.go
index 7de8504bdc..49ed77bb39 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -156,6 +156,10 @@ type Config struct {
Logger log.Logger `toml:",omitempty"`
clock mclock.Clock
+
+ // TxArrivalWait is the duration that the node will wait after seeing
+ // an announced transaction before explicitly requesting it.
+ TxArrivalWait time.Duration
}
// Server manages all peer connections.
diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml
index 7326ca13e1..869dae3c58 100644
--- a/packaging/templates/mainnet-v1/archive/config.toml
+++ b/packaging/templates/mainnet-v1/archive/config.toml
@@ -31,6 +31,7 @@ gcmode = "archive"
# netrestrict = ""
# nodekey = ""
# nodekeyhex = ""
+ # txarrivalwait = "500ms"
# [p2p.discovery]
# v5disc = false
# bootnodes = []
@@ -65,6 +66,7 @@ gcmode = "archive"
# etherbase = ""
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
[jsonrpc]
ipcpath = "/var/lib/bor/bor.ipc"
diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml
index 09125d4aff..7413bc63f2 100644
--- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml
+++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml
@@ -31,6 +31,7 @@ syncmode = "full"
# netrestrict = ""
# nodekey = ""
# nodekeyhex = ""
+ # txarrivalwait = "500ms"
# [p2p.discovery]
# v5disc = false
# bootnodes = []
@@ -65,6 +66,7 @@ syncmode = "full"
# etherbase = ""
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
[jsonrpc]
ipcpath = "/var/lib/bor/bor.ipc"
diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
index 59d0ef9672..1b21818adc 100644
--- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
+++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
@@ -33,6 +33,7 @@ syncmode = "full"
# netrestrict = ""
# nodekey = ""
# nodekeyhex = ""
+ # txarrivalwait = "500ms"
# [p2p.discovery]
# v5disc = false
# bootnodes = []
@@ -67,6 +68,7 @@ syncmode = "full"
# etherbase = ""
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
[jsonrpc]
ipcpath = "/var/lib/bor/bor.ipc"
diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml
index 00bdca179d..ffbaf984ca 100644
--- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml
+++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml
@@ -33,6 +33,7 @@ syncmode = "full"
# netrestrict = ""
# nodekey = ""
# nodekeyhex = ""
+ # txarrivalwait = "500ms"
# [p2p.discovery]
# v5disc = false
# bootnodes = []
@@ -67,6 +68,7 @@ syncmode = "full"
# etherbase = ""
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
[jsonrpc]
ipcpath = "/var/lib/bor/bor.ipc"
diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control
index 8e50843583..4000915de1 100644
--- a/packaging/templates/package_scripts/control
+++ b/packaging/templates/package_scripts/control
@@ -1,5 +1,5 @@
Source: bor
-Version: 0.3.9-stable
+Version: 0.4.0
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64
index 64682b7b31..94c5bf09c0 100644
--- a/packaging/templates/package_scripts/control.arm64
+++ b/packaging/templates/package_scripts/control.arm64
@@ -1,5 +1,5 @@
Source: bor
-Version: 0.3.9-stable
+Version: 0.4.0
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64
index 66dc5f4f30..1a285a6af0 100644
--- a/packaging/templates/package_scripts/control.profile.amd64
+++ b/packaging/templates/package_scripts/control.profile.amd64
@@ -1,5 +1,5 @@
Source: bor-profile
-Version: 0.3.9-stable
+Version: 0.4.0
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64
index 5c9a9ca472..f1658add4c 100644
--- a/packaging/templates/package_scripts/control.profile.arm64
+++ b/packaging/templates/package_scripts/control.profile.arm64
@@ -1,5 +1,5 @@
Source: bor-profile
-Version: 0.3.9-stable
+Version: 0.4.0
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator
index dbe6ad6c63..97fc1adcbd 100644
--- a/packaging/templates/package_scripts/control.validator
+++ b/packaging/templates/package_scripts/control.validator
@@ -1,5 +1,5 @@
Source: bor-profile
-Version: 0.3.9-stable
+Version: 0.4.0
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64
index 90da8544e4..f88dd5836d 100644
--- a/packaging/templates/package_scripts/control.validator.arm64
+++ b/packaging/templates/package_scripts/control.validator.arm64
@@ -1,5 +1,5 @@
Source: bor-profile
-Version: 0.3.9-stable
+Version: 0.4.0
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/systemd/bor_bootnode.service b/packaging/templates/systemd/bor_bootnode.service
new file mode 100644
index 0000000000..756adfb08c
--- /dev/null
+++ b/packaging/templates/systemd/bor_bootnode.service
@@ -0,0 +1,16 @@
+[Unit]
+ Description=bor
+ StartLimitIntervalSec=500
+ StartLimitBurst=5
+
+[Service]
+ Restart=on-failure
+ RestartSec=5s
+ ExecStart=/usr/bin/bor bootnode -nodekey /var/lib/bor/config/nodekey -addr ":30303" -verbosity 5
+ Type=simple
+ KillSignal=SIGINT
+ User=bor
+ TimeoutStopSec=120
+
+[Install]
+ WantedBy=multi-user.target
diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml
index 6b8c13610b..6b372e9b60 100644
--- a/packaging/templates/testnet-v4/archive/config.toml
+++ b/packaging/templates/testnet-v4/archive/config.toml
@@ -31,6 +31,7 @@ gcmode = "archive"
# netrestrict = ""
# nodekey = ""
# nodekeyhex = ""
+ # txarrivalwait = "500ms"
# [p2p.discovery]
# v5disc = false
# bootnodes = []
@@ -65,6 +66,7 @@ gcmode = "archive"
# etherbase = ""
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
[jsonrpc]
ipcpath = "/var/lib/bor/bor.ipc"
diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml
index b9632fe336..cf81d9a9d6 100644
--- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml
+++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml
@@ -31,6 +31,7 @@ syncmode = "full"
# netrestrict = ""
# nodekey = ""
# nodekeyhex = ""
+ # txarrivalwait = "500ms"
# [p2p.discovery]
# v5disc = false
# bootnodes = []
@@ -65,6 +66,7 @@ syncmode = "full"
# etherbase = ""
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
[jsonrpc]
ipcpath = "/var/lib/bor/bor.ipc"
diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml
index 8dc6daa5ec..57e3ed286c 100644
--- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml
+++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml
@@ -33,6 +33,7 @@ syncmode = "full"
# netrestrict = ""
# nodekey = ""
# nodekeyhex = ""
+ # txarrivalwait = "500ms"
# [p2p.discovery]
# v5disc = false
# bootnodes = []
@@ -67,6 +68,7 @@ syncmode = "full"
# etherbase = ""
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
[jsonrpc]
ipcpath = "/var/lib/bor/bor.ipc"
diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml
index 97a9162e09..1f0dabff5d 100644
--- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml
+++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml
@@ -33,6 +33,7 @@ syncmode = "full"
# netrestrict = ""
# nodekey = ""
# nodekeyhex = ""
+ # txarrivalwait = "500ms"
# [p2p.discovery]
# v5disc = false
# bootnodes = []
@@ -67,6 +68,7 @@ syncmode = "full"
# etherbase = ""
# extradata = ""
# recommit = "2m5s"
+ # commitinterrupt = true
[jsonrpc]
ipcpath = "/var/lib/bor/bor.ipc"
diff --git a/params/config.go b/params/config.go
index 9833c9eac5..8c4e5fb524 100644
--- a/params/config.go
+++ b/params/config.go
@@ -311,6 +311,7 @@ var (
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
Bor: &BorConfig{
+ ParallelUniverseBlock: big.NewInt(5),
Period: map[string]uint64{
"0": 1,
},
@@ -349,8 +350,13 @@ var (
BerlinBlock: big.NewInt(13996000),
LondonBlock: big.NewInt(22640000),
Bor: &BorConfig{
- JaipurBlock: big.NewInt(22770000),
- DelhiBlock: big.NewInt(29638656),
+ JaipurBlock: big.NewInt(22770000),
+ DelhiBlock: big.NewInt(29638656),
+ ParallelUniverseBlock: big.NewInt(0),
+ IndoreBlock: big.NewInt(37075456),
+ StateSyncConfirmationDelay: map[string]uint64{
+ "37075456": 128,
+ },
Period: map[string]uint64{
"0": 2,
"25275000": 5,
@@ -403,8 +409,14 @@ var (
BerlinBlock: big.NewInt(14750000),
LondonBlock: big.NewInt(23850000),
Bor: &BorConfig{
- JaipurBlock: big.NewInt(23850000),
- DelhiBlock: big.NewInt(38189056),
+ JaipurBlock: big.NewInt(23850000),
+ DelhiBlock: big.NewInt(38189056),
+ ParallelUniverseBlock: big.NewInt(0),
+ IndoreBlock: big.NewInt(44934656),
+ StateSyncConfirmationDelay: map[string]uint64{
+ "44934656": 128,
+ },
+
Period: map[string]uint64{
"0": 2,
},
@@ -575,17 +587,20 @@ func (c *CliqueConfig) String() string {
// BorConfig is the consensus engine configs for Matic bor based sealing.
type BorConfig struct {
- Period map[string]uint64 `json:"period"` // Number of seconds between blocks to enforce
- ProducerDelay map[string]uint64 `json:"producerDelay"` // Number of seconds delay between two producer interval
- Sprint map[string]uint64 `json:"sprint"` // Epoch length to proposer
- BackupMultiplier map[string]uint64 `json:"backupMultiplier"` // Backup multiplier to determine the wiggle time
- ValidatorContract string `json:"validatorContract"` // Validator set contract
- StateReceiverContract string `json:"stateReceiverContract"` // State receiver contract
- OverrideStateSyncRecords map[string]int `json:"overrideStateSyncRecords"` // override state records count
- BlockAlloc map[string]interface{} `json:"blockAlloc"`
- BurntContract map[string]string `json:"burntContract"` // governance contract where the token will be sent to and burnt in london fork
- JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on jaipur)
- DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on delhi)
+ Period map[string]uint64 `json:"period"` // Number of seconds between blocks to enforce
+ ProducerDelay map[string]uint64 `json:"producerDelay"` // Number of seconds delay between two producer interval
+ Sprint map[string]uint64 `json:"sprint"` // Epoch length to proposer
+ BackupMultiplier map[string]uint64 `json:"backupMultiplier"` // Backup multiplier to determine the wiggle time
+ ValidatorContract string `json:"validatorContract"` // Validator set contract
+ StateReceiverContract string `json:"stateReceiverContract"` // State receiver contract
+ OverrideStateSyncRecords map[string]int `json:"overrideStateSyncRecords"` // override state records count
+ BlockAlloc map[string]interface{} `json:"blockAlloc"`
+ BurntContract map[string]string `json:"burntContract"` // governance contract where the token will be sent to and burnt in london fork
+ JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on jaipur)
+ DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on delhi)
+ ParallelUniverseBlock *big.Int `json:"parallelUniverseBlock"` // TODO: update all occurrence, change name and finalize number (hardfork for block-stm related changes)
+ IndoreBlock *big.Int `json:"indoreBlock"` // Indore switch block (nil = no fork, 0 = already on indore)
+ StateSyncConfirmationDelay map[string]uint64 `json:"stateSyncConfirmationDelay"` // StateSync Confirmation Delay, in seconds, to calculate `to`
}
// String implements the stringer interface, returning the consensus engine details.
@@ -594,11 +609,11 @@ func (b *BorConfig) String() string {
}
func (c *BorConfig) CalculateProducerDelay(number uint64) uint64 {
- return c.calculateSprintSizeHelper(c.ProducerDelay, number)
+ return borKeyValueConfigHelper(c.ProducerDelay, number)
}
func (c *BorConfig) CalculateSprint(number uint64) uint64 {
- return c.calculateSprintSizeHelper(c.Sprint, number)
+ return borKeyValueConfigHelper(c.Sprint, number)
}
func (c *BorConfig) CalculateBackupMultiplier(number uint64) uint64 {
@@ -617,6 +632,23 @@ func (c *BorConfig) IsDelhi(number *big.Int) bool {
return isForked(c.DelhiBlock, number)
}
+func (c *BorConfig) IsIndore(number *big.Int) bool {
+ return isForked(c.IndoreBlock, number)
+}
+
+func (c *BorConfig) CalculateStateSyncDelay(number uint64) uint64 {
+ return borKeyValueConfigHelper(c.StateSyncConfirmationDelay, number)
+}
+
+// TODO: modify this function once the block number is finalized
+func (c *BorConfig) IsParallelUniverse(number *big.Int) bool {
+ if c.ParallelUniverseBlock == big.NewInt(0) {
+ return false
+ }
+
+ return isForked(c.ParallelUniverseBlock, number)
+}
+
func (c *BorConfig) IsSprintStart(number uint64) bool {
return number%c.CalculateSprint(number) == 0
}
@@ -641,7 +673,7 @@ func (c *BorConfig) calculateBorConfigHelper(field map[string]uint64, number uin
return field[keys[len(keys)-1]]
}
-func (c *BorConfig) calculateSprintSizeHelper(field map[string]uint64, number uint64) uint64 {
+func borKeyValueConfigHelper(field map[string]uint64, number uint64) uint64 {
keys := make([]string, 0, len(field))
for k := range field {
keys = append(keys, k)
diff --git a/params/version.go b/params/version.go
index c8cb20f902..f1f644eb41 100644
--- a/params/version.go
+++ b/params/version.go
@@ -21,13 +21,14 @@ import (
)
const (
- GitCommit = ""
- VersionMajor = 0 // Major version component of the current release
- VersionMinor = 3 // Minor version component of the current release
- VersionPatch = 9 // Patch version component of the current release
- VersionMeta = "stable" // Version metadata to append to the version string
+ VersionMajor = 0 // Major version component of the current release
+ VersionMinor = 4 // Minor version component of the current release
+ VersionPatch = 0 // Patch version component of the current release
+ VersionMeta = "" // Version metadata to append to the version string
)
+var GitCommit string
+
// Version holds the textual version string.
var Version = func() string {
return fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
@@ -48,7 +49,7 @@ var VersionWithMetaCommitDetails = func() string {
if VersionMeta != "" {
v += "-" + VersionMeta
}
- v_git := fmt.Sprintf("Version : %s\nGitCommit : %s\n", v, GitCommit)
+ v_git := fmt.Sprintf("Version: %s\nGitCommit: %s", v, GitCommit)
return v_git
}()
diff --git a/scripts/getconfig.go b/scripts/getconfig.go
index 0d44a84016..c609fbb606 100644
--- a/scripts/getconfig.go
+++ b/scripts/getconfig.go
@@ -172,6 +172,7 @@ var nameTagMap = map[string]string{
"bootnodes": "bootnodes",
"maxpeers": "maxpeers",
"maxpendpeers": "maxpendpeers",
+ "txarrivalwait": "txarrivalwait",
"nat": "nat",
"nodiscover": "nodiscover",
"v5disc": "v5disc",
diff --git a/scripts/updateVersion.sh b/scripts/updateVersion.sh
new file mode 100755
index 0000000000..ab8f3aedfa
--- /dev/null
+++ b/scripts/updateVersion.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+set -e
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+
+echo "The version is of form - VersionMajor.VersionMinor.VersionPatch-VersionMeta"
+echo "Let's take 0.3.4-beta as an example. Here:"
+echo "* VersionMajor is - 0"
+echo "* VersionMinor is - 3"
+echo "* VersionPatch is - 4"
+echo "* VersionMeta is - beta"
+echo ""
+echo "Now, enter the new version step-by-step below:"
+
+version=""
+
+# VersionMajor
+read -p "* VersionMajor: " VersionMajor
+if [ -z "$VersionMajor" ]
+then
+ echo "VersionMajor cannot be NULL"
+ exit -1
+fi
+version+=$VersionMajor
+
+# VersionMinor
+read -p "* VersionMinor: " VersionMinor
+if [ -z "$VersionMinor" ]
+then
+ echo "VersionMinor cannot be NULL"
+ exit -1
+fi
+version+="."$VersionMinor
+
+# VersionPatch
+read -p "* VersionPatch: " VersionPatch
+if [ -z "$VersionPatch" ]
+then
+ echo "VersionPatch cannot be NULL"
+ exit -1
+fi
+version+="."$VersionPatch
+
+# VersionMeta (optional)
+read -p "* VersionMeta (optional, press enter if not needed): " VersionMeta
+if [[ ! -z "$VersionMeta" ]]
+then
+ version+="-"$VersionMeta
+fi
+
+echo ""
+echo "New version is: $version"
+
+# update version in all the 6 templates
+replace="Version: "$version
+fileArray=(
+ "${DIR}/../packaging/templates/package_scripts/control"
+ "${DIR}/../packaging/templates/package_scripts/control.arm64"
+ "${DIR}/../packaging/templates/package_scripts/control.profile.amd64"
+ "${DIR}/../packaging/templates/package_scripts/control.profile.arm64"
+ "${DIR}/../packaging/templates/package_scripts/control.validator"
+ "${DIR}/../packaging/templates/package_scripts/control.validator.arm64"
+)
+for file in ${fileArray[@]}; do
+ # get the line starting with `Version` in the control file and store it in the $temp variable
+ temp=$(grep "^Version.*" $file)
+ sed -i '' "s%$temp%$replace%" $file
+done
+
+# update version in ../params/version.go
+versionFile="${DIR}/../params/version.go"
+sed -i '' "s% = .*// Major% = $VersionMajor // Major%g" $versionFile
+sed -i '' "s% = .*// Minor% = $VersionMinor // Minor%g" $versionFile
+sed -i '' "s% = .*// Patch% = $VersionPatch // Patch%g" $versionFile
+sed -i '' "s% = .*// Version metadata% = \"$VersionMeta\" // Version metadata%g" $versionFile
+gofmt -w $versionFile
+
+echo ""
+echo "Updating Version Done"
+
+exit 0
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 487fd2d4d8..64b9008fe3 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -176,17 +176,18 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
}
}
-/* See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II
+/*
+See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II
- Whether a block is valid or not is a bit subtle, it's defined by presence of
- blockHeader, transactions and uncleHeaders fields. If they are missing, the block is
- invalid and we must verify that we do not accept it.
+ Whether a block is valid or not is a bit subtle, it's defined by presence of
+ blockHeader, transactions and uncleHeaders fields. If they are missing, the block is
+ invalid and we must verify that we do not accept it.
- Since some tests mix valid and invalid blocks we need to check this for every block.
+ Since some tests mix valid and invalid blocks we need to check this for every block.
- If a block is invalid it does not necessarily fail the test, if it's invalidness is
- expected we are expected to ignore it and continue processing and then validate the
- post state.
+ If a block is invalid it does not necessarily fail the test, if it's invalidness is
+ expected we are expected to ignore it and continue processing and then validate the
+ post state.
*/
func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error) {
validBlocks := make([]btBlock, 0)
diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go
index e6e8188ce0..dd3359cb28 100644
--- a/tests/bor/bor_test.go
+++ b/tests/bor/bor_test.go
@@ -544,7 +544,7 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
insertNewBlock(t, chain, block)
}
- lastStateID, _ := _bor.GenesisContractsClient.LastStateId(sprintSize)
+ lastStateID, _ := _bor.GenesisContractsClient.LastStateId(nil, sprintSize, block.Hash())
// state 6 was not written
require.Equal(t, uint64(4), lastStateID.Uint64())
@@ -573,7 +573,7 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
insertNewBlock(t, chain, block)
}
- lastStateID, _ = _bor.GenesisContractsClient.LastStateId(spanSize)
+ lastStateID, _ = _bor.GenesisContractsClient.LastStateId(nil, spanSize, block.Hash())
require.Equal(t, uint64(6), lastStateID.Uint64())
}
diff --git a/tests/bor/helper.go b/tests/bor/helper.go
index c4b45f970d..06d2c6a069 100644
--- a/tests/bor/helper.go
+++ b/tests/bor/helper.go
@@ -257,7 +257,7 @@ func (b *blockGen) addTxWithChain(bc *core.BlockChain, statedb *state.StateDB, t
statedb.Prepare(tx.Hash(), len(b.txs))
- receipt, err := core.ApplyTransaction(bc.Config(), bc, &b.header.Coinbase, b.gasPool, statedb, b.header, tx, &b.header.GasUsed, vm.Config{})
+ receipt, err := core.ApplyTransaction(bc.Config(), bc, &b.header.Coinbase, b.gasPool, statedb, b.header, tx, &b.header.GasUsed, vm.Config{}, nil)
if err != nil {
panic(err)
}
diff --git a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
index d1d6fdc665..fc15e07c7e 100644
--- a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
+++ b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
@@ -83,7 +83,7 @@ func Fuzz(input []byte) int {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
- clock, rand,
+ clock, rand, 500*time.Millisecond,
)
f.Start()
defer f.Stop()
diff --git a/tests/state_test.go b/tests/state_test.go
index f18b84d16e..3ef251de14 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -303,7 +303,7 @@ func runBenchmark(b *testing.B, t *StateTest) {
for n := 0; n < b.N; n++ {
// Execute the message.
snapshot := statedb.Snapshot()
- _, _, err = evm.Call(sender, *msg.To(), msg.Data(), msg.Gas(), msg.Value())
+ _, _, err = evm.Call(sender, *msg.To(), msg.Data(), msg.Gas(), msg.Value(), nil)
if err != nil {
b.Error(err)
return
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 65f93bfbe3..ffee265e3f 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -232,7 +232,8 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
snapshot := statedb.Snapshot()
gaspool := new(core.GasPool)
gaspool.AddGas(block.GasLimit())
- if _, err := core.ApplyMessage(evm, msg, gaspool); err != nil {
+
+ if _, err := core.ApplyMessage(evm, msg, gaspool, nil); err != nil {
statedb.RevertToSnapshot(snapshot)
}