diff --git a/.github/workflows/packager.yml b/.github/workflows/packager.yml
index 7485aca976..f2aef42485 100644
--- a/.github/workflows/packager.yml
+++ b/.github/workflows/packager.yml
@@ -12,7 +12,9 @@ on:
jobs:
build:
- runs-on: ubuntu-18.04
+ runs-on:
+ group: ubuntu-runners
+ labels: 18.04RunnerT2Large
steps:
- name: Checkout
uses: actions/checkout@v2
diff --git a/Makefile b/Makefile
index 242435df76..a8a4b66e8d 100644
--- a/Makefile
+++ b/Makefile
@@ -59,7 +59,10 @@ ios:
@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
test:
- $(GOTEST) --timeout 5m -shuffle=on -cover -coverprofile=cover.out $(TESTALL)
+ $(GOTEST) --timeout 5m -shuffle=on -cover -short -coverprofile=cover.out -covermode=atomic $(TESTALL)
+
+test-txpool-race:
+ $(GOTEST) -run=TestPoolMiningDataRaces --timeout 600m -race -v ./core/
test-race:
$(GOTEST) --timeout 15m -race -shuffle=on $(TESTALL)
@@ -75,7 +78,7 @@ lint:
lintci-deps:
rm -f ./build/bin/golangci-lint
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.48.0
+ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.50.1
goimports:
goimports -local "$(PACKAGE)" -w .
diff --git a/builder/files/config.toml b/builder/files/config.toml
index 1b8d915b7b..cb790f371c 100644
--- a/builder/files/config.toml
+++ b/builder/files/config.toml
@@ -94,7 +94,7 @@ syncmode = "full"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
-# read = "30s"
+# read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go
index 6f1c964ada..cf2039b66c 100644
--- a/cmd/evm/internal/t8ntool/transaction.go
+++ b/cmd/evm/internal/t8ntool/transaction.go
@@ -24,6 +24,8 @@ import (
"os"
"strings"
+ "gopkg.in/urfave/cli.v1"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
@@ -32,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
- "gopkg.in/urfave/cli.v1"
)
type result struct {
diff --git a/common/debug/debug.go b/common/debug/debug.go
index 6a677e495d..056ebe2fa7 100644
--- a/common/debug/debug.go
+++ b/common/debug/debug.go
@@ -1,6 +1,7 @@
package debug
import (
+ "fmt"
"runtime"
)
@@ -26,3 +27,26 @@ func Callers(show int) []string {
return callers
}
+
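+// CodeLine returns the function name, file and line number of its caller.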
+func CodeLine() (string, string, int) {
+ pc, filename, line, _ := runtime.Caller(1)
+ return runtime.FuncForPC(pc).Name(), filename, line
+}
+
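+// CodeLineStr returns the caller's location formatted as "file:line - function".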
+func CodeLineStr() string {
+ pc, filename, line, _ := runtime.Caller(1)
+ return fmt.Sprintf("%s:%d - %s", filename, line, runtime.FuncForPC(pc).Name())
+}
+
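+// Stack returns a formatted stack trace of the calling goroutine or, when all
+// is true, of all goroutines, doubling the buffer until the trace fits.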
+func Stack(all bool) []byte {
+ buf := make([]byte, 4096)
+
+ for {
+ n := runtime.Stack(buf, all)
+ if n < len(buf) {
+ return buf[:n]
+ }
+
+ buf = make([]byte, 2*len(buf))
+ }
+}
diff --git a/common/math/big.go b/common/math/big.go
index 1af5b4d879..4ccf89e38c 100644
--- a/common/math/big.go
+++ b/common/math/big.go
@@ -20,6 +20,8 @@ package math
import (
"fmt"
"math/big"
+
+ "github.com/holiman/uint256"
)
// Various big integer limit values.
@@ -132,6 +134,7 @@ func MustParseBig256(s string) *big.Int {
// BigPow returns a ** b as a big integer.
func BigPow(a, b int64) *big.Int {
r := big.NewInt(a)
+
return r.Exp(r, big.NewInt(b), nil)
}
@@ -140,6 +143,15 @@ func BigMax(x, y *big.Int) *big.Int {
if x.Cmp(y) < 0 {
return y
}
+
+ return x
+}
+
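+// BigMaxUint returns the larger of x or y.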
+func BigMaxUint(x, y *uint256.Int) *uint256.Int {
+ if x.Lt(y) {
+ return y
+ }
+
return x
}
@@ -148,6 +160,15 @@ func BigMin(x, y *big.Int) *big.Int {
if x.Cmp(y) > 0 {
return y
}
+
+ return x
+}
+
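+// BigMinUint256 returns the smaller of x or y.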
+func BigMinUint256(x, y *uint256.Int) *uint256.Int {
+ if x.Gt(y) {
+ return y
+ }
+
return x
}
@@ -227,10 +248,10 @@ func U256Bytes(n *big.Int) []byte {
// S256 interprets x as a two's complement number.
// x must not exceed 256 bits (the result is undefined if it does) and is not modified.
//
-// S256(0) = 0
-// S256(1) = 1
-// S256(2**255) = -2**255
-// S256(2**256-1) = -1
+// S256(0) = 0
+// S256(1) = 1
+// S256(2**255) = -2**255
+// S256(2**256-1) = -1
func S256(x *big.Int) *big.Int {
if x.Cmp(tt255) < 0 {
return x
diff --git a/common/math/uint.go b/common/math/uint.go
new file mode 100644
index 0000000000..96b8261884
--- /dev/null
+++ b/common/math/uint.go
@@ -0,0 +1,23 @@
+package math
+
+import (
+ "math/big"
+
+ "github.com/holiman/uint256"
+)
+
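+// Shared uint256 constants. These are package-level values; callers must not
+// mutate them.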
+var (
+ U0 = uint256.NewInt(0)
+ U1 = uint256.NewInt(1)
+ U100 = uint256.NewInt(100)
+)
+
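+// U256LTE returns true if a is less than or equal to b.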
+func U256LTE(a, b *uint256.Int) bool {
+ return a.Lt(b) || a.Eq(b)
+}
+
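+// FromBig converts a big.Int to a uint256.Int, silently ignoring overflow.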
+func FromBig(v *big.Int) *uint256.Int {
+ u, _ := uint256.FromBig(v)
+
+ return u
+}
diff --git a/common/time.go b/common/time.go
new file mode 100644
index 0000000000..6c7662e04c
--- /dev/null
+++ b/common/time.go
@@ -0,0 +1,9 @@
+package common
+
+import "time"
+
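+// TimeMilliseconds is a time format layout with millisecond precision.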
+const TimeMilliseconds = "15:04:05.000"
+
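+// NowMilliseconds returns the current time formatted with millisecond precision.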
+func NowMilliseconds() string {
+ return time.Now().Format(TimeMilliseconds)
+}
diff --git a/common/tracing/context.go b/common/tracing/context.go
index 510e45d775..c3c6342502 100644
--- a/common/tracing/context.go
+++ b/common/tracing/context.go
@@ -4,6 +4,7 @@ import (
"context"
"time"
+ "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -51,11 +52,16 @@ func Trace(ctx context.Context, spanName string) (context.Context, trace.Span) {
return tr.Start(ctx, spanName)
}
-func Exec(ctx context.Context, spanName string, opts ...Option) {
+func Exec(ctx context.Context, instrumentationName, spanName string, opts ...Option) {
var span trace.Span
tr := FromContext(ctx)
+ if tr == nil && len(instrumentationName) != 0 {
+ tr = otel.GetTracerProvider().Tracer(instrumentationName)
+ ctx = WithTracer(ctx, tr)
+ }
+
if tr != nil {
ctx, span = tr.Start(ctx, spanName)
}
@@ -85,7 +91,7 @@ func ElapsedTime(ctx context.Context, span trace.Span, msg string, fn func(conte
fn(ctx, span)
if span != nil {
- span.SetAttributes(attribute.Int(msg, int(time.Since(now).Milliseconds())))
+ span.SetAttributes(attribute.Int(msg, int(time.Since(now).Microseconds())))
}
}
diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go
index 5b32263762..e01b26b688 100644
--- a/consensus/bor/bor.go
+++ b/consensus/bor/bor.go
@@ -298,6 +298,14 @@ func (c *Bor) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Head
return c.verifyHeader(chain, header, nil)
}
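+
+// GetSpanner returns the spanner used by the consensus engine.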
+func (c *Bor) GetSpanner() Spanner {
+ return c.spanner
+}
+
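+// SetSpanner replaces the spanner used by the consensus engine.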
+func (c *Bor) SetSpanner(spanner Spanner) {
+ c.spanner = spanner
+}
+
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
// method returns a quit channel to abort the operations and a results channel to
// retrieve the async verifications (the order is that of the input slice).
@@ -454,6 +462,33 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t
return err
}
+ // Verify that the validator list matches the local contract
+ if IsSprintStart(number+1, c.config.CalculateSprint(number)) {
+ newValidators, err := c.spanner.GetCurrentValidatorsByBlockNrOrHash(context.Background(), rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), number+1)
+
+ if err != nil {
+ return err
+ }
+
+ sort.Sort(valset.ValidatorsByAddress(newValidators))
+
+ headerVals, err := valset.ParseValidators(header.Extra[extraVanity : len(header.Extra)-extraSeal])
+
+ if err != nil {
+ return err
+ }
+
+ if len(newValidators) != len(headerVals) {
+ return errInvalidSpanValidators
+ }
+
+ for i, val := range newValidators {
+ if !bytes.Equal(val.HeaderBytes(), headerVals[i].HeaderBytes()) {
+ return errInvalidSpanValidators
+ }
+ }
+ }
+
// verify the validator list in the last sprint block
if IsSprintStart(number, c.config.CalculateSprint(number)) {
parentValidatorBytes := parent.Extra[extraVanity : len(parent.Extra)-extraSeal]
@@ -518,7 +553,7 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co
hash := checkpoint.Hash()
// get validators and current span
- validators, err := c.spanner.GetCurrentValidators(context.Background(), hash, number+1)
+ validators, err := c.spanner.GetCurrentValidatorsByHash(context.Background(), hash, number+1)
if err != nil {
return nil, err
}
@@ -688,7 +723,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header) e
// get validator set if number
if IsSprintStart(number+1, c.config.CalculateSprint(number)) {
- newValidators, err := c.spanner.GetCurrentValidators(context.Background(), header.ParentHash, number+1)
+ newValidators, err := c.spanner.GetCurrentValidatorsByHash(context.Background(), header.ParentHash, number+1)
if err != nil {
return errUnknownValidators
}
@@ -821,7 +856,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead
if IsSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) {
cx := statefull.ChainContext{Chain: chain, Bor: c}
- tracing.Exec(finalizeCtx, "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) {
+ tracing.Exec(finalizeCtx, "", "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) {
// check and commit span
err = c.checkAndCommitSpan(finalizeCtx, state, header, cx)
})
@@ -832,7 +867,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead
}
if c.HeimdallClient != nil {
- tracing.Exec(finalizeCtx, "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) {
+ tracing.Exec(finalizeCtx, "", "bor.checkAndCommitSpan", func(ctx context.Context, span trace.Span) {
// commit states
stateSyncData, err = c.CommitStates(finalizeCtx, state, header, cx)
})
@@ -844,7 +879,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead
}
}
- tracing.Exec(finalizeCtx, "bor.changeContractCodeIfNeeded", func(ctx context.Context, span trace.Span) {
+ tracing.Exec(finalizeCtx, "", "bor.changeContractCodeIfNeeded", func(ctx context.Context, span trace.Span) {
err = c.changeContractCodeIfNeeded(headerNumber, state)
})
@@ -854,7 +889,7 @@ func (c *Bor) FinalizeAndAssemble(ctx context.Context, chain consensus.ChainHead
}
// No block rewards in PoA, so the state remains as it is
- tracing.Exec(finalizeCtx, "bor.IntermediateRoot", func(ctx context.Context, span trace.Span) {
+ tracing.Exec(finalizeCtx, "", "bor.IntermediateRoot", func(ctx context.Context, span trace.Span) {
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
})
@@ -1218,7 +1253,7 @@ func (c *Bor) SetHeimdallClient(h IHeimdallClient) {
}
func (c *Bor) GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) {
- return c.spanner.GetCurrentValidators(ctx, headerHash, blockNumber)
+ return c.spanner.GetCurrentValidatorsByHash(ctx, headerHash, blockNumber)
}
//
diff --git a/consensus/bor/heimdall/span/spanner.go b/consensus/bor/heimdall/span/spanner.go
index e0f2d66c6b..9307a0337e 100644
--- a/consensus/bor/heimdall/span/spanner.go
+++ b/consensus/bor/heimdall/span/spanner.go
@@ -89,7 +89,7 @@ func (c *ChainSpanner) GetCurrentSpan(ctx context.Context, headerHash common.Has
}
-// GetCurrentValidators get current validators
+// GetCurrentValidatorsByBlockNrOrHash gets the current validators at the given block number or hash
-func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) {
+func (c *ChainSpanner) GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -107,16 +107,13 @@ func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash comm
toAddress := c.validatorContractAddress
gas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2))
- // block
- blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false)
-
result, err := c.ethAPI.Call(ctx, ethapi.TransactionArgs{
Gas: &gas,
To: &toAddress,
Data: &msgData,
- }, blockNr, nil)
+ }, blockNrOrHash, nil)
if err != nil {
- panic(err)
+ return nil, err
}
var (
@@ -144,6 +141,12 @@ func (c *ChainSpanner) GetCurrentValidators(ctx context.Context, headerHash comm
return valz, nil
}
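+
+// GetCurrentValidatorsByHash gets the current validators for the given header
+// hash and block number.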
+func (c *ChainSpanner) GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) {
+ blockNr := rpc.BlockNumberOrHashWithHash(headerHash, false)
+
+ return c.GetCurrentValidatorsByBlockNrOrHash(ctx, blockNr, blockNumber)
+}
+
const method = "commitSpan"
func (c *ChainSpanner) CommitSpan(ctx context.Context, heimdallSpan HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error {
diff --git a/consensus/bor/span.go b/consensus/bor/span.go
index 86a58fa42e..179f92c79c 100644
--- a/consensus/bor/span.go
+++ b/consensus/bor/span.go
@@ -9,11 +9,13 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rpc"
)
//go:generate mockgen -destination=./span_mock.go -package=bor . Spanner
type Spanner interface {
GetCurrentSpan(ctx context.Context, headerHash common.Hash) (*span.Span, error)
- GetCurrentValidators(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error)
+ GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error)
+ GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error)
CommitSpan(ctx context.Context, heimdallSpan span.HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error
}
diff --git a/consensus/bor/span_mock.go b/consensus/bor/span_mock.go
index 6d5f62e25d..910e81716c 100644
--- a/consensus/bor/span_mock.go
+++ b/consensus/bor/span_mock.go
@@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/ethereum/go-ethereum/consensus/bor (interfaces: Spanner)
+// Source: consensus/bor/span.go
// Package bor is a generated GoMock package.
package bor
@@ -14,6 +14,7 @@ import (
core "github.com/ethereum/go-ethereum/core"
state "github.com/ethereum/go-ethereum/core/state"
types "github.com/ethereum/go-ethereum/core/types"
+ rpc "github.com/ethereum/go-ethereum/rpc"
gomock "github.com/golang/mock/gomock"
)
@@ -41,45 +42,60 @@ func (m *MockSpanner) EXPECT() *MockSpannerMockRecorder {
}
// CommitSpan mocks base method.
-func (m *MockSpanner) CommitSpan(arg0 context.Context, arg1 span.HeimdallSpan, arg2 *state.StateDB, arg3 *types.Header, arg4 core.ChainContext) error {
+func (m *MockSpanner) CommitSpan(ctx context.Context, heimdallSpan span.HeimdallSpan, state *state.StateDB, header *types.Header, chainContext core.ChainContext) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CommitSpan", arg0, arg1, arg2, arg3, arg4)
+ ret := m.ctrl.Call(m, "CommitSpan", ctx, heimdallSpan, state, header, chainContext)
ret0, _ := ret[0].(error)
return ret0
}
// CommitSpan indicates an expected call of CommitSpan.
-func (mr *MockSpannerMockRecorder) CommitSpan(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
+func (mr *MockSpannerMockRecorder) CommitSpan(ctx, heimdallSpan, state, header, chainContext interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), arg0, arg1, arg2, arg3, arg4)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitSpan", reflect.TypeOf((*MockSpanner)(nil).CommitSpan), ctx, heimdallSpan, state, header, chainContext)
}
// GetCurrentSpan mocks base method.
-func (m *MockSpanner) GetCurrentSpan(arg0 context.Context, arg1 common.Hash) (*span.Span, error) {
+func (m *MockSpanner) GetCurrentSpan(ctx context.Context, headerHash common.Hash) (*span.Span, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetCurrentSpan", arg0, arg1)
+ ret := m.ctrl.Call(m, "GetCurrentSpan", ctx, headerHash)
ret0, _ := ret[0].(*span.Span)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetCurrentSpan indicates an expected call of GetCurrentSpan.
-func (mr *MockSpannerMockRecorder) GetCurrentSpan(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockSpannerMockRecorder) GetCurrentSpan(ctx, headerHash interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), arg0, arg1)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSpan", reflect.TypeOf((*MockSpanner)(nil).GetCurrentSpan), ctx, headerHash)
}
-// GetCurrentValidators mocks base method.
-func (m *MockSpanner) GetCurrentValidators(arg0 context.Context, arg1 common.Hash, arg2 uint64) ([]*valset.Validator, error) {
+// GetCurrentValidatorsByBlockNrOrHash mocks base method.
+func (m *MockSpanner) GetCurrentValidatorsByBlockNrOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, blockNumber uint64) ([]*valset.Validator, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetCurrentValidators", arg0, arg1, arg2)
+ ret := m.ctrl.Call(m, "GetCurrentValidatorsByBlockNrOrHash", ctx, blockNrOrHash, blockNumber)
ret0, _ := ret[0].([]*valset.Validator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// GetCurrentValidators indicates an expected call of GetCurrentValidators.
-func (mr *MockSpannerMockRecorder) GetCurrentValidators(arg0, arg1, arg2 interface{}) *gomock.Call {
+// GetCurrentValidatorsByBlockNrOrHash indicates an expected call of GetCurrentValidatorsByBlockNrOrHash.
+func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByBlockNrOrHash(ctx, blockNrOrHash, blockNumber interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidators", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidators), arg0, arg1, arg2)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByBlockNrOrHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByBlockNrOrHash), ctx, blockNrOrHash, blockNumber)
+}
+
+// GetCurrentValidatorsByHash mocks base method.
+func (m *MockSpanner) GetCurrentValidatorsByHash(ctx context.Context, headerHash common.Hash, blockNumber uint64) ([]*valset.Validator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetCurrentValidatorsByHash", ctx, headerHash, blockNumber)
+ ret0, _ := ret[0].([]*valset.Validator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetCurrentValidatorsByHash indicates an expected call of GetCurrentValidatorsByHash.
+func (mr *MockSpannerMockRecorder) GetCurrentValidatorsByHash(ctx, headerHash, blockNumber interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidatorsByHash", reflect.TypeOf((*MockSpanner)(nil).GetCurrentValidatorsByHash), ctx, headerHash, blockNumber)
}
diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go
index 193a5b84e2..00a8ab5b58 100644
--- a/consensus/misc/eip1559.go
+++ b/consensus/misc/eip1559.go
@@ -20,6 +20,8 @@ import (
"fmt"
"math/big"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
@@ -92,3 +94,54 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int {
)
}
}
+
+// CalcBaseFeeUint calculates the basefee of the header using uint256 arithmetic.
+func CalcBaseFeeUint(config *params.ChainConfig, parent *types.Header) *uint256.Int {
+ var (
+ initialBaseFeeUint = uint256.NewInt(params.InitialBaseFee)
+ baseFeeChangeDenominatorUint64 = params.BaseFeeChangeDenominator(config.Bor, parent.Number)
+ baseFeeChangeDenominatorUint = uint256.NewInt(baseFeeChangeDenominatorUint64)
+ )
+
+ // If the current block is the first EIP-1559 block, return the InitialBaseFee.
+ if !config.IsLondon(parent.Number) {
+ return initialBaseFeeUint.Clone()
+ }
+
+ var (
+ parentGasTarget = parent.GasLimit / params.ElasticityMultiplier
+ parentGasTargetBig = uint256.NewInt(parentGasTarget)
+ )
+
+ // If the parent gasUsed is the same as the target, the baseFee remains unchanged.
+ if parent.GasUsed == parentGasTarget {
+ return math.FromBig(parent.BaseFee)
+ }
+
+ if parent.GasUsed > parentGasTarget {
+ // If the parent block used more gas than its target, the baseFee should increase.
+ gasUsedDelta := uint256.NewInt(parent.GasUsed - parentGasTarget)
+
+ parentBaseFee := math.FromBig(parent.BaseFee)
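+ // uint256 operations write into their receiver, so x and y below alias
+ // gasUsedDelta rather than allocating fresh integers.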
+ x := gasUsedDelta.Mul(parentBaseFee, gasUsedDelta)
+ y := x.Div(x, parentGasTargetBig)
+ baseFeeDelta := math.BigMaxUint(
+ x.Div(y, baseFeeChangeDenominatorUint),
+ math.U1,
+ )
+
+ return x.Add(parentBaseFee, baseFeeDelta)
+ }
+
+ // Otherwise if the parent block used less gas than its target, the baseFee should decrease.
+ gasUsedDelta := uint256.NewInt(parentGasTarget - parent.GasUsed)
+ parentBaseFee := math.FromBig(parent.BaseFee)
+ x := gasUsedDelta.Mul(parentBaseFee, gasUsedDelta)
+ y := x.Div(x, parentGasTargetBig)
+ baseFeeDelta := x.Div(y, baseFeeChangeDenominatorUint)
+
+ return math.BigMaxUint(
+ x.Sub(parentBaseFee, baseFeeDelta),
+ math.U0.Clone(),
+ )
+}
diff --git a/core/blockchain.go b/core/blockchain.go
index fed1d04268..680cb7dce6 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -18,21 +18,29 @@
package core
import (
+ "compress/gzip"
+ "context"
"errors"
"fmt"
"io"
"math/big"
+ "os"
+ "path/filepath"
"sort"
+ "strings"
"sync"
"sync/atomic"
"time"
lru "github.com/hashicorp/golang-lru"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/common/tracing"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
@@ -1349,46 +1357,89 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
-// WriteBlockWithState writes the block and all associated state to the database.
+// WriteBlockAndSetHead writes the block and all associated state to the
+// database, and applies the block as the new chain head.
-func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
+func (bc *BlockChain) WriteBlockAndSetHead(ctx context.Context, block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
if !bc.chainmu.TryLock() {
return NonStatTy, errChainStopped
}
defer bc.chainmu.Unlock()
- return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent)
+ return bc.writeBlockAndSetHead(ctx, block, receipts, logs, state, emitHeadEvent)
}
// writeBlockAndSetHead writes the block and all associated state to the database,
// and also it applies the given block as the new chain head. This function expects
// the chain mutex to be held.
-func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
+func (bc *BlockChain) writeBlockAndSetHead(ctx context.Context, block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
+ writeBlockAndSetHeadCtx, span := tracing.StartSpan(ctx, "blockchain.writeBlockAndSetHead")
+ defer tracing.EndSpan(span)
+
var stateSyncLogs []*types.Log
- if stateSyncLogs, err = bc.writeBlockWithState(block, receipts, logs, state); err != nil {
+ tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.writeBlockWithState", func(_ context.Context, span trace.Span) {
+ stateSyncLogs, err = bc.writeBlockWithState(block, receipts, logs, state)
+ tracing.SetAttributes(
+ span,
+ attribute.Int("number", int(block.Number().Uint64())),
+ attribute.Bool("error", err != nil),
+ )
+ })
+
+ if err != nil {
return NonStatTy, err
}
+
currentBlock := bc.CurrentBlock()
- reorg, err := bc.forker.ReorgNeeded(currentBlock.Header(), block.Header())
+
+ var reorg bool
+
+ tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.ReorgNeeded", func(_ context.Context, span trace.Span) {
+ reorg, err = bc.forker.ReorgNeeded(currentBlock.Header(), block.Header())
+ tracing.SetAttributes(
+ span,
+ attribute.Int("number", int(block.Number().Uint64())),
+ attribute.Int("current block", int(currentBlock.Number().Uint64())),
+ attribute.Bool("reorg needed", reorg),
+ attribute.Bool("error", err != nil),
+ )
+ })
if err != nil {
return NonStatTy, err
}
- if reorg {
- // Reorganise the chain if the parent is not the head block
- if block.ParentHash() != currentBlock.Hash() {
- if err := bc.reorg(currentBlock, block); err != nil {
- return NonStatTy, err
+ tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.reorg", func(_ context.Context, span trace.Span) {
+ if reorg {
+ status = CanonStatTy
+
+ // Reorganise the chain if the parent is not the head block
+ if block.ParentHash() != currentBlock.Hash() {
+ if err = bc.reorg(currentBlock, block); err != nil {
+ status = NonStatTy
+ }
}
+ } else {
+ status = SideStatTy
}
- status = CanonStatTy
- } else {
- status = SideStatTy
+ tracing.SetAttributes(
+ span,
+ attribute.Int("number", int(block.Number().Uint64())),
+ attribute.Int("current block", int(currentBlock.Number().Uint64())),
+ attribute.Bool("reorg needed", reorg),
+ attribute.Bool("error", err != nil),
+ attribute.String("status", string(status)),
+ )
+ })
+
+ if status == NonStatTy {
+ return
}
+
// Set new head.
if status == CanonStatTy {
- bc.writeHeadBlock(block)
+ tracing.Exec(writeBlockAndSetHeadCtx, "", "blockchain.writeHeadBlock", func(_ context.Context, _ trace.Span) {
+ bc.writeHeadBlock(block)
+ })
}
+
bc.futureBlocks.Remove(block.Hash())
if status == CanonStatTy {
@@ -1785,7 +1836,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
// Don't set the head, only insert the block
_, err = bc.writeBlockWithState(block, receipts, logs, statedb)
} else {
- status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
+ status, err = bc.writeBlockAndSetHead(context.Background(), block, receipts, logs, statedb, false)
}
atomic.StoreUint32(&followupInterrupt, 1)
if err != nil {
@@ -2194,6 +2245,35 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
} else {
// len(newChain) == 0 && len(oldChain) > 0
// rewind the canonical chain to a lower point.
+
+ // Dump both chains to disk so the impossible reorg can be inspected later.
+ home, err := os.UserHomeDir()
+ if err != nil {
+ log.Error("Impossible reorg : Unable to get user home dir", "Error", err)
+ }
+
+ outPath := filepath.Join(home, "impossible-reorgs", fmt.Sprintf("%v-impossibleReorg", time.Now().Format(time.RFC3339)))
+
+ if _, err := os.Stat(outPath); errors.Is(err, os.ErrNotExist) {
+ if err := os.MkdirAll(outPath, os.ModePerm); err != nil {
+ log.Error("Impossible reorg : Unable to create Dir", "Error", err)
+ }
+ }
+
+ if err = ExportBlocks(oldChain, filepath.Join(outPath, "oldChain.gz")); err != nil {
+ log.Error("Impossible reorg : Unable to export oldChain", "Error", err)
+ }
+
+ if err = ExportBlocks([]*types.Block{oldBlock}, filepath.Join(outPath, "oldBlock.gz")); err != nil {
+ log.Error("Impossible reorg : Unable to export oldBlock", "Error", err)
+ }
+
+ if err = ExportBlocks([]*types.Block{newBlock}, filepath.Join(outPath, "newBlock.gz")); err != nil {
+ log.Error("Impossible reorg : Unable to export newBlock", "Error", err)
+ }
+
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
}
// Insert the new chain(except the head block(reverse order)),
@@ -2246,6 +2326,44 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return nil
}
+// ExportBlocks exports blocks into the specified file, truncating any data
+// already present in the file.
+func ExportBlocks(blocks []*types.Block, fn string) error {
+ log.Info("Exporting blockchain", "file", fn)
+
+ // Open the file handle and potentially wrap with a gzip stream
+ fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
+ if err != nil {
+ return err
+ }
+ defer fh.Close()
+
+ var writer io.Writer = fh
+ if strings.HasSuffix(fn, ".gz") {
+ writer = gzip.NewWriter(writer)
+ defer writer.(*gzip.Writer).Close()
+ }
+ // Iterate over the blocks and export them
+ if err := ExportN(writer, blocks); err != nil {
+ return err
+ }
+
+ log.Info("Exported blocks", "file", fn)
+
+ return nil
+}
+
+// ExportN writes the given blocks to the writer.
+func ExportN(w io.Writer, blocks []*types.Block) error {
+ for _, block := range blocks {
+ if err := block.EncodeRLP(w); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// InsertBlockWithoutSetHead executes the block, runs the necessary verification
// upon it and then persist the block and the associate state into the database.
// The key difference between the InsertChain is it won't do the canonical chain
diff --git a/core/tests/blockchain_repair_test.go b/core/tests/blockchain_repair_test.go
index 9b166b7165..d18418727b 100644
--- a/core/tests/blockchain_repair_test.go
+++ b/core/tests/blockchain_repair_test.go
@@ -1796,7 +1796,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
spanner := bor.NewMockSpanner(ctrl)
- spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
+ spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
{
ID: 0,
Address: miner.TestBankAddress,
diff --git a/core/tx_journal.go b/core/tx_journal.go
index d282126a08..980bdb9864 100644
--- a/core/tx_journal.go
+++ b/core/tx_journal.go
@@ -61,11 +61,13 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
if _, err := os.Stat(journal.path); os.IsNotExist(err) {
return nil
}
+
// Open the journal for loading any past transactions
input, err := os.Open(journal.path)
if err != nil {
return err
}
+
defer input.Close()
// Temporarily discard any journal additions (don't double add on load)
@@ -80,29 +82,35 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
// appropriate progress counters. Then use this method to load all the
// journaled transactions in small-ish batches.
loadBatch := func(txs types.Transactions) {
- for _, err := range add(txs) {
- if err != nil {
- log.Debug("Failed to add journaled transaction", "err", err)
- dropped++
- }
+ errs := add(txs)
+
+ for _, err := range errs {
+ if err != nil {
+ dropped++
+
+ log.Debug("Failed to add journaled transaction", "err", err)
+ }
}
}
var (
failure error
batch types.Transactions
)
+
for {
// Parse the next transaction and terminate on error
tx := new(types.Transaction)
+
if err = stream.Decode(tx); err != nil {
if err != io.EOF {
failure = err
}
+
if batch.Len() > 0 {
loadBatch(batch)
}
+
break
}
+
// New transaction parsed, queue up for later, import if threshold is reached
total++
@@ -111,6 +119,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
batch = batch[:0]
}
}
+
log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped)
return failure
diff --git a/core/tx_list.go b/core/tx_list.go
index f141a03bbd..851f732905 100644
--- a/core/tx_list.go
+++ b/core/tx_list.go
@@ -25,8 +25,12 @@ import (
"sync/atomic"
"time"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/common"
+ cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
)
// nonceHeap is a heap.Interface implementation over 64bit unsigned integers for
@@ -54,36 +58,67 @@ func (h *nonceHeap) Pop() interface{} {
type txSortedMap struct {
items map[uint64]*types.Transaction // Hash map storing the transaction data
index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode)
- cache types.Transactions // Cache of the transactions already sorted
+ m sync.RWMutex // protects items and index
+
+ cache types.Transactions // Cache of the transactions already sorted
+ isEmpty bool // Marks the sorted cache as stale, pending a rebuild
+ cacheMu sync.RWMutex // protects cache and isEmpty
}
// newTxSortedMap creates a new nonce-sorted transaction map.
func newTxSortedMap() *txSortedMap {
return &txSortedMap{
- items: make(map[uint64]*types.Transaction),
- index: new(nonceHeap),
+ items: make(map[uint64]*types.Transaction),
+ index: new(nonceHeap),
+ isEmpty: true,
}
}
// Get retrieves the current transactions associated with the given nonce.
func (m *txSortedMap) Get(nonce uint64) *types.Transaction {
+ m.m.RLock()
+ defer m.m.RUnlock()
+
return m.items[nonce]
}
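+
+// Has returns whether the map contains a transaction with the given nonce.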
+func (m *txSortedMap) Has(nonce uint64) bool {
+ if m == nil {
+ return false
+ }
+
+ m.m.RLock()
+ defer m.m.RUnlock()
+
+ return m.items[nonce] != nil
+}
+
// Put inserts a new transaction into the map, also updating the map's nonce
// index. If a transaction already exists with the same nonce, it's overwritten.
func (m *txSortedMap) Put(tx *types.Transaction) {
+ m.m.Lock()
+ defer m.m.Unlock()
+
nonce := tx.Nonce()
if m.items[nonce] == nil {
heap.Push(m.index, nonce)
}
- m.items[nonce], m.cache = tx, nil
+
+ m.items[nonce] = tx
+
+ m.cacheMu.Lock()
+ m.isEmpty = true
+ m.cache = nil
+ m.cacheMu.Unlock()
}
// Forward removes all transactions from the map with a nonce lower than the
// provided threshold. Every removed transaction is returned for any post-removal
// maintenance.
func (m *txSortedMap) Forward(threshold uint64) types.Transactions {
+ m.m.Lock()
+ defer m.m.Unlock()
+
var removed types.Transactions
// Pop off heap items until the threshold is reached
@@ -92,10 +127,15 @@ func (m *txSortedMap) Forward(threshold uint64) types.Transactions {
removed = append(removed, m.items[nonce])
delete(m.items, nonce)
}
+
// If we had a cached order, shift the front
+ m.cacheMu.Lock()
if m.cache != nil {
+ hitCacheCounter.Inc(1)
m.cache = m.cache[len(removed):]
}
+ m.cacheMu.Unlock()
+
return removed
}
@@ -105,21 +145,51 @@ func (m *txSortedMap) Forward(threshold uint64) types.Transactions {
// If you want to do several consecutive filterings, it's therefore better to first
// do a .filter(func1) followed by .Filter(func2) or reheap()
func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions {
+ m.m.Lock()
+ defer m.m.Unlock()
+
removed := m.filter(filter)
// If transactions were removed, the heap and cache are ruined
if len(removed) > 0 {
- m.reheap()
+ m.reheap(false)
}
return removed
}
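+
+// reheap rebuilds the nonce heap from the items map and invalidates the sorted
+// cache. When withRlock is true the caller does not hold m.m: the map is read
+// under RLock and the rebuilt heap is swapped in under Lock. Otherwise the
+// caller must already hold m.m.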
-func (m *txSortedMap) reheap() {
- *m.index = make([]uint64, 0, len(m.items))
+func (m *txSortedMap) reheap(withRlock bool) {
+ index := make(nonceHeap, 0, len(m.items))
+
+ if withRlock {
+ m.m.RLock()
+ log.Info("[DEBUG] Acquired lock over txpool map while performing reheap")
+ }
+
for nonce := range m.items {
- *m.index = append(*m.index, nonce)
+ index = append(index, nonce)
}
- heap.Init(m.index)
+
+ if withRlock {
+ m.m.RUnlock()
+ }
+
+ heap.Init(&index)
+
+ if withRlock {
+ m.m.Lock()
+ }
+
+ m.index = &index
+
+ if withRlock {
+ m.m.Unlock()
+ }
+
+ m.cacheMu.Lock()
m.cache = nil
+ m.isEmpty = true
+ m.cacheMu.Unlock()
+
+ resetCacheGauge.Inc(1)
}
// filter is identical to Filter, but **does not** regenerate the heap. This method
@@ -135,7 +205,12 @@ func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transac
}
}
if len(removed) > 0 {
+ m.cacheMu.Lock()
m.cache = nil
+ m.isEmpty = true
+ m.cacheMu.Unlock()
+
+ resetCacheGauge.Inc(1)
}
return removed
}
@@ -143,45 +218,66 @@ func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transac
// Cap places a hard limit on the number of items, returning all transactions
// exceeding that limit.
func (m *txSortedMap) Cap(threshold int) types.Transactions {
+ m.m.Lock()
+ defer m.m.Unlock()
+
// Short circuit if the number of items is under the limit
if len(m.items) <= threshold {
return nil
}
+
// Otherwise gather and drop the highest nonce'd transactions
var drops types.Transactions
sort.Sort(*m.index)
+
for size := len(m.items); size > threshold; size-- {
drops = append(drops, m.items[(*m.index)[size-1]])
delete(m.items, (*m.index)[size-1])
}
+
*m.index = (*m.index)[:threshold]
heap.Init(m.index)
// If we had a cache, shift the back
+ m.cacheMu.Lock()
if m.cache != nil {
m.cache = m.cache[:len(m.cache)-len(drops)]
}
+ m.cacheMu.Unlock()
+
return drops
}
// Remove deletes a transaction from the maintained map, returning whether the
// transaction was found.
func (m *txSortedMap) Remove(nonce uint64) bool {
+ m.m.Lock()
+ defer m.m.Unlock()
+
// Short circuit if no transaction is present
_, ok := m.items[nonce]
if !ok {
return false
}
+
// Otherwise delete the transaction and fix the heap index
for i := 0; i < m.index.Len(); i++ {
if (*m.index)[i] == nonce {
heap.Remove(m.index, i)
+
break
}
}
+
delete(m.items, nonce)
+
+ m.cacheMu.Lock()
m.cache = nil
+ m.isEmpty = true
+ m.cacheMu.Unlock()
+
+ resetCacheGauge.Inc(1)
return true
}
@@ -194,55 +290,129 @@ func (m *txSortedMap) Remove(nonce uint64) bool {
// prevent getting into and invalid state. This is not something that should ever
// happen but better to be self correcting than failing!
func (m *txSortedMap) Ready(start uint64) types.Transactions {
+ m.m.Lock()
+ defer m.m.Unlock()
+
// Short circuit if no transactions are available
if m.index.Len() == 0 || (*m.index)[0] > start {
return nil
}
+
// Otherwise start accumulating incremental transactions
var ready types.Transactions
+
for next := (*m.index)[0]; m.index.Len() > 0 && (*m.index)[0] == next; next++ {
ready = append(ready, m.items[next])
delete(m.items, next)
heap.Pop(m.index)
}
+
+ m.cacheMu.Lock()
m.cache = nil
+ m.isEmpty = true
+ m.cacheMu.Unlock()
+
+ resetCacheGauge.Inc(1)
return ready
}
// Len returns the length of the transaction map.
func (m *txSortedMap) Len() int {
+ m.m.RLock()
+ defer m.m.RUnlock()
+
return len(m.items)
}
func (m *txSortedMap) flatten() types.Transactions {
// If the sorting was not cached yet, create and cache it
- if m.cache == nil {
- m.cache = make(types.Transactions, 0, len(m.items))
+ m.cacheMu.Lock()
+ defer m.cacheMu.Unlock()
+
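+ // isEmpty marks the cache as stale. It is cleared before the rebuild so
+ // that concurrent callers skip rebuilding; they may briefly observe the
+ // previous (possibly nil) cache instead of blocking.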
+ if m.isEmpty {
+ m.isEmpty = false // to simulate sync.Once
+
+ m.cacheMu.Unlock()
+
+ m.m.RLock()
+
+ cache := make(types.Transactions, 0, len(m.items))
+
for _, tx := range m.items {
- m.cache = append(m.cache, tx)
+ cache = append(cache, tx)
}
- sort.Sort(types.TxByNonce(m.cache))
+
+ m.m.RUnlock()
+
+ // exclude sorting from locks
+ sort.Sort(types.TxByNonce(cache))
+
+ m.cacheMu.Lock()
+ m.cache = cache
+
+ reinitCacheGauge.Inc(1)
+ missCacheCounter.Inc(1)
+ } else {
+ hitCacheCounter.Inc(1)
}
+
return m.cache
}
+func (m *txSortedMap) lastElement() *types.Transaction {
+ // If the sorting was not cached yet, create and cache it
+ m.cacheMu.Lock()
+ defer m.cacheMu.Unlock()
+
+ cache := m.cache
+
+ if m.isEmpty {
+ m.isEmpty = false // to simulate sync.Once
+
+ m.cacheMu.Unlock()
+
+ m.m.RLock()
+ cache = make(types.Transactions, 0, len(m.items))
+
+ for _, tx := range m.items {
+ cache = append(cache, tx)
+ }
+
+ m.m.RUnlock()
+
+ // exclude sorting from locks
+ sort.Sort(types.TxByNonce(cache))
+
+ m.cacheMu.Lock()
+ m.cache = cache
+
+ reinitCacheGauge.Inc(1)
+ missCacheCounter.Inc(1)
+ } else {
+ hitCacheCounter.Inc(1)
+ }
+
+ if len(cache) == 0 {
+ return nil
+ }
+
+ return cache[len(cache)-1]
+}
+
// Flatten creates a nonce-sorted slice of transactions based on the loosely
// sorted internal representation. The result of the sorting is cached in case
// it's requested again before any modifications are made to the contents.
func (m *txSortedMap) Flatten() types.Transactions {
- // Copy the cache to prevent accidental modifications
- cache := m.flatten()
- txs := make(types.Transactions, len(cache))
- copy(txs, cache)
- return txs
+ // The sorted cache is returned directly; callers must treat it as read-only.
+ return m.flatten()
}
// LastElement returns the last element of a flattened list, thus, the
// transaction with the highest nonce
func (m *txSortedMap) LastElement() *types.Transaction {
- cache := m.flatten()
- return cache[len(cache)-1]
+ return m.lastElement()
}
// txList is a "list" of transactions belonging to an account, sorted by account
@@ -253,17 +423,18 @@ type txList struct {
strict bool // Whether nonces are strictly continuous or not
txs *txSortedMap // Heap indexed sorted hash map of the transactions
- costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
- gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit)
+ costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance)
+ gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit)
+ totalcost *big.Int // Total cost of all transactions in the list
}
// newTxList create a new transaction list for maintaining nonce-indexable fast,
// gapped, sortable transaction lists.
func newTxList(strict bool) *txList {
return &txList{
- strict: strict,
- txs: newTxSortedMap(),
- costcap: new(big.Int),
+ strict: strict,
+ txs: newTxSortedMap(),
+ costcap: uint256.NewInt(0),
+ totalcost: new(big.Int),
}
}
@@ -285,31 +456,41 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran
if old.GasFeeCapCmp(tx) >= 0 || old.GasTipCapCmp(tx) >= 0 {
return false, nil
}
+
// thresholdFeeCap = oldFC * (100 + priceBump) / 100
- a := big.NewInt(100 + int64(priceBump))
- aFeeCap := new(big.Int).Mul(a, old.GasFeeCap())
- aTip := a.Mul(a, old.GasTipCap())
+ a := uint256.NewInt(100 + priceBump)
+ aFeeCap := uint256.NewInt(0).Mul(a, old.GasFeeCapUint())
+ aTip := a.Mul(a, old.GasTipCapUint())
// thresholdTip = oldTip * (100 + priceBump) / 100
- b := big.NewInt(100)
+ b := cmath.U100
thresholdFeeCap := aFeeCap.Div(aFeeCap, b)
thresholdTip := aTip.Div(aTip, b)
// We have to ensure that both the new fee cap and tip are higher than the
// old ones as well as checking the percentage threshold to ensure that
// this is accurate for low (Wei-level) gas price replacements.
- if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 {
+ if tx.GasFeeCapUIntLt(thresholdFeeCap) || tx.GasTipCapUIntLt(thresholdTip) {
return false, nil
}
+ // Old is being replaced, subtract old cost
+ l.subTotalCost([]*types.Transaction{old})
}
+
+ // Add new tx cost to totalcost
+ l.totalcost.Add(l.totalcost, tx.Cost())
+
// Otherwise overwrite the old transaction with the current one
l.txs.Put(tx)
- if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 {
+
+ if cost := tx.CostUint(); l.costcap == nil || l.costcap.Lt(cost) {
l.costcap = cost
}
+
if gas := tx.Gas(); l.gascap < gas {
l.gascap = gas
}
+
return true, old
}
@@ -317,7 +498,10 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran
// provided threshold. Every removed transaction is returned for any post-removal
// maintenance.
func (l *txList) Forward(threshold uint64) types.Transactions {
- return l.txs.Forward(threshold)
+ txs := l.txs.Forward(threshold)
+ l.subTotalCost(txs)
+
+ return txs
}
// Filter removes all transactions from the list with a cost or gas limit higher
@@ -329,17 +513,20 @@ func (l *txList) Forward(threshold uint64) types.Transactions {
// a point in calculating all the costs or if the balance covers all. If the threshold
// is lower than the costgas cap, the caps will be reset to a new high after removing
// the newly invalidated transactions.
-func (l *txList) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions, types.Transactions) {
+func (l *txList) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactions, types.Transactions) {
// If all transactions are below the threshold, short circuit
- if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit {
+ if cmath.U256LTE(l.costcap, costLimit) && l.gascap <= gasLimit {
return nil, nil
}
- l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds
+
+ l.costcap = costLimit.Clone() // Lower the caps to the thresholds
l.gascap = gasLimit
// Filter out all the transactions above the account's funds
+ cost := uint256.NewInt(0)
removed := l.txs.Filter(func(tx *types.Transaction) bool {
- return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit) > 0
+ cost.SetFromBig(tx.Cost())
+ return tx.Gas() > gasLimit || cost.Gt(costLimit)
})
if len(removed) == 0 {
@@ -354,16 +541,27 @@ func (l *txList) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions
lowest = nonce
}
}
+
+ l.txs.m.Lock()
invalids = l.txs.filter(func(tx *types.Transaction) bool { return tx.Nonce() > lowest })
+ l.txs.m.Unlock()
}
- l.txs.reheap()
+ // Reset total cost
+ l.subTotalCost(removed)
+ l.subTotalCost(invalids)
+
+ l.txs.reheap(true)
+
return removed, invalids
}
// Cap places a hard limit on the number of items, returning all transactions
// exceeding that limit.
func (l *txList) Cap(threshold int) types.Transactions {
- return l.txs.Cap(threshold)
+ txs := l.txs.Cap(threshold)
+ l.subTotalCost(txs)
+
+ return txs
}
// Remove deletes a transaction from the maintained list, returning whether the
@@ -375,9 +573,14 @@ func (l *txList) Remove(tx *types.Transaction) (bool, types.Transactions) {
if removed := l.txs.Remove(nonce); !removed {
return false, nil
}
+
+ l.subTotalCost([]*types.Transaction{tx})
// In strict mode, filter out non-executable transactions
if l.strict {
- return true, l.txs.Filter(func(tx *types.Transaction) bool { return tx.Nonce() > nonce })
+ txs := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Nonce() > nonce })
+ l.subTotalCost(txs)
+
+ return true, txs
}
return true, nil
}
@@ -390,7 +593,10 @@ func (l *txList) Remove(tx *types.Transaction) (bool, types.Transactions) {
// prevent getting into and invalid state. This is not something that should ever
// happen but better to be self correcting than failing!
func (l *txList) Ready(start uint64) types.Transactions {
- return l.txs.Ready(start)
+ txs := l.txs.Ready(start)
+ l.subTotalCost(txs)
+
+ return txs
}
// Len returns the length of the transaction list.
@@ -416,13 +622,26 @@ func (l *txList) LastElement() *types.Transaction {
return l.txs.LastElement()
}
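+
+// Has returns whether the list contains a transaction with the given nonce.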
+func (l *txList) Has(nonce uint64) bool {
+ return l != nil && l.txs.items[nonce] != nil
+}
+
+// subTotalCost subtracts the cost of the given transactions from the
+// total cost of all transactions.
+func (l *txList) subTotalCost(txs []*types.Transaction) {
+ for _, tx := range txs {
+ l.totalcost.Sub(l.totalcost, tx.Cost())
+ }
+}
+
// priceHeap is a heap.Interface implementation over transactions for retrieving
// price-sorted transactions to discard when the pool fills up. If baseFee is set
// then the heap is sorted based on the effective tip based on the given base fee.
// If baseFee is nil then the sorting is based on gasFeeCap.
type priceHeap struct {
- baseFee *big.Int // heap should always be re-sorted after baseFee is changed
- list []*types.Transaction
+ baseFee *uint256.Int // heap should always be re-sorted after baseFee is changed
+ list []*types.Transaction
+ baseFeeMu sync.RWMutex
}
func (h *priceHeap) Len() int { return len(h.list) }
@@ -440,16 +659,24 @@ func (h *priceHeap) Less(i, j int) bool {
}
func (h *priceHeap) cmp(a, b *types.Transaction) int {
+ h.baseFeeMu.RLock()
+
if h.baseFee != nil {
// Compare effective tips if baseFee is specified
- if c := a.EffectiveGasTipCmp(b, h.baseFee); c != 0 {
+ if c := a.EffectiveGasTipTxUintCmp(b, h.baseFee); c != 0 {
+ h.baseFeeMu.RUnlock()
+
return c
}
}
+
+ h.baseFeeMu.RUnlock()
+
// Compare fee caps if baseFee is not specified or effective tips are equal
if c := a.GasFeeCapCmp(b); c != 0 {
return c
}
+
// Compare tips if effective tips and fee caps are equal
return a.GasTipCapCmp(b)
}
@@ -629,7 +856,10 @@ func (l *txPricedList) Reheap() {
// SetBaseFee updates the base fee and triggers a re-heap. Note that Removed is not
// necessary to call right before SetBaseFee when processing a new block.
-func (l *txPricedList) SetBaseFee(baseFee *big.Int) {
+func (l *txPricedList) SetBaseFee(baseFee *uint256.Int) {
+ l.urgent.baseFeeMu.Lock()
l.urgent.baseFee = baseFee
+ l.urgent.baseFeeMu.Unlock()
+
l.Reheap()
}
diff --git a/core/tx_list_test.go b/core/tx_list_test.go
index ef49cae1dd..80b8c1ef32 100644
--- a/core/tx_list_test.go
+++ b/core/tx_list_test.go
@@ -17,10 +17,11 @@
package core
import (
- "math/big"
"math/rand"
"testing"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -59,11 +60,15 @@ func BenchmarkTxListAdd(b *testing.B) {
for i := 0; i < len(txs); i++ {
txs[i] = transaction(uint64(i), 0, key)
}
+
// Insert the transactions in a random order
- priceLimit := big.NewInt(int64(DefaultTxPoolConfig.PriceLimit))
+ priceLimit := uint256.NewInt(DefaultTxPoolConfig.PriceLimit)
b.ResetTimer()
+ b.ReportAllocs()
+
for i := 0; i < b.N; i++ {
list := newTxList(true)
+
for _, v := range rand.Perm(len(txs)) {
list.Add(txs[v], DefaultTxPoolConfig.PriceBump)
list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump)
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 3d3f01eecb..ce73aa26ac 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -17,6 +17,8 @@
package core
import (
+ "container/heap"
+ "context"
"errors"
"fmt"
"math"
@@ -26,8 +28,12 @@ import (
"sync/atomic"
"time"
+ "github.com/holiman/uint256"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/common/tracing"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -86,6 +92,14 @@ var (
// than some meaningful limit a user might use. This is not a consensus error
// making the transaction invalid, rather a DOS protection.
ErrOversizedData = errors.New("oversized data")
+
+ // ErrFutureReplacePending is returned if a future transaction replaces a pending
+ // transaction. Future transactions should only be able to replace other future transactions.
+ ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
+
+ // ErrOverdraft is returned if a transaction would cause the senders balance to go negative
+ // thus invalidating a potential large number of transactions.
+ ErrOverdraft = errors.New("transaction would cause overdraft")
)
var (
@@ -127,6 +141,11 @@ var (
localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
+ resetCacheGauge = metrics.NewRegisteredGauge("txpool/resetcache", nil)
+ reinitCacheGauge = metrics.NewRegisteredGauge("txpool/reinitcache", nil)
+ hitCacheCounter = metrics.NewRegisteredCounter("txpool/cachehit", nil)
+ missCacheCounter = metrics.NewRegisteredCounter("txpool/cachemiss", nil)
+
reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
)
@@ -232,14 +251,17 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig {
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
- config TxPoolConfig
- chainconfig *params.ChainConfig
- chain blockChain
- gasPrice *big.Int
- txFeed event.Feed
- scope event.SubscriptionScope
- signer types.Signer
- mu sync.RWMutex
+ config TxPoolConfig
+ chainconfig *params.ChainConfig
+ chain blockChain
+ gasPrice *big.Int
+ gasPriceUint *uint256.Int
+ gasPriceMu sync.RWMutex
+
+ txFeed event.Feed
+ scope event.SubscriptionScope
+ signer types.Signer
+ mu sync.RWMutex
istanbul bool // Fork indicator whether we are in the istanbul stage.
eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions.
@@ -252,11 +274,13 @@ type TxPool struct {
locals *accountSet // Set of local transaction to exempt from eviction rules
journal *txJournal // Journal of local transaction to back up to disk
- pending map[common.Address]*txList // All currently processable transactions
- queue map[common.Address]*txList // Queued but non-processable transactions
- beats map[common.Address]time.Time // Last heartbeat from each known account
- all *txLookup // All transactions to allow lookups
- priced *txPricedList // All transactions sorted by price
+ pending map[common.Address]*txList // All currently processable transactions
+ pendingCount int
+ pendingMu sync.RWMutex
+ queue map[common.Address]*txList // Queued but non-processable transactions
+ beats map[common.Address]time.Time // Last heartbeat from each known account
+ all *txLookup // All transactions to allow lookups
+ priced *txPricedList // All transactions sorted by price
chainHeadCh chan ChainHeadEvent
chainHeadSub event.Subscription
@@ -301,6 +325,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
reorgShutdownCh: make(chan struct{}),
initDoneCh: make(chan struct{}),
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
+ gasPriceUint: uint256.NewInt(config.PriceLimit),
}
pool.locals = newAccountSet(pool.signer)
@@ -377,9 +402,7 @@ func (pool *TxPool) loop() {
// Handle stats reporting ticks
case <-report.C:
- pool.mu.RLock()
pending, queued := pool.stats()
- pool.mu.RUnlock()
stales := int(atomic.LoadInt64(&pool.priced.stales))
if pending != prevPending || queued != prevQueued || stales != prevStales {
@@ -389,22 +412,45 @@ func (pool *TxPool) loop() {
// Handle inactive account transaction eviction
case <-evict.C:
- pool.mu.Lock()
+ now := time.Now()
+
+ var (
+ list types.Transactions
+ tx *types.Transaction
+ toRemove []common.Hash
+ )
+
+ pool.mu.RLock()
for addr := range pool.queue {
// Skip local transactions from the eviction mechanism
if pool.locals.contains(addr) {
continue
}
+
// Any non-locals old enough should be removed
- if time.Since(pool.beats[addr]) > pool.config.Lifetime {
- list := pool.queue[addr].Flatten()
- for _, tx := range list {
- pool.removeTx(tx.Hash(), true)
+ if now.Sub(pool.beats[addr]) > pool.config.Lifetime {
+ list = pool.queue[addr].Flatten()
+ for _, tx = range list {
+ toRemove = append(toRemove, tx.Hash())
}
+
queuedEvictionMeter.Mark(int64(len(list)))
}
}
- pool.mu.Unlock()
+
+ pool.mu.RUnlock()
+
+ if len(toRemove) > 0 {
+ pool.mu.Lock()
+
+ var hash common.Hash
+
+ for _, hash = range toRemove {
+ pool.removeTx(hash, true)
+ }
+
+ pool.mu.Unlock()
+ }
// Handle local transaction journal rotation
case <-journal.C:
@@ -442,27 +488,45 @@ func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscripti
// GasPrice returns the current gas price enforced by the transaction pool.
func (pool *TxPool) GasPrice() *big.Int {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
+ pool.gasPriceMu.RLock()
+ defer pool.gasPriceMu.RUnlock()
return new(big.Int).Set(pool.gasPrice)
}
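+
+// GasPriceUint256 returns the current gas price enforced by the transaction
+// pool as a uint256.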
+func (pool *TxPool) GasPriceUint256() *uint256.Int {
+ pool.gasPriceMu.RLock()
+ defer pool.gasPriceMu.RUnlock()
+
+ return pool.gasPriceUint.Clone()
+}
+
// SetGasPrice updates the minimum price required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (pool *TxPool) SetGasPrice(price *big.Int) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
+ pool.gasPriceMu.Lock()
+ defer pool.gasPriceMu.Unlock()
old := pool.gasPrice
pool.gasPrice = price
+
+ if pool.gasPriceUint == nil {
+ pool.gasPriceUint, _ = uint256.FromBig(price)
+ } else {
+ pool.gasPriceUint.SetFromBig(price)
+ }
+
// if the min miner fee increased, remove transactions below the new threshold
if price.Cmp(old) > 0 {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
drop := pool.all.RemotesBelowTip(price)
for _, tx := range drop {
pool.removeTx(tx.Hash(), false)
}
+
pool.priced.Removed(len(drop))
}
@@ -481,9 +545,6 @@ func (pool *TxPool) Nonce(addr common.Address) uint64 {
// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) Stats() (int, int) {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
return pool.stats()
}
@@ -491,47 +552,69 @@ func (pool *TxPool) Stats() (int, int) {
// number of queued (non-executable) transactions.
func (pool *TxPool) stats() (int, int) {
pending := 0
+
+ pool.pendingMu.RLock()
for _, list := range pool.pending {
pending += list.Len()
}
+ pool.pendingMu.RUnlock()
+
+ pool.mu.RLock()
+
queued := 0
for _, list := range pool.queue {
queued += list.Len()
}
+
+ pool.mu.RUnlock()
+
return pending, queued
}
// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
pending := make(map[common.Address]types.Transactions)
+
+ pool.pendingMu.RLock()
for addr, list := range pool.pending {
pending[addr] = list.Flatten()
}
+ pool.pendingMu.RUnlock()
+
queued := make(map[common.Address]types.Transactions)
+
+ pool.mu.RLock()
+
for addr, list := range pool.queue {
queued[addr] = list.Flatten()
}
+
+ pool.mu.RUnlock()
+
return pending, queued
}
// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
- pool.mu.RLock()
- defer pool.mu.RUnlock()
-
var pending types.Transactions
+
+ pool.pendingMu.RLock()
if list, ok := pool.pending[addr]; ok {
pending = list.Flatten()
}
+ pool.pendingMu.RUnlock()
+
+ pool.mu.RLock()
+
var queued types.Transactions
if list, ok := pool.queue[addr]; ok {
queued = list.Flatten()
}
+
+ pool.mu.RUnlock()
+
return pending, queued
}
@@ -542,35 +625,74 @@ func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.
// The enforceTips parameter can be used to do an extra filtering on the pending
// transactions and only return those whose **effective** tip is large enough in
// the next pending execution environment.
-func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
- pool.mu.Lock()
- defer pool.mu.Unlock()
+//
+//nolint:gocognit
+func (pool *TxPool) Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions {
+ pending := make(map[common.Address]types.Transactions, 10)
- pending := make(map[common.Address]types.Transactions)
- for addr, list := range pool.pending {
- txs := list.Flatten()
+ tracing.Exec(ctx, "TxpoolPending", "txpool.Pending()", func(ctx context.Context, span trace.Span) {
+ tracing.ElapsedTime(ctx, span, "txpool.Pending.RLock()", func(ctx context.Context, s trace.Span) {
+ pool.pendingMu.RLock()
+ })
- // If the miner requests tip enforcement, cap the lists now
- if enforceTips && !pool.locals.contains(addr) {
- for i, tx := range txs {
- if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 {
- txs = txs[:i]
- break
+ defer pool.pendingMu.RUnlock()
+
+ pendingAccounts := len(pool.pending)
+
+ var pendingTxs int
+
+ tracing.ElapsedTime(ctx, span, "Loop", func(ctx context.Context, s trace.Span) {
+ gasPriceUint := uint256.NewInt(0)
+ baseFee := uint256.NewInt(0)
+
+ for addr, list := range pool.pending {
+ txs := list.Flatten()
+
+ // If the miner requests tip enforcement, cap the lists now
+ if enforceTips && !pool.locals.contains(addr) {
+ for i, tx := range txs {
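+ // Temporarily drop the pending read lock while sampling gasPrice and
+ // baseFee under their own mutexes, so no two locks are held at once.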
+ pool.pendingMu.RUnlock()
+
+ pool.gasPriceMu.RLock()
+ if pool.gasPriceUint != nil {
+ gasPriceUint.Set(pool.gasPriceUint)
+ }
+
+ pool.priced.urgent.baseFeeMu.Lock()
+ if pool.priced.urgent.baseFee != nil {
+ baseFee.Set(pool.priced.urgent.baseFee)
+ }
+ pool.priced.urgent.baseFeeMu.Unlock()
+
+ pool.gasPriceMu.RUnlock()
+
+ pool.pendingMu.RLock()
+
+ if tx.EffectiveGasTipUintLt(gasPriceUint, baseFee) {
+ txs = txs[:i]
+ break
+ }
+ }
+ }
+
+ if len(txs) > 0 {
+ pending[addr] = txs
+ pendingTxs += len(txs)
}
}
- }
- if len(txs) > 0 {
- pending[addr] = txs
- }
- }
+
+ tracing.SetAttributes(span,
+ attribute.Int("pending-transactions", pendingTxs),
+ attribute.Int("pending-accounts", pendingAccounts),
+ )
+ })
+ })
+
return pending
}
// Locals retrieves the accounts currently considered local by the pool.
func (pool *TxPool) Locals() []common.Address {
- pool.mu.Lock()
- defer pool.mu.Unlock()
-
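+ // No pool-level lock needed: accountSet is internally synchronized.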
return pool.locals.flatten()
}
@@ -579,14 +701,22 @@ func (pool *TxPool) Locals() []common.Address {
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
txs := make(map[common.Address]types.Transactions)
+
+ pool.locals.m.RLock()
+ defer pool.locals.m.RUnlock()
+
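+ // locals.m is held for the whole walk; pendingMu is taken per account
+ // to keep the critical section small.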
for addr := range pool.locals.accounts {
+ pool.pendingMu.RLock()
if pending := pool.pending[addr]; pending != nil {
txs[addr] = append(txs[addr], pending.Flatten()...)
}
+ pool.pendingMu.RUnlock()
+
if queued := pool.queue[addr]; queued != nil {
txs[addr] = append(txs[addr], queued.Flatten()...)
}
}
+
return txs
}
@@ -597,10 +727,12 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
if !pool.eip2718 && tx.Type() != types.LegacyTxType {
return ErrTxTypeNotSupported
}
+
// Reject dynamic fee transactions until EIP-1559 activates.
if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
return ErrTxTypeNotSupported
}
+
// Reject transactions over defined size to prevent DOS attacks
if uint64(tx.Size()) > txMaxSize {
return ErrOversizedData
@@ -615,47 +747,82 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
if tx.Value().Sign() < 0 {
return ErrNegativeValue
}
+
// Ensure the transaction doesn't exceed the current block limit gas.
if pool.currentMaxGas < tx.Gas() {
return ErrGasLimit
}
+
// Sanity check for extremely large numbers
- if tx.GasFeeCap().BitLen() > 256 {
+ gasFeeCap := tx.GasFeeCapRef()
+ if gasFeeCap.BitLen() > 256 {
return ErrFeeCapVeryHigh
}
- if tx.GasTipCap().BitLen() > 256 {
+
+ // do NOT convert to uint256 here: values wider than 256 bits would be
+ // truncated and this check would never fire
+ gasTipCap := tx.GasTipCapRef()
+ if gasTipCap.BitLen() > 256 {
return ErrTipVeryHigh
}
+
// Ensure gasFeeCap is greater than or equal to gasTipCap.
- if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
+ gasTipCapU, _ := uint256.FromBig(gasTipCap)
+ if tx.GasFeeCapUIntLt(gasTipCapU) {
return ErrTipAboveFeeCap
}
+
// Make sure the transaction is signed properly.
from, err := types.Sender(pool.signer, tx)
if err != nil {
return ErrInvalidSender
}
+
// Drop non-local transactions under our own minimal accepted gas price or tip
- if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
+ pool.gasPriceMu.RLock()
+
+ if !local && tx.GasTipCapUIntLt(pool.gasPriceUint) {
+ pool.gasPriceMu.RUnlock()
+
return ErrUnderpriced
}
+
+ pool.gasPriceMu.RUnlock()
+
// Ensure the transaction adheres to nonce ordering
if pool.currentState.GetNonce(from) > tx.Nonce() {
return ErrNonceTooLow
}
+
// Transactor should have enough funds to cover the costs
// cost == V + GP * GL
- if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
+ balance := pool.currentState.GetBalance(from)
+ if balance.Cmp(tx.Cost()) < 0 {
return ErrInsufficientFunds
}
+ // Verify that replacing transactions will not result in overdraft
+ list := pool.pending[from]
+ if list != nil { // Sender already has pending txs
+ sum := new(big.Int).Add(tx.Cost(), list.totalcost)
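+ // list.totalcost tracks the aggregate cost of the sender's pending txs,
+ // so sum is the balance required if this tx lands on top of them.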
+ if repl := list.txs.Get(tx.Nonce()); repl != nil {
+ // Deduct the cost of a transaction replaced by this
+ sum.Sub(sum, repl.Cost())
+ }
+
+ if balance.Cmp(sum) < 0 {
+ log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum)
+ return ErrOverdraft
+ }
+ }
// Ensure the transaction has more gas than the basic tx fee.
intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul)
if err != nil {
return err
}
+
if tx.Gas() < intrGas {
return ErrIntrinsicGas
}
+
return nil
}
@@ -684,14 +851,19 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
invalidTxMeter.Mark(1)
return false, err
}
+
+ // already validated by this point
+ from, _ := types.Sender(pool.signer, tx)
+
// If the transaction pool is full, discard underpriced transactions
if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
// If the new transaction is underpriced, don't accept it
if !isLocal && pool.priced.Underpriced(tx) {
- log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint())
underpricedTxMeter.Mark(1)
return false, ErrUnderpriced
}
+
// We're about to replace a transaction. The reorg does a more thorough
// analysis of what to remove and how, but it runs async. We don't want to
// do too many replacements between reorg-runs, so we cap the number of
@@ -712,30 +884,61 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
overflowedTxMeter.Mark(1)
return false, ErrTxPoolOverflow
}
- // Bump the counter of rejections-since-reorg
- pool.changesSinceReorg += len(drop)
+ // If the new transaction is a future transaction it should never churn pending transactions
+ if pool.isFuture(from, tx) {
+ var replacesPending bool
+
+ for _, dropTx := range drop {
+ dropSender, _ := types.Sender(pool.signer, dropTx)
+ if list := pool.pending[dropSender]; list != nil && list.Overlaps(dropTx) {
+ replacesPending = true
+ break
+ }
+ }
+ // Add all transactions back to the priced queue
+ if replacesPending {
+ for _, dropTx := range drop {
+ heap.Push(&pool.priced.urgent, dropTx)
+ }
+
+ log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
+
+ return false, ErrFutureReplacePending
+ }
+ }
// Kick out the underpriced remote transactions.
for _, tx := range drop {
- log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+ log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCapUint(), "gasFeeCap", tx.GasFeeCapUint())
underpricedTxMeter.Mark(1)
- pool.removeTx(tx.Hash(), false)
+
+ dropped := pool.removeTx(tx.Hash(), false)
+ pool.changesSinceReorg += dropped
}
}
+
// Try to replace an existing transaction in the pending pool
- from, _ := types.Sender(pool.signer, tx) // already validated
- if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
+ pool.pendingMu.RLock()
+
+ list := pool.pending[from]
+
+ if list != nil && list.Overlaps(tx) {
// Nonce already pending, check if required price bump is met
inserted, old := list.Add(tx, pool.config.PriceBump)
+ pool.pendingCount++
+ pool.pendingMu.RUnlock()
+
if !inserted {
pendingDiscardMeter.Mark(1)
return false, ErrReplaceUnderpriced
}
+
// New transaction is better, replace old one
if old != nil {
pool.all.Remove(old.Hash())
pool.priced.Removed(1)
pendingReplaceMeter.Mark(1)
}
+
pool.all.Add(tx, isLocal)
pool.priced.Put(tx, isLocal)
pool.journalTx(from, tx)
@@ -744,8 +947,13 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
// Successful promotion, bump the heartbeat
pool.beats[from] = time.Now()
+
return old != nil, nil
}
+
+ // not a double-unlock: the branch above returns right after unlocking
+ pool.pendingMu.RUnlock()
+
// New transaction isn't replacing a pending one, push into queue
replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
if err != nil {
@@ -766,6 +974,20 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
return replaced, nil
}
+// isFuture reports whether the given transaction is a future (not yet executable) transaction.
+func (pool *TxPool) isFuture(from common.Address, tx *types.Transaction) bool {
+ list := pool.pending[from]
+ if list == nil {
+ return pool.pendingNonces.get(from) != tx.Nonce()
+ }
+ // Sender has pending transactions.
+ if old := list.txs.Get(tx.Nonce()); old != nil {
+ return false // It replaces a pending transaction.
+ }
+ // Not replacing, check if parent nonce exists in pending.
+ return list.txs.Get(tx.Nonce()-1) == nil
+}
+
// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
@@ -835,19 +1057,25 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
}()
// Try to insert the transaction into the pending queue
+ pool.pendingMu.Lock()
if pool.pending[addr] == nil {
pool.pending[addr] = newTxList(true)
}
list := pool.pending[addr]
inserted, old := list.Add(tx, pool.config.PriceBump)
+ pool.pendingCount++
+ pool.pendingMu.Unlock()
+
if !inserted {
// An older transaction was better, discard this
pool.all.Remove(hash)
pool.priced.Removed(1)
pendingDiscardMeter.Mark(1)
+
return false
}
+
// Otherwise discard any previous transaction and mark this
if old != nil {
pool.all.Remove(old.Hash())
@@ -857,11 +1085,13 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
// Nothing was replaced, bump the pending counter
pendingGauge.Inc(1)
}
+
// Set the potentially new pending nonce and notify any subsystems of the new tx
pool.pendingNonces.set(addr, tx.Nonce()+1)
// Successful promotion, bump the heartbeat
pool.beats[addr] = time.Now()
+
return true
}
@@ -877,8 +1107,7 @@ func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
- errs := pool.AddLocals([]*types.Transaction{tx})
- return errs[0]
+ return pool.addTx(tx, !pool.config.NoLocals, true)
}
// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
@@ -895,108 +1124,216 @@ func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
return pool.addTxs(txs, false, true)
}
+// AddRemoteSync enqueues a single remote transaction into the pool if it is
+// valid and waits for pool reorganization.
+func (pool *TxPool) AddRemoteSync(tx *types.Transaction) error {
+ return pool.addTx(tx, false, true)
+}
+
// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
- errs := pool.AddRemotesSync([]*types.Transaction{tx})
- return errs[0]
+ return pool.AddRemoteSync(tx)
}
// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes.
-//
-// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
- errs := pool.AddRemotes([]*types.Transaction{tx})
- return errs[0]
+ return pool.addTx(tx, false, false)
}
// addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
// Filter out known ones without obtaining the pool lock or recovering signatures
var (
- errs = make([]error, len(txs))
+ errs []error
news = make([]*types.Transaction, 0, len(txs))
+ err error
+
+ hash common.Hash
)
- for i, tx := range txs {
+
+ for _, tx := range txs {
// If the transaction is known, pre-set the error slot
- if pool.all.Get(tx.Hash()) != nil {
- errs[i] = ErrAlreadyKnown
+ hash = tx.Hash()
+
+ if pool.all.Get(hash) != nil {
+ errs = append(errs, ErrAlreadyKnown)
knownTxMeter.Mark(1)
+
continue
}
+
// Exclude transactions with invalid signatures as soon as
// possible and cache senders in transactions before
// obtaining lock
- _, err := types.Sender(pool.signer, tx)
+ _, err = types.Sender(pool.signer, tx)
if err != nil {
- errs[i] = ErrInvalidSender
+ errs = append(errs, ErrInvalidSender)
invalidTxMeter.Mark(1)
+
continue
}
+
// Accumulate all unknown transactions for deeper processing
news = append(news, tx)
}
+
if len(news) == 0 {
return errs
}
// Process all the new transaction and merge any errors into the original slice
pool.mu.Lock()
- newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
+ errs, dirtyAddrs := pool.addTxsLocked(news, local)
pool.mu.Unlock()
- var nilSlot = 0
- for _, err := range newErrs {
- for errs[nilSlot] != nil {
- nilSlot++
+ // Reorg the pool internals if needed and return
+ done := pool.requestPromoteExecutables(dirtyAddrs)
+ if sync {
+ <-done
+ }
+
+ return errs
+}
+
+// addTx attempts to queue a single transaction if it is valid.
+func (pool *TxPool) addTx(tx *types.Transaction, local, sync bool) error {
+ // Filter out known ones without obtaining the pool lock or recovering signatures
+ var (
+ err error
+ hash common.Hash
+ )
+
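+ // The pre-checks run in a closure so that both early-out paths fall
+ // through to the single error check below.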
+ func() {
+ // If the transaction is known, pre-set the error slot
+ hash = tx.Hash()
+
+ if pool.all.Get(hash) != nil {
+ err = ErrAlreadyKnown
+
+ knownTxMeter.Mark(1)
+
+ return
}
- errs[nilSlot] = err
- nilSlot++
+
+ // Exclude transactions with invalid signatures as soon as
+ // possible and cache senders in transactions before
+ // obtaining lock
+ _, err = types.Sender(pool.signer, tx)
+ if err != nil {
+ invalidTxMeter.Mark(1)
+
+ return
+ }
+ }()
+
+ if err != nil {
+ return err
}
+
+ var dirtyAddrs *accountSet
+
+ // Process all the new transaction and merge any errors into the original slice
+ pool.mu.Lock()
+ err, dirtyAddrs = pool.addTxLocked(tx, local)
+ pool.mu.Unlock()
+
// Reorg the pool internals if needed and return
done := pool.requestPromoteExecutables(dirtyAddrs)
if sync {
<-done
}
- return errs
+
+ return err
}
// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
dirty := newAccountSet(pool.signer)
- errs := make([]error, len(txs))
- for i, tx := range txs {
- replaced, err := pool.add(tx, local)
- errs[i] = err
+
+ var (
+ replaced bool
+ errs []error
+ )
+
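+ // Note: errs is compacted; only failing transactions append an entry, so
+ // it no longer aligns index-for-index with the input slice.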
+ for _, tx := range txs {
+ var err error
+
+ replaced, err = pool.add(tx, local)
if err == nil && !replaced {
dirty.addTx(tx)
}
+
+ if err != nil {
+ errs = append(errs, err)
+ }
}
+
validTxMeter.Mark(int64(len(dirty.accounts)))
+
return errs, dirty
}
+func (pool *TxPool) addTxLocked(tx *types.Transaction, local bool) (error, *accountSet) {
+ dirty := newAccountSet(pool.signer)
+
+ var (
+ replaced bool
+ err error
+ )
+
+ replaced, err = pool.add(tx, local)
+ if err == nil && !replaced {
+ dirty.addTx(tx)
+ }
+
+ validTxMeter.Mark(int64(len(dirty.accounts)))
+
+ return err, dirty
+}
+
// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
status := make([]TxStatus, len(hashes))
+
+ var (
+ txList *txList
+ isPending bool
+ )
+
for i, hash := range hashes {
tx := pool.Get(hash)
if tx == nil {
continue
}
+
from, _ := types.Sender(pool.signer, tx) // already validated
- pool.mu.RLock()
- if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
+
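+ // Check the pending set under pendingMu first; fall back to the queue
+ // (guarded by the pool lock) only when the tx is not pending.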
+ pool.pendingMu.RLock()
+
+ if txList = pool.pending[from]; txList != nil && txList.txs.Has(tx.Nonce()) {
status[i] = TxStatusPending
- } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
- status[i] = TxStatusQueued
+ isPending = true
+ } else {
+ isPending = false
}
+
+ pool.pendingMu.RUnlock()
+
+ if !isPending {
+ pool.mu.RLock()
+
+ if txList := pool.queue[from]; txList != nil && txList.txs.Has(tx.Nonce()) {
+ status[i] = TxStatusQueued
+ }
+
+ pool.mu.RUnlock()
+ }
+
// implicit else: the tx may have been included into a block between
// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
- pool.mu.RUnlock()
}
+
return status
}
@@ -1013,12 +1350,14 @@ func (pool *TxPool) Has(hash common.Hash) bool {
// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
-func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
+// Returns the number of transactions removed from the pending queue.
+func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int {
// Fetch the transaction we wish to delete
tx := pool.all.Get(hash)
if tx == nil {
- return
+ return 0
}
+
addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
// Remove it from the list of known transactions
@@ -1026,39 +1365,59 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
if outofbound {
pool.priced.Removed(1)
}
+
if pool.locals.contains(addr) {
localGauge.Dec(1)
}
+
// Remove the transaction from the pending lists and reset the account nonce
+ pool.pendingMu.Lock()
+
if pending := pool.pending[addr]; pending != nil {
if removed, invalids := pending.Remove(tx); removed {
+ pool.pendingCount--
+
// If no more pending transactions are left, remove the list
if pending.Empty() {
delete(pool.pending, addr)
}
+
+ pool.pendingMu.Unlock()
+
// Postpone any invalidated transactions
for _, tx := range invalids {
// Internal shuffle shouldn't touch the lookup set.
pool.enqueueTx(tx.Hash(), tx, false, false)
}
+
// Update the account nonce if needed
pool.pendingNonces.setIfLower(addr, tx.Nonce())
+
// Reduce the pending counter
pendingGauge.Dec(int64(1 + len(invalids)))
- return
+
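+ // The count covers the removed tx plus any invalidated successors that
+ // were demoted back to the queue.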
+ return 1 + len(invalids)
}
+
+ // not removed: the write lock is still held and released below
}
+
+ pool.pendingMu.Unlock()
+
// Transaction is in the future queue
if future := pool.queue[addr]; future != nil {
if removed, _ := future.Remove(tx); removed {
// Reduce the queued counter
queuedGauge.Dec(1)
}
+
if future.Empty() {
delete(pool.queue, addr)
delete(pool.beats, addr)
}
}
+
+ return 0
}
// requestReset requests a pool reset to the new head block.
@@ -1109,8 +1468,10 @@ func (pool *TxPool) scheduleReorgLoop() {
for {
// Launch next background reorg if needed
if curDone == nil && launchNextRun {
+ ctx := context.Background()
+
// Run the background reorg and announcements
- go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents)
+ go pool.runReorg(ctx, nextDone, reset, dirtyAccounts, queuedEvents)
// Prepare everything for the next round of reorg
curDone, nextDone = nextDone, make(chan struct{})
@@ -1165,86 +1526,178 @@ func (pool *TxPool) scheduleReorgLoop() {
}
// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
-func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
- defer func(t0 time.Time) {
- reorgDurationTimer.Update(time.Since(t0))
- }(time.Now())
- defer close(done)
-
- var promoteAddrs []common.Address
- if dirtyAccounts != nil && reset == nil {
- // Only dirty accounts need to be promoted, unless we're resetting.
- // For resets, all addresses in the tx queue will be promoted and
- // the flatten operation can be avoided.
- promoteAddrs = dirtyAccounts.flatten()
- }
- pool.mu.Lock()
- if reset != nil {
- // Reset from the old head to the new, rescheduling any reorged transactions
- pool.reset(reset.oldHead, reset.newHead)
-
- // Nonces were reset, discard any events that became stale
- for addr := range events {
- events[addr].Forward(pool.pendingNonces.get(addr))
- if events[addr].Len() == 0 {
- delete(events, addr)
+//
+//nolint:gocognit
+func (pool *TxPool) runReorg(ctx context.Context, done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
+ tracing.Exec(ctx, "TxPoolReorg", "txpool-reorg", func(ctx context.Context, span trace.Span) {
+ defer func(t0 time.Time) {
+ reorgDurationTimer.Update(time.Since(t0))
+ }(time.Now())
+
+ defer close(done)
+
+ var promoteAddrs []common.Address
+
+ tracing.ElapsedTime(ctx, span, "01 dirty accounts flattening", func(_ context.Context, innerSpan trace.Span) {
+ if dirtyAccounts != nil && reset == nil {
+ // Only dirty accounts need to be promoted, unless we're resetting.
+ // For resets, all addresses in the tx queue will be promoted and
+ // the flatten operation can be avoided.
+ promoteAddrs = dirtyAccounts.flatten()
}
+
+ tracing.SetAttributes(
+ innerSpan,
+ attribute.Int("promoteAddresses-flatten", len(promoteAddrs)),
+ )
+ })
+
+ tracing.ElapsedTime(ctx, span, "02 obtaining pool.WMutex", func(_ context.Context, _ trace.Span) {
+ pool.mu.Lock()
+ })
+
+ if reset != nil {
+ tracing.ElapsedTime(ctx, span, "03 reset-head reorg", func(_ context.Context, innerSpan trace.Span) {
+
+ // Reset from the old head to the new, rescheduling any reorged transactions
+ tracing.ElapsedTime(ctx, innerSpan, "04 reset-head-itself reorg", func(_ context.Context, innerSpan trace.Span) {
+ pool.reset(reset.oldHead, reset.newHead)
+ })
+
+ tracing.SetAttributes(
+ innerSpan,
+ attribute.Int("events-reset-head", len(events)),
+ )
+
+ // Nonces were reset, discard any events that became stale
+ for addr := range events {
+ events[addr].Forward(pool.pendingNonces.get(addr))
+
+ if events[addr].Len() == 0 {
+ delete(events, addr)
+ }
+ }
+
+ // Reset needs promote for all addresses
+ promoteAddrs = make([]common.Address, 0, len(pool.queue))
+ for addr := range pool.queue {
+ promoteAddrs = append(promoteAddrs, addr)
+ }
+
+ tracing.SetAttributes(
+ innerSpan,
+ attribute.Int("promoteAddresses-reset-head", len(promoteAddrs)),
+ )
+ })
}
- // Reset needs promote for all addresses
- promoteAddrs = make([]common.Address, 0, len(pool.queue))
- for addr := range pool.queue {
- promoteAddrs = append(promoteAddrs, addr)
+
+ // Check for pending transactions for every account that sent new ones
+ var promoted []*types.Transaction
+
+ tracing.ElapsedTime(ctx, span, "05 promoteExecutables", func(_ context.Context, _ trace.Span) {
+ promoted = pool.promoteExecutables(promoteAddrs)
+ })
+
+ tracing.SetAttributes(
+ span,
+ attribute.Int("count.promoteAddresses-reset-head", len(promoteAddrs)),
+ attribute.Int("count.all", pool.all.Count()),
+ attribute.Int("count.pending", len(pool.pending)),
+ attribute.Int("count.queue", len(pool.queue)),
+ )
+
+ // If a new block appeared, validate the pool of pending transactions. This will
+ // remove any transaction that has been included in the block or was invalidated
+ // because of another transaction (e.g. higher gas price).
+
+ //nolint:nestif
+ if reset != nil {
+ tracing.ElapsedTime(ctx, span, "new block", func(_ context.Context, innerSpan trace.Span) {
+
+ tracing.ElapsedTime(ctx, innerSpan, "06 demoteUnexecutables", func(_ context.Context, _ trace.Span) {
+ pool.demoteUnexecutables()
+ })
+
+ var nonces map[common.Address]uint64
+
+ tracing.ElapsedTime(ctx, innerSpan, "07 set_base_fee", func(_ context.Context, _ trace.Span) {
+ if reset.newHead != nil {
+ if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
+ // london fork enabled, reset given the base fee
+ pendingBaseFee := misc.CalcBaseFeeUint(pool.chainconfig, reset.newHead)
+ pool.priced.SetBaseFee(pendingBaseFee)
+ } else {
+ // london fork not enabled, reheap to "reset" the priced list
+ pool.priced.Reheap()
+ }
+ }
+
+ // Update all accounts to the latest known pending nonce
+ nonces = make(map[common.Address]uint64, len(pool.pending))
+ })
+
+ tracing.ElapsedTime(ctx, innerSpan, "08 obtaining pendingMu.RMutex", func(_ context.Context, _ trace.Span) {
+ pool.pendingMu.RLock()
+ })
+
+ var highestPending *types.Transaction
+
+ tracing.ElapsedTime(ctx, innerSpan, "09 fill nonces", func(_ context.Context, innerSpan trace.Span) {
+ for addr, list := range pool.pending {
+ highestPending = list.LastElement()
+ if highestPending != nil {
+ nonces[addr] = highestPending.Nonce() + 1
+ }
+ }
+ })
+
+ pool.pendingMu.RUnlock()
+
+ tracing.ElapsedTime(ctx, innerSpan, "10 reset nonces", func(_ context.Context, _ trace.Span) {
+ pool.pendingNonces.setAll(nonces)
+ })
+ })
}
- }
- // Check for pending transactions for every account that sent new ones
- promoted := pool.promoteExecutables(promoteAddrs)
-
- // If a new block appeared, validate the pool of pending transactions. This will
- // remove any transaction that has been included in the block or was invalidated
- // because of another transaction (e.g. higher gas price).
- if reset != nil {
- pool.demoteUnexecutables()
- if reset.newHead != nil {
- if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
- // london fork enabled, reset given the base fee
- pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
- pool.priced.SetBaseFee(pendingBaseFee)
- } else {
- // london fork not enabled, reheap to "reset" the priced list
- pool.priced.Reheap()
+
+ // Ensure pool.queue and pool.pending sizes stay within the configured limits.
+ tracing.ElapsedTime(ctx, span, "11 truncatePending", func(_ context.Context, _ trace.Span) {
+ pool.truncatePending()
+ })
+
+ tracing.ElapsedTime(ctx, span, "12 truncateQueue", func(_ context.Context, _ trace.Span) {
+ pool.truncateQueue()
+ })
+
+ dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
+ pool.changesSinceReorg = 0 // Reset change counter
+
+ pool.mu.Unlock()
+
+ // Notify subsystems for newly added transactions
+ tracing.ElapsedTime(ctx, span, "13 notify about new transactions", func(_ context.Context, _ trace.Span) {
+ for _, tx := range promoted {
+ addr, _ := types.Sender(pool.signer, tx)
+
+ if _, ok := events[addr]; !ok {
+ events[addr] = newTxSortedMap()
+ }
+
+ events[addr].Put(tx)
}
- }
- // Update all accounts to the latest known pending nonce
- nonces := make(map[common.Address]uint64, len(pool.pending))
- for addr, list := range pool.pending {
- highestPending := list.LastElement()
- nonces[addr] = highestPending.Nonce() + 1
- }
- pool.pendingNonces.setAll(nonces)
- }
- // Ensure pool.queue and pool.pending sizes stay within the configured limits.
- pool.truncatePending()
- pool.truncateQueue()
+ })
- dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
- pool.changesSinceReorg = 0 // Reset change counter
- pool.mu.Unlock()
+ if len(events) > 0 {
+ tracing.ElapsedTime(ctx, span, "14 txFeed", func(_ context.Context, _ trace.Span) {
+ var txs []*types.Transaction
- // Notify subsystems for newly added transactions
- for _, tx := range promoted {
- addr, _ := types.Sender(pool.signer, tx)
- if _, ok := events[addr]; !ok {
- events[addr] = newTxSortedMap()
- }
- events[addr].Put(tx)
- }
- if len(events) > 0 {
- var txs []*types.Transaction
- for _, set := range events {
- txs = append(txs, set.Flatten()...)
+ for _, set := range events {
+ txs = append(txs, set.Flatten()...)
+ }
+
+ pool.txFeed.Send(NewTxsEvent{txs})
+ })
}
- pool.txFeed.Send(NewTxsEvent{txs})
- }
+ })
}
// reset retrieves the current state of the blockchain and ensures the content
@@ -1343,64 +1796,100 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
// Track the promoted transactions to broadcast them at once
- var promoted []*types.Transaction
+ var (
+ promoted []*types.Transaction
+ promotedLen int
+ forwards types.Transactions
+ forwardsLen int
+ caps types.Transactions
+ capsLen int
+ drops types.Transactions
+ dropsLen int
+ list *txList
+ hash common.Hash
+ readies types.Transactions
+ readiesLen int
+ )
+
+ balance := uint256.NewInt(0)
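+ // Loop-carried variables are hoisted, and balance is reused as a uint256
+ // scratch value, to avoid per-iteration allocations.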
// Iterate over all accounts and promote any executable transactions
for _, addr := range accounts {
- list := pool.queue[addr]
+ list = pool.queue[addr]
if list == nil {
continue // Just in case someone calls with a non existing account
}
+
// Drop all transactions that are deemed too old (low nonce)
- forwards := list.Forward(pool.currentState.GetNonce(addr))
+ forwards = list.Forward(pool.currentState.GetNonce(addr))
+ forwardsLen = len(forwards)
+
for _, tx := range forwards {
- hash := tx.Hash()
+ hash = tx.Hash()
pool.all.Remove(hash)
}
- log.Trace("Removed old queued transactions", "count", len(forwards))
+
+ log.Trace("Removed old queued transactions", "count", forwardsLen)
+
// Drop all transactions that are too costly (low balance or out of gas)
- drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
+ balance.SetFromBig(pool.currentState.GetBalance(addr))
+
+ drops, _ = list.Filter(balance, pool.currentMaxGas)
+ dropsLen = len(drops)
+
for _, tx := range drops {
- hash := tx.Hash()
+ hash = tx.Hash()
pool.all.Remove(hash)
}
- log.Trace("Removed unpayable queued transactions", "count", len(drops))
- queuedNofundsMeter.Mark(int64(len(drops)))
+
+ log.Trace("Removed unpayable queued transactions", "count", dropsLen)
+ queuedNofundsMeter.Mark(int64(dropsLen))
// Gather all executable transactions and promote them
- readies := list.Ready(pool.pendingNonces.get(addr))
+ readies = list.Ready(pool.pendingNonces.get(addr))
+ readiesLen = len(readies)
+
for _, tx := range readies {
- hash := tx.Hash()
+ hash = tx.Hash()
if pool.promoteTx(addr, hash, tx) {
promoted = append(promoted, tx)
}
}
- log.Trace("Promoted queued transactions", "count", len(promoted))
- queuedGauge.Dec(int64(len(readies)))
+
+ promotedLen = len(promoted)
+ log.Trace("Promoted queued transactions", "count", promotedLen)
+ queuedGauge.Dec(int64(readiesLen))
// Drop all transactions over the allowed limit
- var caps types.Transactions
if !pool.locals.contains(addr) {
caps = list.Cap(int(pool.config.AccountQueue))
+ capsLen = len(caps)
+
for _, tx := range caps {
- hash := tx.Hash()
+ hash = tx.Hash()
pool.all.Remove(hash)
+
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
}
- queuedRateLimitMeter.Mark(int64(len(caps)))
+
+ queuedRateLimitMeter.Mark(int64(capsLen))
}
+
// Mark all the items dropped as removed
- pool.priced.Removed(len(forwards) + len(drops) + len(caps))
- queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+ pool.priced.Removed(forwardsLen + dropsLen + capsLen)
+
+ queuedGauge.Dec(int64(forwardsLen + dropsLen + capsLen))
+
if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+ localGauge.Dec(int64(forwardsLen + dropsLen + capsLen))
}
+
// Delete the entire queue entry if it became empty.
if list.Empty() {
delete(pool.queue, addr)
delete(pool.beats, addr)
}
}
+
return promoted
}
@@ -1408,86 +1897,162 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all for accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
- pending := uint64(0)
- for _, list := range pool.pending {
- pending += uint64(list.Len())
- }
+ pending := uint64(pool.pendingCount)
if pending <= pool.config.GlobalSlots {
return
}
pendingBeforeCap := pending
+
+ var listLen int
+
+ type pair struct {
+ address common.Address
+ value int64
+ }
+
// Assemble a spam order to penalize large transactors first
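+ // A plain slice sorted once replaces the previous priority queue; entries
+ // are gathered under the read lock and sorted after it is released.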
- spammers := prque.New(nil)
+ spammers := make([]pair, 0, 8)
+ count := 0
+
+ var ok bool
+
+ pool.pendingMu.RLock()
for addr, list := range pool.pending {
// Only evict transactions from high rollers
- if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
- spammers.Push(addr, int64(list.Len()))
+ listLen = len(list.txs.items)
+
+ pool.pendingMu.RUnlock()
+
+ pool.locals.m.RLock()
+
+ if uint64(listLen) > pool.config.AccountSlots {
+ if _, ok = pool.locals.accounts[addr]; ok {
+ pool.locals.m.RUnlock()
+
+ pool.pendingMu.RLock()
+
+ continue
+ }
+
+ count++
+
+ spammers = append(spammers, pair{addr, int64(listLen)})
}
+
+ pool.locals.m.RUnlock()
+
+ pool.pendingMu.RLock()
}
+
+ pool.pendingMu.RUnlock()
+
// Gradually drop transactions from offenders
- offenders := []common.Address{}
- for pending > pool.config.GlobalSlots && !spammers.Empty() {
+ offenders := make([]common.Address, 0, len(spammers))
+ sort.Slice(spammers, func(i, j int) bool {
+ return spammers[i].value < spammers[j].value
+ })
+
+ var (
+ offender common.Address
+ caps types.Transactions
+ capsLen int
+ list *txList
+ hash common.Hash
+ )
+
+ // TODO(metrics): report spammers, offenders and total loops
+ for len(spammers) != 0 && pending > pool.config.GlobalSlots {
// Retrieve the next offender if not local address
- offender, _ := spammers.Pop()
- offenders = append(offenders, offender.(common.Address))
+ offender, spammers = spammers[len(spammers)-1].address, spammers[:len(spammers)-1]
+ offenders = append(offenders, offender)
+
+ var threshold int
// Equalize balances until all the same or below threshold
if len(offenders) > 1 {
// Calculate the equalization threshold for all current offenders
- threshold := pool.pending[offender.(common.Address)].Len()
+ pool.pendingMu.RLock()
+ threshold = len(pool.pending[offender].txs.items)
// Iteratively reduce all offenders until below limit or threshold reached
for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
for i := 0; i < len(offenders)-1; i++ {
- list := pool.pending[offenders[i]]
+ list = pool.pending[offenders[i]]
+
+ caps = list.Cap(len(list.txs.items) - 1)
+ capsLen = len(caps)
+
+ pool.pendingMu.RUnlock()
- caps := list.Cap(list.Len() - 1)
for _, tx := range caps {
// Drop the transaction from the global pools too
- hash := tx.Hash()
+ hash = tx.Hash()
pool.all.Remove(hash)
// Update the account nonce to the dropped transaction
pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
}
- pool.priced.Removed(len(caps))
- pendingGauge.Dec(int64(len(caps)))
+
+ pool.priced.Removed(capsLen)
+
+ pendingGauge.Dec(int64(capsLen))
if pool.locals.contains(offenders[i]) {
- localGauge.Dec(int64(len(caps)))
+ localGauge.Dec(int64(capsLen))
}
+
pending--
+
+ pool.pendingMu.RLock()
}
}
+
+ pool.pendingMu.RUnlock()
}
}
// If still above threshold, reduce to limit or min allowance
if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+
+ pool.pendingMu.RLock()
+
for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
for _, addr := range offenders {
- list := pool.pending[addr]
+ list = pool.pending[addr]
+
+ caps = list.Cap(len(list.txs.items) - 1)
+ capsLen = len(caps)
+
+ pool.pendingMu.RUnlock()
- caps := list.Cap(list.Len() - 1)
for _, tx := range caps {
// Drop the transaction from the global pools too
- hash := tx.Hash()
+ hash = tx.Hash()
pool.all.Remove(hash)
// Update the account nonce to the dropped transaction
pool.pendingNonces.setIfLower(addr, tx.Nonce())
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
}
- pool.priced.Removed(len(caps))
- pendingGauge.Dec(int64(len(caps)))
- if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(caps)))
+
+ pool.priced.Removed(capsLen)
+
+ pendingGauge.Dec(int64(capsLen))
+
+ if _, ok = pool.locals.accounts[addr]; ok {
+ localGauge.Dec(int64(capsLen))
}
+
pending--
+
+ pool.pendingMu.RLock()
}
}
+
+ pool.pendingMu.RUnlock()
}
+
pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}
@@ -1510,27 +2075,52 @@ func (pool *TxPool) truncateQueue() {
}
sort.Sort(addresses)
+ var (
+ tx *types.Transaction
+ txs types.Transactions
+ list *txList
+ addr addressByHeartbeat
+ size uint64
+ )
+
// Drop transactions until the total is below the limit or only locals remain
for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
- addr := addresses[len(addresses)-1]
- list := pool.queue[addr.address]
+ addr = addresses[len(addresses)-1]
+ list = pool.queue[addr.address]
addresses = addresses[:len(addresses)-1]
+ var (
+ listFlatten types.Transactions
+ isSet bool
+ )
+
// Drop all transactions if they are less than the overflow
- if size := uint64(list.Len()); size <= drop {
- for _, tx := range list.Flatten() {
+ if size = uint64(list.Len()); size <= drop {
+ listFlatten = list.Flatten()
+ isSet = true
+
+ for _, tx = range listFlatten {
pool.removeTx(tx.Hash(), true)
}
+
drop -= size
queuedRateLimitMeter.Mark(int64(size))
+
continue
}
+
// Otherwise drop only last few transactions
- txs := list.Flatten()
+ if !isSet {
+ listFlatten = list.Flatten()
+ }
+
+ txs = listFlatten
for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
pool.removeTx(txs[i].Hash(), true)
+
drop--
+
queuedRateLimitMeter.Mark(1)
}
}
@@ -1544,56 +2134,98 @@ func (pool *TxPool) truncateQueue() {
// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
// to trigger a re-heap in this function
func (pool *TxPool) demoteUnexecutables() {
+ balance := uint256.NewInt(0)
+
+ var (
+ olds types.Transactions
+ oldsLen int
+ hash common.Hash
+ drops types.Transactions
+ dropsLen int
+ invalids types.Transactions
+ invalidsLen int
+ gapped types.Transactions
+ gappedLen int
+ )
+
// Iterate over all accounts and demote any non-executable transactions
+ pool.pendingMu.RLock()
+
for addr, list := range pool.pending {
nonce := pool.currentState.GetNonce(addr)
// Drop all transactions that are deemed too old (low nonce)
- olds := list.Forward(nonce)
+ olds = list.Forward(nonce)
+ oldsLen = len(olds)
+
for _, tx := range olds {
- hash := tx.Hash()
+ hash = tx.Hash()
pool.all.Remove(hash)
log.Trace("Removed old pending transaction", "hash", hash)
}
+
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
- drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
+ balance.SetFromBig(pool.currentState.GetBalance(addr))
+ drops, invalids = list.Filter(balance, pool.currentMaxGas)
+ dropsLen = len(drops)
+ invalidsLen = len(invalids)
+
for _, tx := range drops {
- hash := tx.Hash()
+ hash = tx.Hash()
+
log.Trace("Removed unpayable pending transaction", "hash", hash)
+
pool.all.Remove(hash)
}
- pendingNofundsMeter.Mark(int64(len(drops)))
+
+ pendingNofundsMeter.Mark(int64(dropsLen))
for _, tx := range invalids {
- hash := tx.Hash()
+ hash = tx.Hash()
+
log.Trace("Demoting pending transaction", "hash", hash)
// Internal shuffle shouldn't touch the lookup set.
pool.enqueueTx(hash, tx, false, false)
}
- pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+
+ pendingGauge.Dec(int64(oldsLen + dropsLen + invalidsLen))
+
if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+ localGauge.Dec(int64(oldsLen + dropsLen + invalidsLen))
}
// If there's a gap in front, alert (should never happen) and postpone all transactions
if list.Len() > 0 && list.txs.Get(nonce) == nil {
- gapped := list.Cap(0)
+ gapped = list.Cap(0)
+ gappedLen = len(gapped)
+
for _, tx := range gapped {
- hash := tx.Hash()
+ hash = tx.Hash()
log.Error("Demoting invalidated transaction", "hash", hash)
// Internal shuffle shouldn't touch the lookup set.
pool.enqueueTx(hash, tx, false, false)
}
- pendingGauge.Dec(int64(len(gapped)))
+
+ pendingGauge.Dec(int64(gappedLen))
// This might happen in a reorg, so log it to the metering
- blockReorgInvalidatedTx.Mark(int64(len(gapped)))
+ blockReorgInvalidatedTx.Mark(int64(gappedLen))
}
+
// Delete the entire pending entry if it became empty.
if list.Empty() {
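+ // Upgrade to the write lock just for the deletion, then drop back to
+ // the read lock to continue the walk.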
+ pool.pendingMu.RUnlock()
+ pool.pendingMu.Lock()
+
+ pool.pendingCount -= pool.pending[addr].Len()
delete(pool.pending, addr)
+
+ pool.pendingMu.Unlock()
+ pool.pendingMu.RLock()
}
}
+
+ pool.pendingMu.RUnlock()
}
// addressByHeartbeat is an account address tagged with its last activity timestamp.
@@ -1611,9 +2243,10 @@ func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
- accounts map[common.Address]struct{}
- signer types.Signer
- cache *[]common.Address
+ accounts map[common.Address]struct{}
+ accountsFlatted []common.Address
+ signer types.Signer
+ m sync.RWMutex
}
// newAccountSet creates a new address set with an associated signer for sender
@@ -1631,17 +2264,26 @@ func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
+ as.m.RLock()
+ defer as.m.RUnlock()
+
_, exist := as.accounts[addr]
return exist
}
func (as *accountSet) empty() bool {
+ as.m.RLock()
+ defer as.m.RUnlock()
+
return len(as.accounts) == 0
}
// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
func (as *accountSet) containsTx(tx *types.Transaction) bool {
+ as.m.RLock()
+ defer as.m.RUnlock()
+
if addr, err := types.Sender(as.signer, tx); err == nil {
return as.contains(addr)
}
@@ -1650,8 +2292,14 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool {
// add inserts a new address into the set to track.
func (as *accountSet) add(addr common.Address) {
+ as.m.Lock()
+ defer as.m.Unlock()
+
+ if _, ok := as.accounts[addr]; !ok {
+ as.accountsFlatted = append(as.accountsFlatted, addr)
+ }
+
as.accounts[addr] = struct{}{}
- as.cache = nil
}
// addTx adds the sender of tx into the set.
@@ -1664,22 +2312,25 @@ func (as *accountSet) addTx(tx *types.Transaction) {
-// flatten returns the list of addresses within this set, also caching it for later
-// reuse. The returned slice should not be changed!
+// flatten returns the list of addresses within this set. The slice is maintained
+// incrementally by add and merge, so no flattening happens here. The returned
+// slice must not be modified!
func (as *accountSet) flatten() []common.Address {
- if as.cache == nil {
- accounts := make([]common.Address, 0, len(as.accounts))
- for account := range as.accounts {
- accounts = append(accounts, account)
- }
- as.cache = &accounts
- }
- return *as.cache
+ as.m.RLock()
+ defer as.m.RUnlock()
+
+ return as.accountsFlatted
}
// merge adds all addresses from the 'other' set into 'as'.
func (as *accountSet) merge(other *accountSet) {
+ var ok bool
+
+ as.m.Lock()
+ defer as.m.Unlock()
+
for addr := range other.accounts {
+ if _, ok = as.accounts[addr]; !ok {
+ as.accountsFlatted = append(as.accountsFlatted, addr)
+ }
as.accounts[addr] = struct{}{}
}
- as.cache = nil
}
// txLookup is used internally by TxPool to track transactions while allowing
@@ -1835,7 +2486,10 @@ func (t *txLookup) RemoteToLocals(locals *accountSet) int {
var migrated int
for hash, tx := range t.remotes {
if locals.containsTx(tx) {
+ locals.m.Lock()
t.locals[hash] = tx
+ locals.m.Unlock()
+
delete(t.remotes, hash)
migrated += 1
}
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 664ca6c9d4..13fa4ff20d 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -21,6 +21,7 @@ import (
"crypto/ecdsa"
"errors"
"fmt"
+ "io"
"io/ioutil"
"math/big"
"math/rand"
@@ -32,11 +33,15 @@ import (
"testing"
"time"
+ "github.com/holiman/uint256"
+ "go.uber.org/goleak"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/stat"
"pgregory.net/rapid"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/debug"
+ "github.com/ethereum/go-ethereum/common/leak"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -98,7 +103,7 @@ func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Tr
}
func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
- tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
+ tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{0x01}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
return tx
}
@@ -153,12 +158,17 @@ func validateTxPoolInternals(pool *TxPool) error {
if total := pool.all.Count(); total != pending+queued {
return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued)
}
+
pool.priced.Reheap()
priced, remote := pool.priced.urgent.Len()+pool.priced.floating.Len(), pool.all.RemoteCount()
if priced != remote {
return fmt.Errorf("total priced transaction count %d != %d", priced, remote)
}
+
// Ensure the next nonce to assign is the correct one
+ pool.pendingMu.RLock()
+ defer pool.pendingMu.RUnlock()
+
for addr, txs := range pool.pending {
// Find the last transaction
var last uint64
@@ -167,10 +177,16 @@ func validateTxPoolInternals(pool *TxPool) error {
last = nonce
}
}
+
if nonce := pool.pendingNonces.get(addr); nonce != last+1 {
return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1)
}
+
+ if txs.totalcost.Cmp(common.Big0) < 0 {
+ return fmt.Errorf("totalcost went negative: %v", txs.totalcost)
+ }
}
+
return nil
}
@@ -325,10 +341,18 @@ func TestInvalidTransactions(t *testing.T) {
}
tx = transaction(1, 100000, key)
+
+ pool.gasPriceMu.Lock()
+
pool.gasPrice = big.NewInt(1000)
- if err := pool.AddRemote(tx); err != ErrUnderpriced {
+ pool.gasPriceUint = uint256.NewInt(1000)
+
+ pool.gasPriceMu.Unlock()
+
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
t.Error("expected", ErrUnderpriced, "got", err)
}
+
if err := pool.AddLocal(tx); err != nil {
t.Error("expected", nil, "got", err)
}
@@ -347,9 +371,12 @@ func TestTransactionQueue(t *testing.T) {
pool.enqueueTx(tx.Hash(), tx, false, true)
<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
+
+ pool.pendingMu.RLock()
if len(pool.pending) != 1 {
t.Error("expected valid txs to be 1 is", len(pool.pending))
}
+ pool.pendingMu.RUnlock()
tx = transaction(1, 100, key)
from, _ = deriveSender(tx)
@@ -357,9 +384,13 @@ func TestTransactionQueue(t *testing.T) {
pool.enqueueTx(tx.Hash(), tx, false, true)
<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
+
+ pool.pendingMu.RLock()
if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
t.Error("expected transaction to be in tx pool")
}
+ pool.pendingMu.RUnlock()
+
if len(pool.queue) > 0 {
t.Error("expected transaction queue to be empty. is", len(pool.queue))
}
@@ -383,9 +414,13 @@ func TestTransactionQueue2(t *testing.T) {
pool.enqueueTx(tx3.Hash(), tx3, false, true)
pool.promoteExecutables([]common.Address{from})
+
+ pool.pendingMu.RLock()
if len(pool.pending) != 1 {
t.Error("expected pending length to be 1, got", len(pool.pending))
}
+ pool.pendingMu.RUnlock()
+
if pool.queue[from].Len() != 2 {
t.Error("expected len(queue) == 2, got", pool.queue[from].Len())
}
@@ -399,8 +434,10 @@ func TestTransactionNegativeValue(t *testing.T) {
tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key)
from, _ := deriveSender(tx)
+
testAddBalance(pool, from, big.NewInt(1))
- if err := pool.AddRemote(tx); err != ErrNegativeValue {
+
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrNegativeValue) {
t.Error("expected", ErrNegativeValue, "got", err)
}
}
@@ -413,7 +450,7 @@ func TestTransactionTipAboveFeeCap(t *testing.T) {
tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key)
- if err := pool.AddRemote(tx); err != ErrTipAboveFeeCap {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrTipAboveFeeCap) {
t.Error("expected", ErrTipAboveFeeCap, "got", err)
}
}
@@ -428,12 +465,12 @@ func TestTransactionVeryHighValues(t *testing.T) {
veryBigNumber.Lsh(veryBigNumber, 300)
tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key)
- if err := pool.AddRemote(tx); err != ErrTipVeryHigh {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrTipVeryHigh) {
t.Error("expected", ErrTipVeryHigh, "got", err)
}
tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key)
- if err := pool.AddRemote(tx2); err != ErrFeeCapVeryHigh {
+ if err := pool.AddRemote(tx2); !errors.Is(err, ErrFeeCapVeryHigh) {
t.Error("expected", ErrFeeCapVeryHigh, "got", err)
}
}
@@ -495,23 +532,32 @@ func TestTransactionDoubleNonce(t *testing.T) {
if replace, err := pool.add(tx2, false); err != nil || !replace {
t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace)
}
+
<-pool.requestPromoteExecutables(newAccountSet(signer, addr))
+
+ pool.pendingMu.RLock()
if pool.pending[addr].Len() != 1 {
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
}
if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
}
+ pool.pendingMu.RUnlock()
// Add the third transaction and ensure it's not saved (smaller price)
pool.add(tx3, false)
+
<-pool.requestPromoteExecutables(newAccountSet(signer, addr))
+
+ pool.pendingMu.RLock()
if pool.pending[addr].Len() != 1 {
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
}
if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
}
+ pool.pendingMu.RUnlock()
+
// Ensure the total transaction count is correct
if pool.all.Count() != 1 {
t.Error("expected 1 total transactions, got", pool.all.Count())
@@ -530,9 +576,13 @@ func TestTransactionMissingNonce(t *testing.T) {
if _, err := pool.add(tx, false); err != nil {
t.Error("didn't expect error", err)
}
+
+ pool.pendingMu.RLock()
if len(pool.pending) != 0 {
t.Error("expected 0 pending transactions, got", len(pool.pending))
}
+ pool.pendingMu.RUnlock()
+
if pool.queue[addr].Len() != 1 {
t.Error("expected 1 queued transaction, got", pool.queue[addr].Len())
}
@@ -603,19 +653,27 @@ func TestTransactionDropping(t *testing.T) {
pool.enqueueTx(tx12.Hash(), tx12, false, true)
// Check that pre and post validations leave the pool as is
+ pool.pendingMu.RLock()
if pool.pending[account].Len() != 3 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
}
+ pool.pendingMu.RUnlock()
+
if pool.queue[account].Len() != 3 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
}
if pool.all.Count() != 6 {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
}
+
<-pool.requestReset(nil, nil)
+
+ pool.pendingMu.RLock()
if pool.pending[account].Len() != 3 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
}
+ pool.pendingMu.RUnlock()
+
if pool.queue[account].Len() != 3 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
}
@@ -626,6 +684,7 @@ func TestTransactionDropping(t *testing.T) {
testAddBalance(pool, account, big.NewInt(-650))
<-pool.requestReset(nil, nil)
+ pool.pendingMu.RLock()
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
}
@@ -635,6 +694,8 @@ func TestTransactionDropping(t *testing.T) {
if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok {
t.Errorf("out-of-fund pending transaction present: %v", tx1)
}
+ pool.pendingMu.RUnlock()
+
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10)
}
@@ -651,12 +712,15 @@ func TestTransactionDropping(t *testing.T) {
atomic.StoreUint64(&pool.chain.(*testBlockChain).gasLimit, 100)
<-pool.requestReset(nil, nil)
+ pool.pendingMu.RLock()
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
}
if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
t.Errorf("over-gased pending transaction present: %v", tx1)
}
+ pool.pendingMu.RUnlock()
+
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10)
}
@@ -711,19 +775,27 @@ func TestTransactionPostponing(t *testing.T) {
}
}
// Check that pre and post validations leave the pool as is
+ pool.pendingMu.RLock()
if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
}
+ pool.pendingMu.RUnlock()
+
if len(pool.queue) != 0 {
t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
}
if pool.all.Count() != len(txs) {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
}
+
<-pool.requestReset(nil, nil)
+
+ pool.pendingMu.RLock()
if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
}
+ pool.pendingMu.RUnlock()
+
if len(pool.queue) != 0 {
t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
}
@@ -738,12 +810,17 @@ func TestTransactionPostponing(t *testing.T) {
// The first account's first transaction remains valid, check that subsequent
// ones are either filtered out, or queued up for later.
+ pool.pendingMu.RLock()
if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok {
t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0])
}
+ pool.pendingMu.RUnlock()
+
if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok {
t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0])
}
+
+ pool.pendingMu.RLock()
for i, tx := range txs[1:100] {
if i%2 == 1 {
if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
@@ -761,11 +838,16 @@ func TestTransactionPostponing(t *testing.T) {
}
}
}
+ pool.pendingMu.RUnlock()
+
// The second account's first transaction got invalid, check that all transactions
// are either filtered out, or queued up for later.
+ pool.pendingMu.RLock()
if pool.pending[accs[1]] != nil {
t.Errorf("invalidated account still has pending transactions")
}
+ pool.pendingMu.RUnlock()
+
for i, tx := range txs[100:] {
if i%2 == 1 {
if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok {
@@ -854,9 +936,13 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
+
+ pool.pendingMu.RLock()
if len(pool.pending) != 0 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
}
+ pool.pendingMu.RUnlock()
+
if i <= testTxPoolConfig.AccountQueue {
if pool.queue[account].Len() != int(i) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i)
@@ -935,6 +1021,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) {
for i := uint64(0); i < 3*config.GlobalQueue; i++ {
txs = append(txs, transaction(i+1, 100000, local))
}
+
pool.AddLocals(txs)
// If locals are disabled, the previous eviction algorithm should apply here too
@@ -1112,6 +1199,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
}
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
@@ -1128,7 +1216,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
defer pool.Stop()
account := crypto.PubkeyToAddress(key.PublicKey)
- testAddBalance(pool, account, big.NewInt(1000000))
+ testAddBalance(pool, account, big.NewInt(1000000000000))
// Keep track of transaction events to ensure all executables get announced
events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
@@ -1140,9 +1228,13 @@ func TestTransactionPendingLimiting(t *testing.T) {
if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
+
+ pool.pendingMu.RLock()
if pool.pending[account].Len() != int(i)+1 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1)
}
+ pool.pendingMu.RUnlock()
+
if len(pool.queue) != 0 {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
}
@@ -1195,9 +1287,13 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
pool.AddRemotesSync(txs)
pending := 0
+
+ pool.pendingMu.RLock()
for _, list := range pool.pending {
pending += list.Len()
}
+ pool.pendingMu.RUnlock()
+
if pending > int(config.GlobalSlots) {
t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, config.GlobalSlots)
}
@@ -1330,11 +1426,14 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
// Import the batch and verify that limits have been enforced
pool.AddRemotesSync(txs)
+ pool.pendingMu.RLock()
for addr, list := range pool.pending {
if list.Len() != int(config.AccountSlots) {
t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots)
}
}
+ pool.pendingMu.RUnlock()
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
@@ -1391,15 +1490,19 @@ func TestTransactionPoolRepricing(t *testing.T) {
if pending != 7 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
}
+
if queued != 3 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
}
+
if err := validateEvents(events, 7); err != nil {
t.Fatalf("original event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
// Reprice the pool and check that underpriced transactions get dropped
pool.SetGasPrice(big.NewInt(2))
@@ -1407,58 +1510,76 @@ func TestTransactionPoolRepricing(t *testing.T) {
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
+
if queued != 5 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
}
+
if err := validateEvents(events, 0); err != nil {
t.Fatalf("reprice event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
// Check that we can't add the old transactions back
- if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); err != ErrUnderpriced {
+ if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
+
+ if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
- if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); err != ErrUnderpriced {
+
+ if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
+
if err := validateEvents(events, 0); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
// However we can add local underpriced transactions
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3])
+
if err := pool.AddLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
+
if pending, _ = pool.Stats(); pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
+
if err := validateEvents(events, 1); err != nil {
t.Fatalf("post-reprice local event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
// And we can fill gaps with properly priced transactions
if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
+
if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
+
if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
+
if err := validateEvents(events, 5); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
@@ -1487,6 +1608,7 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) {
keys[i], _ = crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
}
+
// Generate and queue a batch of transactions, both pending and queued
txs := types.Transactions{}
@@ -1512,15 +1634,19 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) {
if pending != 7 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
}
+
if queued != 3 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
}
+
if err := validateEvents(events, 7); err != nil {
t.Fatalf("original event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
// Reprice the pool and check that underpriced transactions get dropped
pool.SetGasPrice(big.NewInt(2))
@@ -1528,64 +1654,87 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) {
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
+
if queued != 5 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
}
+
if err := validateEvents(events, 0); err != nil {
t.Fatalf("reprice event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
// Check that we can't add the old transactions back
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0])
- if err := pool.AddRemote(tx); err != ErrUnderpriced {
+
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
+
tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
- if err := pool.AddRemote(tx); err != ErrUnderpriced {
+
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
+
tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])
- if err := pool.AddRemote(tx); err != ErrUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
+
if err := validateEvents(events, 0); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
// However we can add local underpriced transactions
tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3])
+
if err := pool.AddLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
+
if pending, _ = pool.Stats(); pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
+
if err := validateEvents(events, 1); err != nil {
t.Fatalf("post-reprice local event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
// And we can fill gaps with properly priced transactions
tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0])
+
if err := pool.AddRemote(tx); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
+
tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1])
+
if err := pool.AddRemote(tx); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
+
tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2])
+
if err := pool.AddRemote(tx); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
+
if err := validateEvents(events, 5); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
@@ -1607,7 +1756,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) {
keys := make([]*ecdsa.PrivateKey, 3)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
- testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000*1000000))
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000))
}
// Create transaction (both pending and queued) with a linearly growing gasprice
for i := uint64(0); i < 500; i++ {
@@ -1686,7 +1835,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
defer sub.Unsubscribe()
// Create a number of test accounts and fund them
- keys := make([]*ecdsa.PrivateKey, 4)
+ keys := make([]*ecdsa.PrivateKey, 5)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
@@ -1719,9 +1868,13 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Ensure that adding an underpriced transaction on block limit fails
- if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
+ if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, ErrUnderpriced) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
+	// Replace a queued (future) transaction with a higher-priced future transaction
+ if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
// Ensure that adding high priced transactions drops cheap ones, but not own
if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
t.Fatalf("failed to add well priced transaction: %v", err)
@@ -1732,6 +1885,10 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
if err := pool.AddRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3
t.Fatalf("failed to add well priced transaction: %v", err)
}
+ // Ensure that replacing a pending transaction with a future transaction fails
+	if err := pool.AddRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); !errors.Is(err, ErrFutureReplacePending) {
+ t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, ErrFutureReplacePending)
+ }
pending, queued = pool.Stats()
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
@@ -1739,7 +1896,8 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
if queued != 2 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
}
- if err := validateEvents(events, 1); err != nil {
+
+ if err := validateEvents(events, 2); err != nil {
t.Fatalf("additional event firing failed: %v", err)
}
if err := validateTxPoolInternals(pool); err != nil {
@@ -1891,7 +2049,7 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) {
// Ensure that adding an underpriced transaction fails
tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
- if err := pool.AddRemote(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
@@ -1901,11 +2059,12 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) {
t.Fatalf("failed to add well priced transaction: %v", err)
}
- tx = pricedTransaction(2, 100000, big.NewInt(3), keys[1])
+ tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1])
if err := pool.AddRemote(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2
t.Fatalf("failed to add well priced transaction: %v", err)
}
- tx = dynamicFeeTx(3, 100000, big.NewInt(4), big.NewInt(1), keys[1])
+
+ tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1])
if err := pool.AddRemote(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3
t.Fatalf("failed to add well priced transaction: %v", err)
}
@@ -1916,7 +2075,8 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) {
if queued != 2 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
}
- if err := validateEvents(events, 1); err != nil {
+
+ if err := validateEvents(events, 2); err != nil {
t.Fatalf("additional event firing failed: %v", err)
}
if err := validateTxPoolInternals(pool); err != nil {
@@ -1991,7 +2151,7 @@ func TestDualHeapEviction(t *testing.T) {
add(false)
for baseFee = 0; baseFee <= 1000; baseFee += 100 {
- pool.priced.SetBaseFee(big.NewInt(int64(baseFee)))
+ pool.priced.SetBaseFee(uint256.NewInt(uint64(baseFee)))
add(true)
check(highCap, "fee cap")
add(false)
@@ -2020,49 +2180,65 @@ func TestTransactionDeduplication(t *testing.T) {
// Create a batch of transactions and add a few of them
txs := make([]*types.Transaction, 16)
+
for i := 0; i < len(txs); i++ {
txs[i] = pricedTransaction(uint64(i), 100000, big.NewInt(1), key)
}
+
var firsts []*types.Transaction
+
for i := 0; i < len(txs); i += 2 {
firsts = append(firsts, txs[i])
}
+
errs := pool.AddRemotesSync(firsts)
- if len(errs) != len(firsts) {
- t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts))
+
+ if len(errs) != 0 {
+ t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), 0)
}
+
for i, err := range errs {
if err != nil {
t.Errorf("add %d failed: %v", i, err)
}
}
+
pending, queued := pool.Stats()
+
if pending != 1 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
}
+
if queued != len(txs)/2-1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1)
}
+
// Try to add all of them now and ensure previous ones error out as knowns
errs = pool.AddRemotesSync(txs)
- if len(errs) != len(txs) {
- t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs))
+ if len(errs) != 0 {
+ t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), 0)
}
+
for i, err := range errs {
if i%2 == 0 && err == nil {
t.Errorf("add %d succeeded, should have failed as known", i)
}
+
if i%2 == 1 && err != nil {
t.Errorf("add %d failed: %v", i, err)
}
}
+
pending, queued = pool.Stats()
+
if pending != len(txs) {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, len(txs))
}
+
if queued != 0 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
@@ -2096,12 +2272,15 @@ func TestTransactionReplacement(t *testing.T) {
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced {
+
+ if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
}
+
if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil {
t.Fatalf("failed to replace original cheap pending transaction: %v", err)
}
+
if err := validateEvents(events, 2); err != nil {
t.Fatalf("cheap replacement event firing failed: %v", err)
}
@@ -2109,12 +2288,15 @@ func TestTransactionReplacement(t *testing.T) {
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil {
t.Fatalf("failed to add original proper pending transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced {
+
+ if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
}
+
if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil {
t.Fatalf("failed to replace original proper pending transaction: %v", err)
}
+
if err := validateEvents(events, 2); err != nil {
t.Fatalf("proper replacement event firing failed: %v", err)
}
@@ -2123,9 +2305,11 @@ func TestTransactionReplacement(t *testing.T) {
if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap queued transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced {
+
+ if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
}
+
if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil {
t.Fatalf("failed to replace original cheap queued transaction: %v", err)
}
@@ -2133,9 +2317,11 @@ func TestTransactionReplacement(t *testing.T) {
if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil {
t.Fatalf("failed to add original proper queued transaction: %v", err)
}
- if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced {
+
+ if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
}
+
if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil {
t.Fatalf("failed to replace original proper queued transaction: %v", err)
}
@@ -2143,6 +2329,7 @@ func TestTransactionReplacement(t *testing.T) {
if err := validateEvents(events, 0); err != nil {
t.Fatalf("queued replacement event firing failed: %v", err)
}
+
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
@@ -2197,7 +2384,7 @@ func TestTransactionReplacementDynamicFee(t *testing.T) {
}
// 2. Don't bump tip or feecap => discard
tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
}
// 3. Bump both more than min => accept
@@ -2220,22 +2407,22 @@ func TestTransactionReplacementDynamicFee(t *testing.T) {
}
// 6. Bump tip max allowed so it's still underpriced => discard
tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
}
// 7. Bump fee cap max allowed so it's still underpriced => discard
tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
}
// 8. Bump tip min for acceptance, but leave the fee cap => still underpriced => discard
tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
}
// 9. Bump fee cap min for acceptance, but leave the tip => still underpriced => discard
tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key)
- if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced {
+ if err := pool.AddRemote(tx); !errors.Is(err, ErrReplaceUnderpriced) {
t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced)
}
// 10. Check events match expected (3 new executable txs during pending, 0 during queue)
@@ -2465,6 +2652,7 @@ func benchmarkPendingDemotion(b *testing.B, size int) {
}
// Benchmark the speed of pool validation
b.ResetTimer()
+ b.ReportAllocs()
for i := 0; i < b.N; i++ {
pool.demoteUnexecutables()
}
@@ -2496,37 +2684,161 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
}
// Benchmarks the speed of batched transaction insertion.
-func BenchmarkPoolBatchInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, false) }
-func BenchmarkPoolBatchInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, false) }
-func BenchmarkPoolBatchInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, false) }
-
-func BenchmarkPoolBatchLocalInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, true) }
-func BenchmarkPoolBatchLocalInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, true) }
-func BenchmarkPoolBatchLocalInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, true) }
-
-func benchmarkPoolBatchInsert(b *testing.B, size int, local bool) {
+func BenchmarkPoolBatchInsert(b *testing.B) {
// Generate a batch of transactions to enqueue into the pool
pool, key := setupTxPool()
defer pool.Stop()
account := crypto.PubkeyToAddress(key.PublicKey)
- testAddBalance(pool, account, big.NewInt(1000000))
+ testAddBalance(pool, account, big.NewInt(1000000000000000000))
- batches := make([]types.Transactions, b.N)
- for i := 0; i < b.N; i++ {
- batches[i] = make(types.Transactions, size)
- for j := 0; j < size; j++ {
- batches[i][j] = transaction(uint64(size*i+j), 100000, key)
- }
+ const format = "size %d, is local %t"
+
+ cases := []struct {
+ name string
+ size int
+ isLocal bool
+ }{
+ {size: 100, isLocal: false},
+ {size: 1000, isLocal: false},
+ {size: 10000, isLocal: false},
+
+ {size: 100, isLocal: true},
+ {size: 1000, isLocal: true},
+ {size: 10000, isLocal: true},
}
+
+ for i := range cases {
+ cases[i].name = fmt.Sprintf(format, cases[i].size, cases[i].isLocal)
+ }
+
// Benchmark importing the transactions into the queue
- b.ResetTimer()
- for _, batch := range batches {
- if local {
- pool.AddLocals(batch)
- } else {
- pool.AddRemotes(batch)
- }
+
+ for _, testCase := range cases {
+ singleCase := testCase
+
+ b.Run(singleCase.name, func(b *testing.B) {
+ batches := make([]types.Transactions, b.N)
+
+ for i := 0; i < b.N; i++ {
+ batches[i] = make(types.Transactions, singleCase.size)
+
+ for j := 0; j < singleCase.size; j++ {
+ batches[i][j] = transaction(uint64(singleCase.size*i+j), 100000, key)
+ }
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for _, batch := range batches {
+ if testCase.isLocal {
+ pool.AddLocals(batch)
+ } else {
+ pool.AddRemotes(batch)
+ }
+ }
+ })
+ }
+}
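+
+// Illustrative invocation for the sub-benchmarks above (go test replaces
+// spaces in sub-benchmark names with underscores when matching -bench):
+//
+//	go test -run=NONE -bench='BenchmarkPoolBatchInsert/size_1000' ./core/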
+
+func BenchmarkPoolMining(b *testing.B) {
+ const format = "size %d"
+
+ cases := []struct {
+ name string
+ size int
+ }{
+ {size: 1},
+ {size: 5},
+ {size: 10},
+ {size: 20},
+ }
+
+ for i := range cases {
+ cases[i].name = fmt.Sprintf(format, cases[i].size)
+ }
+
+ const blockGasLimit = 30_000_000
+
+ // Benchmark importing the transactions into the queue
+
+ for _, testCase := range cases {
+ singleCase := testCase
+
+ b.Run(singleCase.name, func(b *testing.B) {
+ // Generate a batch of transactions to enqueue into the pool
+ pendingAddedCh := make(chan struct{}, 1024)
+
+ pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh))
+ defer pool.Stop()
+
+ localKeyPub := localKey.PublicKey
+ account := crypto.PubkeyToAddress(localKeyPub)
+
+ const balanceStr = "1_000_000_000"
+ balance, ok := big.NewInt(0).SetString(balanceStr, 0)
+ if !ok {
+ b.Fatal("incorrect initial balance", balanceStr)
+ }
+
+ testAddBalance(pool, account, balance)
+
+ signer := types.NewEIP155Signer(big.NewInt(1))
+ baseFee := uint256.NewInt(1)
+
+ const batchesSize = 100
+
+ batches := make([]types.Transactions, batchesSize)
+
+ for i := 0; i < batchesSize; i++ {
+ batches[i] = make(types.Transactions, singleCase.size)
+
+ for j := 0; j < singleCase.size; j++ {
+ batches[i][j] = transaction(uint64(singleCase.size*i+j), 100_000, localKey)
+ }
+	}
+
+	for _, batch := range batches {
+		pool.AddRemotes(batch)
+	}
+
+ var promoted int
+
+ for range pendingAddedCh {
+ promoted++
+
+ if promoted >= batchesSize*singleCase.size/2 {
+ break
+ }
+ }
+
+ var total int
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ pendingDurations := make([]time.Duration, b.N)
+
+ var added int
+
+ for i := 0; i < b.N; i++ {
+ added, pendingDurations[i], _ = mining(b, pool, signer, baseFee, blockGasLimit, i)
+ total += added
+ }
+
+ b.StopTimer()
+
+ pendingDurationsFloat := make([]float64, len(pendingDurations))
+
+ for i, v := range pendingDurations {
+ pendingDurationsFloat[i] = float64(v.Nanoseconds())
+ }
+
+ mean, stddev := stat.MeanStdDev(pendingDurationsFloat, nil)
+			b.Logf("[%s] pending mean %v, stddev %v, %v-%v",
+ common.NowMilliseconds(), time.Duration(mean), time.Duration(stddev), time.Duration(floats.Min(pendingDurationsFloat)), time.Duration(floats.Max(pendingDurationsFloat)))
+ })
}
}
@@ -2566,62 +2878,355 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
}
// Benchmarks the speed of batch transaction insertion in case of multiple accounts.
-func BenchmarkPoolMultiAccountBatchInsert(b *testing.B) {
+func BenchmarkPoolAccountMultiBatchInsert(b *testing.B) {
// Generate a batch of transactions to enqueue into the pool
pool, _ := setupTxPool()
defer pool.Stop()
- b.ReportAllocs()
+
batches := make(types.Transactions, b.N)
+
for i := 0; i < b.N; i++ {
key, _ := crypto.GenerateKey()
account := crypto.PubkeyToAddress(key.PublicKey)
+
pool.currentState.AddBalance(account, big.NewInt(1000000))
+
tx := transaction(uint64(0), 100000, key)
+
batches[i] = tx
}
+
// Benchmark importing the transactions into the queue
+ b.ReportAllocs()
b.ResetTimer()
+
for _, tx := range batches {
pool.AddRemotesSync([]*types.Transaction{tx})
}
}
-type acc struct {
- nonce uint64
- key *ecdsa.PrivateKey
- account common.Address
-}
-
-type testTx struct {
- tx *types.Transaction
- idx int
- isLocal bool
-}
+func BenchmarkPoolAccountMultiBatchInsertRace(b *testing.B) {
+ // Generate a batch of transactions to enqueue into the pool
+ pool, _ := setupTxPool()
+ defer pool.Stop()
-const localIdx = 0
+ batches := make(types.Transactions, b.N)
-func getTransactionGen(t *rapid.T, keys []*acc, nonces []uint64, localKey *acc, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64) *testTx {
- idx := rapid.IntRange(0, len(keys)-1).Draw(t, "accIdx").(int)
+ for i := 0; i < b.N; i++ {
+ key, _ := crypto.GenerateKey()
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ tx := transaction(uint64(0), 100000, key)
- var (
- isLocal bool
- key *ecdsa.PrivateKey
- )
+ pool.currentState.AddBalance(account, big.NewInt(1000000))
- if idx == localIdx {
- isLocal = true
- key = localKey.key
- } else {
- key = keys[idx].key
+ batches[i] = tx
}
- nonces[idx]++
+ done := make(chan struct{})
- gasPriceUint := rapid.Uint64Range(gasPriceMin, gasPriceMax).Draw(t, "gasPrice").(uint64)
- gasPrice := big.NewInt(0).SetUint64(gasPriceUint)
- gasLimit := rapid.Uint64Range(gasLimitMin, gasLimitMax).Draw(t, "gasLimit").(uint64)
+ go func() {
+ t := time.NewTicker(time.Microsecond)
+ defer t.Stop()
- return &testTx{
+ var pending map[common.Address]types.Transactions
+
+ loop:
+ for {
+ select {
+ case <-t.C:
+ pending = pool.Pending(context.Background(), true)
+ case <-done:
+ break loop
+ }
+ }
+
+ fmt.Fprint(io.Discard, pending)
+ }()
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for _, tx := range batches {
+ pool.AddRemotesSync([]*types.Transaction{tx})
+ }
+
+ close(done)
+}
+
+func BenchmarkPoolAccountMultiBatchInsertNoLockRace(b *testing.B) {
+ // Generate a batch of transactions to enqueue into the pool
+ pendingAddedCh := make(chan struct{}, 1024)
+
+ pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh))
+ defer pool.Stop()
+
+ _ = localKey
+
+ batches := make(types.Transactions, b.N)
+
+ for i := 0; i < b.N; i++ {
+ key, _ := crypto.GenerateKey()
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ tx := transaction(uint64(0), 100000, key)
+
+ pool.currentState.AddBalance(account, big.NewInt(1000000))
+
+ batches[i] = tx
+ }
+
+ done := make(chan struct{})
+
+ go func() {
+ t := time.NewTicker(time.Microsecond)
+ defer t.Stop()
+
+ var pending map[common.Address]types.Transactions
+
+ for range t.C {
+ pending = pool.Pending(context.Background(), true)
+
+ if len(pending) >= b.N/2 {
+ close(done)
+
+ return
+ }
+ }
+ }()
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for _, tx := range batches {
+ pool.AddRemotes([]*types.Transaction{tx})
+ }
+
+ <-done
+}
+
+func BenchmarkPoolAccountsBatchInsert(b *testing.B) {
+ // Generate a batch of transactions to enqueue into the pool
+ pool, _ := setupTxPool()
+ defer pool.Stop()
+
+ batches := make(types.Transactions, b.N)
+
+ for i := 0; i < b.N; i++ {
+ key, _ := crypto.GenerateKey()
+ account := crypto.PubkeyToAddress(key.PublicKey)
+
+ pool.currentState.AddBalance(account, big.NewInt(1000000))
+
+ tx := transaction(uint64(0), 100000, key)
+
+ batches[i] = tx
+ }
+
+ // Benchmark importing the transactions into the queue
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for _, tx := range batches {
+ _ = pool.AddRemoteSync(tx)
+ }
+}
+
+func BenchmarkPoolAccountsBatchInsertRace(b *testing.B) {
+ // Generate a batch of transactions to enqueue into the pool
+ pool, _ := setupTxPool()
+ defer pool.Stop()
+
+ batches := make(types.Transactions, b.N)
+
+ for i := 0; i < b.N; i++ {
+ key, _ := crypto.GenerateKey()
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ tx := transaction(uint64(0), 100000, key)
+
+ pool.currentState.AddBalance(account, big.NewInt(1000000))
+
+ batches[i] = tx
+ }
+
+ done := make(chan struct{})
+
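+	// Concurrent reader: poll Pending() while the main loop inserts, so pool
+	// reads and writes overlap and the race detector gets a chance to fire.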
+ go func() {
+ t := time.NewTicker(time.Microsecond)
+ defer t.Stop()
+
+ var pending map[common.Address]types.Transactions
+
+ loop:
+ for {
+ select {
+ case <-t.C:
+ pending = pool.Pending(context.Background(), true)
+ case <-done:
+ break loop
+ }
+ }
+
+ fmt.Fprint(io.Discard, pending)
+ }()
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for _, tx := range batches {
+ _ = pool.AddRemoteSync(tx)
+ }
+
+ close(done)
+}
+
+func BenchmarkPoolAccountsBatchInsertNoLockRace(b *testing.B) {
+ // Generate a batch of transactions to enqueue into the pool
+ pendingAddedCh := make(chan struct{}, 1024)
+
+ pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh))
+ defer pool.Stop()
+
+ _ = localKey
+
+ batches := make(types.Transactions, b.N)
+
+ for i := 0; i < b.N; i++ {
+ key, _ := crypto.GenerateKey()
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ tx := transaction(uint64(0), 100000, key)
+
+ pool.currentState.AddBalance(account, big.NewInt(1000000))
+
+ batches[i] = tx
+ }
+
+ done := make(chan struct{})
+
+ go func() {
+ t := time.NewTicker(time.Microsecond)
+ defer t.Stop()
+
+ var pending map[common.Address]types.Transactions
+
+ for range t.C {
+ pending = pool.Pending(context.Background(), true)
+
+ if len(pending) >= b.N/2 {
+ close(done)
+
+ return
+ }
+ }
+ }()
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for _, tx := range batches {
+ _ = pool.AddRemote(tx)
+ }
+
+ <-done
+}
+
+func TestPoolMultiAccountBatchInsertRace(t *testing.T) {
+ t.Parallel()
+
+ // Generate a batch of transactions to enqueue into the pool
+ pool, _ := setupTxPool()
+ defer pool.Stop()
+
+ const n = 5000
+
+ batches := make(types.Transactions, n)
+ batchesSecond := make(types.Transactions, n)
+
+ for i := 0; i < n; i++ {
+ batches[i] = newTxs(pool)
+ batchesSecond[i] = newTxs(pool)
+ }
+
+ done := make(chan struct{})
+
+ go func() {
+ t := time.NewTicker(time.Microsecond)
+ defer t.Stop()
+
+ var (
+ pending map[common.Address]types.Transactions
+ total int
+ )
+
+ for range t.C {
+ pending = pool.Pending(context.Background(), true)
+ total = len(pending)
+
+ _ = pool.Locals()
+
+ if total >= n {
+ close(done)
+
+ return
+ }
+ }
+ }()
+
+ for _, tx := range batches {
+ pool.AddRemotesSync([]*types.Transaction{tx})
+ }
+
+ for _, tx := range batchesSecond {
+ pool.AddRemotes([]*types.Transaction{tx})
+ }
+
+ <-done
+}
+
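+// newTxs funds a fresh random account in the pool and returns a single
+// nonce-zero transaction from it.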
+func newTxs(pool *TxPool) *types.Transaction {
+ key, _ := crypto.GenerateKey()
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ tx := transaction(uint64(0), 100000, key)
+
+ pool.currentState.AddBalance(account, big.NewInt(1_000_000_000))
+
+ return tx
+}
+
+type acc struct {
+ nonce uint64
+ key *ecdsa.PrivateKey
+ account common.Address
+}
+
+type testTx struct {
+ tx *types.Transaction
+ idx int
+ isLocal bool
+}
+
+const localIdx = 0
+
+func getTransactionGen(t *rapid.T, keys []*acc, nonces []uint64, localKey *acc, gasPriceMin, gasPriceMax, gasLimitMin, gasLimitMax uint64) *testTx {
+ idx := rapid.IntRange(0, len(keys)-1).Draw(t, "accIdx").(int)
+
+ var (
+ isLocal bool
+ key *ecdsa.PrivateKey
+ )
+
+ if idx == localIdx {
+ isLocal = true
+ key = localKey.key
+ } else {
+ key = keys[idx].key
+ }
+
+ nonces[idx]++
+
+ gasPriceUint := rapid.Uint64Range(gasPriceMin, gasPriceMax).Draw(t, "gasPrice").(uint64)
+ gasPrice := big.NewInt(0).SetUint64(gasPriceUint)
+ gasLimit := rapid.Uint64Range(gasLimitMin, gasLimitMax).Draw(t, "gasLimit").(uint64)
+
+ return &testTx{
tx: pricedTransaction(nonces[idx]-1, gasLimit, gasPrice, key),
idx: idx,
isLocal: isLocal,
@@ -2878,20 +3483,20 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) {
wg.Wait()
var (
- addIntoTxPool func(tx []*types.Transaction) []error
+ addIntoTxPool func(tx *types.Transaction) error
totalInBatch int
)
for _, tx := range txs.txs {
- addIntoTxPool = pool.AddRemotesSync
+ addIntoTxPool = pool.AddRemoteSync
if tx.isLocal {
- addIntoTxPool = pool.AddLocals
+ addIntoTxPool = pool.AddLocal
}
- err := addIntoTxPool([]*types.Transaction{tx.tx})
- if len(err) != 0 && err[0] != nil {
- rt.Log("on adding a transaction to the tx pool", err[0], tx.tx.Gas(), tx.tx.GasPrice(), pool.GasPrice(), getBalance(pool, keys[tx.idx].account))
+ err := addIntoTxPool(tx.tx)
+ if err != nil {
+ rt.Log("on adding a transaction to the tx pool", err, tx.tx.Gas(), tx.tx.GasPrice(), pool.GasPrice(), getBalance(pool, keys[tx.idx].account))
}
}
@@ -2930,7 +3535,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) {
// check if txPool got stuck
if currentTxPoolStats == lastTxPoolStats {
- stuckBlocks++ //todo: переписать
+			stuckBlocks++ //todo: need something better than that
} else {
stuckBlocks = 0
lastTxPoolStats = currentTxPoolStats
@@ -2938,7 +3543,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) {
// copy-paste
start := time.Now()
- pending := pool.Pending(true)
+ pending := pool.Pending(context.Background(), true)
locals := pool.Locals()
// from fillTransactions
@@ -2956,7 +3561,7 @@ func testPoolBatchInsert(t *testing.T, cfg txPoolRapidConfig) {
// check for nonce gaps
var lastNonce, currentNonce int
- pending = pool.Pending(true)
+ pending = pool.Pending(context.Background(), true)
for txAcc, pendingTxs := range pending {
lastNonce = int(pool.Nonce(txAcc)) - len(pendingTxs) - 1
@@ -3026,7 +3631,7 @@ func fillTransactions(ctx context.Context, pool *TxPool, locals []common.Address
signer := types.NewLondonSigner(big.NewInt(1))
// fake baseFee
- baseFee := big.NewInt(1)
+ baseFee := uint256.NewInt(1)
blockGasLimit := gasLimit
@@ -3083,7 +3688,10 @@ func commitTransactions(pool *TxPool, txs *types.TransactionsByPriceAndNonce, bl
if tx.Gas() <= blockGasLimit {
blockGasLimit -= tx.Gas()
+
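+			// removeTx mutates shared pool state, so callers outside the pool must hold pool.mu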
+ pool.mu.Lock()
pool.removeTx(tx.Hash(), false)
+ pool.mu.Unlock()
txCount++
} else {
@@ -3098,3 +3706,885 @@ func MakeWithPromoteTxCh(ch chan struct{}) func(*TxPool) {
pool.promoteTxCh = ch
}
}
+
+//nolint:thelper
+func mining(tb testing.TB, pool *TxPool, signer types.Signer, baseFee *uint256.Int, blockGasLimit uint64, totalBlocks int) (int, time.Duration, time.Duration) {
+ var (
+ localTxsCount int
+ remoteTxsCount int
+ localTxs = make(map[common.Address]types.Transactions)
+ remoteTxs map[common.Address]types.Transactions
+ total int
+ )
+
+ start := time.Now()
+
+ pending := pool.Pending(context.Background(), true)
+
+ pendingDuration := time.Since(start)
+
+ remoteTxs = pending
+
+ locals := pool.Locals()
+
+ pendingLen, queuedLen := pool.Stats()
+
+ for _, account := range locals {
+ if txs := remoteTxs[account]; len(txs) > 0 {
+ delete(remoteTxs, account)
+
+ localTxs[account] = txs
+ }
+ }
+
+ localTxsCount = len(localTxs)
+ remoteTxsCount = len(remoteTxs)
+
+ var txLocalCount int
+
+ if localTxsCount > 0 {
+ txs := types.NewTransactionsByPriceAndNonce(signer, localTxs, baseFee)
+
+ blockGasLimit, txLocalCount = commitTransactions(pool, txs, blockGasLimit)
+
+ total += txLocalCount
+ }
+
+ var txRemoteCount int
+
+ if remoteTxsCount > 0 {
+ txs := types.NewTransactionsByPriceAndNonce(signer, remoteTxs, baseFee)
+
+ _, txRemoteCount = commitTransactions(pool, txs, blockGasLimit)
+
+ total += txRemoteCount
+ }
+
+ miningDuration := time.Since(start)
+
+ tb.Logf("[%s] mining block. block %d. total %d: pending %d(added %d), local %d(added %d), queued %d, localTxsCount %d, remoteTxsCount %d, pending %v, mining %v",
+ common.NowMilliseconds(), totalBlocks, total, pendingLen, txRemoteCount, localTxsCount, txLocalCount, queuedLen, localTxsCount, remoteTxsCount, pendingDuration, miningDuration)
+
+ return total, pendingDuration, miningDuration
+}
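+
+// Minimal sketch of driving the mining helper above; all identifiers are the
+// ones defined in this file and the values are illustrative:
+//
+//	signer := types.NewEIP155Signer(big.NewInt(1))
+//	added, pendingDur, miningDur := mining(t, pool, signer, uint256.NewInt(1), 30_000_000, 0)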
+
+//nolint:paralleltest
+func TestPoolMiningDataRaces(t *testing.T) {
+ if testing.Short() {
+ t.Skip("only for data race testing")
+ }
+
+ const format = "size %d, txs ticker %v, api ticker %v"
+
+ cases := []struct {
+ name string
+ size int
+ txsTickerDuration time.Duration
+ apiTickerDuration time.Duration
+ }{
+ {
+ size: 1,
+ txsTickerDuration: 200 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 1,
+ txsTickerDuration: 400 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 1,
+ txsTickerDuration: 600 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 1,
+ txsTickerDuration: 800 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+
+ {
+ size: 5,
+ txsTickerDuration: 200 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 5,
+ txsTickerDuration: 400 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 5,
+ txsTickerDuration: 600 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 5,
+ txsTickerDuration: 800 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+
+ {
+ size: 10,
+ txsTickerDuration: 200 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 10,
+ txsTickerDuration: 400 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 10,
+ txsTickerDuration: 600 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 10,
+ txsTickerDuration: 800 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+
+ {
+ size: 20,
+ txsTickerDuration: 200 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 20,
+ txsTickerDuration: 400 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 20,
+ txsTickerDuration: 600 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 20,
+ txsTickerDuration: 800 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+
+ {
+ size: 30,
+ txsTickerDuration: 200 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 30,
+ txsTickerDuration: 400 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 30,
+ txsTickerDuration: 600 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ {
+ size: 30,
+ txsTickerDuration: 800 * time.Millisecond,
+ apiTickerDuration: 10 * time.Millisecond,
+ },
+ }
+
+ for i := range cases {
+ cases[i].name = fmt.Sprintf(format, cases[i].size, cases[i].txsTickerDuration, cases[i].apiTickerDuration)
+ }
+
+ //nolint:paralleltest
+ for _, testCase := range cases {
+ singleCase := testCase
+
+ t.Run(singleCase.name, func(t *testing.T) {
+ defer goleak.VerifyNone(t, leak.IgnoreList()...)
+
+ const (
+ blocks = 300
+ blockGasLimit = 40_000_000
+ blockPeriod = time.Second
+ threads = 10
+ batchesSize = 10_000
+ timeoutDuration = 10 * blockPeriod
+
+ balanceStr = "1_000_000_000_000"
+ )
+
+ apiWithMining(t, balanceStr, batchesSize, singleCase, timeoutDuration, threads, blockPeriod, blocks, blockGasLimit)
+ })
+ }
+}
+
+//nolint:gocognit,thelper
+func apiWithMining(tb testing.TB, balanceStr string, batchesSize int, singleCase struct {
+ name string
+ size int
+ txsTickerDuration time.Duration
+ apiTickerDuration time.Duration
+}, timeoutDuration time.Duration, threads int, blockPeriod time.Duration, blocks int, blockGasLimit uint64) {
+ done := make(chan struct{})
+
+ var wg sync.WaitGroup
+
+ defer func() {
+ close(done)
+
+ tb.Logf("[%s] finishing apiWithMining", common.NowMilliseconds())
+
+ wg.Wait()
+
+ tb.Logf("[%s] apiWithMining finished", common.NowMilliseconds())
+ }()
+
+ // Generate a batch of transactions to enqueue into the pool
+ pendingAddedCh := make(chan struct{}, 1024)
+
+ pool, localKey := setupTxPoolWithConfig(params.TestChainConfig, testTxPoolConfig, txPoolGasLimit, MakeWithPromoteTxCh(pendingAddedCh))
+ defer pool.Stop()
+
+ localKeyPub := localKey.PublicKey
+ account := crypto.PubkeyToAddress(localKeyPub)
+
+ balance, ok := big.NewInt(0).SetString(balanceStr, 0)
+ if !ok {
+ tb.Fatal("incorrect initial balance", balanceStr)
+ }
+
+ testAddBalance(pool, account, balance)
+
+ signer := types.NewEIP155Signer(big.NewInt(1))
+ baseFee := uint256.NewInt(1)
+
+ batchesLocal := make([]types.Transactions, batchesSize)
+ batchesRemote := make([]types.Transactions, batchesSize)
+ batchesRemotes := make([]types.Transactions, batchesSize)
+ batchesRemoteSync := make([]types.Transactions, batchesSize)
+ batchesRemotesSync := make([]types.Transactions, batchesSize)
+
+ for i := 0; i < batchesSize; i++ {
+ batchesLocal[i] = make(types.Transactions, singleCase.size)
+
+ for j := 0; j < singleCase.size; j++ {
+ batchesLocal[i][j] = pricedTransaction(uint64(singleCase.size*i+j), 100_000, big.NewInt(int64(i+1)), localKey)
+ }
+
+ batchesRemote[i] = make(types.Transactions, singleCase.size)
+
+ remoteKey, _ := crypto.GenerateKey()
+ remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey)
+ testAddBalance(pool, remoteAddr, balance)
+
+ for j := 0; j < singleCase.size; j++ {
+ batchesRemote[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remoteKey)
+ }
+
+ batchesRemotes[i] = make(types.Transactions, singleCase.size)
+
+ remotesKey, _ := crypto.GenerateKey()
+ remotesAddr := crypto.PubkeyToAddress(remotesKey.PublicKey)
+ testAddBalance(pool, remotesAddr, balance)
+
+ for j := 0; j < singleCase.size; j++ {
+ batchesRemotes[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remotesKey)
+ }
+
+ batchesRemoteSync[i] = make(types.Transactions, singleCase.size)
+
+ remoteSyncKey, _ := crypto.GenerateKey()
+ remoteSyncAddr := crypto.PubkeyToAddress(remoteSyncKey.PublicKey)
+ testAddBalance(pool, remoteSyncAddr, balance)
+
+ for j := 0; j < singleCase.size; j++ {
+ batchesRemoteSync[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remoteSyncKey)
+ }
+
+ batchesRemotesSync[i] = make(types.Transactions, singleCase.size)
+
+ remotesSyncKey, _ := crypto.GenerateKey()
+ remotesSyncAddr := crypto.PubkeyToAddress(remotesSyncKey.PublicKey)
+ testAddBalance(pool, remotesSyncAddr, balance)
+
+ for j := 0; j < singleCase.size; j++ {
+ batchesRemotesSync[i][j] = pricedTransaction(uint64(j), 100_000, big.NewInt(int64(i+1)), remotesSyncKey)
+ }
+ }
+
+ tb.Logf("[%s] starting goroutines", common.NowMilliseconds())
+
+ txsTickerDuration := singleCase.txsTickerDuration
+ apiTickerDuration := singleCase.apiTickerDuration
+
+ // locals
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping AddLocal(s)", common.NowMilliseconds())
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped AddLocal(s)", common.NowMilliseconds())
+ }()
+
+ tb.Logf("[%s] starting AddLocal(s)", common.NowMilliseconds())
+
+ for _, batch := range batchesLocal {
+ batch := batch
+
+ select {
+ case <-done:
+ return
+ default:
+ }
+
+ if rand.Int()%2 == 0 {
+ runWithTimeout(tb, func(_ chan struct{}) {
+ errs := pool.AddLocals(batch)
+ if len(errs) != 0 {
+ tb.Logf("[%s] AddLocals error, %v", common.NowMilliseconds(), errs)
+ }
+ }, done, "AddLocals", timeoutDuration, 0, 0)
+ } else {
+ for _, tx := range batch {
+ tx := tx
+
+ runWithTimeout(tb, func(_ chan struct{}) {
+ err := pool.AddLocal(tx)
+ if err != nil {
+ tb.Logf("[%s] AddLocal error %s", common.NowMilliseconds(), err)
+ }
+ }, done, "AddLocal", timeoutDuration, 0, 0)
+
+ time.Sleep(txsTickerDuration)
+ }
+ }
+
+ time.Sleep(txsTickerDuration)
+ }
+ }()
+
+ // remotes
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping AddRemotes", common.NowMilliseconds())
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped AddRemotes", common.NowMilliseconds())
+ }()
+
+ addTransactionsBatches(tb, batchesRemotes, getFnForBatches(pool.AddRemotes), done, timeoutDuration, txsTickerDuration, "AddRemotes", 0)
+ }()
+
+ // remote
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping AddRemote", common.NowMilliseconds())
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped AddRemote", common.NowMilliseconds())
+ }()
+
+ addTransactions(tb, batchesRemote, pool.AddRemote, done, timeoutDuration, txsTickerDuration, "AddRemote", 0)
+ }()
+
+ // sync
+ // remotes
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping AddRemotesSync", common.NowMilliseconds())
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped AddRemotesSync", common.NowMilliseconds())
+ }()
+
+ addTransactionsBatches(tb, batchesRemotesSync, getFnForBatches(pool.AddRemotesSync), done, timeoutDuration, txsTickerDuration, "AddRemotesSync", 0)
+ }()
+
+ // remote
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping AddRemoteSync", common.NowMilliseconds())
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped AddRemoteSync", common.NowMilliseconds())
+ }()
+
+ addTransactions(tb, batchesRemoteSync, pool.AddRemoteSync, done, timeoutDuration, txsTickerDuration, "AddRemoteSync", 0)
+ }()
+
+ // tx pool API
+ for i := 0; i < threads; i++ {
+ i := i
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Pending-no-tips, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Pending-no-tips, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ p := pool.Pending(context.Background(), false)
+ fmt.Fprint(io.Discard, p)
+ }, done, "Pending-no-tips", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Pending-with-tips, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Pending-with-tips, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ p := pool.Pending(context.Background(), true)
+ fmt.Fprint(io.Discard, p)
+ }, done, "Pending-with-tips", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Locals, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Locals, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ l := pool.Locals()
+ fmt.Fprint(io.Discard, l)
+ }, done, "Locals", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Content, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Content, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ p, q := pool.Content()
+ fmt.Fprint(io.Discard, p, q)
+ }, done, "Content", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping GasPriceUint256, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped GasPriceUint256, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ res := pool.GasPriceUint256()
+ fmt.Fprint(io.Discard, res)
+ }, done, "GasPriceUint256", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping GasPrice, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped GasPrice, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ res := pool.GasPrice()
+ fmt.Fprint(io.Discard, res)
+ }, done, "GasPrice", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping SetGasPrice, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+				tb.Logf("[%s] stopped SetGasPrice, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ pool.SetGasPrice(pool.GasPrice())
+ }, done, "SetGasPrice", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping ContentFrom, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped ContentFrom, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ p, q := pool.ContentFrom(account)
+ fmt.Fprint(io.Discard, p, q)
+ }, done, "ContentFrom", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Has, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Has, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ res := pool.Has(batchesRemotes[0][0].Hash())
+ fmt.Fprint(io.Discard, res)
+ }, done, "Has", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Get, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Get, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ tx := pool.Get(batchesRemotes[0][0].Hash())
+ fmt.Fprint(io.Discard, tx == nil)
+ }, done, "Get", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Nonce, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Nonce, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ res := pool.Nonce(account)
+ fmt.Fprint(io.Discard, res)
+ }, done, "Nonce", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Stats, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Stats, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ p, q := pool.Stats()
+ fmt.Fprint(io.Discard, p, q)
+ }, done, "Stats", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping Status, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped Status, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(_ chan struct{}) {
+ st := pool.Status([]common.Hash{batchesRemotes[1][0].Hash()})
+ fmt.Fprint(io.Discard, st)
+ }, done, "Status", apiTickerDuration, timeoutDuration, i)
+ }()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ tb.Logf("[%s] stopping SubscribeNewTxsEvent, thread %d", common.NowMilliseconds(), i)
+
+ wg.Done()
+
+ tb.Logf("[%s] stopped SubscribeNewTxsEvent, thread %d", common.NowMilliseconds(), i)
+ }()
+
+ runWithTicker(tb, func(c chan struct{}) {
+ ch := make(chan NewTxsEvent, 10)
+ sub := pool.SubscribeNewTxsEvent(ch)
+
+ if sub == nil {
+ return
+ }
+
+ defer sub.Unsubscribe()
+
+ select {
+ case <-done:
+ return
+ case <-c:
+ case res := <-ch:
+ fmt.Fprint(io.Discard, res)
+ }
+ }, done, "SubscribeNewTxsEvent", apiTickerDuration, timeoutDuration, i)
+ }()
+ }
+
+ // wait for the start
+ tb.Logf("[%s] before the first propagated transaction", common.NowMilliseconds())
+ <-pendingAddedCh
+ tb.Logf("[%s] after the first propagated transaction", common.NowMilliseconds())
+
+ var (
+ totalTxs int
+ totalBlocks int
+ )
+
+ pendingDurations := make([]time.Duration, 0, blocks)
+
+ var (
+ added int
+ pendingDuration time.Duration
+ miningDuration time.Duration
+ diff time.Duration
+ )
+
+ for {
+ added, pendingDuration, miningDuration = mining(tb, pool, signer, baseFee, blockGasLimit, totalBlocks)
+
+ totalTxs += added
+
+ pendingDurations = append(pendingDurations, pendingDuration)
+
+ totalBlocks++
+
+ if totalBlocks > blocks {
+ fmt.Fprint(io.Discard, totalTxs)
+ break
+ }
+
+ diff = blockPeriod - miningDuration
+ if diff > 0 {
+ time.Sleep(diff)
+ }
+ }
+
+ pendingDurationsFloat := make([]float64, len(pendingDurations))
+
+ for i, v := range pendingDurations {
+ pendingDurationsFloat[i] = float64(v.Nanoseconds())
+ }
+
+ mean, stddev := stat.MeanStdDev(pendingDurationsFloat, nil)
+ tb.Logf("[%s] pending mean %v, stddev %v, %v-%v",
+ common.NowMilliseconds(), time.Duration(mean), time.Duration(stddev), time.Duration(floats.Min(pendingDurationsFloat)), time.Duration(floats.Max(pendingDurationsFloat)))
+}
+
+func addTransactionsBatches(tb testing.TB, batches []types.Transactions, fn func(types.Transactions) error, done chan struct{}, timeoutDuration time.Duration, tickerDuration time.Duration, name string, thread int) {
+ tb.Helper()
+
+ tb.Logf("[%s] starting %s", common.NowMilliseconds(), name)
+
+ defer func() {
+ tb.Logf("[%s] stop %s", common.NowMilliseconds(), name)
+ }()
+
+ for _, batch := range batches {
+ batch := batch
+
+ select {
+ case <-done:
+ return
+ default:
+ }
+
+ runWithTimeout(tb, func(_ chan struct{}) {
+ err := fn(batch)
+ if err != nil {
+ tb.Logf("[%s] %s error: %s", common.NowMilliseconds(), name, err)
+ }
+ }, done, name, timeoutDuration, 0, thread)
+
+ time.Sleep(tickerDuration)
+ }
+}
+
+func addTransactions(tb testing.TB, batches []types.Transactions, fn func(*types.Transaction) error, done chan struct{}, timeoutDuration time.Duration, tickerDuration time.Duration, name string, thread int) {
+ tb.Helper()
+
+ tb.Logf("[%s] starting %s", common.NowMilliseconds(), name)
+
+ defer func() {
+ tb.Logf("[%s] stop %s", common.NowMilliseconds(), name)
+ }()
+
+ for _, batch := range batches {
+ for _, tx := range batch {
+ tx := tx
+
+ select {
+ case <-done:
+ return
+ default:
+ }
+
+ runWithTimeout(tb, func(_ chan struct{}) {
+ err := fn(tx)
+ if err != nil {
+ tb.Logf("%s error: %s", name, err)
+ }
+ }, done, name, timeoutDuration, 0, thread)
+
+ time.Sleep(tickerDuration)
+ }
+
+ time.Sleep(tickerDuration)
+ }
+}
+
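+// getFnForBatches adapts a batch API returning []error into a single-error
+// function, surfacing only the first element of a non-empty error slice.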
+func getFnForBatches(fn func([]*types.Transaction) []error) func(types.Transactions) error {
+ return func(batch types.Transactions) error {
+ errs := fn(batch)
+ if len(errs) != 0 {
+ return errs[0]
+ }
+
+ return nil
+ }
+}
+
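+// runWithTicker invokes fn on every tick until the done channel closes,
+// delegating per-call timeout enforcement to runWithTimeout.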
+//nolint:unparam
+func runWithTicker(tb testing.TB, fn func(c chan struct{}), done chan struct{}, name string, tickerDuration, timeoutDuration time.Duration, thread int) {
+ tb.Helper()
+
+ select {
+ case <-done:
+ tb.Logf("[%s] Short path. finishing outer runWithTicker for %q, thread %d", common.NowMilliseconds(), name, thread)
+
+ return
+ default:
+ }
+
+ defer func() {
+ tb.Logf("[%s] finishing outer runWithTicker for %q, thread %d", common.NowMilliseconds(), name, thread)
+ }()
+
+ localTicker := time.NewTicker(tickerDuration)
+ defer localTicker.Stop()
+
+ n := 0
+
+ for range localTicker.C {
+ select {
+ case <-done:
+ return
+ default:
+ }
+
+ runWithTimeout(tb, fn, done, name, timeoutDuration, n, thread)
+
+ n++
+ }
+}
+
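+// runWithTimeout executes fn once in a goroutine and fails the test if it
+// neither finishes nor observes outerDone within timeoutDuration.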
+func runWithTimeout(tb testing.TB, fn func(chan struct{}), outerDone chan struct{}, name string, timeoutDuration time.Duration, n, thread int) {
+ tb.Helper()
+
+ select {
+ case <-outerDone:
+ tb.Logf("[%s] Short path. exiting inner runWithTimeout by outer exit event for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n)
+
+ return
+ default:
+ }
+
+ timeout := time.NewTimer(timeoutDuration)
+ defer timeout.Stop()
+
+ doneCh := make(chan struct{})
+
+ isError := new(int32)
+
+ go func() {
+ defer close(doneCh)
+
+ select {
+ case <-outerDone:
+ return
+ default:
+ fn(doneCh)
+ }
+ }()
+
+ const isDebug = false
+
+ var stack string
+
+ select {
+ case <-outerDone:
+ tb.Logf("[%s] exiting inner runWithTimeout by outer exit event for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n)
+ case <-doneCh:
+ // only for debug
+ //tb.Logf("[%s] exiting inner runWithTimeout by successful call for %q, thread %d, iteration %d", common.NowMilliseconds(), name, thread, n)
+ case <-timeout.C:
+ atomic.StoreInt32(isError, 1)
+
+ if isDebug {
+ stack = string(debug.Stack(true))
+ }
+
+ tb.Errorf("[%s] %s timeouted, thread %d, iteration %d. Stack %s", common.NowMilliseconds(), name, thread, n, stack)
+ }
+}
diff --git a/core/txpool2_test.go b/core/txpool2_test.go
new file mode 100644
index 0000000000..45f784f343
--- /dev/null
+++ b/core/txpool2_test.go
@@ -0,0 +1,229 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package core
+
+import (
+ "crypto/ecdsa"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/event"
+)
+
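+// pricedValuedTransaction returns a signed legacy transaction that transfers
+// the given value; used below to build balance-overdraft scenarios.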
+func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
+ tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(value), gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
+ return tx
+}
+
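+// count returns the pool's pending and queued totals and fails the test if
+// the pool's internal invariants are violated.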
+func count(t *testing.T, pool *TxPool) (pending int, queued int) {
+ t.Helper()
+
+ pending, queued = pool.stats()
+
+ if err := validateTxPoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+
+ return pending, queued
+}
+
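+// fillPool saturates the pool's pending slots with executable transactions
+// from freshly funded accounts and sanity-checks the result.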
+func fillPool(t *testing.T, pool *TxPool) {
+ t.Helper()
+ // Create a number of test accounts, fund them and make transactions
+ executableTxs := types.Transactions{}
+ nonExecutableTxs := types.Transactions{}
+
+ for i := 0; i < 384; i++ {
+ key, _ := crypto.GenerateKey()
+ pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(10000000000))
+ // Add executable ones
+ for j := 0; j < int(pool.config.AccountSlots); j++ {
+ executableTxs = append(executableTxs, pricedTransaction(uint64(j), 100000, big.NewInt(300), key))
+ }
+ }
+ // Import the batch and verify that limits have been enforced
+ pool.AddRemotesSync(executableTxs)
+ pool.AddRemotesSync(nonExecutableTxs)
+ pending, queued := pool.Stats()
+ slots := pool.all.Slots()
+ // sanity-check that the test prerequisites are ok (pending full)
+ if have, want := pending, slots; have != want {
+ t.Fatalf("have %d, want %d", have, want)
+ }
+
+ if have, want := queued, 0; have != want {
+ t.Fatalf("have %d, want %d", have, want)
+ }
+
+ t.Logf("pool.config: GlobalSlots=%d, GlobalQueue=%d\n", pool.config.GlobalSlots, pool.config.GlobalQueue)
+ t.Logf("pending: %d queued: %d, all: %d\n", pending, queued, slots)
+}
+
+// Tests that if a batch of high-priced non-executables arrives, it does not
+// kick out executable transactions
+func TestTransactionFutureAttack(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the limit enforcement with
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+ config := testTxPoolConfig
+ config.GlobalQueue = 100
+ config.GlobalSlots = 100
+ pool := NewTxPool(config, eip1559Config, blockchain)
+
+ defer pool.Stop()
+ fillPool(t, pool)
+ pending, _ := pool.Stats()
+ // Now the future-transaction attack starts: add a bunch of expensive non-executables and see if the pending count drops
+ {
+ key, _ := crypto.GenerateKey()
+ pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+ futureTxs := types.Transactions{}
+ for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ {
+ futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key))
+ }
+ for i := 0; i < 5; i++ {
+ pool.AddRemotesSync(futureTxs)
+ newPending, newQueued := count(t, pool)
+ t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots())
+ }
+ }
+
+ newPending, _ := pool.Stats()
+ // Pending should not have been touched
+ if have, want := newPending, pending; have < want {
+ t.Errorf("wrong pending-count, have %d, want %d (GlobalSlots: %d)",
+ have, want, pool.config.GlobalSlots)
+ }
+}
+
+// Tests that if a batch of high-priced non-executables arrives, it does not
+// kick out executable transactions
+func TestTransactionFuture1559(t *testing.T) {
+ t.Parallel()
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+ pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
+
+ defer pool.Stop()
+
+ // Create a number of test accounts, fund them and make transactions
+ fillPool(t, pool)
+ pending, _ := pool.Stats()
+
+ // Now the future-transaction attack starts: add a bunch of expensive non-executables and see if the pending count drops
+ {
+ key, _ := crypto.GenerateKey()
+ pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+ futureTxs := types.Transactions{}
+ for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ {
+ futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key))
+ }
+ pool.AddRemotesSync(futureTxs)
+ }
+
+ newPending, _ := pool.Stats()
+ // Pending should not have been touched
+ if have, want := newPending, pending; have != want {
+ t.Errorf("Wrong pending-count, have %d, want %d (GlobalSlots: %d)",
+ have, want, pool.config.GlobalSlots)
+ }
+}
+
+// Tests that if a batch of balance-overdraft txs arrives, it does not kick out
+// executable transactions
+func TestTransactionZAttack(t *testing.T) {
+ t.Parallel()
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
+ pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
+
+ defer pool.Stop()
+
+ // Create a number of test accounts, fund them and make transactions
+ fillPool(t, pool)
+
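+ // countInvalidPending counts pending transactions whose cumulative value overdraws the sender's balance.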
+ countInvalidPending := func() int {
+ t.Helper()
+
+ var ivpendingNum int
+
+ pendingtxs, _ := pool.Content()
+
+ for account, txs := range pendingtxs {
+ curBalance := new(big.Int).Set(pool.currentState.GetBalance(account))
+ for _, tx := range txs {
+ if curBalance.Cmp(tx.Value()) <= 0 {
+ ivpendingNum++
+ } else {
+ curBalance.Sub(curBalance, tx.Value())
+ }
+ }
+ }
+
+ if err := validateTxPoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+
+ return ivpendingNum
+ }
+ ivPending := countInvalidPending()
+ t.Logf("invalid pending: %d\n", ivPending)
+
+ // Now the DETER-Z attack starts: add a bunch of expensive non-executables (from N accounts) along with balance-overdraft txs (from one account), and see if the pending count drops
+ for j := 0; j < int(pool.config.GlobalQueue); j++ {
+ futureTxs := types.Transactions{}
+ key, _ := crypto.GenerateKey()
+ pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+ futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key))
+ pool.AddRemotesSync(futureTxs)
+ }
+
+ overDraftTxs := types.Transactions{}
+ {
+ key, _ := crypto.GenerateKey()
+ pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+ for j := 0; j < int(pool.config.GlobalSlots); j++ {
+ overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 60000000000, 21000, big.NewInt(500), key))
+ }
+ }
+ pool.AddRemotesSync(overDraftTxs)
+ pool.AddRemotesSync(overDraftTxs)
+ pool.AddRemotesSync(overDraftTxs)
+ pool.AddRemotesSync(overDraftTxs)
+ pool.AddRemotesSync(overDraftTxs)
+
+ newPending, newQueued := count(t, pool)
+ newIvPending := countInvalidPending()
+
+ t.Logf("pool.all.Slots(): %d\n", pool.all.Slots())
+ t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots())
+ t.Logf("invalid pending: %d\n", newIvPending)
+
+ // Pending should not have been touched
+ if newIvPending != ivPending {
+ t.Errorf("Wrong invalid pending-count, have %d, want %d (GlobalSlots: %d, queued: %d)",
+ newIvPending, ivPending, pool.config.GlobalSlots, newQueued)
+ }
+}
diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go
index 8ad5e739e9..509f86b622 100644
--- a/core/types/access_list_tx.go
+++ b/core/types/access_list_tx.go
@@ -19,6 +19,8 @@ package types
import (
"math/big"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/common"
)
@@ -44,15 +46,16 @@ func (al AccessList) StorageKeys() int {
// AccessListTx is the data of EIP-2930 access list transactions.
type AccessListTx struct {
- ChainID *big.Int // destination chain ID
- Nonce uint64 // nonce of sender account
- GasPrice *big.Int // wei per gas
- Gas uint64 // gas limit
- To *common.Address `rlp:"nil"` // nil means contract creation
- Value *big.Int // wei amount
- Data []byte // contract invocation input data
- AccessList AccessList // EIP-2930 access list
- V, R, S *big.Int // signature values
+ ChainID *big.Int // destination chain ID
+ Nonce uint64 // nonce of sender account
+ GasPrice *big.Int // wei per gas
+ gasPriceUint256 *uint256.Int // wei per gas
+ Gas uint64 // gas limit
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int // wei amount
+ Data []byte // contract invocation input data
+ AccessList AccessList // EIP-2930 access list
+ V, R, S *big.Int // signature values
}
// copy creates a deep copy of the transaction data and initializes all fields.
@@ -80,6 +83,12 @@ func (tx *AccessListTx) copy() TxData {
}
if tx.GasPrice != nil {
cpy.GasPrice.Set(tx.GasPrice)
+
+ if cpy.gasPriceUint256 != nil {
+ cpy.gasPriceUint256.Set(tx.gasPriceUint256)
+ } else {
+ cpy.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice)
+ }
}
if tx.V != nil {
cpy.V.Set(tx.V)
@@ -100,11 +109,39 @@ func (tx *AccessListTx) accessList() AccessList { return tx.AccessList }
func (tx *AccessListTx) data() []byte { return tx.Data }
func (tx *AccessListTx) gas() uint64 { return tx.Gas }
func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice }
-func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice }
-func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice }
-func (tx *AccessListTx) value() *big.Int { return tx.Value }
-func (tx *AccessListTx) nonce() uint64 { return tx.Nonce }
-func (tx *AccessListTx) to() *common.Address { return tx.To }
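+// gasPriceU256 lazily converts the transaction's big.Int gas price to a cached
+// uint256; the tip- and fee-cap variants below return the same cached value,
+// since all three equal the gas price for this tx type.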
+func (tx *AccessListTx) gasPriceU256() *uint256.Int {
+ if tx.gasPriceUint256 != nil {
+ return tx.gasPriceUint256
+ }
+
+ tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice)
+
+ return tx.gasPriceUint256
+}
+
+func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice }
+func (tx *AccessListTx) gasTipCapU256() *uint256.Int {
+ if tx.gasPriceUint256 != nil {
+ return tx.gasPriceUint256
+ }
+
+ tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice)
+
+ return tx.gasPriceUint256
+}
+func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice }
+func (tx *AccessListTx) gasFeeCapU256() *uint256.Int {
+ if tx.gasPriceUint256 != nil {
+ return tx.gasPriceUint256
+ }
+
+ tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice)
+
+ return tx.gasPriceUint256
+}
+func (tx *AccessListTx) value() *big.Int { return tx.Value }
+func (tx *AccessListTx) nonce() uint64 { return tx.Nonce }
+func (tx *AccessListTx) to() *common.Address { return tx.To }
func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S
diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go
index 53f246ea1f..532544d54e 100644
--- a/core/types/dynamic_fee_tx.go
+++ b/core/types/dynamic_fee_tx.go
@@ -19,19 +19,23 @@ package types
import (
"math/big"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/common"
)
type DynamicFeeTx struct {
- ChainID *big.Int
- Nonce uint64
- GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas
- GasFeeCap *big.Int // a.k.a. maxFeePerGas
- Gas uint64
- To *common.Address `rlp:"nil"` // nil means contract creation
- Value *big.Int
- Data []byte
- AccessList AccessList
+ ChainID *big.Int
+ Nonce uint64
+ GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas
+ gasTipCapUint256 *uint256.Int // a.k.a. maxPriorityFeePerGas
+ GasFeeCap *big.Int // a.k.a. maxFeePerGas
+ gasFeeCapUint256 *uint256.Int // a.k.a. maxFeePerGas
+ Gas uint64
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int
+ Data []byte
+ AccessList AccessList
// Signature values
V *big.Int `json:"v" gencodec:"required"`
@@ -65,9 +69,21 @@ func (tx *DynamicFeeTx) copy() TxData {
}
if tx.GasTipCap != nil {
cpy.GasTipCap.Set(tx.GasTipCap)
+
+ if cpy.gasTipCapUint256 != nil {
+ cpy.gasTipCapUint256.Set(tx.gasTipCapUint256)
+ } else {
+ cpy.gasTipCapUint256, _ = uint256.FromBig(tx.GasTipCap)
+ }
}
if tx.GasFeeCap != nil {
cpy.GasFeeCap.Set(tx.GasFeeCap)
+
+ if cpy.gasFeeCapUint256 != nil {
+ cpy.gasFeeCapUint256.Set(tx.gasFeeCapUint256)
+ } else {
+ cpy.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap)
+ }
}
if tx.V != nil {
cpy.V.Set(tx.V)
@@ -88,11 +104,38 @@ func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList }
func (tx *DynamicFeeTx) data() []byte { return tx.Data }
func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas }
func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
-func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap }
-func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap }
-func (tx *DynamicFeeTx) value() *big.Int { return tx.Value }
-func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce }
-func (tx *DynamicFeeTx) to() *common.Address { return tx.To }
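+// The U256 accessors below lazily convert the big.Int fee and tip caps to
+// uint256 and cache the results; gasPriceU256 mirrors gasPrice by returning
+// the fee cap.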
+func (tx *DynamicFeeTx) gasFeeCapU256() *uint256.Int {
+ if tx.gasFeeCapUint256 != nil {
+ return tx.gasFeeCapUint256
+ }
+
+ tx.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap)
+
+ return tx.gasFeeCapUint256
+}
+func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap }
+func (tx *DynamicFeeTx) gasTipCapU256() *uint256.Int {
+ if tx.gasTipCapUint256 != nil {
+ return tx.gasTipCapUint256
+ }
+
+ tx.gasTipCapUint256, _ = uint256.FromBig(tx.GasTipCap)
+
+ return tx.gasTipCapUint256
+}
+func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap }
+func (tx *DynamicFeeTx) gasPriceU256() *uint256.Int {
+ if tx.gasFeeCapUint256 != nil {
+ return tx.gasFeeCapUint256
+ }
+
+ tx.gasFeeCapUint256, _ = uint256.FromBig(tx.GasFeeCap)
+
+ return tx.gasFeeCapUint256
+}
+func (tx *DynamicFeeTx) value() *big.Int { return tx.Value }
+func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce }
+func (tx *DynamicFeeTx) to() *common.Address { return tx.To }
func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S
diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go
index cb86bed772..72fcd34fa5 100644
--- a/core/types/legacy_tx.go
+++ b/core/types/legacy_tx.go
@@ -19,18 +19,21 @@ package types
import (
"math/big"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/common"
)
// LegacyTx is the transaction data of regular Ethereum transactions.
type LegacyTx struct {
- Nonce uint64 // nonce of sender account
- GasPrice *big.Int // wei per gas
- Gas uint64 // gas limit
- To *common.Address `rlp:"nil"` // nil means contract creation
- Value *big.Int // wei amount
- Data []byte // contract invocation input data
- V, R, S *big.Int // signature values
+ Nonce uint64 // nonce of sender account
+ GasPrice *big.Int // wei per gas
+ gasPriceUint256 *uint256.Int // wei per gas
+ Gas uint64 // gas limit
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int // wei amount
+ Data []byte // contract invocation input data
+ V, R, S *big.Int // signature values
}
// NewTransaction creates an unsigned legacy transaction.
@@ -77,6 +80,12 @@ func (tx *LegacyTx) copy() TxData {
}
if tx.GasPrice != nil {
cpy.GasPrice.Set(tx.GasPrice)
+
+ if cpy.gasPriceUint256 != nil {
+ cpy.gasPriceUint256.Set(tx.gasPriceUint256)
+ } else {
+ cpy.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice)
+ }
}
if tx.V != nil {
cpy.V.Set(tx.V)
@@ -97,11 +106,38 @@ func (tx *LegacyTx) accessList() AccessList { return nil }
func (tx *LegacyTx) data() []byte { return tx.Data }
func (tx *LegacyTx) gas() uint64 { return tx.Gas }
func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice }
-func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice }
-func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice }
-func (tx *LegacyTx) value() *big.Int { return tx.Value }
-func (tx *LegacyTx) nonce() uint64 { return tx.Nonce }
-func (tx *LegacyTx) to() *common.Address { return tx.To }
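+// As with the other tx types, the U256 accessors below lazily convert the
+// big.Int gas price to a cached uint256 value.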
+func (tx *LegacyTx) gasPriceU256() *uint256.Int {
+ if tx.gasPriceUint256 != nil {
+ return tx.gasPriceUint256
+ }
+
+ tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice)
+
+ return tx.gasPriceUint256
+}
+func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice }
+func (tx *LegacyTx) gasTipCapU256() *uint256.Int {
+ if tx.gasPriceUint256 != nil {
+ return tx.gasPriceUint256
+ }
+
+ tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice)
+
+ return tx.gasPriceUint256
+}
+func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice }
+func (tx *LegacyTx) gasFeeCapU256() *uint256.Int {
+ if tx.gasPriceUint256 != nil {
+ return tx.gasPriceUint256
+ }
+
+ tx.gasPriceUint256, _ = uint256.FromBig(tx.GasPrice)
+
+ return tx.gasPriceUint256
+}
+func (tx *LegacyTx) value() *big.Int { return tx.Value }
+func (tx *LegacyTx) nonce() uint64 { return tx.Nonce }
+func (tx *LegacyTx) to() *common.Address { return tx.To }
func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S
diff --git a/core/types/transaction.go b/core/types/transaction.go
index e0e52f25bc..9b89f12517 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -25,6 +25,8 @@ import (
"sync/atomic"
"time"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
@@ -53,9 +55,9 @@ type Transaction struct {
time time.Time // Time first seen locally (spam avoidance)
// caches
- hash atomic.Value
- size atomic.Value
- from atomic.Value
+ hash atomic.Pointer[common.Hash]
+ size atomic.Pointer[common.StorageSize]
+ from atomic.Pointer[sigCache]
}
// NewTx creates a new transaction.
@@ -77,8 +79,11 @@ type TxData interface {
data() []byte
gas() uint64
gasPrice() *big.Int
+ gasPriceU256() *uint256.Int
gasTipCap() *big.Int
+ gasTipCapU256() *uint256.Int
gasFeeCap() *big.Int
+ gasFeeCapU256() *uint256.Int
value() *big.Int
nonce() uint64
to() *common.Address
@@ -194,7 +199,8 @@ func (tx *Transaction) setDecoded(inner TxData, size int) {
tx.inner = inner
tx.time = time.Now()
if size > 0 {
- tx.size.Store(common.StorageSize(size))
+ v := float64(size)
+ tx.size.Store((*common.StorageSize)(&v))
}
}
@@ -265,16 +271,23 @@ func (tx *Transaction) AccessList() AccessList { return tx.inner.accessList() }
func (tx *Transaction) Gas() uint64 { return tx.inner.gas() }
// GasPrice returns the gas price of the transaction.
-func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) }
+func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) }
+func (tx *Transaction) GasPriceRef() *big.Int { return tx.inner.gasPrice() }
+func (tx *Transaction) GasPriceUint() *uint256.Int { return tx.inner.gasPriceU256() }
// GasTipCap returns the gasTipCap per gas of the transaction.
-func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.gasTipCap()) }
+func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.gasTipCap()) }
+func (tx *Transaction) GasTipCapRef() *big.Int { return tx.inner.gasTipCap() }
+func (tx *Transaction) GasTipCapUint() *uint256.Int { return tx.inner.gasTipCapU256() }
// GasFeeCap returns the fee cap per gas of the transaction.
-func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) }
+func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) }
+func (tx *Transaction) GasFeeCapRef() *big.Int { return tx.inner.gasFeeCap() }
+func (tx *Transaction) GasFeeCapUint() *uint256.Int { return tx.inner.gasFeeCapU256() }
// Value returns the ether amount of the transaction.
-func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) }
+func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) }
+func (tx *Transaction) ValueRef() *big.Int { return tx.inner.value() }
// Nonce returns the sender account nonce of the transaction.
func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() }
@@ -287,9 +300,19 @@ func (tx *Transaction) To() *common.Address {
// Cost returns gas * gasPrice + value.
func (tx *Transaction) Cost() *big.Int {
- total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas()))
- total.Add(total, tx.Value())
- return total
+ gasPrice, _ := uint256.FromBig(tx.GasPriceRef())
+ gasPrice.Mul(gasPrice, uint256.NewInt(tx.Gas()))
+ value, _ := uint256.FromBig(tx.ValueRef())
+
+ return gasPrice.Add(gasPrice, value).ToBig()
+}
+
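+// CostUint returns gas * gasPrice + value as a uint256, avoiding the big.Int
+// allocations of Cost on hot paths.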
+func (tx *Transaction) CostUint() *uint256.Int {
+ gasPrice, _ := uint256.FromBig(tx.GasPriceRef())
+ gasPrice.Mul(gasPrice, uint256.NewInt(tx.Gas()))
+ value, _ := uint256.FromBig(tx.ValueRef())
+
+ return gasPrice.Add(gasPrice, value)
}
// RawSignatureValues returns the V, R, S signature values of the transaction.
@@ -303,11 +326,18 @@ func (tx *Transaction) GasFeeCapCmp(other *Transaction) int {
return tx.inner.gasFeeCap().Cmp(other.inner.gasFeeCap())
}
-// GasFeeCapIntCmp compares the fee cap of the transaction against the given fee cap.
func (tx *Transaction) GasFeeCapIntCmp(other *big.Int) int {
return tx.inner.gasFeeCap().Cmp(other)
}
+func (tx *Transaction) GasFeeCapUIntCmp(other *uint256.Int) int {
+ return tx.inner.gasFeeCapU256().Cmp(other)
+}
+
+func (tx *Transaction) GasFeeCapUIntLt(other *uint256.Int) bool {
+ return tx.inner.gasFeeCapU256().Lt(other)
+}
+
// GasTipCapCmp compares the gasTipCap of two transactions.
func (tx *Transaction) GasTipCapCmp(other *Transaction) int {
return tx.inner.gasTipCap().Cmp(other.inner.gasTipCap())
@@ -318,6 +348,14 @@ func (tx *Transaction) GasTipCapIntCmp(other *big.Int) int {
return tx.inner.gasTipCap().Cmp(other)
}
+func (tx *Transaction) GasTipCapUIntCmp(other *uint256.Int) int {
+ return tx.inner.gasTipCapU256().Cmp(other)
+}
+
+func (tx *Transaction) GasTipCapUIntLt(other *uint256.Int) bool {
+ return tx.inner.gasTipCapU256().Lt(other)
+}
+
// EffectiveGasTip returns the effective miner gasTipCap for the given base fee.
// Note: if the effective gasTipCap is negative, this method returns both error
// the actual negative value, _and_ ErrGasFeeCapTooLow
@@ -356,10 +394,73 @@ func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) i
return tx.EffectiveGasTipValue(baseFee).Cmp(other)
}
+func (tx *Transaction) EffectiveGasTipUintCmp(other *uint256.Int, baseFee *uint256.Int) int {
+ if baseFee == nil {
+ return tx.GasTipCapUIntCmp(other)
+ }
+
+ return tx.EffectiveGasTipValueUint(baseFee).Cmp(other)
+}
+
+func (tx *Transaction) EffectiveGasTipUintLt(other *uint256.Int, baseFee *uint256.Int) bool {
+ if baseFee == nil {
+ return tx.GasTipCapUIntLt(other)
+ }
+
+ return tx.EffectiveGasTipValueUint(baseFee).Lt(other)
+}
+
+func (tx *Transaction) EffectiveGasTipTxUintCmp(other *Transaction, baseFee *uint256.Int) int {
+ if baseFee == nil {
+ return tx.inner.gasTipCapU256().Cmp(other.inner.gasTipCapU256())
+ }
+
+ return tx.EffectiveGasTipValueUint(baseFee).Cmp(other.EffectiveGasTipValueUint(baseFee))
+}
+
+func (tx *Transaction) EffectiveGasTipValueUint(baseFee *uint256.Int) *uint256.Int {
+ effectiveTip, _ := tx.EffectiveGasTipUnit(baseFee)
+ return effectiveTip
+}
+
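+// EffectiveGasTipUnit is the uint256 counterpart of EffectiveGasTip: it
+// returns the effective miner tip, min(gasTipCap, gasFeeCap-baseFee), along
+// with ErrGasFeeCapTooLow when gasFeeCap is below baseFee.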
+func (tx *Transaction) EffectiveGasTipUnit(baseFee *uint256.Int) (*uint256.Int, error) {
+ if baseFee == nil {
+ return tx.GasTipCapUint(), nil
+ }
+
+ var err error
+
+ gasFeeCap := tx.GasFeeCapUint().Clone()
+
+ if gasFeeCap.Lt(baseFee) {
+ err = ErrGasFeeCapTooLow
+ }
+
+ gasTipCapUint := tx.GasTipCapUint()
+
+ if gasFeeCap.Lt(gasTipCapUint) {
+ return gasFeeCap, err
+ }
+
+ gasFeeCap.Sub(gasFeeCap, baseFee)
+
+ if !gasFeeCap.Lt(gasTipCapUint) {
+ return gasTipCapUint, err
+ }
+
+ return gasFeeCap, err
+}
+
// Hash returns the transaction hash.
func (tx *Transaction) Hash() common.Hash {
if hash := tx.hash.Load(); hash != nil {
- return hash.(common.Hash)
+ return *hash
}
var h common.Hash
@@ -368,7 +469,9 @@ func (tx *Transaction) Hash() common.Hash {
} else {
h = prefixedRlpHash(tx.Type(), tx.inner)
}
- tx.hash.Store(h)
+
+ tx.hash.Store(&h)
+
return h
}
@@ -376,11 +479,14 @@ func (tx *Transaction) Hash() common.Hash {
// encoding and returning it, or returning a previously cached value.
func (tx *Transaction) Size() common.StorageSize {
if size := tx.size.Load(); size != nil {
- return size.(common.StorageSize)
+ return *size
}
+
c := writeCounter(0)
+
rlp.Encode(&c, &tx.inner)
- tx.size.Store(common.StorageSize(c))
+ tx.size.Store((*common.StorageSize)(&c))
+
return common.StorageSize(c)
}
@@ -444,14 +550,14 @@ func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap
type TxWithMinerFee struct {
tx *Transaction
- minerFee *big.Int
+ minerFee *uint256.Int
}
// NewTxWithMinerFee creates a wrapped transaction, calculating the effective
// miner gasTipCap if a base fee is provided.
// Returns error in case of a negative effective miner gasTipCap.
-func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) {
- minerFee, err := tx.EffectiveGasTip(baseFee)
+func NewTxWithMinerFee(tx *Transaction, baseFee *uint256.Int) (*TxWithMinerFee, error) {
+ minerFee, err := tx.EffectiveGasTipUnit(baseFee)
if err != nil {
return nil, err
}
@@ -496,7 +602,7 @@ type TransactionsByPriceAndNonce struct {
txs map[common.Address]Transactions // Per account nonce-sorted list of transactions
heads TxByPriceAndTime // Next transaction for each unique account (price heap)
signer Signer // Signer for the set of transactions
- baseFee *big.Int // Current base fee
+ baseFee *uint256.Int // Current base fee
}
// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve
@@ -504,6 +610,7 @@ type TransactionsByPriceAndNonce struct {
//
// Note, the input map is reowned so the caller should not interact any more with
// if after providing it to the constructor.
+/*
func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce {
// Initialize a price and received time based heap with the head transactions
heads := make(TxByPriceAndTime, 0, len(txs))
@@ -524,6 +631,39 @@ func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transa
}
heap.Init(&heads)
+ // Assemble and return the transaction set
+ return &TransactionsByPriceAndNonce{
+ txs: txs,
+ heads: heads,
+ signer: signer,
+ baseFee: baseFee,
+ }
+}*/
+
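+// NewTransactionsByPriceAndNonce mirrors the commented-out big.Int constructor
+// above, taking the current base fee as a uint256.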
+func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *uint256.Int) *TransactionsByPriceAndNonce {
+ // Initialize a price and received time based heap with the head transactions
+ heads := make(TxByPriceAndTime, 0, len(txs))
+
+ for from, accTxs := range txs {
+ if len(accTxs) == 0 {
+ continue
+ }
+
+ acc, _ := Sender(signer, accTxs[0])
+ wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee)
+
+ // Remove transaction if sender doesn't match from, or if wrapping fails.
+ if acc != from || err != nil {
+ delete(txs, from)
+ continue
+ }
+
+ heads = append(heads, wrapped)
+ txs[from] = accTxs[1:]
+ }
+
+ heap.Init(&heads)
+
// Assemble and return the transaction set
return &TransactionsByPriceAndNonce{
txs: txs,
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index 1d0d2a4c75..959aba637a 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -130,12 +130,11 @@ func MustSignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) *Transaction
// not match the signer used in the current call.
func Sender(signer Signer, tx *Transaction) (common.Address, error) {
if sc := tx.from.Load(); sc != nil {
- sigCache := sc.(sigCache)
// If the signer used to derive from in a previous
// call is not the same as used current, invalidate
// the cache.
- if sigCache.signer.Equal(signer) {
- return sigCache.from, nil
+ if sc.signer.Equal(signer) {
+ return sc.from, nil
}
}
@@ -143,7 +142,9 @@ func Sender(signer Signer, tx *Transaction) (common.Address, error) {
if err != nil {
return common.Address{}, err
}
- tx.from.Store(sigCache{signer: signer, from: addr})
+
+ tx.from.Store(&sigCache{signer: signer, from: addr})
+
return addr, nil
}
@@ -461,10 +462,10 @@ func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *
func (fs FrontierSigner) Hash(tx *Transaction) common.Hash {
return rlpHash([]interface{}{
tx.Nonce(),
- tx.GasPrice(),
+ tx.GasPriceRef(),
tx.Gas(),
tx.To(),
- tx.Value(),
+ tx.ValueRef(),
tx.Data(),
})
}
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index a4755675cd..255a7b76b4 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -27,7 +27,10 @@ import (
"testing"
"time"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/common"
+ cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -272,14 +275,22 @@ func TestTransactionPriceNonceSort1559(t *testing.T) {
// Tests that transactions can be correctly sorted according to their price in
// decreasing order, but at the same time with increasing nonces when issued by
// the same account.
-func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
+//
+//nolint:gocognit,thelper
+func testTransactionPriceNonceSort(t *testing.T, baseFeeBig *big.Int) {
// Generate a batch of accounts to start with
keys := make([]*ecdsa.PrivateKey, 25)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
}
+
signer := LatestSignerForChainID(common.Big1)
+ var baseFee *uint256.Int
+ if baseFeeBig != nil {
+ baseFee = cmath.FromBig(baseFeeBig)
+ }
+
// Generate a batch of transactions with overlapping values, but shifted nonces
groups := map[common.Address]Transactions{}
expectedCount := 0
@@ -308,7 +319,7 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))),
Data: nil,
})
- if count == 25 && int64(gasFeeCap) < baseFee.Int64() {
+ if count == 25 && uint64(gasFeeCap) < baseFee.Uint64() {
count = i
}
}
@@ -341,12 +352,25 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
}
}
+
// If the next tx has different from account, the price must be lower than the current one
if i+1 < len(txs) {
next := txs[i+1]
fromNext, _ := Sender(signer, next)
- tip, err := txi.EffectiveGasTip(baseFee)
- nextTip, nextErr := next.EffectiveGasTip(baseFee)
+ tip, err := txi.EffectiveGasTipUnit(baseFee)
+ nextTip, nextErr := next.EffectiveGasTipUnit(baseFee)
+
+ tipBig, _ := txi.EffectiveGasTip(baseFeeBig)
+ nextTipBig, _ := next.EffectiveGasTip(baseFeeBig)
+
+ if tip.Cmp(cmath.FromBig(tipBig)) != 0 {
+ t.Fatalf("EffectiveGasTip incorrect. uint256 %q, big.Int %q, baseFee %q, baseFeeBig %q", tip.String(), tipBig.String(), baseFee.String(), baseFeeBig.String())
+ }
+
+ if nextTip.Cmp(cmath.FromBig(nextTipBig)) != 0 {
+ t.Fatalf("EffectiveGasTip next incorrect. uint256 %q, big.Int %q, baseFee %q, baseFeeBig %q", nextTip.String(), nextTipBig.String(), baseFee.String(), baseFeeBig.String())
+ }
+
if err != nil || nextErr != nil {
t.Errorf("error calculating effective tip")
}
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 9210f5486c..d7d9eeb525 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/crypto/bn256"
"github.com/ethereum/go-ethereum/params"
- //lint:ignore SA1019 Needed for precompile
+ big2 "github.com/holiman/big"
"golang.org/x/crypto/ripemd160"
)
@@ -266,9 +266,10 @@ var (
// modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198
//
// def mult_complexity(x):
-// if x <= 64: return x ** 2
-// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072
-// else: return x ** 2 // 16 + 480 * x - 199680
+//
+// if x <= 64: return x ** 2
+// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072
+// else: return x ** 2 // 16 + 480 * x - 199680
//
// where is x is max(length_of_MODULUS, length_of_BASE)
func modexpMultComplexity(x *big.Int) *big.Int {
@@ -379,15 +380,24 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
}
// Retrieve the operands and execute the exponentiation
var (
- base = new(big.Int).SetBytes(getData(input, 0, baseLen))
- exp = new(big.Int).SetBytes(getData(input, baseLen, expLen))
- mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen))
+ base = new(big2.Int).SetBytes(getData(input, 0, baseLen))
+ exp = new(big2.Int).SetBytes(getData(input, baseLen, expLen))
+ mod = new(big2.Int).SetBytes(getData(input, baseLen+expLen, modLen))
+ v []byte
)
- if mod.BitLen() == 0 {
+
+ switch {
+ case mod.BitLen() == 0:
// Modulo 0 is undefined, return zero
return common.LeftPadBytes([]byte{}, int(modLen)), nil
+ case base.BitLen() == 1: // a bit length of 1 means it's 1 (or -1).
+ // If base == 1, then we can just return base % mod (if mod >= 1, which it is)
+ v = base.Mod(base, mod).Bytes()
+ default:
+ v = base.Exp(base, exp, mod).Bytes()
}
- return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil
+
+ return common.LeftPadBytes(v, int(modLen)), nil
}
// newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point,
diff --git a/docs/cli/example_config.toml b/docs/cli/example_config.toml
index c32c40e2c6..31c73965fc 100644
--- a/docs/cli/example_config.toml
+++ b/docs/cli/example_config.toml
@@ -93,7 +93,7 @@ ethstats = "" # Reporting URL of a ethstats service (nodename:sec
vhosts = ["localhost"] # Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.
corsdomain = ["localhost"] # Comma separated list of domains from which to accept cross origin requests (browser enforced)
[jsonrpc.timeouts]
- read = "30s"
+ read = "10s"
write = "30s"
idle = "2m0s"
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 60aea7527e..c8825dc582 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -236,11 +236,18 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri
}
func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
- return b.eth.txPool.AddLocal(signedTx)
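+ // Return the unwrapped error, if any, so callers see the underlying txpool failure.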
+ err := b.eth.txPool.AddLocal(signedTx)
+ if err != nil {
+ if unwrapped := errors.Unwrap(err); unwrapped != nil {
+ return unwrapped
+ }
+ }
+
+ return err
}
func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
- pending := b.eth.txPool.Pending(false)
+ pending := b.eth.txPool.Pending(context.Background(), false)
var txs types.Transactions
for _, batch := range pending {
txs = append(txs, batch...)
diff --git a/eth/bor_checkpoint_verifier.go b/eth/bor_checkpoint_verifier.go
index 61e8c382e1..ad81eb6116 100644
--- a/eth/bor_checkpoint_verifier.go
+++ b/eth/bor_checkpoint_verifier.go
@@ -26,6 +26,7 @@ func newCheckpointVerifier(verifyFn func(ctx context.Context, handler *ethHandle
)
// check if we have the checkpoint blocks
+ //nolint:contextcheck
head := handler.ethAPI.BlockNumber()
if head < hexutil.Uint64(endBlock) {
log.Debug("Head block behind checkpoint block", "head", head, "checkpoint end block", endBlock)
diff --git a/eth/handler.go b/eth/handler.go
index 8e6d89f9ef..48bdf8eb15 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -17,6 +17,7 @@
package eth
import (
+ "context"
"errors"
"math"
"math/big"
@@ -69,7 +70,7 @@ type txPool interface {
// Pending should return pending transactions.
// The slice should be modifiable by the caller.
- Pending(enforceTips bool) map[common.Address]types.Transactions
+ Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions
// SubscribeNewTxsEvent should return an event subscription of
// NewTxsEvent and send events to the given channel.
diff --git a/eth/handler_test.go b/eth/handler_test.go
index c6d7811d10..7a14619159 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -17,6 +17,7 @@
package eth
import (
+ "context"
"math/big"
"sort"
"sync"
@@ -92,7 +93,7 @@ func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error {
}
// Pending returns all the transactions known to the pool
-func (p *testTxPool) Pending(enforceTips bool) map[common.Address]types.Transactions {
+func (p *testTxPool) Pending(ctx context.Context, enforceTips bool) map[common.Address]types.Transactions {
p.lock.RLock()
defer p.lock.RUnlock()
diff --git a/eth/sync.go b/eth/sync.go
index aa79b6181c..377acff95c 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -17,6 +17,7 @@
package eth
import (
+ "context"
"errors"
"math/big"
"sync/atomic"
@@ -44,20 +45,24 @@ func (h *handler) syncTransactions(p *eth.Peer) {
//
// TODO(karalabe): Figure out if we could get away with random order somehow
var txs types.Transactions
- pending := h.txpool.Pending(false)
+
+ pending := h.txpool.Pending(context.Background(), false)
for _, batch := range pending {
txs = append(txs, batch...)
}
+
if len(txs) == 0 {
return
}
// The eth/65 protocol introduces proper transaction announcements, so instead
// of dripping transactions across multiple peers, just send the entire list as
// an announcement and let the remote side decide what they need (likely nothing).
+
hashes := make([]common.Hash, len(txs))
for i, tx := range txs {
hashes[i] = tx.Hash()
}
+
p.AsyncSendPooledTransactionHashes(hashes)
}
diff --git a/go.mod b/go.mod
index f55b2f9aa7..69fa5990bd 100644
--- a/go.mod
+++ b/go.mod
@@ -12,13 +12,13 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.1.1
github.com/aws/aws-sdk-go-v2/credentials v1.1.1
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1
- github.com/btcsuite/btcd/btcec/v2 v2.1.2
+ github.com/btcsuite/btcd/btcec/v2 v2.1.3
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.14.0
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v1.8.0
- github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
+ github.com/docker/docker v1.6.1
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48
github.com/edsrzf/mmap-go v1.0.0
github.com/fatih/color v1.7.0
@@ -37,6 +37,7 @@ require (
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/hashicorp/hcl/v2 v2.10.1
+ github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.0
github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204
@@ -69,12 +70,12 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0
go.opentelemetry.io/otel/sdk v1.2.0
go.uber.org/goleak v1.1.12
- golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122
- golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
- golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10
- golang.org/x/text v0.3.7
+ golang.org/x/crypto v0.1.0
+ golang.org/x/sync v0.1.0
+ golang.org/x/sys v0.6.0
+ golang.org/x/text v0.8.0
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
- golang.org/x/tools v0.1.12
+ golang.org/x/tools v0.6.0
gonum.org/v1/gonum v0.11.0
google.golang.org/grpc v1.48.0
google.golang.org/protobuf v1.28.0
@@ -90,7 +91,7 @@ require github.com/gammazero/deque v0.2.1 // indirect
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect
- github.com/Masterminds/goutils v1.1.0 // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
@@ -136,9 +137,9 @@ require (
go.opentelemetry.io/otel/trace v1.2.0
go.opentelemetry.io/proto/otlp v0.10.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
- golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
- golang.org/x/net v0.0.0-20220728030405-41545e8bf201 // indirect
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
+ golang.org/x/mod v0.8.0 // indirect
+ golang.org/x/net v0.8.0 // indirect
+ golang.org/x/term v0.6.0 // indirect
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 4b312ccfb1..61c9fd1ca5 100644
--- a/go.sum
+++ b/go.sum
@@ -33,8 +33,9 @@ github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d h1:RO27lg
github.com/JekaMas/go-grpc-net-conn v0.0.0-20220708155319-6aff21f2d13d/go.mod h1:romz7UPgSYhfJkKOalzEEyV6sWtt/eAEm0nX2aOrod0=
github.com/JekaMas/workerpool v1.1.5 h1:xmrx2Zyft95CEGiEqzDxiawptCIRZQ0zZDhTGDFOCaw=
github.com/JekaMas/workerpool v1.1.5/go.mod h1:IoDWPpwMcA27qbuugZKeBslDrgX09lVmksuh9sjzbhc=
-github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
@@ -84,8 +85,8 @@ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQ
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/btcsuite/btcd/btcec/v2 v2.1.2 h1:YoYoC9J0jwfukodSBMzZYUVQ8PTiYg4BnOWiJVzTmLs=
-github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
+github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE=
+github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0 h1:MSskdM4/xJYcFzy0altH/C/xHopifpWzHUi1JeVI34Q=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
@@ -136,8 +137,8 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M=
-github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.6.1 h1:4xYASHy5cScPkLD7PO0uTmnVc860m9NarPN1X8zeMe8=
+github.com/docker/docker v1.6.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
@@ -275,6 +276,8 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl/v2 v2.10.1 h1:h4Xx4fsrRE26ohAk/1iGF/JBqRQbyUqu5Lvj60U54ys=
github.com/hashicorp/hcl/v2 v2.10.1/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg=
+github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
+github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
@@ -535,8 +538,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 h1:NvGWuYG8dkDHFSKksI1P9faiVJ9rayE6l0+ouWVIDs8=
-golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -569,8 +572,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -600,8 +603,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220728030405-41545e8bf201 h1:bvOltf3SADAfG05iRml8lAB3qjoEX5RCyN4K6G5v3N0=
-golang.org/x/net v0.0.0-20220728030405-41545e8bf201/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -615,8 +618,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -658,12 +661,12 @@ golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -671,8 +674,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -709,8 +713,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go
index ca7a235ace..b733f36988 100644
--- a/internal/cli/server/config.go
+++ b/internal/cli/server/config.go
@@ -540,7 +540,7 @@ func DefaultConfig() *Config {
VHost: []string{"localhost"},
},
HttpTimeout: &HttpTimeouts{
- ReadTimeout: 30 * time.Second,
+ ReadTimeout: 10 * time.Second,
WriteTimeout: 30 * time.Second,
IdleTimeout: 120 * time.Second,
},
diff --git a/internal/cli/server/pprof/pprof.go b/internal/cli/server/pprof/pprof.go
index 44034f3bb8..69056bd0fb 100644
--- a/internal/cli/server/pprof/pprof.go
+++ b/internal/cli/server/pprof/pprof.go
@@ -61,6 +61,28 @@ func CPUProfile(ctx context.Context, sec int) ([]byte, map[string]string, error)
}, nil
}
+// CPUProfileWithChannel generates a CPU profile until done is signalled, capped at 30 seconds
+func CPUProfileWithChannel(done chan bool) ([]byte, map[string]string, error) {
+ var buf bytes.Buffer
+ if err := pprof.StartCPUProfile(&buf); err != nil {
+ return nil, nil, err
+ }
+
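+ // Stop on the caller's done signal, or after a 30s safety cap.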
+ select {
+ case <-done:
+ case <-time.After(30 * time.Second):
+ }
+
+ pprof.StopCPUProfile()
+
+ return buf.Bytes(),
+ map[string]string{
+ "X-Content-Type-Options": "nosniff",
+ "Content-Type": "application/octet-stream",
+ "Content-Disposition": `attachment; filename="profile"`,
+ }, nil
+}
+
// Trace runs a trace profile for a given duration
func Trace(ctx context.Context, sec int) ([]byte, map[string]string, error) {
if sec <= 0 {
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 8ba6ea0b91..f130813234 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"math/big"
+ "runtime"
"strings"
"time"
@@ -1457,16 +1458,26 @@ func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, conf
func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *params.ChainConfig, db ethdb.Database) *RPCTransaction {
txs := b.Transactions()
- borReceipt := rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config)
- if borReceipt != nil {
- if borReceipt.TxHash != (common.Hash{}) {
- borTx, _, _, _ := rawdb.ReadBorTransactionWithBlockHash(db, borReceipt.TxHash, b.Hash())
- if borTx != nil {
- txs = append(txs, borTx)
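+ // Allow index == len(txs): that extra slot is reserved for the bor
+ // state-sync transaction appended below.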
+ if index >= uint64(len(txs)+1) {
+ return nil
+ }
+
+ var borReceipt *types.Receipt
+
+ // Read the bor receipt if a state-sync transaction is requested
+ if index == uint64(len(txs)) {
+ borReceipt = rawdb.ReadBorReceipt(db, b.Hash(), b.NumberU64(), config)
+ if borReceipt != nil {
+ if borReceipt.TxHash != (common.Hash{}) {
+ borTx, _, _, _ := rawdb.ReadBorTransactionWithBlockHash(db, borReceipt.TxHash, b.Hash())
+ if borTx != nil {
+ txs = append(txs, borTx)
+ }
}
}
}
+ // If the index is still out of range after appending the bor state-sync transaction, the requested transaction index is invalid
if index >= uint64(len(txs)) {
return nil
}
@@ -1474,7 +1485,7 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *param
rpcTx := newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee(), config)
// If the transaction is a bor transaction, we need to set the hash to the derived bor tx hash. BorTx is always the last index.
- if borReceipt != nil && int(index) == len(txs)-1 {
+ if borReceipt != nil && index == uint64(len(txs)-1) {
rpcTx.Hash = borReceipt.TxHash
}
@@ -2222,6 +2233,21 @@ func (api *PrivateDebugAPI) PurgeCheckpointWhitelist() {
api.b.PurgeCheckpointWhitelist()
}
+// GetTraceStack returns the stack traces of all running goroutines
+func (api *PrivateDebugAPI) GetTraceStack() string {
+ buf := make([]byte, 1024)
+
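+ // runtime.Stack truncates to the buffer size; keep doubling until the
+ // full dump of all goroutines fits.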
+ for {
+ n := runtime.Stack(buf, true)
+
+ if n < len(buf) {
+ return string(buf)
+ }
+
+ buf = make([]byte, 2*len(buf))
+ }
+}
+
// PublicNetAPI offers network related RPC methods
type PublicNetAPI struct {
net *p2p.Server
diff --git a/internal/testlog/testlog.go b/internal/testlog/testlog.go
index a5836b8446..93d6f27086 100644
--- a/internal/testlog/testlog.go
+++ b/internal/testlog/testlog.go
@@ -148,3 +148,35 @@ func (l *logger) flush() {
}
l.h.buf = nil
}
+
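+// OnTrace runs fn only when the handler's level admits trace output; the
+// hooks below do the same for their respective levels, mirroring log.Logger.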
+func (l *logger) OnTrace(fn func(l log.Logging)) {
+ if l.GetHandler().Level() >= log.LvlTrace {
+ fn(l.Trace)
+ }
+}
+
+func (l *logger) OnDebug(fn func(l log.Logging)) {
+ if l.GetHandler().Level() >= log.LvlDebug {
+ fn(l.Debug)
+ }
+}
+
+func (l *logger) OnInfo(fn func(l log.Logging)) {
+ if l.GetHandler().Level() >= log.LvlInfo {
+ fn(l.Info)
+ }
+}
+
+func (l *logger) OnWarn(fn func(l log.Logging)) {
+ if l.GetHandler().Level() >= log.LvlWarn {
+ fn(l.Warn)
+ }
+}
+
+func (l *logger) OnError(fn func(l log.Logging)) {
+ if l.GetHandler().Level() >= log.LvlError {
+ fn(l.Error)
+ }
+}
+
+func (l *logger) OnCrit(fn func(l log.Logging)) {
+ if l.GetHandler().Level() >= log.LvlCrit {
+ fn(l.Crit)
+ }
+}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index c823f096d6..38ce69e33e 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -512,6 +512,11 @@ web3._extend({
call: 'debug_purgeCheckpointWhitelist',
params: 0,
}),
+ new web3._extend.Method({
+ name: 'getTraceStack',
+ call: 'debug_getTraceStack',
+ params: 0,
+ }),
],
properties: []
});
diff --git a/les/handler_test.go b/les/handler_test.go
index 3ceabdf8ec..af3324b042 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -617,7 +617,7 @@ func testTransactionStatus(t *testing.T, protocol int) {
sendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
}
if err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
- t.Errorf("transaction status mismatch")
+ t.Error("transaction status mismatch", err)
}
}
signer := types.HomesteadSigner{}
diff --git a/les/server_requests.go b/les/server_requests.go
index 3595a6ab38..b31c11c9d0 100644
--- a/les/server_requests.go
+++ b/les/server_requests.go
@@ -507,25 +507,39 @@ func handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) {
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
+
amount := uint64(len(r.Txs))
+
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
stats := make([]light.TxStatus, len(r.Txs))
+
+ var (
+ err error
+ addFn func(transaction *types.Transaction) error
+ )
+
for i, tx := range r.Txs {
if i != 0 && !waitOrStop() {
return nil
}
+
hash := tx.Hash()
stats[i] = txStatus(backend, hash)
+
if stats[i].Status == core.TxStatusUnknown {
- addFn := backend.TxPool().AddRemotes
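+ // Queue the transaction directly; AddRemote returns a single error
+ // rather than the one-element slice AddRemotes would.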
+ addFn = backend.TxPool().AddRemote
+
// Add txs synchronously for testing purpose
if backend.AddTxsSync() {
- addFn = backend.TxPool().AddRemotesSync
+ addFn = backend.TxPool().AddRemoteSync
}
- if errs := addFn([]*types.Transaction{tx}); errs[0] != nil {
- stats[i].Error = errs[0].Error()
+
+ if err = addFn(tx); err != nil {
+ stats[i].Error = err.Error()
+
continue
}
+
stats[i] = txStatus(backend, hash)
}
}
diff --git a/log/logger.go b/log/logger.go
index 2b96681a82..c2678259bf 100644
--- a/log/logger.go
+++ b/log/logger.go
@@ -106,6 +106,8 @@ type RecordKeyNames struct {
Ctx string
}
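+// Logging is the shared signature of the leveled log methods (Trace, Debug,
+// Info, ...); the On* hooks hand the matching method to their callback.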
+type Logging func(msg string, ctx ...interface{})
+
// A Logger writes key/value pairs to a Handler
type Logger interface {
// New returns a new Logger that has this logger's context plus the given context
@@ -124,6 +126,13 @@ type Logger interface {
Warn(msg string, ctx ...interface{})
Error(msg string, ctx ...interface{})
Crit(msg string, ctx ...interface{})
+
+ OnTrace(func(l Logging))
+ OnDebug(func(l Logging))
+ OnInfo(func(l Logging))
+ OnWarn(func(l Logging))
+ OnError(func(l Logging))
+ OnCrit(func(l Logging))
}
type logger struct {
@@ -198,6 +207,38 @@ func (l *logger) SetHandler(h Handler) {
l.h.Swap(h)
}
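+// OnTrace invokes fn only when the handler's level admits trace output, so
+// callers can avoid building expensive log arguments that would be discarded;
+// the On* hooks below do the same for their levels.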
+func (l *logger) OnTrace(fn func(l Logging)) {
+ if l.GetHandler().Level() >= LvlTrace {
+ fn(l.Trace)
+ }
+}
+
+func (l *logger) OnDebug(fn func(l Logging)) {
+ if l.GetHandler().Level() >= LvlDebug {
+ fn(l.Debug)
+ }
+}
+
+func (l *logger) OnInfo(fn func(l Logging)) {
+ if l.GetHandler().Level() >= LvlInfo {
+ fn(l.Info)
+ }
+}
+
+func (l *logger) OnWarn(fn func(l Logging)) {
+ if l.GetHandler().Level() >= LvlWarn {
+ fn(l.Warn)
+ }
+}
+
+func (l *logger) OnError(fn func(l Logging)) {
+ if l.GetHandler().Level() >= LvlError {
+ fn(l.Error)
+ }
+}
+
+func (l *logger) OnCrit(fn func(l Logging)) {
+ if l.GetHandler().Level() >= LvlCrit {
+ fn(l.Crit)
+ }
+}
+
func normalize(ctx []interface{}) []interface{} {
// if the caller passed a Ctx object, then expand it
if len(ctx) == 1 {
diff --git a/log/root.go b/log/root.go
index 9fb4c5ae0b..04b80f4a02 100644
--- a/log/root.go
+++ b/log/root.go
@@ -60,6 +60,38 @@ func Crit(msg string, ctx ...interface{}) {
os.Exit(1)
}
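+// OnTrace and the hooks below mirror Logger.On* on the root logger: fn runs
+// only when the root handler's level admits the corresponding level.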
+func OnTrace(fn func(l Logging)) {
+ if root.GetHandler().Level() >= LvlTrace {
+ fn(root.Trace)
+ }
+}
+
+func OnDebug(fn func(l Logging)) {
+ if root.GetHandler().Level() >= LvlDebug {
+ fn(root.Debug)
+ }
+}
+
+func OnInfo(fn func(l Logging)) {
+ if root.GetHandler().Level() >= LvlInfo {
+ fn(root.Info)
+ }
+}
+
+func OnWarn(fn func(l Logging)) {
+ if root.GetHandler().Level() >= LvlWarn {
+ fn(root.Warn)
+ }
+}
+
+func OnError(fn func(l Logging)) {
+ if root.GetHandler().Level() >= LvlError {
+ fn(root.Error)
+ }
+}
+
+func OnCrit(fn func(l Logging)) {
+ if root.GetHandler().Level() >= LvlCrit {
+ fn(root.Crit)
+ }
+}
+
// Output is a convenient alias for write, allowing for the modification of
// the calldepth (number of stack frames to skip).
// calldepth influences the reported line number of the log message.
diff --git a/miner/fake_miner.go b/miner/fake_miner.go
index 3ca2f5be77..39cc999a0a 100644
--- a/miner/fake_miner.go
+++ b/miner/fake_miner.go
@@ -47,7 +47,7 @@ func NewBorDefaultMiner(t *testing.T) *DefaultBorMiner {
ethAPI.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
spanner := bor.NewMockSpanner(ctrl)
- spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
+ spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
{
ID: 0,
Address: common.Address{0x1},
diff --git a/miner/worker.go b/miner/worker.go
index 30809cd558..9d04838ccb 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -17,10 +17,15 @@
package miner
import (
+ "bytes"
"context"
"errors"
"fmt"
"math/big"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ ptrace "runtime/trace"
"sync"
"sync/atomic"
"time"
@@ -31,14 +36,17 @@ import (
"go.opentelemetry.io/otel/trace"
"github.com/ethereum/go-ethereum/common"
+ cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/common/tracing"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/bor"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
@@ -83,6 +91,12 @@ const (
staleThreshold = 7
)
+// metrics counters tracking the total and empty blocks sealed by a miner
+var (
+ sealedBlocksCounter = metrics.NewRegisteredCounter("worker/sealedBlocks", nil)
+ sealedEmptyBlocksCounter = metrics.NewRegisteredCounter("worker/sealedEmptyBlocks", nil)
+)
+
// environment is the worker's current environment and holds all
// information of the sealing block generation.
type environment struct {
@@ -257,6 +271,8 @@ type worker struct {
skipSealHook func(*task) bool // Method to decide whether skipping the sealing.
fullTaskHook func() // Method to call before pushing the full sealing task.
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
+
+ profileCount *int32 // Global count for profiling
}
//nolint:staticcheck
@@ -285,6 +301,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
resubmitIntervalCh: make(chan time.Duration),
resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize),
}
+ worker.profileCount = new(int32)
// Subscribe NewTxsEvent for tx pool
worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
// Subscribe events for blockchain
@@ -560,9 +577,11 @@ func (w *worker) mainLoop(ctx context.Context) {
for {
select {
case req := <-w.newWorkCh:
+ //nolint:contextcheck
w.commitWork(req.ctx, req.interrupt, req.noempty, req.timestamp)
case req := <-w.getWorkCh:
+ //nolint:contextcheck
block, err := w.generateWork(req.ctx, req.params)
if err != nil {
req.err = err
@@ -622,13 +641,17 @@ func (w *worker) mainLoop(ctx context.Context) {
if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
continue
}
+
txs := make(map[common.Address]types.Transactions)
+
for _, tx := range ev.Txs {
acc, _ := types.Sender(w.current.signer, tx)
txs[acc] = append(txs[acc], tx)
}
- txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
+
+ txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, cmath.FromBig(w.current.header.BaseFee))
tcount := w.current.tcount
+
w.commitTransactions(w.current, txset, nil)
// Only update the snapshot if any new transactions were added
@@ -758,7 +781,7 @@ func (w *worker) resultLoop() {
err error
)
- tracing.Exec(task.ctx, "resultLoop", func(ctx context.Context, span trace.Span) {
+ tracing.Exec(task.ctx, "", "resultLoop", func(ctx context.Context, span trace.Span) {
for i, taskReceipt := range task.receipts {
receipt := new(types.Receipt)
receipts[i] = receipt
@@ -782,8 +805,8 @@ func (w *worker) resultLoop() {
}
// Commit block and state to database.
- tracing.ElapsedTime(ctx, span, "WriteBlockAndSetHead time taken", func(_ context.Context, _ trace.Span) {
- _, err = w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
+ tracing.Exec(ctx, "", "resultLoop.WriteBlockAndSetHead", func(ctx context.Context, span trace.Span) {
+ _, err = w.chain.WriteBlockAndSetHead(ctx, block, receipts, logs, task.state, true)
})
tracing.SetAttributes(
@@ -808,6 +831,12 @@ func (w *worker) resultLoop() {
// Broadcast the block and announce chain insertion event
w.mux.Post(core.NewMinedBlockEvent{Block: block})
+ sealedBlocksCounter.Inc(1)
+
+ if block.Transactions().Len() == 0 {
+ sealedEmptyBlocksCounter.Inc(1)
+ }
+
// Insert the block into the set of pending ones to resultLoop for confirmations
w.unconfirmed.Insert(block.NumberU64(), block.Hash())
case <-w.exitCh:
@@ -918,6 +947,22 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
}
var coalescedLogs []*types.Log
+ initialGasLimit := env.gasPool.Gas()
+ initialTxs := txs.GetTxs()
+
+ var breakCause string
+
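+ // On exit, emit a debug summary of the txs and gas consumed in this
+ // round; OnDebug skips the closure entirely below debug level.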
+ defer func() {
+ log.OnDebug(func(lg log.Logging) {
+ lg("commitTransactions-stats",
+ "initialTxsCount", initialTxs,
+ "initialGasLimit", initialGasLimit,
+ "resultTxsCount", txs.GetTxs(),
+ "resultGapPool", env.gasPool.Gas(),
+ "exitCause", breakCause)
+ })
+ }()
+
for {
// In the following three cases, we will interrupt the execution of the transaction.
// (1) new head block event arrival, the interrupt signal is 1
@@ -965,7 +1010,14 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
// Start executing the transaction
env.state.Prepare(tx.Hash(), env.tcount)
+ var start time.Time
+
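+ // Capture the start time only when debug logging is enabled; it feeds
+ // the "Committed new tx" debug line below.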
+ log.OnDebug(func(log.Logging) {
+ start = time.Now()
+ })
+
logs, err := w.commitTransaction(env, tx)
+
switch {
case errors.Is(err, core.ErrGasLimitReached):
// Pop the current out-of-gas transaction without shifting in the next from the account
@@ -988,6 +1040,10 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
env.tcount++
txs.Shift()
+ log.OnDebug(func(lg log.Logging) {
+ lg("Committed new tx", "tx hash", tx.Hash(), "from", from, "to", tx.To(), "nonce", tx.Nonce(), "gas", tx.Gas(), "gasPrice", tx.GasPrice(), "value", tx.Value(), "time spent", time.Since(start))
+ })
+
case errors.Is(err, core.ErrTxTypeNotSupported):
// Pop the unsupported transaction without shifting in the next from the account
log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
@@ -1077,7 +1133,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
}
// Set baseFee and GasLimit if we are on an EIP-1559 chain
if w.chainConfig.IsLondon(header.Number) {
- header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header())
+ header.BaseFee = misc.CalcBaseFeeUint(w.chainConfig, parent.Header()).ToBig()
if !w.chainConfig.IsLondon(parent.Number()) {
parentGasLimit := parent.GasLimit() * params.ElasticityMultiplier
header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
@@ -1085,7 +1141,12 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
}
// Run the consensus preparation with the default or customized consensus engine.
if err := w.engine.Prepare(w.chain, header); err != nil {
- log.Error("Failed to prepare header for sealing", "err", err)
+ switch err.(type) {
+ case *bor.UnauthorizedSignerError:
+ log.Debug("Failed to prepare header for sealing", "err", err)
+ default:
+ log.Error("Failed to prepare header for sealing", "err", err)
+ }
return nil, err
}
// Could potentially happen if starting to mine in an odd state.
@@ -1117,9 +1178,75 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
return env, nil
}
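+// startProfiler starts the requested profile ("cpu", "trace" or "heap") into
+// an in-memory buffer and returns a closer that stops the profile and writes
+// <filepath>/<profile>-<number>.prof.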
+func startProfiler(profile string, filepath string, number uint64) (func() error, error) {
+ var (
+ buf bytes.Buffer
+ err error
+ )
+
+ closeFn := func() {}
+
+ switch profile {
+ case "cpu":
+ err = pprof.StartCPUProfile(&buf)
+
+ if err == nil {
+ closeFn = func() {
+ pprof.StopCPUProfile()
+ }
+ }
+ case "trace":
+ err = ptrace.Start(&buf)
+
+ if err == nil {
+ closeFn = func() {
+ ptrace.Stop()
+ }
+ }
+ case "heap":
+ runtime.GC()
+
+ err = pprof.WriteHeapProfile(&buf)
+ default:
+ log.Info("Incorrect profile name")
+ }
+
+ if err != nil {
+ return func() error {
+ closeFn()
+ return nil
+ }, err
+ }
+
+ closeFnNew := func() error {
+ var err error
+
+ closeFn()
+
+ if buf.Len() == 0 {
+ return nil
+ }
+
+ f, err := os.Create(filepath + "/" + profile + "-" + fmt.Sprint(number) + ".prof")
+ if err != nil {
+ return err
+ }
+
+ defer f.Close()
+
+ _, err = f.Write(buf.Bytes())
+
+ return err
+ }
+
+ return closeFnNew, nil
+}
+
// fillTransactions retrieves the pending transactions from the txpool and fills them
// into the given sealing block. The transaction selection and ordering strategy can
// be customized with the plugin in the future.
+//
+//nolint:gocognit
func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *environment) {
ctx, span := tracing.StartSpan(ctx, "fillTransactions")
defer tracing.EndSpan(span)
@@ -1134,10 +1261,76 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
remoteTxs map[common.Address]types.Transactions
)
- tracing.Exec(ctx, "worker.SplittingTransactions", func(ctx context.Context, span trace.Span) {
- pending := w.eth.TxPool().Pending(true)
+ // TODO: move to config or RPC
+ const profiling = false
+
+ if profiling {
+ doneCh := make(chan struct{})
+
+ defer func() {
+ close(doneCh)
+ }()
+
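+ // Background profiler for this block: every 150ms try to start a CPU
+ // profile, atomically capped at 10 attempts across all blocks; closing
+ // doneCh stops the active profile and writes it to disk.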
+ go func(number uint64) {
+ closeFn := func() error {
+ return nil
+ }
+
+ for {
+ select {
+ case <-time.After(150 * time.Millisecond):
+ // Check if we've not crossed limit
+ if attempt := atomic.AddInt32(w.profileCount, 1); attempt >= 10 {
+ log.Info("Completed profiling", "attempt", attempt)
+
+ return
+ }
+
+ log.Info("Starting profiling in fill transactions", "number", number)
+
+ dir, err := os.MkdirTemp("", fmt.Sprintf("bor-traces-%s-", time.Now().UTC().Format("2006-01-02-150405Z")))
+ if err != nil {
+ log.Error("Error in profiling", "path", dir, "number", number, "err", err)
+ return
+ }
+
+ // grab the cpu profile
+ closeFnInternal, err := startProfiler("cpu", dir, number)
+ if err != nil {
+ log.Error("Error in profiling", "path", dir, "number", number, "err", err)
+ return
+ }
+
+ closeFn = func() error {
+ err := closeFnInternal()
+
+ log.Info("Completed profiling", "path", dir, "number", number, "error", err)
+
+ return nil
+ }
+
+ case <-doneCh:
+ err := closeFn()
+
+ if err != nil {
+ log.Info("closing fillTransactions", "number", number, "error", err)
+ }
+
+ return
+ }
+ }
+ }(env.header.Number.Uint64())
+ }
+
+ tracing.Exec(ctx, "", "worker.SplittingTransactions", func(ctx context.Context, span trace.Span) {
+ prePendingTime := time.Now()
+
+ pending := w.eth.TxPool().Pending(ctx, true)
remoteTxs = pending
+ postPendingTime := time.Now()
+
for _, account := range w.eth.TxPool().Locals() {
if txs := remoteTxs[account]; len(txs) > 0 {
delete(remoteTxs, account)
@@ -1145,6 +1338,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
}
}
+ postLocalsTime := time.Now()
+
localTxsCount = len(localTxs)
remoteTxsCount = len(remoteTxs)
@@ -1152,6 +1347,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
span,
attribute.Int("len of local txs", localTxsCount),
attribute.Int("len of remote txs", remoteTxsCount),
+ attribute.String("time taken by Pending()", fmt.Sprintf("%v", postPendingTime.Sub(prePendingTime))),
+ attribute.String("time taken by Locals()", fmt.Sprintf("%v", postLocalsTime.Sub(postPendingTime))),
)
})
@@ -1164,8 +1361,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
if localTxsCount > 0 {
var txs *types.TransactionsByPriceAndNonce
- tracing.Exec(ctx, "worker.LocalTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) {
- txs = types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
+ tracing.Exec(ctx, "", "worker.LocalTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) {
+ txs = types.NewTransactionsByPriceAndNonce(env.signer, localTxs, cmath.FromBig(env.header.BaseFee))
tracing.SetAttributes(
span,
@@ -1173,7 +1370,7 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
)
})
- tracing.Exec(ctx, "worker.LocalCommitTransactions", func(ctx context.Context, span trace.Span) {
+ tracing.Exec(ctx, "", "worker.LocalCommitTransactions", func(ctx context.Context, span trace.Span) {
committed = w.commitTransactions(env, txs, interrupt)
})
@@ -1187,8 +1384,8 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
if remoteTxsCount > 0 {
var txs *types.TransactionsByPriceAndNonce
- tracing.Exec(ctx, "worker.RemoteTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) {
- txs = types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
+ tracing.Exec(ctx, "", "worker.RemoteTransactionsByPriceAndNonce", func(ctx context.Context, span trace.Span) {
+ txs = types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, cmath.FromBig(env.header.BaseFee))
tracing.SetAttributes(
span,
@@ -1196,7 +1393,7 @@ func (w *worker) fillTransactions(ctx context.Context, interrupt *int32, env *en
)
})
- tracing.Exec(ctx, "worker.RemoteCommitTransactions", func(ctx context.Context, span trace.Span) {
+ tracing.Exec(ctx, "", "worker.RemoteCommitTransactions", func(ctx context.Context, span trace.Span) {
committed = w.commitTransactions(env, txs, interrupt)
})
@@ -1237,7 +1434,7 @@ func (w *worker) commitWork(ctx context.Context, interrupt *int32, noempty bool,
err error
)
- tracing.Exec(ctx, "worker.prepareWork", func(ctx context.Context, span trace.Span) {
+ tracing.Exec(ctx, "", "worker.prepareWork", func(ctx context.Context, span trace.Span) {
// Set the coinbase if the worker is running or it's required
var coinbase common.Address
if w.isRunning() {
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 011895c854..ffd44bebfe 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -75,7 +75,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool, isBor bool) {
ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
spanner := bor.NewMockSpanner(ctrl)
- spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
+ spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
{
ID: 0,
Address: TestBankAddress,
@@ -622,7 +622,7 @@ func BenchmarkBorMining(b *testing.B) {
ethAPIMock.EXPECT().Call(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
spanner := bor.NewMockSpanner(ctrl)
- spanner.EXPECT().GetCurrentValidators(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
+ spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*valset.Validator{
{
ID: 0,
Address: TestBankAddress,
diff --git a/p2p/dnsdisc/client_test.go b/p2p/dnsdisc/client_test.go
index 9320dd667a..99c3aac0ea 100644
--- a/p2p/dnsdisc/client_test.go
+++ b/p2p/dnsdisc/client_test.go
@@ -57,15 +57,19 @@ func TestClientSyncTree(t *testing.T) {
c := NewClient(Config{Resolver: r, Logger: testlog.Logger(t, log.LvlTrace)})
stree, err := c.SyncTree("enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@n")
+
if err != nil {
t.Fatal("sync error:", err)
}
+
if !reflect.DeepEqual(sortByID(stree.Nodes()), sortByID(wantNodes)) {
t.Errorf("wrong nodes in synced tree:\nhave %v\nwant %v", spew.Sdump(stree.Nodes()), spew.Sdump(wantNodes))
}
+
if !reflect.DeepEqual(stree.Links(), wantLinks) {
t.Errorf("wrong links in synced tree: %v", stree.Links())
}
+
if stree.Seq() != wantSeq {
t.Errorf("synced tree has wrong seq: %d", stree.Seq())
}
@@ -295,7 +299,7 @@ func TestIteratorEmptyTree(t *testing.T) {
// updateSomeNodes applies ENR updates to some of the given nodes.
func updateSomeNodes(keySeed int64, nodes []*enode.Node) {
- keys := testKeys(nodesSeed1, len(nodes))
+ keys := testKeys(keySeed, len(nodes))
for i, n := range nodes[:len(nodes)/2] {
r := n.Record()
r.Set(enr.IP{127, 0, 0, 1})
@@ -384,10 +388,12 @@ func makeTestTree(domain string, nodes []*enode.Node, links []string) (*Tree, st
if err != nil {
panic(err)
}
+
url, err := tree.Sign(testKey(signingKeySeed), domain)
if err != nil {
panic(err)
}
+
return tree, url
}
diff --git a/packaging/templates/mainnet-v1/archive/config.toml b/packaging/templates/mainnet-v1/archive/config.toml
index 5491c784ef..355d07051d 100644
--- a/packaging/templates/mainnet-v1/archive/config.toml
+++ b/packaging/templates/mainnet-v1/archive/config.toml
@@ -86,7 +86,7 @@ gcmode = "archive"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
- # read = "30s"
+ # read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml
index 90df84dc07..0299f59bdc 100644
--- a/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml
+++ b/packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml
@@ -86,7 +86,7 @@ syncmode = "full"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
- # read = "30s"
+ # read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
index 9e2d80fd2a..36b14cd263 100644
--- a/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
+++ b/packaging/templates/mainnet-v1/sentry/validator/bor/config.toml
@@ -88,7 +88,7 @@ syncmode = "full"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
- # read = "30s"
+ # read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml
index 1e5fd67762..cfba7fb181 100644
--- a/packaging/templates/mainnet-v1/without-sentry/bor/config.toml
+++ b/packaging/templates/mainnet-v1/without-sentry/bor/config.toml
@@ -88,7 +88,7 @@ syncmode = "full"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
- # read = "30s"
+ # read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/packaging/templates/package_scripts/control b/packaging/templates/package_scripts/control
index df0427b322..130226241b 100644
--- a/packaging/templates/package_scripts/control
+++ b/packaging/templates/package_scripts/control
@@ -1,5 +1,5 @@
Source: bor
-Version: 0.3.4-beta3
+Version: 0.3.8-beta
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.arm64 b/packaging/templates/package_scripts/control.arm64
index bcc8041a77..e8073532af 100644
--- a/packaging/templates/package_scripts/control.arm64
+++ b/packaging/templates/package_scripts/control.arm64
@@ -1,5 +1,5 @@
Source: bor
-Version: 0.3.4-beta3
+Version: 0.3.8-beta
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.profile.amd64 b/packaging/templates/package_scripts/control.profile.amd64
index 507d4328b2..a5b46bff79 100644
--- a/packaging/templates/package_scripts/control.profile.amd64
+++ b/packaging/templates/package_scripts/control.profile.amd64
@@ -1,5 +1,5 @@
Source: bor-profile
-Version: 0.3.4-beta3
+Version: 0.3.8-beta
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.profile.arm64 b/packaging/templates/package_scripts/control.profile.arm64
index 011dfa8b63..b0d94da338 100644
--- a/packaging/templates/package_scripts/control.profile.arm64
+++ b/packaging/templates/package_scripts/control.profile.arm64
@@ -1,5 +1,5 @@
Source: bor-profile
-Version: 0.3.4-beta3
+Version: 0.3.8-beta
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.validator b/packaging/templates/package_scripts/control.validator
index 94ee786237..887713056a 100644
--- a/packaging/templates/package_scripts/control.validator
+++ b/packaging/templates/package_scripts/control.validator
@@ -1,5 +1,5 @@
Source: bor-profile
-Version: 0.3.4-beta3
+Version: 0.3.8-beta
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/package_scripts/control.validator.arm64 b/packaging/templates/package_scripts/control.validator.arm64
index 96049a56d6..f9fa7635a9 100644
--- a/packaging/templates/package_scripts/control.validator.arm64
+++ b/packaging/templates/package_scripts/control.validator.arm64
@@ -1,5 +1,5 @@
Source: bor-profile
-Version: 0.3.4-beta3
+Version: 0.3.8-beta
Section: develop
Priority: standard
Maintainer: Polygon
diff --git a/packaging/templates/testnet-v4/archive/config.toml b/packaging/templates/testnet-v4/archive/config.toml
index fb9ffd0a17..71d360faf6 100644
--- a/packaging/templates/testnet-v4/archive/config.toml
+++ b/packaging/templates/testnet-v4/archive/config.toml
@@ -86,7 +86,7 @@ gcmode = "archive"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
- # read = "30s"
+ # read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml
index 9884c0eccc..124b34f09c 100644
--- a/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml
+++ b/packaging/templates/testnet-v4/sentry/sentry/bor/config.toml
@@ -86,7 +86,7 @@ syncmode = "full"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
- # read = "30s"
+ # read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml
index 49c47fedd4..bfebe422ca 100644
--- a/packaging/templates/testnet-v4/sentry/validator/bor/config.toml
+++ b/packaging/templates/testnet-v4/sentry/validator/bor/config.toml
@@ -88,7 +88,7 @@ syncmode = "full"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
- # read = "30s"
+ # read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/packaging/templates/testnet-v4/without-sentry/bor/config.toml b/packaging/templates/testnet-v4/without-sentry/bor/config.toml
index 2fb83a6ae2..2f7710d0d8 100644
--- a/packaging/templates/testnet-v4/without-sentry/bor/config.toml
+++ b/packaging/templates/testnet-v4/without-sentry/bor/config.toml
@@ -88,7 +88,7 @@ syncmode = "full"
# vhosts = ["*"]
# corsdomain = ["*"]
# [jsonrpc.timeouts]
- # read = "30s"
+ # read = "10s"
# write = "30s"
# idle = "2m0s"
diff --git a/params/version.go b/params/version.go
index 46fcbb6e1e..affdc6b5eb 100644
--- a/params/version.go
+++ b/params/version.go
@@ -21,10 +21,10 @@ import (
)
const (
- VersionMajor = 0 // Major version component of the current release
- VersionMinor = 3 // Minor version component of the current release
- VersionPatch = 4 // Patch version component of the current release
- VersionMeta = "beta3" // Version metadata to append to the version string
+ VersionMajor = 0 // Major version component of the current release
+ VersionMinor = 3 // Minor version component of the current release
+ VersionPatch = 8 // Patch version component of the current release
+ VersionMeta = "beta" // Version metadata to append to the version string
)
// Version holds the textual version string.
diff --git a/rpc/http.go b/rpc/http.go
index 18404c060a..09594d0280 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -104,7 +104,7 @@ type HTTPTimeouts struct {
// DefaultHTTPTimeouts represents the default timeout values used if further
// configuration is not provided.
var DefaultHTTPTimeouts = HTTPTimeouts{
- ReadTimeout: 30 * time.Second,
+ ReadTimeout: 10 * time.Second,
WriteTimeout: 30 * time.Second,
IdleTimeout: 120 * time.Second,
}
diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go
index 2dc20a915e..e6e8188ce0 100644
--- a/tests/bor/bor_test.go
+++ b/tests/bor/bor_test.go
@@ -392,12 +392,18 @@ func TestInsertingSpanSizeBlocks(t *testing.T) {
currentValidators := []*valset.Validator{valset.NewValidator(addr, 10)}
+ spanner := getMockedSpanner(t, currentValidators)
+ _bor.SetSpanner(spanner)
+
// Insert sprintSize # of blocks so that span is fetched at the start of a new sprint
for i := uint64(1); i <= spanSize; i++ {
block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, currentValidators)
insertNewBlock(t, chain, block)
}
+ spanner = getMockedSpanner(t, currentSpan.ValidatorSet.Validators)
+ _bor.SetSpanner(spanner)
+
validators, err := _bor.GetCurrentValidators(context.Background(), block.Hash(), spanSize) // check validator set at the first block of new span
if err != nil {
t.Fatalf("%s", err)
@@ -427,6 +433,9 @@ func TestFetchStateSyncEvents(t *testing.T) {
currentValidators := []*valset.Validator{valset.NewValidator(addr, 10)}
+ spanner := getMockedSpanner(t, currentValidators)
+ _bor.SetSpanner(spanner)
+
// Insert sprintSize # of blocks so that span is fetched at the start of a new sprint
for i := uint64(1); i < sprintSize; i++ {
if IsSpanEnd(i) {
@@ -528,6 +537,9 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
currentValidators = []*valset.Validator{valset.NewValidator(addr, 10)}
}
+ spanner := getMockedSpanner(t, currentValidators)
+ _bor.SetSpanner(spanner)
+
block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, currentValidators)
insertNewBlock(t, chain, block)
}
@@ -554,6 +566,9 @@ func TestFetchStateSyncEvents_2(t *testing.T) {
currentValidators = []*valset.Validator{valset.NewValidator(addr, 10)}
}
+ spanner := getMockedSpanner(t, currentValidators)
+ _bor.SetSpanner(spanner)
+
block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, res.Result.ValidatorSet.Validators)
insertNewBlock(t, chain, block)
}
@@ -580,6 +595,8 @@ func TestOutOfTurnSigning(t *testing.T) {
h.EXPECT().Close().AnyTimes()
+ spanner := getMockedSpanner(t, heimdallSpan.ValidatorSet.Validators)
+ _bor.SetSpanner(spanner)
_bor.SetHeimdallClient(h)
db := init.ethereum.ChainDb()
@@ -1082,6 +1099,9 @@ func TestJaipurFork(t *testing.T) {
res, _ := loadSpanFromFile(t)
+ spanner := getMockedSpanner(t, res.Result.ValidatorSet.Validators)
+ _bor.SetSpanner(spanner)
+
for i := uint64(1); i < sprintSize; i++ {
block = buildNextBlock(t, _bor, chain, block, nil, init.genesis.Config.Bor, nil, res.Result.ValidatorSet.Validators)
insertNewBlock(t, chain, block)
diff --git a/tests/bor/helper.go b/tests/bor/helper.go
index e28076a3b1..c4b45f970d 100644
--- a/tests/bor/helper.go
+++ b/tests/bor/helper.go
@@ -352,6 +352,17 @@ func getMockedHeimdallClient(t *testing.T, heimdallSpan *span.HeimdallSpan) (*mo
return h, ctrl
}
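+// getMockedSpanner returns a bor.MockSpanner that serves the given validator
+// set for every validator query, an empty span, and accepts any CommitSpan.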
+func getMockedSpanner(t *testing.T, validators []*valset.Validator) *bor.MockSpanner {
+ t.Helper()
+
+ spanner := bor.NewMockSpanner(gomock.NewController(t))
+ spanner.EXPECT().GetCurrentValidatorsByHash(gomock.Any(), gomock.Any(), gomock.Any()).Return(validators, nil).AnyTimes()
+ spanner.EXPECT().GetCurrentValidatorsByBlockNrOrHash(gomock.Any(), gomock.Any(), gomock.Any()).Return(validators, nil).AnyTimes()
+ spanner.EXPECT().GetCurrentSpan(gomock.Any(), gomock.Any()).Return(&span.Span{0, 0, 0}, nil).AnyTimes()
+ spanner.EXPECT().CommitSpan(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+ return spanner
+}
+
func generateFakeStateSyncEvents(sample *clerk.EventRecordWithTime, count int) []*clerk.EventRecordWithTime {
events := make([]*clerk.EventRecordWithTime, count)
event := *sample
diff --git a/tests/init_test.go b/tests/init_test.go
index 1c6841e030..5e32f20abf 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -141,9 +141,6 @@ func (tm *testMatcher) findSkip(name string) (reason string, skipload bool) {
isWin32 := runtime.GOARCH == "386" && runtime.GOOS == "windows"
for _, re := range tm.slowpat {
if re.MatchString(name) {
- if testing.Short() {
- return "skipped in -short mode", false
- }
if isWin32 {
return "skipped on 32bit windows", false
}