From 856c379626c2ff6d37dc28e79596cfbb814bf1dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 3 May 2021 15:42:43 +0300 Subject: [PATCH 001/208] eth: don't print db upgrade warning on db init --- eth/backend.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/eth/backend.go b/eth/backend.go index 4c7374612e5d..7aac17eeb329 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -165,7 +165,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if bcVersion != nil && *bcVersion > core.BlockChainVersion { return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion) } else if bcVersion == nil || *bcVersion < core.BlockChainVersion { - log.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion) + if bcVersion != nil { // only print warning on upgrade, not on init + log.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion) + } rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion) } } From b8040a430e34117f121c67e8deee4a5e889e8372 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 4 May 2021 11:29:32 +0200 Subject: [PATCH 002/208] cmd/utils: use eth DNS tree for snap discovery (#22808) This removes auto-configuration of the snap.*.ethdisco.net DNS discovery tree. Since measurements have shown that > 75% of nodes in all.*.ethdisco.net support snap, we have decided to retire the dedicated index for snap and just use the eth tree instead. The dial iterators of eth and snap now use the same DNS tree in the default configuration, so both iterators should use the same DNS discovery client instance. This ensures that the record cache and rate limit are shared. Records will not be requested multiple times. While testing the change, I noticed that duplicate DNS requests do happen even when the client instance is shared. This is because the two iterators request the tree root, link tree root, and first levels of the tree in lockstep. To avoid this problem, the change also adds a singleflight.Group instance in the client. When one iterator attempts to resolve an entry which is already being resolved, the singleflight object waits for the existing resolve call to finish and returns the entry to both places. 
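The deduplication behaviour described above is provided by golang.org/x/sync/singleflight. For illustration only (not part of the patch; the key string and the stand-in resolve function are invented for the example, the real work is the DNS lookup added to p2p/dnsdisc below), a minimal sketch of how a singleflight.Group coalesces concurrent requests for the same key:

    package main

    import (
    	"fmt"
    	"sync"

    	"golang.org/x/sync/singleflight"
    )

    func main() {
    	var group singleflight.Group
    	var wg sync.WaitGroup

    	// Stand-in for the expensive operation; in the dnsdisc client this is
    	// the DNS TXT lookup for a tree root or entry.
    	resolve := func() (interface{}, error) {
    		fmt.Println("performing lookup")
    		return "enrtree-root:v1 ...", nil
    	}

    	// Two concurrent callers, analogous to the eth and snap dial iterators
    	// requesting the same tree entry at the same time.
    	for i := 0; i < 2; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			// Do coalesces concurrent calls that use the same key: at most
    			// one resolve runs per key at a time, and callers that arrive
    			// while it is in flight wait for and receive the same result.
    			v, err, shared := group.Do("example-entry@nodes.example.org", resolve)
    			fmt.Println(v, err, "shared:", shared)
    		}()
    	}
    	wg.Wait()
    }
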
--- cmd/utils/flags.go | 6 +---- eth/backend.go | 10 +++++-- eth/discovery.go | 11 -------- eth/protocols/snap/handler.go | 6 +++++ go.mod | 1 + go.sum | 2 ++ p2p/dnsdisc/client.go | 51 ++++++++++++++++++++--------------- 7 files changed, 48 insertions(+), 39 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index a50fdd968a35..a81188342f60 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1690,11 +1690,7 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) { } if url := params.KnownDNSNetwork(genesis, protocol); url != "" { cfg.EthDiscoveryURLs = []string{url} - } - if cfg.SyncMode == downloader.SnapSync { - if url := params.KnownDNSNetwork(genesis, "snap"); url != "" { - cfg.SnapDiscoveryURLs = []string{url} - } + cfg.SnapDiscoveryURLs = cfg.EthDiscoveryURLs } } diff --git a/eth/backend.go b/eth/backend.go index 7aac17eeb329..7d8b0c52c6a4 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -50,6 +50,7 @@ import ( "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -239,14 +240,17 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams) - eth.ethDialCandidates, err = setupDiscovery(eth.config.EthDiscoveryURLs) + // Setup DNS discovery iterators. + dnsclient := dnsdisc.NewClient(dnsdisc.Config{}) + eth.ethDialCandidates, err = dnsclient.NewIterator(eth.config.EthDiscoveryURLs...) if err != nil { return nil, err } - eth.snapDialCandidates, err = setupDiscovery(eth.config.SnapDiscoveryURLs) + eth.snapDialCandidates, err = dnsclient.NewIterator(eth.config.SnapDiscoveryURLs...) if err != nil { return nil, err } + // Start the RPC service eth.netRPCService = ethapi.NewPublicNetAPI(eth.p2pServer, config.NetworkId) @@ -544,6 +548,8 @@ func (s *Ethereum) Start() error { // Ethereum protocol. func (s *Ethereum) Stop() error { // Stop all the peer-related stuff first. + s.ethDialCandidates.Close() + s.snapDialCandidates.Close() s.handler.Stop() // Then stop everything else. diff --git a/eth/discovery.go b/eth/discovery.go index 855ce3b0e1f4..70668b2b70bf 100644 --- a/eth/discovery.go +++ b/eth/discovery.go @@ -19,7 +19,6 @@ package eth import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" ) @@ -62,13 +61,3 @@ func (eth *Ethereum) currentEthEntry() *ethEntry { return ðEntry{ForkID: forkid.NewID(eth.blockchain.Config(), eth.blockchain.Genesis().Hash(), eth.blockchain.CurrentHeader().Number.Uint64())} } - -// setupDiscovery creates the node discovery source for the `eth` and `snap` -// protocols. -func setupDiscovery(urls []string) (enode.Iterator, error) { - if len(urls) == 0 { - return nil, nil - } - client := dnsdisc.NewClient(dnsdisc.Config{}) - return client.NewIterator(urls...) -} diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index 9bfac6f03ff7..3d668a2ebb6f 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -84,6 +84,12 @@ type Backend interface { // MakeProtocols constructs the P2P protocol definitions for `snap`. 
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol { + // Filter the discovery iterator for nodes advertising snap support. + dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool { + var snap enrEntry + return n.Load(&snap) == nil + }) + protocols := make([]p2p.Protocol, len(ProtocolVersions)) for i, version := range ProtocolVersions { version := version // Closure diff --git a/go.mod b/go.mod index 63636caae130..079d40ae6fed 100644 --- a/go.mod +++ b/go.mod @@ -60,6 +60,7 @@ require ( github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988 golang.org/x/text v0.3.4 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 diff --git a/go.sum b/go.sum index cadcd1de95cf..3ae80f7b9c65 100644 --- a/go.sum +++ b/go.sum @@ -455,6 +455,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index 3e4b50aaddb0..f2a4bed4c6e4 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -32,15 +32,17 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" lru "github.com/hashicorp/golang-lru" + "golang.org/x/sync/singleflight" "golang.org/x/time/rate" ) // Client discovers nodes by querying DNS servers. type Client struct { - cfg Config - clock mclock.Clock - entries *lru.Cache - ratelimit *rate.Limiter + cfg Config + clock mclock.Clock + entries *lru.Cache + ratelimit *rate.Limiter + singleflight singleflight.Group } // Config holds configuration options for the client. @@ -135,17 +137,20 @@ func (c *Client) NewIterator(urls ...string) (enode.Iterator, error) { // resolveRoot retrieves a root entry via DNS. 
func (c *Client) resolveRoot(ctx context.Context, loc *linkEntry) (rootEntry, error) { - txts, err := c.cfg.Resolver.LookupTXT(ctx, loc.domain) - c.cfg.Logger.Trace("Updating DNS discovery root", "tree", loc.domain, "err", err) - if err != nil { - return rootEntry{}, err - } - for _, txt := range txts { - if strings.HasPrefix(txt, rootPrefix) { - return parseAndVerifyRoot(txt, loc) + e, err, _ := c.singleflight.Do(loc.str, func() (interface{}, error) { + txts, err := c.cfg.Resolver.LookupTXT(ctx, loc.domain) + c.cfg.Logger.Trace("Updating DNS discovery root", "tree", loc.domain, "err", err) + if err != nil { + return rootEntry{}, err } - } - return rootEntry{}, nameError{loc.domain, errNoRoot} + for _, txt := range txts { + if strings.HasPrefix(txt, rootPrefix) { + return parseAndVerifyRoot(txt, loc) + } + } + return rootEntry{}, nameError{loc.domain, errNoRoot} + }) + return e.(rootEntry), err } func parseAndVerifyRoot(txt string, loc *linkEntry) (rootEntry, error) { @@ -168,17 +173,21 @@ func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry, if err := c.ratelimit.Wait(ctx); err != nil { return nil, err } - cacheKey := truncateHash(hash) if e, ok := c.entries.Get(cacheKey); ok { return e.(entry), nil } - e, err := c.doResolveEntry(ctx, domain, hash) - if err != nil { - return nil, err - } - c.entries.Add(cacheKey, e) - return e, nil + + ei, err, _ := c.singleflight.Do(cacheKey, func() (interface{}, error) { + e, err := c.doResolveEntry(ctx, domain, hash) + if err != nil { + return nil, err + } + c.entries.Add(cacheKey, e) + return e, nil + }) + e, _ := ei.(entry) + return e, err } // doResolveEntry fetches an entry via DNS. From effaf185234533b787926a67a6f73eb8d636e7af Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 4 May 2021 13:01:20 +0200 Subject: [PATCH 003/208] build: improve cross compilation setup (#22804) This PR cleans up the CI build system and fixes a couple of issues. - The go tool launcher code has been moved to internal/build. With the new toolchain functions, the environment of the host Go (i.e. the one that built ci.go) and the target Go (i.e. the toolchain downloaded by -dlgo) are isolated more strictly. This is important to make cross compilation and -dlgo work correctly in more cases. - The -dlgo option now skips the download and uses the host Go if the running Go version matches dlgoVersion exactly. - The 'test' command now supports -dlgo, -cc and -arch. Running unit tests with foreign GOARCH is occasionally useful. For example, it can be used to run 32-bit tests on Windows. It can also be used to run darwin/amd64 tests on darwin/arm64 using Rosetta 2. - The 'aar', 'xcode' and 'xgo' commands now use a slightly different method to install external tools. They previously used `go get`, but this comes with the annoying side effect of modifying go.mod. They now use `go install` instead, which is the recommended way of installing tools without modifying the local module. - The old build warning about outdated Go version has been removed because we're much better at keeping backwards compatibility now. 
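The central idea of the toolchain isolation described above can be summarised in a short sketch (illustration only, not the internal/build.GoToolchain API added below; the GOROOT path, target arch, compiler and the installed tool are example values): the target go binary is addressed via an explicit GOROOT, and its environment is rebuilt so that host settings such as GOARCH or CC cannot leak in, while `go install pkg@version` installs a tool without touching the local go.mod.

    package main

    import (
    	"os"
    	"os/exec"
    	"path/filepath"
    	"strings"
    )

    // runTargetGo invokes the go binary from goroot with an isolated environment:
    // GOROOT, GOARCH and CC are set explicitly instead of being inherited from the
    // host toolchain that runs the build script.
    func runTargetGo(goroot, goarch, cc string, args ...string) error {
    	cmd := exec.Command(filepath.Join(goroot, "bin", "go"), args...)
    	// Start from the explicit target settings, then forward the rest of the
    	// host environment with the conflicting variables filtered out.
    	env := []string{"GOROOT=" + goroot, "GOARCH=" + goarch, "CGO_ENABLED=1", "CC=" + cc}
    	for _, e := range os.Environ() {
    		if strings.HasPrefix(e, "GOROOT=") || strings.HasPrefix(e, "GOARCH=") ||
    			strings.HasPrefix(e, "CC=") || strings.HasPrefix(e, "CGO_ENABLED=") {
    			continue // dropped: set explicitly above
    		}
    		env = append(env, e)
    	}
    	cmd.Env = env
    	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
    	return cmd.Run()
    }

    func main() {
    	// Example: cross-build with a downloaded toolchain, and install a helper
    	// tool without modifying go.mod (note the @latest version suffix).
    	_ = runTargetGo("/tmp/geth-go", "386", "gcc", "build", "./cmd/geth")
    	_ = runTargetGo("/tmp/geth-go", "386", "gcc", "install", "golang.org/x/mobile/cmd/gomobile@latest")
    }
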
--- Makefile | 11 +-- build/ci.go | 206 ++++++++++++--------------------------- internal/build/env.go | 3 + internal/build/gotool.go | 149 ++++++++++++++++++++++++++++ internal/build/util.go | 14 --- 5 files changed, 221 insertions(+), 162 deletions(-) create mode 100644 internal/build/gotool.go diff --git a/Makefile b/Makefile index ecee5f189435..cb5a87dad0ee 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ android: @echo "Import \"$(GOBIN)/geth.aar\" to use the library." @echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs" @echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc" - + ios: $(GORUN) build/ci.go xcode --local @echo "Done building." @@ -46,12 +46,11 @@ clean: # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. devtools: - env GOBIN= go get -u golang.org/x/tools/cmd/stringer - env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata - env GOBIN= go get -u github.com/fjl/gencodec - env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go + env GOBIN= go install golang.org/x/tools/cmd/stringer@latest + env GOBIN= go install github.com/kevinburke/go-bindata/go-bindata@latest + env GOBIN= go install github.com/fjl/gencodec@latest + env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest env GOBIN= go install ./cmd/abigen - @type "npm" 2> /dev/null || echo 'Please install node.js and npm' @type "solc" 2> /dev/null || echo 'Please install solc' @type "protoc" 2> /dev/null || echo 'Please install protoc' diff --git a/build/ci.go b/build/ci.go index b73435c5e605..a89c6e02fd0c 100644 --- a/build/ci.go +++ b/build/ci.go @@ -208,58 +208,25 @@ func doInstall(cmdline []string) { cc = flag.String("cc", "", "C compiler to cross build with") ) flag.CommandLine.Parse(cmdline) - env := build.Env() - // Check local Go version. People regularly open issues about compilation - // failure with outdated Go. This should save them the trouble. - if !strings.Contains(runtime.Version(), "devel") { - // Figure out the minor version number since we can't textually compare (1.10 < 1.9) - var minor int - fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor) - if minor < 13 { - log.Println("You have Go version", runtime.Version()) - log.Println("go-ethereum requires at least Go version 1.13 and cannot") - log.Println("be compiled with an earlier version. Please upgrade your Go installation.") - os.Exit(1) - } - } - - // Choose which go command we're going to use. - var gobuild *exec.Cmd - if !*dlgo { - // Default behavior: use the go version which runs ci.go right now. - gobuild = goTool("build") - } else { - // Download of Go requested. This is for build environments where the - // installed version is too old and cannot be upgraded easily. - cachedir := filepath.Join("build", "cache") - goroot := downloadGo(runtime.GOARCH, runtime.GOOS, cachedir) - gobuild = localGoTool(goroot, "build") + // Configure the toolchain. + tc := build.GoToolchain{GOARCH: *arch, CC: *cc} + if *dlgo { + csdb := build.MustLoadChecksums("build/checksums.txt") + tc.Root = build.DownloadGo(csdb, dlgoVersion) } - // Configure environment for cross build. - if *arch != "" || *arch != runtime.GOARCH { - gobuild.Env = append(gobuild.Env, "CGO_ENABLED=1") - gobuild.Env = append(gobuild.Env, "GOARCH="+*arch) - } - - // Configure C compiler. 
- if *cc != "" { - gobuild.Env = append(gobuild.Env, "CC="+*cc) - } else if os.Getenv("CC") != "" { - gobuild.Env = append(gobuild.Env, "CC="+os.Getenv("CC")) - } + // Configure the build. + env := build.Env() + gobuild := tc.Go("build", buildFlags(env)...) // arm64 CI builders are memory-constrained and can't handle concurrent builds, // better disable it. This check isn't the best, it should probably // check for something in env instead. - if runtime.GOARCH == "arm64" { + if env.CI && runtime.GOARCH == "arm64" { gobuild.Args = append(gobuild.Args, "-p", "1") } - // Put the default settings in. - gobuild.Args = append(gobuild.Args, buildFlags(env)...) - // We use -trimpath to avoid leaking local paths into the built executables. gobuild.Args = append(gobuild.Args, "-trimpath") @@ -301,53 +268,30 @@ func buildFlags(env build.Environment) (flags []string) { return flags } -// goTool returns the go tool. This uses the Go version which runs ci.go. -func goTool(subcmd string, args ...string) *exec.Cmd { - cmd := build.GoTool(subcmd, args...) - goToolSetEnv(cmd) - return cmd -} - -// localGoTool returns the go tool from the given GOROOT. -func localGoTool(goroot string, subcmd string, args ...string) *exec.Cmd { - gotool := filepath.Join(goroot, "bin", "go") - cmd := exec.Command(gotool, subcmd) - goToolSetEnv(cmd) - cmd.Env = append(cmd.Env, "GOROOT="+goroot) - cmd.Args = append(cmd.Args, args...) - return cmd -} - -// goToolSetEnv forwards the build environment to the go tool. -func goToolSetEnv(cmd *exec.Cmd) { - cmd.Env = append(cmd.Env, "GOBIN="+GOBIN) - for _, e := range os.Environ() { - if strings.HasPrefix(e, "GOBIN=") || strings.HasPrefix(e, "CC=") { - continue - } - cmd.Env = append(cmd.Env, e) - } -} - // Running The Tests // // "tests" also includes static analysis tools such as vet. func doTest(cmdline []string) { - coverage := flag.Bool("coverage", false, "Whether to record code coverage") - verbose := flag.Bool("v", false, "Whether to log verbosely") + var ( + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") + arch = flag.String("arch", "", "Run tests for given architecture") + cc = flag.String("cc", "", "Sets C compiler binary") + coverage = flag.Bool("coverage", false, "Whether to record code coverage") + verbose = flag.Bool("v", false, "Whether to log verbosely") + ) flag.CommandLine.Parse(cmdline) - env := build.Env() - packages := []string{"./..."} - if len(flag.CommandLine.Args()) > 0 { - packages = flag.CommandLine.Args() + // Configure the toolchain. + tc := build.GoToolchain{GOARCH: *arch, CC: *cc} + if *dlgo { + csdb := build.MustLoadChecksums("build/checksums.txt") + tc.Root = build.DownloadGo(csdb, dlgoVersion) } + gotest := tc.Go("test") - // Run the actual tests. // Test a single package at a time. CI builders are slow // and some tests run into timeouts under load. - gotest := goTool("test", buildFlags(env)...) gotest.Args = append(gotest.Args, "-p", "1") if *coverage { gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover") @@ -356,6 +300,10 @@ func doTest(cmdline []string) { gotest.Args = append(gotest.Args, "-v") } + packages := []string{"./..."} + if len(flag.CommandLine.Args()) > 0 { + packages = flag.CommandLine.Args() + } gotest.Args = append(gotest.Args, packages...) 
build.MustRun(gotest) } @@ -415,8 +363,7 @@ func doArchive(cmdline []string) { } var ( - env = build.Env() - + env = build.Env() basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit)) geth = "geth-" + basegeth + ext alltools = "geth-alltools-" + basegeth + ext @@ -492,15 +439,15 @@ func archiveUpload(archive string, blobstore string, signer string, signifyVar s // skips archiving for some build configurations. func maybeSkipArchive(env build.Environment) { if env.IsPullRequest { - log.Printf("skipping because this is a PR build") + log.Printf("skipping archive creation because this is a PR build") os.Exit(0) } if env.IsCronJob { - log.Printf("skipping because this is a cron job") + log.Printf("skipping archive creation because this is a cron job") os.Exit(0) } if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") { - log.Printf("skipping because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag) + log.Printf("skipping archive creation because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag) os.Exit(0) } } @@ -518,6 +465,7 @@ func doDebianSource(cmdline []string) { flag.CommandLine.Parse(cmdline) *workdir = makeWorkdir(*workdir) env := build.Env() + tc := new(build.GoToolchain) maybeSkipArchive(env) // Import the signing key. @@ -531,12 +479,12 @@ func doDebianSource(cmdline []string) { gobundle := downloadGoSources(*cachedir) // Download all the dependencies needed to build the sources and run the ci script - srcdepfetch := goTool("mod", "download") - srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath")) + srcdepfetch := tc.Go("mod", "download") + srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath")) build.MustRun(srcdepfetch) - cidepfetch := goTool("run", "./build/ci.go") - cidepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath")) + cidepfetch := tc.Go("run", "./build/ci.go") + cidepfetch.Env = append(cidepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath")) cidepfetch.Run() // Command fails, don't care, we only need the deps to start it // Create Debian packages and upload them. @@ -592,41 +540,6 @@ func downloadGoSources(cachedir string) string { return dst } -// downloadGo downloads the Go binary distribution and unpacks it into a temporary -// directory. It returns the GOROOT of the unpacked toolchain. 
-func downloadGo(goarch, goos, cachedir string) string { - if goarch == "arm" { - goarch = "armv6l" - } - - csdb := build.MustLoadChecksums("build/checksums.txt") - file := fmt.Sprintf("go%s.%s-%s", dlgoVersion, goos, goarch) - if goos == "windows" { - file += ".zip" - } else { - file += ".tar.gz" - } - url := "https://golang.org/dl/" + file - dst := filepath.Join(cachedir, file) - if err := csdb.DownloadFile(url, dst); err != nil { - log.Fatal(err) - } - - ucache, err := os.UserCacheDir() - if err != nil { - log.Fatal(err) - } - godir := filepath.Join(ucache, fmt.Sprintf("geth-go-%s-%s-%s", dlgoVersion, goos, goarch)) - if err := build.ExtractArchive(dst, godir); err != nil { - log.Fatal(err) - } - goroot, err := filepath.Abs(filepath.Join(godir, "go")) - if err != nil { - log.Fatal(err) - } - return goroot -} - func ppaUpload(workdir, ppa, sshUser string, files []string) { p := strings.Split(ppa, "/") if len(p) != 2 { @@ -901,13 +814,23 @@ func doAndroidArchive(cmdline []string) { ) flag.CommandLine.Parse(cmdline) env := build.Env() + tc := new(build.GoToolchain) // Sanity check that the SDK and NDK are installed and set if os.Getenv("ANDROID_HOME") == "" { log.Fatal("Please ensure ANDROID_HOME points to your Android SDK") } + + // Build gomobile. + install := tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest") + install.Env = append(install.Env) + build.MustRun(install) + + // Ensure all dependencies are available. This is required to make + // gomobile bind work because it expects go.sum to contain all checksums. + build.MustRun(tc.Go("mod", "download")) + // Build the Android archive and Maven resources - build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind")) build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile")) if *local { @@ -1027,10 +950,16 @@ func doXCodeFramework(cmdline []string) { ) flag.CommandLine.Parse(cmdline) env := build.Env() + tc := new(build.GoToolchain) + + // Build gomobile. + build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest")) + + // Ensure all dependencies are available. This is required to make + // gomobile bind work because it expects go.sum to contain all checksums. + build.MustRun(tc.Go("mod", "download")) // Build the iOS XCode framework - build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind")) - build.MustRun(gomobileTool("init")) bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/ethereum/go-ethereum/mobile") if *local { @@ -1039,17 +968,14 @@ func doXCodeFramework(cmdline []string) { build.MustRun(bind) return } + + // Create the archive. 
+ maybeSkipArchive(env) archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit)) - if err := os.Mkdir(archive, os.ModePerm); err != nil { - log.Fatal(err) - } bind.Dir, _ = filepath.Abs(archive) build.MustRun(bind) build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive) - // Skip CocoaPods deploy and Azure upload for PR builds - maybeSkipArchive(env) - // Sign and upload the framework to Azure if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil { log.Fatal(err) @@ -1115,10 +1041,10 @@ func doXgo(cmdline []string) { ) flag.CommandLine.Parse(cmdline) env := build.Env() + var tc build.GoToolchain // Make sure xgo is available for cross compilation - gogetxgo := goTool("get", "github.com/karalabe/xgo") - build.MustRun(gogetxgo) + build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest")) // If all tools building is requested, build everything the builder wants args := append(buildFlags(env), flag.Args()...) @@ -1129,27 +1055,23 @@ func doXgo(cmdline []string) { if strings.HasPrefix(res, GOBIN) { // Binary tool found, cross build it explicitly args = append(args, "./"+filepath.Join("cmd", filepath.Base(res))) - xgo := xgoTool(args) - build.MustRun(xgo) + build.MustRun(xgoTool(args)) args = args[:len(args)-1] } } return } - // Otherwise xxecute the explicit cross compilation + + // Otherwise execute the explicit cross compilation path := args[len(args)-1] args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...) - - xgo := xgoTool(args) - build.MustRun(xgo) + build.MustRun(xgoTool(args)) } func xgoTool(args []string) *exec.Cmd { cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...) cmd.Env = os.Environ() - cmd.Env = append(cmd.Env, []string{ - "GOBIN=" + GOBIN, - }...) + cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...) return cmd } diff --git a/internal/build/env.go b/internal/build/env.go index a3017182e3c1..d70c0d50a499 100644 --- a/internal/build/env.go +++ b/internal/build/env.go @@ -38,6 +38,7 @@ var ( // Environment contains metadata provided by the build environment. type Environment struct { + CI bool Name string // name of the environment Repo string // name of GitHub repo Commit, Date, Branch, Tag string // Git info @@ -61,6 +62,7 @@ func Env() Environment { commit = os.Getenv("TRAVIS_COMMIT") } return Environment{ + CI: true, Name: "travis", Repo: os.Getenv("TRAVIS_REPO_SLUG"), Commit: commit, @@ -77,6 +79,7 @@ func Env() Environment { commit = os.Getenv("APPVEYOR_REPO_COMMIT") } return Environment{ + CI: true, Name: "appveyor", Repo: os.Getenv("APPVEYOR_REPO_NAME"), Commit: commit, diff --git a/internal/build/gotool.go b/internal/build/gotool.go new file mode 100644 index 000000000000..e644b5f69526 --- /dev/null +++ b/internal/build/gotool.go @@ -0,0 +1,149 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package build + +import ( + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" +) + +type GoToolchain struct { + Root string // GOROOT + + // Cross-compilation variables. These are set when running the go tool. + GOARCH string + GOOS string + CC string +} + +// Go creates an invocation of the go command. +func (g *GoToolchain) Go(command string, args ...string) *exec.Cmd { + tool := g.goTool(command, args...) + + // Configure environment for cross build. + if g.GOARCH != "" && g.GOARCH != runtime.GOARCH { + tool.Env = append(tool.Env, "CGO_ENABLED=1") + tool.Env = append(tool.Env, "GOARCH="+g.GOARCH) + } + if g.GOOS != "" && g.GOOS != runtime.GOOS { + tool.Env = append(tool.Env, "GOOS="+g.GOOS) + } + // Configure C compiler. + if g.CC != "" { + tool.Env = append(tool.Env, "CC="+g.CC) + } else if os.Getenv("CC") != "" { + tool.Env = append(tool.Env, "CC="+os.Getenv("CC")) + } + return tool +} + +// Install creates an invocation of 'go install'. The command is configured to output +// executables to the given 'gobin' directory. +// +// This can be used to install auxiliary build tools without modifying the local go.mod and +// go.sum files. To install tools which are not required by go.mod, ensure that all module +// paths in 'args' contain a module version suffix (e.g. "...@latest"). +func (g *GoToolchain) Install(gobin string, args ...string) *exec.Cmd { + if !filepath.IsAbs(gobin) { + panic("GOBIN must be an absolute path") + } + tool := g.goTool("install") + tool.Env = append(tool.Env, "GOBIN="+gobin) + tool.Args = append(tool.Args, "-mod=readonly") + tool.Args = append(tool.Args, args...) + + // Ensure GOPATH is set because go install seems to absolutely require it. This uses + // 'go env' because it resolves the default value when GOPATH is not set in the + // environment. Ignore errors running go env and leave any complaining about GOPATH to + // the install command. + pathTool := g.goTool("env", "GOPATH") + output, _ := pathTool.Output() + tool.Env = append(tool.Env, "GOPATH="+string(output)) + return tool +} + +func (g *GoToolchain) goTool(command string, args ...string) *exec.Cmd { + if g.Root == "" { + g.Root = runtime.GOROOT() + } + tool := exec.Command(filepath.Join(g.Root, "bin", "go"), command) + tool.Args = append(tool.Args, args...) + tool.Env = append(tool.Env, "GOROOT="+g.Root) + + // Forward environment variables to the tool, but skip compiler target settings. + // TODO: what about GOARM? + skip := map[string]struct{}{"GOROOT": {}, "GOARCH": {}, "GOOS": {}, "GOBIN": {}, "CC": {}} + for _, e := range os.Environ() { + if i := strings.IndexByte(e, '='); i >= 0 { + if _, ok := skip[e[:i]]; ok { + continue + } + } + tool.Env = append(tool.Env, e) + } + return tool +} + +// DownloadGo downloads the Go binary distribution and unpacks it into a temporary +// directory. It returns the GOROOT of the unpacked toolchain. +func DownloadGo(csdb *ChecksumDB, version string) string { + // Shortcut: if the Go version that runs this script matches the + // requested version exactly, there is no need to download anything. 
+ activeGo := strings.TrimPrefix(runtime.Version(), "go") + if activeGo == version { + log.Printf("-dlgo version matches active Go version %s, skipping download.", activeGo) + return runtime.GOROOT() + } + + ucache, err := os.UserCacheDir() + if err != nil { + log.Fatal(err) + } + + // For Arm architecture, GOARCH includes ISA version. + os := runtime.GOOS + arch := runtime.GOARCH + if arch == "arm" { + arch = "armv6l" + } + file := fmt.Sprintf("go%s.%s-%s", version, os, arch) + if os == "windows" { + file += ".zip" + } else { + file += ".tar.gz" + } + url := "https://golang.org/dl/" + file + dst := filepath.Join(ucache, file) + if err := csdb.DownloadFile(url, dst); err != nil { + log.Fatal(err) + } + + godir := filepath.Join(ucache, fmt.Sprintf("geth-go-%s-%s-%s", version, os, arch)) + if err := ExtractArchive(dst, godir); err != nil { + log.Fatal(err) + } + goroot, err := filepath.Abs(filepath.Join(godir, "go")) + if err != nil { + log.Fatal(err) + } + return goroot +} diff --git a/internal/build/util.go b/internal/build/util.go index 91149926f790..2bdced82ee2f 100644 --- a/internal/build/util.go +++ b/internal/build/util.go @@ -29,7 +29,6 @@ import ( "os/exec" "path" "path/filepath" - "runtime" "strings" "text/template" ) @@ -111,19 +110,6 @@ func render(tpl *template.Template, outputFile string, outputPerm os.FileMode, x } } -// GoTool returns the command that runs a go tool. This uses go from GOROOT instead of PATH -// so that go commands executed by build use the same version of Go as the 'host' that runs -// build code. e.g. -// -// /usr/lib/go-1.12.1/bin/go run build/ci.go ... -// -// runs using go 1.12.1 and invokes go 1.12.1 tools from the same GOROOT. This is also important -// because runtime.Version checks on the host should match the tools that are run. -func GoTool(tool string, args ...string) *exec.Cmd { - args = append([]string{tool}, args...) - return exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...) -} - // UploadSFTP uploads files to a remote host using the sftp command line tool. // The destination host may be specified either as [user@]host[: or as a URI in // the form sftp://[user@]host[:port]. From d107f90d1ce74c5d60c11a7171c0178bde12f14f Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 4 May 2021 21:45:21 +0200 Subject: [PATCH 004/208] go.mod: go mod tidy (#22814) This updates go.mod for the addition of golang.org/x/sync. 
--- go.mod | 2 +- go.sum | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 079d40ae6fed..512d541f411d 100644 --- a/go.mod +++ b/go.mod @@ -60,7 +60,7 @@ require ( github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988 golang.org/x/text v0.3.4 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 diff --git a/go.sum b/go.sum index 3ae80f7b9c65..b6a27a2cf128 100644 --- a/go.sum +++ b/go.sum @@ -453,7 +453,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 973ad66b498546468c66ec3cc9d5aa111c1df948 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 4 May 2021 21:45:45 +0200 Subject: [PATCH 005/208] build: fix iOS framework build (#22813) This fixes a regression introduced in #22804. --- build/ci.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/build/ci.go b/build/ci.go index a89c6e02fd0c..d9f147ef0ea2 100644 --- a/build/ci.go +++ b/build/ci.go @@ -972,6 +972,9 @@ func doXCodeFramework(cmdline []string) { // Create the archive. maybeSkipArchive(env) archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit)) + if err := os.MkdirAll(archive, 0755); err != nil { + log.Fatal(err) + } bind.Dir, _ = filepath.Abs(archive) build.MustRun(bind) build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive) From 3a2b29c1ed91e1099318c3829a44926a1c2a37d9 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 4 May 2021 22:39:09 +0200 Subject: [PATCH 006/208] appveyor.yml: upgrade to VisualStudio 2019 image (#22811) --- appveyor.yml | 40 ++++++++++++++-------------------------- 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 052280be156a..a72163382a9c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,41 +1,29 @@ -os: Visual Studio 2015 - -# Clone directly into GOPATH. -clone_folder: C:\gopath\src\github.com\ethereum\go-ethereum +os: Visual Studio 2019 clone_depth: 5 version: "{branch}.{build}" environment: - global: - GO111MODULE: on - GOPATH: C:\gopath - CC: gcc.exe matrix: + # We use gcc from MSYS2 because it is the most recent compiler version available on + # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is + # contained in PATH. 
- GETH_ARCH: amd64 - MSYS2_ARCH: x86_64 - MSYS2_BITS: 64 - MSYSTEM: MINGW64 - PATH: C:\msys64\mingw64\bin\;C:\Program Files (x86)\NSIS\;%PATH% + GETH_CC: C:\msys64\mingw64\bin\gcc.exe + PATH: C:\msys64\mingw64\bin;C:\Program Files (x86)\NSIS\;%PATH% - GETH_ARCH: 386 - MSYS2_ARCH: i686 - MSYS2_BITS: 32 - MSYSTEM: MINGW32 - PATH: C:\msys64\mingw32\bin\;C:\Program Files (x86)\NSIS\;%PATH% + GETH_CC: C:\msys64\mingw32\bin\gcc.exe + PATH: C:\msys64\mingw32\bin;C:\Program Files (x86)\NSIS\;%PATH% install: - - git submodule update --init - - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.16.windows-%GETH_ARCH%.zip - - 7z x go1.16.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - git submodule update --init --depth 1 - go version - - gcc --version + - "%GETH_CC% --version" build_script: - - go run build\ci.go install -dlgo + - go run build\ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC% after_build: - - go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - - go run build\ci.go nsis -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + - go run build\ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + - go run build\ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds test_script: - - set CGO_ENABLED=1 - - go run build\ci.go test -coverage + - go run build\ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage From 41671d449f5b3a19119eef070151ec72fdbb31f8 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 5 May 2021 12:19:51 +0200 Subject: [PATCH 007/208] build: fix windows installer build for NSIS v3.05 (#22821) With the update to a newer AppVeyor build image, creating the Windows installer no longer worked because of a string quoting error in EnvVarUpdate.nsh. This applies the fix recommended in https://stackoverflow.com/questions/62081765. --- build/nsis.envvarupdate.nsh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/nsis.envvarupdate.nsh b/build/nsis.envvarupdate.nsh index 9c3ecbe3372f..95c2f1f6392b 100644 --- a/build/nsis.envvarupdate.nsh +++ b/build/nsis.envvarupdate.nsh @@ -43,7 +43,7 @@ !ifndef Un${StrFuncName}_INCLUDED ${Un${StrFuncName}} !endif - !define un.${StrFuncName} "${Un${StrFuncName}}" + !define un.${StrFuncName} '${Un${StrFuncName}}' !macroend !insertmacro _IncludeStrFunction StrTok From 0f3a1e7f9b069c7dcc934238bd75ace72c166f36 Mon Sep 17 00:00:00 2001 From: rene <41963722+renaynay@users.noreply.github.com> Date: Wed, 5 May 2021 12:27:27 +0200 Subject: [PATCH 008/208] cmd/devp2p/internal/ethtest: send simultaneous requests on one connection (#22801) This changes the SimultaneousRequests test to send the requests from the same connection, as it doesn't really make sense to test whether a node can respond to two requests with different request IDs from separate connections. --- cmd/devp2p/internal/ethtest/eth66_suite.go | 41 +++++++++++----------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/eth66_suite.go b/cmd/devp2p/internal/ethtest/eth66_suite.go index d4890d8de6db..903a90c7eb2b 100644 --- a/cmd/devp2p/internal/ethtest/eth66_suite.go +++ b/cmd/devp2p/internal/ethtest/eth66_suite.go @@ -91,9 +91,8 @@ func (s *Suite) TestGetBlockHeaders_66(t *utesting.T) { // headers per request. 
func (s *Suite) TestSimultaneousRequests_66(t *utesting.T) { // create two connections - conn1, conn2 := s.setupConnection66(t), s.setupConnection66(t) - defer conn1.Close() - defer conn2.Close() + conn := s.setupConnection66(t) + defer conn.Close() // create two requests req1 := ð.GetBlockHeadersPacket66{ RequestId: 111, @@ -117,27 +116,29 @@ func (s *Suite) TestSimultaneousRequests_66(t *utesting.T) { Reverse: false, }, } - // wait for headers for first request - headerChan := make(chan BlockHeaders, 1) - go func(headers chan BlockHeaders) { - recvHeaders, err := s.getBlockHeaders66(conn1, req1, req1.RequestId) - if err != nil { - t.Fatalf("could not get block headers: %v", err) - return - } - headers <- recvHeaders - }(headerChan) - // check headers of second request - headers1, err := s.getBlockHeaders66(conn2, req2, req2.RequestId) + // write first request + if err := conn.write66(req1, GetBlockHeaders{}.Code()); err != nil { + t.Fatalf("failed to write to connection: %v", err) + } + // write second request + if err := conn.write66(req2, GetBlockHeaders{}.Code()); err != nil { + t.Fatalf("failed to write to connection: %v", err) + } + // wait for responses + headers1, err := s.waitForBlockHeadersResponse66(conn, req1.RequestId) if err != nil { - t.Fatalf("could not get block headers: %v", err) + t.Fatalf("error while waiting for block headers: %v", err) + } + headers2, err := s.waitForBlockHeadersResponse66(conn, req2.RequestId) + if err != nil { + t.Fatalf("error while waiting for block headers: %v", err) } + // check headers of both responses if !headersMatch(t, s.chain, headers1) { - t.Fatal("wrong header(s) in response to req2") + t.Fatalf("wrong header(s) in response to req1: got %v", headers1) } - // check headers of first request - if !headersMatch(t, s.chain, <-headerChan) { - t.Fatal("wrong header(s) in response to req1") + if !headersMatch(t, s.chain, headers2) { + t.Fatalf("wrong header(s) in response to req2: got %v", headers2) } } From 991384a7f6719e1125ca0be7fb27d0c4d1c5d2d3 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 5 May 2021 13:20:06 +0200 Subject: [PATCH 009/208] params: go-ethereum v1.10.3 stable --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index d350c5ff4a0d..1c7bf8d88eaa 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 10 // Minor version component of the current release - VersionPatch = 3 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 10 // Minor version component of the current release + VersionPatch = 3 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. 
From 37b5595456e7049e3ed487c41564281de52e00ab Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 5 May 2021 13:21:13 +0200 Subject: [PATCH 010/208] params: begin v1.10.4 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 1c7bf8d88eaa..89969705dc1c 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 10 // Minor version component of the current release - VersionPatch = 3 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 10 // Minor version component of the current release + VersionPatch = 4 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From df20b3b98286c9333c143c0a873d09e27b0e0693 Mon Sep 17 00:00:00 2001 From: Evgeny Danilenko <6655321@bk.ru> Date: Thu, 6 May 2021 11:46:27 +0300 Subject: [PATCH 011/208] core/vm: avoid duplicate log in json logger (#22825) --- core/vm/logger_json.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/vm/logger_json.go b/core/vm/logger_json.go index e54be08596e6..93878b9806cc 100644 --- a/core/vm/logger_json.go +++ b/core/vm/logger_json.go @@ -87,8 +87,9 @@ func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, Time time.Duration `json:"time"` Err string `json:"error,omitempty"` } + var errMsg string if err != nil { - l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()}) + errMsg = err.Error() } - l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""}) + l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, errMsg}) } From cc606be74c6f1f05b0b0a6226a400e734b9aac31 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Thu, 6 May 2021 11:07:42 +0200 Subject: [PATCH 012/208] all: define London+baikal, undefine yolov3, add london override flag (#22822) * all: define London+baikal, undefine yolov3, add london override flag * cmd, core, params: add baikal genesis definition --- cmd/geth/chaincmd.go | 2 +- cmd/geth/config.go | 4 ++-- cmd/geth/consolecmd.go | 4 ++-- cmd/geth/dbcmd.go | 16 +++++++------- cmd/geth/main.go | 8 +++---- cmd/geth/usage.go | 2 +- cmd/puppeth/wizard_genesis.go | 4 ++-- cmd/utils/flags.go | 36 +++++++++++++++---------------- core/blockchain_test.go | 2 +- core/genesis.go | 20 ++++++++--------- core/genesis_alloc.go | 2 +- core/genesis_test.go | 4 ++-- core/state/statedb.go | 2 +- core/types/transaction_signing.go | 2 +- core/vm/runtime/runtime.go | 2 +- eth/backend.go | 2 +- eth/ethconfig/config.go | 2 +- les/client.go | 2 +- params/bootnodes.go | 10 ++++----- params/config.go | 34 +++++++++++++++++------------ tests/init.go | 22 ++++++++++++++----- 21 files changed, 100 insertions(+), 82 deletions(-) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 3cae32aa9e18..d00b4bc1f61b 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -63,7 +63,7 @@ It expects the genesis file as argument.`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` diff --git 
a/cmd/geth/config.go b/cmd/geth/config.go index c867877ee6f7..c4ebf648813b 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -141,8 +141,8 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { // makeFullNode loads geth configuration and creates the Ethereum backend. func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { stack, cfg := makeConfigNode(ctx) - if ctx.GlobalIsSet(utils.OverrideBerlinFlag.Name) { - cfg.Eth.OverrideBerlin = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideBerlinFlag.Name)) + if ctx.GlobalIsSet(utils.OverrideLondonFlag.Name) { + cfg.Eth.OverrideLondon = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideLondonFlag.Name)) } backend, eth := utils.RegisterEthService(stack, &cfg.Eth) diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 9d8794eb15f5..5c715a125074 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -134,8 +134,8 @@ func remoteConsole(ctx *cli.Context) error { path = filepath.Join(path, "rinkeby") } else if ctx.GlobalBool(utils.GoerliFlag.Name) { path = filepath.Join(path, "goerli") - } else if ctx.GlobalBool(utils.YoloV3Flag.Name) { - path = filepath.Join(path, "yolo-v3") + } else if ctx.GlobalBool(utils.BaikalFlag.Name) { + path = filepath.Join(path, "baikal") } } endpoint = fmt.Sprintf("%s/geth.ipc", path) diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 4c70373e9aff..2cd481b706c9 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -75,7 +75,7 @@ Remove blockchain and state databases`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, }, Usage: "Inspect the storage size for each type of data in the database", Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, @@ -91,7 +91,7 @@ Remove blockchain and state databases`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, }, } dbCompactCmd = cli.Command{ @@ -105,7 +105,7 @@ Remove blockchain and state databases`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, utils.CacheFlag, utils.CacheDatabaseFlag, }, @@ -125,7 +125,7 @@ corruption if it is aborted during execution'!`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, }, Description: "This command looks up the specified database key from the database.", } @@ -141,7 +141,7 @@ corruption if it is aborted during execution'!`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, }, Description: `This command deletes the specified database key from the database. WARNING: This is a low-level operation which may cause database corruption!`, @@ -158,7 +158,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, }, Description: `This command sets a given database key to the given value. 
WARNING: This is a low-level operation which may cause database corruption!`, @@ -175,7 +175,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, }, Description: "This command looks up the specified database key from the database.", } @@ -191,7 +191,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, }, Description: "This command displays information about the freezer index.", } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 78e65161daf7..bed4d8a488e1 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -66,7 +66,7 @@ var ( utils.NoUSBFlag, utils.USBFlag, utils.SmartCardDaemonPathFlag, - utils.OverrideBerlinFlag, + utils.OverrideLondonFlag, utils.EthashCacheDirFlag, utils.EthashCachesInMemoryFlag, utils.EthashCachesOnDiskFlag, @@ -138,7 +138,7 @@ var ( utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, + utils.BaikalFlag, utils.VMEnableDebugFlag, utils.NetworkIdFlag, utils.EthStatsURLFlag, @@ -275,8 +275,8 @@ func prepare(ctx *cli.Context) { case ctx.GlobalIsSet(utils.GoerliFlag.Name): log.Info("Starting Geth on Görli testnet...") - case ctx.GlobalIsSet(utils.YoloV3Flag.Name): - log.Info("Starting Geth on YOLOv3 testnet...") + case ctx.GlobalIsSet(utils.BaikalFlag.Name): + log.Info("Starting Geth on Baikal testnet...") case ctx.GlobalIsSet(utils.DeveloperFlag.Name): log.Info("Starting Geth in ephemeral dev mode...") diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 980794db7358..aa0a4b290116 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -44,7 +44,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.MainnetFlag, utils.GoerliFlag, utils.RinkebyFlag, - utils.YoloV3Flag, + utils.BaikalFlag, utils.RopstenFlag, utils.SyncModeFlag, utils.ExitWhenSyncedFlag, diff --git a/cmd/puppeth/wizard_genesis.go b/cmd/puppeth/wizard_genesis.go index 4f701fa1c305..ae5977b3723c 100644 --- a/cmd/puppeth/wizard_genesis.go +++ b/cmd/puppeth/wizard_genesis.go @@ -240,8 +240,8 @@ func (w *wizard) manageGenesis() { w.conf.Genesis.Config.BerlinBlock = w.readDefaultBigInt(w.conf.Genesis.Config.BerlinBlock) fmt.Println() - fmt.Printf("Which block should YOLOv3 come into effect? (default = %v)\n", w.conf.Genesis.Config.YoloV3Block) - w.conf.Genesis.Config.YoloV3Block = w.readDefaultBigInt(w.conf.Genesis.Config.YoloV3Block) + fmt.Printf("Which block should London come into effect? 
(default = %v)\n", w.conf.Genesis.Config.LondonBlock) + w.conf.Genesis.Config.LondonBlock = w.readDefaultBigInt(w.conf.Genesis.Config.LondonBlock) out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ") fmt.Printf("Chain configuration updated:\n\n%s\n", out) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index a81188342f60..aa00f96c92e7 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -151,9 +151,9 @@ var ( Name: "goerli", Usage: "Görli network: pre-configured proof-of-authority test network", } - YoloV3Flag = cli.BoolFlag{ - Name: "yolov3", - Usage: "YOLOv3 network: pre-configured proof-of-authority shortlived test network.", + BaikalFlag = cli.BoolFlag{ + Name: "baikal", + Usage: "Bailkal network: pre-configured proof-of-authority shortlived test network.", } RinkebyFlag = cli.BoolFlag{ Name: "rinkeby", @@ -233,9 +233,9 @@ var ( Usage: "Megabytes of memory allocated to bloom-filter for pruning", Value: 2048, } - OverrideBerlinFlag = cli.Uint64Flag{ - Name: "override.berlin", - Usage: "Manually specify Berlin fork-block, overriding the bundled setting", + OverrideLondonFlag = cli.Uint64Flag{ + Name: "override.london", + Usage: "Manually specify London fork-block, overriding the bundled setting", } // Light server and client settings LightServeFlag = cli.IntFlag{ @@ -778,8 +778,8 @@ func MakeDataDir(ctx *cli.Context) string { if ctx.GlobalBool(GoerliFlag.Name) { return filepath.Join(path, "goerli") } - if ctx.GlobalBool(YoloV3Flag.Name) { - return filepath.Join(path, "yolo-v3") + if ctx.GlobalBool(BaikalFlag.Name) { + return filepath.Join(path, "baikal") } return path } @@ -833,8 +833,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { urls = params.RinkebyBootnodes case ctx.GlobalBool(GoerliFlag.Name): urls = params.GoerliBootnodes - case ctx.GlobalBool(YoloV3Flag.Name): - urls = params.YoloV3Bootnodes + case ctx.GlobalBool(BaikalFlag.Name): + urls = params.BaikalBootnodes case cfg.BootstrapNodes != nil: return // already set, don't apply defaults. } @@ -1275,8 +1275,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) { cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") - case ctx.GlobalBool(YoloV3Flag.Name) && cfg.DataDir == node.DefaultDataDir(): - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "yolo-v3") + case ctx.GlobalBool(BaikalFlag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "baikal") } } @@ -1460,7 +1460,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { // SetEthConfig applies eth-related command line flags to the config. 
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // Avoid conflicting network flags - CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, YoloV3Flag) + CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, BaikalFlag) CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light") CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 { @@ -1620,11 +1620,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } cfg.Genesis = core.DefaultGoerliGenesisBlock() SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash) - case ctx.GlobalBool(YoloV3Flag.Name): + case ctx.GlobalBool(BaikalFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { - cfg.NetworkId = new(big.Int).SetBytes([]byte("yolov3x")).Uint64() // "yolov3x" + cfg.NetworkId = 1642 // https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/client-integration-testnets/baikal.md } - cfg.Genesis = core.DefaultYoloV3GenesisBlock() + cfg.Genesis = core.DefaultBaikalGenesisBlock() case ctx.GlobalBool(DeveloperFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 1337 @@ -1813,8 +1813,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis { genesis = core.DefaultRinkebyGenesisBlock() case ctx.GlobalBool(GoerliFlag.Name): genesis = core.DefaultGoerliGenesisBlock() - case ctx.GlobalBool(YoloV3Flag.Name): - genesis = core.DefaultYoloV3GenesisBlock() + case ctx.GlobalBool(BaikalFlag.Name): + genesis = core.DefaultBaikalGenesisBlock() case ctx.GlobalBool(DeveloperFlag.Name): Fatalf("Developer chains are ephemeral") } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 5004abd1c7a1..75ef399c9adc 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -3049,7 +3049,7 @@ func TestEIP2718Transition(t *testing.T) { address = crypto.PubkeyToAddress(key.PublicKey) funds = big.NewInt(1000000000) gspec = &Genesis{ - Config: params.YoloV3ChainConfig, + Config: params.TestChainConfig, Alloc: GenesisAlloc{ address: {Balance: funds}, // The address 0xAAAA sloads 0x00 and 0x01 diff --git a/core/genesis.go b/core/genesis.go index e05e27fe1750..9ae718beb654 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -156,7 +156,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig return SetupGenesisBlockWithOverride(db, genesis, nil) } -func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideBerlin *big.Int) (*params.ChainConfig, common.Hash, error) { +func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideLondon *big.Int) (*params.ChainConfig, common.Hash, error) { if genesis != nil && genesis.Config == nil { return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig } @@ -202,8 +202,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override } // Get the existing chain configuration. 
newcfg := genesis.configOrDefault(stored) - if overrideBerlin != nil { - newcfg.BerlinBlock = overrideBerlin + if overrideLondon != nil { + newcfg.LondonBlock = overrideLondon } if err := newcfg.CheckConfigForkOrder(); err != nil { return newcfg, common.Hash{}, err @@ -246,8 +246,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { return params.RinkebyChainConfig case ghash == params.GoerliGenesisHash: return params.GoerliChainConfig - case ghash == params.YoloV3GenesisHash: - return params.YoloV3ChainConfig + case ghash == params.BaikalGenesisHash: + return params.BaikalChainConfig default: return params.AllEthashProtocolChanges } @@ -383,15 +383,15 @@ func DefaultGoerliGenesisBlock() *Genesis { } } -func DefaultYoloV3GenesisBlock() *Genesis { +func DefaultBaikalGenesisBlock() *Genesis { // Full genesis: https://gist.github.com/holiman/c6ed9269dce28304ad176314caa75e97 return &Genesis{ - Config: params.YoloV3ChainConfig, - Timestamp: 0x6027dd2e, - ExtraData: hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000001041afbcb359d5a8dc58c15b2ff51354ff8a217d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), + Config: params.BaikalChainConfig, + Timestamp: 0x6092ca7f, + ExtraData: hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000005211cea3870c7ba7c6c44b185e62eecdb864cd8c560228ce57d31efbf64c200b2c200aacec78cf17a7148e784fe95a7a750335f8b9572ee28d72e7650000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), GasLimit: 0x47b760, Difficulty: big.NewInt(1), - Alloc: decodePrealloc(yoloV3AllocData), + Alloc: decodePrealloc(baikalAllocData), } } diff --git a/core/genesis_alloc.go b/core/genesis_alloc.go index 5b0e933d7a3d..0828d1067e54 100644 --- a/core/genesis_alloc.go +++ b/core/genesis_alloc.go @@ -25,4 +25,4 @@ const mainnetAllocData = "\xfa\x04]X\u0793\r\x83b\x011\x8e\u0189\x9agT\x06\x908' const ropstenAllocData = "\xf9\x03\xa4\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x80\xc2\v\x80\xc2\f\x80\xc2\r\x80\xc2\x0e\x80\xc2\x0f\x80\xc2\x10\x80\xc2\x11\x80\xc2\x12\x80\xc2\x13\x80\xc2\x14\x80\xc2\x15\x80\xc2\x16\x80\xc2\x17\x80\xc2\x18\x80\xc2\x19\x80\xc2\x1a\x80\xc2\x1b\x80\xc2\x1c\x80\xc2\x1d\x80\xc2\x1e\x80\xc2\x1f\x80\xc2 
\x80\xc2!\x80\xc2\"\x80\xc2#\x80\xc2$\x80\xc2%\x80\xc2&\x80\xc2'\x80\xc2(\x80\xc2)\x80\xc2*\x80\xc2+\x80\xc2,\x80\xc2-\x80\xc2.\x80\xc2/\x80\xc20\x80\xc21\x80\xc22\x80\xc23\x80\xc24\x80\xc25\x80\xc26\x80\xc27\x80\xc28\x80\xc29\x80\xc2:\x80\xc2;\x80\xc2<\x80\xc2=\x80\xc2>\x80\xc2?\x80\xc2@\x80\xc2A\x80\xc2B\x80\xc2C\x80\xc2D\x80\xc2E\x80\xc2F\x80\xc2G\x80\xc2H\x80\xc2I\x80\xc2J\x80\xc2K\x80\xc2L\x80\xc2M\x80\xc2N\x80\xc2O\x80\xc2P\x80\xc2Q\x80\xc2R\x80\xc2S\x80\xc2T\x80\xc2U\x80\xc2V\x80\xc2W\x80\xc2X\x80\xc2Y\x80\xc2Z\x80\xc2[\x80\xc2\\\x80\xc2]\x80\xc2^\x80\xc2_\x80\xc2`\x80\xc2a\x80\xc2b\x80\xc2c\x80\xc2d\x80\xc2e\x80\xc2f\x80\xc2g\x80\xc2h\x80\xc2i\x80\xc2j\x80\xc2k\x80\xc2l\x80\xc2m\x80\xc2n\x80\xc2o\x80\xc2p\x80\xc2q\x80\xc2r\x80\xc2s\x80\xc2t\x80\xc2u\x80\xc2v\x80\xc2w\x80\xc2x\x80\xc2y\x80\xc2z\x80\xc2{\x80\xc2|\x80\xc2}\x80\xc2~\x80\xc2\u007f\x80\u00c1\x80\x80\u00c1\x81\x80\u00c1\x82\x80\u00c1\x83\x80\u00c1\x84\x80\u00c1\x85\x80\u00c1\x86\x80\u00c1\x87\x80\u00c1\x88\x80\u00c1\x89\x80\u00c1\x8a\x80\u00c1\x8b\x80\u00c1\x8c\x80\u00c1\x8d\x80\u00c1\x8e\x80\u00c1\x8f\x80\u00c1\x90\x80\u00c1\x91\x80\u00c1\x92\x80\u00c1\x93\x80\u00c1\x94\x80\u00c1\x95\x80\u00c1\x96\x80\u00c1\x97\x80\u00c1\x98\x80\u00c1\x99\x80\u00c1\x9a\x80\u00c1\x9b\x80\u00c1\x9c\x80\u00c1\x9d\x80\u00c1\x9e\x80\u00c1\x9f\x80\u00c1\xa0\x80\u00c1\xa1\x80\u00c1\xa2\x80\u00c1\xa3\x80\u00c1\xa4\x80\u00c1\xa5\x80\u00c1\xa6\x80\u00c1\xa7\x80\u00c1\xa8\x80\u00c1\xa9\x80\u00c1\xaa\x80\u00c1\xab\x80\u00c1\xac\x80\u00c1\xad\x80\u00c1\xae\x80\u00c1\xaf\x80\u00c1\xb0\x80\u00c1\xb1\x80\u00c1\xb2\x80\u00c1\xb3\x80\u00c1\xb4\x80\u00c1\xb5\x80\u00c1\xb6\x80\u00c1\xb7\x80\u00c1\xb8\x80\u00c1\xb9\x80\u00c1\xba\x80\u00c1\xbb\x80\u00c1\xbc\x80\u00c1\xbd\x80\u00c1\xbe\x80\u00c1\xbf\x80\u00c1\xc0\x80\u00c1\xc1\x80\u00c1\u0080\u00c1\u00c0\u00c1\u0100\u00c1\u0140\u00c1\u0180\u00c1\u01c0\u00c1\u0200\u00c1\u0240\u00c1\u0280\u00c1\u02c0\u00c1\u0300\u00c1\u0340\u00c1\u0380\u00c1\u03c0\u00c1\u0400\u00c1\u0440\u00c1\u0480\u00c1\u04c0\u00c1\u0500\u00c1\u0540\u00c1\u0580\u00c1\u05c0\u00c1\u0600\u00c1\u0640\u00c1\u0680\u00c1\u06c0\u00c1\u0700\u00c1\u0740\u00c1\u0780\u00c1\u07c0\u00c1\xe0\x80\u00c1\xe1\x80\u00c1\xe2\x80\u00c1\xe3\x80\u00c1\xe4\x80\u00c1\xe5\x80\u00c1\xe6\x80\u00c1\xe7\x80\u00c1\xe8\x80\u00c1\xe9\x80\u00c1\xea\x80\u00c1\xeb\x80\u00c1\xec\x80\u00c1\xed\x80\u00c1\xee\x80\u00c1\xef\x80\u00c1\xf0\x80\u00c1\xf1\x80\u00c1\xf2\x80\u00c1\xf3\x80\u00c1\xf4\x80\u00c1\xf5\x80\u00c1\xf6\x80\u00c1\xf7\x80\u00c1\xf8\x80\u00c1\xf9\x80\u00c1\xfa\x80\u00c1\xfb\x80\u00c1\xfc\x80\u00c1\xfd\x80\u00c1\xfe\x80\u00c1\xff\x80\u3507KT\xa8\xbd\x15)f\xd6?pk\xae\x1f\xfe\xb0A\x19!\xe5\x8d\f\x9f,\x9c\xd0Ft\xed\xea@\x00\x00\x00" const rinkebyAllocData = "\xf9\x03\xb7\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x941\xb9\x8d\x14\x00{\xde\xe67)\x80\x86\x98\x8a\v\xbd1\x18E#\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" const goerliAllocData = "\xf9\x04\x06\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xe0\x94L*\xe4\x82Y5\x05\xf0\x16<\xde\xfc\a>\x81\xc6<\xdaA\a\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94\xa8\xe8\xf1G2e\x8eKQ\xe8q\x191\x05:\x8ai\xba\xf2\xb1\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe1\x94\u0665\x17\x9f\t\x1d\x85\x05\x1d<\x98'\x85\xef\xd1E\\\uc199\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe1\x94\u08bdBX\xd2v\x887\xba\xa2j(\xfeq\xdc\a\x9f\x84\u01cbJG\xe3\xc1$H\xf4\xad\x00\x00\x00" -const yoloV3AllocData = "\xf9\x05o\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x94\x0e\x89\xe2\xae\xdb\x1c\xfc\u06d4$\xd4\x1a\x1f!\x8fA2s\x81r\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x10A\xaf\xbc\xb3Y\u0568\xdcX\xc1[/\xf5\x13T\xff\x8a!}\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94`\xad\xc0\xf8\x9aA\xaf#|\xe75T\xed\xe1p\xd73\xec\x14\xe0\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94y\x9d2\x9e_X4\x19\x16|\xd7\"\x96$\x85\x92n3\x8fJ\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94|\xf5\xb7\x9b\xfe)\x1ag\xab\x02\xb3\x93\xe4V\xcc\xc4\xc2f\xf7S\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8a\x8e\xaf\xb1\xcfb\xbf\xbe\xb1t\x17i\xda\xe1\xa9\xddG\x99a\x92\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8b\xa1\xf1\tU\x1b\xd42\x800\x12dZ\xc16\xdd\xd6M\xbar\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xb0*.\xda\x1b1\u007f\xbd\x16v\x01(\x83k\n\u015bV\x0e\x9d\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xdf\n\x88\xb2\xb6\x8cg7\x13\xa8\xec\x82`\x03go'.5s\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +const baikalAllocData = "\xf9\x05\x01\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 \x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1
\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x94\x0e\x89\xe2\xae\xdb\x1c\xfc\u06d4$\xd4\x1a\x1f!\x8fA2s\x81r\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94`\xad\xc0\xf8\x9aA\xaf#|\xe75T\xed\xe1p\xd73\xec\x14\xe0\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94y\x9d2\x9e_X4\x19\x16|\xd7\"\x96$\x85\x92n3\x8fJ\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94|\xf5\xb7\x9b\xfe)\x1ag\xab\x02\xb3\x93\xe4V\xcc\xc4\xc2f\xf7S\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8a\x8e\xaf\xb1\xcfb\xbf\xbe\xb1t\x17i\xda\xe1\xa9\xddG\x99a\x92\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8b\xa1\xf1\tU\x1b\xd42\x800\x12dZ\xc16\xdd\xd6M\xbar\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xb0*.\xda\x1b1\u007f\xbd\x16v\x01(\x83k\n\u015bV\x0e\x9d\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" diff --git a/core/genesis_test.go b/core/genesis_test.go index 44c1ef253ac7..eec4c171e89c 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -186,8 +186,8 @@ func TestGenesisHashes(t *testing.T) { hash: params.RinkebyGenesisHash, }, { - genesis: DefaultYoloV3GenesisBlock(), - hash: params.YoloV3GenesisHash, + genesis: DefaultBaikalGenesisBlock(), + hash: params.BaikalGenesisHash, }, } for i, c := range cases { diff --git a/core/state/statedb.go b/core/state/statedb.go index 90f4709bfc46..203556c6b76a 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -991,7 +991,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { // - Add precompiles to access list (2929) // - Add the contents of the optional tx access list (2930) // -// This method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number. +// This method should only be called if Berlin/2929+2930 is applicable at the current number. func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { s.AddAddressToAccessList(sender) if dst != nil { diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 126748efeb87..5d94b26b3647 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -61,7 +61,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { // have the current block number available, use MakeSigner instead. 
func LatestSigner(config *params.ChainConfig) Signer { if config.ChainID != nil { - if config.BerlinBlock != nil || config.YoloV3Block != nil { + if config.BerlinBlock != nil { return NewEIP2930Signer(config.ChainID) } if config.EIP155Block != nil { diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 72601441d500..be612fb0ef8e 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -66,7 +66,7 @@ func setDefaults(cfg *Config) { IstanbulBlock: new(big.Int), MuirGlacierBlock: new(big.Int), BerlinBlock: new(big.Int), - YoloV3Block: nil, + LondonBlock: nil, } } diff --git a/eth/backend.go b/eth/backend.go index 7d8b0c52c6a4..3e770fe83d92 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -131,7 +131,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideBerlin) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideLondon) if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { return nil, genesisErr } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 0c6eb0bdd7d0..f5629e6a398b 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -200,7 +200,7 @@ type Config struct { CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` // Berlin block override (TODO: remove after the fork) - OverrideBerlin *big.Int `toml:",omitempty"` + OverrideLondon *big.Int `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain configuration. diff --git a/les/client.go b/les/client.go index 7534eb3ea0b9..1d8a2c6f9a07 100644 --- a/les/client.go +++ b/les/client.go @@ -88,7 +88,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideBerlin) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideLondon) if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { return nil, genesisErr } diff --git a/params/bootnodes.go b/params/bootnodes.go index f36ad61729c2..20bf0b7cbf27 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -67,11 +67,11 @@ var GoerliBootnodes = []string{ "enode://a59e33ccd2b3e52d578f1fbd70c6f9babda2650f0760d6ff3b37742fdcdfdb3defba5d56d315b40c46b70198c7621e63ffa3f987389c7118634b0fefbbdfa7fd@51.15.119.157:40303", } -// YoloV3Bootnodes are the enode URLs of the P2P bootstrap nodes running on the -// YOLOv3 ephemeral test network. -// TODO: Set Yolov3 bootnodes -var YoloV3Bootnodes = []string{ - "enode://9e1096aa59862a6f164994cb5cb16f5124d6c992cdbf4535ff7dea43ea1512afe5448dca9df1b7ab0726129603f1a3336b631e4d7a1a44c94daddd03241587f9@3.9.20.133:30303", +// BaikalBootnodes are the enode URLs of the P2P bootstrap nodes running on the +// Baikal ephemeral test network. 
+// TODO: Set Baikal bootnodes +var BaikalBootnodes = []string{ + "", } var V5Bootnodes = []string{ diff --git a/params/config.go b/params/config.go index eb80bb2e27f3..2cafa0e44922 100644 --- a/params/config.go +++ b/params/config.go @@ -31,7 +31,7 @@ var ( RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") - YoloV3GenesisHash = common.HexToHash("0xf1f2876e8500c77afcc03228757b39477eceffccf645b734967fe3c7e16967b7") + BaikalGenesisHash = common.HexToHash("0xa0bc5d43c72a990cedeb59d305702602b34c3ee8585e77d03c7a4fa64d79636e") ) // TrustedCheckpoints associates each known checkpoint with the genesis hash of @@ -217,9 +217,8 @@ var ( Threshold: 2, } - // YoloV3ChainConfig contains the chain parameters to run a node on the YOLOv3 test network. - YoloV3ChainConfig = &ChainConfig{ - ChainID: new(big.Int).SetBytes([]byte("yolov3x")), + BaikalChainConfig = &ChainConfig{ + ChainID: big.NewInt(1642), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: true, @@ -231,10 +230,10 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: nil, - BerlinBlock: nil, // Don't enable Berlin directly, we're YOLOing it - YoloV3Block: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), Clique: &CliqueConfig{ - Period: 15, + Period: 30, Epoch: 30000, }, } @@ -331,8 +330,8 @@ type ChainConfig struct { IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul) MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated) BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin) + LondonBlock *big.Int `json:"londonBlock,omitempty"` // London switch block (nil = no fork, 0 = already on london) - YoloV3Block *big.Int `json:"yoloV3Block,omitempty"` // YOLO v3: Gas repricings TODO @holiman add EIP references EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated) CatalystBlock *big.Int `json:"catalystBlock,omitempty"` // Catalyst switch block (nil = no fork, 0 = already on catalyst) @@ -371,7 +370,7 @@ func (c *ChainConfig) String() string { default: engine = "unknown" } - return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, YOLO v3: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, @@ -385,7 +384,7 @@ func (c *ChainConfig) String() string { c.IstanbulBlock, c.MuirGlacierBlock, c.BerlinBlock, - c.YoloV3Block, + c.LondonBlock, engine, ) } @@ -444,7 +443,12 @@ func (c *ChainConfig) IsIstanbul(num *big.Int) bool { // IsBerlin returns whether num is either equal to the Berlin fork block or greater. 
func (c *ChainConfig) IsBerlin(num *big.Int) bool { - return isForked(c.BerlinBlock, num) || isForked(c.YoloV3Block, num) + return isForked(c.BerlinBlock, num) +} + +// IsLondon returns whether num is either equal to the London fork block or greater. +func (c *ChainConfig) IsLondon(num *big.Int) bool { + return isForked(c.LondonBlock, num) } // IsCatalyst returns whether num is either equal to the Merge fork block or greater. @@ -496,6 +500,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "istanbulBlock", block: c.IstanbulBlock}, {name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true}, {name: "berlinBlock", block: c.BerlinBlock}, + {name: "londonBlock", block: c.LondonBlock}, } { if lastFork.name != "" { // Next one must be higher number @@ -562,8 +567,8 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi if isForkIncompatible(c.BerlinBlock, newcfg.BerlinBlock, head) { return newCompatError("Berlin fork block", c.BerlinBlock, newcfg.BerlinBlock) } - if isForkIncompatible(c.YoloV3Block, newcfg.YoloV3Block, head) { - return newCompatError("YOLOv3 fork block", c.YoloV3Block, newcfg.YoloV3Block) + if isForkIncompatible(c.LondonBlock, newcfg.LondonBlock, head) { + return newCompatError("London fork block", c.LondonBlock, newcfg.LondonBlock) } if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) { return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock) @@ -635,7 +640,7 @@ type Rules struct { ChainID *big.Int IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool - IsBerlin, IsCatalyst bool + IsBerlin, IsLondon, IsCatalyst bool } // Rules ensures c's ChainID is not nil. @@ -655,6 +660,7 @@ func (c *ChainConfig) Rules(num *big.Int) Rules { IsPetersburg: c.IsPetersburg(num), IsIstanbul: c.IsIstanbul(num), IsBerlin: c.IsBerlin(num), + IsLondon: c.IsLondon(num), IsCatalyst: c.IsCatalyst(num), } } diff --git a/tests/init.go b/tests/init.go index 67f706eb50bc..240b7159d5c9 100644 --- a/tests/init.go +++ b/tests/init.go @@ -141,7 +141,7 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(5), }, - "YOLOv3": { + "Berlin": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), @@ -151,11 +151,22 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - YoloV3Block: big.NewInt(0), + BerlinBlock: big.NewInt(0), }, - // This specification is subject to change, but is for now identical to YOLOv3 - // for cross-client testing purposes - "Berlin": { + "BerlinToLondonAt5": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(5), + }, + "London": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), @@ -166,6 +177,7 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), }, } From e69130d9f15d85c5955e1fa94abc74d606accba7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Bylica?= Date: Thu, 29 Apr 2021 11:46:03 +0200 Subject: [PATCH 013/208] core/vm, params: implement EIP 3541 
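
EIP-3541 rejects any newly deployed contract code whose first byte is 0xEF, reserving that prefix for a future EVM object format. The rule only takes effect from the London fork onwards and only applies at deploy time (CREATE, CREATE2 and contract-creation transactions); code that is already stored on chain is unaffected. A minimal sketch of the condition being enforced, for illustration only (the helper name below is invented and not part of this patch):

    // violatesEIP3541 reports whether the given runtime code may not be
    // deployed under EIP-3541, assuming the London rules are active.
    func violatesEIP3541(code []byte, isLondon bool) bool {
        return isLondon && len(code) > 0 && code[0] == 0xEF
    }

Inside the EVM the same check is applied to the code returned by the init code and surfaces as the new ErrInvalidCode error.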
--- core/vm/errors.go | 1 + core/vm/evm.go | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/core/vm/errors.go b/core/vm/errors.go index c813aa36af36..c7cfeae53ce3 100644 --- a/core/vm/errors.go +++ b/core/vm/errors.go @@ -34,6 +34,7 @@ var ( ErrWriteProtection = errors.New("write protection") ErrReturnDataOutOfBounds = errors.New("return data out of bounds") ErrGasUintOverflow = errors.New("gas uint64 overflow") + ErrInvalidCode = errors.New("invalid code: must not begin with 0xef") ) // ErrStackUnderflow wraps an evm error when the items on the stack less diff --git a/core/vm/evm.go b/core/vm/evm.go index bd54e855c6b7..8e3c9fe00f8d 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -468,6 +468,11 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, err = ErrMaxCodeSizeExceeded } + // Reject code starting with 0xEF if EIP-3541 is enabled. + if err == nil && len(ret) >= 1 && ret[0] == 0xEF && evm.chainRules.IsLondon { + err = ErrInvalidCode + } + // if the contract creation ran successfully and no errors were returned // calculate the gas required to store the code. If the code could not // be stored due to not enough gas set an error and let it be handled From a5669ae292274d3b0a223cda07ebe64f943bded4 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Fri, 7 May 2021 08:25:32 +0200 Subject: [PATCH 014/208] core, params: implement EIP-3529 (#22733) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * core, params: implement EIP-3529 * core/vm: add london instructionset * core/vm: add method doc for EIP enabler Co-authored-by: Péter Szilágyi --- core/blockchain_test.go | 3 +- core/state_transition.go | 15 ++- core/vm/eips.go | 26 +++-- core/vm/interpreter.go | 2 + core/vm/jump_table.go | 9 ++ core/vm/operations_acl.go | 210 ++++++++++++++++++++------------------ params/protocol_params.go | 15 +++ 7 files changed, 166 insertions(+), 114 deletions(-) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 75ef399c9adc..a8ce4c7b9a64 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -3102,7 +3102,8 @@ func TestEIP2718Transition(t *testing.T) { block := chain.GetBlockByNumber(1) // Expected gas is intrinsic + 2 * pc + hot load + cold load, since only one load is in the access list - expected := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas + vm.GasQuickStep*2 + vm.WarmStorageReadCostEIP2929 + vm.ColdSloadCostEIP2929 + expected := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas + + vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929 if block.GasUsed() != expected { t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expected, block.GasUsed()) diff --git a/core/state_transition.go b/core/state_transition.go index cdffc100a176..05becd9a0058 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -241,6 +241,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { sender := vm.AccountRef(msg.From()) homestead := st.evm.ChainConfig().IsHomestead(st.evm.Context.BlockNumber) istanbul := st.evm.ChainConfig().IsIstanbul(st.evm.Context.BlockNumber) + eip3529 := st.evm.ChainConfig().IsLondon(st.evm.Context.BlockNumber) contractCreation := msg.To() == nil // Check clauses 4-5, subtract intrinsic gas if everything is correct @@ -273,7 +274,13 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { st.state.SetNonce(msg.From(), 
st.state.GetNonce(sender.Address())+1) ret, st.gas, vmerr = st.evm.Call(sender, st.to(), st.data, st.gas, st.value) } - st.refundGas() + if !eip3529 { + // Before EIP-3529: refunds were capped to gasUsed / 2 + st.refundGas(params.RefundQuotient) + } else { + // After EIP-3529: refunds are capped to gasUsed / 5 + st.refundGas(params.RefundQuotientEIP3529) + } st.state.AddBalance(st.evm.Context.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)) return &ExecutionResult{ @@ -283,9 +290,9 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { }, nil } -func (st *StateTransition) refundGas() { - // Apply refund counter, capped to half of the used gas. - refund := st.gasUsed() / 2 +func (st *StateTransition) refundGas(refundQuotient uint64) { + // Apply refund counter, capped to a refund quotient + refund := st.gasUsed() / refundQuotient if refund > st.state.GetRefund() { refund = st.state.GetRefund() } diff --git a/core/vm/eips.go b/core/vm/eips.go index 6bb941d5f9d7..025502760b4a 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -25,6 +25,7 @@ import ( ) var activators = map[int]func(*JumpTable){ + 3529: enable3529, 2929: enable2929, 2200: enable2200, 1884: enable1884, @@ -115,28 +116,28 @@ func enable2929(jt *JumpTable) { jt[SLOAD].constantGas = 0 jt[SLOAD].dynamicGas = gasSLoadEIP2929 - jt[EXTCODECOPY].constantGas = WarmStorageReadCostEIP2929 + jt[EXTCODECOPY].constantGas = params.WarmStorageReadCostEIP2929 jt[EXTCODECOPY].dynamicGas = gasExtCodeCopyEIP2929 - jt[EXTCODESIZE].constantGas = WarmStorageReadCostEIP2929 + jt[EXTCODESIZE].constantGas = params.WarmStorageReadCostEIP2929 jt[EXTCODESIZE].dynamicGas = gasEip2929AccountCheck - jt[EXTCODEHASH].constantGas = WarmStorageReadCostEIP2929 + jt[EXTCODEHASH].constantGas = params.WarmStorageReadCostEIP2929 jt[EXTCODEHASH].dynamicGas = gasEip2929AccountCheck - jt[BALANCE].constantGas = WarmStorageReadCostEIP2929 + jt[BALANCE].constantGas = params.WarmStorageReadCostEIP2929 jt[BALANCE].dynamicGas = gasEip2929AccountCheck - jt[CALL].constantGas = WarmStorageReadCostEIP2929 + jt[CALL].constantGas = params.WarmStorageReadCostEIP2929 jt[CALL].dynamicGas = gasCallEIP2929 - jt[CALLCODE].constantGas = WarmStorageReadCostEIP2929 + jt[CALLCODE].constantGas = params.WarmStorageReadCostEIP2929 jt[CALLCODE].dynamicGas = gasCallCodeEIP2929 - jt[STATICCALL].constantGas = WarmStorageReadCostEIP2929 + jt[STATICCALL].constantGas = params.WarmStorageReadCostEIP2929 jt[STATICCALL].dynamicGas = gasStaticCallEIP2929 - jt[DELEGATECALL].constantGas = WarmStorageReadCostEIP2929 + jt[DELEGATECALL].constantGas = params.WarmStorageReadCostEIP2929 jt[DELEGATECALL].dynamicGas = gasDelegateCallEIP2929 // This was previously part of the dynamic cost, but we're using it as a constantGas @@ -144,3 +145,12 @@ func enable2929(jt *JumpTable) { jt[SELFDESTRUCT].constantGas = params.SelfdestructGasEIP150 jt[SELFDESTRUCT].dynamicGas = gasSelfdestructEIP2929 } + +// enable3529 enabled "EIP-3529: Reduction in refunds": +// - Removes refunds for selfdestructs +// - Reduces refunds for SSTORE +// - Reduces max refunds to 20% gas +func enable3529(jt *JumpTable) { + jt[SSTORE].dynamicGas = gasSStoreEIP3529 + jt[SELFDESTRUCT].dynamicGas = gasSelfdestructEIP3529 +} diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 1022c355c10c..8b755038fde8 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -98,6 +98,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { if cfg.JumpTable[STOP] == nil { var jt 
JumpTable switch { + case evm.chainRules.IsLondon: + jt = londonInstructionSet case evm.chainRules.IsBerlin: jt = berlinInstructionSet case evm.chainRules.IsIstanbul: diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index bb1800ea9197..a0609a0d7894 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -57,11 +57,20 @@ var ( constantinopleInstructionSet = newConstantinopleInstructionSet() istanbulInstructionSet = newIstanbulInstructionSet() berlinInstructionSet = newBerlinInstructionSet() + londonInstructionSet = newLondonInstructionSet() ) // JumpTable contains the EVM opcodes supported at a given fork. type JumpTable [256]*operation +// newLondonInstructionSet returns the frontier, homestead, byzantium, +// contantinople, istanbul, petersburg, berlin and london instructions. +func newLondonInstructionSet() JumpTable { + instructionSet := newBerlinInstructionSet() + enable3529(&instructionSet) // EIP-3529: Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529 + return instructionSet +} + // newBerlinInstructionSet returns the frontier, homestead, byzantium, // contantinople, istanbul, petersburg and berlin instructions. func newBerlinInstructionSet() JumpTable { diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index c56941899ed1..483226eefad8 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -24,91 +24,75 @@ import ( "github.com/ethereum/go-ethereum/params" ) -const ( - ColdAccountAccessCostEIP2929 = uint64(2600) // COLD_ACCOUNT_ACCESS_COST - ColdSloadCostEIP2929 = uint64(2100) // COLD_SLOAD_COST - WarmStorageReadCostEIP2929 = uint64(100) // WARM_STORAGE_READ_COST -) - -// gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929 -// -// When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys. -// If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys. -// Additionally, modify the parameters defined in EIP 2200 as follows: -// -// Parameter Old value New value -// SLOAD_GAS 800 = WARM_STORAGE_READ_COST -// SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST -// -//The other parameters defined in EIP 2200 are unchanged. -// see gasSStoreEIP2200(...) 
in core/vm/gas_table.go for more info about how EIP 2200 is specified -func gasSStoreEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { - // If we fail the minimum gas availability invariant, fail (0) - if contract.Gas <= params.SstoreSentryGasEIP2200 { - return 0, errors.New("not enough gas for reentrancy sentry") - } - // Gas sentry honoured, do the actual gas calculation based on the stored value - var ( - y, x = stack.Back(1), stack.peek() - slot = common.Hash(x.Bytes32()) - current = evm.StateDB.GetState(contract.Address(), slot) - cost = uint64(0) - ) - // Check slot presence in the access list - if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { - cost = ColdSloadCostEIP2929 - // If the caller cannot afford the cost, this change will be rolled back - evm.StateDB.AddSlotToAccessList(contract.Address(), slot) - if !addrPresent { - // Once we're done with YOLOv2 and schedule this for mainnet, might - // be good to remove this panic here, which is just really a - // canary to have during testing - panic("impossible case: address was not present in access list during sstore op") +func makeGasSStoreFunc(clearingRefund uint64) gasFunc { + return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // If we fail the minimum gas availability invariant, fail (0) + if contract.Gas <= params.SstoreSentryGasEIP2200 { + return 0, errors.New("not enough gas for reentrancy sentry") } - } - value := common.Hash(y.Bytes32()) + // Gas sentry honoured, do the actual gas calculation based on the stored value + var ( + y, x = stack.Back(1), stack.peek() + slot = common.Hash(x.Bytes32()) + current = evm.StateDB.GetState(contract.Address(), slot) + cost = uint64(0) + ) + // Check slot presence in the access list + if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { + cost = params.ColdSloadCostEIP2929 + // If the caller cannot afford the cost, this change will be rolled back + evm.StateDB.AddSlotToAccessList(contract.Address(), slot) + if !addrPresent { + // Once we're done with YOLOv2 and schedule this for mainnet, might + // be good to remove this panic here, which is just really a + // canary to have during testing + panic("impossible case: address was not present in access list during sstore op") + } + } + value := common.Hash(y.Bytes32()) - if current == value { // noop (1) - // EIP 2200 original clause: - // return params.SloadGasEIP2200, nil - return cost + WarmStorageReadCostEIP2929, nil // SLOAD_GAS - } - original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32()) - if original == current { - if original == (common.Hash{}) { // create slot (2.1.1) - return cost + params.SstoreSetGasEIP2200, nil + if current == value { // noop (1) + // EIP 2200 original clause: + // return params.SloadGasEIP2200, nil + return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS } - if value == (common.Hash{}) { // delete slot (2.1.2b) - evm.StateDB.AddRefund(params.SstoreClearsScheduleRefundEIP2200) + original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32()) + if original == current { + if original == (common.Hash{}) { // create slot (2.1.1) + return cost + params.SstoreSetGasEIP2200, nil + } + if value == (common.Hash{}) { // delete slot (2.1.2b) + evm.StateDB.AddRefund(clearingRefund) + } + // EIP-2200 original clause: + // return params.SstoreResetGasEIP2200, nil // write 
existing slot (2.1.2) + return cost + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929), nil // write existing slot (2.1.2) } - // EIP-2200 original clause: - // return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2) - return cost + (params.SstoreResetGasEIP2200 - ColdSloadCostEIP2929), nil // write existing slot (2.1.2) - } - if original != (common.Hash{}) { - if current == (common.Hash{}) { // recreate slot (2.2.1.1) - evm.StateDB.SubRefund(params.SstoreClearsScheduleRefundEIP2200) - } else if value == (common.Hash{}) { // delete slot (2.2.1.2) - evm.StateDB.AddRefund(params.SstoreClearsScheduleRefundEIP2200) + if original != (common.Hash{}) { + if current == (common.Hash{}) { // recreate slot (2.2.1.1) + evm.StateDB.SubRefund(clearingRefund) + } else if value == (common.Hash{}) { // delete slot (2.2.1.2) + evm.StateDB.AddRefund(clearingRefund) + } } - } - if original == value { - if original == (common.Hash{}) { // reset to original inexistent slot (2.2.2.1) - // EIP 2200 Original clause: - //evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.SloadGasEIP2200) - evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - WarmStorageReadCostEIP2929) - } else { // reset to original existing slot (2.2.2.2) - // EIP 2200 Original clause: - // evm.StateDB.AddRefund(params.SstoreResetGasEIP2200 - params.SloadGasEIP2200) - // - SSTORE_RESET_GAS redefined as (5000 - COLD_SLOAD_COST) - // - SLOAD_GAS redefined as WARM_STORAGE_READ_COST - // Final: (5000 - COLD_SLOAD_COST) - WARM_STORAGE_READ_COST - evm.StateDB.AddRefund((params.SstoreResetGasEIP2200 - ColdSloadCostEIP2929) - WarmStorageReadCostEIP2929) + if original == value { + if original == (common.Hash{}) { // reset to original inexistent slot (2.2.2.1) + // EIP 2200 Original clause: + //evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.SloadGasEIP2200) + evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.WarmStorageReadCostEIP2929) + } else { // reset to original existing slot (2.2.2.2) + // EIP 2200 Original clause: + // evm.StateDB.AddRefund(params.SstoreResetGasEIP2200 - params.SloadGasEIP2200) + // - SSTORE_RESET_GAS redefined as (5000 - COLD_SLOAD_COST) + // - SLOAD_GAS redefined as WARM_STORAGE_READ_COST + // Final: (5000 - COLD_SLOAD_COST) - WARM_STORAGE_READ_COST + evm.StateDB.AddRefund((params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929) - params.WarmStorageReadCostEIP2929) + } } + // EIP-2200 original clause: + //return params.SloadGasEIP2200, nil // dirty update (2.2) + return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2) } - // EIP-2200 original clause: - //return params.SloadGasEIP2200, nil // dirty update (2.2) - return cost + WarmStorageReadCostEIP2929, nil // dirty update (2.2) } // gasSLoadEIP2929 calculates dynamic gas for SLOAD according to EIP-2929 @@ -124,9 +108,9 @@ func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me // If the caller cannot afford the cost, this change will be rolled back // If he does afford it, we can skip checking the same thing later on, during execution evm.StateDB.AddSlotToAccessList(contract.Address(), slot) - return ColdSloadCostEIP2929, nil + return params.ColdSloadCostEIP2929, nil } - return WarmStorageReadCostEIP2929, nil + return params.WarmStorageReadCostEIP2929, nil } // gasExtCodeCopyEIP2929 implements extcodecopy according to EIP-2929 @@ -146,7 +130,7 @@ func gasExtCodeCopyEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memo evm.StateDB.AddAddressToAccessList(addr) var overflow 
bool // We charge (cold-warm), since 'warm' is already charged as constantGas - if gas, overflow = math.SafeAdd(gas, ColdAccountAccessCostEIP2929-WarmStorageReadCostEIP2929); overflow { + if gas, overflow = math.SafeAdd(gas, params.ColdAccountAccessCostEIP2929-params.WarmStorageReadCostEIP2929); overflow { return 0, ErrGasUintOverflow } return gas, nil @@ -168,7 +152,7 @@ func gasEip2929AccountCheck(evm *EVM, contract *Contract, stack *Stack, mem *Mem // If the caller cannot afford the cost, this change will be rolled back evm.StateDB.AddAddressToAccessList(addr) // The warm storage read cost is already charged as constantGas - return ColdAccountAccessCostEIP2929 - WarmStorageReadCostEIP2929, nil + return params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929, nil } return 0, nil } @@ -180,7 +164,7 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc { warmAccess := evm.StateDB.AddressInAccessList(addr) // The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so // the cost to charge for cold access, if any, is Cold - Warm - coldCost := ColdAccountAccessCostEIP2929 - WarmStorageReadCostEIP2929 + coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929 if !warmAccess { evm.StateDB.AddAddressToAccessList(addr) // Charge the remaining difference here already, to correctly calculate available @@ -212,25 +196,49 @@ var ( gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall) gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall) gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode) + gasSelfdestructEIP2929 = makeSelfdestructGasFn(true) + // gasSelfdestructEIP3529 implements the changes in EIP-2539 (no refunds) + gasSelfdestructEIP3529 = makeSelfdestructGasFn(false) + + // gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929 + // + // When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys. + // If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys. + // Additionally, modify the parameters defined in EIP 2200 as follows: + // + // Parameter Old value New value + // SLOAD_GAS 800 = WARM_STORAGE_READ_COST + // SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST + // + //The other parameters defined in EIP 2200 are unchanged. + // see gasSStoreEIP2200(...) 
in core/vm/gas_table.go for more info about how EIP 2200 is specified + gasSStoreEIP2929 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP2200) + + // gasSStoreEIP2539 implements gas cost for SSTORE according to EPI-2539 + // Replace `SSTORE_CLEARS_SCHEDULE` with `SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST` (4,800) + gasSStoreEIP3529 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP3529) ) -func gasSelfdestructEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { - var ( - gas uint64 - address = common.Address(stack.peek().Bytes20()) - ) - if !evm.StateDB.AddressInAccessList(address) { - // If the caller cannot afford the cost, this change will be rolled back - evm.StateDB.AddAddressToAccessList(address) - gas = ColdAccountAccessCostEIP2929 - } - // if empty and transfers value - if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { - gas += params.CreateBySelfdestructGas - } - if !evm.StateDB.HasSuicided(contract.Address()) { - evm.StateDB.AddRefund(params.SelfdestructRefundGas) +// makeSelfdestructGasFn can create the selfdestruct dynamic gas function for EIP-2929 and EIP-2539 +func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { + gasFunc := func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + var ( + gas uint64 + address = common.Address(stack.peek().Bytes20()) + ) + if !evm.StateDB.AddressInAccessList(address) { + // If the caller cannot afford the cost, this change will be rolled back + evm.StateDB.AddAddressToAccessList(address) + gas = params.ColdAccountAccessCostEIP2929 + } + // if empty and transfers value + if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { + gas += params.CreateBySelfdestructGas + } + if refundsEnabled && !evm.StateDB.HasSuicided(contract.Address()) { + evm.StateDB.AddRefund(params.SelfdestructRefundGas) + } + return gas, nil } - return gas, nil - + return gasFunc } diff --git a/params/protocol_params.go b/params/protocol_params.go index 88f1a06e12d3..22b4c0651c8f 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -57,6 +57,16 @@ const ( SstoreResetGasEIP2200 uint64 = 5000 // Once per SSTORE operation from clean non-zero to something else SstoreClearsScheduleRefundEIP2200 uint64 = 15000 // Once per SSTORE operation for clearing an originally existing storage slot + ColdAccountAccessCostEIP2929 = uint64(2600) // COLD_ACCOUNT_ACCESS_COST + ColdSloadCostEIP2929 = uint64(2100) // COLD_SLOAD_COST + WarmStorageReadCostEIP2929 = uint64(100) // WARM_STORAGE_READ_COST + + // In EIP-2200: SstoreResetGas was 5000. + // In EIP-2929: SstoreResetGas was changed to '5000 - COLD_SLOAD_COST'. + // In EIP-3529: SSTORE_CLEARS_SCHEDULE is defined as SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST + // Which becomes: 5000 - 2100 + 1900 = 4800 + SstoreClearsScheduleRefundEIP3529 uint64 = SstoreResetGasEIP2200 - ColdSloadCostEIP2929 + TxAccessListStorageKeyGas + JumpdestGas uint64 = 1 // Once per JUMPDEST operation. EpochDuration uint64 = 30000 // Duration between proof-of-work epochs. 
@@ -137,6 +147,11 @@ const ( Bls12381PairingPerPairGas uint64 = 23000 // Per-point pair gas price for BLS12-381 elliptic curve pairing check Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation Bls12381MapG2Gas uint64 = 110000 // Gas price for BLS12-381 mapping field element to G2 operation + + // The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529, + // up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529 + RefundQuotient uint64 = 2 + RefundQuotientEIP3529 uint64 = 5 ) // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations From 8a070e8f7d45eca745c29d4523cc2e05ff2f117f Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Fri, 7 May 2021 10:31:01 +0200 Subject: [PATCH 015/208] consensus/clique: add some missing checks (#22836) --- consensus/clique/clique.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index c62e180faa91..9954a023e574 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -20,6 +20,7 @@ package clique import ( "bytes" "errors" + "fmt" "io" "math/big" "math/rand" @@ -293,6 +294,15 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H return errInvalidDifficulty } } + // Verify that the gas limit is <= 2^63-1 + cap := uint64(0x7fffffffffffffff) + if header.GasLimit > cap { + return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap) + } + // Verify that the gasUsed is <= gasLimit + if header.GasUsed > header.GasLimit { + return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit) + } // If all checks passed, validate any special fields for hard forks if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil { return err @@ -324,6 +334,15 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header if parent.Time+c.config.Period > header.Time { return errInvalidTimestamp } + // Verify that the gas limit remains within allowed bounds + diff := int64(parent.GasLimit) - int64(header.GasLimit) + if diff < 0 { + diff *= -1 + } + limit := parent.GasLimit / params.GasLimitBoundDivisor + if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit { + return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit) + } // Retrieve the snapshot needed to verify this header and cache it snap, err := c.snapshot(chain, number-1, header.ParentHash, parents) if err != nil { From 17b1be26617268504a8ef3a6ee3c5c6400bfb63a Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Fri, 7 May 2021 14:04:54 +0200 Subject: [PATCH 016/208] consensus/ethash: implement EIP-3554 (bomb delay) --- consensus/ethash/consensus.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 249c2f65ebb7..492fc835382a 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -45,6 +45,11 @@ var ( maxUncles = 2 // Maximum number of uncles allowed in a single block allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks + // calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554. + // It offsets the bomb a total of 9.5M blocks. 
+ // Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554 + calcDifficultyEip3554 = makeDifficultyCalculator(big.NewInt(9500000)) + // calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384. // It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks. // Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384 @@ -325,6 +330,8 @@ func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Heade switch { case config.IsCatalyst(next): return big.NewInt(1) + case config.IsLondon(next): + return calcDifficultyEip3554(time, parent) case config.IsMuirGlacier(next): return calcDifficultyEip2384(time, parent) case config.IsConstantinople(next): From 700df1442d714cb3c42a602c39c042ce88be463f Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 7 May 2021 14:37:13 +0200 Subject: [PATCH 017/208] rlp: add support for optional struct fields (#22832) This adds support for a new struct tag "optional". Using this tag, structs used for RLP encoding/decoding can be extended in a backwards-compatible way, by adding new fields at the end. --- rlp/decode.go | 18 ++++- rlp/decode_test.go | 181 +++++++++++++++++++++++++++++++++++++++++++-- rlp/doc.go | 61 +++++++++++---- rlp/encode.go | 39 ++++++++-- rlp/encode_test.go | 22 +++++- rlp/typecache.go | 54 +++++++++++--- 6 files changed, 330 insertions(+), 45 deletions(-) diff --git a/rlp/decode.go b/rlp/decode.go index 79b7ef062664..b340aa029e43 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -229,7 +229,7 @@ func decodeBigInt(s *Stream, val reflect.Value) error { i = new(big.Int) val.Set(reflect.ValueOf(i)) } - // Reject leading zero bytes + // Reject leading zero bytes. if len(b) > 0 && b[0] == 0 { return wrapStreamError(ErrCanonInt, val.Type()) } @@ -394,9 +394,16 @@ func makeStructDecoder(typ reflect.Type) (decoder, error) { if _, err := s.List(); err != nil { return wrapStreamError(err, typ) } - for _, f := range fields { + for i, f := range fields { err := f.info.decoder(s, val.Field(f.index)) if err == EOL { + if f.optional { + // The field is optional, so reaching the end of the list before + // reaching the last field is acceptable. All remaining undecoded + // fields are zeroed. + zeroFields(val, fields[i:]) + break + } return &decodeError{msg: "too few elements", typ: typ} } else if err != nil { return addErrorContext(err, "."+typ.Field(f.index).Name) @@ -407,6 +414,13 @@ func makeStructDecoder(typ reflect.Type) (decoder, error) { return dec, nil } +func zeroFields(structval reflect.Value, fields []field) { + for _, f := range fields { + fv := structval.Field(f.index) + fv.Set(reflect.Zero(fv.Type())) + } +} + // makePtrDecoder creates a decoder that decodes into the pointer's element type. 
func makePtrDecoder(typ reflect.Type, tag tags) (decoder, error) { etype := typ.Elem() diff --git a/rlp/decode_test.go b/rlp/decode_test.go index d94c3969b221..87a330633230 100644 --- a/rlp/decode_test.go +++ b/rlp/decode_test.go @@ -369,6 +369,39 @@ type intField struct { X int } +type optionalFields struct { + A uint + B uint `rlp:"optional"` + C uint `rlp:"optional"` +} + +type optionalAndTailField struct { + A uint + B uint `rlp:"optional"` + Tail []uint `rlp:"tail"` +} + +type optionalBigIntField struct { + A uint + B *big.Int `rlp:"optional"` +} + +type optionalPtrField struct { + A uint + B *[3]byte `rlp:"optional"` +} + +type optionalPtrFieldNil struct { + A uint + B *[3]byte `rlp:"optional,nil"` +} + +type ignoredField struct { + A uint + B uint `rlp:"-"` + C uint +} + var ( veryBigInt = big.NewInt(0).Add( big.NewInt(0).Lsh(big.NewInt(0xFFFFFFFFFFFFFF), 16), @@ -376,12 +409,6 @@ var ( ) ) -type hasIgnoredField struct { - A uint - B uint `rlp:"-"` - C uint -} - var decodeTests = []decodeTest{ // booleans {input: "01", ptr: new(bool), value: true}, @@ -551,8 +578,8 @@ var decodeTests = []decodeTest{ // struct tag "-" { input: "C20102", - ptr: new(hasIgnoredField), - value: hasIgnoredField{A: 1, C: 2}, + ptr: new(ignoredField), + value: ignoredField{A: 1, C: 2}, }, // struct tag "nilList" @@ -592,6 +619,110 @@ var decodeTests = []decodeTest{ value: nilStringSlice{X: &[]uint{3}}, }, + // struct tag "optional" + { + input: "C101", + ptr: new(optionalFields), + value: optionalFields{1, 0, 0}, + }, + { + input: "C20102", + ptr: new(optionalFields), + value: optionalFields{1, 2, 0}, + }, + { + input: "C3010203", + ptr: new(optionalFields), + value: optionalFields{1, 2, 3}, + }, + { + input: "C401020304", + ptr: new(optionalFields), + error: "rlp: input list has too many elements for rlp.optionalFields", + }, + { + input: "C101", + ptr: new(optionalAndTailField), + value: optionalAndTailField{A: 1}, + }, + { + input: "C20102", + ptr: new(optionalAndTailField), + value: optionalAndTailField{A: 1, B: 2, Tail: []uint{}}, + }, + { + input: "C401020304", + ptr: new(optionalAndTailField), + value: optionalAndTailField{A: 1, B: 2, Tail: []uint{3, 4}}, + }, + { + input: "C101", + ptr: new(optionalBigIntField), + value: optionalBigIntField{A: 1, B: nil}, + }, + { + input: "C20102", + ptr: new(optionalBigIntField), + value: optionalBigIntField{A: 1, B: big.NewInt(2)}, + }, + { + input: "C101", + ptr: new(optionalPtrField), + value: optionalPtrField{A: 1}, + }, + { + input: "C20180", // not accepted because "optional" doesn't enable "nil" + ptr: new(optionalPtrField), + error: "rlp: input string too short for [3]uint8, decoding into (rlp.optionalPtrField).B", + }, + { + input: "C20102", + ptr: new(optionalPtrField), + error: "rlp: input string too short for [3]uint8, decoding into (rlp.optionalPtrField).B", + }, + { + input: "C50183010203", + ptr: new(optionalPtrField), + value: optionalPtrField{A: 1, B: &[3]byte{1, 2, 3}}, + }, + { + input: "C101", + ptr: new(optionalPtrFieldNil), + value: optionalPtrFieldNil{A: 1}, + }, + { + input: "C20180", // accepted because "nil" tag allows empty input + ptr: new(optionalPtrFieldNil), + value: optionalPtrFieldNil{A: 1}, + }, + { + input: "C20102", + ptr: new(optionalPtrFieldNil), + error: "rlp: input string too short for [3]uint8, decoding into (rlp.optionalPtrFieldNil).B", + }, + + // struct tag "optional" field clearing + { + input: "C101", + ptr: &optionalFields{A: 9, B: 8, C: 7}, + value: optionalFields{A: 1, B: 0, C: 0}, + }, + { + input: "C20102", + ptr: 
&optionalFields{A: 9, B: 8, C: 7}, + value: optionalFields{A: 1, B: 2, C: 0}, + }, + { + input: "C20102", + ptr: &optionalAndTailField{A: 9, B: 8, Tail: []uint{7, 6, 5}}, + value: optionalAndTailField{A: 1, B: 2, Tail: []uint{}}, + }, + { + input: "C101", + ptr: &optionalPtrField{A: 9, B: &[3]byte{8, 7, 6}}, + value: optionalPtrField{A: 1}, + }, + // RawValue {input: "01", ptr: new(RawValue), value: RawValue(unhex("01"))}, {input: "82FFFF", ptr: new(RawValue), value: RawValue(unhex("82FFFF"))}, @@ -822,6 +953,40 @@ func TestDecoderFunc(t *testing.T) { x() } +// This tests the validity checks for fields with struct tag "optional". +func TestInvalidOptionalField(t *testing.T) { + type ( + invalid1 struct { + A uint `rlp:"optional"` + B uint + } + invalid2 struct { + T []uint `rlp:"tail,optional"` + } + invalid3 struct { + T []uint `rlp:"optional,tail"` + } + ) + + tests := []struct { + v interface{} + err string + }{ + {v: new(invalid1), err: `rlp: struct field rlp.invalid1.B needs "optional" tag`}, + {v: new(invalid2), err: `rlp: invalid struct tag "optional" for rlp.invalid2.T (also has "tail" tag)`}, + {v: new(invalid3), err: `rlp: invalid struct tag "tail" for rlp.invalid3.T (also has "optional" tag)`}, + } + for _, test := range tests { + err := DecodeBytes(unhex("C20102"), test.v) + if err == nil { + t.Errorf("no error for %T", test.v) + } else if err.Error() != test.err { + t.Errorf("wrong error for %T: %v", test.v, err.Error()) + } + } + +} + func ExampleDecode() { input, _ := hex.DecodeString("C90A1486666F6F626172") diff --git a/rlp/doc.go b/rlp/doc.go index 7e6ee8520019..113828e39b90 100644 --- a/rlp/doc.go +++ b/rlp/doc.go @@ -102,29 +102,60 @@ Signed integers, floating point numbers, maps, channels and functions cannot be Struct Tags -Package rlp honours certain struct tags: "-", "tail", "nil", "nilList" and "nilString". +As with other encoding packages, the "-" tag ignores fields. -The "-" tag ignores fields. + type StructWithIgnoredField struct{ + Ignored uint `rlp:"-"` + Field uint + } + +Go struct values encode/decode as RLP lists. There are two ways of influencing the mapping +of fields to list elements. The "tail" tag, which may only be used on the last exported +struct field, allows slurping up any excess list elements into a slice. + + type StructWithTail struct{ + Field uint + Tail []string `rlp:"tail"` + } -The "tail" tag, which may only be used on the last exported struct field, allows slurping -up any excess list elements into a slice. See examples for more details. +The "optional" tag says that the field may be omitted if it is zero-valued. If this tag is +used on a struct field, all subsequent public fields must also be declared optional. -The "nil" tag applies to pointer-typed fields and changes the decoding rules for the field -such that input values of size zero decode as a nil pointer. This tag can be useful when -decoding recursive types. +When encoding a struct with optional fields, the output RLP list contains all values up to +the last non-zero optional field. - type StructWithOptionalFoo struct { - Foo *[20]byte `rlp:"nil"` +When decoding into a struct, optional fields may be omitted from the end of the input +list. For the example below, this means input lists of one, two, or three elements are +accepted. 
+ + type StructWithOptionalFields struct{ + Required uint + Optional1 uint `rlp:"optional"` + Optional2 uint `rlp:"optional"` + } + +The "nil", "nilList" and "nilString" tags apply to pointer-typed fields only, and change +the decoding rules for the field type. For regular pointer fields without the "nil" tag, +input values must always match the required input length exactly and the decoder does not +produce nil values. When the "nil" tag is set, input values of size zero decode as a nil +pointer. This is especially useful for recursive types. + + type StructWithNilField struct { + Field *[3]byte `rlp:"nil"` } +In the example above, Field allows two possible input sizes. For input 0xC180 (a list +containing an empty string) Field is set to nil after decoding. For input 0xC483000000 (a +list containing a 3-byte string), Field is set to a non-nil array pointer. + RLP supports two kinds of empty values: empty lists and empty strings. When using the -"nil" tag, the kind of empty value allowed for a type is chosen automatically. A struct -field whose Go type is a pointer to an unsigned integer, string, boolean or byte -array/slice expects an empty RLP string. Any other pointer field type encodes/decodes as -an empty RLP list. +"nil" tag, the kind of empty value allowed for a type is chosen automatically. A field +whose Go type is a pointer to an unsigned integer, string, boolean or byte array/slice +expects an empty RLP string. Any other pointer field type encodes/decodes as an empty RLP +list. The choice of null value can be made explicit with the "nilList" and "nilString" struct -tags. Using these tags encodes/decodes a Go nil pointer value as the kind of empty -RLP value defined by the tag. +tags. Using these tags encodes/decodes a Go nil pointer value as the empty RLP value kind +defined by the tag. */ package rlp diff --git a/rlp/encode.go b/rlp/encode.go index 77b591045d2f..b7e74a133f2e 100644 --- a/rlp/encode.go +++ b/rlp/encode.go @@ -546,15 +546,40 @@ func makeStructWriter(typ reflect.Type) (writer, error) { return nil, structFieldError{typ, f.index, f.info.writerErr} } } - writer := func(val reflect.Value, w *encbuf) error { - lh := w.list() - for _, f := range fields { - if err := f.info.writer(val.Field(f.index), w); err != nil { - return err + + var writer writer + firstOptionalField := firstOptionalField(fields) + if firstOptionalField == len(fields) { + // This is the writer function for structs without any optional fields. + writer = func(val reflect.Value, w *encbuf) error { + lh := w.list() + for _, f := range fields { + if err := f.info.writer(val.Field(f.index), w); err != nil { + return err + } } + w.listEnd(lh) + return nil + } + } else { + // If there are any "optional" fields, the writer needs to perform additional + // checks to determine the output list length. 
+ writer = func(val reflect.Value, w *encbuf) error { + lastField := len(fields) - 1 + for ; lastField >= firstOptionalField; lastField-- { + if !val.Field(fields[lastField].index).IsZero() { + break + } + } + lh := w.list() + for i := 0; i <= lastField; i++ { + if err := fields[i].info.writer(val.Field(fields[i].index), w); err != nil { + return err + } + } + w.listEnd(lh) + return nil } - w.listEnd(lh) - return nil } return writer, nil } diff --git a/rlp/encode_test.go b/rlp/encode_test.go index 418ee10a350e..74e8ededcb7e 100644 --- a/rlp/encode_test.go +++ b/rlp/encode_test.go @@ -257,12 +257,30 @@ var encTests = []encTest{ {val: simplestruct{A: 3, B: "foo"}, output: "C50383666F6F"}, {val: &recstruct{5, nil}, output: "C205C0"}, {val: &recstruct{5, &recstruct{4, &recstruct{3, nil}}}, output: "C605C404C203C0"}, + {val: &intField{X: 3}, error: "rlp: type int is not RLP-serializable (struct field rlp.intField.X)"}, + + // struct tag "-" + {val: &ignoredField{A: 1, B: 2, C: 3}, output: "C20103"}, + + // struct tag "tail" {val: &tailRaw{A: 1, Tail: []RawValue{unhex("02"), unhex("03")}}, output: "C3010203"}, {val: &tailRaw{A: 1, Tail: []RawValue{unhex("02")}}, output: "C20102"}, {val: &tailRaw{A: 1, Tail: []RawValue{}}, output: "C101"}, {val: &tailRaw{A: 1, Tail: nil}, output: "C101"}, - {val: &hasIgnoredField{A: 1, B: 2, C: 3}, output: "C20103"}, - {val: &intField{X: 3}, error: "rlp: type int is not RLP-serializable (struct field rlp.intField.X)"}, + + // struct tag "optional" + {val: &optionalFields{}, output: "C180"}, + {val: &optionalFields{A: 1}, output: "C101"}, + {val: &optionalFields{A: 1, B: 2}, output: "C20102"}, + {val: &optionalFields{A: 1, B: 2, C: 3}, output: "C3010203"}, + {val: &optionalFields{A: 1, B: 0, C: 3}, output: "C3018003"}, + {val: &optionalAndTailField{A: 1}, output: "C101"}, + {val: &optionalAndTailField{A: 1, B: 2}, output: "C20102"}, + {val: &optionalAndTailField{A: 1, Tail: []uint{5, 6}}, output: "C401800506"}, + {val: &optionalAndTailField{A: 1, Tail: []uint{5, 6}}, output: "C401800506"}, + {val: &optionalBigIntField{A: 1}, output: "C101"}, + {val: &optionalPtrField{A: 1}, output: "C101"}, + {val: &optionalPtrFieldNil{A: 1}, output: "C101"}, // nil {val: (*uint)(nil), output: "80"}, diff --git a/rlp/typecache.go b/rlp/typecache.go index 6026e1a64951..3910dcf0806e 100644 --- a/rlp/typecache.go +++ b/rlp/typecache.go @@ -38,15 +38,16 @@ type typeinfo struct { // tags represents struct tags. type tags struct { // rlp:"nil" controls whether empty input results in a nil pointer. - nilOK bool - - // This controls whether nil pointers are encoded/decoded as empty strings - // or empty lists. + // nilKind is the kind of empty value allowed for the field. nilKind Kind + nilOK bool + + // rlp:"optional" allows for a field to be missing in the input list. + // If this is set, all subsequent fields must also be optional. + optional bool - // rlp:"tail" controls whether this field swallows additional list - // elements. It can only be set for the last field, which must be - // of slice type. + // rlp:"tail" controls whether this field swallows additional list elements. It can + // only be set for the last field, which must be of slice type. tail bool // rlp:"-" ignores fields. @@ -104,28 +105,51 @@ func cachedTypeInfo1(typ reflect.Type, tags tags) *typeinfo { } type field struct { - index int - info *typeinfo + index int + info *typeinfo + optional bool } +// structFields resolves the typeinfo of all public fields in a struct type. 
func structFields(typ reflect.Type) (fields []field, err error) { - lastPublic := lastPublicField(typ) + var ( + lastPublic = lastPublicField(typ) + anyOptional = false + ) for i := 0; i < typ.NumField(); i++ { if f := typ.Field(i); f.PkgPath == "" { // exported tags, err := parseStructTag(typ, i, lastPublic) if err != nil { return nil, err } + + // Skip rlp:"-" fields. if tags.ignored { continue } + // If any field has the "optional" tag, subsequent fields must also have it. + if tags.optional || tags.tail { + anyOptional = true + } else if anyOptional { + return nil, fmt.Errorf(`rlp: struct field %v.%s needs "optional" tag`, typ, f.Name) + } info := cachedTypeInfo1(f.Type, tags) - fields = append(fields, field{i, info}) + fields = append(fields, field{i, info, tags.optional}) } } return fields, nil } +// firstOptionalField returns the index of the first field with "optional" tag. +func firstOptionalField(fields []field) int { + for i, f := range fields { + if f.optional { + return i + } + } + return len(fields) +} + type structFieldError struct { typ reflect.Type field int @@ -166,11 +190,19 @@ func parseStructTag(typ reflect.Type, fi, lastPublic int) (tags, error) { case "nilList": ts.nilKind = List } + case "optional": + ts.optional = true + if ts.tail { + return ts, structTagError{typ, f.Name, t, `also has "tail" tag`} + } case "tail": ts.tail = true if fi != lastPublic { return ts, structTagError{typ, f.Name, t, "must be on last field"} } + if ts.optional { + return ts, structTagError{typ, f.Name, t, `also has "optional" tag`} + } if f.Type.Kind() != reflect.Slice { return ts, structTagError{typ, f.Name, t, "field type is not slice"} } From 7ab7acfded9d121e39ef0dc99ba427211693e4ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Kj=C3=A6rstad?= Date: Mon, 10 May 2021 12:18:42 +0200 Subject: [PATCH 018/208] build: upgrade -dlgo version to Go 1.16.4 (#22848) --- build/checksums.txt | 26 +++++++++++++------------- build/ci.go | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index 0e33e07f2cb5..e667b30ce67f 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,18 +1,18 @@ # This file contains sha256 checksums of optional build dependencies. 
-b298d29de9236ca47a023e382313bcc2d2eed31dfa706b60a04103ce83a71a25 go1.16.3.src.tar.gz -6bb1cf421f8abc2a9a4e39140b7397cdae6aca3e8d36dcff39a1a77f4f1170ac go1.16.3.darwin-amd64.tar.gz -f4e96bbcd5d2d1942f5b55d9e4ab19564da4fad192012f6d7b0b9b055ba4208f go1.16.3.darwin-arm64.tar.gz -48b2d1481db756c88c18b1f064dbfc3e265ce4a775a23177ca17e25d13a24c5d go1.16.3.linux-386.tar.gz -951a3c7c6ce4e56ad883f97d9db74d3d6d80d5fec77455c6ada6c1f7ac4776d2 go1.16.3.linux-amd64.tar.gz -566b1d6f17d2bc4ad5f81486f0df44f3088c3ed47a3bec4099d8ed9939e90d5d go1.16.3.linux-arm64.tar.gz -0dae30385e3564a557dac7f12a63eedc73543e6da0f6017990e214ce8cc8797c go1.16.3.linux-armv6l.tar.gz -a3c16e1531bf9726f47911c4a9ed7cb665a6207a51c44f10ebad4db63b4bcc5a go1.16.3.windows-386.zip -a4400345135b36cb7942e52bbaf978b66814738b855eeff8de879a09fd99de7f go1.16.3.windows-amd64.zip -31ecd11d497684fa8b0f01ba784590c4c760943665fdc4fe0adaa1405c71736c go1.16.3.freebsd-386.tar.gz -ffbd920b309e62e807457b11d80e8c17fefe3ef6de423aaba4b1e270b2ca4c3d go1.16.3.freebsd-amd64.tar.gz -5eb046bbbbc7fe2591846a4303884cb5a01abb903e3e61e33459affe7874e811 go1.16.3.linux-ppc64le.tar.gz -3e8bd7bde533a73fd6fa75b5288678ef397e76c198cfb26b8ae086035383b1cf go1.16.3.linux-s390x.tar.gz +ae4f6b6e2a1677d31817984655a762074b5356da50fb58722b99104870d43503 go1.16.4.src.tar.gz +18fe94775763db3878717393b6d41371b0b45206055e49b3838328120c977d13 go1.16.4.darwin-amd64.tar.gz +cb6b972cc42e669f3585c648198cd5b6f6d7a0811d413ad64b50c02ba06ccc3a go1.16.4.darwin-arm64.tar.gz +cd1b146ef6e9006f27dd99e9687773e7fef30e8c985b7d41bff33e955a3bb53a go1.16.4.linux-386.tar.gz +7154e88f5a8047aad4b80ebace58a059e36e7e2e4eb3b383127a28c711b4ff59 go1.16.4.linux-amd64.tar.gz +8b18eb05ddda2652d69ab1b1dd1f40dd731799f43c6a58b512ad01ae5b5bba21 go1.16.4.linux-arm64.tar.gz +a53391a800ddec749ee90d38992babb27b95cfb864027350c737b9aa8e069494 go1.16.4.linux-armv6l.tar.gz +e75c0b114a09eb5499874162b208931dc260de0fedaeedac8621bf263c974605 go1.16.4.windows-386.zip +d40139b7ade8a3008e3240a6f86fe8f899a9c465c917e11dac8758af216f5eb0 go1.16.4.windows-amd64.zip +7cf2bc8a175d6d656861165bfc554f92dc78d2abf5afe5631db3579555d97409 go1.16.4.freebsd-386.tar.gz +ccdd2b76de1941b60734408fda0d750aaa69330d8a07430eed4c56bdb3502f6f go1.16.4.freebsd-amd64.tar.gz +80cfac566e344096a8df8f37bbd21f89e76a6fbe601406565d71a87a665fc125 go1.16.4.linux-ppc64le.tar.gz +d6431881b3573dc29ecc24fbeab5e5ec25d8c9273aa543769c86a1a3bbac1ddf go1.16.4.linux-s390x.tar.gz 7e9a47ab540aa3e8472fbf8120d28bed3b9d9cf625b955818e8bc69628d7187c golangci-lint-1.39.0-darwin-amd64.tar.gz 574daa2c9c299b01672a6daeb1873b5f12e413cdb6dc0e30f2ff163956778064 golangci-lint-1.39.0-darwin-arm64.tar.gz diff --git a/build/ci.go b/build/ci.go index d9f147ef0ea2..3752b1bed92b 100644 --- a/build/ci.go +++ b/build/ci.go @@ -152,7 +152,7 @@ var ( // This is the version of go that will be downloaded by // // go run ci.go install -dlgo - dlgoVersion = "1.16.3" + dlgoVersion = "1.16.4" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) From f19a679b09500e63b2fd134a4e829591174d9e48 Mon Sep 17 00:00:00 2001 From: Ceelog Date: Mon, 10 May 2021 18:19:32 +0800 Subject: [PATCH 019/208] cmd/geth: remove reference to monitor command (#22844) 'geth monitor' subcommand is no longer supported. 
--- cmd/geth/consolecmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 5c715a125074..85eabb527b22 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -171,7 +171,7 @@ func remoteConsole(ctx *cli.Context) error { // dialRPC returns a RPC client which connects to the given endpoint. // The check for empty endpoint implements the defaulting logic -// for "geth attach" and "geth monitor" with no argument. +// for "geth attach" with no argument. func dialRPC(endpoint string) (*rpc.Client, error) { if endpoint == "" { endpoint = node.DefaultIPCEndpoint(clientIdentifier) From ae5fcdc67fbfa208de257487cb86271b429545df Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Mon, 10 May 2021 12:29:33 +0200 Subject: [PATCH 020/208] go.mod: upgrade to github.com/holiman/uint256 v1.2.0 (#22745) --- consensus/ethash/difficulty.go | 6 ++---- core/vm/instructions_test.go | 4 ++-- core/vm/logger_test.go | 4 ++-- go.mod | 2 +- go.sum | 2 ++ 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/consensus/ethash/difficulty.go b/consensus/ethash/difficulty.go index 59c4ac74191f..66a18059c617 100644 --- a/consensus/ethash/difficulty.go +++ b/consensus/ethash/difficulty.go @@ -52,8 +52,7 @@ func CalcDifficultyFrontierU256(time uint64, parent *types.Header) *big.Int { - num = block.number */ - pDiff := uint256.NewInt() - pDiff.SetFromBig(parent.Difficulty) // pDiff: pdiff + pDiff, _ := uint256.FromBig(parent.Difficulty) // pDiff: pdiff adjust := pDiff.Clone() adjust.Rsh(adjust, difficultyBoundDivisor) // adjust: pDiff / 2048 @@ -96,8 +95,7 @@ func CalcDifficultyHomesteadU256(time uint64, parent *types.Header) *big.Int { - num = block.number */ - pDiff := uint256.NewInt() - pDiff.SetFromBig(parent.Difficulty) // pDiff: pdiff + pDiff, _ := uint256.FromBig(parent.Difficulty) // pDiff: pdiff adjust := pDiff.Clone() adjust.Rsh(adjust, difficultyBoundDivisor) // adjust: pDiff / 2048 diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index d17ccfab89ee..c881ba0aca31 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -568,11 +568,11 @@ func BenchmarkOpSHA3(bench *testing.B) { env.interpreter = evmInterpreter mem.Resize(32) pc := uint64(0) - start := uint256.NewInt() + start := new(uint256.Int) bench.ResetTimer() for i := 0; i < bench.N; i++ { - stack.pushN(*uint256.NewInt().SetUint64(32), *start) + stack.pushN(*uint256.NewInt(32), *start) opSha3(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) } } diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go index 9c936af36ab1..0bbfd4469206 100644 --- a/core/vm/logger_test.go +++ b/core/vm/logger_test.go @@ -60,8 +60,8 @@ func TestStoreCapture(t *testing.T) { Contract: contract, } ) - scope.Stack.push(uint256.NewInt().SetUint64(1)) - scope.Stack.push(uint256.NewInt()) + scope.Stack.push(uint256.NewInt(1)) + scope.Stack.push(new(uint256.Int)) var index common.Hash logger.CaptureState(env, 0, SSTORE, 0, 0, scope, nil, 0, nil) if len(logger.storage[contract.Address()]) == 0 { diff --git a/go.mod b/go.mod index 512d541f411d..9a2e66e101ba 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/holiman/bloomfilter/v2 v2.0.3 - github.com/holiman/uint256 v1.1.1 + github.com/holiman/uint256 v1.2.0 github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88 
github.com/influxdata/influxdb v1.8.3 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 diff --git a/go.sum b/go.sum index b6a27a2cf128..4cc4dbf20b3a 100644 --- a/go.sum +++ b/go.sum @@ -212,6 +212,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw= github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88 h1:bcAj8KroPf552TScjFPIakjH2/tdIrIH8F+cc4v4SRo= github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= From c0e201b69039b50a28ec7f8beee79306c9844cff Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 10 May 2021 12:38:54 +0200 Subject: [PATCH 021/208] eth/protocols/eth, les: avoid Raw() when decoding HashOrNumber (#22841) Getting the raw value is not necessary to decode this type, and decoding it directly from the stream is faster. --- eth/protocols/eth/protocol.go | 24 ++++++++++++------------ les/protocol.go | 24 ++++++++++++------------ 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 62c018ef8e16..de1b0ed1ee7f 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -155,19 +155,19 @@ func (hn *HashOrNumber) EncodeRLP(w io.Writer) error { // DecodeRLP is a specialized decoder for HashOrNumber to decode the contents // into either a block hash or a block number. func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error { - _, size, _ := s.Kind() - origin, err := s.Raw() - if err == nil { - switch { - case size == 32: - err = rlp.DecodeBytes(origin, &hn.Hash) - case size <= 8: - err = rlp.DecodeBytes(origin, &hn.Number) - default: - err = fmt.Errorf("invalid input size %d for origin", size) - } + _, size, err := s.Kind() + switch { + case err != nil: + return err + case size == 32: + hn.Number = 0 + return s.Decode(&hn.Hash) + case size <= 8: + hn.Hash = common.Hash{} + return s.Decode(&hn.Number) + default: + return fmt.Errorf("invalid input size %d for origin", size) } - return err } // BlockHeadersPacket represents a block header response. diff --git a/les/protocol.go b/les/protocol.go index 07a4452f40c9..06db9024eb8b 100644 --- a/les/protocol.go +++ b/les/protocol.go @@ -307,19 +307,19 @@ func (hn *hashOrNumber) EncodeRLP(w io.Writer) error { // DecodeRLP is a specialized decoder for hashOrNumber to decode the contents // into either a block hash or a block number. 
func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error { - _, size, _ := s.Kind() - origin, err := s.Raw() - if err == nil { - switch { - case size == 32: - err = rlp.DecodeBytes(origin, &hn.Hash) - case size <= 8: - err = rlp.DecodeBytes(origin, &hn.Number) - default: - err = fmt.Errorf("invalid input size %d for origin", size) - } + _, size, err := s.Kind() + switch { + case err != nil: + return err + case size == 32: + hn.Number = 0 + return s.Decode(&hn.Hash) + case size <= 8: + hn.Hash = common.Hash{} + return s.Decode(&hn.Number) + default: + return fmt.Errorf("invalid input size %d for origin", size) } - return err } // CodeData is the network response packet for a node data retrieval. From e536bb52ff87cfffe9b413d02dfcf80e7f265e5a Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 10 May 2021 13:35:07 +0200 Subject: [PATCH 022/208] eth/protocols/snap: adapt to uint256 API changes (#22851) --- eth/protocols/snap/range.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/eth/protocols/snap/range.go b/eth/protocols/snap/range.go index dd380ff47148..2627cb954b8f 100644 --- a/eth/protocols/snap/range.go +++ b/eth/protocols/snap/range.go @@ -42,15 +42,15 @@ func newHashRange(start common.Hash, num uint64) *hashRange { step256.SetFromBig(step) return &hashRange{ - current: uint256.NewInt().SetBytes32(start[:]), + current: new(uint256.Int).SetBytes32(start[:]), step: step256, } } // Next pushes the hash range to the next interval. func (r *hashRange) Next() bool { - next := new(uint256.Int) - if overflow := next.AddOverflow(r.current, r.step); overflow { + next, overflow := new(uint256.Int).AddOverflow(r.current, r.step) + if overflow { return false } r.current = next @@ -65,16 +65,17 @@ func (r *hashRange) Start() common.Hash { // End returns the last hash in the current interval. func (r *hashRange) End() common.Hash { // If the end overflows (non divisible range), return a shorter interval - next := new(uint256.Int) - if overflow := next.AddOverflow(r.current, r.step); overflow { + next, overflow := new(uint256.Int).AddOverflow(r.current, r.step) + if overflow { return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") } - return new(uint256.Int).Sub(next, uint256.NewInt().SetOne()).Bytes32() + return next.SubUint64(next, 1).Bytes32() } // incHash returns the next hash, in lexicographical order (a.k.a plus one) func incHash(h common.Hash) common.Hash { - a := uint256.NewInt().SetBytes32(h[:]) - a.Add(a, uint256.NewInt().SetOne()) + var a uint256.Int + a.SetBytes32(h[:]) + a.AddUint64(&a, 1) return common.Hash(a.Bytes32()) } From 643fd0efc6ebc8397311d1ca68fc71d8da247643 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 11 May 2021 10:43:35 +0200 Subject: [PATCH 023/208] core/types: remove support for legacy receipt/log storage encoding (#22852) * core/types: remove support for legacy receipt storage encoding * core/types: remove support for legacy log storage encoding --- core/types/log.go | 39 ++---------- core/types/receipt.go | 89 +-------------------------- core/types/receipt_test.go | 123 ------------------------------------- 3 files changed, 7 insertions(+), 244 deletions(-) diff --git a/core/types/log.go b/core/types/log.go index 88274e39dae0..87865bdb25c6 100644 --- a/core/types/log.go +++ b/core/types/log.go @@ -71,18 +71,6 @@ type rlpLog struct { // rlpStorageLog is the storage encoding of a log. 
type rlpStorageLog rlpLog -// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields. -type legacyRlpStorageLog struct { - Address common.Address - Topics []common.Hash - Data []byte - BlockNumber uint64 - TxHash common.Hash - TxIndex uint - BlockHash common.Hash - Index uint -} - // EncodeRLP implements rlp.Encoder. func (l *Log) EncodeRLP(w io.Writer) error { return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}) @@ -115,29 +103,10 @@ func (l *LogForStorage) EncodeRLP(w io.Writer) error { // // Note some redundant fields(e.g. block number, tx hash etc) will be assembled later. func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error { - blob, err := s.Raw() - if err != nil { - return err - } var dec rlpStorageLog - err = rlp.DecodeBytes(blob, &dec) - if err == nil { - *l = LogForStorage{ - Address: dec.Address, - Topics: dec.Topics, - Data: dec.Data, - } - } else { - // Try to decode log with previous definition. - var dec legacyRlpStorageLog - err = rlp.DecodeBytes(blob, &dec) - if err == nil { - *l = LogForStorage{ - Address: dec.Address, - Topics: dec.Topics, - Data: dec.Data, - } - } + if err := s.Decode(&dec); err != nil { + return err } - return err + *l = LogForStorage{Address: dec.Address, Topics: dec.Topics, Data: dec.Data} + return nil } diff --git a/core/types/receipt.go b/core/types/receipt.go index e04259b9d895..6b519a79d221 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -97,27 +97,6 @@ type storedReceiptRLP struct { Logs []*LogForStorage } -// v4StoredReceiptRLP is the storage encoding of a receipt used in database version 4. -type v4StoredReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - TxHash common.Hash - ContractAddress common.Address - Logs []*LogForStorage - GasUsed uint64 -} - -// v3StoredReceiptRLP is the original storage encoding of a receipt including some unnecessary fields. -type v3StoredReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - Bloom Bloom - TxHash common.Hash - ContractAddress common.Address - Logs []*LogForStorage - GasUsed uint64 -} - // NewReceipt creates a barebone transaction receipt, copying the init fields. // Deprecated: create receipts using a struct literal instead. func NewReceipt(root []byte, failed bool, cumulativeGasUsed uint64) *Receipt { @@ -237,8 +216,7 @@ func (r *Receipt) Size() common.StorageSize { // entire content of a receipt, as opposed to only the consensus fields originally. type ReceiptForStorage Receipt -// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt -// into an RLP stream. +// EncodeRLP implements rlp.Encoder. func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error { enc := &storedReceiptRLP{ PostStateOrStatus: (*Receipt)(r).statusEncoding(), @@ -251,82 +229,21 @@ func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error { return rlp.Encode(w, enc) } -// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation -// fields of a receipt from an RLP stream. +// DecodeRLP implements rlp.Decoder. func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error { - // Retrieve the entire receipt blob as we need to try multiple decoders - blob, err := s.Raw() - if err != nil { - return err - } - // Try decoding from the newest format for future proofness, then the older one - // for old nodes that just upgraded. V4 was an intermediate unreleased format so - // we do need to decode it, but it's not common (try last). 
- if err := decodeStoredReceiptRLP(r, blob); err == nil { - return nil - } - if err := decodeV3StoredReceiptRLP(r, blob); err == nil { - return nil - } - return decodeV4StoredReceiptRLP(r, blob) -} - -func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { var stored storedReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { - return err - } - if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { - return err - } - r.CumulativeGasUsed = stored.CumulativeGasUsed - r.Logs = make([]*Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*Log)(log) - } - r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) - - return nil -} - -func decodeV4StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { - var stored v4StoredReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { + if err := s.Decode(&stored); err != nil { return err } if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { return err } r.CumulativeGasUsed = stored.CumulativeGasUsed - r.TxHash = stored.TxHash - r.ContractAddress = stored.ContractAddress - r.GasUsed = stored.GasUsed r.Logs = make([]*Log, len(stored.Logs)) for i, log := range stored.Logs { r.Logs[i] = (*Log)(log) } r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) - - return nil -} - -func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { - var stored v3StoredReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { - return err - } - if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { - return err - } - r.CumulativeGasUsed = stored.CumulativeGasUsed - r.Bloom = stored.Bloom - r.TxHash = stored.TxHash - r.ContractAddress = stored.ContractAddress - r.GasUsed = stored.GasUsed - r.Logs = make([]*Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*Log)(log) - } return nil } diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go index 22a316c2374b..87fc16a5105b 100644 --- a/core/types/receipt_test.go +++ b/core/types/receipt_test.go @@ -20,7 +20,6 @@ import ( "bytes" "math" "math/big" - "reflect" "testing" "github.com/ethereum/go-ethereum/common" @@ -38,128 +37,6 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) { } } -func TestLegacyReceiptDecoding(t *testing.T) { - tests := []struct { - name string - encode func(*Receipt) ([]byte, error) - }{ - { - "StoredReceiptRLP", - encodeAsStoredReceiptRLP, - }, - { - "V4StoredReceiptRLP", - encodeAsV4StoredReceiptRLP, - }, - { - "V3StoredReceiptRLP", - encodeAsV3StoredReceiptRLP, - }, - } - - tx := NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil) - receipt := &Receipt{ - Status: ReceiptStatusFailed, - CumulativeGasUsed: 1, - Logs: []*Log{ - { - Address: common.BytesToAddress([]byte{0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - { - Address: common.BytesToAddress([]byte{0x01, 0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - }, - TxHash: tx.Hash(), - ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), - GasUsed: 111111, - } - receipt.Bloom = CreateBloom(Receipts{receipt}) - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - enc, err := tc.encode(receipt) - if err != nil { - t.Fatalf("Error encoding receipt: %v", err) - } - var dec ReceiptForStorage - if err := rlp.DecodeBytes(enc, &dec); err != nil { - t.Fatalf("Error decoding RLP receipt: 
%v", err) - } - // Check whether all consensus fields are correct. - if dec.Status != receipt.Status { - t.Fatalf("Receipt status mismatch, want %v, have %v", receipt.Status, dec.Status) - } - if dec.CumulativeGasUsed != receipt.CumulativeGasUsed { - t.Fatalf("Receipt CumulativeGasUsed mismatch, want %v, have %v", receipt.CumulativeGasUsed, dec.CumulativeGasUsed) - } - if dec.Bloom != receipt.Bloom { - t.Fatalf("Bloom data mismatch, want %v, have %v", receipt.Bloom, dec.Bloom) - } - if len(dec.Logs) != len(receipt.Logs) { - t.Fatalf("Receipt log number mismatch, want %v, have %v", len(receipt.Logs), len(dec.Logs)) - } - for i := 0; i < len(dec.Logs); i++ { - if dec.Logs[i].Address != receipt.Logs[i].Address { - t.Fatalf("Receipt log %d address mismatch, want %v, have %v", i, receipt.Logs[i].Address, dec.Logs[i].Address) - } - if !reflect.DeepEqual(dec.Logs[i].Topics, receipt.Logs[i].Topics) { - t.Fatalf("Receipt log %d topics mismatch, want %v, have %v", i, receipt.Logs[i].Topics, dec.Logs[i].Topics) - } - if !bytes.Equal(dec.Logs[i].Data, receipt.Logs[i].Data) { - t.Fatalf("Receipt log %d data mismatch, want %v, have %v", i, receipt.Logs[i].Data, dec.Logs[i].Data) - } - } - }) - } -} - -func encodeAsStoredReceiptRLP(want *Receipt) ([]byte, error) { - stored := &storedReceiptRLP{ - PostStateOrStatus: want.statusEncoding(), - CumulativeGasUsed: want.CumulativeGasUsed, - Logs: make([]*LogForStorage, len(want.Logs)), - } - for i, log := range want.Logs { - stored.Logs[i] = (*LogForStorage)(log) - } - return rlp.EncodeToBytes(stored) -} - -func encodeAsV4StoredReceiptRLP(want *Receipt) ([]byte, error) { - stored := &v4StoredReceiptRLP{ - PostStateOrStatus: want.statusEncoding(), - CumulativeGasUsed: want.CumulativeGasUsed, - TxHash: want.TxHash, - ContractAddress: want.ContractAddress, - Logs: make([]*LogForStorage, len(want.Logs)), - GasUsed: want.GasUsed, - } - for i, log := range want.Logs { - stored.Logs[i] = (*LogForStorage)(log) - } - return rlp.EncodeToBytes(stored) -} - -func encodeAsV3StoredReceiptRLP(want *Receipt) ([]byte, error) { - stored := &v3StoredReceiptRLP{ - PostStateOrStatus: want.statusEncoding(), - CumulativeGasUsed: want.CumulativeGasUsed, - Bloom: want.Bloom, - TxHash: want.TxHash, - ContractAddress: want.ContractAddress, - Logs: make([]*LogForStorage, len(want.Logs)), - GasUsed: want.GasUsed, - } - for i, log := range want.Logs { - stored.Logs[i] = (*LogForStorage)(log) - } - return rlp.EncodeToBytes(stored) -} - // Tests that receipt data can be correctly derived from the contextual infos func TestDeriveFields(t *testing.T) { // Create a few transactions to have receipts for From ca9808079844c55961cad6ddd9df04fbe21ff78c Mon Sep 17 00:00:00 2001 From: Ryan Schneider Date: Tue, 11 May 2021 02:25:51 -0700 Subject: [PATCH 024/208] cmd/geth, eth/gasprice: add configurable threshold to gas price oracle (#22752) This adds a cmd line parameter `--gpo.ignoreprice`, to make the gas price oracle ignore transactions below the given threshold. 
--- cmd/geth/main.go | 1 + cmd/geth/usage.go | 1 + cmd/utils/flags.go | 8 ++++++++ eth/ethconfig/config.go | 14 ++++++++------ eth/gasprice/gasprice.go | 39 +++++++++++++++++++++++++-------------- 5 files changed, 43 insertions(+), 20 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index bed4d8a488e1..04274d75f798 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -147,6 +147,7 @@ var ( utils.GpoBlocksFlag, utils.GpoPercentileFlag, utils.GpoMaxGasPriceFlag, + utils.GpoIgnoreGasPriceFlag, utils.EWASMInterpreterFlag, utils.EVMInterpreterFlag, utils.MinerNotifyFullFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index aa0a4b290116..117f6dc02b87 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -196,6 +196,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.GpoBlocksFlag, utils.GpoPercentileFlag, utils.GpoMaxGasPriceFlag, + utils.GpoIgnoreGasPriceFlag, }, }, { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index aa00f96c92e7..5c0bba4655f1 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -687,6 +687,11 @@ var ( Usage: "Maximum gas price will be recommended by gpo", Value: ethconfig.Defaults.GPO.MaxPrice.Int64(), } + GpoIgnoreGasPriceFlag = cli.Int64Flag{ + Name: "gpo.ignoreprice", + Usage: "Gas price below which gpo will ignore transactions", + Value: ethconfig.Defaults.GPO.IgnorePrice.Int64(), + } // Metrics flags MetricsEnabledFlag = cli.BoolFlag{ @@ -1296,6 +1301,9 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) { if ctx.GlobalIsSet(GpoMaxGasPriceFlag.Name) { cfg.MaxPrice = big.NewInt(ctx.GlobalInt64(GpoMaxGasPriceFlag.Name)) } + if ctx.GlobalIsSet(GpoIgnoreGasPriceFlag.Name) { + cfg.IgnorePrice = big.NewInt(ctx.GlobalInt64(GpoIgnoreGasPriceFlag.Name)) + } } func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index f5629e6a398b..f297019ba5a5 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -41,16 +41,18 @@ import ( // FullNodeGPO contains default gasprice oracle settings for full node. var FullNodeGPO = gasprice.Config{ - Blocks: 20, - Percentile: 60, - MaxPrice: gasprice.DefaultMaxPrice, + Blocks: 20, + Percentile: 60, + MaxPrice: gasprice.DefaultMaxPrice, + IgnorePrice: gasprice.DefaultIgnorePrice, } // LightClientGPO contains default gasprice oracle settings for light client. var LightClientGPO = gasprice.Config{ - Blocks: 2, - Percentile: 60, - MaxPrice: gasprice.DefaultMaxPrice, + Blocks: 2, + Percentile: 60, + MaxPrice: gasprice.DefaultMaxPrice, + IgnorePrice: gasprice.DefaultIgnorePrice, } // Defaults contains default settings for use on the Ethereum main net. diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index 560722bec09f..566c954ecc4f 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -32,12 +32,14 @@ import ( const sampleNumber = 3 // Number of transactions sampled in a block var DefaultMaxPrice = big.NewInt(500 * params.GWei) +var DefaultIgnorePrice = big.NewInt(2 * params.Wei) type Config struct { - Blocks int - Percentile int - Default *big.Int `toml:",omitempty"` - MaxPrice *big.Int `toml:",omitempty"` + Blocks int + Percentile int + Default *big.Int `toml:",omitempty"` + MaxPrice *big.Int `toml:",omitempty"` + IgnorePrice *big.Int `toml:",omitempty"` } // OracleBackend includes all necessary background APIs for oracle. @@ -50,12 +52,13 @@ type OracleBackend interface { // Oracle recommends gas prices based on the content of recent // blocks. 
Suitable for both light and full clients. type Oracle struct { - backend OracleBackend - lastHead common.Hash - lastPrice *big.Int - maxPrice *big.Int - cacheLock sync.RWMutex - fetchLock sync.Mutex + backend OracleBackend + lastHead common.Hash + lastPrice *big.Int + maxPrice *big.Int + ignorePrice *big.Int + cacheLock sync.RWMutex + fetchLock sync.Mutex checkBlocks int percentile int @@ -83,10 +86,18 @@ func NewOracle(backend OracleBackend, params Config) *Oracle { maxPrice = DefaultMaxPrice log.Warn("Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice) } + ignorePrice := params.IgnorePrice + if ignorePrice == nil || ignorePrice.Int64() <= 0 { + ignorePrice = DefaultIgnorePrice + log.Warn("Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice) + } else if ignorePrice.Int64() > 0 { + log.Info("Gasprice oracle is ignoring threshold set", "threshold", ignorePrice) + } return &Oracle{ backend: backend, lastPrice: params.Default, maxPrice: maxPrice, + ignorePrice: ignorePrice, checkBlocks: blocks, percentile: percent, } @@ -123,7 +134,7 @@ func (gpo *Oracle) SuggestPrice(ctx context.Context) (*big.Int, error) { txPrices []*big.Int ) for sent < gpo.checkBlocks && number > 0 { - go gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, result, quit) + go gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, gpo.ignorePrice, result, quit) sent++ exp++ number-- @@ -146,7 +157,7 @@ func (gpo *Oracle) SuggestPrice(ctx context.Context) (*big.Int, error) { // meaningful returned, try to query more blocks. But the maximum // is 2*checkBlocks. if len(res.prices) == 1 && len(txPrices)+1+exp < gpo.checkBlocks*2 && number > 0 { - go gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, result, quit) + go gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, gpo.ignorePrice, result, quit) sent++ exp++ number-- @@ -183,7 +194,7 @@ func (t transactionsByGasPrice) Less(i, j int) bool { return t[i].GasPriceCmp(t[ // and sends it to the result channel. If the block is empty or all transactions // are sent by the miner itself(it doesn't make any sense to include this kind of // transaction prices for sampling), nil gasprice is returned. 
-func (gpo *Oracle) getBlockPrices(ctx context.Context, signer types.Signer, blockNum uint64, limit int, result chan getBlockPricesResult, quit chan struct{}) { +func (gpo *Oracle) getBlockPrices(ctx context.Context, signer types.Signer, blockNum uint64, limit int, ignoreUnder *big.Int, result chan getBlockPricesResult, quit chan struct{}) { block, err := gpo.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum)) if block == nil { select { @@ -199,7 +210,7 @@ func (gpo *Oracle) getBlockPrices(ctx context.Context, signer types.Signer, bloc var prices []*big.Int for _, tx := range txs { - if tx.GasPriceIntCmp(common.Big1) <= 0 { + if ignoreUnder != nil && tx.GasPriceIntCmp(ignoreUnder) == -1 { continue } sender, err := types.Sender(signer, tx) From 0524cede37508068b3031912989493e10eb0544a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 11 May 2021 16:23:54 +0300 Subject: [PATCH 025/208] eth/tracers: do the JSON serialization via .js to capture C faults --- eth/tracers/tracer.go | 24 +++++++++++++++++++----- eth/tracers/tracer_test.go | 14 +++++++++----- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go index ba6592537300..218903dd92a0 100644 --- a/eth/tracers/tracer.go +++ b/eth/tracers/tracer.go @@ -505,7 +505,7 @@ func (jst *Tracer) Stop(err error) { // call executes a method on a JS object, catching any errors, formatting and // returning them as error objects. -func (jst *Tracer) call(method string, args ...string) (json.RawMessage, error) { +func (jst *Tracer) call(noret bool, method string, args ...string) (json.RawMessage, error) { // Execute the JavaScript call and return any error jst.vm.PushString(method) for _, arg := range args { @@ -519,7 +519,21 @@ func (jst *Tracer) call(method string, args ...string) (json.RawMessage, error) return nil, errors.New(err) } // No error occurred, extract return value and return - return json.RawMessage(jst.vm.JsonEncode(-1)), nil + if noret { + return nil, nil + } + // Push a JSON marshaller onto the stack. We can't marshal from the out- + // side because duktape can crash on large nestings and we can't catch + // C++ exceptions ourselves from Go. TODO(karalabe): Yuck, why wrap?! 
+ jst.vm.PushString("(JSON.stringify)") + jst.vm.Eval() + + jst.vm.Swap(-1, -2) + if code = jst.vm.Pcall(1); code != 0 { + err := jst.vm.SafeToString(-1) + return nil, errors.New(err) + } + return json.RawMessage(jst.vm.SafeToString(-1)), nil } func wrapError(context string, err error) error { @@ -578,7 +592,7 @@ func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost *jst.errorValue = err.Error() } - if _, err := jst.call("step", "log", "db"); err != nil { + if _, err := jst.call(true, "step", "log", "db"); err != nil { jst.err = wrapError("step", err) } } @@ -592,7 +606,7 @@ func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost jst.errorValue = new(string) *jst.errorValue = err.Error() - if _, err := jst.call("fault", "log", "db"); err != nil { + if _, err := jst.call(true, "fault", "log", "db"); err != nil { jst.err = wrapError("fault", err) } } @@ -640,7 +654,7 @@ func (jst *Tracer) GetResult() (json.RawMessage, error) { jst.vm.PutPropString(jst.stateObject, "ctx") // Finalize the trace and return the results - result, err := jst.call("result", "ctx", "db") + result, err := jst.call(false, "result", "ctx", "db") if err != nil { jst.err = wrapError("result", err) } diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go index 7cda2e533064..033824474d26 100644 --- a/eth/tracers/tracer_test.go +++ b/eth/tracers/tracer_test.go @@ -78,7 +78,7 @@ func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) { } func TestTracer(t *testing.T) { - execTracer := func(code string) []byte { + execTracer := func(code string) ([]byte, string) { t.Helper() ctx := &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} tracer, err := New(code, ctx.txCtx) @@ -87,13 +87,14 @@ func TestTracer(t *testing.T) { } ret, err := runTrace(tracer, ctx) if err != nil { - t.Fatal(err) + return nil, err.Error() // Stringify to allow comparison without nil checks } - return ret + return ret, "" } for i, tt := range []struct { code string want string + fail string }{ { // tests that we don't panic on bad arguments to memory access code: "{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}", @@ -116,10 +117,13 @@ func TestTracer(t *testing.T) { }, { // tests intrinsic gas code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed+'.'+ctx.intrinsicGas; }}", want: `"100000.6.21000"`, + }, { // tests too deep object / serialization crash + code: "{step: function() {}, fault: function() {}, result: function() { var o={}; var x=o; for (var i=0; i<1000; i++){ o.foo={}; o=o.foo; } return x; }}", + fail: "RangeError: json encode recursion limit in server-side tracer function 'result'", }, } { - if have := execTracer(tt.code); tt.want != string(have) { - t.Errorf("testcase %d: expected return value to be %s got %s\n\tcode: %v", i, tt.want, string(have), tt.code) + if have, err := execTracer(tt.code); tt.want != string(have) || tt.fail != err { + t.Errorf("testcase %d: expected return value to be '%s' got '%s', error to be '%s' got '%s'\n\tcode: %v", i, tt.want, string(have), tt.fail, err, tt.code) } } } From a2c456a526ae9c07944f12913c6830add93a553f Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Tue, 11 May 2021 17:12:10 +0200 Subject: [PATCH 026/208] core: ensure a broken trie invariant 
crashes genesis creation (#22780) * Ensure state could be created in ToBlock * Fix rebase errors * use a panic instead --- core/genesis.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/genesis.go b/core/genesis.go index 9ae718beb654..d7d08e090994 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -259,7 +259,10 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { if db == nil { db = rawdb.NewMemoryDatabase() } - statedb, _ := state.New(common.Hash{}, state.NewDatabase(db), nil) + statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil) + if err != nil { + panic(err) + } for addr, account := range g.Alloc { statedb.AddBalance(addr, account.Balance) statedb.SetCode(addr, account.Code) From addd8824cf3ad6133c1b1bbc3387a621eafba6a3 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Wed, 12 May 2021 10:05:39 +0200 Subject: [PATCH 027/208] cmd/geth, eth, core: snapshot dump + unify with trie dump (#22795) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * cmd/geth, eth, core: snapshot dump + unify with trie dump * cmd/evm: dump API fixes * cmd/geth, core, eth: fix some remaining errors * cmd/evm: dump - add limit, support address startkey, address review concerns * cmd, core/state, eth: minor polishes, fix snap dump crash, unify format Co-authored-by: Péter Szilágyi --- cmd/evm/internal/t8ntool/transition.go | 7 +- cmd/evm/runner.go | 2 +- cmd/evm/staterunner.go | 10 +-- cmd/geth/chaincmd.go | 112 +++++++++++++++++-------- cmd/geth/snapshot.go | 97 +++++++++++++++++++++ cmd/utils/flags.go | 12 ++- core/state/dump.go | 80 ++++++++++++------ core/state/state_test.go | 19 +++-- eth/api.go | 19 ++++- eth/api_test.go | 23 +++-- 10 files changed, 293 insertions(+), 88 deletions(-) diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index fedcd1243569..22cd0dd851ee 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -212,16 +212,15 @@ func Main(ctx *cli.Context) error { // Iterate over all the tests, run them and aggregate the results // Run the test and aggregate the result - state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) + s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) if err != nil { return err } body, _ := rlp.EncodeToBytes(txs) // Dump the excution result collector := make(Alloc) - state.DumpToCollector(collector, false, false, false, nil, -1) + s.DumpToCollector(collector, nil) return dispatchOutput(ctx, baseDir, result, collector, body) - } // txWithKey is a helper-struct, to allow us to use the types.Transaction along with @@ -303,7 +302,7 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) { } } genesisAccount := core.GenesisAccount{ - Code: common.FromHex(dumpAccount.Code), + Code: dumpAccount.Code, Storage: storage, Balance: balance, Nonce: dumpAccount.Nonce, diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 4063767cb8f4..2d890ef1a230 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -270,7 +270,7 @@ func runCmd(ctx *cli.Context) error { if ctx.GlobalBool(DumpFlag.Name) { statedb.Commit(true) statedb.IntermediateRoot(true) - fmt.Println(string(statedb.Dump(false, false, true))) + fmt.Println(string(statedb.Dump(nil))) } if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" { diff --git a/cmd/evm/staterunner.go 
b/cmd/evm/staterunner.go index c4df936c75ff..d8bc4eae8001 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -98,16 +98,16 @@ func stateTestCmd(ctx *cli.Context) error { for _, st := range test.Subtests() { // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} - _, state, err := test.Run(st, cfg, false) + _, s, err := test.Run(st, cfg, false) // print state root for evmlab tracing - if ctx.GlobalBool(MachineFlag.Name) && state != nil { - fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false)) + if ctx.GlobalBool(MachineFlag.Name) && s != nil { + fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false)) } if err != nil { // Test failed, mark as so and dump any state to aid debugging result.Pass, result.Error = false, err.Error() - if ctx.GlobalBool(DumpFlag.Name) && state != nil { - dump := state.RawDump(false, false, true) + if ctx.GlobalBool(DumpFlag.Name) && s != nil { + dump := s.RawDump(nil) result.State = &dump } } diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index d00b4bc1f61b..b9bd88e21362 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -18,6 +18,7 @@ package main import ( "encoding/json" + "errors" "fmt" "os" "runtime" @@ -27,12 +28,16 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/node" "gopkg.in/urfave/cli.v1" ) @@ -152,20 +157,21 @@ The export-preimages command export hash preimages to an RLP encoded stream`, Action: utils.MigrateFlags(dump), Name: "dump", Usage: "Dump a specific block from storage", - ArgsUsage: "[ | ]...", + ArgsUsage: "[? | ]", Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.SyncModeFlag, utils.IterativeOutputFlag, utils.ExcludeCodeFlag, utils.ExcludeStorageFlag, utils.IncludeIncompletesFlag, + utils.StartKeyFlag, + utils.DumpLimitFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` -The arguments are interpreted as block numbers or hashes. -Use "ethereum dump 0" to dump the genesis block.`, +This command dumps out the state for a given block (or latest, if none provided). 
+`, } ) @@ -373,47 +379,85 @@ func exportPreimages(ctx *cli.Context) error { return nil } -func dump(ctx *cli.Context) error { - stack, _ := makeConfigNode(ctx) - defer stack.Close() - +func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) { db := utils.MakeChainDatabase(ctx, stack, true) - for _, arg := range ctx.Args() { - var header *types.Header + var header *types.Header + if ctx.NArg() > 1 { + return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg()) + } + if ctx.NArg() == 1 { + arg := ctx.Args().First() if hashish(arg) { hash := common.HexToHash(arg) - number := rawdb.ReadHeaderNumber(db, hash) - if number != nil { + if number := rawdb.ReadHeaderNumber(db, hash); number != nil { header = rawdb.ReadHeader(db, hash, *number) + } else { + return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash) } } else { - number, _ := strconv.Atoi(arg) - hash := rawdb.ReadCanonicalHash(db, uint64(number)) - if hash != (common.Hash{}) { - header = rawdb.ReadHeader(db, hash, uint64(number)) - } - } - if header == nil { - fmt.Println("{}") - utils.Fatalf("block not found") - } else { - state, err := state.New(header.Root, state.NewDatabase(db), nil) + number, err := strconv.Atoi(arg) if err != nil { - utils.Fatalf("could not create new state: %v", err) + return nil, nil, common.Hash{}, err } - excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name) - excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name) - includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name) - if ctx.Bool(utils.IterativeOutputFlag.Name) { - state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout)) + if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) { + header = rawdb.ReadHeader(db, hash, uint64(number)) } else { - if includeMissing { - fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" + - " otherwise the accounts will overwrite each other in the resulting mapping.") - } - fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false)) + return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number) } } + } else { + // Use latest + header = rawdb.ReadHeadHeader(db) + } + if header == nil { + return nil, nil, common.Hash{}, errors.New("no head block found") + } + startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name)) + var start common.Hash + switch len(startArg) { + case 0: // common.Hash + case 32: + start = common.BytesToHash(startArg) + case 20: + start = crypto.Keccak256Hash(startArg) + log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex()) + default: + return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 
20 or 32 hex-encoded bytes required", startArg) + } + var conf = &state.DumpConfig{ + SkipCode: ctx.Bool(utils.ExcludeCodeFlag.Name), + SkipStorage: ctx.Bool(utils.ExcludeStorageFlag.Name), + OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name), + Start: start.Bytes(), + Max: ctx.Uint64(utils.DumpLimitFlag.Name), + } + log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(), + "skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage, + "start", hexutil.Encode(conf.Start), "limit", conf.Max) + return conf, db, header.Root, nil +} + +func dump(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + conf, db, root, err := parseDumpConfig(ctx, stack) + if err != nil { + return err + } + state, err := state.New(root, state.NewDatabase(db), nil) + if err != nil { + return err + } + if ctx.Bool(utils.IterativeOutputFlag.Name) { + state.IterativeDump(conf, json.NewEncoder(os.Stdout)) + } else { + if conf.OnlyWithAddresses { + fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+ + " otherwise the accounts will overwrite each other in the resulting mapping.") + return fmt.Errorf("incompatible options") + } + fmt.Println(string(state.Dump(conf))) } return nil } diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 1af458af207e..35d027fb1648 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -18,7 +18,9 @@ package main import ( "bytes" + "encoding/json" "errors" + "os" "time" "github.com/ethereum/go-ethereum/cmd/utils" @@ -142,6 +144,31 @@ verification. The default checking target is the HEAD state. It's basically iden to traverse-state, but the check granularity is smaller. It's also usable without snapshot enabled. +`, + }, + { + Name: "dump", + Usage: "Dump a specific block from storage (same as 'geth dump' but using snapshots)", + ArgsUsage: "[? | ]", + Action: utils.MigrateFlags(dumpState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.ExcludeCodeFlag, + utils.ExcludeStorageFlag, + utils.StartKeyFlag, + utils.DumpLimitFlag, + }, + Description: ` +This command is semantically equivalent to 'geth dump', but uses the snapshots +as the backend data source, making this command a lot faster. + +The argument is interpreted as block number or hash. If none is provided, the latest +block is used. 
`, }, }, @@ -430,3 +457,73 @@ func parseRoot(input string) (common.Hash, error) { } return h, nil } + +func dumpState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + conf, db, root, err := parseDumpConfig(ctx, stack) + if err != nil { + return err + } + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false) + if err != nil { + return err + } + accIt, err := snaptree.AccountIterator(root, common.BytesToHash(conf.Start)) + if err != nil { + return err + } + defer accIt.Release() + + log.Info("Snapshot dumping started", "root", root) + var ( + start = time.Now() + logged = time.Now() + accounts uint64 + ) + enc := json.NewEncoder(os.Stdout) + enc.Encode(struct { + Root common.Hash `json:"root"` + }{root}) + for accIt.Next() { + account, err := snapshot.FullAccount(accIt.Account()) + if err != nil { + return err + } + da := &state.DumpAccount{ + Balance: account.Balance.String(), + Nonce: account.Nonce, + Root: account.Root, + CodeHash: account.CodeHash, + SecureKey: accIt.Hash().Bytes(), + } + if !conf.SkipCode && !bytes.Equal(account.CodeHash, emptyCode) { + da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash)) + } + if !conf.SkipStorage { + da.Storage = make(map[common.Hash]string) + + stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{}) + if err != nil { + return err + } + for stIt.Next() { + da.Storage[stIt.Hash()] = common.Bytes2Hex(stIt.Slot()) + } + } + enc.Encode(da) + accounts++ + if time.Since(logged) > 8*time.Second { + log.Info("Snapshot dumping in progress", "at", accIt.Hash(), "accounts", accounts, + "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + if conf.Max > 0 && accounts >= conf.Max { + break + } + } + log.Info("Snapshot dumping complete", "accounts", accounts, + "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 5c0bba4655f1..d3fb3f2cbd1b 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -184,7 +184,7 @@ var ( Name: "exitwhensynced", Usage: "Exits after block synchronisation completes", } - IterativeOutputFlag = cli.BoolFlag{ + IterativeOutputFlag = cli.BoolTFlag{ Name: "iterative", Usage: "Print streaming JSON iteratively, delimited by newlines", } @@ -200,6 +200,16 @@ var ( Name: "nocode", Usage: "Exclude contract code (save db lookups)", } + StartKeyFlag = cli.StringFlag{ + Name: "start", + Usage: "Start position. Either a hash or address", + Value: "0x0000000000000000000000000000000000000000000000000000000000000000", + } + DumpLimitFlag = cli.Uint64Flag{ + Name: "limit", + Usage: "Max number of elements (0 = no limit)", + Value: 0, + } defaultSyncMode = ethconfig.Defaults.SyncMode SyncModeFlag = TextMarshalerFlag{ Name: "syncmode", diff --git a/core/state/dump.go b/core/state/dump.go index b25da714fd11..00faa4ed6a6b 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -19,6 +19,7 @@ package state import ( "encoding/json" "fmt" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -27,6 +28,16 @@ import ( "github.com/ethereum/go-ethereum/trie" ) +// DumpConfig is a set of options to control what portions of the statewill be +// iterated and collected. 
+type DumpConfig struct { + SkipCode bool + SkipStorage bool + OnlyWithAddresses bool + Start []byte + Max uint64 +} + // DumpCollector interface which the state trie calls during iteration type DumpCollector interface { // OnRoot is called with the state root @@ -39,9 +50,9 @@ type DumpCollector interface { type DumpAccount struct { Balance string `json:"balance"` Nonce uint64 `json:"nonce"` - Root string `json:"root"` - CodeHash string `json:"codeHash"` - Code string `json:"code,omitempty"` + Root hexutil.Bytes `json:"root"` + CodeHash hexutil.Bytes `json:"codeHash"` + Code hexutil.Bytes `json:"code,omitempty"` Storage map[common.Hash]string `json:"storage,omitempty"` Address *common.Address `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode SecureKey hexutil.Bytes `json:"key,omitempty"` // If we don't have address, we can output the key @@ -111,38 +122,50 @@ func (d iterativeDump) OnRoot(root common.Hash) { }{root}) } -func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) (nextKey []byte) { - missingPreimages := 0 +// DumpToCollector iterates the state according to the given options and inserts +// the items into a collector for aggregation or serialization. +func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte) { + // Sanitize the input to allow nil configs + if conf == nil { + conf = new(DumpConfig) + } + var ( + missingPreimages int + accounts uint64 + start = time.Now() + logged = time.Now() + ) + log.Info("Trie dumping started", "root", s.trie.Hash()) c.OnRoot(s.trie.Hash()) - var count int - it := trie.NewIterator(s.trie.NodeIterator(start)) + it := trie.NewIterator(s.trie.NodeIterator(conf.Start)) for it.Next() { var data Account if err := rlp.DecodeBytes(it.Value, &data); err != nil { panic(err) } account := DumpAccount{ - Balance: data.Balance.String(), - Nonce: data.Nonce, - Root: common.Bytes2Hex(data.Root[:]), - CodeHash: common.Bytes2Hex(data.CodeHash), + Balance: data.Balance.String(), + Nonce: data.Nonce, + Root: data.Root[:], + CodeHash: data.CodeHash, + SecureKey: it.Key, } addrBytes := s.trie.GetKey(it.Key) if addrBytes == nil { // Preimage missing missingPreimages++ - if excludeMissingPreimages { + if conf.OnlyWithAddresses { continue } account.SecureKey = it.Key } addr := common.BytesToAddress(addrBytes) obj := newObject(s, addr, data) - if !excludeCode { - account.Code = common.Bytes2Hex(obj.Code(s.db)) + if !conf.SkipCode { + account.Code = obj.Code(s.db) } - if !excludeStorage { + if !conf.SkipStorage { account.Storage = make(map[common.Hash]string) storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(nil)) for storageIt.Next() { @@ -155,8 +178,13 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, } } c.OnAccount(addr, account) - count++ - if maxResults > 0 && count >= maxResults { + accounts++ + if time.Since(logged) > 8*time.Second { + log.Info("Trie dumping in progress", "at", it.Key, "accounts", accounts, + "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + if conf.Max > 0 && accounts >= conf.Max { if it.Next() { nextKey = it.Key } @@ -166,22 +194,24 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, if missingPreimages > 0 { log.Warn("Dump incomplete due to missing preimages", "missing", missingPreimages) } + log.Info("Trie dumping complete", "accounts", accounts, + "elapsed", 
common.PrettyDuration(time.Since(start))) return nextKey } // RawDump returns the entire state an a single large object -func (s *StateDB) RawDump(excludeCode, excludeStorage, excludeMissingPreimages bool) Dump { +func (s *StateDB) RawDump(opts *DumpConfig) Dump { dump := &Dump{ Accounts: make(map[common.Address]DumpAccount), } - s.DumpToCollector(dump, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0) + s.DumpToCollector(dump, opts) return *dump } // Dump returns a JSON string representing the entire state as a single json-object -func (s *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool) []byte { - dump := s.RawDump(excludeCode, excludeStorage, excludeMissingPreimages) +func (s *StateDB) Dump(opts *DumpConfig) []byte { + dump := s.RawDump(opts) json, err := json.MarshalIndent(dump, "", " ") if err != nil { fmt.Println("Dump err", err) @@ -190,15 +220,15 @@ func (s *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool } // IterativeDump dumps out accounts as json-objects, delimited by linebreaks on stdout -func (s *StateDB) IterativeDump(excludeCode, excludeStorage, excludeMissingPreimages bool, output *json.Encoder) { - s.DumpToCollector(iterativeDump{output}, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0) +func (s *StateDB) IterativeDump(opts *DumpConfig, output *json.Encoder) { + s.DumpToCollector(iterativeDump{output}, opts) } // IteratorDump dumps out a batch of accounts starts with the given start key -func (s *StateDB) IteratorDump(excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) IteratorDump { +func (s *StateDB) IteratorDump(opts *DumpConfig) IteratorDump { iterator := &IteratorDump{ Accounts: make(map[common.Address]DumpAccount), } - iterator.Next = s.DumpToCollector(iterator, excludeCode, excludeStorage, excludeMissingPreimages, start, maxResults) + iterator.Next = s.DumpToCollector(iterator, opts) return *iterator } diff --git a/core/state/state_test.go b/core/state/state_test.go index 956653146647..0a55d7781fd1 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -57,28 +57,31 @@ func TestDump(t *testing.T) { s.state.Commit(false) // check that DumpToCollector contains the state objects that are in trie - got := string(s.state.Dump(false, false, true)) + got := string(s.state.Dump(nil)) want := `{ "root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2", "accounts": { "0x0000000000000000000000000000000000000001": { "balance": "22", "nonce": 0, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "key": "0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d" }, "0x0000000000000000000000000000000000000002": { "balance": "44", "nonce": 0, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "key": "0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62" }, "0x0000000000000000000000000000000000000102": { "balance": "0", "nonce": 0, - "root": 
"56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3", - "code": "03030303030303" + "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "codeHash": "0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3", + "code": "0x03030303030303", + "key": "0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1" } } }` diff --git a/eth/api.go b/eth/api.go index 7387459c94e0..6a22c9e41680 100644 --- a/eth/api.go +++ b/eth/api.go @@ -264,12 +264,16 @@ func NewPublicDebugAPI(eth *Ethereum) *PublicDebugAPI { // DumpBlock retrieves the entire state of the database at a given block. func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) { + opts := &state.DumpConfig{ + OnlyWithAddresses: true, + Max: AccountRangeMaxResults, // Sanity limit over RPC + } if blockNr == rpc.PendingBlockNumber { // If we're dumping the pending state, we need to request // both the pending block as well as the pending state from // the miner and operate on those _, stateDb := api.eth.miner.Pending() - return stateDb.RawDump(false, false, true), nil + return stateDb.RawDump(opts), nil } var block *types.Block if blockNr == rpc.LatestBlockNumber { @@ -284,7 +288,7 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error if err != nil { return state.Dump{}, err } - return stateDb.RawDump(false, false, true), nil + return stateDb.RawDump(opts), nil } // PrivateDebugAPI is the collection of Ethereum full node APIs exposed over @@ -386,10 +390,17 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta return state.IteratorDump{}, errors.New("either block number or block hash must be specified") } + opts := &state.DumpConfig{ + SkipCode: nocode, + SkipStorage: nostorage, + OnlyWithAddresses: !incompletes, + Start: start, + Max: uint64(maxResults), + } if maxResults > AccountRangeMaxResults || maxResults <= 0 { - maxResults = AccountRangeMaxResults + opts.Max = AccountRangeMaxResults } - return stateDb.IteratorDump(nocode, nostorage, incompletes, start, maxResults), nil + return stateDb.IteratorDump(opts), nil } // StorageRangeResult is the result of a debug_storageRangeAt API call. 
diff --git a/eth/api_test.go b/eth/api_test.go index b44eed40bcd2..39a1d5846004 100644 --- a/eth/api_test.go +++ b/eth/api_test.go @@ -34,7 +34,13 @@ import ( var dumper = spew.ConfigState{Indent: " "} func accountRangeTest(t *testing.T, trie *state.Trie, statedb *state.StateDB, start common.Hash, requestedNum int, expectedNum int) state.IteratorDump { - result := statedb.IteratorDump(true, true, false, start.Bytes(), requestedNum) + result := statedb.IteratorDump(&state.DumpConfig{ + SkipCode: true, + SkipStorage: true, + OnlyWithAddresses: false, + Start: start.Bytes(), + Max: uint64(requestedNum), + }) if len(result.Accounts) != expectedNum { t.Fatalf("expected %d results, got %d", expectedNum, len(result.Accounts)) @@ -131,12 +137,17 @@ func TestEmptyAccountRange(t *testing.T) { t.Parallel() var ( - statedb = state.NewDatabase(rawdb.NewMemoryDatabase()) - state, _ = state.New(common.Hash{}, statedb, nil) + statedb = state.NewDatabase(rawdb.NewMemoryDatabase()) + st, _ = state.New(common.Hash{}, statedb, nil) ) - state.Commit(true) - state.IntermediateRoot(true) - results := state.IteratorDump(true, true, true, (common.Hash{}).Bytes(), AccountRangeMaxResults) + st.Commit(true) + st.IntermediateRoot(true) + results := st.IteratorDump(&state.DumpConfig{ + SkipCode: true, + SkipStorage: true, + OnlyWithAddresses: true, + Max: uint64(AccountRangeMaxResults), + }) if bytes.Equal(results.Next, (common.Hash{}).Bytes()) { t.Fatalf("Empty results should not return a second page") } From 597ecb39cc963eb3b85b01b759c75c45e71f41d0 Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Mon, 17 May 2021 00:52:32 -0600 Subject: [PATCH 028/208] cmd/evm: return json error if unmarshalling from stdin fails (#22871) * cmd/evm: return json error if unmarshalling from stdin fails * cmd/evm: make error capitalizations uniform (all lowercase starts) * cmd/evm: capitalize error sent directly to stderror --- cmd/evm/internal/t8ntool/transition.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 22cd0dd851ee..9bb03c2c6aaf 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -142,7 +142,9 @@ func Main(ctx *cli.Context) error { // Figure out the prestate alloc if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { decoder := json.NewDecoder(os.Stdin) - decoder.Decode(inputData) + if err := decoder.Decode(inputData); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } } if allocStr != stdinSelector { inFile, err := os.Open(allocStr) @@ -152,7 +154,7 @@ func Main(ctx *cli.Context) error { defer inFile.Close() decoder := json.NewDecoder(inFile) if err := decoder.Decode(&inputData.Alloc); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling alloc-file: %v", err)) + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling alloc-file: %v", err)) } } prestate.Pre = inputData.Alloc @@ -167,7 +169,7 @@ func Main(ctx *cli.Context) error { decoder := json.NewDecoder(inFile) var env stEnv if err := decoder.Decode(&env); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling env-file: %v", err)) + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling env-file: %v", err)) } inputData.Env = &env } @@ -180,7 +182,7 @@ func Main(ctx *cli.Context) error { // Construct the chainconfig var chainConfig 
*params.ChainConfig if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil { - return NewError(ErrorVMConfig, fmt.Errorf("Failed constructing chain configuration: %v", err)) + return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err)) } else { chainConfig = cConf vmConfig.ExtraEips = extraEips @@ -197,7 +199,7 @@ func Main(ctx *cli.Context) error { defer inFile.Close() decoder := json.NewDecoder(inFile) if err := decoder.Decode(&txsWithKeys); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling txs-file: %v", err)) + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) } } else { txsWithKeys = inputData.Txs @@ -206,7 +208,7 @@ func Main(ctx *cli.Context) error { signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number))) if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed signing transactions: %v", err)) + return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) } // Iterate over all the tests, run them and aggregate the results @@ -277,7 +279,7 @@ func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Tran // This transaction needs to be signed signed, err := types.SignTx(tx, signer, key) if err != nil { - return nil, NewError(ErrorJson, fmt.Errorf("Tx %d: failed to sign tx: %v", i, err)) + return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err)) } signedTxs = append(signedTxs, signed) } else { From 14bc6e5130ebd291026da82f9ad9684bde895479 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Mon, 17 May 2021 10:49:23 +0200 Subject: [PATCH 029/208] consensus/ethash: change eip3554 from 9.5M to 9.7M (#22870) --- consensus/ethash/consensus.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 492fc835382a..b04cb24fb478 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -46,9 +46,9 @@ var ( allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks // calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554. - // It offsets the bomb a total of 9.5M blocks. + // It offsets the bomb a total of 9.7M blocks. // Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554 - calcDifficultyEip3554 = makeDifficultyCalculator(big.NewInt(9500000)) + calcDifficultyEip3554 = makeDifficultyCalculator(big.NewInt(9700000)) // calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384. // It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks. From 94451c2788295901c302c9bf5fa2f7b021c924e2 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Mon, 17 May 2021 15:13:22 +0200 Subject: [PATCH 030/208] all: implement EIP-1559 (#22837) This is the initial implementation of EIP-1559 in packages core/types and core. Mining, RPC, etc. will be added in subsequent commits. 
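
As an illustration of the new fee rules, here is a worked sketch of the base-fee arithmetic added in consensus/misc.CalcBaseFee, assuming the EIP-1559 constants (elasticity multiplier 2, change denominator 8, initial base fee of 1 gwei); the gas numbers are illustrative only and not taken from any test in this patch.

```go
package main

import (
	"fmt"
	"math/big"
)

// Mirrors the CalcBaseFee arithmetic from consensus/misc/eip1559.go for the
// "parent block was completely full" case, with the EIP-1559 constants
// written out (ElasticityMultiplier = 2, BaseFeeChangeDenominator = 8).
func main() {
	parentGasLimit := uint64(30_000_000)
	parentGasTarget := parentGasLimit / 2      // gas target = limit / elasticity
	parentBaseFee := big.NewInt(1_000_000_000) // 1 gwei

	// Parent used its entire limit (gasUsed == 2 * target), so the next
	// block's base fee rises by the maximum step of 1/8, i.e. 12.5%.
	gasUsedDelta := new(big.Int).SetUint64(parentGasLimit - parentGasTarget)
	delta := new(big.Int).Mul(parentBaseFee, gasUsedDelta)
	delta.Div(delta, new(big.Int).SetUint64(parentGasTarget))
	delta.Div(delta, big.NewInt(8)) // BaseFeeChangeDenominator

	fmt.Println(new(big.Int).Add(parentBaseFee, delta)) // 1125000000, i.e. 1.125 gwei
}
```
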
Co-authored-by: Marius van der Wijden Co-authored-by: lightclient@protonmail.com Co-authored-by: Felix Lange --- accounts/abi/bind/backends/simulated.go | 2 + accounts/external/backend.go | 12 +- cmd/evm/internal/t8ntool/execution.go | 8 +- cmd/evm/internal/t8ntool/gen_stenv.go | 22 ++- cmd/evm/internal/t8ntool/transition.go | 10 +- cmd/evm/poststate.json | 23 --- cmd/evm/testdata/10/alloc.json | 23 +++ cmd/evm/testdata/10/env.json | 12 ++ cmd/evm/testdata/10/readme.md | 79 +++++++++ cmd/evm/testdata/10/txs.json | 70 ++++++++ cmd/evm/testdata/11/alloc.json | 25 +++ cmd/evm/testdata/11/env.json | 12 ++ cmd/evm/testdata/11/readme.md | 13 ++ cmd/evm/testdata/11/txs.json | 14 ++ cmd/evm/testdata/9/alloc.json | 11 ++ cmd/evm/testdata/9/env.json | 8 + cmd/evm/testdata/9/readme.md | 75 +++++++++ cmd/evm/testdata/9/txs.json | 37 +++++ consensus/clique/clique.go | 34 ++-- consensus/ethash/consensus.go | 30 ++-- consensus/misc/eip1559.go | 93 +++++++++++ consensus/misc/eip1559_test.go | 133 ++++++++++++++++ consensus/misc/gaslimit.go | 42 +++++ core/blockchain_test.go | 157 ++++++++++++++++++ core/chain_makers.go | 8 +- core/error.go | 4 + core/evm.go | 10 +- core/genesis.go | 3 + core/state_prefetcher.go | 2 +- core/state_processor.go | 6 +- core/state_processor_test.go | 203 +++++++++++++++++------- core/state_transition.go | 22 ++- core/types/access_list_tx.go | 3 +- core/types/block.go | 14 ++ core/types/block_test.go | 65 ++++++++ core/types/dynamic_fee_tx.go | 104 ++++++++++++ core/types/legacy_tx.go | 3 +- core/types/receipt.go | 9 +- core/types/transaction.go | 31 +++- core/types/transaction_marshalling.go | 104 ++++++++++-- core/types/transaction_signing.go | 77 ++++++++- core/vm/eips.go | 20 +++ core/vm/evm.go | 1 + core/vm/jump_table.go | 1 + core/vm/opcodes.go | 3 + eth/state_accessor.go | 2 +- eth/tracers/api.go | 8 +- eth/tracers/api_test.go | 2 +- eth/tracers/tracers_test.go | 4 +- interfaces.go | 3 + internal/ethapi/api.go | 6 +- les/odr_test.go | 4 +- les/state_accessor.go | 2 +- light/odr_test.go | 2 +- params/bootnodes.go | 3 +- params/config.go | 2 +- params/protocol_params.go | 4 + tests/init.go | 13 ++ tests/state_test_util.go | 2 +- 59 files changed, 1522 insertions(+), 173 deletions(-) delete mode 100644 cmd/evm/poststate.json create mode 100644 cmd/evm/testdata/10/alloc.json create mode 100644 cmd/evm/testdata/10/env.json create mode 100644 cmd/evm/testdata/10/readme.md create mode 100644 cmd/evm/testdata/10/txs.json create mode 100644 cmd/evm/testdata/11/alloc.json create mode 100644 cmd/evm/testdata/11/env.json create mode 100644 cmd/evm/testdata/11/readme.md create mode 100644 cmd/evm/testdata/11/txs.json create mode 100644 cmd/evm/testdata/9/alloc.json create mode 100644 cmd/evm/testdata/9/env.json create mode 100644 cmd/evm/testdata/9/readme.md create mode 100644 cmd/evm/testdata/9/txs.json create mode 100644 consensus/misc/eip1559.go create mode 100644 consensus/misc/eip1559_test.go create mode 100644 consensus/misc/gaslimit.go create mode 100644 core/types/dynamic_fee_tx.go diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index d6d525eae10d..9de427ae4391 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -716,6 +716,8 @@ func (m callMsg) Nonce() uint64 { return 0 } func (m callMsg) CheckNonce() bool { return false } func (m callMsg) To() *common.Address { return m.CallMsg.To } func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } +func (m callMsg) FeeCap() 
*big.Int { return m.CallMsg.FeeCap } +func (m callMsg) Tip() *big.Int { return m.CallMsg.Tip } func (m callMsg) Gas() uint64 { return m.CallMsg.Gas } func (m callMsg) Value() *big.Int { return m.CallMsg.Value } func (m callMsg) Data() []byte { return m.CallMsg.Data } diff --git a/accounts/external/backend.go b/accounts/external/backend.go index de241385c2d2..59766217d225 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -217,12 +217,12 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio if chainID != nil { args.ChainID = (*hexutil.Big)(chainID) } - // However, if the user asked for a particular chain id, then we should - // use that instead. - if tx.Type() != types.LegacyTxType && tx.ChainId() != nil { - args.ChainID = (*hexutil.Big)(tx.ChainId()) - } - if tx.Type() == types.AccessListTxType { + if tx.Type() != types.LegacyTxType { + // However, if the user asked for a particular chain id, then we should + // use that instead. + if tx.ChainId() != nil { + args.ChainID = (*hexutil.Big)(tx.ChainId()) + } accessList := tx.AccessList() args.AccessList = &accessList } diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index c3f1b16efc9b..cf6974bc43cb 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -69,6 +69,7 @@ type stEnv struct { Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Ommers []ommer `json:"ommers,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` } type stEnvMarshaling struct { @@ -77,6 +78,7 @@ type stEnvMarshaling struct { GasLimit math.HexOrDecimal64 Number math.HexOrDecimal64 Timestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 } // Apply applies a set of transactions to a pre-state @@ -120,6 +122,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, GasLimit: pre.Env.GasLimit, GetHash: getHash, } + // If currentBaseFee is defined, add it to the vmContext. + if pre.Env.BaseFee != nil { + vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee) + } // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's // done in StateProcessor.Process(block, ...), right before transactions are applied. if chainConfig.DAOForkSupport && @@ -129,7 +135,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } for i, tx := range txs { - msg, err := tx.AsMessage(signer) + msg, err := tx.AsMessage(signer, pre.Env.BaseFee) if err != nil { log.Info("rejected tx", "index", i, "hash", tx.Hash(), "error", err) rejectedTxs = append(rejectedTxs, i) diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index ab5951534e48..695fdba1e15d 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -16,13 +16,14 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) @@ -32,19 +33,21 @@ func (s stEnv) MarshalJSON() ([]byte, error) { enc.Timestamp = math.HexOrDecimal64(s.Timestamp) enc.BlockHashes = s.BlockHashes enc.Ommers = s.Ommers + enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -76,5 +79,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.Ommers != nil { s.Ommers = dec.Ommers } + if dec.BaseFee != nil { + s.BaseFee = (*big.Int)(dec.BaseFee) + } return nil } diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 9bb03c2c6aaf..bab6e63faa6a 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -19,6 +19,7 @@ package t8ntool import ( "crypto/ecdsa" "encoding/json" + "errors" "fmt" "io/ioutil" "math/big" @@ -210,9 +211,12 @@ func Main(ctx *cli.Context) error { if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil { return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) } - - // Iterate over all the tests, run them and aggregate the results - + // Sanity check, to not `panic` in state_transition + if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) { + if prestate.Env.BaseFee == nil { + return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) + } + } // Run the test and aggregate the result s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), 
getTracer) if err != nil { diff --git a/cmd/evm/poststate.json b/cmd/evm/poststate.json deleted file mode 100644 index 9ee17f18d139..000000000000 --- a/cmd/evm/poststate.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "root": "f4157bb27bcb1d1a63001434a249a80948f2e9fe1f53d551244c1dae826b5b23", - "accounts": { - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { - "balance": "4276951709", - "nonce": 1, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "6916764286133345652", - "nonce": 172, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - }, - "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "42500", - "nonce": 0, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - } - } -} \ No newline at end of file diff --git a/cmd/evm/testdata/10/alloc.json b/cmd/evm/testdata/10/alloc.json new file mode 100644 index 000000000000..6e98e7513c45 --- /dev/null +++ b/cmd/evm/testdata/10/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/env.json b/cmd/evm/testdata/10/env.json new file mode 100644 index 000000000000..3a82d46a774b --- /dev/null +++ b/cmd/evm/testdata/10/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/readme.md b/cmd/evm/testdata/10/readme.md new file mode 100644 index 000000000000..c34be80bb71c --- /dev/null +++ b/cmd/evm/testdata/10/readme.md @@ -0,0 +1,79 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which were reported by Ori as misbehaving. 
+ +``` +[user@work evm]$ dir=./testdata/10 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1 +INFO [05-09|22:11:59.436] rejected tx index=3 hash=db07bf..ede1e8 from=0xd02d72E067e77158444ef2020Ff2d325f929B363 error="gas limit reached" +``` +Output: +```json +{ + "alloc": { + "0x1111111111111111111111111111111111111111": { + "code": "0xfe", + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xff5beffffc95", + "nonce": "0x4" + } + }, + "result": { + "stateRoot": "0xf91a7ec08e4bfea88719aab34deabb000c86902360532b52afa9599d41f2bb8b", + "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", + "receiptRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x10000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000001", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x20000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + }, + { + "type": "0x2", + "root": "0x", + 
"status": "0x0", + "cumulativeGasUsed": "0x30000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x2" + } + ], + "rejected": [ + 3 + ] + } +} +``` diff --git a/cmd/evm/testdata/10/txs.json b/cmd/evm/testdata/10/txs.json new file mode 100644 index 000000000000..014f9db9af03 --- /dev/null +++ b/cmd/evm/testdata/10/txs.json @@ -0,0 +1,70 @@ +[ + { + "input" : "0x", + "gas" : "0x10000001", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x7a45f00bcde9036b026cdf1628b023cd8a31a95c62b5e4dbbee2fa7debe668fb", + "s" : "0x3cc9d6f2cd00a045b0263f2d6dad7d60938d5d13d061af4969f95928aa934d4a", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "feeCap" : "0xfa0", + "tip" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x2", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x4c564b94b0281a8210eeec2dd1fe2e16ff1c1903a8c3a1078d735d7f8208b2af", + "s" : "0x56432b2593e6de95db1cb997b7385217aca03f1615327e231734446b39f266d", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "feeCap" : "0xfa0", + "tip" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x3", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x2ed2ef52f924f59d4a21e1f2a50d3b1109303ce5e32334a7ece9b46f4fbc2a57", + "s" : "0x2980257129cbd3da987226f323d50ba3975a834d165e0681f991b75615605c44", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "feeCap" : "0xfa0", + "tip" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x4", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x5df7d7f8f8e15b36fc9f189cacb625040fad10398d08fc90812595922a2c49b2", + "s" : "0x565fc1803f77a84d754ffe3c5363ab54a8d93a06ea1bb9d4c73c73a282b35917", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "feeCap" : "0xfa0", + "tip" : "0x0", + "accessList" : [ + ] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/11/alloc.json b/cmd/evm/testdata/11/alloc.json new file mode 100644 index 000000000000..86938230fa75 --- /dev/null +++ b/cmd/evm/testdata/11/alloc.json @@ -0,0 +1,25 @@ +{ + "0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x61ffff5060046000f3", + "nonce" : "0x01", + "storage" : { + } + }, + 
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + }, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x00", + "code" : "0x6001600055", + "nonce" : "0x00", + "storage" : { + } + } +} + diff --git a/cmd/evm/testdata/11/env.json b/cmd/evm/testdata/11/env.json new file mode 100644 index 000000000000..37dedf09475a --- /dev/null +++ b/cmd/evm/testdata/11/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "blockHashes" : { + "0" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2" + } +} + diff --git a/cmd/evm/testdata/11/readme.md b/cmd/evm/testdata/11/readme.md new file mode 100644 index 000000000000..d499f8e99fae --- /dev/null +++ b/cmd/evm/testdata/11/readme.md @@ -0,0 +1,13 @@ +## Test missing basefee + +In this test, the `currentBaseFee` is missing from the env portion. +On a live blockchain, the basefee is present in the header, and verified as part of header validation. + +In `evm t8n`, we don't have blocks, so it needs to be added in the `env`instead. + +When it's missing, an error is expected. + +``` +dir=./testdata/11 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1>/dev/null +ERROR(3): EIP-1559 config but missing 'currentBaseFee' in env section +``` \ No newline at end of file diff --git a/cmd/evm/testdata/11/txs.json b/cmd/evm/testdata/11/txs.json new file mode 100644 index 000000000000..c54b0a1f5b4d --- /dev/null +++ b/cmd/evm/testdata/11/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x38600060013960015160005560006000f3", + "gas" : "0x61a80", + "gasPrice" : "0x1", + "nonce" : "0x0", + "value" : "0x186a0", + "v" : "0x1c", + "r" : "0x2e1391fd903387f1cc2b51df083805fb4bbb0d4710a2cdf4a044d191ff7be63e", + "s" : "0x7f10a933c42ab74927db02b1db009e923d9d2ab24ac24d63c399f2fe5d9c9b22", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] + diff --git a/cmd/evm/testdata/9/alloc.json b/cmd/evm/testdata/9/alloc.json new file mode 100644 index 000000000000..c14e38e84515 --- /dev/null +++ b/cmd/evm/testdata/9/alloc.json @@ -0,0 +1,11 @@ +{ + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x58585454", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000000000000", + "nonce": "0x00" + } +} diff --git a/cmd/evm/testdata/9/env.json b/cmd/evm/testdata/9/env.json new file mode 100644 index 000000000000..ec5164b9952e --- /dev/null +++ b/cmd/evm/testdata/9/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasTarget": "0x1000000000", + "currentBaseFee": "0x3B9ACA00", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md new file mode 100644 index 000000000000..88f0f12aaaa5 --- /dev/null +++ b/cmd/evm/testdata/9/readme.md @@ -0,0 +1,75 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. 
+ +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are two transactions, each invokes the contract above. + +1. EIP-1559 ACL-transaction, which contains the `0x0` slot for `0xaaaa` +2. Legacy transaction + +## Execution + +Running it yields: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":2,"op":84,"gas":"0x48c28","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x483f4","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnDa +ta":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":2,"op":84,"gas":"0x49cf4","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x494c0","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +We can also get the post-alloc: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +{ + "alloc": { + "0x000000000000000000000000000000000000aaaa": { + "code": "0x58585454", + "balance": "0x3", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0xbfc02677a000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xff104fcfea7800", + "nonce": "0x2" + } + } +} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +ERROR(10): Failed signing transactions: ERROR(10): Tx 0: failed to sign tx: transaction type not supported +``` + +It fails, due to the `evm t8n` cannot sign them in with the given signer. We can bypass that, however, +by feeding it presigned transactions, located in `txs_signed.json`. + +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs_signed.json --input.env=$dir/env.json +INFO [05-07|12:28:42.072] rejected tx index=0 hash=b4821e..536819 error="transaction type not supported" +INFO [05-07|12:28:42.072] rejected tx index=1 hash=a9c6c6..fa4036 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [05-07|12:28:42.073] Wrote file file=alloc.json +INFO [05-07|12:28:42.073] Wrote file file=result.json +``` + +Number `0` is not applicable, and therefore number `1` has wrong nonce, and both are rejected. 
+ diff --git a/cmd/evm/testdata/9/txs.json b/cmd/evm/testdata/9/txs.json new file mode 100644 index 000000000000..f349ae4a24a2 --- /dev/null +++ b/cmd/evm/testdata/9/txs.json @@ -0,0 +1,37 @@ +[ + { + "gas": "0x4ef00", + "tip": "0x2", + "feeCap": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "type" : "0x2", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 9954a023e574..b693e8051826 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -299,10 +299,6 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H if header.GasLimit > cap { return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap) } - // Verify that the gasUsed is <= gasLimit - if header.GasUsed > header.GasLimit { - return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit) - } // If all checks passed, validate any special fields for hard forks if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil { return err @@ -334,14 +330,21 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header if parent.Time+c.config.Period > header.Time { return errInvalidTimestamp } - // Verify that the gas limit remains within allowed bounds - diff := int64(parent.GasLimit) - int64(header.GasLimit) - if diff < 0 { - diff *= -1 + // Verify that the gasUsed is <= gasLimit + if header.GasUsed > header.GasLimit { + return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit) } - limit := parent.GasLimit / params.GasLimitBoundDivisor - if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit { - return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit) + if !chain.Config().IsLondon(header.Number) { + // Verify BaseFee not present before EIP-1559 fork. + if header.BaseFee != nil { + return fmt.Errorf("invalid baseFee before fork: have %d, want ", header.BaseFee) + } + if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil { + return err + } + } else if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil { + // Verify the header's EIP-1559 attributes. 
+ return err } // Retrieve the snapshot needed to verify this header and cache it snap, err := c.snapshot(chain, number-1, header.ParentHash, parents) @@ -725,7 +728,7 @@ func CliqueRLP(header *types.Header) []byte { } func encodeSigHeader(w io.Writer, header *types.Header) { - err := rlp.Encode(w, []interface{}{ + enc := []interface{}{ header.ParentHash, header.UncleHash, header.Coinbase, @@ -741,8 +744,11 @@ func encodeSigHeader(w io.Writer, header *types.Header) { header.Extra[:len(header.Extra)-crypto.SignatureLength], // Yes, this will panic if extra is too short header.MixDigest, header.Nonce, - }) - if err != nil { + } + if header.BaseFee != nil { + enc = append(enc, header.BaseFee) + } + if err := rlp.Encode(w, enc); err != nil { panic("can't encode: " + err.Error()) } } diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index b04cb24fb478..9b9657e19009 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -284,16 +284,18 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa if header.GasUsed > header.GasLimit { return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit) } - - // Verify that the gas limit remains within allowed bounds - diff := int64(parent.GasLimit) - int64(header.GasLimit) - if diff < 0 { - diff *= -1 - } - limit := parent.GasLimit / params.GasLimitBoundDivisor - - if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit { - return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit) + // Verify the block's gas usage and (if applicable) verify the base fee. + if !chain.Config().IsLondon(header.Number) { + // Verify BaseFee not present before EIP-1559 fork. + if header.BaseFee != nil { + return fmt.Errorf("invalid baseFee before fork: have %d, expected 'nil'", header.BaseFee) + } + if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil { + return err + } + } else if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil { + // Verify the header's EIP-1559 attributes. + return err } // Verify that the block number is parent's +1 if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 { @@ -604,7 +606,7 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) { hasher := sha3.NewLegacyKeccak256() - rlp.Encode(hasher, []interface{}{ + enc := []interface{}{ header.ParentHash, header.UncleHash, header.Coinbase, @@ -618,7 +620,11 @@ func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) { header.GasUsed, header.Time, header.Extra, - }) + } + if header.BaseFee != nil { + enc = append(enc, header.BaseFee) + } + rlp.Encode(hasher, enc) hasher.Sum(hash[:0]) return hash } diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go new file mode 100644 index 000000000000..8fca0fdc7092 --- /dev/null +++ b/consensus/misc/eip1559.go @@ -0,0 +1,93 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package misc + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" +) + +// VerifyEip1559Header verifies some header attributes which were changed in EIP-1559, +// - gas limit check +// - basefee check +func VerifyEip1559Header(config *params.ChainConfig, parent, header *types.Header) error { + // Verify that the gas limit remains within allowed bounds + parentGasLimit := parent.GasLimit + if !config.IsLondon(parent.Number) { + parentGasLimit = parent.GasLimit * params.ElasticityMultiplier + } + if err := VerifyGaslimit(parentGasLimit, header.GasLimit); err != nil { + return err + } + // Verify the header is not malformed + if header.BaseFee == nil { + return fmt.Errorf("header is missing baseFee") + } + // Verify the baseFee is correct based on the parent header. + expectedBaseFee := CalcBaseFee(config, parent) + if header.BaseFee.Cmp(expectedBaseFee) != 0 { + return fmt.Errorf("invalid baseFee: have %s, want %s, parentBaseFee %s, parentGasUsed %d", + expectedBaseFee, header.BaseFee, parent.BaseFee, parent.GasUsed) + } + return nil +} + +// CalcBaseFee calculates the basefee of the header. +func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int { + // If the current block is the first EIP-1559 block, return the InitialBaseFee. + if !config.IsLondon(parent.Number) { + return new(big.Int).SetUint64(params.InitialBaseFee) + } + + var ( + parentGasTarget = parent.GasLimit / params.ElasticityMultiplier + parentGasTargetBig = new(big.Int).SetUint64(parentGasTarget) + baseFeeChangeDenominator = new(big.Int).SetUint64(params.BaseFeeChangeDenominator) + ) + // If the parent gasUsed is the same as the target, the baseFee remains unchanged. + if parent.GasUsed == parentGasTarget { + return new(big.Int).Set(parent.BaseFee) + } + if parent.GasUsed > parentGasTarget { + // If the parent block used more gas than its target, the baseFee should increase. + gasUsedDelta := new(big.Int).SetUint64(parent.GasUsed - parentGasTarget) + x := new(big.Int).Mul(parent.BaseFee, gasUsedDelta) + y := x.Div(x, parentGasTargetBig) + baseFeeDelta := math.BigMax( + x.Div(y, baseFeeChangeDenominator), + common.Big1, + ) + + return x.Add(parent.BaseFee, baseFeeDelta) + } else { + // Otherwise if the parent block used less gas than its target, the baseFee should decrease. + gasUsedDelta := new(big.Int).SetUint64(parentGasTarget - parent.GasUsed) + x := new(big.Int).Mul(parent.BaseFee, gasUsedDelta) + y := x.Div(x, parentGasTargetBig) + baseFeeDelta := x.Div(y, baseFeeChangeDenominator) + + return math.BigMax( + x.Sub(parent.BaseFee, baseFeeDelta), + common.Big0, + ) + } +} diff --git a/consensus/misc/eip1559_test.go b/consensus/misc/eip1559_test.go new file mode 100644 index 000000000000..333411db514c --- /dev/null +++ b/consensus/misc/eip1559_test.go @@ -0,0 +1,133 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package misc + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" +) + +// copyConfig does a _shallow_ copy of a given config. Safe to set new values, but +// do not use e.g. SetInt() on the numbers. For testing only +func copyConfig(original *params.ChainConfig) *params.ChainConfig { + return ¶ms.ChainConfig{ + ChainID: original.ChainID, + HomesteadBlock: original.HomesteadBlock, + DAOForkBlock: original.DAOForkBlock, + DAOForkSupport: original.DAOForkSupport, + EIP150Block: original.EIP150Block, + EIP150Hash: original.EIP150Hash, + EIP155Block: original.EIP155Block, + EIP158Block: original.EIP158Block, + ByzantiumBlock: original.ByzantiumBlock, + ConstantinopleBlock: original.ConstantinopleBlock, + PetersburgBlock: original.PetersburgBlock, + IstanbulBlock: original.IstanbulBlock, + MuirGlacierBlock: original.MuirGlacierBlock, + BerlinBlock: original.BerlinBlock, + LondonBlock: original.LondonBlock, + EWASMBlock: original.EWASMBlock, + CatalystBlock: original.CatalystBlock, + Ethash: original.Ethash, + Clique: original.Clique, + } +} + +func config() *params.ChainConfig { + config := copyConfig(params.TestChainConfig) + config.LondonBlock = big.NewInt(5) + return config +} + +// TestBlockGasLimits tests the gasLimit checks for blocks both across +// the EIP-1559 boundary and post-1559 blocks +func TestBlockGasLimits(t *testing.T) { + initial := new(big.Int).SetUint64(params.InitialBaseFee) + + for i, tc := range []struct { + pGasLimit uint64 + pNum int64 + gasLimit uint64 + ok bool + }{ + // Transitions from non-london to london + {10000000, 4, 20000000, true}, // No change + {10000000, 4, 20019530, true}, // Upper limit + {10000000, 4, 20019531, false}, // Upper +1 + {10000000, 4, 19980470, true}, // Lower limit + {10000000, 4, 19980469, false}, // Lower limit -1 + // London to London + {20000000, 5, 20000000, true}, + {20000000, 5, 20019530, true}, // Upper limit + {20000000, 5, 20019531, false}, // Upper limit +1 + {20000000, 5, 19980470, true}, // Lower limit + {20000000, 5, 19980469, false}, // Lower limit -1 + {40000000, 5, 40039061, true}, // Upper limit + {40000000, 5, 40039062, false}, // Upper limit +1 + {40000000, 5, 39960939, true}, // lower limit + {40000000, 5, 39960938, false}, // Lower limit -1 + } { + parent := &types.Header{ + GasUsed: tc.pGasLimit / 2, + GasLimit: tc.pGasLimit, + BaseFee: initial, + Number: big.NewInt(tc.pNum), + } + header := &types.Header{ + GasUsed: tc.gasLimit / 2, + GasLimit: tc.gasLimit, + BaseFee: initial, + Number: big.NewInt(tc.pNum + 1), + } + err := VerifyEip1559Header(config(), parent, header) + if tc.ok && err != nil { + t.Errorf("test %d: Expected valid header: %s", i, err) + } + if !tc.ok && err == nil { + t.Errorf("test %d: Expected invalid 
header", i) + } + } +} + +// TestCalcBaseFee assumes all blocks are 1559-blocks +func TestCalcBaseFee(t *testing.T) { + tests := []struct { + parentBaseFee int64 + parentGasLimit uint64 + parentGasUsed uint64 + expectedBaseFee int64 + }{ + {params.InitialBaseFee, 20000000, 10000000, params.InitialBaseFee}, // usage == target + {params.InitialBaseFee, 20000000, 9000000, 987500000}, // usage below target + {params.InitialBaseFee, 20000000, 11000000, 1012500000}, // usage above target + } + for i, test := range tests { + parent := &types.Header{ + Number: common.Big32, + GasLimit: test.parentGasLimit, + GasUsed: test.parentGasUsed, + BaseFee: big.NewInt(test.parentBaseFee), + } + if have, want := CalcBaseFee(config(), parent), big.NewInt(test.expectedBaseFee); have.Cmp(want) != 0 { + t.Errorf("test %d: have %d want %d, ", i, have, want) + } + } +} diff --git a/consensus/misc/gaslimit.go b/consensus/misc/gaslimit.go new file mode 100644 index 000000000000..25f35300b94d --- /dev/null +++ b/consensus/misc/gaslimit.go @@ -0,0 +1,42 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package misc + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/params" +) + +// VerifyGaslimit verifies the header gas limit according increase/decrease +// in relation to the parent gas limit. +func VerifyGaslimit(parentGasLimit, headerGasLimit uint64) error { + // Verify that the gas limit remains within allowed bounds + diff := int64(parentGasLimit) - int64(headerGasLimit) + if diff < 0 { + diff *= -1 + } + limit := parentGasLimit / params.GasLimitBoundDivisor + if uint64(diff) >= limit { + return fmt.Errorf("invalid gas limit: have %d, want %d +-= %d", headerGasLimit, parentGasLimit, limit-1) + } + if headerGasLimit < params.MinGasLimit { + return errors.New("invalid gas limit below 5000") + } + return nil +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go index a8ce4c7b9a64..8ace9f6a6d42 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -73,6 +73,10 @@ func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *B return db, blockchain, err } +func newGwei(n int64) *big.Int { + return new(big.Int).Mul(big.NewInt(n), big.NewInt(params.GWei)) +} + // Test fork of length N starting from block i func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) { // Copy old chain up to #i into a new db @@ -3109,3 +3113,156 @@ func TestEIP2718Transition(t *testing.T) { } } + +// TestEIP1559Transition tests the following: +// +// 1. A tranaction whose feeCap is greater than the baseFee is valid. +// 2. Gas accounting for access lists on EIP-1559 transactions is correct. +// 3. Only the transaction's tip will be received by the coinbase. +// 4. 
The transaction sender pays for both the tip and baseFee. +// 5. The coinbase receives only the partially realized tip when +// feeCap - tip < baseFee. +// 6. Legacy transaction behave as expected (e.g. gasPrice = feeCap = tip). +func TestEIP1559Transition(t *testing.T) { + var ( + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + + // Generate a canonical chain to act as the main dataset + engine = ethash.NewFaker() + db = rawdb.NewMemoryDatabase() + + // A sender who makes transactions, has some funds + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + gspec = &Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: GenesisAlloc{ + addr1: {Balance: funds}, + addr2: {Balance: funds}, + // The address 0xAAAA sloads 0x00 and 0x01 + aa: { + Code: []byte{ + byte(vm.PC), + byte(vm.PC), + byte(vm.SLOAD), + byte(vm.SLOAD), + }, + Nonce: 0, + Balance: big.NewInt(0), + }, + }, + } + ) + + gspec.Config.BerlinBlock = common.Big0 + gspec.Config.LondonBlock = common.Big0 + genesis := gspec.MustCommit(db) + signer := types.LatestSigner(gspec.Config) + + blocks, _ := GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{1}) + + // One transaction to 0xAAAA + accesses := types.AccessList{types.AccessTuple{ + Address: aa, + StorageKeys: []common.Hash{{0}}, + }} + + txdata := &types.DynamicFeeTx{ + ChainID: gspec.Config.ChainID, + Nonce: 0, + To: &aa, + Gas: 30000, + FeeCap: newGwei(5), + Tip: big.NewInt(2), + AccessList: accesses, + Data: []byte{}, + } + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key1) + + b.AddTx(tx) + }) + + diskdb := rawdb.NewMemoryDatabase() + gspec.MustCommit(diskdb) + + chain, err := NewBlockChain(diskdb, nil, gspec.Config, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + if n, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + + block := chain.GetBlockByNumber(1) + + // 1+2: Ensure EIP-1559 access lists are accounted for via gas usage. + expectedGas := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas + + vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929 + if block.GasUsed() != expectedGas { + t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) + } + + state, _ := chain.State() + + // 3: Ensure that miner received only the tx's tip. + actual := state.GetBalance(block.Coinbase()) + expected := new(big.Int).Add( + new(big.Int).SetUint64(block.GasUsed()*block.Transactions()[0].Tip().Uint64()), + ethash.ConstantinopleBlockReward, + ) + if actual.Cmp(expected) != 0 { + t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) + } + + // 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee). 
+ actual = new(big.Int).Sub(funds, state.GetBalance(addr1)) + expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].Tip().Uint64() + block.BaseFee().Uint64())) + if actual.Cmp(expected) != 0 { + t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) + } + + blocks, _ = GenerateChain(gspec.Config, block, engine, db, 1, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{2}) + + txdata := &types.LegacyTx{ + Nonce: 0, + To: &aa, + Gas: 30000, + GasPrice: newGwei(5), + } + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key2) + + b.AddTx(tx) + }) + + if n, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + + block = chain.GetBlockByNumber(2) + state, _ = chain.State() + effectiveTip := block.Transactions()[0].Tip().Uint64() - block.BaseFee().Uint64() + + // 6+5: Ensure that miner received only the tx's effective tip. + actual = state.GetBalance(block.Coinbase()) + expected = new(big.Int).Add( + new(big.Int).SetUint64(block.GasUsed()*effectiveTip), + ethash.ConstantinopleBlockReward, + ) + if actual.Cmp(expected) != 0 { + t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) + } + + // 4: Ensure the tx sender paid for the gasUsed * (effectiveTip + block baseFee). + actual = new(big.Int).Sub(funds, state.GetBalance(addr2)) + expected = new(big.Int).SetUint64(block.GasUsed() * (effectiveTip + block.BaseFee().Uint64())) + if actual.Cmp(expected) != 0 { + t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) + } +} diff --git a/core/chain_makers.go b/core/chain_makers.go index e058e5a78e5b..b1b7dc3591df 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -253,7 +253,7 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S time = parent.Time() + 10 // block time is fixed at 10 seconds } - return &types.Header{ + header := &types.Header{ Root: state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())), ParentHash: parent.Hash(), Coinbase: parent.Coinbase(), @@ -267,6 +267,12 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S Number: new(big.Int).Add(parent.Number(), common.Big1), Time: time, } + + if chain.Config().IsLondon(parent.Number()) { + header.BaseFee = misc.CalcBaseFee(chain.Config(), parent.Header()) + } + + return header } // makeHeaderChain creates a deterministic chain of headers rooted at parent. diff --git a/core/error.go b/core/error.go index 197dd81567c1..3d62cb9bcfaf 100644 --- a/core/error.go +++ b/core/error.go @@ -71,4 +71,8 @@ var ( // ErrTxTypeNotSupported is returned if a transaction is not supported in the // current network configuration. ErrTxTypeNotSupported = types.ErrTxTypeNotSupported + + // ErrFeeCapTooLow is returned if the transaction fee cap is less than the + // the base fee of the block. + ErrFeeCapTooLow = errors.New("fee cap less than block base fee") ) diff --git a/core/evm.go b/core/evm.go index 8f69d514995e..6c67fc43762c 100644 --- a/core/evm.go +++ b/core/evm.go @@ -37,13 +37,20 @@ type ChainContext interface { // NewEVMBlockContext creates a new context for use in the EVM. func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address) vm.BlockContext { + var ( + beneficiary common.Address + baseFee *big.Int + ) + // If we don't have an explicit author (i.e. 
not mining), extract from the header - var beneficiary common.Address if author == nil { beneficiary, _ = chain.Engine().Author(header) // Ignore error, we're past header validation } else { beneficiary = *author } + if header.BaseFee != nil { + baseFee = new(big.Int).Set(header.BaseFee) + } return vm.BlockContext{ CanTransfer: CanTransfer, Transfer: Transfer, @@ -52,6 +59,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common BlockNumber: new(big.Int).Set(header.Number), Time: new(big.Int).SetUint64(header.Time), Difficulty: new(big.Int).Set(header.Difficulty), + BaseFee: baseFee, GasLimit: header.GasLimit, } } diff --git a/core/genesis.go b/core/genesis.go index d7d08e090994..b68ae4ef564d 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -291,6 +291,9 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { if g.Difficulty == nil { head.Difficulty = params.GenesisDifficulty } + if g.Config != nil && g.Config.IsLondon(common.Big0) { + head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee) + } statedb.Commit(false) statedb.Database().TrieDB().Commit(root, true, nil) diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go index 05394321f74b..ecdfa67f00dd 100644 --- a/core/state_prefetcher.go +++ b/core/state_prefetcher.go @@ -63,7 +63,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c return } // Convert the transaction into an executable message and pre-cache its sender - msg, err := tx.AsMessage(signer) + msg, err := tx.AsMessage(signer, header.BaseFee) if err != nil { return // Also invalid block, bail out } diff --git a/core/state_processor.go b/core/state_processor.go index 40a953f0d4f8..6f6bc1879b8d 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -71,9 +71,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) // Iterate over and process the individual transactions for i, tx := range block.Transactions() { - msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number)) + msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number), header.BaseFee) if err != nil { - return nil, nil, 0, err + return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } statedb.Prepare(tx.Hash(), block.Hash(), i) receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, header, tx, usedGas, vmenv) @@ -139,7 +139,7 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. 
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) { - msg, err := tx.AsMessage(types.MakeSigner(config, header.Number)) + msg, err := tx.AsMessage(types.MakeSigner(config, header.Number), header.BaseFee) if err != nil { return nil, err } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 5976ecc3d4e4..28baf6e7d270 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -38,75 +39,157 @@ import ( // contain invalid transactions func TestStateProcessorErrors(t *testing.T) { var ( - signer = types.HomesteadSigner{} + signer = types.LatestSigner(params.TestChainConfig) testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - db = rawdb.NewMemoryDatabase() - gspec = &Genesis{ - Config: params.TestChainConfig, - } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) ) - defer blockchain.Stop() var makeTx = func(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, testKey) return tx } - for i, tt := range []struct { - txs []*types.Transaction - want string - }{ - { - txs: []*types.Transaction{ - makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), - makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), + var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, tip, feeCap *big.Int) *types.Transaction { + tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ + Nonce: nonce, + Tip: tip, + FeeCap: feeCap, + Gas: 0, + To: &to, + Value: big.NewInt(0), + }), signer, testKey) + return tx + } + { // Tests against a 'recent' chain definition + var ( + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + Config: params.TestChainConfig, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + }, + } + genesis = gspec.MustCommit(db) + blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + ) + defer blockchain.Stop() + + for i, tt := range []struct { + txs []*types.Transaction + want string + }{ + { // ErrNonceTooLow + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), + }, + want: "could not apply tx 1 [0x0026256b3939ed97e2c4a6f3fce8ecf83bdcfa6d507c47838c308a1fb0436f62]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1", + }, + { // ErrNonceTooHigh + txs: []*types.Transaction{ + makeTx(100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), + }, + want: "could not apply tx 0 [0xdebad714ca7f363bd0d8121c4518ad48fa469ca81b0a081be3d10c17460f751b]: 
nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0", + }, + { // ErrGasLimitReached + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), 21000000, big.NewInt(875000000), nil), + }, + want: "could not apply tx 0 [0xbd49d8dadfd47fb846986695f7d4da3f7b2c48c8da82dbc211a26eb124883de9]: gas limit reached", }, - want: "could not apply tx 1 [0x36bfa6d14f1cd35a1be8cc2322982a595fabc0e799f09c1de3bad7bd5b1f7626]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1", - }, - { - txs: []*types.Transaction{ - makeTx(100, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), + { // ErrInsufficientFundsForTransfer + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(1000000000000000000), params.TxGas, big.NewInt(875000000), nil), + }, + want: "could not apply tx 0 [0x98c796b470f7fcab40aaef5c965a602b0238e1034cce6fb73823042dd0638d74]: insufficient funds for transfer: address 0x71562b71999873DB5b286dF957af199Ec94617F7", }, - want: "could not apply tx 0 [0x51cd272d41ef6011d8138e18bf4043797aca9b713c7d39a97563f9bbe6bdbe6f]: nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0", - }, - { - txs: []*types.Transaction{ - makeTx(0, common.Address{}, big.NewInt(0), 21000000, nil, nil), + { // ErrInsufficientFunds + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil), + }, + want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 18900000000000000000000", }, - want: "could not apply tx 0 [0x54c58b530824b0bb84b7a98183f08913b5d74e1cebc368515ef3c65edf8eb56a]: gas limit reached", - }, - { - txs: []*types.Transaction{ - makeTx(0, common.Address{}, big.NewInt(1), params.TxGas, nil, nil), + // ErrGasUintOverflow + // One missing 'core' error is ErrGasUintOverflow: "gas uint64 overflow", + // In order to trigger that one, we'd have to allocate a _huge_ chunk of data, such that the + // multiplication len(data) +gas_per_byte overflows uint64. 
Not testable at the moment + { // ErrIntrinsicGas + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(875000000), nil), + }, + want: "could not apply tx 0 [0xcf3b049a0b516cb4f9274b3e2a264359e2ba53b2fb64b7bda2c634d5c9d01fca]: intrinsic gas too low: have 20000, want 21000", }, - want: "could not apply tx 0 [0x3094b17498940d92b13baccf356ce8bfd6f221e926abc903d642fa1466c5b50e]: insufficient funds for transfer: address 0x71562b71999873DB5b286dF957af199Ec94617F7", - }, - { - txs: []*types.Transaction{ - makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(0xffffff), nil), + { // ErrGasLimitReached + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas*1000, big.NewInt(875000000), nil), + }, + want: "could not apply tx 0 [0xbd49d8dadfd47fb846986695f7d4da3f7b2c48c8da82dbc211a26eb124883de9]: gas limit reached", }, - want: "could not apply tx 0 [0xaa3f7d86802b1f364576d9071bf231e31d61b392d306831ac9cf706ff5371ce0]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 0 want 352321515000", - }, - { - txs: []*types.Transaction{ - makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), - makeTx(1, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), - makeTx(2, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), - makeTx(3, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(0), nil), + { // ErrFeeCapTooLow + txs: []*types.Transaction{ + mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)), + }, + want: "could not apply tx 0 [0x21e9b9015150fc7f6bd5059890a5e1727f2452df285e8a84f4ca61a74c159ded]: fee cap less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, feeCap: 0 baseFee: 875000000", }, - want: "could not apply tx 3 [0x836fab5882205362680e49b311a20646de03b630920f18ec6ee3b111a2cf6835]: intrinsic gas too low: have 20000, want 21000", - }, - // The last 'core' error is ErrGasUintOverflow: "gas uint64 overflow", but in order to - // trigger that one, we'd have to allocate a _huge_ chunk of data, such that the - // multiplication len(data) +gas_per_byte overflows uint64. Not testable at the moment - } { - block := GenerateBadBlock(genesis, ethash.NewFaker(), tt.txs) - _, err := blockchain.InsertChain(types.Blocks{block}) - if err == nil { - t.Fatal("block imported without errors") + } { + block := GenerateBadBlock(genesis, ethash.NewFaker(), tt.txs, gspec.Config) + _, err := blockchain.InsertChain(types.Blocks{block}) + if err == nil { + t.Fatal("block imported without errors") + } + if have, want := err.Error(), tt.want; have != want { + t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) + } } - if have, want := err.Error(), tt.want; have != want { - t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) + } + + // One final error is ErrTxTypeNotSupported. 
For this, we need an older chain + { + var ( + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + Config: ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + }, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + }, + } + genesis = gspec.MustCommit(db) + blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + ) + defer blockchain.Stop() + for i, tt := range []struct { + txs []*types.Transaction + want string + }{ + { // ErrTxTypeNotSupported + txs: []*types.Transaction{ + mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)), + }, + want: "could not apply tx 0 [0x21e9b9015150fc7f6bd5059890a5e1727f2452df285e8a84f4ca61a74c159ded]: transaction type not supported", + }, + } { + block := GenerateBadBlock(genesis, ethash.NewFaker(), tt.txs, gspec.Config) + _, err := blockchain.InsertChain(types.Blocks{block}) + if err == nil { + t.Fatal("block imported without errors") + } + if have, want := err.Error(), tt.want; have != want { + t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) + } } } } @@ -115,11 +198,11 @@ func TestStateProcessorErrors(t *testing.T) { // valid, and no proper post-state can be made. But from the perspective of the blockchain, the block is sufficiently // valid to be considered for import: // - valid pow (fake), ancestry, difficulty, gaslimit etc -func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions) *types.Block { +func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions, config *params.ChainConfig) *types.Block { header := &types.Header{ ParentHash: parent.Hash(), Coinbase: parent.Coinbase(), - Difficulty: engine.CalcDifficulty(&fakeChainReader{params.TestChainConfig}, parent.Time()+10, &types.Header{ + Difficulty: engine.CalcDifficulty(&fakeChainReader{config}, parent.Time()+10, &types.Header{ Number: parent.Number(), Time: parent.Time(), Difficulty: parent.Difficulty(), @@ -130,8 +213,10 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr Time: parent.Time() + 10, UncleHash: types.EmptyUncleHash, } + if config.IsLondon(header.Number) { + header.BaseFee = misc.CalcBaseFee(config, parent.Header()) + } var receipts []*types.Receipt - // The post-state result doesn't need to be correct (this is a bad block), but we do need something there // Preferably something unique. 
So let's use a combo of blocknum + txhash hasher := sha3.NewLegacyKeccak256() diff --git a/core/state_transition.go b/core/state_transition.go index 05becd9a0058..881d34f4bcd8 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -22,6 +22,7 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" @@ -49,6 +50,8 @@ type StateTransition struct { msg Message gas uint64 gasPrice *big.Int + feeCap *big.Int + tip *big.Int initialGas uint64 value *big.Int data []byte @@ -62,6 +65,8 @@ type Message interface { To() *common.Address GasPrice() *big.Int + FeeCap() *big.Int + Tip() *big.Int Gas() uint64 Value() *big.Int @@ -154,6 +159,8 @@ func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition evm: evm, msg: msg, gasPrice: msg.GasPrice(), + feeCap: msg.FeeCap(), + tip: msg.Tip(), value: msg.Value(), data: msg.Data(), state: evm.StateDB, @@ -206,6 +213,15 @@ func (st *StateTransition) preCheck() error { st.msg.From().Hex(), msgNonce, stNonce) } } + // Make sure that transaction feeCap is greater than the baseFee (post london) + if st.evm.ChainConfig().IsLondon(st.evm.Context.BlockNumber) { + // This will panic if baseFee is nil, but basefee presence is verified + // as part of header validation. + if st.feeCap.Cmp(st.evm.Context.BaseFee) < 0 { + return fmt.Errorf("%w: address %v, feeCap: %s baseFee: %s", ErrFeeCapTooLow, + st.msg.From().Hex(), st.feeCap, st.evm.Context.BaseFee) + } + } return st.buyGas() } @@ -281,7 +297,11 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { // After EIP-3529: refunds are capped to gasUsed / 5 st.refundGas(params.RefundQuotientEIP3529) } - st.state.AddBalance(st.evm.Context.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)) + effectiveTip := st.gasPrice + if st.evm.ChainConfig().IsLondon(st.evm.Context.BlockNumber) { + effectiveTip = cmath.BigMin(st.tip, new(big.Int).Sub(st.feeCap, st.evm.Context.BaseFee)) + } + st.state.AddBalance(st.evm.Context.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), effectiveTip)) return &ExecutionResult{ UsedGas: st.gasUsed(), diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go index 65ee95adf611..48102a1d4075 100644 --- a/core/types/access_list_tx.go +++ b/core/types/access_list_tx.go @@ -94,7 +94,6 @@ func (tx *AccessListTx) copy() TxData { } // accessors for innerTx. 
- func (tx *AccessListTx) txType() byte { return AccessListTxType } func (tx *AccessListTx) chainID() *big.Int { return tx.ChainID } func (tx *AccessListTx) protected() bool { return true } @@ -102,6 +101,8 @@ func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } func (tx *AccessListTx) data() []byte { return tx.Data } func (tx *AccessListTx) gas() uint64 { return tx.Gas } func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) tip() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) feeCap() *big.Int { return tx.GasPrice } func (tx *AccessListTx) value() *big.Int { return tx.Value } func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } func (tx *AccessListTx) to() *common.Address { return tx.To } diff --git a/core/types/block.go b/core/types/block.go index a3318f8779d6..5f3dbb957b6d 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -82,6 +82,9 @@ type Header struct { Extra []byte `json:"extraData" gencodec:"required"` MixDigest common.Hash `json:"mixHash"` Nonce BlockNonce `json:"nonce"` + + // BaseFee was added by EIP-1559 and is ignored in legacy headers. + BaseFee *big.Int `json:"baseFee" rlp:"optional"` } // field type overrides for gencodec @@ -92,6 +95,7 @@ type headerMarshaling struct { GasUsed hexutil.Uint64 Time hexutil.Uint64 Extra hexutil.Bytes + BaseFee *hexutil.Big Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON } @@ -229,6 +233,9 @@ func CopyHeader(h *Header) *Header { if cpy.Number = new(big.Int); h.Number != nil { cpy.Number.Set(h.Number) } + if h.BaseFee != nil { + cpy.BaseFee = new(big.Int).Set(h.BaseFee) + } if len(h.Extra) > 0 { cpy.Extra = make([]byte, len(h.Extra)) copy(cpy.Extra, h.Extra) @@ -289,6 +296,13 @@ func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash } func (b *Block) UncleHash() common.Hash { return b.header.UncleHash } func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) } +func (b *Block) BaseFee() *big.Int { + if b.header.BaseFee == nil { + return nil + } + return new(big.Int).Set(b.header.BaseFee) +} + func (b *Block) Header() *Header { return CopyHeader(b.header) } // Body returns the non-header content of the block. 
diff --git a/core/types/block_test.go b/core/types/block_test.go index 63904f882c92..9ecb1a4d8f19 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -68,6 +68,71 @@ func TestBlockEncoding(t *testing.T) { } } +func TestEIP1559BlockEncoding(t *testing.T) { + blockEnc := common.FromHex("f9030bf901fea083cafc574e1f51ba9dc0568fc617a08ea2429fb384059c972f13b19fa1c8dd55a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a05fe50b260da6308036625b850b5d6ced6d0a9f814c0688bc91ffb7b7a3a54b67a0bc37d79753ad738a6dac4921e57392f145d8887476de3f783dfa7edae9283e52b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd8825208845506eb0780a0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c4843b9aca00f90106f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1b8a302f8a0018080843b9aca008301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8c0") + var block Block + if err := rlp.DecodeBytes(blockEnc, &block); err != nil { + t.Fatal("decode error: ", err) + } + + check := func(f string, got, want interface{}) { + if !reflect.DeepEqual(got, want) { + t.Errorf("%s mismatch: got %v, want %v", f, got, want) + } + } + + check("Difficulty", block.Difficulty(), big.NewInt(131072)) + check("GasLimit", block.GasLimit(), uint64(3141592)) + check("GasUsed", block.GasUsed(), uint64(21000)) + check("Coinbase", block.Coinbase(), common.HexToAddress("8888f1f195afa192cfee860698584c030f4c9db1")) + check("MixDigest", block.MixDigest(), common.HexToHash("bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff498")) + check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017")) + check("Hash", block.Hash(), common.HexToHash("c7252048cd273fe0dac09650027d07f0e3da4ee0675ebbb26627cea92729c372")) + check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) + check("Time", block.Time(), uint64(1426516743)) + check("Size", block.Size(), common.StorageSize(len(blockEnc))) + check("BaseFee", block.BaseFee(), new(big.Int).SetUint64(params.InitialBaseFee)) + + tx1 := NewTransaction(0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), big.NewInt(10), 50000, big.NewInt(10), nil) + tx1, _ = tx1.WithSignature(HomesteadSigner{}, common.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")) + + addr := common.HexToAddress("0x0000000000000000000000000000000000000001") + accesses := AccessList{AccessTuple{ + Address: addr, + StorageKeys: []common.Hash{ + {0}, + }, + }} + to 
:= common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87") + txdata := &DynamicFeeTx{ + ChainID: big.NewInt(1), + Nonce: 0, + To: &to, + Gas: 123457, + FeeCap: new(big.Int).Set(block.BaseFee()), + Tip: big.NewInt(0), + AccessList: accesses, + Data: []byte{}, + } + tx2 := NewTx(txdata) + tx2, err := tx2.WithSignature(LatestSignerForChainID(big.NewInt(1)), common.Hex2Bytes("fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a800")) + if err != nil { + t.Fatal("invalid signature error: ", err) + } + + check("len(Transactions)", len(block.Transactions()), 2) + check("Transactions[0].Hash", block.Transactions()[0].Hash(), tx1.Hash()) + check("Transactions[1].Hash", block.Transactions()[1].Hash(), tx2.Hash()) + check("Transactions[1].Type", block.Transactions()[1].Type(), tx2.Type()) + ourBlockEnc, err := rlp.EncodeToBytes(&block) + if err != nil { + t.Fatal("encode error: ", err) + } + if !bytes.Equal(ourBlockEnc, blockEnc) { + t.Errorf("encoded block mismatch:\ngot: %x\nwant: %x", ourBlockEnc, blockEnc) + } +} + func TestEIP2718BlockEncoding(t *testing.T) { blockEnc := common.FromHex("f90319f90211a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a0e6e49996c7ec59f7a23d22b83239a60151512c65613bf84a0d7da336399ebc4aa0cafe75574d59780665a97fbfd11365c7545aa8f1abf4e5e12e8243334ef7286bb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000820200832fefd882a410845506eb0796636f6f6c65737420626c6f636b206f6e20636861696ea0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c4f90101f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1b89e01f89b01800a8301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000001a03dbacc8d0259f2508625e97fdfc57cd85fdd16e5821bc2c10bdd1a52649e8335a0476e10695b183a87b0aa292a7f4b78ef0c3fbe62aa2c42c84e1d9c3da159ef14c0") var block Block diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go new file mode 100644 index 000000000000..777babe02712 --- /dev/null +++ b/core/types/dynamic_fee_tx.go @@ -0,0 +1,104 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +type DynamicFeeTx struct { + ChainID *big.Int + Nonce uint64 + Tip *big.Int + FeeCap *big.Int + Gas uint64 + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int + Data []byte + AccessList AccessList + + // Signature values + V *big.Int `json:"v" gencodec:"required"` + R *big.Int `json:"r" gencodec:"required"` + S *big.Int `json:"s" gencodec:"required"` +} + +// copy creates a deep copy of the transaction data and initializes all fields. +func (tx *DynamicFeeTx) copy() TxData { + cpy := &DynamicFeeTx{ + Nonce: tx.Nonce, + To: tx.To, // TODO: copy pointed-to address + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + // These are copied below. + AccessList: make(AccessList, len(tx.AccessList)), + Value: new(big.Int), + ChainID: new(big.Int), + Tip: new(big.Int), + FeeCap: new(big.Int), + V: new(big.Int), + R: new(big.Int), + S: new(big.Int), + } + copy(cpy.AccessList, tx.AccessList) + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + if tx.ChainID != nil { + cpy.ChainID.Set(tx.ChainID) + } + if tx.Tip != nil { + cpy.Tip.Set(tx.Tip) + } + if tx.FeeCap != nil { + cpy.FeeCap.Set(tx.FeeCap) + } + if tx.V != nil { + cpy.V.Set(tx.V) + } + if tx.R != nil { + cpy.R.Set(tx.R) + } + if tx.S != nil { + cpy.S.Set(tx.S) + } + return cpy +} + +// accessors for innerTx. +func (tx *DynamicFeeTx) txType() byte { return DynamicFeeTxType } +func (tx *DynamicFeeTx) chainID() *big.Int { return tx.ChainID } +func (tx *DynamicFeeTx) protected() bool { return true } +func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList } +func (tx *DynamicFeeTx) data() []byte { return tx.Data } +func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas } +func (tx *DynamicFeeTx) feeCap() *big.Int { return tx.FeeCap } +func (tx *DynamicFeeTx) tip() *big.Int { return tx.Tip } +func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.FeeCap } +func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } +func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } +func (tx *DynamicFeeTx) to() *common.Address { return tx.To } + +func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) { + return tx.V, tx.R, tx.S +} + +func (tx *DynamicFeeTx) setSignatureValues(chainID, v, r, s *big.Int) { + tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s +} diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index 41ad44f37985..f5e0f70783ae 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -91,13 +91,14 @@ func (tx *LegacyTx) copy() TxData { } // accessors for innerTx. 
- func (tx *LegacyTx) txType() byte { return LegacyTxType } func (tx *LegacyTx) chainID() *big.Int { return deriveChainId(tx.V) } func (tx *LegacyTx) accessList() AccessList { return nil } func (tx *LegacyTx) data() []byte { return tx.Data } func (tx *LegacyTx) gas() uint64 { return tx.Gas } func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) tip() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) feeCap() *big.Int { return tx.GasPrice } func (tx *LegacyTx) value() *big.Int { return tx.Value } func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } func (tx *LegacyTx) to() *common.Address { return tx.To } diff --git a/core/types/receipt.go b/core/types/receipt.go index 6b519a79d221..b949bd2bd5f3 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -120,10 +120,6 @@ func (r *Receipt) EncodeRLP(w io.Writer) error { if r.Type == LegacyTxType { return rlp.Encode(w, data) } - // It's an EIP-2718 typed TX receipt. - if r.Type != AccessListTxType { - return ErrTxTypeNotSupported - } buf := encodeBufferPool.Get().(*bytes.Buffer) defer encodeBufferPool.Put(buf) buf.Reset() @@ -159,7 +155,7 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error { return errEmptyTypedReceipt } r.Type = b[0] - if r.Type == AccessListTxType { + if r.Type == AccessListTxType || r.Type == DynamicFeeTxType { var dec receiptRLP if err := rlp.DecodeBytes(b[1:], &dec); err != nil { return err @@ -263,6 +259,9 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { case AccessListTxType: w.WriteByte(AccessListTxType) rlp.Encode(w, data) + case DynamicFeeTxType: + w.WriteByte(DynamicFeeTxType) + rlp.Encode(w, data) default: // For unsupported types, write nothing. Since this is for // DeriveSha, the error will be caught matching the derived hash diff --git a/core/types/transaction.go b/core/types/transaction.go index a35e07a5a316..ace1843e932b 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -26,6 +26,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" ) @@ -42,6 +43,7 @@ var ( const ( LegacyTxType = iota AccessListTxType + DynamicFeeTxType ) // Transaction is an Ethereum transaction. @@ -74,6 +76,8 @@ type TxData interface { data() []byte gas() uint64 gasPrice() *big.Int + tip() *big.Int + feeCap() *big.Int value() *big.Int nonce() uint64 to() *common.Address @@ -177,6 +181,10 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { var inner AccessListTx err := rlp.DecodeBytes(b[1:], &inner) return &inner, err + case DynamicFeeTxType: + var inner DynamicFeeTx + err := rlp.DecodeBytes(b[1:], &inner) + return &inner, err default: return nil, ErrTxTypeNotSupported } @@ -260,6 +268,12 @@ func (tx *Transaction) Gas() uint64 { return tx.inner.gas() } // GasPrice returns the gas price of the transaction. func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) } +// Tip returns the tip per gas of the transaction. +func (tx *Transaction) Tip() *big.Int { return new(big.Int).Set(tx.inner.tip()) } + +// FeeCap returns the fee cap per gas of the transaction. +func (tx *Transaction) FeeCap() *big.Int { return new(big.Int).Set(tx.inner.feeCap()) } + // Value returns the ether amount of the transaction. 
func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) } @@ -486,12 +500,14 @@ type Message struct { amount *big.Int gasLimit uint64 gasPrice *big.Int + feeCap *big.Int + tip *big.Int data []byte accessList AccessList checkNonce bool } -func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte, accessList AccessList, checkNonce bool) Message { +func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice, feeCap, tip *big.Int, data []byte, accessList AccessList, checkNonce bool) Message { return Message{ from: from, to: to, @@ -499,6 +515,8 @@ func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *b amount: amount, gasLimit: gasLimit, gasPrice: gasPrice, + feeCap: feeCap, + tip: tip, data: data, accessList: accessList, checkNonce: checkNonce, @@ -506,11 +524,13 @@ func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *b } // AsMessage returns the transaction as a core.Message. -func (tx *Transaction) AsMessage(s Signer) (Message, error) { +func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) { msg := Message{ nonce: tx.Nonce(), gasLimit: tx.Gas(), gasPrice: new(big.Int).Set(tx.GasPrice()), + feeCap: new(big.Int).Set(tx.FeeCap()), + tip: new(big.Int).Set(tx.Tip()), to: tx.To(), amount: tx.Value(), data: tx.Data(), @@ -518,6 +538,11 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) { checkNonce: true, } + // If baseFee provided, set gasPrice to effectiveGasPrice. + if baseFee != nil { + msg.gasPrice = math.BigMin(msg.gasPrice.Add(msg.tip, baseFee), msg.feeCap) + } + var err error msg.from, err = Sender(s, tx) return msg, err @@ -526,6 +551,8 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) { func (m Message) From() common.Address { return m.from } func (m Message) To() *common.Address { return m.to } func (m Message) GasPrice() *big.Int { return m.gasPrice } +func (m Message) FeeCap() *big.Int { return m.feeCap } +func (m Message) Tip() *big.Int { return m.tip } func (m Message) Value() *big.Int { return m.amount } func (m Message) Gas() uint64 { return m.gasLimit } func (m Message) Nonce() uint64 { return m.nonce } diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go index e56148555654..ecdbd70afa1b 100644 --- a/core/types/transaction_marshalling.go +++ b/core/types/transaction_marshalling.go @@ -30,15 +30,19 @@ type txJSON struct { Type hexutil.Uint64 `json:"type"` // Common transaction fields: - Nonce *hexutil.Uint64 `json:"nonce"` - GasPrice *hexutil.Big `json:"gasPrice"` - Gas *hexutil.Uint64 `json:"gas"` - Value *hexutil.Big `json:"value"` - Data *hexutil.Bytes `json:"input"` - V *hexutil.Big `json:"v"` - R *hexutil.Big `json:"r"` - S *hexutil.Big `json:"s"` - To *common.Address `json:"to"` + Nonce *hexutil.Uint64 `json:"nonce"` + GasPrice *hexutil.Big `json:"gasPrice"` + FeeCap *hexutil.Big `json:"feeCap"` + Tip *hexutil.Big `json:"tip"` + MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` + MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` + Gas *hexutil.Uint64 `json:"gas"` + Value *hexutil.Big `json:"value"` + Data *hexutil.Bytes `json:"input"` + V *hexutil.Big `json:"v"` + R *hexutil.Big `json:"r"` + S *hexutil.Big `json:"s"` + To *common.Address `json:"to"` // Access list transaction fields: ChainID *hexutil.Big `json:"chainId,omitempty"` @@ -79,6 +83,19 @@ func (t 
*Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(tx.V) enc.R = (*hexutil.Big)(tx.R) enc.S = (*hexutil.Big)(tx.S) + case *DynamicFeeTx: + enc.ChainID = (*hexutil.Big)(tx.ChainID) + enc.AccessList = &tx.AccessList + enc.Nonce = (*hexutil.Uint64)(&tx.Nonce) + enc.Gas = (*hexutil.Uint64)(&tx.Gas) + enc.FeeCap = (*hexutil.Big)(tx.FeeCap) + enc.Tip = (*hexutil.Big)(tx.Tip) + enc.Value = (*hexutil.Big)(tx.Value) + enc.Data = (*hexutil.Bytes)(&tx.Data) + enc.To = t.To() + enc.V = (*hexutil.Big)(tx.V) + enc.R = (*hexutil.Big)(tx.R) + enc.S = (*hexutil.Big)(tx.S) } return json.Marshal(&enc) } @@ -191,6 +208,75 @@ func (t *Transaction) UnmarshalJSON(input []byte) error { } } + case DynamicFeeTxType: + var itx DynamicFeeTx + inner = &itx + // Access list is optional for now. + if dec.AccessList != nil { + itx.AccessList = *dec.AccessList + } + if dec.ChainID == nil { + return errors.New("missing required field 'chainId' in transaction") + } + itx.ChainID = (*big.Int)(dec.ChainID) + if dec.To != nil { + itx.To = dec.To + } + if dec.Nonce == nil { + return errors.New("missing required field 'nonce' in transaction") + } + itx.Nonce = uint64(*dec.Nonce) + switch { + case dec.Tip == nil && dec.MaxPriorityFeePerGas == nil: + return errors.New("at least one of 'tip' or 'maxPriorityFeePerGas' must be defined") + case dec.Tip != nil && dec.MaxPriorityFeePerGas != nil: + return errors.New("only one of 'tip' or 'maxPriorityFeePerGas' may be defined") + case dec.Tip != nil && dec.MaxPriorityFeePerGas == nil: + itx.Tip = (*big.Int)(dec.Tip) + case dec.Tip == nil && dec.MaxPriorityFeePerGas != nil: + itx.Tip = (*big.Int)(dec.MaxPriorityFeePerGas) + } + switch { + case dec.FeeCap == nil && dec.MaxFeePerGas == nil: + return errors.New("at least one of 'feeCap' or 'maxFeePerGas' must be defined") + case dec.FeeCap != nil && dec.MaxFeePerGas != nil: + return errors.New("only one of 'feeCap' or 'maxFeePerGas' may be defined") + case dec.FeeCap != nil && dec.MaxFeePerGas == nil: + itx.FeeCap = (*big.Int)(dec.FeeCap) + case dec.FeeCap == nil && dec.MaxFeePerGas != nil: + itx.FeeCap = (*big.Int)(dec.MaxFeePerGas) + } + if dec.Gas == nil { + return errors.New("missing required field 'gas' for txdata") + } + itx.Gas = uint64(*dec.Gas) + if dec.Value == nil { + return errors.New("missing required field 'value' in transaction") + } + itx.Value = (*big.Int)(dec.Value) + if dec.Data == nil { + return errors.New("missing required field 'input' in transaction") + } + itx.Data = *dec.Data + if dec.V == nil { + return errors.New("missing required field 'v' in transaction") + } + itx.V = (*big.Int)(dec.V) + if dec.R == nil { + return errors.New("missing required field 'r' in transaction") + } + itx.R = (*big.Int)(dec.R) + if dec.S == nil { + return errors.New("missing required field 's' in transaction") + } + itx.S = (*big.Int)(dec.S) + withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 + if withSignature { + if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil { + return err + } + } + default: return ErrTxTypeNotSupported } diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 5d94b26b3647..e553e6af9665 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -40,6 +40,8 @@ type sigCache struct { func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { var signer Signer switch { + case config.IsLondon(blockNumber): + signer = NewLondonSigner(config.ChainID) case 
config.IsBerlin(blockNumber): signer = NewEIP2930Signer(config.ChainID) case config.IsEIP155(blockNumber): @@ -61,6 +63,9 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { // have the current block number available, use MakeSigner instead. func LatestSigner(config *params.ChainConfig) Signer { if config.ChainID != nil { + if config.LondonBlock != nil { + return NewLondonSigner(config.ChainID) + } if config.BerlinBlock != nil { return NewEIP2930Signer(config.ChainID) } @@ -82,7 +87,7 @@ func LatestSignerForChainID(chainID *big.Int) Signer { if chainID == nil { return HomesteadSigner{} } - return NewEIP2930Signer(chainID) + return NewLondonSigner(chainID) } // SignTx signs the transaction using the given signer and private key. @@ -165,6 +170,72 @@ type Signer interface { Equal(Signer) bool } +type londonSigner struct{ eip2930Signer } + +// NewLondonSigner returns a signer that accepts +// - EIP-1559 dynamic fee transactions +// - EIP-2930 access list transactions, +// - EIP-155 replay protected transactions, and +// - legacy Homestead transactions. +func NewLondonSigner(chainId *big.Int) Signer { + return londonSigner{eip2930Signer{NewEIP155Signer(chainId)}} +} + +func (s londonSigner) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != DynamicFeeTxType { + return s.eip2930Signer.Sender(tx) + } + V, R, S := tx.RawSignatureValues() + // DynamicFee txs are defined to use 0 and 1 as their recovery + // id, add 27 to become equivalent to unprotected Homestead signatures. + V = new(big.Int).Add(V, big.NewInt(27)) + if tx.ChainId().Cmp(s.chainId) != 0 { + return common.Address{}, ErrInvalidChainId + } + return recoverPlain(s.Hash(tx), R, S, V, true) +} + +func (s londonSigner) Equal(s2 Signer) bool { + x, ok := s2.(londonSigner) + return ok && x.chainId.Cmp(s.chainId) == 0 +} + +func (s londonSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { + txdata, ok := tx.inner.(*DynamicFeeTx) + if !ok { + return s.eip2930Signer.SignatureValues(tx, sig) + } + // Check that chain ID of tx matches the signer. We also accept ID zero here, + // because it indicates that the chain ID was not specified in the tx. + if txdata.ChainID.Sign() != 0 && txdata.ChainID.Cmp(s.chainId) != 0 { + return nil, nil, nil, ErrInvalidChainId + } + R, S, _ = decodeSignature(sig) + V = big.NewInt(int64(sig[64])) + return R, S, V, nil +} + +// Hash returns the hash to be signed by the sender. +// It does not uniquely identify the transaction. +func (s londonSigner) Hash(tx *Transaction) common.Hash { + if tx.Type() != DynamicFeeTxType { + return s.eip2930Signer.Hash(tx) + } + return prefixedRlpHash( + tx.Type(), + []interface{}{ + s.chainId, + tx.Nonce(), + tx.Tip(), + tx.FeeCap(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + tx.AccessList(), + }) +} + type eip2930Signer struct{ EIP155Signer } // NewEIP2930Signer returns a signer that accepts EIP-2930 access list transactions, @@ -192,8 +263,8 @@ func (s eip2930Signer) Sender(tx *Transaction) (common.Address, error) { V = new(big.Int).Sub(V, s.chainIdMul) V.Sub(V, big8) case AccessListTxType: - // ACL txs are defined to use 0 and 1 as their recovery id, add - // 27 to become equivalent to unprotected Homestead signatures. + // AL txs are defined to use 0 and 1 as their recovery + // id, add 27 to become equivalent to unprotected Homestead signatures. 
V = new(big.Int).Add(V, big.NewInt(27)) default: return common.Address{}, ErrTxTypeNotSupported diff --git a/core/vm/eips.go b/core/vm/eips.go index 025502760b4a..4070a2db5342 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -26,6 +26,7 @@ import ( var activators = map[int]func(*JumpTable){ 3529: enable3529, + 3198: enable3198, 2929: enable2929, 2200: enable2200, 1884: enable1884, @@ -154,3 +155,22 @@ func enable3529(jt *JumpTable) { jt[SSTORE].dynamicGas = gasSStoreEIP3529 jt[SELFDESTRUCT].dynamicGas = gasSelfdestructEIP3529 } + +// enable3198 applies EIP-3198 (BASEFEE Opcode) +// - Adds an opcode that returns the current block's base fee. +func enable3198(jt *JumpTable) { + // New opcode + jt[BASEFEE] = &operation{ + execute: opBaseFee, + constantGas: GasQuickStep, + minStack: minStack(0, 1), + maxStack: maxStack(0, 1), + } +} + +// opBaseFee implements BASEFEE opcode +func opBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + baseFee, _ := uint256.FromBig(interpreter.evm.Context.BaseFee) + scope.Stack.push(baseFee) + return nil, nil +} diff --git a/core/vm/evm.go b/core/vm/evm.go index 8e3c9fe00f8d..980dc6d20154 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -93,6 +93,7 @@ type BlockContext struct { BlockNumber *big.Int // Provides information for NUMBER Time *big.Int // Provides information for TIME Difficulty *big.Int // Provides information for DIFFICULTY + BaseFee *big.Int // Provides information for BASEFEE } // TxContext provides the EVM with information about a transaction. diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index a0609a0d7894..329ad77cbf83 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -68,6 +68,7 @@ type JumpTable [256]*operation func newLondonInstructionSet() JumpTable { instructionSet := newBerlinInstructionSet() enable3529(&instructionSet) // EIP-3529: Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529 + enable3198(&instructionSet) // Base fee opcode https://eips.ethereum.org/EIPS/eip-3198 return instructionSet } diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index b0adf37d0c26..286307ae91ae 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -103,6 +103,7 @@ const ( GASLIMIT CHAINID OpCode = 0x46 SELFBALANCE OpCode = 0x47 + BASEFEE OpCode = 0x48 ) // 0x50 range - 'storage' and execution. @@ -280,6 +281,7 @@ var opCodeToString = map[OpCode]string{ GASLIMIT: "GASLIMIT", CHAINID: "CHAINID", SELFBALANCE: "SELFBALANCE", + BASEFEE: "BASEFEE", // 0x50 range - 'storage' and execution. 
POP: "POP", @@ -432,6 +434,7 @@ var stringToOp = map[string]OpCode{ "CALLDATASIZE": CALLDATASIZE, "CALLDATACOPY": CALLDATACOPY, "CHAINID": CHAINID, + "BASEFEE": BASEFEE, "DELEGATECALL": DELEGATECALL, "STATICCALL": STATICCALL, "CODESIZE": CODESIZE, diff --git a/eth/state_accessor.go b/eth/state_accessor.go index 84cfaf4d738d..8d53739721fb 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -162,7 +162,7 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec signer := types.MakeSigner(eth.blockchain.Config(), block.Number()) for idx, tx := range block.Transactions() { // Assemble the transaction call message and return if the requested offset - msg, _ := tx.AsMessage(signer) + msg, _ := tx.AsMessage(signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) if idx == txIndex { diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 1c727f1366c2..172054e9b8ee 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -271,7 +271,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(localctx), nil) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { - msg, _ := tx.AsMessage(signer) + msg, _ := tx.AsMessage(signer, task.block.BaseFee()) txctx := &txTraceContext{ index: i, hash: tx.Hash(), @@ -523,7 +523,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac defer pend.Done() // Fetch and execute the next transaction trace tasks for task := range jobs { - msg, _ := txs[task.index].AsMessage(signer) + msg, _ := txs[task.index].AsMessage(signer, block.BaseFee()) txctx := &txTraceContext{ index: task.index, hash: txs[task.index].Hash(), @@ -545,7 +545,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac jobs <- &txTraceTask{statedb: statedb.Copy(), index: i} // Generate the next state snapshot fast without tracing - msg, _ := tx.AsMessage(signer) + msg, _ := tx.AsMessage(signer, block.BaseFee()) statedb.Prepare(tx.Hash(), block.Hash(), i) vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil { @@ -630,7 +630,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block for i, tx := range block.Transactions() { // Prepare the trasaction for un-traced execution var ( - msg, _ = tx.AsMessage(signer) + msg, _ = tx.AsMessage(signer, block.BaseFee()) txContext = core.NewEVMTxContext(msg) vmConf vm.Config dump *os.File diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 4c0240cd2c5b..24bce320cf00 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -161,7 +161,7 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block // Recompute transactions up to the target index. 
signer := types.MakeSigner(b.chainConfig, block.Number()) for idx, tx := range block.Transactions() { - msg, _ := tx.AsMessage(signer) + msg, _ := tx.AsMessage(signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), b.chain, nil) if idx == txIndex { diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 9dc4c696311c..8b01edd7b4cc 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -179,7 +179,7 @@ func TestPrestateTracerCreate2(t *testing.T) { } evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer}) - msg, err := tx.AsMessage(signer) + msg, err := tx.AsMessage(signer, nil) if err != nil { t.Fatalf("failed to prepare transaction for tracing: %v", err) } @@ -254,7 +254,7 @@ func TestCallTracer(t *testing.T) { } evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) - msg, err := tx.AsMessage(signer) + msg, err := tx.AsMessage(signer, nil) if err != nil { t.Fatalf("failed to prepare transaction for tracing: %v", err) } diff --git a/interfaces.go b/interfaces.go index afcdc17e5824..857d309be9c6 100644 --- a/interfaces.go +++ b/interfaces.go @@ -120,6 +120,9 @@ type CallMsg struct { Value *big.Int // amount of wei sent along with the call Data []byte // input data, usually an ABI-encoded contract method invocation + FeeCap *big.Int // EIP-1559 fee cap per gas. + Tip *big.Int // EIP-1559 tip per gas. + AccessList types.AccessList // EIP-2930 access list. } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index fe3f80c038c0..7bc0477bd273 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -799,7 +799,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64) types.Message { accessList = *args.AccessList } - msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, accessList, false) + msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, nil, nil, data, accessList, false) return msg } @@ -1271,7 +1271,7 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) result.TransactionIndex = (*hexutil.Uint64)(&index) } - if tx.Type() == types.AccessListTxType { + if tx.Type() != types.LegacyTxType { al := tx.AccessList() result.Accesses = &al result.ChainID = (*hexutil.Big)(tx.ChainId()) @@ -1393,7 +1393,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH } // Copy the original db so we don't modify it statedb := db.Copy() - msg := types.NewMessage(args.From, args.To, uint64(*args.Nonce), args.Value.ToInt(), uint64(*args.Gas), args.GasPrice.ToInt(), input, accessList, false) + msg := types.NewMessage(args.From, args.To, uint64(*args.Nonce), args.Value.ToInt(), uint64(*args.Gas), args.GasPrice.ToInt(), nil, nil, input, accessList, false) // Apply the transaction with the access list tracer tracer := vm.NewAccessListTracer(accessList, args.From, to, precompiles) diff --git a/les/odr_test.go b/les/odr_test.go index 0c75014d4975..5fb881b5c48f 100644 --- a/les/odr_test.go +++ b/les/odr_test.go @@ -135,7 +135,7 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai from := statedb.GetOrNewStateObject(bankAddr) from.SetBalance(math.MaxBig256) - msg := callmsg{types.NewMessage(from.Address(), &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, nil, false)} + msg := 
callmsg{types.NewMessage(from.Address(), &testContractAddr, 0, new(big.Int), 100000, new(big.Int), nil, nil, data, nil, false)} context := core.NewEVMBlockContext(header, bc, nil) txContext := core.NewEVMTxContext(msg) @@ -150,7 +150,7 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai header := lc.GetHeaderByHash(bhash) state := light.NewState(ctx, header, lc.Odr()) state.SetBalance(bankAddr, math.MaxBig256) - msg := callmsg{types.NewMessage(bankAddr, &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, nil, false)} + msg := callmsg{types.NewMessage(bankAddr, &testContractAddr, 0, new(big.Int), 100000, new(big.Int), nil, nil, data, nil, false)} context := core.NewEVMBlockContext(header, lc, nil) txContext := core.NewEVMTxContext(msg) vmenv := vm.NewEVM(context, txContext, state, config, vm.Config{}) diff --git a/les/state_accessor.go b/les/state_accessor.go index af5df36508ec..e276b06dc748 100644 --- a/les/state_accessor.go +++ b/les/state_accessor.go @@ -55,7 +55,7 @@ func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types. signer := types.MakeSigner(leth.blockchain.Config(), block.Number()) for idx, tx := range block.Transactions() { // Assemble the transaction call message and return if the requested offset - msg, _ := tx.AsMessage(signer) + msg, _ := tx.AsMessage(signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), leth.blockchain, nil) statedb.Prepare(tx.Hash(), block.Hash(), idx) diff --git a/light/odr_test.go b/light/odr_test.go index 0fc45b873418..bb47c69eb14a 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -194,7 +194,7 @@ func odrContractCall(ctx context.Context, db ethdb.Database, bc *core.BlockChain // Perform read-only call. st.SetBalance(testBankAddress, math.MaxBig256) - msg := callmsg{types.NewMessage(testBankAddress, &testContractAddr, 0, new(big.Int), 1000000, new(big.Int), data, nil, false)} + msg := callmsg{types.NewMessage(testBankAddress, &testContractAddr, 0, new(big.Int), 1000000, new(big.Int), nil, nil, data, nil, false)} txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(header, chain, nil) vmenv := vm.NewEVM(context, txContext, st, config, vm.Config{}) diff --git a/params/bootnodes.go b/params/bootnodes.go index 20bf0b7cbf27..99068750fce0 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -69,9 +69,8 @@ var GoerliBootnodes = []string{ // BaikalBootnodes are the enode URLs of the P2P bootstrap nodes running on the // Baikal ephemeral test network. 
-// TODO: Set Baikal bootnodes var BaikalBootnodes = []string{ - "", + "enode://9e1096aa59862a6f164994cb5cb16f5124d6c992cdbf4535ff7dea43ea1512afe5448dca9df1b7ab0726129603f1a3336b631e4d7a1a44c94daddd03241587f9@3.9.20.133:30303", } var V5Bootnodes = []string{ diff --git a/params/config.go b/params/config.go index 2cafa0e44922..c777172381cc 100644 --- a/params/config.go +++ b/params/config.go @@ -231,7 +231,7 @@ var ( IstanbulBlock: big.NewInt(0), MuirGlacierBlock: nil, BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), + LondonBlock: big.NewInt(500), Clique: &CliqueConfig{ Period: 30, Epoch: 30000, diff --git a/params/protocol_params.go b/params/protocol_params.go index 22b4c0651c8f..a49c4489f19f 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -118,6 +118,10 @@ const ( // Introduced in Tangerine Whistle (Eip 150) CreateBySelfdestructGas uint64 = 25000 + BaseFeeChangeDenominator = 8 // Bounds the amount the base fee can change between blocks. + ElasticityMultiplier = 2 // Bounds the maximum gas limit an EIP-1559 block may have. + InitialBaseFee = 1000000000 // Initial base fee for EIP-1559 blocks. + MaxCodeSize = 24576 // Maximum bytecode to permit for a contract // Precompiled contract gas prices diff --git a/tests/init.go b/tests/init.go index 240b7159d5c9..b0a38e68b074 100644 --- a/tests/init.go +++ b/tests/init.go @@ -179,6 +179,19 @@ var Forks = map[string]*params.ChainConfig{ BerlinBlock: big.NewInt(0), LondonBlock: big.NewInt(0), }, + "Aleut": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + }, } // Returns the set of defined fork names diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 46834de6daeb..9778f058fe46 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -297,7 +297,7 @@ func (tx *stTransaction) toMessage(ps stPostState) (core.Message, error) { if tx.AccessLists != nil && tx.AccessLists[ps.Indexes.Data] != nil { accessList = *tx.AccessLists[ps.Indexes.Data] } - msg := types.NewMessage(from, to, tx.Nonce, value, gasLimit, tx.GasPrice, data, accessList, true) + msg := types.NewMessage(from, to, tx.Nonce, value, gasLimit, tx.GasPrice, nil, nil, data, accessList, true) return msg, nil } From 67e7f61af7fffbc47aadf48777e2dd5da39796ff Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 18 May 2021 01:10:28 +0200 Subject: [PATCH 031/208] core: fix failing tests (#22888) This PR fixes two errors that regressed when EIP-1559 was merged. 
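For context on the first fix in the diff below (a generic Go illustration, not this PR's code): once an error value is wrapped, a direct == comparison against the sentinel no longer matches, while errors.Is unwraps and does match, which is presumably why the test assertion is rewritten with errors.Is here.

    package main

    import (
        "errors"
        "fmt"
    )

    // errInvalidChainID stands in for a sentinel error such as types.ErrInvalidChainId.
    var errInvalidChainID = errors.New("invalid chain id for signer")

    func main() {
        // Wrap the sentinel the way calling code often does.
        err := fmt.Errorf("could not apply tx: %w", errInvalidChainID)

        fmt.Println(err == errInvalidChainID)          // false: the wrapped value is a new error
        fmt.Println(errors.Is(err, errInvalidChainID)) // true: errors.Is walks the wrap chain
    }
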
--- core/blockchain_test.go | 4 ++-- core/state_processor_test.go | 19 +++++++++++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 8ace9f6a6d42..1869b0f18383 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1357,8 +1357,8 @@ func TestEIP155Transition(t *testing.T) { } }) _, err := blockchain.InsertChain(blocks) - if err != types.ErrInvalidChainId { - t.Error("expected error:", types.ErrInvalidChainId) + if have, want := err, types.ErrInvalidChainId; !errors.Is(have, want) { + t.Errorf("have %v, want %v", have, want) } } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 28baf6e7d270..c15d3d276dbe 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -39,7 +39,22 @@ import ( // contain invalid transactions func TestStateProcessorErrors(t *testing.T) { var ( - signer = types.LatestSigner(params.TestChainConfig) + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + } + signer = types.LatestSigner(config) testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") ) var makeTx = func(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { @@ -61,7 +76,7 @@ func TestStateProcessorErrors(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() gspec = &Genesis{ - Config: params.TestChainConfig, + Config: config, Alloc: GenesisAlloc{ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ Balance: big.NewInt(1000000000000000000), // 1 ether From bb9f9ccf4fdc1256a31abe634ab882baa6f1c888 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 18 May 2021 01:30:01 +0200 Subject: [PATCH 032/208] core/rawdb: wait for background freezing to exit when closing freezer (#22878) --- core/rawdb/database.go | 6 +++++- core/rawdb/freezer.go | 3 +++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 3a0a26c61d90..698f1ced8436 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -197,7 +197,11 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st } // Freezer is consistent with the key-value database, permit combining the two if !frdb.readonly { - go frdb.freeze(db) + frdb.wg.Add(1) + go func() { + frdb.freeze(db) + frdb.wg.Done() + }() } return &freezerdb{ KeyValueStore: db, diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 94b99a64eb4c..ff8919b59e92 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -84,6 +84,7 @@ type freezer struct { trigger chan chan struct{} // Manual blocking freeze trigger, test determinism quit chan struct{} + wg sync.WaitGroup closeOnce sync.Once } @@ -145,6 +146,8 @@ func (f *freezer) Close() error { var errs []error f.closeOnce.Do(func() { close(f.quit) + // Wait for any background freezing to stop + f.wg.Wait() for _, table := range f.tables { if err := table.Close(); err != nil { errs = append(errs, err) From b7a91663ab93abe360ae1b4bc68b75b5944625c4 Mon Sep 17 00:00:00 2001 
From: Evolution404 <35091674+Evolution404@users.noreply.github.com> Date: Tue, 18 May 2021 16:22:58 +0800 Subject: [PATCH 033/208] core/asm: fix the bug of "00" prefix number (#22883) --- core/asm/lex_test.go | 4 ++++ core/asm/lexer.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go index 6b8bd3d740be..53e05fbbba0c 100644 --- a/core/asm/lex_test.go +++ b/core/asm/lex_test.go @@ -60,6 +60,10 @@ func TestLexer(t *testing.T) { input: "0123abc", tokens: []token{{typ: lineStart}, {typ: number, text: "0123"}, {typ: element, text: "abc"}, {typ: eof}}, }, + { + input: "00123abc", + tokens: []token{{typ: lineStart}, {typ: number, text: "00123"}, {typ: element, text: "abc"}, {typ: eof}}, + }, { input: "@foo", tokens: []token{{typ: lineStart}, {typ: label, text: "foo"}, {typ: eof}}, diff --git a/core/asm/lexer.go b/core/asm/lexer.go index 9eb8f914ac57..21cc8c465837 100644 --- a/core/asm/lexer.go +++ b/core/asm/lexer.go @@ -254,7 +254,7 @@ func lexInsideString(l *lexer) stateFn { func lexNumber(l *lexer) stateFn { acceptance := Numbers - if l.accept("0") || l.accept("xX") { + if l.accept("xX") { acceptance = HexadecimalNumbers } l.acceptRun(acceptance) From 32c1ed8a9c963dbf75faa80116b37f8cf53d4a38 Mon Sep 17 00:00:00 2001 From: Shane Bammel Date: Tue, 18 May 2021 03:37:18 -0500 Subject: [PATCH 034/208] core/forkid: fix off-by-one bug (#22879) * forkid: added failing test * forkid: fixed off-by-one bug --- core/forkid/forkid.go | 2 +- core/forkid/forkid_test.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index 1bf3406828f0..f56ce85feeed 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -155,7 +155,7 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui for i, fork := range forks { // If our head is beyond this fork, continue to the next (we have a dummy // fork of maxuint64 as the last item to always fail this check eventually). - if head > fork { + if head >= fork { continue } // Found the first unpassed fork block, check if our current state matches diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index a20598fa9daa..2a7938bd297c 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -163,6 +163,10 @@ func TestValidation(t *testing.T) { // neither forks passed at neither nodes, they may mismatch, but we still connect for now. {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil}, + // Local is mainnet exactly on Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote + // is simply out of sync, accept. + {7280000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil}, + // Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote // is simply out of sync, accept. {7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil}, From 3e6f46caec51d82aef363632517eb5842eef6db6 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 18 May 2021 11:48:41 +0200 Subject: [PATCH 035/208] p2p/discover/v4wire: use optional RLP field for EIP-868 seq (#22842) This changes the definitions of Ping and Pong, adding an optional field for the sequence number. This field was previously encoded/decoded using the "tail" struct tag, but using "optional" is much nicer. 
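As a rough sketch of the difference described above (a simplified struct for illustration, not the actual v4wire definitions): with the "tail" tag the trailing sequence number arrives as raw RLP and has to be decoded by hand, whereas an "optional" field is typed and is simply left at its zero value when the input omits it.

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/rlp"
    )

    // pingOptional models a packet whose trailing sequence number is optional.
    // Fields tagged "optional" must sit at the end of the struct, before any "tail" field.
    type pingOptional struct {
        Version    uint
        Expiration uint64
        ENRSeq     uint64         `rlp:"optional"` // zero if the sender omitted it
        Rest       []rlp.RawValue `rlp:"tail"`     // still tolerate unknown extra fields
    }

    func main() {
        enc, _ := rlp.EncodeToBytes(&pingOptional{Version: 4, Expiration: 1136239445, ENRSeq: 7})

        var p pingOptional
        if err := rlp.DecodeBytes(enc, &p); err != nil {
            panic(err)
        }
        fmt.Println(p.ENRSeq) // 7; decoding input without the field would leave it 0
    }
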
--- p2p/discover/v4_udp.go | 9 +++------ p2p/discover/v4_udp_test.go | 8 ++++---- p2p/discover/v4wire/v4wire.go | 23 ++++++++--------------- p2p/discover/v4wire/v4wire_test.go | 24 ++---------------------- 4 files changed, 17 insertions(+), 47 deletions(-) diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index ad23eee6b471..2b3eb48391b2 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -34,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/discover/v4wire" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/netutil" - "github.com/ethereum/go-ethereum/rlp" ) // Errors @@ -217,7 +216,7 @@ func (t *UDPv4) Ping(n *enode.Node) error { func (t *UDPv4) ping(n *enode.Node) (seq uint64, err error) { rm := t.sendPing(n.ID(), &net.UDPAddr{IP: n.IP(), Port: n.UDP()}, nil) if err = <-rm.errc; err == nil { - seq = rm.reply.(*v4wire.Pong).ENRSeq() + seq = rm.reply.(*v4wire.Pong).ENRSeq } return seq, err } @@ -248,13 +247,12 @@ func (t *UDPv4) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) *r } func (t *UDPv4) makePing(toaddr *net.UDPAddr) *v4wire.Ping { - seq, _ := rlp.EncodeToBytes(t.localNode.Node().Seq()) return &v4wire.Ping{ Version: 4, From: t.ourEndpoint(), To: v4wire.NewEndpoint(toaddr, 0), Expiration: uint64(time.Now().Add(expiration).Unix()), - Rest: []rlp.RawValue{seq}, + ENRSeq: t.localNode.Node().Seq(), } } @@ -660,12 +658,11 @@ func (t *UDPv4) handlePing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.I req := h.Packet.(*v4wire.Ping) // Reply. - seq, _ := rlp.EncodeToBytes(t.localNode.Node().Seq()) t.send(from, fromID, &v4wire.Pong{ To: v4wire.NewEndpoint(from, req.From.TCP), ReplyTok: mac, Expiration: uint64(time.Now().Add(expiration).Unix()), - Rest: []rlp.RawValue{seq}, + ENRSeq: t.localNode.Node().Seq(), }) // Ping back if our last pong on file is too far in the past. diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 262e3f0ba300..e36912f010ae 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -470,13 +470,13 @@ func TestUDPv4_EIP868(t *testing.T) { // Perform endpoint proof and check for sequence number in packet tail. test.packetIn(nil, &v4wire.Ping{Expiration: futureExp}) test.waitPacketOut(func(p *v4wire.Pong, addr *net.UDPAddr, hash []byte) { - if p.ENRSeq() != wantNode.Seq() { - t.Errorf("wrong sequence number in pong: %d, want %d", p.ENRSeq(), wantNode.Seq()) + if p.ENRSeq != wantNode.Seq() { + t.Errorf("wrong sequence number in pong: %d, want %d", p.ENRSeq, wantNode.Seq()) } }) test.waitPacketOut(func(p *v4wire.Ping, addr *net.UDPAddr, hash []byte) { - if p.ENRSeq() != wantNode.Seq() { - t.Errorf("wrong sequence number in ping: %d, want %d", p.ENRSeq(), wantNode.Seq()) + if p.ENRSeq != wantNode.Seq() { + t.Errorf("wrong sequence number in ping: %d, want %d", p.ENRSeq, wantNode.Seq()) } test.packetIn(nil, &v4wire.Pong{Expiration: futureExp, ReplyTok: hash}) }) diff --git a/p2p/discover/v4wire/v4wire.go b/p2p/discover/v4wire/v4wire.go index b5dcb6e51731..23e7134414c9 100644 --- a/p2p/discover/v4wire/v4wire.go +++ b/p2p/discover/v4wire/v4wire.go @@ -50,6 +50,8 @@ type ( Version uint From, To Endpoint Expiration uint64 + ENRSeq uint64 `rlp:"optional"` // Sequence number of local record, added by EIP-868. + // Ignore additional fields (for forward compatibility). Rest []rlp.RawValue `rlp:"tail"` } @@ -62,6 +64,8 @@ type ( To Endpoint ReplyTok []byte // This contains the hash of the ping packet. 
Expiration uint64 // Absolute timestamp at which the packet becomes invalid. + ENRSeq uint64 `rlp:"optional"` // Sequence number of local record, added by EIP-868. + // Ignore additional fields (for forward compatibility). Rest []rlp.RawValue `rlp:"tail"` } @@ -162,13 +166,11 @@ type Packet interface { Kind() byte } -func (req *Ping) Name() string { return "PING/v4" } -func (req *Ping) Kind() byte { return PingPacket } -func (req *Ping) ENRSeq() uint64 { return seqFromTail(req.Rest) } +func (req *Ping) Name() string { return "PING/v4" } +func (req *Ping) Kind() byte { return PingPacket } -func (req *Pong) Name() string { return "PONG/v4" } -func (req *Pong) Kind() byte { return PongPacket } -func (req *Pong) ENRSeq() uint64 { return seqFromTail(req.Rest) } +func (req *Pong) Name() string { return "PONG/v4" } +func (req *Pong) Kind() byte { return PongPacket } func (req *Findnode) Name() string { return "FINDNODE/v4" } func (req *Findnode) Kind() byte { return FindnodePacket } @@ -187,15 +189,6 @@ func Expired(ts uint64) bool { return time.Unix(int64(ts), 0).Before(time.Now()) } -func seqFromTail(tail []rlp.RawValue) uint64 { - if len(tail) == 0 { - return 0 - } - var seq uint64 - rlp.DecodeBytes(tail[0], &seq) - return seq -} - // Encoder/decoder. const ( diff --git a/p2p/discover/v4wire/v4wire_test.go b/p2p/discover/v4wire/v4wire_test.go index 4dddeadd2084..3b4161998dc6 100644 --- a/p2p/discover/v4wire/v4wire_test.go +++ b/p2p/discover/v4wire/v4wire_test.go @@ -23,7 +23,6 @@ import ( "testing" "github.com/davecgh/go-spew/spew" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" ) @@ -40,7 +39,6 @@ var testPackets = []struct { From: Endpoint{net.ParseIP("127.0.0.1").To4(), 3322, 5544}, To: Endpoint{net.ParseIP("::1"), 2222, 3333}, Expiration: 1136239445, - Rest: []rlp.RawValue{}, }, }, { @@ -50,26 +48,8 @@ var testPackets = []struct { From: Endpoint{net.ParseIP("127.0.0.1").To4(), 3322, 5544}, To: Endpoint{net.ParseIP("::1"), 2222, 3333}, Expiration: 1136239445, - Rest: []rlp.RawValue{{0x01}, {0x02}}, - }, - }, - { - input: "577be4349c4dd26768081f58de4c6f375a7a22f3f7adda654d1428637412c3d7fe917cadc56d4e5e7ffae1dbe3efffb9849feb71b262de37977e7c7a44e677295680e9e38ab26bee2fcbae207fba3ff3d74069a50b902a82c9903ed37cc993c50001f83e82022bd79020010db83c4d001500000000abcdef12820cfa8215a8d79020010db885a308d313198a2e037073488208ae82823a8443b9a355c5010203040531b9019afde696e582a78fa8d95ea13ce3297d4afb8ba6433e4154caa5ac6431af1b80ba76023fa4090c408f6b4bc3701562c031041d4702971d102c9ab7fa5eed4cd6bab8f7af956f7d565ee1917084a95398b6a21eac920fe3dd1345ec0a7ef39367ee69ddf092cbfe5b93e5e568ebc491983c09c76d922dc3", - wantPacket: &Ping{ - Version: 555, - From: Endpoint{net.ParseIP("2001:db8:3c4d:15::abcd:ef12"), 3322, 5544}, - To: Endpoint{net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"), 2222, 33338}, - Expiration: 1136239445, - Rest: []rlp.RawValue{{0xC5, 0x01, 0x02, 0x03, 0x04, 0x05}}, - }, - }, - { - input: "09b2428d83348d27cdf7064ad9024f526cebc19e4958f0fdad87c15eb598dd61d08423e0bf66b2069869e1724125f820d851c136684082774f870e614d95a2855d000f05d1648b2d5945470bc187c2d2216fbe870f43ed0909009882e176a46b0102f846d79020010db885a308d313198a2e037073488208ae82823aa0fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c9548443b9a355c6010203c2040506a0c969a58f6f9095004c0177a6b47f451530cab38966a25cca5cb58f055542124e", - wantPacket: &Pong{ - To: Endpoint{net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"), 2222, 33338}, - ReplyTok: 
common.Hex2Bytes("fbc914b16819237dcd8801d7e53f69e9719adecb3cc0e790c57e91ca4461c954"), - Expiration: 1136239445, - Rest: []rlp.RawValue{{0xC6, 0x01, 0x02, 0x03, 0xC2, 0x04, 0x05}, {0x06}}, + ENRSeq: 1, + Rest: []rlp.RawValue{{0x02}}, }, }, { From 088da24ebfb17a50652d4f9c3657670abcf055c8 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 18 May 2021 12:10:27 +0200 Subject: [PATCH 036/208] rlp: improve decoder stream implementation (#22858) This commit makes various cleanup changes to rlp.Stream. * rlp: shrink Stream struct This removes a lot of unused padding space in Stream by reordering the fields. The size of Stream changes from 120 bytes to 88 bytes. Stream instances are internally cached and reused using sync.Pool, so this does not improve performance. * rlp: simplify list stack The list stack kept track of the size of the current list context as well as the current offset into it. The size had to be stored in the stack in order to subtract it from the remaining bytes of any enclosing list in ListEnd. It seems that this can be implemented in a simpler way: just subtract the size from the enclosing list context in List instead. --- rlp/decode.go | 175 +++++++++++++++++++++++++------------------------- 1 file changed, 86 insertions(+), 89 deletions(-) diff --git a/rlp/decode.go b/rlp/decode.go index b340aa029e43..976780971773 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -518,7 +518,7 @@ func decodeDecoder(s *Stream, val reflect.Value) error { } // Kind represents the kind of value contained in an RLP stream. -type Kind int +type Kind int8 const ( Byte Kind = iota @@ -561,22 +561,16 @@ type ByteReader interface { type Stream struct { r ByteReader - // number of bytes remaining to be read from r. - remaining uint64 - limited bool - - // auxiliary buffer for integer decoding - uintbuf []byte - - kind Kind // kind of value ahead - size uint64 // size of value ahead - byteval byte // value of single byte in type tag - kinderr error // error from last readKind - stack []listpos + remaining uint64 // number of bytes remaining to be read from r + size uint64 // size of value ahead + kinderr error // error from last readKind + stack []uint64 // list sizes + uintbuf [8]byte // auxiliary buffer for integer decoding + kind Kind // kind of value ahead + byteval byte // value of single byte in type tag + limited bool // true if input limit is in effect } -type listpos struct{ pos, size uint64 } - // NewStream creates a new decoding stream reading from r. // // If r implements the ByteReader interface, Stream will @@ -646,8 +640,8 @@ func (s *Stream) Raw() ([]byte, error) { s.kind = -1 // rearm Kind return []byte{s.byteval}, nil } - // the original header has already been read and is no longer - // available. read content and put a new header in front of it. + // The original header has already been read and is no longer + // available. Read content and put a new header in front of it. start := headsize(size) buf := make([]byte, uint64(start)+size) if err := s.readFull(buf[start:]); err != nil { @@ -730,7 +724,14 @@ func (s *Stream) List() (size uint64, err error) { if kind != List { return 0, ErrExpectedList } - s.stack = append(s.stack, listpos{0, size}) + + // Remove size of inner list from outer list before pushing the new size + // onto the stack. This ensures that the remaining outer list size will + // be correct after the matching call to ListEnd. 
+ if inList, limit := s.listLimit(); inList { + s.stack[len(s.stack)-1] = limit - size + } + s.stack = append(s.stack, size) s.kind = -1 s.size = 0 return size, nil @@ -739,17 +740,13 @@ func (s *Stream) List() (size uint64, err error) { // ListEnd returns to the enclosing list. // The input reader must be positioned at the end of a list. func (s *Stream) ListEnd() error { - if len(s.stack) == 0 { + // Ensure that no more data is remaining in the current list. + if inList, listLimit := s.listLimit(); !inList { return errNotInList - } - tos := s.stack[len(s.stack)-1] - if tos.pos != tos.size { + } else if listLimit > 0 { return errNotAtEOL } s.stack = s.stack[:len(s.stack)-1] // pop - if len(s.stack) > 0 { - s.stack[len(s.stack)-1].pos += tos.size - } s.kind = -1 s.size = 0 return nil @@ -777,7 +774,7 @@ func (s *Stream) Decode(val interface{}) error { err = decoder(s, rval.Elem()) if decErr, ok := err.(*decodeError); ok && len(decErr.ctx) > 0 { - // add decode target type to error so context has more meaning + // Add decode target type to error so context has more meaning. decErr.ctx = append(decErr.ctx, fmt.Sprint("(", rtyp.Elem(), ")")) } return err @@ -800,6 +797,9 @@ func (s *Stream) Reset(r io.Reader, inputLimit uint64) { case *bytes.Reader: s.remaining = uint64(br.Len()) s.limited = true + case *bytes.Buffer: + s.remaining = uint64(br.Len()) + s.limited = true case *strings.Reader: s.remaining = uint64(br.Len()) s.limited = true @@ -818,10 +818,8 @@ func (s *Stream) Reset(r io.Reader, inputLimit uint64) { s.size = 0 s.kind = -1 s.kinderr = nil - if s.uintbuf == nil { - s.uintbuf = make([]byte, 8) - } s.byteval = 0 + s.uintbuf = [8]byte{} } // Kind returns the kind and size of the next value in the @@ -836,35 +834,29 @@ func (s *Stream) Reset(r io.Reader, inputLimit uint64) { // the value. Subsequent calls to Kind (until the value is decoded) // will not advance the input reader and return cached information. func (s *Stream) Kind() (kind Kind, size uint64, err error) { - var tos *listpos - if len(s.stack) > 0 { - tos = &s.stack[len(s.stack)-1] - } - if s.kind < 0 { - s.kinderr = nil - // Don't read further if we're at the end of the - // innermost list. - if tos != nil && tos.pos == tos.size { - return 0, 0, EOL - } - s.kind, s.size, s.kinderr = s.readKind() - if s.kinderr == nil { - if tos == nil { - // At toplevel, check that the value is smaller - // than the remaining input length. - if s.limited && s.size > s.remaining { - s.kinderr = ErrValueTooLarge - } - } else { - // Inside a list, check that the value doesn't overflow the list. - if s.size > tos.size-tos.pos { - s.kinderr = ErrElemTooLarge - } - } + if s.kind >= 0 { + return s.kind, s.size, s.kinderr + } + + // Check for end of list. This needs to be done here because readKind + // checks against the list size, and would return the wrong error. + inList, listLimit := s.listLimit() + if inList && listLimit == 0 { + return 0, 0, EOL + } + // Read the actual size tag. + s.kind, s.size, s.kinderr = s.readKind() + if s.kinderr == nil { + // Check the data size of the value ahead against input limits. This + // is done here because many decoders require allocating an input + // buffer matching the value size. Checking it here protects those + // decoders from inputs declaring very large value size. 
+ if inList && s.size > listLimit { + s.kinderr = ErrElemTooLarge + } else if s.limited && s.size > s.remaining { + s.kinderr = ErrValueTooLarge } } - // Note: this might return a sticky error generated - // by an earlier call to readKind. return s.kind, s.size, s.kinderr } @@ -891,37 +883,35 @@ func (s *Stream) readKind() (kind Kind, size uint64, err error) { s.byteval = b return Byte, 0, nil case b < 0xB8: - // Otherwise, if a string is 0-55 bytes long, - // the RLP encoding consists of a single byte with value 0x80 plus the - // length of the string followed by the string. The range of the first - // byte is thus [0x80, 0xB7]. + // Otherwise, if a string is 0-55 bytes long, the RLP encoding consists + // of a single byte with value 0x80 plus the length of the string + // followed by the string. The range of the first byte is thus [0x80, 0xB7]. return String, uint64(b - 0x80), nil case b < 0xC0: - // If a string is more than 55 bytes long, the - // RLP encoding consists of a single byte with value 0xB7 plus the length - // of the length of the string in binary form, followed by the length of - // the string, followed by the string. For example, a length-1024 string - // would be encoded as 0xB90400 followed by the string. The range of - // the first byte is thus [0xB8, 0xBF]. + // If a string is more than 55 bytes long, the RLP encoding consists of a + // single byte with value 0xB7 plus the length of the length of the + // string in binary form, followed by the length of the string, followed + // by the string. For example, a length-1024 string would be encoded as + // 0xB90400 followed by the string. The range of the first byte is thus + // [0xB8, 0xBF]. size, err = s.readUint(b - 0xB7) if err == nil && size < 56 { err = ErrCanonSize } return String, size, err case b < 0xF8: - // If the total payload of a list - // (i.e. the combined length of all its items) is 0-55 bytes long, the - // RLP encoding consists of a single byte with value 0xC0 plus the length - // of the list followed by the concatenation of the RLP encodings of the - // items. The range of the first byte is thus [0xC0, 0xF7]. + // If the total payload of a list (i.e. the combined length of all its + // items) is 0-55 bytes long, the RLP encoding consists of a single byte + // with value 0xC0 plus the length of the list followed by the + // concatenation of the RLP encodings of the items. The range of the + // first byte is thus [0xC0, 0xF7]. return List, uint64(b - 0xC0), nil default: - // If the total payload of a list is more than 55 bytes long, - // the RLP encoding consists of a single byte with value 0xF7 - // plus the length of the length of the payload in binary - // form, followed by the length of the payload, followed by - // the concatenation of the RLP encodings of the items. The - // range of the first byte is thus [0xF8, 0xFF]. + // If the total payload of a list is more than 55 bytes long, the RLP + // encoding consists of a single byte with value 0xF7 plus the length of + // the length of the payload in binary form, followed by the length of + // the payload, followed by the concatenation of the RLP encodings of + // the items. The range of the first byte is thus [0xF8, 0xFF]. 
size, err = s.readUint(b - 0xF7) if err == nil && size < 56 { err = ErrCanonSize @@ -940,22 +930,20 @@ func (s *Stream) readUint(size byte) (uint64, error) { return uint64(b), err default: start := int(8 - size) - for i := 0; i < start; i++ { - s.uintbuf[i] = 0 - } + s.uintbuf = [8]byte{} if err := s.readFull(s.uintbuf[start:]); err != nil { return 0, err } if s.uintbuf[start] == 0 { - // Note: readUint is also used to decode integer - // values. The error needs to be adjusted to become - // ErrCanonInt in this case. + // Note: readUint is also used to decode integer values. + // The error needs to be adjusted to become ErrCanonInt in this case. return 0, ErrCanonSize } - return binary.BigEndian.Uint64(s.uintbuf), nil + return binary.BigEndian.Uint64(s.uintbuf[:]), nil } } +// readFull reads into buf from the underlying stream. func (s *Stream) readFull(buf []byte) (err error) { if err := s.willRead(uint64(len(buf))); err != nil { return err @@ -977,6 +965,7 @@ func (s *Stream) readFull(buf []byte) (err error) { return err } +// readByte reads a single byte from the underlying stream. func (s *Stream) readByte() (byte, error) { if err := s.willRead(1); err != nil { return 0, err @@ -988,16 +977,16 @@ func (s *Stream) readByte() (byte, error) { return b, err } +// willRead is called before any read from the underlying stream. It checks +// n against size limits, and updates the limits if n doesn't overflow them. func (s *Stream) willRead(n uint64) error { s.kind = -1 // rearm Kind - if len(s.stack) > 0 { - // check list overflow - tos := s.stack[len(s.stack)-1] - if n > tos.size-tos.pos { + if inList, limit := s.listLimit(); inList { + if n > limit { return ErrElemTooLarge } - s.stack[len(s.stack)-1].pos += n + s.stack[len(s.stack)-1] = limit - n } if s.limited { if n > s.remaining { @@ -1007,3 +996,11 @@ func (s *Stream) willRead(n uint64) error { } return nil } + +// listLimit returns the amount of data remaining in the innermost list. +func (s *Stream) listLimit() (inList bool, limit uint64) { + if len(s.stack) == 0 { + return false, 0 + } + return true, s.stack[len(s.stack)-1] +} From b3a1fda6509c8dd64b5f5916f62a6602bcdc7a9d Mon Sep 17 00:00:00 2001 From: lightclient <14004106+lightclient@users.noreply.github.com> Date: Tue, 18 May 2021 11:54:10 -0600 Subject: [PATCH 037/208] cmd/utils: expand tilde in --jspath (#22900) --- cmd/utils/flags.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index d3fb3f2cbd1b..1ae07108e4ae 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -675,10 +675,10 @@ var ( } // ATM the url is left to the user and deployment to - JSpathFlag = cli.StringFlag{ + JSpathFlag = DirectoryFlag{ Name: "jspath", Usage: "JavaScript root path for `loadScript`", - Value: ".", + Value: DirectoryString("."), } // Gas price oracle settings From 3e795881ea6d68c32da5da3c95f0d458a64e35c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 19 May 2021 15:09:03 +0300 Subject: [PATCH 038/208] eth, p2p/msgrate: move peer QoS tracking to its own package and use it for snap (#22876) This change extracts the peer QoS tracking logic from eth/downloader, moving it into the new package p2p/msgrate. The job of msgrate.Tracker is determining suitable timeout values and request sizes per peer. The snap sync scheduler now uses msgrate.Tracker instead of the hard-coded 15s timeout. This should make the sync work better on network links with high latency. 
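The description above is the gist of the change. As a loose, hypothetical illustration of the underlying idea (not the p2p/msgrate API; the type and constants here are invented), a per-peer tracker can keep exponential moving averages of round-trip time and delivered items per second, then derive a timeout and a batch size from them instead of hard-coding a 15s limit:

    package main

    import (
        "fmt"
        "time"
    )

    // rateTracker is a toy per-peer tracker: it smooths observed round-trip
    // times and delivery rates and turns them into request parameters.
    type rateTracker struct {
        rtt      time.Duration // smoothed round-trip time
        capacity float64       // smoothed items delivered per second
    }

    const impact = 0.1 // weight of each new measurement in the moving averages

    // Update folds one finished request (how long it took, how many items came
    // back) into the moving averages.
    func (t *rateTracker) Update(elapsed time.Duration, items int) {
        if elapsed <= 0 {
            elapsed = time.Nanosecond // guard against a zero divisor
        }
        measured := float64(items) / elapsed.Seconds()
        t.capacity = (1-impact)*t.capacity + impact*measured
        t.rtt = time.Duration((1-impact)*float64(t.rtt) + impact*float64(elapsed))
    }

    // TargetTimeout allows a few smoothed round trips before a request is abandoned.
    func (t *rateTracker) TargetTimeout() time.Duration {
        return 3 * t.rtt
    }

    // Capacity estimates how many items to ask for so the reply lands within targetRTT.
    func (t *rateTracker) Capacity(targetRTT time.Duration) int {
        if n := int(t.capacity * targetRTT.Seconds()); n > 1 {
            return n
        }
        return 1
    }

    func main() {
        var t rateTracker
        t.Update(200*time.Millisecond, 50) // one observed request: 50 items in 200ms
        fmt.Println(t.TargetTimeout(), t.Capacity(time.Second))
    }

The real tracker is more involved (shared medians across peers, per-message-type capacities), but the shape of the API sketched here, Update after each delivery plus TargetTimeout and Capacity when scheduling, mirrors how the downloader code in the diff below uses d.peers.rates.
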
--- eth/downloader/downloader.go | 119 +-------- eth/downloader/peer.go | 171 ++++-------- eth/downloader/peer_test.go | 53 ---- eth/downloader/statesync.go | 4 +- eth/protocols/snap/sync.go | 331 +++++++++++++++-------- eth/protocols/snap/sync_test.go | 18 +- p2p/msgrate/msgrate.go | 458 ++++++++++++++++++++++++++++++++ 7 files changed, 745 insertions(+), 409 deletions(-) delete mode 100644 eth/downloader/peer_test.go create mode 100644 p2p/msgrate/msgrate.go diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 6f59b29a5e9a..e8a4a76ca21f 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -47,16 +47,6 @@ var ( MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request MaxStateFetch = 384 // Amount of node state values to allow fetching per request - rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests - rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests - rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value - ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion - ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts - - qosTuningPeers = 5 // Number of peers to tune based on (best peers) - qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence - qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value - maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) maxHeadersProcess = 2048 // Number of header download results to import at once into the chain maxResultsProcess = 2048 // Number of content download results to import at once into the chain @@ -96,13 +86,6 @@ var ( ) type Downloader struct { - // WARNING: The `rttEstimate` and `rttConfidence` fields are accessed atomically. - // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is - // guaranteed to be so aligned, so take advantage of that. For more information, - // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG. 
- rttEstimate uint64 // Round trip time to target for download requests - rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops) - mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode mux *event.TypeMux // Event multiplexer to announce sync operation events @@ -232,8 +215,6 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, checkpoint: checkpoint, queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), peers: newPeerSet(), - rttEstimate: uint64(rttMaxEstimate), - rttConfidence: uint64(1000000), blockchain: chain, lightchain: lightchain, dropPeer: dropPeer, @@ -252,7 +233,6 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, }, trackStateReq: make(chan *stateReq), } - go dl.qosTuner() go dl.stateFetcher() return dl } @@ -310,8 +290,6 @@ func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { logger.Error("Failed to register sync peer", "err", err) return err } - d.qosReduceConfidence() - return nil } @@ -670,7 +648,7 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty } go p.peer.RequestHeadersByHash(latest, fetch, fsMinFullBlocks-1, true) - ttl := d.requestTTL() + ttl := d.peers.rates.TargetTimeout() timeout := time.After(ttl) for { select { @@ -853,7 +831,7 @@ func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, re // Wait for the remote response to the head fetch number, hash := uint64(0), common.Hash{} - ttl := d.requestTTL() + ttl := d.peers.rates.TargetTimeout() timeout := time.After(ttl) for finished := false; !finished; { @@ -942,7 +920,7 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, // Split our chain interval in two, and request the hash to cross check check := (start + end) / 2 - ttl := d.requestTTL() + ttl := d.peers.rates.TargetTimeout() timeout := time.After(ttl) go p.peer.RequestHeadersByNumber(check, 1, 0, false) @@ -1035,7 +1013,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { getHeaders := func(from uint64) { request = time.Now() - ttl = d.requestTTL() + ttl = d.peers.rates.TargetTimeout() timeout.Reset(ttl) if skeleton { @@ -1050,7 +1028,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { pivoting = true request = time.Now() - ttl = d.requestTTL() + ttl = d.peers.rates.TargetTimeout() timeout.Reset(ttl) d.pivotLock.RLock() @@ -1262,12 +1240,12 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ( pack := packet.(*headerPack) return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh) } - expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) } + expire = func() map[string]int { return d.queue.ExpireHeaders(d.peers.rates.TargetTimeout()) } reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) { return d.queue.ReserveHeaders(p, count), false, false } fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } - capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) } + capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.peers.rates.TargetRoundTrip()) } setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetHeadersIdle(accepted, deliveryTime) } @@ -1293,9 +1271,9 @@ func (d *Downloader) fetchBodies(from uint64) error { pack := 
packet.(*bodyPack) return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles) } - expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) } + expire = func() map[string]int { return d.queue.ExpireBodies(d.peers.rates.TargetTimeout()) } fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) } - capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) } + capacity = func(p *peerConnection) int { return p.BlockCapacity(d.peers.rates.TargetRoundTrip()) } setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) } ) err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire, @@ -1317,9 +1295,9 @@ func (d *Downloader) fetchReceipts(from uint64) error { pack := packet.(*receiptPack) return d.queue.DeliverReceipts(pack.peerID, pack.receipts) } - expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) } + expire = func() map[string]int { return d.queue.ExpireReceipts(d.peers.rates.TargetTimeout()) } fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) } - capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) } + capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.peers.rates.TargetRoundTrip()) } setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetReceiptsIdle(accepted, deliveryTime) } @@ -2031,78 +2009,3 @@ func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dro return errNoSyncActive } } - -// qosTuner is the quality of service tuning loop that occasionally gathers the -// peer latency statistics and updates the estimated request round trip time. -func (d *Downloader) qosTuner() { - for { - // Retrieve the current median RTT and integrate into the previoust target RTT - rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT())) - atomic.StoreUint64(&d.rttEstimate, uint64(rtt)) - - // A new RTT cycle passed, increase our confidence in the estimated RTT - conf := atomic.LoadUint64(&d.rttConfidence) - conf = conf + (1000000-conf)/2 - atomic.StoreUint64(&d.rttConfidence, conf) - - // Log the new QoS values and sleep until the next RTT - log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL()) - select { - case <-d.quitCh: - return - case <-time.After(rtt): - } - } -} - -// qosReduceConfidence is meant to be called when a new peer joins the downloader's -// peer set, needing to reduce the confidence we have in out QoS estimates. 
-func (d *Downloader) qosReduceConfidence() { - // If we have a single peer, confidence is always 1 - peers := uint64(d.peers.Len()) - if peers == 0 { - // Ensure peer connectivity races don't catch us off guard - return - } - if peers == 1 { - atomic.StoreUint64(&d.rttConfidence, 1000000) - return - } - // If we have a ton of peers, don't drop confidence) - if peers >= uint64(qosConfidenceCap) { - return - } - // Otherwise drop the confidence factor - conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers - if float64(conf)/1000000 < rttMinConfidence { - conf = uint64(rttMinConfidence * 1000000) - } - atomic.StoreUint64(&d.rttConfidence, conf) - - rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate)) - log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL()) -} - -// requestRTT returns the current target round trip time for a download request -// to complete in. -// -// Note, the returned RTT is .9 of the actually estimated RTT. The reason is that -// the downloader tries to adapt queries to the RTT, so multiple RTT values can -// be adapted to, but smaller ones are preferred (stabler download stream). -func (d *Downloader) requestRTT() time.Duration { - return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10 -} - -// requestTTL returns the current timeout allowance for a single download request -// to finish under. -func (d *Downloader) requestTTL() time.Duration { - var ( - rtt = time.Duration(atomic.LoadUint64(&d.rttEstimate)) - conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0 - ) - ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf) - if ttl > ttlLimit { - ttl = ttlLimit - } - return ttl -} diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index b3b6cc95a0cc..b9c771694112 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -32,11 +32,11 @@ import ( "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/msgrate" ) const ( - maxLackingHashes = 4096 // Maximum number of entries allowed on the list or lacking items - measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value. 
+ maxLackingHashes = 4096 // Maximum number of entries allowed on the list or lacking items ) var ( @@ -54,18 +54,12 @@ type peerConnection struct { receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1) - headerThroughput float64 // Number of headers measured to be retrievable per second - blockThroughput float64 // Number of blocks (bodies) measured to be retrievable per second - receiptThroughput float64 // Number of receipts measured to be retrievable per second - stateThroughput float64 // Number of node data pieces measured to be retrievable per second - - rtt time.Duration // Request round trip time to track responsiveness (QoS) - headerStarted time.Time // Time instance when the last header fetch was started blockStarted time.Time // Time instance when the last block (body) fetch was started receiptStarted time.Time // Time instance when the last receipt fetch was started stateStarted time.Time // Time instance when the last node data fetch was started + rates *msgrate.Tracker // Tracker to hone in on the number of items retrievable per second lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously) peer Peer @@ -133,11 +127,6 @@ func (p *peerConnection) Reset() { atomic.StoreInt32(&p.receiptIdle, 0) atomic.StoreInt32(&p.stateIdle, 0) - p.headerThroughput = 0 - p.blockThroughput = 0 - p.receiptThroughput = 0 - p.stateThroughput = 0 - p.lacking = make(map[common.Hash]struct{}) } @@ -212,93 +201,72 @@ func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { // requests. Its estimated header retrieval throughput is updated with that measured // just now. func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) { - p.setIdle(deliveryTime.Sub(p.headerStarted), delivered, &p.headerThroughput, &p.headerIdle) + p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered) + atomic.StoreInt32(&p.headerIdle, 0) } // SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval // requests. Its estimated body retrieval throughput is updated with that measured // just now. func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) { - p.setIdle(deliveryTime.Sub(p.blockStarted), delivered, &p.blockThroughput, &p.blockIdle) + p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered) + atomic.StoreInt32(&p.blockIdle, 0) } // SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt // retrieval requests. Its estimated receipt retrieval throughput is updated // with that measured just now. func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) { - p.setIdle(deliveryTime.Sub(p.receiptStarted), delivered, &p.receiptThroughput, &p.receiptIdle) + p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered) + atomic.StoreInt32(&p.receiptIdle, 0) } // SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie // data retrieval requests. Its estimated state retrieval throughput is updated // with that measured just now. func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) { - p.setIdle(deliveryTime.Sub(p.stateStarted), delivered, &p.stateThroughput, &p.stateIdle) -} - -// setIdle sets the peer to idle, allowing it to execute new retrieval requests. -// Its estimated retrieval throughput is updated with that measured just now. 
-func (p *peerConnection) setIdle(elapsed time.Duration, delivered int, throughput *float64, idle *int32) { - // Irrelevant of the scaling, make sure the peer ends up idle - defer atomic.StoreInt32(idle, 0) - - p.lock.Lock() - defer p.lock.Unlock() - - // If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum - if delivered == 0 { - *throughput = 0 - return - } - // Otherwise update the throughput with a new measurement - if elapsed <= 0 { - elapsed = 1 // +1 (ns) to ensure non-zero divisor - } - measured := float64(delivered) / (float64(elapsed) / float64(time.Second)) - - *throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured - p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed)) - - p.log.Trace("Peer throughput measurements updated", - "hps", p.headerThroughput, "bps", p.blockThroughput, - "rps", p.receiptThroughput, "sps", p.stateThroughput, - "miss", len(p.lacking), "rtt", p.rtt) + p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered) + atomic.StoreInt32(&p.stateIdle, 0) } // HeaderCapacity retrieves the peers header download allowance based on its // previously discovered throughput. func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { - p.lock.RLock() - defer p.lock.RUnlock() - - return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch))) + cap := int(math.Ceil(p.rates.Capacity(eth.BlockHeadersMsg, targetRTT))) + if cap > MaxHeaderFetch { + cap = MaxHeaderFetch + } + return cap } // BlockCapacity retrieves the peers block download allowance based on its // previously discovered throughput. func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int { - p.lock.RLock() - defer p.lock.RUnlock() - - return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch))) + cap := int(math.Ceil(p.rates.Capacity(eth.BlockBodiesMsg, targetRTT))) + if cap > MaxBlockFetch { + cap = MaxBlockFetch + } + return cap } // ReceiptCapacity retrieves the peers receipt download allowance based on its // previously discovered throughput. func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { - p.lock.RLock() - defer p.lock.RUnlock() - - return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch))) + cap := int(math.Ceil(p.rates.Capacity(eth.ReceiptsMsg, targetRTT))) + if cap > MaxReceiptFetch { + cap = MaxReceiptFetch + } + return cap } // NodeDataCapacity retrieves the peers state download allowance based on its // previously discovered throughput. func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int { - p.lock.RLock() - defer p.lock.RUnlock() - - return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch))) + cap := int(math.Ceil(p.rates.Capacity(eth.NodeDataMsg, targetRTT))) + if cap > MaxStateFetch { + cap = MaxStateFetch + } + return cap } // MarkLacking appends a new entity to the set of items (blocks, receipts, states) @@ -330,16 +298,20 @@ func (p *peerConnection) Lacks(hash common.Hash) bool { // peerSet represents the collection of active peer participating in the chain // download procedure. 
type peerSet struct { - peers map[string]*peerConnection + peers map[string]*peerConnection + rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat + newPeerFeed event.Feed peerDropFeed event.Feed - lock sync.RWMutex + + lock sync.RWMutex } // newPeerSet creates a new peer set top track the active download sources. func newPeerSet() *peerSet { return &peerSet{ peers: make(map[string]*peerConnection), + rates: msgrate.NewTrackers(log.New("proto", "eth")), } } @@ -371,30 +343,15 @@ func (ps *peerSet) Reset() { // average of all existing peers, to give it a realistic chance of being used // for data retrievals. func (ps *peerSet) Register(p *peerConnection) error { - // Retrieve the current median RTT as a sane default - p.rtt = ps.medianRTT() - // Register the new peer with some meaningful defaults ps.lock.Lock() if _, ok := ps.peers[p.id]; ok { ps.lock.Unlock() return errAlreadyRegistered } - if len(ps.peers) > 0 { - p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0 - - for _, peer := range ps.peers { - peer.lock.RLock() - p.headerThroughput += peer.headerThroughput - p.blockThroughput += peer.blockThroughput - p.receiptThroughput += peer.receiptThroughput - p.stateThroughput += peer.stateThroughput - peer.lock.RUnlock() - } - p.headerThroughput /= float64(len(ps.peers)) - p.blockThroughput /= float64(len(ps.peers)) - p.receiptThroughput /= float64(len(ps.peers)) - p.stateThroughput /= float64(len(ps.peers)) + p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip()) + if err := ps.rates.Track(p.id, p.rates); err != nil { + return err } ps.peers[p.id] = p ps.lock.Unlock() @@ -413,6 +370,7 @@ func (ps *peerSet) Unregister(id string) error { return errNotRegistered } delete(ps.peers, id) + ps.rates.Untrack(id) ps.lock.Unlock() ps.peerDropFeed.Send(p) @@ -454,9 +412,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { return atomic.LoadInt32(&p.headerIdle) == 0 } throughput := func(p *peerConnection) float64 { - p.lock.RLock() - defer p.lock.RUnlock() - return p.headerThroughput + return p.rates.Capacity(eth.BlockHeadersMsg, time.Second) } return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput) } @@ -468,9 +424,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { return atomic.LoadInt32(&p.blockIdle) == 0 } throughput := func(p *peerConnection) float64 { - p.lock.RLock() - defer p.lock.RUnlock() - return p.blockThroughput + return p.rates.Capacity(eth.BlockBodiesMsg, time.Second) } return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput) } @@ -482,9 +436,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { return atomic.LoadInt32(&p.receiptIdle) == 0 } throughput := func(p *peerConnection) float64 { - p.lock.RLock() - defer p.lock.RUnlock() - return p.receiptThroughput + return p.rates.Capacity(eth.ReceiptsMsg, time.Second) } return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput) } @@ -496,9 +448,7 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { return atomic.LoadInt32(&p.stateIdle) == 0 } throughput := func(p *peerConnection) float64 { - p.lock.RLock() - defer p.lock.RUnlock() - return p.stateThroughput + return p.rates.Capacity(eth.NodeDataMsg, time.Second) } return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput) } @@ -527,37 +477,6 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peer return sortPeers.p, total } -// medianRTT returns the median RTT of the peerset, considering only the 
tuning -// peers if there are more peers available. -func (ps *peerSet) medianRTT() time.Duration { - // Gather all the currently measured round trip times - ps.lock.RLock() - defer ps.lock.RUnlock() - - rtts := make([]float64, 0, len(ps.peers)) - for _, p := range ps.peers { - p.lock.RLock() - rtts = append(rtts, float64(p.rtt)) - p.lock.RUnlock() - } - sort.Float64s(rtts) - - median := rttMaxEstimate - if qosTuningPeers <= len(rtts) { - median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers - } else if len(rtts) > 0 { - median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintain even like this some baseline qos) - } - // Restrict the RTT into some QoS defaults, irrelevant of true RTT - if median < rttMinEstimate { - median = rttMinEstimate - } - if median > rttMaxEstimate { - median = rttMaxEstimate - } - return median -} - // peerThroughputSort implements the Sort interface, and allows for // sorting a set of peers by their throughput // The sorted data is with the _highest_ throughput first diff --git a/eth/downloader/peer_test.go b/eth/downloader/peer_test.go deleted file mode 100644 index 4bf0e200bb12..000000000000 --- a/eth/downloader/peer_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package downloader - -import ( - "sort" - "testing" -) - -func TestPeerThroughputSorting(t *testing.T) { - a := &peerConnection{ - id: "a", - headerThroughput: 1.25, - } - b := &peerConnection{ - id: "b", - headerThroughput: 1.21, - } - c := &peerConnection{ - id: "c", - headerThroughput: 1.23, - } - - peers := []*peerConnection{a, b, c} - tps := []float64{a.headerThroughput, - b.headerThroughput, c.headerThroughput} - sortPeers := &peerThroughputSort{peers, tps} - sort.Sort(sortPeers) - if got, exp := sortPeers.p[0].id, "a"; got != exp { - t.Errorf("sort fail, got %v exp %v", got, exp) - } - if got, exp := sortPeers.p[1].id, "c"; got != exp { - t.Errorf("sort fail, got %v exp %v", got, exp) - } - if got, exp := sortPeers.p[2].id, "b"; got != exp { - t.Errorf("sort fail, got %v exp %v", got, exp) - } - -} diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go index ff84a3a8f015..6c53e5577a87 100644 --- a/eth/downloader/statesync.go +++ b/eth/downloader/statesync.go @@ -433,8 +433,8 @@ func (s *stateSync) assignTasks() { peers, _ := s.d.peers.NodeDataIdlePeers() for _, p := range peers { // Assign a batch of fetches proportional to the estimated latency/bandwidth - cap := p.NodeDataCapacity(s.d.requestRTT()) - req := &stateReq{peer: p, timeout: s.d.requestTTL()} + cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip()) + req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()} nodes, _, codes := s.fillTasks(cap, req) diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index e283473207fd..c57fcd71f643 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -37,6 +37,7 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/msgrate" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "golang.org/x/crypto/sha3" @@ -51,14 +52,15 @@ var ( ) const ( - // maxRequestSize is the maximum number of bytes to request from a remote peer. - maxRequestSize = 128 * 1024 + // minRequestSize is the minimum number of bytes to request from a remote peer. + // This number is used as the low cap for account and storage range requests. + // Bytecode and trienode are limited inherently by item count (1). + minRequestSize = 64 * 1024 - // maxStorageSetRequestCount is the maximum number of contracts to request the - // storage of in a single query. If this number is too low, we're not filling - // responses fully and waste round trip times. If it's too high, we're capping - // responses and waste bandwidth. - maxStorageSetRequestCount = maxRequestSize / 1024 + // maxRequestSize is the maximum number of bytes to request from a remote peer. + // This number is used as the high cap for account and storage range requests. + // Bytecode and trienode are limited more explicitly by the caps below. + maxRequestSize = 512 * 1024 // maxCodeRequestCount is the maximum number of bytecode blobs to request in a // single query. If this number is too low, we're not filling responses fully @@ -74,7 +76,7 @@ const ( // a single query. If this number is too low, we're not filling responses fully // and waste round trip times. If it's too high, we're capping responses and // waste bandwidth. - maxTrieRequestCount = 256 + maxTrieRequestCount = maxRequestSize / 512 ) var ( @@ -85,10 +87,6 @@ var ( // storageConcurrency is the number of chunks to split the a large contract // storage trie into to allow concurrent retrievals. 
storageConcurrency = 16 - - // requestTimeout is the maximum time a peer is allowed to spend on serving - // a single network request. - requestTimeout = 15 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync? ) // ErrCancelled is returned from snap syncing if the operation was prematurely @@ -105,8 +103,9 @@ var ErrCancelled = errors.New("sync cancelled") // is only included to allow the runloop to match a response to the task being // synced without having yet another set of maps. type accountRequest struct { - peer string // Peer to which this request is assigned - id uint64 // Request ID of this request + peer string // Peer to which this request is assigned + id uint64 // Request ID of this request + time time.Time // Timestamp when the request was sent deliver chan *accountResponse // Channel to deliver successful response on revert chan *accountRequest // Channel to deliver request failure on @@ -142,8 +141,9 @@ type accountResponse struct { // is only included to allow the runloop to match a response to the task being // synced without having yet another set of maps. type bytecodeRequest struct { - peer string // Peer to which this request is assigned - id uint64 // Request ID of this request + peer string // Peer to which this request is assigned + id uint64 // Request ID of this request + time time.Time // Timestamp when the request was sent deliver chan *bytecodeResponse // Channel to deliver successful response on revert chan *bytecodeRequest // Channel to deliver request failure on @@ -173,8 +173,9 @@ type bytecodeResponse struct { // is only included to allow the runloop to match a response to the task being // synced without having yet another set of maps. type storageRequest struct { - peer string // Peer to which this request is assigned - id uint64 // Request ID of this request + peer string // Peer to which this request is assigned + id uint64 // Request ID of this request + time time.Time // Timestamp when the request was sent deliver chan *storageResponse // Channel to deliver successful response on revert chan *storageRequest // Channel to deliver request failure on @@ -218,8 +219,9 @@ type storageResponse struct { // is only included to allow the runloop to match a response to the task being // synced without having yet another set of maps. type trienodeHealRequest struct { - peer string // Peer to which this request is assigned - id uint64 // Request ID of this request + peer string // Peer to which this request is assigned + id uint64 // Request ID of this request + time time.Time // Timestamp when the request was sent deliver chan *trienodeHealResponse // Channel to deliver successful response on revert chan *trienodeHealRequest // Channel to deliver request failure on @@ -252,8 +254,9 @@ type trienodeHealResponse struct { // is only included to allow the runloop to match a response to the task being // synced without having yet another set of maps. 
type bytecodeHealRequest struct { - peer string // Peer to which this request is assigned - id uint64 // Request ID of this request + peer string // Peer to which this request is assigned + id uint64 // Request ID of this request + time time.Time // Timestamp when the request was sent deliver chan *bytecodeHealResponse // Channel to deliver successful response on revert chan *bytecodeHealRequest // Channel to deliver request failure on @@ -396,6 +399,7 @@ type Syncer struct { peers map[string]SyncPeer // Currently active peers to download from peerJoin *event.Feed // Event feed to react to peers joining peerDrop *event.Feed // Event feed to react to peers dropping + rates *msgrate.Trackers // Message throughput rates for peers // Request tracking during syncing phase statelessPeers map[string]struct{} // Peers that failed to deliver state data @@ -452,6 +456,7 @@ func NewSyncer(db ethdb.KeyValueStore) *Syncer { peers: make(map[string]SyncPeer), peerJoin: new(event.Feed), peerDrop: new(event.Feed), + rates: msgrate.NewTrackers(log.New("proto", "snap")), update: make(chan struct{}, 1), accountIdlers: make(map[string]struct{}), @@ -484,6 +489,7 @@ func (s *Syncer) Register(peer SyncPeer) error { return errors.New("already registered") } s.peers[id] = peer + s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip())) // Mark the peer as idle, even if no sync is running s.accountIdlers[id] = struct{}{} @@ -509,6 +515,7 @@ func (s *Syncer) Unregister(id string) error { return errors.New("not registered") } delete(s.peers, id) + s.rates.Untrack(id) // Remove status markers, even if no sync is running delete(s.statelessPeers, id) @@ -851,10 +858,24 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac s.lock.Lock() defer s.lock.Unlock() - // If there are no idle peers, short circuit assignment - if len(s.accountIdlers) == 0 { + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.accountIdlers)), + caps: make([]float64, 0, len(s.accountIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.accountIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL)) + } + if len(idlers.ids) == 0 { return } + sort.Sort(sort.Reverse(idlers)) + // Iterate over all the tasks and try to find a pending one for _, task := range s.tasks { // Skip any tasks already filling @@ -864,20 +885,15 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac // Task pending retrieval, try to find an idle peer. If no such peer // exists, we probably assigned tasks for all (or they are stateless). // Abort the entire assignment mechanism. 
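[Editor's note: illustrative sketch, not part of the patch.] The assignment loops below replace the old "first idle peer wins" selection with a capacity-ordered list: idle, non-stateless peers are collected together with their estimated capacities, sorted with sort.Reverse so the fastest comes first, and popped from the front as tasks are handed out. Reduced to its essentials (the peer ids and capacities are made up; the sorter mirrors the capacitySort type added later in this patch):

package main

import (
	"fmt"
	"sort"
)

// pairSort orders peer ids by their capacities (ascending); wrapped in
// sort.Reverse it yields the highest-capacity peer first.
type pairSort struct {
	ids  []string
	caps []float64
}

func (s *pairSort) Len() int           { return len(s.ids) }
func (s *pairSort) Less(i, j int) bool { return s.caps[i] < s.caps[j] }
func (s *pairSort) Swap(i, j int) {
	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
}

func main() {
	idlers := &pairSort{
		ids:  []string{"a", "b", "c"},
		caps: []float64{120, 900, 340},
	}
	sort.Sort(sort.Reverse(idlers))

	// Hand the next task to the fastest idle peer, then pop it off the list.
	fmt.Println(idlers.ids[0]) // "b"
	idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:]
}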
- var idle string - for id := range s.accountIdlers { - // If the peer rejected a query in this sync cycle, don't bother asking - // again for anything, it's either out of sync or already pruned - if _, ok := s.statelessPeers[id]; ok { - continue - } - idle = id - break - } - if idle == "" { + if len(idlers.ids) == 0 { return } - peer := s.peers[idle] + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 @@ -895,6 +911,7 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac req := &accountRequest{ peer: idle, id: reqid, + time: time.Now(), deliver: success, revert: fail, cancel: cancel, @@ -903,8 +920,9 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac limit: task.Last, task: task, } - req.timeout = time.AfterFunc(requestTimeout, func() { + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { peer.Log().Debug("Account range request timed out", "reqid", reqid) + s.rates.Update(idle, AccountRangeMsg, 0, 0) s.scheduleRevertAccountRequest(req) }) s.accountReqs[reqid] = req @@ -915,7 +933,13 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac defer s.pend.Done() // Attempt to send the remote request and revert if it fails - if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, maxRequestSize); err != nil { + if cap > maxRequestSize { + cap = maxRequestSize + } + if cap < minRequestSize { // Don't bother with peers below a bare minimum performance + cap = minRequestSize + } + if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil { peer.Log().Debug("Failed to request account range", "err", err) s.scheduleRevertAccountRequest(req) } @@ -931,10 +955,24 @@ func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan * s.lock.Lock() defer s.lock.Unlock() - // If there are no idle peers, short circuit assignment - if len(s.bytecodeIdlers) == 0 { + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.bytecodeIdlers)), + caps: make([]float64, 0, len(s.bytecodeIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.bytecodeIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL)) + } + if len(idlers.ids) == 0 { return } + sort.Sort(sort.Reverse(idlers)) + // Iterate over all the tasks and try to find a pending one for _, task := range s.tasks { // Skip any tasks not in the bytecode retrieval phase @@ -948,20 +986,15 @@ func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan * // Task pending retrieval, try to find an idle peer. If no such peer // exists, we probably assigned tasks for all (or they are stateless). // Abort the entire assignment mechanism. 
- var idle string - for id := range s.bytecodeIdlers { - // If the peer rejected a query in this sync cycle, don't bother asking - // again for anything, it's either out of sync or already pruned - if _, ok := s.statelessPeers[id]; ok { - continue - } - idle = id - break - } - if idle == "" { + if len(idlers.ids) == 0 { return } - peer := s.peers[idle] + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 @@ -976,17 +1009,21 @@ func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan * break } // Generate the network query and send it to the peer - hashes := make([]common.Hash, 0, maxCodeRequestCount) + if cap > maxCodeRequestCount { + cap = maxCodeRequestCount + } + hashes := make([]common.Hash, 0, int(cap)) for hash := range task.codeTasks { delete(task.codeTasks, hash) hashes = append(hashes, hash) - if len(hashes) >= maxCodeRequestCount { + if len(hashes) >= int(cap) { break } } req := &bytecodeRequest{ peer: idle, id: reqid, + time: time.Now(), deliver: success, revert: fail, cancel: cancel, @@ -994,8 +1031,9 @@ func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan * hashes: hashes, task: task, } - req.timeout = time.AfterFunc(requestTimeout, func() { + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { peer.Log().Debug("Bytecode request timed out", "reqid", reqid) + s.rates.Update(idle, ByteCodesMsg, 0, 0) s.scheduleRevertBytecodeRequest(req) }) s.bytecodeReqs[reqid] = req @@ -1020,10 +1058,24 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st s.lock.Lock() defer s.lock.Unlock() - // If there are no idle peers, short circuit assignment - if len(s.storageIdlers) == 0 { + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.storageIdlers)), + caps: make([]float64, 0, len(s.storageIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.storageIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL)) + } + if len(idlers.ids) == 0 { return } + sort.Sort(sort.Reverse(idlers)) + // Iterate over all the tasks and try to find a pending one for _, task := range s.tasks { // Skip any tasks not in the storage retrieval phase @@ -1037,20 +1089,15 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st // Task pending retrieval, try to find an idle peer. If no such peer // exists, we probably assigned tasks for all (or they are stateless). // Abort the entire assignment mechanism. 
- var idle string - for id := range s.storageIdlers { - // If the peer rejected a query in this sync cycle, don't bother asking - // again for anything, it's either out of sync or already pruned - if _, ok := s.statelessPeers[id]; ok { - continue - } - idle = id - break - } - if idle == "" { + if len(idlers.ids) == 0 { return } - peer := s.peers[idle] + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 @@ -1067,9 +1114,17 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st // Generate the network query and send it to the peer. If there are // large contract tasks pending, complete those before diving into // even more new contracts. + if cap > maxRequestSize { + cap = maxRequestSize + } + if cap < minRequestSize { // Don't bother with peers below a bare minimum performance + cap = minRequestSize + } + storageSets := int(cap / 1024) + var ( - accounts = make([]common.Hash, 0, maxStorageSetRequestCount) - roots = make([]common.Hash, 0, maxStorageSetRequestCount) + accounts = make([]common.Hash, 0, storageSets) + roots = make([]common.Hash, 0, storageSets) subtask *storageTask ) for account, subtasks := range task.SubTasks { @@ -1096,7 +1151,7 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st accounts = append(accounts, acccount) roots = append(roots, root) - if len(accounts) >= maxStorageSetRequestCount { + if len(accounts) >= storageSets { break } } @@ -1109,6 +1164,7 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st req := &storageRequest{ peer: idle, id: reqid, + time: time.Now(), deliver: success, revert: fail, cancel: cancel, @@ -1122,8 +1178,9 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st req.origin = subtask.Next req.limit = subtask.Last } - req.timeout = time.AfterFunc(requestTimeout, func() { + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { peer.Log().Debug("Storage request timed out", "reqid", reqid) + s.rates.Update(idle, StorageRangesMsg, 0, 0) s.scheduleRevertStorageRequest(req) }) s.storageReqs[reqid] = req @@ -1138,7 +1195,7 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st if subtask != nil { origin, limit = req.origin[:], req.limit[:] } - if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, maxRequestSize); err != nil { + if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil { log.Debug("Failed to request storage", "err", err) s.scheduleRevertStorageRequest(req) } @@ -1157,10 +1214,24 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai s.lock.Lock() defer s.lock.Unlock() - // If there are no idle peers, short circuit assignment - if len(s.trienodeHealIdlers) == 0 { + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.trienodeHealIdlers)), + caps: make([]float64, 0, len(s.trienodeHealIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.trienodeHealIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL)) + } + if len(idlers.ids) == 0 { return } + sort.Sort(sort.Reverse(idlers)) + // Iterate 
over pending tasks and try to find a peer to retrieve with for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 { // If there are not enough trie tasks queued to fully assign, fill the @@ -1186,20 +1257,15 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai // Task pending retrieval, try to find an idle peer. If no such peer // exists, we probably assigned tasks for all (or they are stateless). // Abort the entire assignment mechanism. - var idle string - for id := range s.trienodeHealIdlers { - // If the peer rejected a query in this sync cycle, don't bother asking - // again for anything, it's either out of sync or already pruned - if _, ok := s.statelessPeers[id]; ok { - continue - } - idle = id - break - } - if idle == "" { + if len(idlers.ids) == 0 { return } - peer := s.peers[idle] + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 @@ -1214,10 +1280,13 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai break } // Generate the network query and send it to the peer + if cap > maxTrieRequestCount { + cap = maxTrieRequestCount + } var ( - hashes = make([]common.Hash, 0, maxTrieRequestCount) - paths = make([]trie.SyncPath, 0, maxTrieRequestCount) - pathsets = make([]TrieNodePathSet, 0, maxTrieRequestCount) + hashes = make([]common.Hash, 0, int(cap)) + paths = make([]trie.SyncPath, 0, int(cap)) + pathsets = make([]TrieNodePathSet, 0, int(cap)) ) for hash, pathset := range s.healer.trieTasks { delete(s.healer.trieTasks, hash) @@ -1226,13 +1295,14 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai paths = append(paths, pathset) pathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash - if len(hashes) >= maxTrieRequestCount { + if len(hashes) >= int(cap) { break } } req := &trienodeHealRequest{ peer: idle, id: reqid, + time: time.Now(), deliver: success, revert: fail, cancel: cancel, @@ -1241,8 +1311,9 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai paths: paths, task: s.healer, } - req.timeout = time.AfterFunc(requestTimeout, func() { + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { peer.Log().Debug("Trienode heal request timed out", "reqid", reqid) + s.rates.Update(idle, TrieNodesMsg, 0, 0) s.scheduleRevertTrienodeHealRequest(req) }) s.trienodeHealReqs[reqid] = req @@ -1267,10 +1338,24 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai s.lock.Lock() defer s.lock.Unlock() - // If there are no idle peers, short circuit assignment - if len(s.bytecodeHealIdlers) == 0 { + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.bytecodeHealIdlers)), + caps: make([]float64, 0, len(s.bytecodeHealIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.bytecodeHealIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL)) + } + if len(idlers.ids) == 0 { return } + sort.Sort(sort.Reverse(idlers)) + // Iterate over pending tasks and try to find a peer to retrieve with for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 { // If there are not enough trie tasks 
queued to fully assign, fill the @@ -1296,20 +1381,15 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai // Task pending retrieval, try to find an idle peer. If no such peer // exists, we probably assigned tasks for all (or they are stateless). // Abort the entire assignment mechanism. - var idle string - for id := range s.bytecodeHealIdlers { - // If the peer rejected a query in this sync cycle, don't bother asking - // again for anything, it's either out of sync or already pruned - if _, ok := s.statelessPeers[id]; ok { - continue - } - idle = id - break - } - if idle == "" { + if len(idlers.ids) == 0 { return } - peer := s.peers[idle] + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] // Matched a pending task to an idle peer, allocate a unique request id var reqid uint64 @@ -1324,18 +1404,22 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai break } // Generate the network query and send it to the peer - hashes := make([]common.Hash, 0, maxCodeRequestCount) + if cap > maxCodeRequestCount { + cap = maxCodeRequestCount + } + hashes := make([]common.Hash, 0, int(cap)) for hash := range s.healer.codeTasks { delete(s.healer.codeTasks, hash) hashes = append(hashes, hash) - if len(hashes) >= maxCodeRequestCount { + if len(hashes) >= int(cap) { break } } req := &bytecodeHealRequest{ peer: idle, id: reqid, + time: time.Now(), deliver: success, revert: fail, cancel: cancel, @@ -1343,8 +1427,9 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai hashes: hashes, task: s.healer, } - req.timeout = time.AfterFunc(requestTimeout, func() { + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid) + s.rates.Update(idle, ByteCodesMsg, 0, 0) s.scheduleRevertBytecodeHealRequest(req) }) s.bytecodeHealReqs[reqid] = req @@ -2142,6 +2227,7 @@ func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, acco return nil } delete(s.accountReqs, id) + s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size)) // Clean up the request timeout timer, we'll see how to proceed further based // on the actual delivered content @@ -2253,6 +2339,7 @@ func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error return nil } delete(s.bytecodeReqs, id) + s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes)) // Clean up the request timeout timer, we'll see how to proceed further based // on the actual delivered content @@ -2361,6 +2448,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo return nil } delete(s.storageReqs, id) + s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size)) // Clean up the request timeout timer, we'll see how to proceed further based // on the actual delivered content @@ -2487,6 +2575,7 @@ func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error return nil } delete(s.trienodeHealReqs, id) + s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes)) // Clean up the request timeout timer, we'll see how to proceed further based // on the actual delivered content @@ -2581,6 +2670,7 @@ func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) e return nil } delete(s.bytecodeHealReqs, id) + s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), 
len(bytecodes)) // Clean up the request timeout timer, we'll see how to proceed further based // on the actual delivered content @@ -2756,3 +2846,24 @@ func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) { } return space.Uint64() - uint64(hashes), nil } + +// capacitySort implements the Sort interface, allowing sorting by peer message +// throughput. Note, callers should use sort.Reverse to get the desired effect +// of highest capacity being at the front. +type capacitySort struct { + ids []string + caps []float64 +} + +func (s *capacitySort) Len() int { + return len(s.ids) +} + +func (s *capacitySort) Less(i, j int) bool { + return s.caps[i] < s.caps[j] +} + +func (s *capacitySort) Swap(i, j int) { + s.ids[i], s.ids[j] = s.ids[j], s.ids[i] + s.caps[i], s.caps[j] = s.caps[j], s.caps[i] +} diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index a1cc3581a85c..023fc8ee0058 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -796,12 +796,6 @@ func TestMultiSyncManyUseless(t *testing.T) { // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { - // We're setting the timeout to very low, to increase the chance of the timeout - // being triggered. This was previously a cause of panic, when a response - // arrived simultaneously as a timeout was triggered. - defer func(old time.Duration) { requestTimeout = old }(requestTimeout) - requestTimeout = time.Millisecond - var ( once sync.Once cancel = make(chan struct{}) @@ -838,6 +832,11 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + // We're setting the timeout to very low, to increase the chance of the timeout + // being triggered. This was previously a cause of panic, when a response + // arrived simultaneously as a timeout was triggered. + syncer.rates.OverrideTTLLimit = time.Millisecond + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) @@ -848,10 +847,6 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all func TestMultiSyncManyUnresponsive(t *testing.T) { - // We're setting the timeout to very low, to make the test run a bit faster - defer func(old time.Duration) { requestTimeout = old }(requestTimeout) - requestTimeout = time.Millisecond - var ( once sync.Once cancel = make(chan struct{}) @@ -888,6 +883,9 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { mkSource("noStorage", true, false, true), mkSource("noTrie", true, true, false), ) + // We're setting the timeout to very low, to make the test run a bit faster + syncer.rates.OverrideTTLLimit = time.Millisecond + done := checkStall(t, term) if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) diff --git a/p2p/msgrate/msgrate.go b/p2p/msgrate/msgrate.go new file mode 100644 index 000000000000..7cd172c5662e --- /dev/null +++ b/p2p/msgrate/msgrate.go @@ -0,0 +1,458 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +// Package msgrate allows estimating the throughput of peers for more balanced syncs. +package msgrate + +import ( + "errors" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum/log" +) + +// measurementImpact is the impact a single measurement has on a peer's final +// capacity value. A value closer to 0 reacts slower to sudden network changes, +// but it is also more stable against temporary hiccups. 0.1 worked well for +// most of Ethereum's existence, so might as well go with it. +const measurementImpact = 0.1 + +// capacityOverestimation is the ratio of items to over-estimate when retrieving +// a peer's capacity to avoid locking into a lower value due to never attempting +// to fetch more than some local stable value. +const capacityOverestimation = 1.01 + +// qosTuningPeers is the number of best peers to tune round trip times based on. +// An Ethereum node doesn't need hundreds of connections to operate correctly, +// so instead of lowering our download speed to the median of potentially many +// bad nodes, we can target a smaller set of very good nodes. At worst this will +// result in fewer nodes to sync from, but that's still better than some hogging +// the pipeline. +const qosTuningPeers = 5 + +// rttMinEstimate is the minimal round trip time to target requests for. Since +// every request entails a 2 way latency + bandwidth + serving database lookups, +// it should be generous enough to permit meaningful work to be done on top of +// the transmission costs. +const rttMinEstimate = 2 * time.Second + +// rttMaxEstimate is the maximal round trip time to target requests for. Although +// the expectation is that a well connected node will never reach this, certain +// special connectivity ones might experience significant delays (e.g. satellite +// uplink with 3s RTT). This value should be low enough to forbid stalling the +// pipeline too long, but large enough to cover the worst of the worst links. +const rttMaxEstimate = 20 * time.Second + +// rttPushdownFactor is a multiplier to attempt forcing quicker requests than +// what the message rate tracker estimates. The reason is that message rate +// tracking adapts queries to the RTT, but multiple RTT values can be perfectly +// valid, they just result in higher packet sizes. Since smaller packets almost +// always result in stabler download streams, this factor hones in on the lowest +// RTT from all the functional ones. +const rttPushdownFactor = 0.9 + +// rttMinConfidence is the minimum value the roundtrip confidence factor may drop +// to. Since the target timeouts are based on how confident the tracker is in the +// true roundtrip, it's important to not allow too huge fluctuations. +const rttMinConfidence = 0.1 + +// ttlScaling is the multiplier that converts the estimated roundtrip time to a +// timeout cap for network requests.
The expectation is that peers' response time +// will fluctuate around the estimated roundtrip, but depending on their load at +// request time, it might be higher than anticipated. This scaling factor ensures +// that we allow remote connections some slack but at the same time do enforce a +// behavior similar to our median peers. +const ttlScaling = 3 + +// ttlLimit is the maximum timeout allowance to prevent reaching crazy numbers +// if some unforeseen network events happen. As much as we try to hone in on +// the most optimal values, it doesn't make any sense to go above a threshold, +// even if everything is slow and screwy. +const ttlLimit = time.Minute + +// tuningConfidenceCap is the number of active peers above which to stop detuning +// the confidence number. The idea here is that once we hone in on the capacity +// of a meaningful number of peers, adding one more should not have a significant +// impact on things, so just run with the originals. +const tuningConfidenceCap = 10 + +// tuningImpact is the influence that a new tuning target has on the previously +// cached value. This number is mostly just an out-of-the-blue heuristic that +// prevents the estimates from jumping around. There's no particular reason for +// the current value. +const tuningImpact = 0.25 + +// Tracker estimates the throughput capacity of a peer with regard to each data +// type it can deliver. The goal is to dynamically adjust request sizes to max +// out network throughput without overloading either the peer or the local node. +// +// By tracking in real time the latencies and bandwidths peers exhibit for each +// packet type, it's possible to prevent overloading by detecting a slowdown on +// one type when another type is pushed too hard. +// +// Similarly, real time measurements also help avoid overloading the local net +// connection if our peers would otherwise be capable of delivering more, but the +// local link is saturated. In that case, the live measurements will force us +// to reduce request sizes until the throughput gets stable. +// +// Lastly, message rate measurements allow us to detect if a peer is unusually +// slow compared to other peers, in which case we can decide to keep it around +// or free up the slot for someone closer. +// +// Since throughput tracking and estimation adapt dynamically to live network +// conditions, it's fine to have multiple trackers locally track the same peer +// in different subsystems. The throughput will simply be distributed across the +// two trackers if both are highly active. +type Tracker struct { + // capacity is the number of items retrievable per second of a given type. + // It is analogous to bandwidth, but we deliberately avoided using bytes + // as the unit, since serving nodes also spend a lot of time loading data + // from disk, which is linear in the number of items, but mostly constant + // in their sizes. + // + // Callers of course are free to use the item counter as a byte counter if + // or when their protocol of choice is capped by bytes instead of items. + // (e.g. eth.getHeaders vs snap.getAccountRange). + capacity map[uint64]float64 + + // roundtrip is the latency with which a peer in general responds to data requests. + // This number is not used inside the tracker, but is exposed to compare + // peers to each other and filter out slow ones. Note however, it only + // makes sense to compare RTTs if the caller caters request sizes for + // each peer to target the same RTT.
There's no need to make this number + the real networking RTT, we just need a number to compare peers with. + roundtrip time.Duration + + lock sync.RWMutex +} + +// NewTracker creates a new message rate tracker for a specific peer. An initial +// RTT is needed to avoid a peer getting marked as an outlier compared to others +// right after joining. It's suggested to use the median RTT across all peers to +// init a new peer tracker. +func NewTracker(caps map[uint64]float64, rtt time.Duration) *Tracker { + if caps == nil { + caps = make(map[uint64]float64) + } + return &Tracker{ + capacity: caps, + roundtrip: rtt, + } +} + +// Capacity calculates the number of items the peer is estimated to be able to +// retrieve within the allotted time slot. The method will round up any division +// errors and will add an additional overestimation ratio on top. The reason for +// overshooting the capacity is because certain message types might not increase +// the load proportionally to the requested items, so fetching a bit more might +// still take the same RTT. By forcefully overshooting by a small amount, we can +// avoid locking into a lower-than-real capacity. +func (t *Tracker) Capacity(kind uint64, targetRTT time.Duration) float64 { + t.lock.RLock() + defer t.lock.RUnlock() + + // Calculate the actual measured throughput + throughput := t.capacity[kind] * float64(targetRTT) / float64(time.Second) + + // Return an overestimation to force the peer out of a stuck minima, adding + // +1 in case the item count is too low for the overestimator to dent + return 1 + capacityOverestimation*throughput +} + +// Update modifies the peer's capacity values for a specific data type with a new +// measurement. If the delivery is zero, the peer is assumed to have either timed +// out or to not have the requested data, resulting in a slash to 0 capacity. This +// avoids assigning the peer retrievals that it won't be able to honour. +func (t *Tracker) Update(kind uint64, elapsed time.Duration, items int) { + t.lock.Lock() + defer t.lock.Unlock() + + // If nothing was delivered (timeout / unavailable data), reduce throughput + // to minimum + if items == 0 { + t.capacity[kind] = 0 + return + } + // Otherwise update the throughput with a new measurement + if elapsed <= 0 { + elapsed = 1 // +1 (ns) to ensure non-zero divisor + } + measured := float64(items) / (float64(elapsed) / float64(time.Second)) + + t.capacity[kind] = (1-measurementImpact)*(t.capacity[kind]) + measurementImpact*measured + t.roundtrip = time.Duration((1-measurementImpact)*float64(t.roundtrip) + measurementImpact*float64(elapsed)) +} + +// Trackers is a set of message rate trackers across a number of peers with the +// goal of aggregating certain measurements across the entire set for outlier +// filtering and newly joining initialization. +type Trackers struct { + trackers map[string]*Tracker + + // roundtrip is the current best guess as to what is a stable round trip time + // across the entire collection of connected peers. This is derived from the + // various trackers added, but is used as a cache to avoid recomputing on each + // network request. The value is updated once every RTT to avoid fluctuations + // caused by hiccups or peer events. + roundtrip time.Duration + + // confidence represents the probability that the estimated roundtrip value + // is the real one across all our peers. The confidence value is used as an + // impact factor of new measurements on old estimates.
As our connectivity + stabilizes, this value gravitates towards 1, new measurements having + almost no impact. If there's a large peer churn and few peers, then new + measurements will impact it more. The confidence is increased with every + packet and dropped with every new connection. + confidence float64 + + // tuned is the time instance the tracker recalculated its cached roundtrip + // value and confidence values. A cleaner way would be to have a heartbeat + // goroutine do it regularly, but that requires a lot of maintenance to just + // run every now and again. + tuned time.Time + + // The fields below can be used to override certain default values. Their + // purpose is to allow quicker tests. Don't use them in production. + OverrideTTLLimit time.Duration + + log log.Logger + lock sync.RWMutex +} + +// NewTrackers creates an empty set of trackers to be filled with peers. +func NewTrackers(log log.Logger) *Trackers { + return &Trackers{ + trackers: make(map[string]*Tracker), + roundtrip: rttMaxEstimate, + confidence: 1, + tuned: time.Now(), + OverrideTTLLimit: ttlLimit, + log: log, + } +} + +// Track inserts a new tracker into the set. +func (t *Trackers) Track(id string, tracker *Tracker) error { + t.lock.Lock() + defer t.lock.Unlock() + + if _, ok := t.trackers[id]; ok { + return errors.New("already tracking") + } + t.trackers[id] = tracker + t.detune() + + return nil +} + +// Untrack stops tracking a previously added peer. +func (t *Trackers) Untrack(id string) error { + t.lock.Lock() + defer t.lock.Unlock() + + if _, ok := t.trackers[id]; !ok { + return errors.New("not tracking") + } + delete(t.trackers, id) + return nil +} + +// MedianRoundTrip returns the median RTT across all known trackers. The purpose +// of the median RTT is to initialize a new peer with sane statistics that it will +// hopefully outperform. If it seriously underperforms, there's a risk of dropping +// the peer, but that is ok as we're aiming for a strong median. +func (t *Trackers) MedianRoundTrip() time.Duration { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.medianRoundTrip() +} + +// medianRoundTrip is the internal lockless version of MedianRoundTrip to be used +// by the QoS tuner. +func (t *Trackers) medianRoundTrip() time.Duration { + // Gather all the currently measured round trip times + rtts := make([]float64, 0, len(t.trackers)) + for _, tt := range t.trackers { + tt.lock.RLock() + rtts = append(rtts, float64(tt.roundtrip)) + tt.lock.RUnlock() + } + sort.Float64s(rtts) + + median := rttMaxEstimate + if qosTuningPeers <= len(rtts) { + median = time.Duration(rtts[qosTuningPeers/2]) // Median of our best few peers + } else if len(rtts) > 0 { + median = time.Duration(rtts[len(rtts)/2]) // Median of all our connected peers + } + // Restrict the RTT into some QoS defaults, irrelevant of true RTT + if median < rttMinEstimate { + median = rttMinEstimate + } + if median > rttMaxEstimate { + median = rttMaxEstimate + } + return median +} + +// MeanCapacities returns the capacities averaged across all the added trackers. +// The purpose of the mean capacities is to initialize a new peer with some sane +// starting values that it will hopefully outperform. If the mean overshoots, the +// peer will be cut back to minimal capacity and given another chance. +func (t *Trackers) MeanCapacities() map[uint64]float64 { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.meanCapacities() +} + +// meanCapacities is the internal lockless version of MeanCapacities used for +// debug logging.
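[Editor's note: illustrative worked example, not part of the patch.] Tracker.Update above folds every delivery into an exponential moving average with measurementImpact = 0.1, and Capacity scales the stored rate to the requested round trip with a 1% overestimation plus one item. With made-up numbers (the constants are the ones defined in this file):

	previous estimate: 100 items/s
	new delivery:      150 items in 500ms  ->  measured 300 items/s
	updated estimate:  0.9*100 + 0.1*300 = 120 items/s
	Capacity for a 600ms target RTT: 1 + 1.01*(120*0.6) = 73.72  ->  ~74 items after the caller's math.Ceil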
+func (t *Trackers) meanCapacities() map[uint64]float64 { + capacities := make(map[uint64]float64) + for _, tt := range t.trackers { + tt.lock.RLock() + for key, val := range tt.capacity { + capacities[key] += val + } + tt.lock.RUnlock() + } + for key, val := range capacities { + capacities[key] = val / float64(len(t.trackers)) + } + return capacities +} + +// TargetRoundTrip returns the current target round trip time for a request to +// complete in.The returned RTT is slightly under the estimated RTT. The reason +// is that message rate estimation is a 2 dimensional problem which is solvable +// for any RTT. The goal is to gravitate towards smaller RTTs instead of large +// messages, to result in a stabler download stream. +func (t *Trackers) TargetRoundTrip() time.Duration { + // Recalculate the internal caches if it's been a while + t.tune() + + // Caches surely recent, return target roundtrip + t.lock.RLock() + defer t.lock.RUnlock() + + return time.Duration(float64(t.roundtrip) * rttPushdownFactor) +} + +// TargetTimeout returns the timeout allowance for a single request to finish +// under. The timeout is proportional to the roundtrip, but also takes into +// consideration the tracker's confidence in said roundtrip and scales it +// accordingly. The final value is capped to avoid runaway requests. +func (t *Trackers) TargetTimeout() time.Duration { + // Recalculate the internal caches if it's been a while + t.tune() + + // Caches surely recent, return target timeout + t.lock.RLock() + defer t.lock.RUnlock() + + return t.targetTimeout() +} + +// targetTimeout is the internal lockless version of TargetTimeout to be used +// during QoS tuning. +func (t *Trackers) targetTimeout() time.Duration { + timeout := time.Duration(ttlScaling * float64(t.roundtrip) / t.confidence) + if timeout > t.OverrideTTLLimit { + timeout = t.OverrideTTLLimit + } + return timeout +} + +// tune gathers the individual tracker statistics and updates the estimated +// request round trip time. +func (t *Trackers) tune() { + // Tune may be called concurrently all over the place, but we only want to + // periodically update and even then only once. First check if it was updated + // recently and abort if so. + t.lock.RLock() + dirty := time.Since(t.tuned) > t.roundtrip + t.lock.RUnlock() + if !dirty { + return + } + // If an update is needed, obtain a write lock but make sure we don't update + // it on all concurrent threads one by one. + t.lock.Lock() + defer t.lock.Unlock() + + if dirty := time.Since(t.tuned) > t.roundtrip; !dirty { + return // A concurrent request beat us to the tuning + } + // First thread reaching the tuning point, update the estimates and return + t.roundtrip = time.Duration((1-tuningImpact)*float64(t.roundtrip) + tuningImpact*float64(t.medianRoundTrip())) + t.confidence = t.confidence + (1-t.confidence)/2 + + t.tuned = time.Now() + t.log.Debug("Recalculated msgrate QoS values", "rtt", t.roundtrip, "confidence", t.confidence, "ttl", t.targetTimeout(), "next", t.tuned.Add(t.roundtrip)) + t.log.Trace("Debug dump of mean capacities", "caps", log.Lazy{Fn: t.meanCapacities}) +} + +// detune reduces the tracker's confidence in order to make fresh measurements +// have a larger impact on the estimates. It is meant to be used during new peer +// connections so they can have a proper impact on the estimates. 
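[Editor's note: illustrative worked example, not part of the patch.] targetTimeout above multiplies the tracked round trip by ttlScaling, divides by the current confidence, and caps the result at the TTL limit. Using the constants defined in this file (ttlScaling = 3, ttlLimit = 1 minute, rttMinConfidence = 0.1) and a made-up 2s round trip:

	confidence 1.0:  3 * 2s / 1.0 = 6s
	confidence 0.5:  3 * 2s / 0.5 = 12s   (after a burst of new peers detuned the estimate)
	confidence 0.1:  3 * 2s / 0.1 = 60s   (the floor; exactly the 1 minute cap)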
+func (t *Trackers) detune() { + // If we have a single peer, confidence is always 1 + if len(t.trackers) == 1 { + t.confidence = 1 + return + } + // If we have a ton of peers, don't drop the confidence since there's enough + // remaining to retain the same throughput + if len(t.trackers) >= tuningConfidenceCap { + return + } + // Otherwise drop the confidence factor + peers := float64(len(t.trackers)) + + t.confidence = t.confidence * (peers - 1) / peers + if t.confidence < rttMinConfidence { + t.confidence = rttMinConfidence + } + t.log.Debug("Relaxed msgrate QoS values", "rtt", t.roundtrip, "confidence", t.confidence, "ttl", t.targetTimeout()) +} + +// Capacity is a helper function to access a specific tracker without having to +// track it explicitly outside. +func (t *Trackers) Capacity(id string, kind uint64, targetRTT time.Duration) float64 { + t.lock.RLock() + defer t.lock.RUnlock() + + tracker := t.trackers[id] + if tracker == nil { + return 1 // Unregister race, don't return 0, it's a dangerous number + } + return tracker.Capacity(kind, targetRTT) +} + +// Update is a helper function to access a specific tracker without having to +// track it explicitly outside. +func (t *Trackers) Update(id string, kind uint64, elapsed time.Duration, items int) { + t.lock.RLock() + defer t.lock.RUnlock() + + if tracker := t.trackers[id]; tracker != nil { + tracker.Update(kind, elapsed, items) + } +} From 16bc57438bd3d28e947d12f6f295da62e4ca9e26 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 20 May 2021 09:24:41 +0200 Subject: [PATCH 039/208] p2p/dnsdisc: fix crash when iterator closed before first call to Next (#22906) --- p2p/dnsdisc/client.go | 6 ++++++ p2p/dnsdisc/client_test.go | 15 +++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index f2a4bed4c6e4..096df06a54f9 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -298,6 +298,12 @@ func (it *randomIterator) pickTree() *clientTree { it.mu.Lock() defer it.mu.Unlock() + // First check if iterator was closed. + // Need to do this here to avoid nil map access in rebuildTrees. + if it.trees == nil { + return nil + } + // Rebuild the trees map if any links have changed. if it.lc.changed { it.rebuildTrees() diff --git a/p2p/dnsdisc/client_test.go b/p2p/dnsdisc/client_test.go index 741bee4230be..9320dd667a94 100644 --- a/p2p/dnsdisc/client_test.go +++ b/p2p/dnsdisc/client_test.go @@ -115,6 +115,21 @@ func TestIterator(t *testing.T) { checkIterator(t, it, nodes) } +func TestIteratorCloseWithoutNext(t *testing.T) { + tree1, url1 := makeTestTree("t1", nil, nil) + c := NewClient(Config{Resolver: newMapResolver(tree1.ToTXT("t1"))}) + it, err := c.NewIterator(url1) + if err != nil { + t.Fatal(err) + } + + it.Close() + ok := it.Next() + if ok { + t.Fatal("Next returned true after Close") + } +} + // This test checks if closing randomIterator races. 
func TestIteratorClose(t *testing.T) { nodes := testNodes(nodesSeed1, 500) From a6c462781f2ebac39b8bbcbbfeb01a6e70b46997 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Fri, 21 May 2021 09:59:26 +0200 Subject: [PATCH 040/208] EIP-1559: miner changes (#22896) * core/types, miner: create TxWithMinerFee wrapper, add EIP-1559 support to TransactionsByMinerFeeAndNonce miner: set base fee when creating a new header, handle gas limit, log miner fees * all: rename to NewTransactionsByPriceAndNonce * core/types, miner: rename to NewTransactionsByPriceAndNonce + EffectiveTip miner: activate 1559 for testGenerateBlockAndImport tests * core,miner: revert naming to TransactionsByPriceAndTime * core/types/transaction: update effective tip calculation logic * miner: update aleut to london * core/types/transaction_test: use correct signer for 1559 txs + add back sender check * miner/worker: calculate gas target from gas limit * core, miner: fix block gas limits for 1559 Co-authored-by: Ansgar Dietrichs Co-authored-by: lightclient@protonmail.com --- core/bench_test.go | 2 +- core/block_validator.go | 36 ++++++++++++--- core/block_validator_test.go | 33 ++++++++++++++ core/chain_makers.go | 2 +- core/state_processor_test.go | 2 +- core/types/transaction.go | 80 +++++++++++++++++++++++++--------- core/types/transaction_test.go | 63 ++++++++++++++++++++++---- eth/catalyst/api.go | 2 +- miner/worker.go | 28 ++++++++---- miner/worker_test.go | 6 ++- 10 files changed, 206 insertions(+), 48 deletions(-) diff --git a/core/bench_test.go b/core/bench_test.go index 0c49907e6433..ce288d372e08 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -112,7 +112,7 @@ func genTxRing(naccounts int) func(int, *BlockGen) { from := 0 return func(i int, gen *BlockGen) { block := gen.PrevBlock(i - 1) - gas := CalcGasLimit(block, block.GasLimit(), block.GasLimit()) + gas := block.GasLimit() for { gas -= params.TxGas if gas < params.TxGas { diff --git a/core/block_validator.go b/core/block_validator.go index 8dbd0f75520e..d317d82ed48d 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -106,12 +106,12 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD // to keep the baseline gas above the provided floor, and increase it towards the // ceil if the blocks are full. If the ceil is exceeded, it will always decrease // the gas allowance. -func CalcGasLimit(parent *types.Block, gasFloor, gasCeil uint64) uint64 { +func CalcGasLimit(parentGasUsed, parentGasLimit, gasFloor, gasCeil uint64) uint64 { // contrib = (parentGasUsed * 3 / 2) / 1024 - contrib := (parent.GasUsed() + parent.GasUsed()/2) / params.GasLimitBoundDivisor + contrib := (parentGasUsed + parentGasUsed/2) / params.GasLimitBoundDivisor // decay = parentGasLimit / 1024 -1 - decay := parent.GasLimit()/params.GasLimitBoundDivisor - 1 + decay := parentGasLimit/params.GasLimitBoundDivisor - 1 /* strategy: gasLimit of block-to-mine is set based on parent's @@ -120,21 +120,45 @@ func CalcGasLimit(parent *types.Block, gasFloor, gasCeil uint64) uint64 { at that usage) the amount increased/decreased depends on how far away from parentGasLimit * (2/3) parentGasUsed is. 
*/ - limit := parent.GasLimit() - decay + contrib + limit := parentGasLimit - decay + contrib if limit < params.MinGasLimit { limit = params.MinGasLimit } // If we're outside our allowed gas range, we try to hone towards them if limit < gasFloor { - limit = parent.GasLimit() + decay + limit = parentGasLimit + decay if limit > gasFloor { limit = gasFloor } } else if limit > gasCeil { - limit = parent.GasLimit() - decay + limit = parentGasLimit - decay if limit < gasCeil { limit = gasCeil } } return limit } + +// CalcGasLimit1559 calculates the next block gas limit under 1559 rules. +func CalcGasLimit1559(parentGasLimit, desiredLimit uint64) uint64 { + delta := parentGasLimit/params.GasLimitBoundDivisor - 1 + limit := parentGasLimit + if desiredLimit < params.MinGasLimit { + desiredLimit = params.MinGasLimit + } + // If we're outside our allowed gas range, we try to hone towards them + if limit < desiredLimit { + limit = parentGasLimit + delta + if limit > desiredLimit { + limit = desiredLimit + } + return limit + } + if limit > desiredLimit { + limit = parentGasLimit - delta + if limit < desiredLimit { + limit = desiredLimit + } + } + return limit +} diff --git a/core/block_validator_test.go b/core/block_validator_test.go index dfb37b88cf8a..3b4de337891c 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -197,3 +197,36 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) { t.Errorf("verification count too large: have %d, want below %d", verified, 2*threads) } } + +func TestCalcGasLimit1559(t *testing.T) { + + for i, tc := range []struct { + pGasLimit uint64 + max uint64 + min uint64 + }{ + {20000000, 20019530, 19980470}, + {40000000, 40039061, 39960939}, + } { + // Increase + if have, want := CalcGasLimit1559(tc.pGasLimit, 2*tc.pGasLimit), tc.max; have != want { + t.Errorf("test %d: have %d want <%d", i, have, want) + } + // Decrease + if have, want := CalcGasLimit1559(tc.pGasLimit, 0), tc.min; have != want { + t.Errorf("test %d: have %d want >%d", i, have, want) + } + // Small decrease + if have, want := CalcGasLimit1559(tc.pGasLimit, tc.pGasLimit-1), tc.pGasLimit-1; have != want { + t.Errorf("test %d: have %d want %d", i, have, want) + } + // Small increase + if have, want := CalcGasLimit1559(tc.pGasLimit, tc.pGasLimit+1), tc.pGasLimit+1; have != want { + t.Errorf("test %d: have %d want %d", i, have, want) + } + // No change + if have, want := CalcGasLimit1559(tc.pGasLimit, tc.pGasLimit), tc.pGasLimit; have != want { + t.Errorf("test %d: have %d want %d", i, have, want) + } + } +} diff --git a/core/chain_makers.go b/core/chain_makers.go index b1b7dc3591df..f7353ffce3f8 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -263,7 +263,7 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S Difficulty: parent.Difficulty(), UncleHash: parent.UncleHash(), }), - GasLimit: CalcGasLimit(parent, parent.GasLimit(), parent.GasLimit()), + GasLimit: parent.GasLimit(), Number: new(big.Int).Add(parent.Number(), common.Big1), Time: time, } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index c15d3d276dbe..ab446eb0a209 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -223,7 +223,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr Difficulty: parent.Difficulty(), UncleHash: parent.UncleHash(), }), - GasLimit: CalcGasLimit(parent, parent.GasLimit(), parent.GasLimit()), + GasLimit: parent.GasLimit(), Number: 
new(big.Int).Add(parent.Number(), common.Big1), Time: parent.Time() + 10, UncleHash: types.EmptyUncleHash, diff --git a/core/types/transaction.go b/core/types/transaction.go index ace1843e932b..5347fdab8cd9 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -36,6 +36,7 @@ var ( ErrUnexpectedProtection = errors.New("transaction type does not supported EIP-155 protected signatures") ErrInvalidTxType = errors.New("transaction type not valid in this context") ErrTxTypeNotSupported = errors.New("transaction type not supported") + ErrFeeCapTooLow = errors.New("fee cap less than base fee") errEmptyTypedTx = errors.New("empty typed transaction bytes") ) @@ -299,6 +300,19 @@ func (tx *Transaction) Cost() *big.Int { return total } +// EffectiveTip returns the effective miner tip for the given base fee. +// Returns error in case of a negative effective miner tip. +func (tx *Transaction) EffectiveTip(baseFee *big.Int) (*big.Int, error) { + if baseFee == nil { + return tx.Tip(), nil + } + feeCap := tx.FeeCap() + if feeCap.Cmp(baseFee) == -1 { + return nil, ErrFeeCapTooLow + } + return math.BigMin(tx.Tip(), feeCap.Sub(feeCap, baseFee)), nil +} + // RawSignatureValues returns the V, R, S signature values of the transaction. // The return values should not be modified by the caller. func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) { @@ -400,24 +414,44 @@ func (s TxByNonce) Len() int { return len(s) } func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() } func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// TxWithMinerFee wraps a transaction with its gas price or effective miner tip +type TxWithMinerFee struct { + tx *Transaction + minerFee *big.Int +} + +// NewTxWithMinerFee creates a wrapped transaction, calculating the effective +// miner tip if a base fee is provided. +// Returns error in case of a negative effective miner tip. +func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) { + minerFee, err := tx.EffectiveTip(baseFee) + if err != nil { + return nil, err + } + return &TxWithMinerFee{ + tx: tx, + minerFee: minerFee, + }, nil +} + // TxByPriceAndTime implements both the sort and the heap interface, making it useful // for all at once sorting as well as individually adding and removing elements. -type TxByPriceAndTime Transactions +type TxByPriceAndTime []*TxWithMinerFee func (s TxByPriceAndTime) Len() int { return len(s) } func (s TxByPriceAndTime) Less(i, j int) bool { // If the prices are equal, use the time the transaction was first seen for // deterministic sorting - cmp := s[i].GasPrice().Cmp(s[j].GasPrice()) + cmp := s[i].minerFee.Cmp(s[j].minerFee) if cmp == 0 { - return s[i].time.Before(s[j].time) + return s[i].tx.time.Before(s[j].tx.time) } return cmp > 0 } func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s *TxByPriceAndTime) Push(x interface{}) { - *s = append(*s, x.(*Transaction)) + *s = append(*s, x.(*TxWithMinerFee)) } func (s *TxByPriceAndTime) Pop() interface{} { @@ -432,9 +466,10 @@ func (s *TxByPriceAndTime) Pop() interface{} { // transactions in a profit-maximizing sorted order, while supporting removing // entire batches of transactions for non-executable accounts. 
type TransactionsByPriceAndNonce struct { - txs map[common.Address]Transactions // Per account nonce-sorted list of transactions - heads TxByPriceAndTime // Next transaction for each unique account (price heap) - signer Signer // Signer for the set of transactions + txs map[common.Address]Transactions // Per account nonce-sorted list of transactions + heads TxByPriceAndTime // Next transaction for each unique account (price heap) + signer Signer // Signer for the set of transactions + baseFee *big.Int // Current base fee } // NewTransactionsByPriceAndNonce creates a transaction set that can retrieve @@ -442,25 +477,28 @@ type TransactionsByPriceAndNonce struct { // // Note, the input map is reowned so the caller should not interact any more with // if after providing it to the constructor. -func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions) *TransactionsByPriceAndNonce { +func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce { // Initialize a price and received time based heap with the head transactions heads := make(TxByPriceAndTime, 0, len(txs)) for from, accTxs := range txs { - // Ensure the sender address is from the signer - if acc, _ := Sender(signer, accTxs[0]); acc != from { + acc, _ := Sender(signer, accTxs[0]) + wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee) + // Remove transaction if sender doesn't match from, or if wrapping fails. + if acc != from || err != nil { delete(txs, from) continue } - heads = append(heads, accTxs[0]) + heads = append(heads, wrapped) txs[from] = accTxs[1:] } heap.Init(&heads) // Assemble and return the transaction set return &TransactionsByPriceAndNonce{ - txs: txs, - heads: heads, - signer: signer, + txs: txs, + heads: heads, + signer: signer, + baseFee: baseFee, } } @@ -469,18 +507,20 @@ func (t *TransactionsByPriceAndNonce) Peek() *Transaction { if len(t.heads) == 0 { return nil } - return t.heads[0] + return t.heads[0].tx } // Shift replaces the current best head with the next one from the same account. func (t *TransactionsByPriceAndNonce) Shift() { - acc, _ := Sender(t.signer, t.heads[0]) + acc, _ := Sender(t.signer, t.heads[0].tx) if txs, ok := t.txs[acc]; ok && len(txs) > 0 { - t.heads[0], t.txs[acc] = txs[0], txs[1:] - heap.Fix(&t.heads, 0) - } else { - heap.Pop(&t.heads) + if wrapped, err := NewTxWithMinerFee(txs[0], t.baseFee); err == nil { + t.heads[0], t.txs[acc] = wrapped, txs[1:] + heap.Fix(&t.heads, 0) + return + } } + heap.Pop(&t.heads) } // Pop removes the best transaction, *not* replacing it with the next one from diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 3cece9c2358d..7c30834c025b 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -22,6 +22,7 @@ import ( "encoding/json" "fmt" "math/big" + "math/rand" "reflect" "testing" "time" @@ -258,36 +259,77 @@ func TestRecipientNormal(t *testing.T) { } } +func TestTransactionPriceNonceSortLegacy(t *testing.T) { + testTransactionPriceNonceSort(t, nil) +} + +func TestTransactionPriceNonceSort1559(t *testing.T) { + testTransactionPriceNonceSort(t, big.NewInt(0)) + testTransactionPriceNonceSort(t, big.NewInt(5)) + testTransactionPriceNonceSort(t, big.NewInt(50)) +} + // Tests that transactions can be correctly sorted according to their price in // decreasing order, but at the same time with increasing nonces when issued by // the same account. 
-func TestTransactionPriceNonceSort(t *testing.T) { +func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { // Generate a batch of accounts to start with keys := make([]*ecdsa.PrivateKey, 25) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() } - signer := HomesteadSigner{} + signer := LatestSignerForChainID(common.Big1) // Generate a batch of transactions with overlapping values, but shifted nonces groups := map[common.Address]Transactions{} + expectedCount := 0 for start, key := range keys { addr := crypto.PubkeyToAddress(key.PublicKey) + count := 25 for i := 0; i < 25; i++ { - tx, _ := SignTx(NewTransaction(uint64(start+i), common.Address{}, big.NewInt(100), 100, big.NewInt(int64(start+i)), nil), signer, key) + var tx *Transaction + feeCap := rand.Intn(50) + if baseFee == nil { + tx = NewTx(&LegacyTx{ + Nonce: uint64(start + i), + To: &common.Address{}, + Value: big.NewInt(100), + Gas: 100, + GasPrice: big.NewInt(int64(feeCap)), + Data: nil, + }) + } else { + tx = NewTx(&DynamicFeeTx{ + Nonce: uint64(start + i), + To: &common.Address{}, + Value: big.NewInt(100), + Gas: 100, + FeeCap: big.NewInt(int64(feeCap)), + Tip: big.NewInt(int64(rand.Intn(feeCap + 1))), + Data: nil, + }) + if count == 25 && int64(feeCap) < baseFee.Int64() { + count = i + } + } + tx, err := SignTx(tx, signer, key) + if err != nil { + t.Fatalf("failed to sign tx: %s", err) + } groups[addr] = append(groups[addr], tx) } + expectedCount += count } // Sort the transactions and cross check the nonce ordering - txset := NewTransactionsByPriceAndNonce(signer, groups) + txset := NewTransactionsByPriceAndNonce(signer, groups, baseFee) txs := Transactions{} for tx := txset.Peek(); tx != nil; tx = txset.Peek() { txs = append(txs, tx) txset.Shift() } - if len(txs) != 25*25 { - t.Errorf("expected %d transactions, found %d", 25*25, len(txs)) + if len(txs) != expectedCount { + t.Errorf("expected %d transactions, found %d", expectedCount, len(txs)) } for i, txi := range txs { fromi, _ := Sender(signer, txi) @@ -303,7 +345,12 @@ func TestTransactionPriceNonceSort(t *testing.T) { if i+1 < len(txs) { next := txs[i+1] fromNext, _ := Sender(signer, next) - if fromi != fromNext && txi.GasPrice().Cmp(next.GasPrice()) < 0 { + tip, err := txi.EffectiveTip(baseFee) + nextTip, nextErr := next.EffectiveTip(baseFee) + if err != nil || nextErr != nil { + t.Errorf("error calculating effective tip") + } + if fromi != fromNext && tip.Cmp(nextTip) < 0 { t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) } } @@ -331,7 +378,7 @@ func TestTransactionTimeSort(t *testing.T) { groups[addr] = append(groups[addr], tx) } // Sort the transactions and cross check the nonce ordering - txset := NewTransactionsByPriceAndNonce(signer, groups) + txset := NewTransactionsByPriceAndNonce(signer, groups, nil) txs := Transactions{} for tx := txset.Peek(); tx != nil; tx = txset.Peek() { diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index d7e2af1c1a5d..d577e2a9ec08 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -155,7 +155,7 @@ func (api *consensusAPI) AssembleBlock(params assembleBlockParams) (*executableD var ( signer = types.MakeSigner(bc.Config(), header.Number) - txHeap = types.NewTransactionsByPriceAndNonce(signer, pending) + txHeap = types.NewTransactionsByPriceAndNonce(signer, pending, nil) transactions []*types.Transaction ) for { diff --git a/miner/worker.go b/miner/worker.go index 2cee6af0c326..f9aae0fdc9ba 100644 
--- a/miner/worker.go +++ b/miner/worker.go @@ -499,7 +499,7 @@ func (w *worker) mainLoop() { acc, _ := types.Sender(w.current.signer, tx) txs[acc] = append(txs[acc], tx) } - txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs) + txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee) tcount := w.current.tcount w.commitTransactions(txset, coinbase, nil) // Only update the snapshot if any new transactons were added @@ -753,8 +753,9 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin return true } + gasLimit := w.current.header.GasLimit if w.current.gasPool == nil { - w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit) + w.current.gasPool = new(core.GasPool).AddGas(gasLimit) } var coalescedLogs []*types.Log @@ -769,7 +770,7 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { // Notify resubmit loop to increase resubmitting interval due to too frequent commits. if atomic.LoadInt32(interrupt) == commitInterruptResubmit { - ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit) + ratio := float64(gasLimit-w.current.gasPool.Gas()) / float64(gasLimit) if ratio < 0.1 { ratio = 0.1 } @@ -880,10 +881,20 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) header := &types.Header{ ParentHash: parent.Hash(), Number: num.Add(num, common.Big1), - GasLimit: core.CalcGasLimit(parent, w.config.GasFloor, w.config.GasCeil), + GasLimit: core.CalcGasLimit(parent.GasUsed(), parent.GasLimit(), w.config.GasFloor, w.config.GasCeil), Extra: w.extra, Time: uint64(timestamp), } + // Set baseFee and GasLimit if we are on an EIP-1559 chain + if w.chainConfig.IsLondon(header.Number) { + header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header()) + parentGasLimit := parent.GasLimit() + if !w.chainConfig.IsLondon(parent.Number()) { + // Bump by 2x + parentGasLimit = parent.GasLimit() * params.ElasticityMultiplier + } + header.GasLimit = core.CalcGasLimit1559(parentGasLimit, w.config.GasCeil) + } // Only set the coinbase if our consensus engine is running (avoid spurious block rewards) if w.isRunning() { if w.coinbase == (common.Address{}) { @@ -973,13 +984,13 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) } } if len(localTxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs) + txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs, header.BaseFee) if w.commitTransactions(txs, w.coinbase, interrupt) { return } } if len(remoteTxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs) + txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs, header.BaseFee) if w.commitTransactions(txs, w.coinbase, interrupt) { return } @@ -1037,11 +1048,12 @@ func (w *worker) postSideBlock(event core.ChainSideEvent) { } } -// totalFees computes total consumed fees in ETH. Block transactions and receipts have to have the same order. +// totalFees computes total consumed miner fees in ETH. Block transactions and receipts have to have the same order. 
func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { feesWei := new(big.Int) for i, tx := range block.Transactions() { - feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice())) + minerFee, _ := tx.EffectiveTip(block.BaseFee()) + feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee)) } return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) } diff --git a/miner/worker_test.go b/miner/worker_test.go index 0fe62316e1f1..0a1e55ff337f 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -182,10 +182,11 @@ func (b *testWorkerBackend) newRandomUncle() *types.Block { func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { var tx *types.Transaction + gasPrice := big.NewInt(10 * params.InitialBaseFee) if creation { - tx, _ = types.SignTx(types.NewContractCreation(b.txPool.Nonce(testBankAddress), big.NewInt(0), testGas, nil, common.FromHex(testCode)), types.HomesteadSigner{}, testBankKey) + tx, _ = types.SignTx(types.NewContractCreation(b.txPool.Nonce(testBankAddress), big.NewInt(0), testGas, gasPrice, common.FromHex(testCode)), types.HomesteadSigner{}, testBankKey) } else { - tx, _ = types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey) + tx, _ = types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(1000), params.TxGas, gasPrice, nil), types.HomesteadSigner{}, testBankKey) } return tx } @@ -221,6 +222,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) { engine = ethash.NewFaker() } + chainConfig.LondonBlock = big.NewInt(0) w, b := newTestWorker(t, chainConfig, engine, db, 0) defer w.close() From 81662fe82788a7b66aa95de86d7afd2cb4567370 Mon Sep 17 00:00:00 2001 From: Evolution404 <35091674+Evolution404@users.noreply.github.com> Date: Fri, 21 May 2021 16:33:59 +0800 Subject: [PATCH 041/208] core/rawdb: handle prefix in table.compact method (#22911) --- core/rawdb/table.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 323ef6293cab..d5ef60ae50ef 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -131,6 +131,8 @@ func (t *table) Compact(start []byte, limit []byte) error { // If no start was specified, use the table prefix as the first value if start == nil { start = []byte(t.prefix) + } else { + start = append([]byte(t.prefix), start...) } // If no limit was specified, use the first element not matching the prefix // as the limit @@ -147,6 +149,8 @@ func (t *table) Compact(start []byte, limit []byte) error { limit = nil } } + } else { + limit = append([]byte(t.prefix), limit...) 
} // Range correctly calculated based on table prefix, delegate down return t.db.Compact(start, limit) From 835fe06f1dba8ce2764d6d501f977c22e97eac9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 21 May 2021 12:36:04 +0300 Subject: [PATCH 042/208] les: generate random nums directly, not via strange conversions --- les/client_handler.go | 7 ++++--- les/fetcher.go | 2 +- les/odr.go | 5 +++-- les/retrieve.go | 9 --------- les/txrelay.go | 3 ++- 5 files changed, 10 insertions(+), 16 deletions(-) diff --git a/les/client_handler.go b/les/client_handler.go index 73149975c3a5..e95996c51f6f 100644 --- a/les/client_handler.go +++ b/les/client_handler.go @@ -19,6 +19,7 @@ package les import ( "context" "math/big" + "math/rand" "sync" "sync/atomic" "time" @@ -388,7 +389,7 @@ func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, s return dp.(*serverPeer) == pc.peer }, request: func(dp distPeer) func() { - reqID := genReqID() + reqID := rand.Uint64() peer := dp.(*serverPeer) cost := peer.getRequestCost(GetBlockHeadersMsg, amount) peer.fcServer.QueuedRequest(reqID, cost) @@ -412,7 +413,7 @@ func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip return dp.(*serverPeer) == pc.peer }, request: func(dp distPeer) func() { - reqID := genReqID() + reqID := rand.Uint64() peer := dp.(*serverPeer) cost := peer.getRequestCost(GetBlockHeadersMsg, amount) peer.fcServer.QueuedRequest(reqID, cost) @@ -429,7 +430,7 @@ func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip // RetrieveSingleHeaderByNumber requests a single header by the specified block // number. This function will wait the response until it's timeout or delivered. func (pc *peerConnection) RetrieveSingleHeaderByNumber(context context.Context, number uint64) (*types.Header, error) { - reqID := genReqID() + reqID := rand.Uint64() rq := &distReq{ getCost: func(dp distPeer) uint64 { peer := dp.(*serverPeer) diff --git a/les/fetcher.go b/les/fetcher.go index fc4c5e386a56..a6d1c93c4b71 100644 --- a/les/fetcher.go +++ b/les/fetcher.go @@ -507,7 +507,7 @@ func (f *lightFetcher) requestHeaderByHash(peerid enode.ID) func(common.Hash) er getCost: func(dp distPeer) uint64 { return dp.(*serverPeer).getRequestCost(GetBlockHeadersMsg, 1) }, canSend: func(dp distPeer) bool { return dp.(*serverPeer).ID() == peerid }, request: func(dp distPeer) func() { - peer, id := dp.(*serverPeer), genReqID() + peer, id := dp.(*serverPeer), rand.Uint64() cost := peer.getRequestCost(GetBlockHeadersMsg, 1) peer.fcServer.QueuedRequest(id, cost) diff --git a/les/odr.go b/les/odr.go index d45c6a1a5d05..10ff0854d385 100644 --- a/les/odr.go +++ b/les/odr.go @@ -18,6 +18,7 @@ package les import ( "context" + "math/rand" "sort" "time" @@ -156,7 +157,7 @@ func (odr *LesOdr) RetrieveTxStatus(ctx context.Context, req *light.TxStatusRequ var ( // Deep copy the request, so that the partial result won't be mixed. 
req = &TxStatusRequest{Hashes: req.Hashes} - id = genReqID() + id = rand.Uint64() distreq = &distReq{ getCost: func(dp distPeer) uint64 { return req.GetCost(dp.(*serverPeer)) }, canSend: func(dp distPeer) bool { return canSend[dp.(*serverPeer).id] }, @@ -200,7 +201,7 @@ func (odr *LesOdr) RetrieveTxStatus(ctx context.Context, req *light.TxStatusRequ func (odr *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err error) { lreq := LesRequest(req) - reqID := genReqID() + reqID := rand.Uint64() rq := &distReq{ getCost: func(dp distPeer) uint64 { return lreq.GetCost(dp.(*serverPeer)) diff --git a/les/retrieve.go b/les/retrieve.go index 3174d498789b..307af0421255 100644 --- a/les/retrieve.go +++ b/les/retrieve.go @@ -18,8 +18,6 @@ package les import ( "context" - "crypto/rand" - "encoding/binary" "fmt" "sync" "time" @@ -430,10 +428,3 @@ func (r *sentReq) stop(err error) { func (r *sentReq) getError() error { return r.err } - -// genReqID generates a new random request ID -func genReqID() uint64 { - var rnd [8]byte - rand.Read(rnd[:]) - return binary.BigEndian.Uint64(rnd[:]) -} diff --git a/les/txrelay.go b/les/txrelay.go index 9d29b2f23480..40a51fb76f8e 100644 --- a/les/txrelay.go +++ b/les/txrelay.go @@ -18,6 +18,7 @@ package les import ( "context" + "math/rand" "sync" "github.com/ethereum/go-ethereum/common" @@ -117,7 +118,7 @@ func (ltrx *lesTxRelay) send(txs types.Transactions, count int) { ll := list enc, _ := rlp.EncodeToBytes(ll) - reqID := genReqID() + reqID := rand.Uint64() rq := &distReq{ getCost: func(dp distPeer) uint64 { peer := dp.(*serverPeer) From 59f259b058b85eea38cd2686051a9076abb1e712 Mon Sep 17 00:00:00 2001 From: gary rong Date: Sat, 22 May 2021 02:52:51 +0800 Subject: [PATCH 043/208] miner/stress: update stress tests (#22919) This PR updates the miner stress tests and moves them to standalone packages, so that they can be run directly. --- miner/{stress_clique.go => stress/clique/clique.go} | 5 ++--- miner/{stress_ethash.go => stress/ethash/main.go} | 7 +++---- 2 files changed, 5 insertions(+), 7 deletions(-) rename miner/{stress_clique.go => stress/clique/clique.go} (98%) rename miner/{stress_ethash.go => stress/ethash/main.go} (97%) diff --git a/miner/stress_clique.go b/miner/stress/clique/clique.go similarity index 98% rename from miner/stress_clique.go rename to miner/stress/clique/clique.go index c585e0b1f6ee..dea1ab7453d3 100644 --- a/miner/stress_clique.go +++ b/miner/stress/clique/clique.go @@ -14,8 +14,6 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build none - // This file contains a miner stress test based on the Clique consensus engine. 
package main @@ -36,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" @@ -192,7 +191,7 @@ func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { DatabaseCache: 256, DatabaseHandles: 256, TxPool: core.DefaultTxPoolConfig, - GPO: eth.DefaultConfig.GPO, + GPO: ethconfig.Defaults.GPO, Miner: miner.Config{ GasFloor: genesis.GasLimit * 9 / 10, GasCeil: genesis.GasLimit * 11 / 10, diff --git a/miner/stress_ethash.go b/miner/stress/ethash/main.go similarity index 97% rename from miner/stress_ethash.go rename to miner/stress/ethash/main.go index 0b838d48b9f2..0f27c5e74cc1 100644 --- a/miner/stress_ethash.go +++ b/miner/stress/ethash/main.go @@ -14,8 +14,6 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build none - // This file contains a miner stress test based on the Ethash consensus engine. package main @@ -37,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" @@ -169,8 +168,8 @@ func makeMiner(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { DatabaseCache: 256, DatabaseHandles: 256, TxPool: core.DefaultTxPoolConfig, - GPO: eth.DefaultConfig.GPO, - Ethash: eth.DefaultConfig.Ethash, + GPO: ethconfig.Defaults.GPO, + Ethash: ethconfig.Defaults.Ethash, Miner: miner.Config{ GasFloor: genesis.GasLimit * 9 / 10, GasCeil: genesis.GasLimit * 11 / 10, From 0d076d92db39940ff181a0b07970c21bbe3521c2 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sat, 22 May 2021 13:34:29 +0200 Subject: [PATCH 044/208] rlp: use atomic.Value for type cache (#22902) All encoding/decoding operations read the type cache to find the writer/decoder function responsible for a type. When analyzing CPU profiles of geth during sync, I found that the use of sync.RWMutex in cache lookups appears in the profiles. It seems we are running into CPU cache contention problems when package rlp is heavily used on all CPU cores during sync. This change makes it use atomic.Value + a writer lock instead of sync.RWMutex. In the common case where the typeinfo entry is present in the cache, we simply fetch the map and lookup the type. --- rlp/decode.go | 4 +-- rlp/encode.go | 4 +-- rlp/encode_test.go | 33 +++++++++++++++++++ rlp/typecache.go | 82 ++++++++++++++++++++++++++++++++-------------- 4 files changed, 94 insertions(+), 29 deletions(-) diff --git a/rlp/decode.go b/rlp/decode.go index 976780971773..e4262b64df9c 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -245,7 +245,7 @@ func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) { } return decodeByteSlice, nil } - etypeinfo := cachedTypeInfo1(etype, tags{}) + etypeinfo := theTC.infoWhileGenerating(etype, tags{}) if etypeinfo.decoderErr != nil { return nil, etypeinfo.decoderErr } @@ -424,7 +424,7 @@ func zeroFields(structval reflect.Value, fields []field) { // makePtrDecoder creates a decoder that decodes into the pointer's element type. 
func makePtrDecoder(typ reflect.Type, tag tags) (decoder, error) { etype := typ.Elem() - etypeinfo := cachedTypeInfo1(etype, tags{}) + etypeinfo := theTC.infoWhileGenerating(etype, tags{}) switch { case etypeinfo.decoderErr != nil: return nil, etypeinfo.decoderErr diff --git a/rlp/encode.go b/rlp/encode.go index b7e74a133f2e..2e1b0102caae 100644 --- a/rlp/encode.go +++ b/rlp/encode.go @@ -517,7 +517,7 @@ func writeInterface(val reflect.Value, w *encbuf) error { } func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) { - etypeinfo := cachedTypeInfo1(typ.Elem(), tags{}) + etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{}) if etypeinfo.writerErr != nil { return nil, etypeinfo.writerErr } @@ -585,7 +585,7 @@ func makeStructWriter(typ reflect.Type) (writer, error) { } func makePtrWriter(typ reflect.Type, ts tags) (writer, error) { - etypeinfo := cachedTypeInfo1(typ.Elem(), tags{}) + etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{}) if etypeinfo.writerErr != nil { return nil, etypeinfo.writerErr } diff --git a/rlp/encode_test.go b/rlp/encode_test.go index 74e8ededcb7e..0177bb0350d9 100644 --- a/rlp/encode_test.go +++ b/rlp/encode_test.go @@ -23,6 +23,7 @@ import ( "io" "io/ioutil" "math/big" + "runtime" "sync" "testing" @@ -480,3 +481,35 @@ func BenchmarkEncodeBigInts(b *testing.B) { } } } + +func BenchmarkEncodeConcurrentInterface(b *testing.B) { + type struct1 struct { + A string + B *big.Int + C [20]byte + } + value := []interface{}{ + uint(999), + &struct1{A: "hello", B: big.NewInt(0xFFFFFFFF)}, + [10]byte{1, 2, 3, 4, 5, 6}, + []string{"yeah", "yeah", "yeah"}, + } + + var wg sync.WaitGroup + for cpu := 0; cpu < runtime.NumCPU(); cpu++ { + wg.Add(1) + go func() { + defer wg.Done() + + var buffer bytes.Buffer + for i := 0; i < b.N; i++ { + buffer.Reset() + err := Encode(&buffer, value) + if err != nil { + panic(err) + } + } + }() + } + wg.Wait() +} diff --git a/rlp/typecache.go b/rlp/typecache.go index 3910dcf0806e..62553d3b55c1 100644 --- a/rlp/typecache.go +++ b/rlp/typecache.go @@ -21,13 +21,10 @@ import ( "reflect" "strings" "sync" + "sync/atomic" ) -var ( - typeCacheMutex sync.RWMutex - typeCache = make(map[typekey]*typeinfo) -) - +// typeinfo is an entry in the type cache. type typeinfo struct { decoder decoder decoderErr error // error from makeDecoder @@ -65,41 +62,76 @@ type decoder func(*Stream, reflect.Value) error type writer func(reflect.Value, *encbuf) error +var theTC = newTypeCache() + +type typeCache struct { + cur atomic.Value + + // This lock synchronizes writers. + mu sync.Mutex + next map[typekey]*typeinfo +} + +func newTypeCache() *typeCache { + c := new(typeCache) + c.cur.Store(make(map[typekey]*typeinfo)) + return c +} + func cachedDecoder(typ reflect.Type) (decoder, error) { - info := cachedTypeInfo(typ, tags{}) + info := theTC.info(typ) return info.decoder, info.decoderErr } func cachedWriter(typ reflect.Type) (writer, error) { - info := cachedTypeInfo(typ, tags{}) + info := theTC.info(typ) return info.writer, info.writerErr } -func cachedTypeInfo(typ reflect.Type, tags tags) *typeinfo { - typeCacheMutex.RLock() - info := typeCache[typekey{typ, tags}] - typeCacheMutex.RUnlock() - if info != nil { +func (c *typeCache) info(typ reflect.Type) *typeinfo { + key := typekey{Type: typ} + if info := c.cur.Load().(map[typekey]*typeinfo)[key]; info != nil { return info } - // not in the cache, need to generate info for this type. 
- typeCacheMutex.Lock() - defer typeCacheMutex.Unlock() - return cachedTypeInfo1(typ, tags) + + // Not in the cache, need to generate info for this type. + return c.generate(typ, tags{}) +} + +func (c *typeCache) generate(typ reflect.Type, tags tags) *typeinfo { + c.mu.Lock() + defer c.mu.Unlock() + + cur := c.cur.Load().(map[typekey]*typeinfo) + if info := cur[typekey{typ, tags}]; info != nil { + return info + } + + // Copy cur to next. + c.next = make(map[typekey]*typeinfo, len(cur)+1) + for k, v := range cur { + c.next[k] = v + } + + // Generate. + info := c.infoWhileGenerating(typ, tags) + + // next -> cur + c.cur.Store(c.next) + c.next = nil + return info } -func cachedTypeInfo1(typ reflect.Type, tags tags) *typeinfo { +func (c *typeCache) infoWhileGenerating(typ reflect.Type, tags tags) *typeinfo { key := typekey{typ, tags} - info := typeCache[key] - if info != nil { - // another goroutine got the write lock first + if info := c.next[key]; info != nil { return info } - // put a dummy value into the cache before generating. - // if the generator tries to lookup itself, it will get + // Put a dummy value into the cache before generating. + // If the generator tries to lookup itself, it will get // the dummy value and won't call itself recursively. - info = new(typeinfo) - typeCache[key] = info + info := new(typeinfo) + c.next[key] = info info.generate(typ, tags) return info } @@ -133,7 +165,7 @@ func structFields(typ reflect.Type) (fields []field, err error) { } else if anyOptional { return nil, fmt.Errorf(`rlp: struct field %v.%s needs "optional" tag`, typ, f.Name) } - info := cachedTypeInfo1(f.Type, tags) + info := theTC.infoWhileGenerating(f.Type, tags) fields = append(fields, field{i, info, tags.optional}) } } From 154ca32a8ade0a7d2461d0eb432361261a51a395 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sat, 22 May 2021 15:10:16 +0200 Subject: [PATCH 045/208] rlp: optimize byte array handling (#22924) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change improves the performance of encoding/decoding [N]byte. 
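For illustration only (not part of the patch), here is a minimal standalone sketch of the kind of fixed-size byte-array value whose encoding and decoding this change targets; the struct and field names below are made up for the example:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ethereum/go-ethereum/rlp"
    )

    // twoHashes mirrors the byte-array-heavy structs used in the benchmarks:
    // every field is a fixed-size [N]byte array rather than a []byte slice.
    type twoHashes struct {
        Parent [32]byte
        Root   [32]byte
    }

    func main() {
        in := twoHashes{Parent: [32]byte{1}, Root: [32]byte{2}}

        // Encoding a struct with [N]byte fields goes through the byte-array writer.
        var buf bytes.Buffer
        if err := rlp.Encode(&buf, &in); err != nil {
            panic(err)
        }

        // Decoding reads the string payload straight into the arrays.
        var out twoHashes
        if err := rlp.DecodeBytes(buf.Bytes(), &out); err != nil {
            panic(err)
        }
        fmt.Println(out.Parent == in.Parent, out.Root == in.Root) // true true
    }

Callers of Encode/Decode are unaffected by the change; only the internal handling of the array bytes differs.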
name old time/op new time/op delta DecodeByteArrayStruct-8 336ns ± 0% 246ns ± 0% -26.98% (p=0.000 n=9+10) EncodeByteArrayStruct-8 225ns ± 1% 148ns ± 1% -34.12% (p=0.000 n=10+10) name old alloc/op new alloc/op delta DecodeByteArrayStruct-8 120B ± 0% 48B ± 0% -60.00% (p=0.000 n=10+10) EncodeByteArrayStruct-8 0.00B 0.00B ~ (all equal) --- rlp/decode.go | 16 ++++++------- rlp/decode_test.go | 44 ++++++++++++++++++++++++++++++++++-- rlp/encode.go | 56 ++++++++++++++-------------------------------- rlp/encode_test.go | 19 ++++++++++++++++ rlp/safe.go | 26 +++++++++++++++++++++ rlp/unsafe.go | 35 +++++++++++++++++++++++++++++ 6 files changed, 146 insertions(+), 50 deletions(-) create mode 100644 rlp/safe.go create mode 100644 rlp/unsafe.go diff --git a/rlp/decode.go b/rlp/decode.go index e4262b64df9c..8121ab2e72a5 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -348,25 +348,23 @@ func decodeByteArray(s *Stream, val reflect.Value) error { if err != nil { return err } - vlen := val.Len() + slice := byteArrayBytes(val) switch kind { case Byte: - if vlen == 0 { + if len(slice) == 0 { return &decodeError{msg: "input string too long", typ: val.Type()} - } - if vlen > 1 { + } else if len(slice) > 1 { return &decodeError{msg: "input string too short", typ: val.Type()} } - bv, _ := s.Uint() - val.Index(0).SetUint(bv) + slice[0] = s.byteval + s.kind = -1 case String: - if uint64(vlen) < size { + if uint64(len(slice)) < size { return &decodeError{msg: "input string too long", typ: val.Type()} } - if uint64(vlen) > size { + if uint64(len(slice)) > size { return &decodeError{msg: "input string too short", typ: val.Type()} } - slice := val.Slice(0, vlen).Interface().([]byte) if err := s.readFull(slice); err != nil { return err } diff --git a/rlp/decode_test.go b/rlp/decode_test.go index 87a330633230..36d254e18eb9 100644 --- a/rlp/decode_test.go +++ b/rlp/decode_test.go @@ -26,6 +26,8 @@ import ( "reflect" "strings" "testing" + + "github.com/ethereum/go-ethereum/common/math" ) func TestStreamKind(t *testing.T) { @@ -1063,7 +1065,7 @@ func ExampleStream() { // [102 111 111 98 97 114] } -func BenchmarkDecode(b *testing.B) { +func BenchmarkDecodeUints(b *testing.B) { enc := encodeTestSlice(90000) b.SetBytes(int64(len(enc))) b.ReportAllocs() @@ -1078,7 +1080,7 @@ func BenchmarkDecode(b *testing.B) { } } -func BenchmarkDecodeIntSliceReuse(b *testing.B) { +func BenchmarkDecodeUintsReused(b *testing.B) { enc := encodeTestSlice(100000) b.SetBytes(int64(len(enc))) b.ReportAllocs() @@ -1093,6 +1095,44 @@ func BenchmarkDecodeIntSliceReuse(b *testing.B) { } } +func BenchmarkDecodeByteArrayStruct(b *testing.B) { + enc, err := EncodeToBytes(&byteArrayStruct{}) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(enc))) + b.ReportAllocs() + b.ResetTimer() + + var out byteArrayStruct + for i := 0; i < b.N; i++ { + if err := DecodeBytes(enc, &out); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeBigInts(b *testing.B) { + ints := make([]*big.Int, 200) + for i := range ints { + ints[i] = math.BigPow(2, int64(i)) + } + enc, err := EncodeToBytes(ints) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(enc))) + b.ReportAllocs() + b.ResetTimer() + + var out []*big.Int + for i := 0; i < b.N; i++ { + if err := DecodeBytes(enc, &out); err != nil { + b.Fatal(err) + } + } +} + func encodeTestSlice(n uint) []byte { s := make([]uint, n) for i := uint(0); i < n; i++ { diff --git a/rlp/encode.go b/rlp/encode.go index 2e1b0102caae..334864434296 100644 --- a/rlp/encode.go +++ b/rlp/encode.go @@ -124,19 +124,15 @@ 
func puthead(buf []byte, smalltag, largetag byte, size uint64) int { } type encbuf struct { - str []byte // string data, contains everything except list headers - lheads []listhead // all list headers - lhsize int // sum of sizes of all encoded list headers - sizebuf [9]byte // auxiliary buffer for uint encoding - bufvalue reflect.Value // used in writeByteArrayCopy + str []byte // string data, contains everything except list headers + lheads []listhead // all list headers + lhsize int // sum of sizes of all encoded list headers + sizebuf [9]byte // auxiliary buffer for uint encoding } // encbufs are pooled. var encbufPool = sync.Pool{ - New: func() interface{} { - var bytes []byte - return &encbuf{bufvalue: reflect.ValueOf(&bytes).Elem()} - }, + New: func() interface{} { return new(encbuf) }, } func (w *encbuf) reset() { @@ -429,21 +425,14 @@ func writeBytes(val reflect.Value, w *encbuf) error { return nil } -var byteType = reflect.TypeOf(byte(0)) - func makeByteArrayWriter(typ reflect.Type) writer { - length := typ.Len() - if length == 0 { + switch typ.Len() { + case 0: return writeLengthZeroByteArray - } else if length == 1 { + case 1: return writeLengthOneByteArray - } - if typ.Elem() != byteType { - return writeNamedByteArray - } - return func(val reflect.Value, w *encbuf) error { - writeByteArrayCopy(length, val, w) - return nil + default: + return writeByteArray } } @@ -462,29 +451,18 @@ func writeLengthOneByteArray(val reflect.Value, w *encbuf) error { return nil } -// writeByteArrayCopy encodes byte arrays using reflect.Copy. This is -// the fast path for [N]byte where N > 1. -func writeByteArrayCopy(length int, val reflect.Value, w *encbuf) { - w.encodeStringHeader(length) - offset := len(w.str) - w.str = append(w.str, make([]byte, length)...) - w.bufvalue.SetBytes(w.str[offset:]) - reflect.Copy(w.bufvalue, val) -} - -// writeNamedByteArray encodes byte arrays with named element type. -// This exists because reflect.Copy can't be used with such types. -func writeNamedByteArray(val reflect.Value, w *encbuf) error { +func writeByteArray(val reflect.Value, w *encbuf) error { if !val.CanAddr() { - // Slice requires the value to be addressable. - // Make it addressable by copying. + // Getting the byte slice of val requires it to be addressable. Make it + // addressable by copying. copy := reflect.New(val.Type()).Elem() copy.Set(val) val = copy } - size := val.Len() - slice := val.Slice(0, size).Bytes() - w.encodeString(slice) + + slice := byteArrayBytes(val) + w.encodeStringHeader(len(slice)) + w.str = append(w.str, slice...) return nil } diff --git a/rlp/encode_test.go b/rlp/encode_test.go index 0177bb0350d9..08a2a84c623a 100644 --- a/rlp/encode_test.go +++ b/rlp/encode_test.go @@ -513,3 +513,22 @@ func BenchmarkEncodeConcurrentInterface(b *testing.B) { } wg.Wait() } + +type byteArrayStruct struct { + A [20]byte + B [32]byte + C [32]byte +} + +func BenchmarkEncodeByteArrayStruct(b *testing.B) { + var out bytes.Buffer + var value byteArrayStruct + + b.ReportAllocs() + for i := 0; i < b.N; i++ { + out.Reset() + if err := Encode(&out, &value); err != nil { + b.Fatal(err) + } + } +} diff --git a/rlp/safe.go b/rlp/safe.go new file mode 100644 index 000000000000..c881650a0df3 --- /dev/null +++ b/rlp/safe.go @@ -0,0 +1,26 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build nacl js !cgo + +package rlp + +import "reflect" + +// byteArrayBytes returns a slice of the byte array v. +func byteArrayBytes(v reflect.Value) []byte { + return v.Slice(0, v.Len()).Bytes() +} diff --git a/rlp/unsafe.go b/rlp/unsafe.go new file mode 100644 index 000000000000..94ed5405a8e4 --- /dev/null +++ b/rlp/unsafe.go @@ -0,0 +1,35 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build !nacl,!js,cgo + +package rlp + +import ( + "reflect" + "unsafe" +) + +// byteArrayBytes returns a slice of the byte array v. 
+func byteArrayBytes(v reflect.Value) []byte { + len := v.Len() + var s []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) + hdr.Data = v.UnsafeAddr() + hdr.Cap = len + hdr.Len = len + return s +} From 93407b14a66287d1a0ad0140b9c5f754c11ad437 Mon Sep 17 00:00:00 2001 From: Fire Man <55934298+basdevelop@users.noreply.github.com> Date: Mon, 24 May 2021 20:34:38 +0800 Subject: [PATCH 046/208] core: make txpool free space calculation more accurate (#22933) --- core/tx_pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index 5db1d3df329d..0abc092ecb37 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -595,7 +595,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e return false, err } // If the transaction pool is full, discard underpriced transactions - if uint64(pool.all.Count()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { + if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it if !isLocal && pool.priced.Underpriced(tx) { log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice()) From 017cf71fbd51ede510dd0e40e02f89b5340eea93 Mon Sep 17 00:00:00 2001 From: ucwong Date: Tue, 25 May 2021 16:14:39 +0800 Subject: [PATCH 047/208] rlp, tests/fuzzers/bls12381: gofmt (#22937) --- rlp/safe.go | 2 +- tests/fuzzers/bls12381/bls12381_fuzz.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rlp/safe.go b/rlp/safe.go index c881650a0df3..a80380aef473 100644 --- a/rlp/safe.go +++ b/rlp/safe.go @@ -22,5 +22,5 @@ import "reflect" // byteArrayBytes returns a slice of the byte array v. func byteArrayBytes(v reflect.Value) []byte { - return v.Slice(0, v.Len()).Bytes() + return v.Slice(0, v.Len()).Bytes() } diff --git a/tests/fuzzers/bls12381/bls12381_fuzz.go b/tests/fuzzers/bls12381/bls12381_fuzz.go index 298050ad3650..c0f452f3edd5 100644 --- a/tests/fuzzers/bls12381/bls12381_fuzz.go +++ b/tests/fuzzers/bls12381/bls12381_fuzz.go @@ -159,7 +159,7 @@ func FuzzCrossG1MultiExp(data []byte) int { gethPoints = append(gethPoints, new(bls12381.PointG1).Set(kp1)) gnarkPoints = append(gnarkPoints, *cp1) } - if len(gethScalars) == 0{ + if len(gethScalars) == 0 { return 0 } // compute multi exponentiation From 4d33de9b4975be0f3450fa44d6c912e4331ca0c8 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 25 May 2021 21:56:25 +0200 Subject: [PATCH 048/208] rlp: optimize big.Int decoding for size <= 32 bytes (#22927) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change grows the static integer buffer in Stream to 32 bytes, making it possible to decode 256bit integers without allocating a temporary buffer. In the recent commit 088da24, Stream struct size decreased from 120 bytes down to 88 bytes. This commit grows the struct to 112 bytes again, but the size change will not degrade performance because Stream instances are internally cached in sync.Pool. 
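As a usage-level illustration (not part of the patch), the sketch below decodes a 256-bit integer, the case that now fits entirely in the enlarged 32-byte Stream buffer; the value chosen here is arbitrary:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/rlp"
    )

    func main() {
        // A 256-bit integer: its RLP string payload is exactly 32 bytes,
        // so it can be read into the Stream's static buffer.
        v := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))

        enc, err := rlp.EncodeToBytes(v)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(enc)) // 33: one length byte plus 32 bytes of payload

        var dec big.Int
        if err := rlp.DecodeBytes(enc, &dec); err != nil {
            panic(err)
        }
        fmt.Println(dec.Cmp(v) == 0) // true
    }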
name old time/op new time/op delta DecodeBigInts-8 12.2µs ± 0% 8.6µs ± 4% -29.58% (p=0.000 n=9+10) name old speed new speed delta DecodeBigInts-8 230MB/s ± 0% 326MB/s ± 4% +42.04% (p=0.000 n=9+10) --- rlp/decode.go | 60 ++++++++++++++++++++++++++++++++++++---------- rlp/decode_test.go | 22 ++++++++++++++--- rlp/encode_test.go | 8 +++++++ 3 files changed, 74 insertions(+), 16 deletions(-) diff --git a/rlp/decode.go b/rlp/decode.go index 8121ab2e72a5..ac04d5d56949 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -220,20 +220,51 @@ func decodeBigIntNoPtr(s *Stream, val reflect.Value) error { } func decodeBigInt(s *Stream, val reflect.Value) error { - b, err := s.Bytes() - if err != nil { + var buffer []byte + kind, size, err := s.Kind() + switch { + case err != nil: return wrapStreamError(err, val.Type()) + case kind == List: + return wrapStreamError(ErrExpectedString, val.Type()) + case kind == Byte: + buffer = s.uintbuf[:1] + buffer[0] = s.byteval + s.kind = -1 // re-arm Kind + case size == 0: + // Avoid zero-length read. + s.kind = -1 + case size <= uint64(len(s.uintbuf)): + // For integers smaller than s.uintbuf, allocating a buffer + // can be avoided. + buffer = s.uintbuf[:size] + if err := s.readFull(buffer); err != nil { + return wrapStreamError(err, val.Type()) + } + // Reject inputs where single byte encoding should have been used. + if size == 1 && buffer[0] < 128 { + return wrapStreamError(ErrCanonSize, val.Type()) + } + default: + // For large integers, a temporary buffer is needed. + buffer = make([]byte, size) + if err := s.readFull(buffer); err != nil { + return wrapStreamError(err, val.Type()) + } + } + + // Reject leading zero bytes. + if len(buffer) > 0 && buffer[0] == 0 { + return wrapStreamError(ErrCanonInt, val.Type()) } + + // Set the integer bytes. i := val.Interface().(*big.Int) if i == nil { i = new(big.Int) val.Set(reflect.ValueOf(i)) } - // Reject leading zero bytes. - if len(b) > 0 && b[0] == 0 { - return wrapStreamError(ErrCanonInt, val.Type()) - } - i.SetBytes(b) + i.SetBytes(buffer) return nil } @@ -563,7 +594,7 @@ type Stream struct { size uint64 // size of value ahead kinderr error // error from last readKind stack []uint64 // list sizes - uintbuf [8]byte // auxiliary buffer for integer decoding + uintbuf [32]byte // auxiliary buffer for integer decoding kind Kind // kind of value ahead byteval byte // value of single byte in type tag limited bool // true if input limit is in effect @@ -817,7 +848,7 @@ func (s *Stream) Reset(r io.Reader, inputLimit uint64) { s.kind = -1 s.kinderr = nil s.byteval = 0 - s.uintbuf = [8]byte{} + s.uintbuf = [32]byte{} } // Kind returns the kind and size of the next value in the @@ -927,17 +958,20 @@ func (s *Stream) readUint(size byte) (uint64, error) { b, err := s.readByte() return uint64(b), err default: + buffer := s.uintbuf[:8] + for i := range buffer { + buffer[i] = 0 + } start := int(8 - size) - s.uintbuf = [8]byte{} - if err := s.readFull(s.uintbuf[start:]); err != nil { + if err := s.readFull(buffer[start:]); err != nil { return 0, err } - if s.uintbuf[start] == 0 { + if buffer[start] == 0 { // Note: readUint is also used to decode integer values. // The error needs to be adjusted to become ErrCanonInt in this case. 
return 0, ErrCanonSize } - return binary.BigEndian.Uint64(s.uintbuf[:]), nil + return binary.BigEndian.Uint64(buffer[:]), nil } } diff --git a/rlp/decode_test.go b/rlp/decode_test.go index 36d254e18eb9..7c3dafeac44d 100644 --- a/rlp/decode_test.go +++ b/rlp/decode_test.go @@ -329,6 +329,11 @@ type recstruct struct { Child *recstruct `rlp:"nil"` } +type bigIntStruct struct { + I *big.Int + B string +} + type invalidNilTag struct { X []byte `rlp:"nil"` } @@ -405,10 +410,11 @@ type ignoredField struct { } var ( - veryBigInt = big.NewInt(0).Add( + veryBigInt = new(big.Int).Add( big.NewInt(0).Lsh(big.NewInt(0xFFFFFFFFFFFFFF), 16), big.NewInt(0xFFFF), ) + veryVeryBigInt = new(big.Int).Exp(veryBigInt, big.NewInt(8), nil) ) var decodeTests = []decodeTest{ @@ -479,12 +485,15 @@ var decodeTests = []decodeTest{ {input: "C0", ptr: new(string), error: "rlp: expected input string or byte for string"}, // big ints + {input: "80", ptr: new(*big.Int), value: big.NewInt(0)}, {input: "01", ptr: new(*big.Int), value: big.NewInt(1)}, {input: "89FFFFFFFFFFFFFFFFFF", ptr: new(*big.Int), value: veryBigInt}, + {input: "B848FFFFFFFFFFFFFFFFF800000000000000001BFFFFFFFFFFFFFFFFC8000000000000000045FFFFFFFFFFFFFFFFC800000000000000001BFFFFFFFFFFFFFFFFF8000000000000000001", ptr: new(*big.Int), value: veryVeryBigInt}, {input: "10", ptr: new(big.Int), value: *big.NewInt(16)}, // non-pointer also works {input: "C0", ptr: new(*big.Int), error: "rlp: expected input string or byte for *big.Int"}, - {input: "820001", ptr: new(big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"}, - {input: "8105", ptr: new(big.Int), error: "rlp: non-canonical size information for *big.Int"}, + {input: "00", ptr: new(*big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"}, + {input: "820001", ptr: new(*big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"}, + {input: "8105", ptr: new(*big.Int), error: "rlp: non-canonical size information for *big.Int"}, // structs { @@ -497,6 +506,13 @@ var decodeTests = []decodeTest{ ptr: new(recstruct), value: recstruct{1, &recstruct{2, &recstruct{3, nil}}}, }, + { + // This checks that empty big.Int works correctly in struct context. It's easy to + // miss the update of s.kind for this case, so it needs its own test. + input: "C58083343434", + ptr: new(bigIntStruct), + value: bigIntStruct{new(big.Int), "444"}, + }, // struct errors { diff --git a/rlp/encode_test.go b/rlp/encode_test.go index 08a2a84c623a..25d4aac26726 100644 --- a/rlp/encode_test.go +++ b/rlp/encode_test.go @@ -131,6 +131,14 @@ var encTests = []encTest{ val: big.NewInt(0).SetBytes(unhex("010000000000000000000000000000000000000000000000000000000000000000")), output: "A1010000000000000000000000000000000000000000000000000000000000000000", }, + { + val: veryBigInt, + output: "89FFFFFFFFFFFFFFFFFF", + }, + { + val: veryVeryBigInt, + output: "B848FFFFFFFFFFFFFFFFF800000000000000001BFFFFFFFFFFFFFFFFC8000000000000000045FFFFFFFFFFFFFFFFC800000000000000001BFFFFFFFFFFFFFFFFF8000000000000000001", + }, // non-pointer big.Int {val: *big.NewInt(0), output: "80"}, From 836c647bdd65896c0e5bef97466d5d514db419e7 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Tue, 25 May 2021 22:20:36 +0200 Subject: [PATCH 049/208] eth: unregister peer only when handler exits (#22908) This removes the error log message that says Ethereum peer removal failed ... 
err="peer not registered" The error happened because removePeer was called multiple times: once to disconnect the peer, and another time when the handler exited. With this change, removePeer now has the sole purpose of disconnecting the peer. Unregistering happens exactly once, when the handler exits. --- eth/handler.go | 15 +++++++---- eth/handler_eth_test.go | 59 ++++++++++++++++++++++++----------------- p2p/peer.go | 16 ++++++++++- 3 files changed, 60 insertions(+), 30 deletions(-) diff --git a/eth/handler.go b/eth/handler.go index 3f10750abf5e..cd165380446c 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -287,7 +287,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { peer.Log().Error("Ethereum peer registration failed", "err", err) return err } - defer h.removePeer(peer.ID()) + defer h.unregisterPeer(peer.ID()) p := h.peers.peer(peer.ID()) if p == nil { @@ -354,9 +354,16 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error return handler(peer) } -// removePeer unregisters a peer from the downloader and fetchers, removes it from -// the set of tracked peers and closes the network connection to it. +// removePeer requests disconnection of a peer. func (h *handler) removePeer(id string) { + peer := h.peers.peer(id) + if peer != nil { + peer.Peer.Disconnect(p2p.DiscUselessPeer) + } +} + +// unregisterPeer removes a peer from the downloader, fetchers and main peer set. +func (h *handler) unregisterPeer(id string) { // Create a custom logger to avoid printing the entire id var logger log.Logger if len(id) < 16 { @@ -384,8 +391,6 @@ func (h *handler) removePeer(id string) { if err := h.peers.unregisterPeer(id); err != nil { logger.Error("Ethereum peer removal failed", "err", err) } - // Hard disconnect at the networking layer - peer.Peer.Disconnect(p2p.DiscUselessPeer) } func (h *handler) Start(maxPeers int) { diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 1d38e3b66663..038de469908e 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -144,8 +144,8 @@ func testForkIDSplit(t *testing.T, protocol uint) { defer p2pNoFork.Close() defer p2pProFork.Close() - peerNoFork := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) - peerProFork := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) + peerNoFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil) + peerProFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil) defer peerNoFork.Close() defer peerProFork.Close() @@ -206,8 +206,8 @@ func testForkIDSplit(t *testing.T, protocol uint) { defer p2pNoFork.Close() defer p2pProFork.Close() - peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) - peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) + peerNoFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil) + peerProFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil) defer peerNoFork.Close() defer peerProFork.Close() @@ -257,8 +257,8 @@ func testRecvTransactions(t *testing.T, protocol uint) { defer p2pSrc.Close() defer p2pSink.Close() - src := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pSrc, handler.txpool) - sink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pSink, handler.txpool) + src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", 
nil, p2pSrc), p2pSrc, handler.txpool) + sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool) defer src.Close() defer sink.Close() @@ -319,8 +319,8 @@ func testSendTransactions(t *testing.T, protocol uint) { defer p2pSrc.Close() defer p2pSink.Close() - src := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pSrc, handler.txpool) - sink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pSink, handler.txpool) + src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool) + sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool) defer src.Close() defer sink.Close() @@ -407,8 +407,8 @@ func testTransactionPropagation(t *testing.T, protocol uint) { defer sourcePipe.Close() defer sinkPipe.Close() - sourcePeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{byte(i)}, "", nil), sourcePipe, source.txpool) - sinkPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, sink.txpool) + sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, source.txpool) + sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool) defer sourcePeer.Close() defer sinkPeer.Close() @@ -490,6 +490,8 @@ func TestCheckpointChallenge(t *testing.T) { } func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) { + t.Parallel() + // Reduce the checkpoint handshake challenge timeout defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout) syncChallengeTimeout = 250 * time.Millisecond @@ -513,20 +515,26 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo handler.handler.checkpointNumber = number handler.handler.checkpointHash = response.Hash() } - // Create a challenger peer and a challenged one + + // Create a challenger peer and a challenged one. p2pLocal, p2pRemote := p2p.MsgPipe() defer p2pLocal.Close() defer p2pRemote.Close() - local := eth.NewPeer(eth.ETH65, p2p.NewPeer(enode.ID{1}, "", nil), p2pLocal, handler.txpool) - remote := eth.NewPeer(eth.ETH65, p2p.NewPeer(enode.ID{2}, "", nil), p2pRemote, handler.txpool) + local := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pLocal), p2pLocal, handler.txpool) + remote := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pRemote), p2pRemote, handler.txpool) defer local.Close() defer remote.Close() - go handler.handler.runEthPeer(local, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(handler.handler), peer) - }) - // Run the handshake locally to avoid spinning up a remote handler + handlerDone := make(chan struct{}) + go func() { + defer close(handlerDone) + handler.handler.runEthPeer(local, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(handler.handler), peer) + }) + }() + + // Run the handshake locally to avoid spinning up a remote handler. 
var ( genesis = handler.chain.Genesis() head = handler.chain.CurrentBlock() @@ -535,12 +543,13 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { t.Fatalf("failed to run protocol handshake") } - // Connect a new peer and check that we receive the checkpoint challenge + + // Connect a new peer and check that we receive the checkpoint challenge. if checkpoint { if err := remote.ExpectRequestHeadersByNumber(response.Number.Uint64(), 1, 0, false); err != nil { t.Fatalf("challenge mismatch: %v", err) } - // Create a block to reply to the challenge if no timeout is simulated + // Create a block to reply to the challenge if no timeout is simulated. if !timeout { if empty { if err := remote.SendBlockHeaders([]*types.Header{}); err != nil { @@ -557,11 +566,13 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo } } } + // Wait until the test timeout passes to ensure proper cleanup time.Sleep(syncChallengeTimeout + 300*time.Millisecond) - // Verify that the remote peer is maintained or dropped + // Verify that the remote peer is maintained or dropped. if drop { + <-handlerDone if peers := handler.handler.peers.len(); peers != 0 { t.Fatalf("peer count mismatch: have %d, want %d", peers, 0) } @@ -608,8 +619,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) { defer sourcePipe.Close() defer sinkPipe.Close() - sourcePeer := eth.NewPeer(eth.ETH65, p2p.NewPeer(enode.ID{byte(i)}, "", nil), sourcePipe, nil) - sinkPeer := eth.NewPeer(eth.ETH65, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, nil) + sourcePeer := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) + sinkPeer := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) defer sourcePeer.Close() defer sinkPeer.Close() @@ -676,8 +687,8 @@ func testBroadcastMalformedBlock(t *testing.T, protocol uint) { defer p2pSrc.Close() defer p2pSink.Close() - src := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pSrc, source.txpool) - sink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pSink, source.txpool) + src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, source.txpool) + sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, source.txpool) defer src.Close() defer sink.Close() diff --git a/p2p/peer.go b/p2p/peer.go index 8ebc858392b5..b6d0dbd1aebd 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -115,7 +115,8 @@ type Peer struct { disc chan DiscReason // events receives message send / receive events if set - events *event.Feed + events *event.Feed + testPipe *MsgPipeRW // for testing } // NewPeer returns a peer for testing purposes. @@ -128,6 +129,15 @@ func NewPeer(id enode.ID, name string, caps []Cap) *Peer { return peer } +// NewPeerPipe creates a peer for testing purposes. +// The message pipe given as the last parameter is closed when +// Disconnect is called on the peer. +func NewPeerPipe(id enode.ID, name string, caps []Cap, pipe *MsgPipeRW) *Peer { + p := NewPeer(id, name, caps) + p.testPipe = pipe + return p +} + // ID returns the node's public key. func (p *Peer) ID() enode.ID { return p.rw.node.ID() @@ -185,6 +195,10 @@ func (p *Peer) LocalAddr() net.Addr { // Disconnect terminates the peer connection with the given reason. 
// It returns immediately and does not wait until the connection is closed. func (p *Peer) Disconnect(reason DiscReason) { + if p.testPipe != nil { + p.testPipe.Close() + } + select { case p.disc <- reason: case <-p.closed: From 51b32cc7e44e174316d3ae970680cfa9b77f6146 Mon Sep 17 00:00:00 2001 From: gary rong Date: Wed, 26 May 2021 04:30:21 +0800 Subject: [PATCH 050/208] internal/ethapi: merge CallArgs and SendTxArgs (#22718) There are two transaction parameter structures defined in the codebase, although for different purposes. But most of the parameters are shared. So it's nice to reduce the code duplication by merging them together. Co-authored-by: Martin Holst Swende --- eth/tracers/api.go | 2 +- eth/tracers/api_test.go | 20 +-- graphql/graphql.go | 8 +- internal/ethapi/api.go | 244 ++++------------------------ internal/ethapi/transaction_args.go | 185 +++++++++++++++++++++ 5 files changed, 236 insertions(+), 223 deletions(-) create mode 100644 internal/ethapi/transaction_args.go diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 172054e9b8ee..1e8759e69ce7 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -730,7 +730,7 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config * // created during the execution of EVM if the given transaction was added on // top of the provided block and returns them as a JSON object. // You can provide -2 as a block number to trace on top of the pending block. -func (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceCallConfig) (interface{}, error) { +func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceCallConfig) (interface{}, error) { // Try to retrieve the specified block var ( err error diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 24bce320cf00..9ff01d66d5f4 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -198,7 +198,7 @@ func TestTraceCall(t *testing.T) { var testSuite = []struct { blockNumber rpc.BlockNumber - call ethapi.CallArgs + call ethapi.TransactionArgs config *TraceCallConfig expectErr error expect interface{} @@ -206,7 +206,7 @@ func TestTraceCall(t *testing.T) { // Standard JSON trace upon the genesis, plain transfer. { blockNumber: rpc.BlockNumber(0), - call: ethapi.CallArgs{ + call: ethapi.TransactionArgs{ From: &accounts[0].addr, To: &accounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1000)), @@ -223,7 +223,7 @@ func TestTraceCall(t *testing.T) { // Standard JSON trace upon the head, plain transfer. 
{ blockNumber: rpc.BlockNumber(genBlocks), - call: ethapi.CallArgs{ + call: ethapi.TransactionArgs{ From: &accounts[0].addr, To: &accounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1000)), @@ -240,7 +240,7 @@ func TestTraceCall(t *testing.T) { // Standard JSON trace upon the non-existent block, error expects { blockNumber: rpc.BlockNumber(genBlocks + 1), - call: ethapi.CallArgs{ + call: ethapi.TransactionArgs{ From: &accounts[0].addr, To: &accounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1000)), @@ -252,7 +252,7 @@ func TestTraceCall(t *testing.T) { // Standard JSON trace upon the latest block { blockNumber: rpc.LatestBlockNumber, - call: ethapi.CallArgs{ + call: ethapi.TransactionArgs{ From: &accounts[0].addr, To: &accounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1000)), @@ -269,7 +269,7 @@ func TestTraceCall(t *testing.T) { // Standard JSON trace upon the pending block { blockNumber: rpc.PendingBlockNumber, - call: ethapi.CallArgs{ + call: ethapi.TransactionArgs{ From: &accounts[0].addr, To: &accounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1000)), @@ -329,7 +329,7 @@ func TestOverridenTraceCall(t *testing.T) { var testSuite = []struct { blockNumber rpc.BlockNumber - call ethapi.CallArgs + call ethapi.TransactionArgs config *TraceCallConfig expectErr error expect *callTrace @@ -337,7 +337,7 @@ func TestOverridenTraceCall(t *testing.T) { // Succcessful call with state overriding { blockNumber: rpc.PendingBlockNumber, - call: ethapi.CallArgs{ + call: ethapi.TransactionArgs{ From: &randomAccounts[0].addr, To: &randomAccounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1000)), @@ -361,7 +361,7 @@ func TestOverridenTraceCall(t *testing.T) { // Invalid call without state overriding { blockNumber: rpc.PendingBlockNumber, - call: ethapi.CallArgs{ + call: ethapi.TransactionArgs{ From: &randomAccounts[0].addr, To: &randomAccounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1000)), @@ -390,7 +390,7 @@ func TestOverridenTraceCall(t *testing.T) { // } { blockNumber: rpc.PendingBlockNumber, - call: ethapi.CallArgs{ + call: ethapi.TransactionArgs{ From: &randomAccounts[0].addr, To: &randomAccounts[2].addr, Data: newRPCBytes(common.Hex2Bytes("8381f58a")), // call number() diff --git a/graphql/graphql.go b/graphql/graphql.go index b1af7675bdf5..71d80d8abd69 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -862,7 +862,7 @@ func (c *CallResult) Status() Long { } func (b *Block) Call(ctx context.Context, args struct { - Data ethapi.CallArgs + Data ethapi.TransactionArgs }) (*CallResult, error) { if b.numberOrHash == nil { _, err := b.resolve(ctx) @@ -887,7 +887,7 @@ func (b *Block) Call(ctx context.Context, args struct { } func (b *Block) EstimateGas(ctx context.Context, args struct { - Data ethapi.CallArgs + Data ethapi.TransactionArgs }) (Long, error) { if b.numberOrHash == nil { _, err := b.resolveHeader(ctx) @@ -937,7 +937,7 @@ func (p *Pending) Account(ctx context.Context, args struct { } func (p *Pending) Call(ctx context.Context, args struct { - Data ethapi.CallArgs + Data ethapi.TransactionArgs }) (*CallResult, error) { pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) result, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, 5*time.Second, p.backend.RPCGasCap()) @@ -957,7 +957,7 @@ func (p *Pending) Call(ctx context.Context, args struct { } func (p *Pending) EstimateGas(ctx context.Context, args struct { - Data ethapi.CallArgs + Data ethapi.TransactionArgs }) (Long, error) { pendingBlockNr := 
rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) gas, err := ethapi.DoEstimateGas(ctx, p.backend, args.Data, pendingBlockNr, p.backend.RPCGasCap()) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 7bc0477bd273..b06df8ff9f2c 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -17,7 +17,6 @@ package ethapi import ( - "bytes" "context" "errors" "fmt" @@ -351,9 +350,9 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool { // signTransaction sets defaults and signs the given transaction // NOTE: the caller needs to ensure that the nonceLock is held, if applicable, // and release it after the transaction has been submitted to the tx pool -func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *SendTxArgs, passwd string) (*types.Transaction, error) { +func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *TransactionArgs, passwd string) (*types.Transaction, error) { // Look up the wallet containing the requested signer - account := accounts.Account{Address: args.From} + account := accounts.Account{Address: args.from()} wallet, err := s.am.Find(account) if err != nil { return nil, err @@ -369,18 +368,18 @@ func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *SendTxArg } // SendTransaction will create a transaction from the given arguments and -// tries to sign it with the key associated with args.From. If the given passwd isn't -// able to decrypt the key it fails. -func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) { +// tries to sign it with the key associated with args.From. If the given +// passwd isn't able to decrypt the key it fails. +func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) { if args.Nonce == nil { // Hold the addresse's mutex around signing to prevent concurrent assignment of // the same nonce to multiple accounts. - s.nonceLock.LockAddr(args.From) - defer s.nonceLock.UnlockAddr(args.From) + s.nonceLock.LockAddr(args.from()) + defer s.nonceLock.UnlockAddr(args.from()) } signed, err := s.signTransaction(ctx, &args, passwd) if err != nil { - log.Warn("Failed transaction send attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err) + log.Warn("Failed transaction send attempt", "from", args.from(), "to", args.To, "value", args.Value.ToInt(), "err", err) return common.Hash{}, err } return SubmitTransaction(ctx, s.b, signed) @@ -390,9 +389,12 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs // tries to sign it with the key associated with args.From. If the given passwd isn't // able to decrypt the key it fails. 
The transaction is returned in RLP-form, not broadcast // to other nodes -func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args SendTxArgs, passwd string) (*SignTransactionResult, error) { +func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args TransactionArgs, passwd string) (*SignTransactionResult, error) { // No need to obtain the noncelock mutex, since we won't be sending this // tx into the transaction pool, but right back to the user + if args.From == nil { + return nil, fmt.Errorf("sender not specified") + } if args.Gas == nil { return nil, fmt.Errorf("gas not specified") } @@ -408,7 +410,7 @@ func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args SendTxArgs } signed, err := s.signTransaction(ctx, &args, passwd) if err != nil { - log.Warn("Failed transaction sign attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err) + log.Warn("Failed transaction sign attempt", "from", args.from(), "to", args.To, "value", args.Value.ToInt(), "err", err) return nil, err } data, err := signed.MarshalBinary() @@ -473,7 +475,7 @@ func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Byt // SignAndSendTransaction was renamed to SendTransaction. This method is deprecated // and will be removed in the future. It primary goal is to give clients time to update. -func (s *PrivateAccountAPI) SignAndSendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) { +func (s *PrivateAccountAPI) SignAndSendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) { return s.SendTransaction(ctx, args, passwd) } @@ -566,6 +568,7 @@ type AccountResult struct { StorageHash common.Hash `json:"storageHash"` StorageProof []StorageResult `json:"storageProof"` } + type StorageResult struct { Key string `json:"key"` Value *hexutil.Big `json:"value"` @@ -751,58 +754,6 @@ func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.A return res[:], state.Error() } -// CallArgs represents the arguments for a call. -type CallArgs struct { - From *common.Address `json:"from"` - To *common.Address `json:"to"` - Gas *hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - Value *hexutil.Big `json:"value"` - Data *hexutil.Bytes `json:"data"` - AccessList *types.AccessList `json:"accessList"` -} - -// ToMessage converts CallArgs to the Message type used by the core evm -func (args *CallArgs) ToMessage(globalGasCap uint64) types.Message { - // Set sender address or use zero address if none specified. 
- var addr common.Address - if args.From != nil { - addr = *args.From - } - - // Set default gas & gas price if none were set - gas := globalGasCap - if gas == 0 { - gas = uint64(math.MaxUint64 / 2) - } - if args.Gas != nil { - gas = uint64(*args.Gas) - } - if globalGasCap != 0 && globalGasCap < gas { - log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) - gas = globalGasCap - } - gasPrice := new(big.Int) - if args.GasPrice != nil { - gasPrice = args.GasPrice.ToInt() - } - value := new(big.Int) - if args.Value != nil { - value = args.Value.ToInt() - } - var data []byte - if args.Data != nil { - data = *args.Data - } - var accessList types.AccessList - if args.AccessList != nil { - accessList = *args.AccessList - } - - msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, nil, nil, data, accessList, false) - return msg -} - // OverrideAccount indicates the overriding fields of account during the execution // of a message call. // Note, state and stateDiff can't be specified at the same time. If state is @@ -855,7 +806,7 @@ func (diff *StateOverride) Apply(state *state.StateDB) error { return nil } -func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, vmCfg vm.Config, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { +func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, vmCfg vm.Config, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) @@ -943,7 +894,7 @@ func (e *revertError) ErrorData() interface{} { // // Note, this function doesn't make and changes in the state/blockchain and is // useful to execute and retrieve values. -func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) { +func (s *PublicBlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) { result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, vm.Config{}, 5*time.Second, s.b.RPCGasCap()) if err != nil { return nil, err @@ -955,7 +906,7 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNrOr return result.Return(), result.Err } -func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap uint64) (hexutil.Uint64, error) { +func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap uint64) (hexutil.Uint64, error) { // Binary search the gas requirement, as it may be higher than the amount used var ( lo uint64 = params.TxGas - 1 @@ -1066,7 +1017,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash // EstimateGas returns an estimate of the amount of gas needed to execute the // given transaction against the current pending block. 
-func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { +func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash @@ -1324,7 +1275,7 @@ type accessListResult struct { // CreateAccessList creates a EIP-2930 type AccessList for the given transaction. // Reexec and BlockNrOrHash can be specified to create the accessList on top of a certain state. -func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args SendTxArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) { +func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) { bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash @@ -1343,7 +1294,7 @@ func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args SendTxA // AccessList creates an access list for the given transaction. // If the accesslist creation fails an error is returned. // If the transaction itself fails, an vmErr is returned. -func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrHash, args SendTxArgs) (acl types.AccessList, gasUsed uint64, vmErr error, err error) { +func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrHash, args TransactionArgs) (acl types.AccessList, gasUsed uint64, vmErr error, err error) { // Retrieve the execution context db, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if db == nil || err != nil { @@ -1361,21 +1312,15 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH if args.To != nil { to = *args.To } else { - to = crypto.CreateAddress(args.From, uint64(*args.Nonce)) - } - var input []byte - if args.Input != nil { - input = *args.Input - } else if args.Data != nil { - input = *args.Data + to = crypto.CreateAddress(args.from(), uint64(*args.Nonce)) } // Retrieve the precompiles since they don't need to be added to the access list precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number)) // Create an initial tracer - prevTracer := vm.NewAccessListTracer(nil, args.From, to, precompiles) + prevTracer := vm.NewAccessListTracer(nil, args.from(), to, precompiles) if args.AccessList != nil { - prevTracer = vm.NewAccessListTracer(*args.AccessList, args.From, to, precompiles) + prevTracer = vm.NewAccessListTracer(*args.AccessList, args.from(), to, precompiles) } for { // Retrieve the current access list to expand @@ -1393,10 +1338,10 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH } // Copy the original db so we don't modify it statedb := db.Copy() - msg := types.NewMessage(args.From, args.To, uint64(*args.Nonce), args.Value.ToInt(), uint64(*args.Gas), args.GasPrice.ToInt(), nil, nil, input, accessList, false) + msg := types.NewMessage(args.from(), args.To, uint64(*args.Nonce), args.Value.ToInt(), uint64(*args.Gas), args.GasPrice.ToInt(), nil, nil, args.data(), accessList, false) // Apply the transaction with the access list tracer - tracer := vm.NewAccessListTracer(accessList, args.From, to, precompiles) + tracer := vm.NewAccessListTracer(accessList, args.from(), to, precompiles) config := 
vm.Config{Tracer: tracer, Debug: true} vmenv, _, err := b.GetEVM(ctx, msg, statedb, header, &config) if err != nil { @@ -1597,123 +1542,6 @@ func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transacti return wallet.SignTx(account, tx, s.b.ChainConfig().ChainID) } -// SendTxArgs represents the arguments to sumbit a new transaction into the transaction pool. -type SendTxArgs struct { - From common.Address `json:"from"` - To *common.Address `json:"to"` - Gas *hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - Value *hexutil.Big `json:"value"` - Nonce *hexutil.Uint64 `json:"nonce"` - // We accept "data" and "input" for backwards-compatibility reasons. "input" is the - // newer name and should be preferred by clients. - Data *hexutil.Bytes `json:"data"` - Input *hexutil.Bytes `json:"input"` - - // For non-legacy transactions - AccessList *types.AccessList `json:"accessList,omitempty"` - ChainID *hexutil.Big `json:"chainId,omitempty"` -} - -// setDefaults fills in default values for unspecified tx fields. -func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error { - if args.GasPrice == nil { - price, err := b.SuggestPrice(ctx) - if err != nil { - return err - } - args.GasPrice = (*hexutil.Big)(price) - } - if args.Value == nil { - args.Value = new(hexutil.Big) - } - if args.Nonce == nil { - nonce, err := b.GetPoolNonce(ctx, args.From) - if err != nil { - return err - } - args.Nonce = (*hexutil.Uint64)(&nonce) - } - if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) { - return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`) - } - if args.To == nil { - // Contract creation - var input []byte - if args.Data != nil { - input = *args.Data - } else if args.Input != nil { - input = *args.Input - } - if len(input) == 0 { - return errors.New(`contract creation without any data provided`) - } - } - // Estimate the gas usage if necessary. - if args.Gas == nil { - // For backwards-compatibility reason, we try both input and data - // but input is preferred. - input := args.Input - if input == nil { - input = args.Data - } - callArgs := CallArgs{ - From: &args.From, // From shouldn't be nil - To: args.To, - GasPrice: args.GasPrice, - Value: args.Value, - Data: input, - AccessList: args.AccessList, - } - pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap()) - if err != nil { - return err - } - args.Gas = &estimated - log.Trace("Estimate gas usage automatically", "gas", args.Gas) - } - if args.ChainID == nil { - id := (*hexutil.Big)(b.ChainConfig().ChainID) - args.ChainID = id - } - return nil -} - -// toTransaction converts the arguments to a transaction. -// This assumes that setDefaults has been called. 
-func (args *SendTxArgs) toTransaction() *types.Transaction { - var input []byte - if args.Input != nil { - input = *args.Input - } else if args.Data != nil { - input = *args.Data - } - var data types.TxData - if args.AccessList == nil { - data = &types.LegacyTx{ - To: args.To, - Nonce: uint64(*args.Nonce), - Gas: uint64(*args.Gas), - GasPrice: (*big.Int)(args.GasPrice), - Value: (*big.Int)(args.Value), - Data: input, - } - } else { - data = &types.AccessListTx{ - To: args.To, - ChainID: (*big.Int)(args.ChainID), - Nonce: uint64(*args.Nonce), - Gas: uint64(*args.Gas), - GasPrice: (*big.Int)(args.GasPrice), - Value: (*big.Int)(args.Value), - Data: input, - AccessList: *args.AccessList, - } - } - return types.NewTx(data) -} - // SubmitTransaction is a helper function that submits tx to txPool and logs a message. func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (common.Hash, error) { // If the transaction fee cap is already specified, ensure the @@ -1746,9 +1574,9 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c // SendTransaction creates a transaction for the given argument, sign it and submit it to the // transaction pool. -func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args SendTxArgs) (common.Hash, error) { +func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args TransactionArgs) (common.Hash, error) { // Look up the wallet containing the requested signer - account := accounts.Account{Address: args.From} + account := accounts.Account{Address: args.from()} wallet, err := s.b.AccountManager().Find(account) if err != nil { @@ -1758,8 +1586,8 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen if args.Nonce == nil { // Hold the addresse's mutex around signing to prevent concurrent assignment of // the same nonce to multiple accounts. - s.nonceLock.LockAddr(args.From) - defer s.nonceLock.UnlockAddr(args.From) + s.nonceLock.LockAddr(args.from()) + defer s.nonceLock.UnlockAddr(args.from()) } // Set some sanity defaults and terminate on failure @@ -1778,7 +1606,7 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen // FillTransaction fills the defaults (nonce, gas, gasPrice) on a given unsigned transaction, // and returns it to the caller for further processing (signing + broadcast) -func (s *PublicTransactionPoolAPI) FillTransaction(ctx context.Context, args SendTxArgs) (*SignTransactionResult, error) { +func (s *PublicTransactionPoolAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) { // Set some sanity defaults and terminate on failure if err := args.setDefaults(ctx, s.b); err != nil { return nil, err @@ -1836,7 +1664,7 @@ type SignTransactionResult struct { // SignTransaction will sign the given transaction with the from account. // The node needs to have the private key of the account corresponding with // the given from address and it needs to be unlocked. 
-func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args SendTxArgs) (*SignTransactionResult, error) { +func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) { if args.Gas == nil { return nil, fmt.Errorf("gas not specified") } @@ -1853,7 +1681,7 @@ func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args Sen if err := checkTxFee(args.GasPrice.ToInt(), uint64(*args.Gas), s.b.RPCTxFeeCap()); err != nil { return nil, err } - tx, err := s.sign(args.From, args.toTransaction()) + tx, err := s.sign(args.from(), args.toTransaction()) if err != nil { return nil, err } @@ -1889,7 +1717,7 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, err // Resend accepts an existing transaction and a new gas price and limit. It will remove // the given transaction from the pool and reinsert it with the new gas price and limit. -func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) { +func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs TransactionArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) { if sendArgs.Nonce == nil { return common.Hash{}, fmt.Errorf("missing transaction nonce in transaction spec") } @@ -1918,7 +1746,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr for _, p := range pending { wantSigHash := s.signer.Hash(matchTx) pFrom, err := types.Sender(s.signer, p) - if err == nil && pFrom == sendArgs.From && s.signer.Hash(p) == wantSigHash { + if err == nil && pFrom == sendArgs.from() && s.signer.Hash(p) == wantSigHash { // Match. Re-sign and send the transaction. if gasPrice != nil && (*big.Int)(gasPrice).Sign() != 0 { sendArgs.GasPrice = gasPrice @@ -1926,7 +1754,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr if gasLimit != nil && *gasLimit != 0 { sendArgs.Gas = gasLimit } - signedTx, err := s.sign(sendArgs.From, sendArgs.toTransaction()) + signedTx, err := s.sign(sendArgs.from(), sendArgs.toTransaction()) if err != nil { return common.Hash{}, err } diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go new file mode 100644 index 000000000000..4385a9e97ac3 --- /dev/null +++ b/internal/ethapi/transaction_args.go @@ -0,0 +1,185 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package ethapi + +import ( + "bytes" + "context" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" +) + +// TransactionArgs represents the arguments to construct a new transaction +// or a message call. +type TransactionArgs struct { + From *common.Address `json:"from"` + To *common.Address `json:"to"` + Gas *hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + Value *hexutil.Big `json:"value"` + Nonce *hexutil.Uint64 `json:"nonce"` + + // We accept "data" and "input" for backwards-compatibility reasons. + // "input" is the newer name and should be preferred by clients. + // Issue detail: https://github.com/ethereum/go-ethereum/issues/15628 + Data *hexutil.Bytes `json:"data"` + Input *hexutil.Bytes `json:"input"` + + // For non-legacy transactions + AccessList *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` +} + +// from retrieves the transaction sender address. +func (arg *TransactionArgs) from() common.Address { + if arg.From == nil { + return common.Address{} + } + return *arg.From +} + +// data retrieves the transaction calldata. Input field is preferred. +func (arg *TransactionArgs) data() []byte { + if arg.Input != nil { + return *arg.Input + } + if arg.Data != nil { + return *arg.Data + } + return nil +} + +// setDefaults fills in default values for unspecified tx fields. +func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { + if args.GasPrice == nil { + price, err := b.SuggestPrice(ctx) + if err != nil { + return err + } + args.GasPrice = (*hexutil.Big)(price) + } + if args.Value == nil { + args.Value = new(hexutil.Big) + } + if args.Nonce == nil { + nonce, err := b.GetPoolNonce(ctx, args.from()) + if err != nil { + return err + } + args.Nonce = (*hexutil.Uint64)(&nonce) + } + if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) { + return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`) + } + if args.To == nil && len(args.data()) == 0 { + return errors.New(`contract creation without any data provided`) + } + // Estimate the gas usage if necessary. + if args.Gas == nil { + // These fields are immutable during the estimation, safe to + // pass the pointer directly. + callArgs := TransactionArgs{ + From: args.From, + To: args.To, + GasPrice: args.GasPrice, + Value: args.Value, + Data: args.Data, + AccessList: args.AccessList, + } + pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) + estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap()) + if err != nil { + return err + } + args.Gas = &estimated + log.Trace("Estimate gas usage automatically", "gas", args.Gas) + } + if args.ChainID == nil { + id := (*hexutil.Big)(b.ChainConfig().ChainID) + args.ChainID = id + } + return nil +} + +// ToMessage converts TransactionArgs to the Message type used by the core evm +func (args *TransactionArgs) ToMessage(globalGasCap uint64) types.Message { + // Set sender address or use zero address if none specified. 
+ addr := args.from() + + // Set default gas & gas price if none were set + gas := globalGasCap + if gas == 0 { + gas = uint64(math.MaxUint64 / 2) + } + if args.Gas != nil { + gas = uint64(*args.Gas) + } + if globalGasCap != 0 && globalGasCap < gas { + log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) + gas = globalGasCap + } + gasPrice := new(big.Int) + if args.GasPrice != nil { + gasPrice = args.GasPrice.ToInt() + } + value := new(big.Int) + if args.Value != nil { + value = args.Value.ToInt() + } + data := args.data() + var accessList types.AccessList + if args.AccessList != nil { + accessList = *args.AccessList + } + msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, nil, nil, data, accessList, false) + return msg +} + +// toTransaction converts the arguments to a transaction. +// This assumes that setDefaults has been called. +func (args *TransactionArgs) toTransaction() *types.Transaction { + var data types.TxData + if args.AccessList == nil { + data = &types.LegacyTx{ + To: args.To, + Nonce: uint64(*args.Nonce), + Gas: uint64(*args.Gas), + GasPrice: (*big.Int)(args.GasPrice), + Value: (*big.Int)(args.Value), + Data: args.data(), + } + } else { + data = &types.AccessListTx{ + To: args.To, + ChainID: (*big.Int)(args.ChainID), + Nonce: uint64(*args.Nonce), + Gas: uint64(*args.Gas), + GasPrice: (*big.Int)(args.GasPrice), + Value: (*big.Int)(args.Value), + Data: args.data(), + AccessList: *args.AccessList, + } + } + return types.NewTx(data) +} From 750115ff3903a4d54267e0934a3391e4e2c2e84a Mon Sep 17 00:00:00 2001 From: meowsbits Date: Tue, 25 May 2021 15:37:30 -0500 Subject: [PATCH 051/208] p2p/nat: skip TestUPNP in non-CI environments if discover fails (#22877) Fixes #21476 --- p2p/nat/natupnp_test.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/p2p/nat/natupnp_test.go b/p2p/nat/natupnp_test.go index 79f6d25ae87f..17483a70367c 100644 --- a/p2p/nat/natupnp_test.go +++ b/p2p/nat/natupnp_test.go @@ -21,6 +21,7 @@ import ( "io" "net" "net/http" + "os" "runtime" "strings" "testing" @@ -162,7 +163,11 @@ func TestUPNP_DDWRT(t *testing.T) { // Attempt to discover the fake device. discovered := discoverUPnP() if discovered == nil { - t.Fatalf("not discovered") + if os.Getenv("CI") != "" { + t.Fatalf("not discovered") + } else { + t.Skipf("UPnP not discovered (known issue, see https://github.com/ethereum/go-ethereum/issues/21476)") + } } upnp, _ := discovered.(*upnp) if upnp.service != "IGDv1-IP1" { From 6c7d6cf886484805ae218af9b372c96e3ca8315c Mon Sep 17 00:00:00 2001 From: Eugene Lepeico Date: Tue, 25 May 2021 23:47:14 +0300 Subject: [PATCH 052/208] tests: get test name from testing.T (#22941) There were 2 TODOs about that fix after Golang 1.8 release. It's here for 3 years already, so now should be the right time. 
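As an illustrative aside (not part of the patch itself), here is a minimal Go sketch of the pattern this change adopts: since Go 1.8 the name of the currently running test or subtest is available from the *testing.T value via t.Name(), so helpers no longer need a separate name string threaded through every call. The expectFail helper and TestExample below are hypothetical names used only for this example.

    package example

    import (
        "strings"
        "testing"
    )

    // expectFail reports whether the currently running (sub)test matches a
    // known-failure pattern, keyed on t.Name() (available since Go 1.8)
    // instead of a name string passed alongside t.
    func expectFail(t *testing.T, pattern string) bool {
        return strings.Contains(t.Name(), pattern)
    }

    func TestExample(t *testing.T) {
        t.Run("Frontier/trie", func(t *testing.T) {
            // t.Name() here is "TestExample/Frontier/trie".
            if expectFail(t, "Frontier") {
                t.Skipf("known failure for %s", t.Name())
            }
        })
    }

In the actual change below, the test matcher's findConfig and checkFailure helpers take the *testing.T and match their configured patterns against t.Name() in the same way, which is why the extra name arguments disappear from the call sites.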
--- tests/block_test.go | 4 ++-- tests/difficulty_test.go | 4 ++-- tests/init_test.go | 10 ++++------ tests/rlp_test.go | 2 +- tests/state_test.go | 9 ++++----- tests/transaction_test.go | 2 +- tests/vm_test.go | 4 ++-- 7 files changed, 16 insertions(+), 19 deletions(-) diff --git a/tests/block_test.go b/tests/block_test.go index 4820ba733fe0..f7fbaea2a445 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -48,10 +48,10 @@ func TestBlockchain(t *testing.T) { // using 4.6 TGas bt.skipLoad(`.*randomStatetest94.json.*`) bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { - if err := bt.checkFailure(t, name+"/trie", test.Run(false)); err != nil { + if err := bt.checkFailure(t, test.Run(false)); err != nil { t.Errorf("test without snapshotter failed: %v", err) } - if err := bt.checkFailure(t, name+"/snap", test.Run(true)); err != nil { + if err := bt.checkFailure(t, test.Run(true)); err != nil { t.Errorf("test with snapshotter failed: %v", err) } }) diff --git a/tests/difficulty_test.go b/tests/difficulty_test.go index e80cd248bc7d..acbf96e71247 100644 --- a/tests/difficulty_test.go +++ b/tests/difficulty_test.go @@ -79,12 +79,12 @@ func TestDifficulty(t *testing.T) { dt.config("difficulty.json", mainnetChainConfig) dt.walk(t, difficultyTestDir, func(t *testing.T, name string, test *DifficultyTest) { - cfg := dt.findConfig(name) + cfg := dt.findConfig(t) if test.ParentDifficulty.Cmp(params.MinimumDifficulty) < 0 { t.Skip("difficulty below minimum") return } - if err := dt.checkFailure(t, name, test.Run(cfg)); err != nil { + if err := dt.checkFailure(t, test.Run(cfg)); err != nil { t.Error(err) } }) diff --git a/tests/init_test.go b/tests/init_test.go index 5af3e44bff96..dc923dc75e60 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -167,10 +167,9 @@ func (tm *testMatcher) findSkip(name string) (reason string, skipload bool) { } // findConfig returns the chain config matching defined patterns. -func (tm *testMatcher) findConfig(name string) *params.ChainConfig { - // TODO(fjl): name can be derived from testing.T when min Go version is 1.8 +func (tm *testMatcher) findConfig(t *testing.T) *params.ChainConfig { for _, m := range tm.configpat { - if m.p.MatchString(name) { + if m.p.MatchString(t.Name()) { return &m.config } } @@ -178,11 +177,10 @@ func (tm *testMatcher) findConfig(name string) *params.ChainConfig { } // checkFailure checks whether a failure is expected. 
-func (tm *testMatcher) checkFailure(t *testing.T, name string, err error) error { - // TODO(fjl): name can be derived from t when min Go version is 1.8 +func (tm *testMatcher) checkFailure(t *testing.T, err error) error { failReason := "" for _, m := range tm.failpat { - if m.p.MatchString(name) { + if m.p.MatchString(t.Name()) { failReason = m.reason break } diff --git a/tests/rlp_test.go b/tests/rlp_test.go index 1601625df569..79a1683eb23a 100644 --- a/tests/rlp_test.go +++ b/tests/rlp_test.go @@ -24,7 +24,7 @@ func TestRLP(t *testing.T) { t.Parallel() tm := new(testMatcher) tm.walk(t, rlpTestDir, func(t *testing.T, name string, test *RLPTest) { - if err := tm.checkFailure(t, name, test.Run()); err != nil { + if err := tm.checkFailure(t, test.Run()); err != nil { t.Error(err) } }) diff --git a/tests/state_test.go b/tests/state_test.go index b77a898c21f6..43009afdd533 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -64,12 +64,11 @@ func TestState(t *testing.T) { for _, subtest := range test.Subtests() { subtest := subtest key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) - name := name + "/" + key t.Run(key+"/trie", func(t *testing.T) { withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { _, _, err := test.Run(subtest, vmconfig, false) - return st.checkFailure(t, name+"/trie", err) + return st.checkFailure(t, err) }) }) t.Run(key+"/snap", func(t *testing.T) { @@ -78,7 +77,7 @@ func TestState(t *testing.T) { if _, err := snaps.Journal(statedb.IntermediateRoot(false)); err != nil { return err } - return st.checkFailure(t, name+"/snap", err) + return st.checkFailure(t, err) }) }) } @@ -117,6 +116,6 @@ func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) { } else { t.Log("EVM operation log:\n" + buf.String()) } - //t.Logf("EVM output: 0x%x", tracer.Output()) - //t.Logf("EVM error: %v", tracer.Error()) + // t.Logf("EVM output: 0x%x", tracer.Output()) + // t.Logf("EVM error: %v", tracer.Error()) } diff --git a/tests/transaction_test.go b/tests/transaction_test.go index 0e3670d04bf7..cb0f2623189c 100644 --- a/tests/transaction_test.go +++ b/tests/transaction_test.go @@ -47,7 +47,7 @@ func TestTransaction(t *testing.T) { txt.skipLoad("^ttValue/TransactionWithHighValueOverflow.json") txt.walk(t, transactionTestDir, func(t *testing.T, name string, test *TransactionTest) { cfg := params.MainnetChainConfig - if err := txt.checkFailure(t, name, test.Run(cfg)); err != nil { + if err := txt.checkFailure(t, test.Run(cfg)); err != nil { t.Error(err) } }) diff --git a/tests/vm_test.go b/tests/vm_test.go index fb839827acc9..2150df9e23a5 100644 --- a/tests/vm_test.go +++ b/tests/vm_test.go @@ -30,10 +30,10 @@ func TestVM(t *testing.T) { vmt.walk(t, vmTestDir, func(t *testing.T, name string, test *VMTest) { withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error { - return vmt.checkFailure(t, name+"/trie", test.Run(vmconfig, false)) + return vmt.checkFailure(t, test.Run(vmconfig, false)) }) withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error { - return vmt.checkFailure(t, name+"/snap", test.Run(vmconfig, true)) + return vmt.checkFailure(t, test.Run(vmconfig, true)) }) }) } From 49bde05a55260469abcbeb64fd3c7b85c7536b5c Mon Sep 17 00:00:00 2001 From: rene <41963722+renaynay@users.noreply.github.com> Date: Tue, 25 May 2021 23:09:11 +0200 Subject: [PATCH 053/208] cmd/devp2p: refactor eth test suite (#22843) This PR refactors the eth test suite to make it more readable and easier to use. 
Some notable differences: - A new file helpers.go stores all of the methods used between both eth66 and eth65 and below tests, as well as methods shared among many test functions. - suite.go now contains all of the test functions for both eth65 tests and eth66 tests. - The utesting.T object doesn't get passed through to other helper methods, but is instead only used within the scope of the test function, whereas helper methods return errors, so only the test function itself can fatal out in the case of an error. - The full test suite now only takes 13.5 seconds to run. --- cmd/devp2p/internal/ethtest/chain.go | 20 +- cmd/devp2p/internal/ethtest/eth66_suite.go | 521 ----------- .../internal/ethtest/eth66_suiteHelpers.go | 333 ------- cmd/devp2p/internal/ethtest/helpers.go | 635 +++++++++++++ cmd/devp2p/internal/ethtest/suite.go | 844 +++++++++++------- cmd/devp2p/internal/ethtest/transaction.go | 298 +++++-- cmd/devp2p/internal/ethtest/types.go | 238 ++--- 7 files changed, 1467 insertions(+), 1422 deletions(-) delete mode 100644 cmd/devp2p/internal/ethtest/eth66_suite.go delete mode 100644 cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go create mode 100644 cmd/devp2p/internal/ethtest/helpers.go diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go index 83c55181ada4..34a20c515b23 100644 --- a/cmd/devp2p/internal/ethtest/chain.go +++ b/cmd/devp2p/internal/ethtest/chain.go @@ -54,10 +54,24 @@ func (c *Chain) Len() int { return len(c.blocks) } -// TD calculates the total difficulty of the chain. -func (c *Chain) TD(height int) *big.Int { // TODO later on channge scheme so that the height is included in range +// TD calculates the total difficulty of the chain at the +// chain head. +func (c *Chain) TD() *big.Int { sum := big.NewInt(0) - for _, block := range c.blocks[:height] { + for _, block := range c.blocks[:c.Len()] { + sum.Add(sum, block.Difficulty()) + } + return sum +} + +// TotalDifficultyAt calculates the total difficulty of the chain +// at the given block height. +func (c *Chain) TotalDifficultyAt(height int) *big.Int { + sum := big.NewInt(0) + if height >= c.Len() { + return sum + } + for _, block := range c.blocks[:height+1] { sum.Add(sum, block.Difficulty()) } return sum diff --git a/cmd/devp2p/internal/ethtest/eth66_suite.go b/cmd/devp2p/internal/ethtest/eth66_suite.go deleted file mode 100644 index 903a90c7eb2b..000000000000 --- a/cmd/devp2p/internal/ethtest/eth66_suite.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package ethtest - -import ( - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/p2p" -) - -// Is_66 checks if the node supports the eth66 protocol version, -// and if not, exists the test suite -func (s *Suite) Is_66(t *utesting.T) { - conn := s.dial66(t) - conn.handshake(t) - if conn.negotiatedProtoVersion < 66 { - t.Fail() - } -} - -// TestStatus_66 attempts to connect to the given node and exchange -// a status message with it on the eth66 protocol, and then check to -// make sure the chain head is correct. -func (s *Suite) TestStatus_66(t *utesting.T) { - conn := s.dial66(t) - defer conn.Close() - // get protoHandshake - conn.handshake(t) - // get status - switch msg := conn.statusExchange66(t, s.chain).(type) { - case *Status: - status := *msg - if status.ProtocolVersion != uint32(66) { - t.Fatalf("mismatch in version: wanted 66, got %d", status.ProtocolVersion) - } - t.Logf("got status message: %s", pretty.Sdump(msg)) - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } -} - -// TestGetBlockHeaders_66 tests whether the given node can respond to -// an eth66 `GetBlockHeaders` request and that the response is accurate. -func (s *Suite) TestGetBlockHeaders_66(t *utesting.T) { - conn := s.setupConnection66(t) - defer conn.Close() - // get block headers - req := ð.GetBlockHeadersPacket66{ - RequestId: 3, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Hash: s.chain.blocks[1].Hash(), - }, - Amount: 2, - Skip: 1, - Reverse: false, - }, - } - // write message - headers, err := s.getBlockHeaders66(conn, req, req.RequestId) - if err != nil { - t.Fatalf("could not get block headers: %v", err) - } - // check for correct headers - if !headersMatch(t, s.chain, headers) { - t.Fatal("received wrong header(s)") - } -} - -// TestSimultaneousRequests_66 sends two simultaneous `GetBlockHeader` requests -// with different request IDs and checks to make sure the node responds with the correct -// headers per request. 
-func (s *Suite) TestSimultaneousRequests_66(t *utesting.T) { - // create two connections - conn := s.setupConnection66(t) - defer conn.Close() - // create two requests - req1 := ð.GetBlockHeadersPacket66{ - RequestId: 111, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Hash: s.chain.blocks[1].Hash(), - }, - Amount: 2, - Skip: 1, - Reverse: false, - }, - } - req2 := ð.GetBlockHeadersPacket66{ - RequestId: 222, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Hash: s.chain.blocks[1].Hash(), - }, - Amount: 4, - Skip: 1, - Reverse: false, - }, - } - // write first request - if err := conn.write66(req1, GetBlockHeaders{}.Code()); err != nil { - t.Fatalf("failed to write to connection: %v", err) - } - // write second request - if err := conn.write66(req2, GetBlockHeaders{}.Code()); err != nil { - t.Fatalf("failed to write to connection: %v", err) - } - // wait for responses - headers1, err := s.waitForBlockHeadersResponse66(conn, req1.RequestId) - if err != nil { - t.Fatalf("error while waiting for block headers: %v", err) - } - headers2, err := s.waitForBlockHeadersResponse66(conn, req2.RequestId) - if err != nil { - t.Fatalf("error while waiting for block headers: %v", err) - } - // check headers of both responses - if !headersMatch(t, s.chain, headers1) { - t.Fatalf("wrong header(s) in response to req1: got %v", headers1) - } - if !headersMatch(t, s.chain, headers2) { - t.Fatalf("wrong header(s) in response to req2: got %v", headers2) - } -} - -// TestBroadcast_66 tests whether a block announcement is correctly -// propagated to the given node's peer(s) on the eth66 protocol. -func (s *Suite) TestBroadcast_66(t *utesting.T) { - s.sendNextBlock66(t) -} - -// TestGetBlockBodies_66 tests whether the given node can respond to -// a `GetBlockBodies` request and that the response is accurate over -// the eth66 protocol. -func (s *Suite) TestGetBlockBodies_66(t *utesting.T) { - conn := s.setupConnection66(t) - defer conn.Close() - // create block bodies request - id := uint64(55) - req := ð.GetBlockBodiesPacket66{ - RequestId: id, - GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ - s.chain.blocks[54].Hash(), - s.chain.blocks[75].Hash(), - }, - } - if err := conn.write66(req, GetBlockBodies{}.Code()); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - - reqID, msg := conn.readAndServe66(s.chain, timeout) - switch msg := msg.(type) { - case BlockBodies: - if reqID != req.RequestId { - t.Fatalf("request ID mismatch: wanted %d, got %d", req.RequestId, reqID) - } - t.Logf("received %d block bodies", len(msg)) - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } -} - -// TestLargeAnnounce_66 tests the announcement mechanism with a large block. 
-func (s *Suite) TestLargeAnnounce_66(t *utesting.T) { - nextBlock := len(s.chain.blocks) - blocks := []*NewBlock{ - { - Block: largeBlock(), - TD: s.fullChain.TD(nextBlock + 1), - }, - { - Block: s.fullChain.blocks[nextBlock], - TD: largeNumber(2), - }, - { - Block: largeBlock(), - TD: largeNumber(2), - }, - { - Block: s.fullChain.blocks[nextBlock], - TD: s.fullChain.TD(nextBlock + 1), - }, - } - - for i, blockAnnouncement := range blocks[0:3] { - t.Logf("Testing malicious announcement: %v\n", i) - sendConn := s.setupConnection66(t) - if err := sendConn.Write(blockAnnouncement); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // Invalid announcement, check that peer disconnected - switch msg := sendConn.ReadAndServe(s.chain, time.Second*8).(type) { - case *Disconnect: - case *Error: - break - default: - t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) - } - sendConn.Close() - } - // Test the last block as a valid block - s.sendNextBlock66(t) -} - -func (s *Suite) TestOldAnnounce_66(t *utesting.T) { - sendConn, recvConn := s.setupConnection66(t), s.setupConnection66(t) - defer sendConn.Close() - defer recvConn.Close() - - s.oldAnnounce(t, sendConn, recvConn) -} - -// TestMaliciousHandshake_66 tries to send malicious data during the handshake. -func (s *Suite) TestMaliciousHandshake_66(t *utesting.T) { - conn := s.dial66(t) - defer conn.Close() - // write hello to client - pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] - handshakes := []*Hello{ - { - Version: 5, - Caps: []p2p.Cap{ - {Name: largeString(2), Version: 66}, - }, - ID: pub0, - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - {Name: "eth", Version: 66}, - }, - ID: append(pub0, byte(0)), - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - {Name: "eth", Version: 66}, - }, - ID: append(pub0, pub0...), - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - {Name: "eth", Version: 66}, - }, - ID: largeBuffer(2), - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: largeString(2), Version: 66}, - }, - ID: largeBuffer(2), - }, - } - for i, handshake := range handshakes { - t.Logf("Testing malicious handshake %v\n", i) - // Init the handshake - if err := conn.Write(handshake); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // check that the peer disconnected - timeout := 20 * time.Second - // Discard one hello - for i := 0; i < 2; i++ { - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *Disconnect: - case *Error: - case *Hello: - // Hello's are sent concurrently, so ignore them - continue - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } - } - // Dial for the next round - conn = s.dial66(t) - } -} - -// TestMaliciousStatus_66 sends a status package with a large total difficulty. 
-func (s *Suite) TestMaliciousStatus_66(t *utesting.T) { - conn := s.dial66(t) - defer conn.Close() - // get protoHandshake - conn.handshake(t) - status := &Status{ - ProtocolVersion: uint32(66), - NetworkID: s.chain.chainConfig.ChainID.Uint64(), - TD: largeNumber(2), - Head: s.chain.blocks[s.chain.Len()-1].Hash(), - Genesis: s.chain.blocks[0].Hash(), - ForkID: s.chain.ForkID(), - } - // get status - switch msg := conn.statusExchange(t, s.chain, status).(type) { - case *Status: - t.Logf("%+v\n", msg) - default: - t.Fatalf("expected status, got: %#v ", msg) - } - // wait for disconnect - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *Disconnect: - case *Error: - return - default: - t.Fatalf("expected disconnect, got: %s", pretty.Sdump(msg)) - } -} - -func (s *Suite) TestTransaction_66(t *utesting.T) { - tests := []*types.Transaction{ - getNextTxFromChain(t, s), - unknownTx(t, s), - } - for i, tx := range tests { - t.Logf("Testing tx propagation: %v\n", i) - sendSuccessfulTx66(t, s, tx) - } -} - -func (s *Suite) TestMaliciousTx_66(t *utesting.T) { - badTxs := []*types.Transaction{ - getOldTxFromChain(t, s), - invalidNonceTx(t, s), - hugeAmount(t, s), - hugeGasPrice(t, s), - hugeData(t, s), - } - sendConn := s.setupConnection66(t) - defer sendConn.Close() - // set up receiving connection before sending txs to make sure - // no announcements are missed - recvConn := s.setupConnection66(t) - defer recvConn.Close() - - for i, tx := range badTxs { - t.Logf("Testing malicious tx propagation: %v\n", i) - if err := sendConn.Write(&Transactions{tx}); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - - } - // check to make sure bad txs aren't propagated - waitForTxPropagation(t, s, badTxs, recvConn) -} - -// TestZeroRequestID_66 checks that a request ID of zero is still handled -// by the node. -func (s *Suite) TestZeroRequestID_66(t *utesting.T) { - conn := s.setupConnection66(t) - defer conn.Close() - - req := ð.GetBlockHeadersPacket66{ - RequestId: 0, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Number: 0, - }, - Amount: 2, - }, - } - headers, err := s.getBlockHeaders66(conn, req, req.RequestId) - if err != nil { - t.Fatalf("could not get block headers: %v", err) - } - if !headersMatch(t, s.chain, headers) { - t.Fatal("received wrong header(s)") - } -} - -// TestSameRequestID_66 sends two requests with the same request ID -// concurrently to a single node. 
-func (s *Suite) TestSameRequestID_66(t *utesting.T) { - conn := s.setupConnection66(t) - // create two requests with the same request ID - reqID := uint64(1234) - request1 := ð.GetBlockHeadersPacket66{ - RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Number: 1, - }, - Amount: 2, - }, - } - request2 := ð.GetBlockHeadersPacket66{ - RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Number: 33, - }, - Amount: 2, - }, - } - // write the first request - err := conn.write66(request1, GetBlockHeaders{}.Code()) - if err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // perform second request - headers2, err := s.getBlockHeaders66(conn, request2, reqID) - if err != nil { - t.Fatalf("could not get block headers: %v", err) - return - } - // wait for response to first request - headers1, err := s.waitForBlockHeadersResponse66(conn, reqID) - if err != nil { - t.Fatalf("could not get BlockHeaders response: %v", err) - } - // check if headers match - if !headersMatch(t, s.chain, headers1) || !headersMatch(t, s.chain, headers2) { - t.Fatal("received wrong header(s)") - } -} - -// TestLargeTxRequest_66 tests whether a node can fulfill a large GetPooledTransactions -// request. -func (s *Suite) TestLargeTxRequest_66(t *utesting.T) { - // send the next block to ensure the node is no longer syncing and is able to accept - // txs - s.sendNextBlock66(t) - // send 2000 transactions to the node - hashMap, txs := generateTxs(t, s, 2000) - sendConn := s.setupConnection66(t) - defer sendConn.Close() - - sendMultipleSuccessfulTxs(t, s, sendConn, txs) - // set up connection to receive to ensure node is peered with the receiving connection - // before tx request is sent - recvConn := s.setupConnection66(t) - defer recvConn.Close() - // create and send pooled tx request - hashes := make([]common.Hash, 0) - for _, hash := range hashMap { - hashes = append(hashes, hash) - } - getTxReq := ð.GetPooledTransactionsPacket66{ - RequestId: 1234, - GetPooledTransactionsPacket: hashes, - } - if err := recvConn.write66(getTxReq, GetPooledTransactions{}.Code()); err != nil { - t.Fatalf("could not write to conn: %v", err) - } - // check that all received transactions match those that were sent to node - switch msg := recvConn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { - case PooledTransactions: - for _, gotTx := range msg { - if _, exists := hashMap[gotTx.Hash()]; !exists { - t.Fatalf("unexpected tx received: %v", gotTx.Hash()) - } - } - default: - t.Fatalf("unexpected %s", pretty.Sdump(msg)) - } -} - -// TestNewPooledTxs_66 tests whether a node will do a GetPooledTransactions -// request upon receiving a NewPooledTransactionHashes announcement. 
-func (s *Suite) TestNewPooledTxs_66(t *utesting.T) { - // send the next block to ensure the node is no longer syncing and is able to accept - // txs - s.sendNextBlock66(t) - // generate 50 txs - hashMap, _ := generateTxs(t, s, 50) - // create new pooled tx hashes announcement - hashes := make([]common.Hash, 0) - for _, hash := range hashMap { - hashes = append(hashes, hash) - } - announce := NewPooledTransactionHashes(hashes) - // send announcement - conn := s.setupConnection66(t) - defer conn.Close() - if err := conn.Write(announce); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // wait for GetPooledTxs request - for { - _, msg := conn.readAndServe66(s.chain, timeout) - switch msg := msg.(type) { - case GetPooledTransactions: - if len(msg) != len(hashes) { - t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg)) - } - return - case *NewPooledTransactionHashes, *NewBlock, *NewBlockHashes: - // ignore propagated txs and blocks from old tests - continue - default: - t.Fatalf("unexpected %s", pretty.Sdump(msg)) - } - } -} diff --git a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go b/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go deleted file mode 100644 index 3c5b22f0b5ef..000000000000 --- a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package ethtest - -import ( - "fmt" - "reflect" - "time" - - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/rlp" - "github.com/stretchr/testify/assert" -) - -func (c *Conn) statusExchange66(t *utesting.T, chain *Chain) Message { - status := &Status{ - ProtocolVersion: uint32(66), - NetworkID: chain.chainConfig.ChainID.Uint64(), - TD: chain.TD(chain.Len()), - Head: chain.blocks[chain.Len()-1].Hash(), - Genesis: chain.blocks[0].Hash(), - ForkID: chain.ForkID(), - } - return c.statusExchange(t, chain, status) -} - -func (s *Suite) dial66(t *utesting.T) *Conn { - conn, err := s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) - } - conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66}) - conn.ourHighestProtoVersion = 66 - return conn -} - -func (c *Conn) write66(req eth.Packet, code int) error { - payload, err := rlp.EncodeToBytes(req) - if err != nil { - return err - } - _, err = c.Conn.Write(uint64(code), payload) - return err -} - -func (c *Conn) read66() (uint64, Message) { - code, rawData, _, err := c.Conn.Read() - if err != nil { - return 0, errorf("could not read from connection: %v", err) - } - - var msg Message - - switch int(code) { - case (Hello{}).Code(): - msg = new(Hello) - - case (Ping{}).Code(): - msg = new(Ping) - case (Pong{}).Code(): - msg = new(Pong) - case (Disconnect{}).Code(): - msg = new(Disconnect) - case (Status{}).Code(): - msg = new(Status) - case (GetBlockHeaders{}).Code(): - ethMsg := new(eth.GetBlockHeadersPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, GetBlockHeaders(*ethMsg.GetBlockHeadersPacket) - case (BlockHeaders{}).Code(): - ethMsg := new(eth.BlockHeadersPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket) - case (GetBlockBodies{}).Code(): - ethMsg := new(eth.GetBlockBodiesPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket) - case (BlockBodies{}).Code(): - ethMsg := new(eth.BlockBodiesPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket) - case (NewBlock{}).Code(): - msg = new(NewBlock) - case (NewBlockHashes{}).Code(): - msg = new(NewBlockHashes) - case (Transactions{}).Code(): - msg = new(Transactions) - case (NewPooledTransactionHashes{}).Code(): - msg = new(NewPooledTransactionHashes) - case (GetPooledTransactions{}.Code()): - ethMsg := new(eth.GetPooledTransactionsPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket) - case (PooledTransactions{}.Code()): - ethMsg := new(eth.PooledTransactionsPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket) - default: - msg = errorf("invalid message 
code: %d", code) - } - - if msg != nil { - if err := rlp.DecodeBytes(rawData, msg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return 0, msg - } - return 0, errorf("invalid message: %s", string(rawData)) -} - -func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message { - for { - id, msg := c.readAndServe66(chain, timeout) - if id == requestID { - return msg - } - } -} - -// ReadAndServe serves GetBlockHeaders requests while waiting -// on another message from the node. -func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) { - start := time.Now() - for time.Since(start) < timeout { - c.SetReadDeadline(time.Now().Add(10 * time.Second)) - - reqID, msg := c.read66() - - switch msg := msg.(type) { - case *Ping: - c.Write(&Pong{}) - case *GetBlockHeaders: - headers, err := chain.GetHeaders(*msg) - if err != nil { - return 0, errorf("could not get headers for inbound header request: %v", err) - } - resp := ð.BlockHeadersPacket66{ - RequestId: reqID, - BlockHeadersPacket: eth.BlockHeadersPacket(headers), - } - if err := c.write66(resp, BlockHeaders{}.Code()); err != nil { - return 0, errorf("could not write to connection: %v", err) - } - default: - return reqID, msg - } - } - return 0, errorf("no message received within %v", timeout) -} - -func (s *Suite) setupConnection66(t *utesting.T) *Conn { - // create conn - sendConn := s.dial66(t) - sendConn.handshake(t) - sendConn.statusExchange66(t, s.chain) - return sendConn -} - -func (s *Suite) testAnnounce66(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) { - // Announce the block. - if err := sendConn.Write(blockAnnouncement); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - s.waitAnnounce66(t, receiveConn, blockAnnouncement) -} - -func (s *Suite) waitAnnounce66(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) { - for { - _, msg := conn.readAndServe66(s.chain, timeout) - switch msg := msg.(type) { - case *NewBlock: - t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block)) - assert.Equal(t, - blockAnnouncement.Block.Header(), msg.Block.Header(), - "wrong block header in announcement", - ) - assert.Equal(t, - blockAnnouncement.TD, msg.TD, - "wrong TD in announcement", - ) - return - case *NewBlockHashes: - blockHashes := *msg - t.Logf("received NewBlockHashes message: %s", pretty.Sdump(blockHashes)) - assert.Equal(t, blockAnnouncement.Block.Hash(), blockHashes[0].Hash, - "wrong block hash in announcement", - ) - return - case *NewPooledTransactionHashes: - // ignore old txs being propagated - continue - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } - } -} - -// waitForBlock66 waits for confirmation from the client that it has -// imported the given block. -func (c *Conn) waitForBlock66(block *types.Block) error { - defer c.SetReadDeadline(time.Time{}) - - c.SetReadDeadline(time.Now().Add(20 * time.Second)) - // note: if the node has not yet imported the block, it will respond - // to the GetBlockHeaders request with an empty BlockHeaders response, - // so the GetBlockHeaders request must be sent again until the BlockHeaders - // response contains the desired header. 
- for { - req := eth.GetBlockHeadersPacket66{ - RequestId: 54, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Hash: block.Hash(), - }, - Amount: 1, - }, - } - if err := c.write66(req, GetBlockHeaders{}.Code()); err != nil { - return err - } - - reqID, msg := c.read66() - // check message - switch msg := msg.(type) { - case BlockHeaders: - // check request ID - if reqID != req.RequestId { - return fmt.Errorf("request ID mismatch: wanted %d, got %d", req.RequestId, reqID) - } - for _, header := range msg { - if header.Number.Uint64() == block.NumberU64() { - return nil - } - } - time.Sleep(100 * time.Millisecond) - case *NewPooledTransactionHashes: - // ignore old announcements - continue - default: - return fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) - } - } -} - -func sendSuccessfulTx66(t *utesting.T, s *Suite, tx *types.Transaction) { - sendConn := s.setupConnection66(t) - defer sendConn.Close() - sendSuccessfulTxWithConn(t, s, tx, sendConn) -} - -// waitForBlockHeadersResponse66 waits for a BlockHeaders message with the given expected request ID -func (s *Suite) waitForBlockHeadersResponse66(conn *Conn, expectedID uint64) (BlockHeaders, error) { - reqID, msg := conn.readAndServe66(s.chain, timeout) - switch msg := msg.(type) { - case BlockHeaders: - if reqID != expectedID { - return nil, fmt.Errorf("request ID mismatch: wanted %d, got %d", expectedID, reqID) - } - return msg, nil - default: - return nil, fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) - } -} - -func (s *Suite) getBlockHeaders66(conn *Conn, req eth.Packet, expectedID uint64) (BlockHeaders, error) { - if err := conn.write66(req, GetBlockHeaders{}.Code()); err != nil { - return nil, fmt.Errorf("could not write to connection: %v", err) - } - return s.waitForBlockHeadersResponse66(conn, expectedID) -} - -func headersMatch(t *utesting.T, chain *Chain, headers BlockHeaders) bool { - mismatched := 0 - for _, header := range headers { - num := header.Number.Uint64() - t.Logf("received header (%d): %s", num, pretty.Sdump(header.Hash())) - if !reflect.DeepEqual(chain.blocks[int(num)].Header(), header) { - mismatched += 1 - t.Logf("received wrong header: %v", pretty.Sdump(header)) - } - } - return mismatched == 0 -} - -func (s *Suite) sendNextBlock66(t *utesting.T) { - sendConn, receiveConn := s.setupConnection66(t), s.setupConnection66(t) - defer sendConn.Close() - defer receiveConn.Close() - - // create new block announcement - nextBlock := len(s.chain.blocks) - blockAnnouncement := &NewBlock{ - Block: s.fullChain.blocks[nextBlock], - TD: s.fullChain.TD(nextBlock + 1), - } - // send announcement and wait for node to request the header - s.testAnnounce66(t, sendConn, receiveConn, blockAnnouncement) - // wait for client to update its chain - if err := receiveConn.waitForBlock66(s.fullChain.blocks[nextBlock]); err != nil { - t.Fatal(err) - } - // update test suite chain - s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) -} diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go new file mode 100644 index 000000000000..d99376124d2f --- /dev/null +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -0,0 +1,635 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethtest
+
+import (
+	"fmt"
+	"net"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/eth/protocols/eth"
+	"github.com/ethereum/go-ethereum/internal/utesting"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/p2p/rlpx"
+)
+
+var (
+	pretty = spew.ConfigState{
+		Indent:                  " ",
+		DisableCapacities:       true,
+		DisablePointerAddresses: true,
+		SortKeys:                true,
+	}
+	timeout = 20 * time.Second
+)
+
+// Is_66 checks if the node supports the eth66 protocol version,
+// and if not, exits the test suite.
+func (s *Suite) Is_66(t *utesting.T) {
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	if err := conn.handshake(); err != nil {
+		t.Fatalf("handshake failed: %v", err)
+	}
+	if conn.negotiatedProtoVersion < 66 {
+		t.Fail()
+	}
+}
+
+// dial attempts to dial the given node and perform a handshake,
+// returning the created Conn if successful.
+func (s *Suite) dial() (*Conn, error) {
+	// dial
+	fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP()))
+	if err != nil {
+		return nil, err
+	}
+	conn := Conn{Conn: rlpx.NewConn(fd, s.Dest.Pubkey())}
+	// do encHandshake
+	conn.ourKey, _ = crypto.GenerateKey()
+	_, err = conn.Handshake(conn.ourKey)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+	// set default p2p capabilities
+	conn.caps = []p2p.Cap{
+		{Name: "eth", Version: 64},
+		{Name: "eth", Version: 65},
+	}
+	conn.ourHighestProtoVersion = 65
+	return &conn, nil
+}
+
+// dial66 attempts to dial the given node and perform a handshake,
+// returning the created Conn with additional eth66 capabilities if
+// successful.
+func (s *Suite) dial66() (*Conn, error) {
+	conn, err := s.dial()
+	if err != nil {
+		return nil, fmt.Errorf("dial failed: %v", err)
+	}
+	conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66})
+	conn.ourHighestProtoVersion = 66
+	return conn, nil
+}
+
+// peer performs both the protocol handshake and the status message
+// exchange with the node in order to peer with it.
+func (c *Conn) peer(chain *Chain, status *Status) error {
+	if err := c.handshake(); err != nil {
+		return fmt.Errorf("handshake failed: %v", err)
+	}
+	if _, err := c.statusExchange(chain, status); err != nil {
+		return fmt.Errorf("status exchange failed: %v", err)
+	}
+	return nil
+}
+
+// handshake performs a protocol handshake with the node.
+func (c *Conn) handshake() error { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(10 * time.Second)) + // write hello to client + pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] + ourHandshake := &Hello{ + Version: 5, + Caps: c.caps, + ID: pub0, + } + if err := c.Write(ourHandshake); err != nil { + return fmt.Errorf("write to connection failed: %v", err) + } + // read hello from client + switch msg := c.Read().(type) { + case *Hello: + // set snappy if version is at least 5 + if msg.Version >= 5 { + c.SetSnappy(true) + } + c.negotiateEthProtocol(msg.Caps) + if c.negotiatedProtoVersion == 0 { + return fmt.Errorf("unexpected eth protocol version") + } + return nil + default: + return fmt.Errorf("bad handshake: %#v", msg) + } +} + +// negotiateEthProtocol sets the Conn's eth protocol version to highest +// advertised capability from peer. +func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { + var highestEthVersion uint + for _, capability := range caps { + if capability.Name != "eth" { + continue + } + if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { + highestEthVersion = capability.Version + } + } + c.negotiatedProtoVersion = highestEthVersion +} + +// statusExchange performs a `Status` message exchange with the given node. +func (c *Conn) statusExchange(chain *Chain, status *Status) (Message, error) { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(20 * time.Second)) + + // read status message from client + var message Message +loop: + for { + switch msg := c.Read().(type) { + case *Status: + if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want { + return nil, fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x", + want, chain.blocks[chain.Len()-1].NumberU64(), have) + } + if have, want := msg.TD.Cmp(chain.TD()), 0; have != want { + return nil, fmt.Errorf("wrong TD in status: have %v want %v", have, want) + } + if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { + return nil, fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want) + } + if have, want := msg.ProtocolVersion, c.ourHighestProtoVersion; have != uint32(want) { + return nil, fmt.Errorf("wrong protocol version: have %v, want %v", have, want) + } + message = msg + break loop + case *Disconnect: + return nil, fmt.Errorf("disconnect received: %v", msg.Reason) + case *Ping: + c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error + // (PINGs should not be a response upon fresh connection) + default: + return nil, fmt.Errorf("bad status message: %s", pretty.Sdump(msg)) + } + } + // make sure eth protocol version is set for negotiation + if c.negotiatedProtoVersion == 0 { + return nil, fmt.Errorf("eth protocol version must be set in Conn") + } + if status == nil { + // default status message + status = &Status{ + ProtocolVersion: uint32(c.negotiatedProtoVersion), + NetworkID: chain.chainConfig.ChainID.Uint64(), + TD: chain.TD(), + Head: chain.blocks[chain.Len()-1].Hash(), + Genesis: chain.blocks[0].Hash(), + ForkID: chain.ForkID(), + } + } + if err := c.Write(status); err != nil { + return nil, fmt.Errorf("write to connection failed: %v", err) + } + return message, nil +} + +// createSendAndRecvConns creates two connections, one for sending messages to the +// node, and one for receiving messages from the node. 
+func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) {
+	var (
+		sendConn *Conn
+		recvConn *Conn
+		err      error
+	)
+	if isEth66 {
+		sendConn, err = s.dial66()
+		if err != nil {
+			return nil, nil, fmt.Errorf("dial failed: %v", err)
+		}
+		recvConn, err = s.dial66()
+		if err != nil {
+			sendConn.Close()
+			return nil, nil, fmt.Errorf("dial failed: %v", err)
+		}
+	} else {
+		sendConn, err = s.dial()
+		if err != nil {
+			return nil, nil, fmt.Errorf("dial failed: %v", err)
+		}
+		recvConn, err = s.dial()
+		if err != nil {
+			sendConn.Close()
+			return nil, nil, fmt.Errorf("dial failed: %v", err)
+		}
+	}
+	return sendConn, recvConn, nil
+}
+
+// readAndServe serves GetBlockHeaders requests while waiting
+// on another message from the node.
+func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
+	start := time.Now()
+	for time.Since(start) < timeout {
+		c.SetReadDeadline(time.Now().Add(5 * time.Second))
+		switch msg := c.Read().(type) {
+		case *Ping:
+			c.Write(&Pong{})
+		case *GetBlockHeaders:
+			req := *msg
+			headers, err := chain.GetHeaders(req)
+			if err != nil {
+				return errorf("could not get headers for inbound header request: %v", err)
+			}
+			if err := c.Write(headers); err != nil {
+				return errorf("could not write to connection: %v", err)
+			}
+		default:
+			return msg
+		}
+	}
+	return errorf("no message received within %v", timeout)
+}
+
+// readAndServe66 serves eth66 GetBlockHeaders requests while waiting
+// on another message from the node.
+func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) {
+	start := time.Now()
+	for time.Since(start) < timeout {
+		c.SetReadDeadline(time.Now().Add(10 * time.Second))
+
+		reqID, msg := c.Read66()
+
+		switch msg := msg.(type) {
+		case *Ping:
+			c.Write(&Pong{})
+		case *GetBlockHeaders:
+			headers, err := chain.GetHeaders(*msg)
+			if err != nil {
+				return 0, errorf("could not get headers for inbound header request: %v", err)
+			}
+			resp := &eth.BlockHeadersPacket66{
+				RequestId:          reqID,
+				BlockHeadersPacket: eth.BlockHeadersPacket(headers),
+			}
+			if err := c.Write66(resp, BlockHeaders{}.Code()); err != nil {
+				return 0, errorf("could not write to connection: %v", err)
+			}
+		default:
+			return reqID, msg
+		}
+	}
+	return 0, errorf("no message received within %v", timeout)
+}
+
+// headersRequest executes the given `GetBlockHeaders` request.
+func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bool, reqID uint64) (BlockHeaders, error) {
+	defer c.SetReadDeadline(time.Time{})
+	c.SetReadDeadline(time.Now().Add(20 * time.Second))
+	// if on eth66 connection, perform eth66 GetBlockHeaders request
+	if isEth66 {
+		return getBlockHeaders66(chain, c, request, reqID)
+	}
+	if err := c.Write(request); err != nil {
+		return nil, err
+	}
+	switch msg := c.readAndServe(chain, timeout).(type) {
+	case *BlockHeaders:
+		return *msg, nil
+	default:
+		return nil, fmt.Errorf("invalid message: %s", pretty.Sdump(msg))
+	}
+}
+
+// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
+func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
+	// write request
+	packet := eth.GetBlockHeadersPacket(*request)
+	req := &eth.GetBlockHeadersPacket66{
+		RequestId:             id,
+		GetBlockHeadersPacket: &packet,
+	}
+	if err := conn.Write66(req, GetBlockHeaders{}.Code()); err != nil {
+		return nil, fmt.Errorf("could not write to connection: %v", err)
+	}
+	// wait for response
+	msg := conn.waitForResponse(chain, timeout, req.RequestId)
+	headers, ok := msg.(BlockHeaders)
+	if !ok {
+		return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
+	}
+	return headers, nil
+}
+
+// headersMatch returns whether the received headers match the expected headers.
+func headersMatch(expected BlockHeaders, headers BlockHeaders) bool {
+	return reflect.DeepEqual(expected, headers)
+}
+
+// waitForResponse reads from the connection until a response with the expected
+// request ID is received.
+func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message {
+	for {
+		id, msg := c.readAndServe66(chain, timeout)
+		if id == requestID {
+			return msg
+		}
+	}
+}
+
+// sendNextBlock broadcasts the next block in the chain and waits
+// for the node to propagate the block and import it into its chain.
+func (s *Suite) sendNextBlock(isEth66 bool) error {
+	// set up sending and receiving connections
+	sendConn, recvConn, err := s.createSendAndRecvConns(isEth66)
+	if err != nil {
+		return err
+	}
+	defer sendConn.Close()
+	defer recvConn.Close()
+	if err = sendConn.peer(s.chain, nil); err != nil {
+		return fmt.Errorf("peering failed: %v", err)
+	}
+	if err = recvConn.peer(s.chain, nil); err != nil {
+		return fmt.Errorf("peering failed: %v", err)
+	}
+	// create new block announcement
+	nextBlock := s.fullChain.blocks[s.chain.Len()]
+	blockAnnouncement := &NewBlock{
+		Block: nextBlock,
+		TD:    s.fullChain.TotalDifficultyAt(s.chain.Len()),
+	}
+	// send announcement and wait for node to request the header
+	if err = s.testAnnounce(sendConn, recvConn, blockAnnouncement); err != nil {
+		return fmt.Errorf("failed to announce block: %v", err)
+	}
+	// wait for client to update its chain
+	if err = s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil {
+		return fmt.Errorf("failed to receive confirmation of block import: %v", err)
+	}
+	// update test suite chain
+	s.chain.blocks = append(s.chain.blocks, nextBlock)
+	return nil
+}
+
+// testAnnounce writes a block announcement to the node and waits for the node
+// to propagate it.
+func (s *Suite) testAnnounce(sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) error {
+	if err := sendConn.Write(blockAnnouncement); err != nil {
+		return fmt.Errorf("could not write to connection: %v", err)
+	}
+	return s.waitAnnounce(receiveConn, blockAnnouncement)
+}
+
+// waitAnnounce waits for a NewBlock or NewBlockHashes announcement from the node.
+func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error { + for { + switch msg := conn.readAndServe(s.chain, timeout).(type) { + case *NewBlock: + if !reflect.DeepEqual(blockAnnouncement.Block.Header(), msg.Block.Header()) { + return fmt.Errorf("wrong header in block announcement: \nexpected %v "+ + "\ngot %v", blockAnnouncement.Block.Header(), msg.Block.Header()) + } + if !reflect.DeepEqual(blockAnnouncement.TD, msg.TD) { + return fmt.Errorf("wrong TD in announcement: expected %v, got %v", blockAnnouncement.TD, msg.TD) + } + return nil + case *NewBlockHashes: + hashes := *msg + if blockAnnouncement.Block.Hash() != hashes[0].Hash { + return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash) + } + return nil + case *NewPooledTransactionHashes: + // ignore tx announcements from previous tests + continue + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + } +} + +func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block, isEth66 bool) error { + defer conn.SetReadDeadline(time.Time{}) + conn.SetReadDeadline(time.Now().Add(20 * time.Second)) + // create request + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: block.Hash(), + }, + Amount: 1, + } + // loop until BlockHeaders response contains desired block, confirming the + // node imported the block + for { + var ( + headers BlockHeaders + err error + ) + if isEth66 { + requestID := uint64(54) + headers, err = conn.headersRequest(req, s.chain, eth66, requestID) + } else { + headers, err = conn.headersRequest(req, s.chain, eth65, 0) + } + if err != nil { + return fmt.Errorf("GetBlockHeader request failed: %v", err) + } + // if headers response is empty, node hasn't imported block yet, try again + if len(headers) == 0 { + time.Sleep(100 * time.Millisecond) + continue + } + if !reflect.DeepEqual(block.Header(), headers[0]) { + return fmt.Errorf("wrong header returned: wanted %v, got %v", block.Header(), headers[0]) + } + return nil + } +} + +func (s *Suite) oldAnnounce(isEth66 bool) error { + sendConn, receiveConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer receiveConn.Close() + if err := sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err := receiveConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create old block announcement + oldBlockAnnounce := &NewBlock{ + Block: s.chain.blocks[len(s.chain.blocks)/2], + TD: s.chain.blocks[len(s.chain.blocks)/2].Difficulty(), + } + if err := sendConn.Write(oldBlockAnnounce); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + // wait to see if the announcement is propagated + switch msg := receiveConn.readAndServe(s.chain, time.Second*8).(type) { + case *NewBlock: + block := *msg + if block.Block.Hash() == oldBlockAnnounce.Block.Hash() { + return fmt.Errorf("unexpected: block propagated: %s", pretty.Sdump(msg)) + } + case *NewBlockHashes: + hashes := *msg + for _, hash := range hashes { + if hash.Hash == oldBlockAnnounce.Block.Hash() { + return fmt.Errorf("unexpected: block announced: %s", pretty.Sdump(msg)) + } + } + case *Error: + errMsg := *msg + // check to make sure error is timeout (propagation didn't come through == test successful) + if !strings.Contains(errMsg.String(), "timeout") { + return fmt.Errorf("unexpected error: %v", pretty.Sdump(msg)) + } + default: + return 
fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + return nil +} + +func (s *Suite) maliciousHandshakes(t *utesting.T, isEth66 bool) error { + var ( + conn *Conn + err error + ) + if isEth66 { + conn, err = s.dial66() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } else { + conn, err = s.dial() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } + defer conn.Close() + // write hello to client + pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] + handshakes := []*Hello{ + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: pub0, + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, byte(0)), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, pub0...), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: largeBuffer(2), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: largeBuffer(2), + }, + } + for i, handshake := range handshakes { + t.Logf("Testing malicious handshake %v\n", i) + if err := conn.Write(handshake); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + // check that the peer disconnected + for i := 0; i < 2; i++ { + switch msg := conn.readAndServe(s.chain, 20*time.Second).(type) { + case *Disconnect: + case *Error: + case *Hello: + // Discard one hello as Hello's are sent concurrently + continue + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + } + // dial for the next round + if isEth66 { + conn, err = s.dial66() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } else { + conn, err = s.dial() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } + } + return nil +} + +func (s *Suite) maliciousStatus(conn *Conn) error { + if err := conn.handshake(); err != nil { + return fmt.Errorf("handshake failed: %v", err) + } + status := &Status{ + ProtocolVersion: uint32(conn.negotiatedProtoVersion), + NetworkID: s.chain.chainConfig.ChainID.Uint64(), + TD: largeNumber(2), + Head: s.chain.blocks[s.chain.Len()-1].Hash(), + Genesis: s.chain.blocks[0].Hash(), + ForkID: s.chain.ForkID(), + } + // get status + msg, err := conn.statusExchange(s.chain, status) + if err != nil { + return fmt.Errorf("status exchange failed: %v", err) + } + switch msg := msg.(type) { + case *Status: + default: + return fmt.Errorf("expected status, got: %#v ", msg) + } + // wait for disconnect + switch msg := conn.readAndServe(s.chain, timeout).(type) { + case *Disconnect: + return nil + case *Error: + return nil + default: + return fmt.Errorf("expected disconnect, got: %s", pretty.Sdump(msg)) + } +} diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index abc6bcddce08..ad832dddd2de 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -17,33 +17,16 @@ package ethtest import ( - "fmt" - "net" - "strings" "time" - "github.com/davecgh/go-spew/spew" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/rlpx" - 
"github.com/stretchr/testify/assert" ) -var pretty = spew.ConfigState{ - Indent: " ", - DisableCapacities: true, - DisablePointerAddresses: true, - SortKeys: true, -} - -var timeout = 20 * time.Second - -// Suite represents a structure used to test the eth -// protocol of a node(s). +// Suite represents a structure used to test a node's conformance +// to the eth protocol. type Suite struct { Dest *enode.Node @@ -70,35 +53,35 @@ func (s *Suite) AllEthTests() []utesting.Test { return []utesting.Test{ // status {Name: "TestStatus", Fn: s.TestStatus}, - {Name: "TestStatus_66", Fn: s.TestStatus_66}, + {Name: "TestStatus66", Fn: s.TestStatus66}, // get block headers {Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders}, - {Name: "TestGetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66}, - {Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66}, - {Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66}, - {Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66}, + {Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, + {Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, + {Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, + {Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, // get block bodies {Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies}, - {Name: "TestGetBlockBodies_66", Fn: s.TestGetBlockBodies_66}, + {Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66}, // broadcast {Name: "TestBroadcast", Fn: s.TestBroadcast}, - {Name: "TestBroadcast_66", Fn: s.TestBroadcast_66}, + {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, - {Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66}, + {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce}, - {Name: "TestOldAnnounce_66", Fn: s.TestOldAnnounce_66}, + {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, // malicious handshakes + status {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, - {Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66}, - {Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus_66}, + {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, + {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, // test transactions {Name: "TestTransaction", Fn: s.TestTransaction}, - {Name: "TestTransaction_66", Fn: s.TestTransaction_66}, + {Name: "TestTransaction66", Fn: s.TestTransaction66}, {Name: "TestMaliciousTx", Fn: s.TestMaliciousTx}, - {Name: "TestMaliciousTx_66", Fn: s.TestMaliciousTx_66}, - {Name: "TestLargeTxRequest_66", Fn: s.TestLargeTxRequest_66}, - {Name: "TestNewPooledTxs_66", Fn: s.TestNewPooledTxs_66}, + {Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66}, + {Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66}, + {Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66}, } } @@ -109,6 +92,7 @@ func (s *Suite) EthTests() []utesting.Test { {Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies}, {Name: "TestBroadcast", Fn: s.TestBroadcast}, {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, + {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce}, {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, {Name: "TestTransaction", Fn: s.TestTransaction}, @@ -119,90 +103,101 @@ func (s *Suite) EthTests() []utesting.Test { func (s *Suite) Eth66Tests() []utesting.Test { return 
[]utesting.Test{ // only proceed with eth66 test suite if node supports eth 66 protocol - {Name: "TestStatus_66", Fn: s.TestStatus_66}, - {Name: "TestGetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66}, - {Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66}, - {Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66}, - {Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66}, - {Name: "TestGetBlockBodies_66", Fn: s.TestGetBlockBodies_66}, - {Name: "TestBroadcast_66", Fn: s.TestBroadcast_66}, - {Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66}, - {Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66}, - {Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus_66}, - {Name: "TestTransaction_66", Fn: s.TestTransaction_66}, - {Name: "TestMaliciousTx_66", Fn: s.TestMaliciousTx_66}, - {Name: "TestLargeTxRequest_66", Fn: s.TestLargeTxRequest_66}, - {Name: "TestNewPooledTxs_66", Fn: s.TestNewPooledTxs_66}, + {Name: "TestStatus66", Fn: s.TestStatus66}, + {Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, + {Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, + {Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, + {Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, + {Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66}, + {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, + {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, + {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, + {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, + {Name: "TestTransaction66", Fn: s.TestTransaction66}, + {Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66}, + {Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66}, + {Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66}, } } +var ( + eth66 = true // indicates whether suite should negotiate eth66 connection + eth65 = false // indicates whether suite should negotiate eth65 connection or below. +) + // TestStatus attempts to connect to the given node and exchange -// a status message with it, and then check to make sure -// the chain head is correct. +// a status message with it. func (s *Suite) TestStatus(t *utesting.T) { conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } defer conn.Close() - // get protoHandshake - conn.handshake(t) - // get status - switch msg := conn.statusExchange(t, s.chain, nil).(type) { - case *Status: - t.Logf("got status message: %s", pretty.Sdump(msg)) - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) } } -// TestMaliciousStatus sends a status package with a large total difficulty. -func (s *Suite) TestMaliciousStatus(t *utesting.T) { - conn, err := s.dial() +// TestStatus66 attempts to connect to the given node and exchange +// a status message with it on the eth66 protocol. 
+func (s *Suite) TestStatus66(t *utesting.T) { + conn, err := s.dial66() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } defer conn.Close() - // get protoHandshake - conn.handshake(t) - status := &Status{ - ProtocolVersion: uint32(conn.negotiatedProtoVersion), - NetworkID: s.chain.chainConfig.ChainID.Uint64(), - TD: largeNumber(2), - Head: s.chain.blocks[s.chain.Len()-1].Hash(), - Genesis: s.chain.blocks[0].Hash(), - ForkID: s.chain.ForkID(), - } - // get status - switch msg := conn.statusExchange(t, s.chain, status).(type) { - case *Status: - t.Logf("%+v\n", msg) - default: - t.Fatalf("expected status, got: %#v ", msg) - } - // wait for disconnect - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *Disconnect: - case *Error: - return - default: - t.Fatalf("expected disconnect, got: %s", pretty.Sdump(msg)) + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) } } // TestGetBlockHeaders tests whether the given node can respond to -// a `GetBlockHeaders` request and that the response is accurate. +// a `GetBlockHeaders` request accurately. func (s *Suite) TestGetBlockHeaders(t *utesting.T) { conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("handshake(s) failed: %v", err) + } + // write request + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 2, + Skip: 1, + Reverse: false, + } + headers, err := conn.headersRequest(req, s.chain, eth65, 0) + if err != nil { + t.Fatalf("GetBlockHeaders request failed: %v", err) + } + // check for correct headers + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get headers for given request: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) + } +} - conn.handshake(t) - conn.statusExchange(t, s.chain, nil) - - // get block headers +// TestGetBlockHeaders66 tests whether the given node can respond to +// an eth66 `GetBlockHeaders` request and that the response is accurate. +func (s *Suite) TestGetBlockHeaders66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request req := &GetBlockHeaders{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), @@ -211,21 +206,185 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { Skip: 1, Reverse: false, } + headers, err := conn.headersRequest(req, s.chain, eth66, 33) + if err != nil { + t.Fatalf("could not get block headers: %v", err) + } + // check for correct headers + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get headers for given request: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) + } +} - if err := conn.Write(req); err != nil { - t.Fatalf("could not write to connection: %v", err) +// TestSimultaneousRequests66 sends two simultaneous `GetBlockHeader` requests from +// the same connection with different request IDs and checks to make sure the node +// responds with the correct headers per request. 
+func (s *Suite) TestSimultaneousRequests66(t *utesting.T) {
+	// create a connection
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err := conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	// create two requests
+	req1 := &eth.GetBlockHeadersPacket66{
+		RequestId: uint64(111),
+		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+			Origin: eth.HashOrNumber{
+				Hash: s.chain.blocks[1].Hash(),
+			},
+			Amount:  2,
+			Skip:    1,
+			Reverse: false,
+		},
+	}
+	req2 := &eth.GetBlockHeadersPacket66{
+		RequestId: uint64(222),
+		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+			Origin: eth.HashOrNumber{
+				Hash: s.chain.blocks[1].Hash(),
+			},
+			Amount:  4,
+			Skip:    1,
+			Reverse: false,
+		},
+	}
+	// write the first request
+	if err := conn.Write66(req1, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	// write the second request
+	if err := conn.Write66(req2, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	// wait for responses
+	msg := conn.waitForResponse(s.chain, timeout, req1.RequestId)
+	headers1, ok := msg.(BlockHeaders)
+	if !ok {
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+	msg = conn.waitForResponse(s.chain, timeout, req2.RequestId)
+	headers2, ok := msg.(BlockHeaders)
+	if !ok {
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+	// check received headers for accuracy
+	expected1, err := s.chain.GetHeaders(GetBlockHeaders(*req1.GetBlockHeadersPacket))
+	if err != nil {
+		t.Fatalf("failed to get expected headers for request 1: %v", err)
+	}
+	expected2, err := s.chain.GetHeaders(GetBlockHeaders(*req2.GetBlockHeadersPacket))
+	if err != nil {
+		t.Fatalf("failed to get expected headers for request 2: %v", err)
+	}
+	if !headersMatch(expected1, headers1) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
+	}
+	if !headersMatch(expected2, headers2) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
+	}
+}
+
+// TestSameRequestID66 sends two requests with the same request ID to a
+// single node.
+func (s *Suite) TestSameRequestID66(t *utesting.T) {
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err := conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	// create requests
+	reqID := uint64(1234)
+	request1 := &eth.GetBlockHeadersPacket66{
+		RequestId: reqID,
+		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+			Origin: eth.HashOrNumber{
+				Number: 1,
+			},
+			Amount: 2,
+		},
+	}
+	request2 := &eth.GetBlockHeadersPacket66{
+		RequestId: reqID,
+		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+			Origin: eth.HashOrNumber{
+				Number: 33,
+			},
+			Amount: 2,
+		},
+	}
+	// write the requests
+	if err = conn.Write66(request1, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	if err = conn.Write66(request2, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	// wait for responses
+	msg := conn.waitForResponse(s.chain, timeout, reqID)
+	headers1, ok := msg.(BlockHeaders)
+	if !ok {
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+	msg = conn.waitForResponse(s.chain, timeout, reqID)
+	headers2, ok := msg.(BlockHeaders)
+	if !ok {
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+	// check if headers match
+	expected1, err := s.chain.GetHeaders(GetBlockHeaders(*request1.GetBlockHeadersPacket))
+	if err != nil {
+		t.Fatalf("failed to get expected block headers: %v", err)
+	}
+	expected2, err := s.chain.GetHeaders(GetBlockHeaders(*request2.GetBlockHeadersPacket))
+	if err != nil {
+		t.Fatalf("failed to get expected block headers: %v", err)
+	}
+	if !headersMatch(expected1, headers1) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
+	}
+	if !headersMatch(expected2, headers2) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
+	}
+}
+
+// TestZeroRequestID66 checks that a message with a request ID of zero is still handled
+// by the node.
+func (s *Suite) TestZeroRequestID66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Number: 0, + }, + Amount: 2, + } + headers, err := conn.headersRequest(req, s.chain, eth66, 0) + if err != nil { + t.Fatalf("failed to get block headers: %v", err) + } + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get expected block headers: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) } } @@ -234,12 +393,12 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { func (s *Suite) TestGetBlockBodies(t *utesting.T) { conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } defer conn.Close() - - conn.handshake(t) - conn.statusExchange(t, s.chain, nil) + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } // create block bodies request req := &GetBlockBodies{ s.chain.blocks[54].Hash(), @@ -248,126 +407,125 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { if err := conn.Write(req); err != nil { t.Fatalf("could not write to connection: %v", err) } - - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + // wait for response + switch msg := conn.readAndServe(s.chain, timeout).(type) { case *BlockBodies: t.Logf("received %d block bodies", len(*msg)) + if len(*msg) != len(*req) { + t.Fatalf("wrong bodies in response: expected %d bodies, "+ + "got %d", len(*req), len(*msg)) + } default: t.Fatalf("unexpected: %s", pretty.Sdump(msg)) } } +// TestGetBlockBodies66 tests whether the given node can respond to +// a `GetBlockBodies` request and that the response is accurate over +// the eth66 protocol. +func (s *Suite) TestGetBlockBodies66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // create block bodies request + req := ð.GetBlockBodiesPacket66{ + RequestId: uint64(55), + GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ + s.chain.blocks[54].Hash(), + s.chain.blocks[75].Hash(), + }, + } + if err := conn.Write66(req, GetBlockBodies{}.Code()); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // wait for block bodies response + msg := conn.waitForResponse(s.chain, timeout, req.RequestId) + blockBodies, ok := msg.(BlockBodies) + if !ok { + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } + t.Logf("received %d block bodies", len(blockBodies)) + if len(blockBodies) != len(req.GetBlockBodiesPacket) { + t.Fatalf("wrong bodies in response: expected %d bodies, "+ + "got %d", len(req.GetBlockBodiesPacket), len(blockBodies)) + } +} + // TestBroadcast tests whether a block announcement is correctly // propagated to the given node's peer(s). 
func (s *Suite) TestBroadcast(t *utesting.T) { - s.sendNextBlock(t) + if err := s.sendNextBlock(eth65); err != nil { + t.Fatalf("block broadcast failed: %v", err) + } } -func (s *Suite) sendNextBlock(t *utesting.T) { - sendConn, receiveConn := s.setupConnection(t), s.setupConnection(t) - defer sendConn.Close() - defer receiveConn.Close() - - // create new block announcement - nextBlock := len(s.chain.blocks) - blockAnnouncement := &NewBlock{ - Block: s.fullChain.blocks[nextBlock], - TD: s.fullChain.TD(nextBlock + 1), - } - // send announcement and wait for node to request the header - s.testAnnounce(t, sendConn, receiveConn, blockAnnouncement) - // wait for client to update its chain - if err := receiveConn.waitForBlock(s.fullChain.blocks[nextBlock]); err != nil { - t.Fatal(err) +// TestBroadcast66 tests whether a block announcement is correctly +// propagated to the given node's peer(s) on the eth66 protocol. +func (s *Suite) TestBroadcast66(t *utesting.T) { + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("block broadcast failed: %v", err) } - // update test suite chain - s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) } -// TestMaliciousHandshake tries to send malicious data during the handshake. -func (s *Suite) TestMaliciousHandshake(t *utesting.T) { - conn, err := s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) - } - defer conn.Close() - // write hello to client - pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] - handshakes := []*Hello{ - { - Version: 5, - Caps: []p2p.Cap{ - {Name: largeString(2), Version: 64}, - }, - ID: pub0, - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - }, - ID: append(pub0, byte(0)), - }, +// TestLargeAnnounce tests the announcement mechanism with a large block. 
+func (s *Suite) TestLargeAnnounce(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - }, - ID: append(pub0, pub0...), + Block: largeBlock(), + TD: s.fullChain.TotalDifficultyAt(nextBlock), }, { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - }, - ID: largeBuffer(2), + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), }, { - Version: 5, - Caps: []p2p.Cap{ - {Name: largeString(2), Version: 64}, - }, - ID: largeBuffer(2), + Block: largeBlock(), + TD: largeNumber(2), }, } - for i, handshake := range handshakes { - t.Logf("Testing malicious handshake %v\n", i) - // Init the handshake - if err := conn.Write(handshake); err != nil { - t.Fatalf("could not write to connection: %v", err) + + for i, blockAnnouncement := range blocks { + t.Logf("Testing malicious announcement: %v\n", i) + conn, err := s.dial() + if err != nil { + t.Fatalf("dial failed: %v", err) } - // check that the peer disconnected - timeout := 20 * time.Second - // Discard one hello - for i := 0; i < 2; i++ { - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *Disconnect: - case *Error: - case *Hello: - // Hello's are send concurrently, so ignore them - continue - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) } - // Dial for the next round - conn, err = s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) + if err = conn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) } + // Invalid announcement, check that peer disconnected + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + conn.Close() + } + // Test the last block as a valid block + if err := s.sendNextBlock(eth65); err != nil { + t.Fatalf("failed to broadcast next block: %v", err) } } -// TestLargeAnnounce tests the announcement mechanism with a large block. -func (s *Suite) TestLargeAnnounce(t *utesting.T) { +// TestLargeAnnounce66 tests the announcement mechanism with a large +// block over the eth66 protocol. 
+func (s *Suite) TestLargeAnnounce66(t *utesting.T) { nextBlock := len(s.chain.blocks) blocks := []*NewBlock{ { Block: largeBlock(), - TD: s.fullChain.TD(nextBlock + 1), + TD: s.fullChain.TotalDifficultyAt(nextBlock), }, { Block: s.fullChain.blocks[nextBlock], @@ -377,174 +535,220 @@ func (s *Suite) TestLargeAnnounce(t *utesting.T) { Block: largeBlock(), TD: largeNumber(2), }, - { - Block: s.fullChain.blocks[nextBlock], - TD: s.fullChain.TD(nextBlock + 1), - }, } for i, blockAnnouncement := range blocks[0:3] { t.Logf("Testing malicious announcement: %v\n", i) - sendConn := s.setupConnection(t) - if err := sendConn.Write(blockAnnouncement); err != nil { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + if err := conn.Write(blockAnnouncement); err != nil { t.Fatalf("could not write to connection: %v", err) } // Invalid announcement, check that peer disconnected - switch msg := sendConn.ReadAndServe(s.chain, time.Second*8).(type) { + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { case *Disconnect: case *Error: break default: t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) } - sendConn.Close() + conn.Close() } // Test the last block as a valid block - s.sendNextBlock(t) + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("failed to broadcast next block: %v", err) + } } +// TestOldAnnounce tests the announcement mechanism with an old block. func (s *Suite) TestOldAnnounce(t *utesting.T) { - sendConn, recvConn := s.setupConnection(t), s.setupConnection(t) - defer sendConn.Close() - defer recvConn.Close() + if err := s.oldAnnounce(eth65); err != nil { + t.Fatal(err) + } +} + +// TestOldAnnounce66 tests the announcement mechanism with an old block, +// over the eth66 protocol. +func (s *Suite) TestOldAnnounce66(t *utesting.T) { + if err := s.oldAnnounce(eth66); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousHandshake tries to send malicious data during the handshake. +func (s *Suite) TestMaliciousHandshake(t *utesting.T) { + if err := s.maliciousHandshakes(t, eth65); err != nil { + t.Fatal(err) + } +} - s.oldAnnounce(t, sendConn, recvConn) +// TestMaliciousHandshake66 tries to send malicious data during the handshake. +func (s *Suite) TestMaliciousHandshake66(t *utesting.T) { + if err := s.maliciousHandshakes(t, eth66); err != nil { + t.Fatal(err) + } } -func (s *Suite) oldAnnounce(t *utesting.T, sendConn, receiveConn *Conn) { - oldBlockAnnounce := &NewBlock{ - Block: s.chain.blocks[len(s.chain.blocks)/2], - TD: s.chain.blocks[len(s.chain.blocks)/2].Difficulty(), +// TestMaliciousStatus sends a status package with a large total difficulty. 
+func (s *Suite) TestMaliciousStatus(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("dial failed: %v", err) } + defer conn.Close() - if err := sendConn.Write(oldBlockAnnounce); err != nil { - t.Fatalf("could not write to connection: %v", err) + if err := s.maliciousStatus(conn); err != nil { + t.Fatal(err) } +} - switch msg := receiveConn.ReadAndServe(s.chain, time.Second*8).(type) { - case *NewBlock: - block := *msg - if block.Block.Hash() == oldBlockAnnounce.Block.Hash() { - t.Fatalf("unexpected: block propagated: %s", pretty.Sdump(msg)) - } - case *NewBlockHashes: - hashes := *msg - for _, hash := range hashes { - if hash.Hash == oldBlockAnnounce.Block.Hash() { - t.Fatalf("unexpected: block announced: %s", pretty.Sdump(msg)) - } - } - case *Error: - errMsg := *msg - // check to make sure error is timeout (propagation didn't come through == test successful) - if !strings.Contains(errMsg.String(), "timeout") { - t.Fatalf("unexpected error: %v", pretty.Sdump(msg)) - } - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) +// TestMaliciousStatus66 sends a status package with a large total +// difficulty over the eth66 protocol. +func (s *Suite) TestMaliciousStatus66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + + if err := s.maliciousStatus(conn); err != nil { + t.Fatal(err) } } -func (s *Suite) testAnnounce(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) { - // Announce the block. - if err := sendConn.Write(blockAnnouncement); err != nil { - t.Fatalf("could not write to connection: %v", err) +// TestTransaction sends a valid transaction to the node and +// checks if the transaction gets propagated. +func (s *Suite) TestTransaction(t *utesting.T) { + if err := s.sendSuccessfulTxs(t, eth65); err != nil { + t.Fatal(err) } - s.waitAnnounce(t, receiveConn, blockAnnouncement) } -func (s *Suite) waitAnnounce(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) { - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *NewBlock: - t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block)) - assert.Equal(t, - blockAnnouncement.Block.Header(), msg.Block.Header(), - "wrong block header in announcement", - ) - assert.Equal(t, - blockAnnouncement.TD, msg.TD, - "wrong TD in announcement", - ) - case *NewBlockHashes: - message := *msg - t.Logf("received NewBlockHashes message: %s", pretty.Sdump(message)) - assert.Equal(t, blockAnnouncement.Block.Hash(), message[0].Hash, - "wrong block hash in announcement", - ) - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) +// TestTransaction66 sends a valid transaction to the node and +// checks if the transaction gets propagated. +func (s *Suite) TestTransaction66(t *utesting.T) { + if err := s.sendSuccessfulTxs(t, eth66); err != nil { + t.Fatal(err) } } -func (s *Suite) setupConnection(t *utesting.T) *Conn { - // create conn - sendConn, err := s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) +// TestMaliciousTx sends several invalid transactions and tests whether +// the node will propagate them. +func (s *Suite) TestMaliciousTx(t *utesting.T) { + if err := s.sendMaliciousTxs(t, eth65); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousTx66 sends several invalid transactions and tests whether +// the node will propagate them. 
+func (s *Suite) TestMaliciousTx66(t *utesting.T) { + if err := s.sendMaliciousTxs(t, eth66); err != nil { + t.Fatal(err) } - sendConn.handshake(t) - sendConn.statusExchange(t, s.chain, nil) - return sendConn } -// dial attempts to dial the given node and perform a handshake, -// returning the created Conn if successful. -func (s *Suite) dial() (*Conn, error) { - var conn Conn - // dial - fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) +// TestLargeTxRequest66 tests whether a node can fulfill a large GetPooledTransactions +// request. +func (s *Suite) TestLargeTxRequest66(t *utesting.T) { + // send the next block to ensure the node is no longer syncing and + // is able to accept txs + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("failed to send next block: %v", err) + } + // send 2000 transactions to the node + hashMap, txs, err := generateTxs(s, 2000) if err != nil { - return nil, err + t.Fatalf("failed to generate transactions: %v", err) } - conn.Conn = rlpx.NewConn(fd, s.Dest.Pubkey()) - // do encHandshake - conn.ourKey, _ = crypto.GenerateKey() - _, err = conn.Handshake(conn.ourKey) + if err = sendMultipleSuccessfulTxs(t, s, txs); err != nil { + t.Fatalf("failed to send multiple txs: %v", err) + } + // set up connection to receive to ensure node is peered with the receiving connection + // before tx request is sent + conn, err := s.dial66() if err != nil { - return nil, err + t.Fatalf("dial failed: %v", err) } - // set default p2p capabilities - conn.caps = []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // create and send pooled tx request + hashes := make([]common.Hash, 0) + for _, hash := range hashMap { + hashes = append(hashes, hash) + } + getTxReq := ð.GetPooledTransactionsPacket66{ + RequestId: 1234, + GetPooledTransactionsPacket: hashes, + } + if err = conn.Write66(getTxReq, GetPooledTransactions{}.Code()); err != nil { + t.Fatalf("could not write to conn: %v", err) + } + // check that all received transactions match those that were sent to node + switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { + case PooledTransactions: + for _, gotTx := range msg { + if _, exists := hashMap[gotTx.Hash()]; !exists { + t.Fatalf("unexpected tx received: %v", gotTx.Hash()) + } + } + default: + t.Fatalf("unexpected %s", pretty.Sdump(msg)) } - conn.ourHighestProtoVersion = 65 - return &conn, nil } -func (s *Suite) TestTransaction(t *utesting.T) { - tests := []*types.Transaction{ - getNextTxFromChain(t, s), - unknownTx(t, s), +// TestNewPooledTxs_66 tests whether a node will do a GetPooledTransactions +// request upon receiving a NewPooledTransactionHashes announcement. 
+func (s *Suite) TestNewPooledTxs66(t *utesting.T) { + // send the next block to ensure the node is no longer syncing and + // is able to accept txs + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("failed to send next block: %v", err) } - for i, tx := range tests { - t.Logf("Testing tx propagation: %v\n", i) - sendSuccessfulTx(t, s, tx) + // generate 50 txs + hashMap, _, err := generateTxs(s, 50) + if err != nil { + t.Fatalf("failed to generate transactions: %v", err) } -} - -func (s *Suite) TestMaliciousTx(t *utesting.T) { - badTxs := []*types.Transaction{ - getOldTxFromChain(t, s), - invalidNonceTx(t, s), - hugeAmount(t, s), - hugeGasPrice(t, s), - hugeData(t, s), - } - sendConn := s.setupConnection(t) - defer sendConn.Close() - // set up receiving connection before sending txs to make sure - // no announcements are missed - recvConn := s.setupConnection(t) - defer recvConn.Close() - - for i, tx := range badTxs { - t.Logf("Testing malicious tx propagation: %v\n", i) - if err := sendConn.Write(&Transactions{tx}); err != nil { - t.Fatalf("could not write to connection: %v", err) + // create new pooled tx hashes announcement + hashes := make([]common.Hash, 0) + for _, hash := range hashMap { + hashes = append(hashes, hash) + } + announce := NewPooledTransactionHashes(hashes) + // send announcement + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + if err = conn.Write(announce); err != nil { + t.Fatalf("failed to write to connection: %v", err) + } + // wait for GetPooledTxs request + for { + _, msg := conn.readAndServe66(s.chain, timeout) + switch msg := msg.(type) { + case GetPooledTransactions: + if len(msg) != len(hashes) { + t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg)) + } + return + case *NewPooledTransactionHashes: + // ignore propagated txs from old tests + continue + default: + t.Fatalf("unexpected %s", pretty.Sdump(msg)) } - } - // check to make sure bad txs aren't propagated - waitForTxPropagation(t, s, badTxs, recvConn) } diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go index a6166bd2e34c..d2dbe0a7d69b 100644 --- a/cmd/devp2p/internal/ethtest/transaction.go +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -17,6 +17,7 @@ package ethtest import ( + "fmt" "math/big" "strings" "time" @@ -31,58 +32,171 @@ import ( //var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") -func sendSuccessfulTx(t *utesting.T, s *Suite, tx *types.Transaction) { - sendConn := s.setupConnection(t) - defer sendConn.Close() - sendSuccessfulTxWithConn(t, s, tx, sendConn) +func (s *Suite) sendSuccessfulTxs(t *utesting.T, isEth66 bool) error { + tests := []*types.Transaction{ + getNextTxFromChain(s), + unknownTx(s), + } + for i, tx := range tests { + if tx == nil { + return fmt.Errorf("could not find tx to send") + } + t.Logf("Testing tx propagation %d: sending tx %v %v %v\n", i, tx.Hash().String(), tx.GasPrice(), tx.Gas()) + // get previous tx if exists for reference in case of old tx propagation + var prevTx *types.Transaction + if i != 0 { + prevTx = tests[i-1] + } + // write tx to connection + if err := sendSuccessfulTx(s, tx, prevTx, isEth66); err != nil { + return fmt.Errorf("send successful tx test failed: %v", err) + } + 
} + return nil } -func sendSuccessfulTxWithConn(t *utesting.T, s *Suite, tx *types.Transaction, sendConn *Conn) { - t.Logf("sending tx: %v %v %v\n", tx.Hash().String(), tx.GasPrice(), tx.Gas()) +func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction, isEth66 bool) error { + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } // Send the transaction - if err := sendConn.Write(&Transactions{tx}); err != nil { - t.Fatal(err) + if err = sendConn.Write(&Transactions{tx}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + // peer receiving connection to node + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) } // update last nonce seen nonce = tx.Nonce() - - recvConn := s.setupConnection(t) // Wait for the transaction announcement - switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { - case *Transactions: - recTxs := *msg - for _, gotTx := range recTxs { - if gotTx.Hash() == tx.Hash() { - // Ok - return + for { + switch msg := recvConn.readAndServe(s.chain, timeout).(type) { + case *Transactions: + recTxs := *msg + // if you receive an old tx propagation, read from connection again + if len(recTxs) == 1 && prevTx != nil { + if recTxs[0] == prevTx { + continue + } } - } - t.Fatalf("missing transaction: got %v missing %v", recTxs, tx.Hash()) - case *NewPooledTransactionHashes: - txHashes := *msg - for _, gotHash := range txHashes { - if gotHash == tx.Hash() { - return + for _, gotTx := range recTxs { + if gotTx.Hash() == tx.Hash() { + // Ok + return nil + } } + return fmt.Errorf("missing transaction: got %v missing %v", recTxs, tx.Hash()) + case *NewPooledTransactionHashes: + txHashes := *msg + // if you receive an old tx propagation, read from connection again + if len(txHashes) == 1 && prevTx != nil { + if txHashes[0] == prevTx.Hash() { + continue + } + } + for _, gotHash := range txHashes { + if gotHash == tx.Hash() { + // Ok + return nil + } + } + return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash()) + default: + return fmt.Errorf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) } - t.Fatalf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash()) - default: - t.Fatalf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) } } +func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error { + badTxs := []*types.Transaction{ + getOldTxFromChain(s), + invalidNonceTx(s), + hugeAmount(s), + hugeGasPrice(s), + hugeData(s), + } + // setup receiving connection before sending malicious txs + var ( + recvConn *Conn + err error + ) + if isEth66 { + recvConn, err = s.dial66() + } else { + recvConn, err = s.dial() + } + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + defer recvConn.Close() + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + for i, tx := range badTxs { + t.Logf("Testing malicious tx propagation: %v\n", i) + if err = sendMaliciousTx(s, tx, isEth66); err != nil { + return fmt.Errorf("malicious tx test failed:\ntx: %v\nerror: %v", tx, err) + } + } + // check to make sure bad txs aren't propagated + return checkMaliciousTxPropagation(s, badTxs, recvConn) +} + +func sendMaliciousTx(s *Suite, tx *types.Transaction, isEth66 
bool) error { + // setup connection + var ( + conn *Conn + err error + ) + if isEth66 { + conn, err = s.dial66() + } else { + conn, err = s.dial() + } + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // write malicious tx + if err = conn.Write(&Transactions{tx}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + return nil +} + var nonce = uint64(99) -func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*types.Transaction) { +// sendMultipleSuccessfulTxs sends the given transactions to the node and +// expects the node to accept and propagate them. +func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction) error { txMsg := Transactions(txs) t.Logf("sending %d txs\n", len(txs)) - recvConn := s.setupConnection(t) + sendConn, recvConn, err := s.createSendAndRecvConns(true) + if err != nil { + return err + } + defer sendConn.Close() defer recvConn.Close() - + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } // Send the transactions - if err := sendConn.Write(&txMsg); err != nil { - t.Fatal(err) + if err = sendConn.Write(&txMsg); err != nil { + return fmt.Errorf("failed to write message to connection: %v", err) } // update nonce nonce = txs[len(txs)-1].Nonce() @@ -90,7 +204,7 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*t recvHashes := make([]common.Hash, 0) // all txs should be announced within 3 announcements for i := 0; i < 3; i++ { - switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { + switch msg := recvConn.readAndServe(s.chain, timeout).(type) { case *Transactions: for _, tx := range *msg { recvHashes = append(recvHashes, tx.Hash()) @@ -99,7 +213,7 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*t recvHashes = append(recvHashes, *msg...) default: if !strings.Contains(pretty.Sdump(msg), "i/o timeout") { - t.Fatalf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg)) + return fmt.Errorf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg)) } } // break once all 2000 txs have been received @@ -112,7 +226,7 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*t continue } else { t.Logf("successfully received all %d txs", len(txs)) - return + return nil } } } @@ -121,13 +235,15 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*t for _, missing := range missingTxs { t.Logf("missing tx: %v", missing.Hash()) } - t.Fatalf("missing %d txs", len(missingTxs)) + return fmt.Errorf("missing %d txs", len(missingTxs)) } + return nil } -func waitForTxPropagation(t *utesting.T, s *Suite, txs []*types.Transaction, recvConn *Conn) { - // Wait for another transaction announcement - switch msg := recvConn.ReadAndServe(s.chain, time.Second*8).(type) { +// checkMaliciousTxPropagation checks whether the given malicious transactions were +// propagated by the node. 
+func checkMaliciousTxPropagation(s *Suite, txs []*types.Transaction, conn *Conn) error { + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { case *Transactions: // check to see if any of the failing txs were in the announcement recvTxs := make([]common.Hash, len(*msg)) @@ -136,25 +252,20 @@ func waitForTxPropagation(t *utesting.T, s *Suite, txs []*types.Transaction, rec } badTxs, _ := compareReceivedTxs(recvTxs, txs) if len(badTxs) > 0 { - for _, tx := range badTxs { - t.Logf("received bad tx: %v", tx) - } - t.Fatalf("received %d bad txs", len(badTxs)) + return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) } case *NewPooledTransactionHashes: badTxs, _ := compareReceivedTxs(*msg, txs) if len(badTxs) > 0 { - for _, tx := range badTxs { - t.Logf("received bad tx: %v", tx) - } - t.Fatalf("received %d bad txs", len(badTxs)) + return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) } case *Error: // Transaction should not be announced -> wait for timeout - return + return nil default: - t.Fatalf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg)) + return fmt.Errorf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg)) } + return nil } // compareReceivedTxs compares the received set of txs against the given set of txs, @@ -180,118 +291,129 @@ func compareReceivedTxs(recvTxs []common.Hash, txs []*types.Transaction) (presen return present, missing } -func unknownTx(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func unknownTx(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce()+1, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func getNextTxFromChain(t *utesting.T, s *Suite) *types.Transaction { +func getNextTxFromChain(s *Suite) *types.Transaction { // Get a new transaction - var tx *types.Transaction for _, blocks := range s.fullChain.blocks[s.chain.Len():] { txs := blocks.Transactions() if txs.Len() != 0 { - tx = txs[0] - break + return txs[0] } } - if tx == nil { - t.Fatal("could not find transaction") - } - return tx + return nil } -func generateTxs(t *utesting.T, s *Suite, numTxs int) (map[common.Hash]common.Hash, []*types.Transaction) { +func generateTxs(s *Suite, numTxs int) (map[common.Hash]common.Hash, []*types.Transaction, error) { txHashMap := make(map[common.Hash]common.Hash, numTxs) txs := make([]*types.Transaction, numTxs) - nextTx := getNextTxFromChain(t, s) + nextTx := getNextTxFromChain(s) + if nextTx == nil { + return nil, nil, fmt.Errorf("failed to get the next transaction") + } gas := nextTx.Gas() nonce = nonce + 1 // generate txs for i := 0; i < numTxs; i++ { - tx := generateTx(t, s.chain.chainConfig, nonce, gas) + tx := generateTx(s.chain.chainConfig, nonce, gas) + if tx == nil { + return nil, nil, fmt.Errorf("failed to get the next transaction") + } txHashMap[tx.Hash()] = tx.Hash() txs[i] = tx nonce = nonce + 1 } - return txHashMap, txs + return txHashMap, txs, nil } -func generateTx(t *utesting.T, chainConfig *params.ChainConfig, nonce uint64, gas uint64) *types.Transaction { +func generateTx(chainConfig *params.ChainConfig, nonce uint64, gas uint64) *types.Transaction { var to common.Address tx := types.NewTransaction(nonce, to, big.NewInt(1), gas, big.NewInt(1), []byte{}) - return signWithFaucet(t, chainConfig, tx) + return 
signWithFaucet(chainConfig, tx) } -func getOldTxFromChain(t *utesting.T, s *Suite) *types.Transaction { - var tx *types.Transaction +func getOldTxFromChain(s *Suite) *types.Transaction { for _, blocks := range s.fullChain.blocks[:s.chain.Len()-1] { txs := blocks.Transactions() if txs.Len() != 0 { - tx = txs[0] - break + return txs[0] } } - if tx == nil { - t.Fatal("could not find transaction") - } - return tx + return nil } -func invalidNonceTx(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func invalidNonceTx(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce()-2, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func hugeAmount(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func hugeAmount(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } amount := largeNumber(2) var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce(), to, amount, tx.Gas(), tx.GasPrice(), tx.Data()) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func hugeGasPrice(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func hugeGasPrice(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } gasPrice := largeNumber(2) var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), gasPrice, tx.Data()) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func hugeData(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func hugeData(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), tx.GasPrice(), largeBuffer(2)) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func signWithFaucet(t *utesting.T, chainConfig *params.ChainConfig, tx *types.Transaction) *types.Transaction { +func signWithFaucet(chainConfig *params.ChainConfig, tx *types.Transaction) *types.Transaction { signer := types.LatestSigner(chainConfig) signedTx, err := types.SignTx(tx, signer, faucetKey) if err != nil { - t.Fatalf("could not sign tx: %v\n", err) + return nil } return signedTx } diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index 50a69b94183f..e49ea284e94c 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -19,13 +19,8 @@ package ethtest import ( "crypto/ecdsa" "fmt" - "reflect" - "time" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/rlpx" "github.com/ethereum/go-ethereum/rlp" @@ -137,6 +132,7 @@ type Conn struct { caps []p2p.Cap } +// Read reads an eth packet from the connection. 
func (c *Conn) Read() Message { code, rawData, _, err := c.Conn.Read() if err != nil { @@ -185,32 +181,83 @@ func (c *Conn) Read() Message { return msg } -// ReadAndServe serves GetBlockHeaders requests while waiting -// on another message from the node. -func (c *Conn) ReadAndServe(chain *Chain, timeout time.Duration) Message { - start := time.Now() - for time.Since(start) < timeout { - c.SetReadDeadline(time.Now().Add(5 * time.Second)) - switch msg := c.Read().(type) { - case *Ping: - c.Write(&Pong{}) - case *GetBlockHeaders: - req := *msg - headers, err := chain.GetHeaders(req) - if err != nil { - return errorf("could not get headers for inbound header request: %v", err) - } - - if err := c.Write(headers); err != nil { - return errorf("could not write to connection: %v", err) - } - default: - return msg +// Read66 reads an eth66 packet from the connection. +func (c *Conn) Read66() (uint64, Message) { + code, rawData, _, err := c.Conn.Read() + if err != nil { + return 0, errorf("could not read from connection: %v", err) + } + + var msg Message + switch int(code) { + case (Hello{}).Code(): + msg = new(Hello) + case (Ping{}).Code(): + msg = new(Ping) + case (Pong{}).Code(): + msg = new(Pong) + case (Disconnect{}).Code(): + msg = new(Disconnect) + case (Status{}).Code(): + msg = new(Status) + case (GetBlockHeaders{}).Code(): + ethMsg := new(eth.GetBlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) } + return ethMsg.RequestId, GetBlockHeaders(*ethMsg.GetBlockHeadersPacket) + case (BlockHeaders{}).Code(): + ethMsg := new(eth.BlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket) + case (GetBlockBodies{}).Code(): + ethMsg := new(eth.GetBlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket) + case (BlockBodies{}).Code(): + ethMsg := new(eth.BlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket) + case (NewBlock{}).Code(): + msg = new(NewBlock) + case (NewBlockHashes{}).Code(): + msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) + case (GetPooledTransactions{}.Code()): + ethMsg := new(eth.GetPooledTransactionsPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket) + case (PooledTransactions{}.Code()): + ethMsg := new(eth.PooledTransactionsPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket) + default: + msg = errorf("invalid message code: %d", code) + } + + if msg != nil { + if err := rlp.DecodeBytes(rawData, msg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return 0, msg } - return errorf("no message received within %v", timeout) + return 0, errorf("invalid message: %s", 
string(rawData)) } +// Write writes a eth packet to the connection. func (c *Conn) Write(msg Message) error { // check if message is eth protocol message var ( @@ -225,135 +272,12 @@ func (c *Conn) Write(msg Message) error { return err } -// handshake checks to make sure a `HELLO` is received. -func (c *Conn) handshake(t *utesting.T) Message { - defer c.SetDeadline(time.Time{}) - c.SetDeadline(time.Now().Add(10 * time.Second)) - - // write hello to client - pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] - ourHandshake := &Hello{ - Version: 5, - Caps: c.caps, - ID: pub0, - } - if err := c.Write(ourHandshake); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // read hello from client - switch msg := c.Read().(type) { - case *Hello: - // set snappy if version is at least 5 - if msg.Version >= 5 { - c.SetSnappy(true) - } - c.negotiateEthProtocol(msg.Caps) - if c.negotiatedProtoVersion == 0 { - t.Fatalf("unexpected eth protocol version") - } - return msg - default: - t.Fatalf("bad handshake: %#v", msg) - return nil - } -} - -// negotiateEthProtocol sets the Conn's eth protocol version -// to highest advertised capability from peer -func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { - var highestEthVersion uint - for _, capability := range caps { - if capability.Name != "eth" { - continue - } - if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { - highestEthVersion = capability.Version - } - } - c.negotiatedProtoVersion = highestEthVersion -} - -// statusExchange performs a `Status` message exchange with the given -// node. -func (c *Conn) statusExchange(t *utesting.T, chain *Chain, status *Status) Message { - defer c.SetDeadline(time.Time{}) - c.SetDeadline(time.Now().Add(20 * time.Second)) - - // read status message from client - var message Message -loop: - for { - switch msg := c.Read().(type) { - case *Status: - if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want { - t.Fatalf("wrong head block in status, want: %#x (block %d) have %#x", - want, chain.blocks[chain.Len()-1].NumberU64(), have) - } - if have, want := msg.TD.Cmp(chain.TD(chain.Len())), 0; have != want { - t.Fatalf("wrong TD in status: have %v want %v", have, want) - } - if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { - t.Fatalf("wrong fork ID in status: have %v, want %v", have, want) - } - message = msg - break loop - case *Disconnect: - t.Fatalf("disconnect received: %v", msg.Reason) - case *Ping: - c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error - // (PINGs should not be a response upon fresh connection) - default: - t.Fatalf("bad status message: %s", pretty.Sdump(msg)) - } - } - // make sure eth protocol version is set for negotiation - if c.negotiatedProtoVersion == 0 { - t.Fatalf("eth protocol version must be set in Conn") - } - if status == nil { - // write status message to client - status = &Status{ - ProtocolVersion: uint32(c.negotiatedProtoVersion), - NetworkID: chain.chainConfig.ChainID.Uint64(), - TD: chain.TD(chain.Len()), - Head: chain.blocks[chain.Len()-1].Hash(), - Genesis: chain.blocks[0].Hash(), - ForkID: chain.ForkID(), - } - } - - if err := c.Write(status); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - - return message -} - -// waitForBlock waits for confirmation from the client that it has -// imported the given block. 
-func (c *Conn) waitForBlock(block *types.Block) error { - defer c.SetReadDeadline(time.Time{}) - - c.SetReadDeadline(time.Now().Add(20 * time.Second)) - // note: if the node has not yet imported the block, it will respond - // to the GetBlockHeaders request with an empty BlockHeaders response, - // so the GetBlockHeaders request must be sent again until the BlockHeaders - // response contains the desired header. - for { - req := &GetBlockHeaders{Origin: eth.HashOrNumber{Hash: block.Hash()}, Amount: 1} - if err := c.Write(req); err != nil { - return err - } - switch msg := c.Read().(type) { - case *BlockHeaders: - for _, header := range *msg { - if header.Number.Uint64() == block.NumberU64() { - return nil - } - } - time.Sleep(100 * time.Millisecond) - default: - return fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) - } +// Write66 writes an eth66 packet to the connection. +func (c *Conn) Write66(req eth.Packet, code int) error { + payload, err := rlp.EncodeToBytes(req) + if err != nil { + return err } + _, err = c.Conn.Write(uint64(code), payload) + return err } From 10962b685ef08579846342b3ab80507306ccd494 Mon Sep 17 00:00:00 2001 From: meowsbits Date: Tue, 25 May 2021 16:22:46 -0500 Subject: [PATCH 054/208] ethstats: fix URL parser for '@' or ':' in node name/password (#21640) Fixes the case (example below) where the value passed to --ethstats flag would be parsed wrongly because the node name and/or password value contained the special characters '@' or ':' --ethstats "ETC Labs Metrics @meowsbits":mypass@ws://mordor.dash.fault.dev:3000 --- ethstats/ethstats.go | 39 +++++++++++++++++------ ethstats/ethstats_test.go | 67 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+), 9 deletions(-) create mode 100644 ethstats/ethstats_test.go diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index c7acb9481c54..ef83e5a4eb6c 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -24,7 +24,6 @@ import ( "fmt" "math/big" "net/http" - "regexp" "runtime" "strconv" "strings" @@ -144,21 +143,43 @@ func (w *connWrapper) Close() error { return w.conn.Close() } +// parseEthstatsURL parses the netstats connection url. +// URL argument should be of the form +// If non-erroring, the returned slice contains 3 elements: [nodename, pass, host] +func parseEthstatsURL(url string) (parts []string, err error) { + err = fmt.Errorf("invalid netstats url: \"%s\", should be nodename:secret@host:port", url) + + hostIndex := strings.LastIndex(url, "@") + if hostIndex == -1 || hostIndex == len(url)-1 { + return nil, err + } + preHost, host := url[:hostIndex], url[hostIndex+1:] + + passIndex := strings.LastIndex(preHost, ":") + if passIndex == -1 { + return []string{preHost, "", host}, nil + } + nodename, pass := preHost[:passIndex], "" + if passIndex != len(preHost)-1 { + pass = preHost[passIndex+1:] + } + + return []string{nodename, pass, host}, nil +} + // New returns a monitoring service ready for stats reporting. 
func New(node *node.Node, backend backend, engine consensus.Engine, url string) error { - // Parse the netstats connection url - re := regexp.MustCompile("([^:@]*)(:([^@]*))?@(.+)") - parts := re.FindStringSubmatch(url) - if len(parts) != 5 { - return fmt.Errorf("invalid netstats url: \"%s\", should be nodename:secret@host:port", url) + parts, err := parseEthstatsURL(url) + if err != nil { + return err } ethstats := &Service{ backend: backend, engine: engine, server: node.Server(), - node: parts[1], - pass: parts[3], - host: parts[4], + node: parts[0], + pass: parts[1], + host: parts[2], pongCh: make(chan struct{}), histCh: make(chan []uint64, 1), } diff --git a/ethstats/ethstats_test.go b/ethstats/ethstats_test.go new file mode 100644 index 000000000000..92cec50c4d31 --- /dev/null +++ b/ethstats/ethstats_test.go @@ -0,0 +1,67 @@ +package ethstats + +import ( + "strconv" + "testing" +) + +func TestParseEthstatsURL(t *testing.T) { + cases := []struct { + url string + node, pass, host string + }{ + { + url: `"debug meowsbits":mypass@ws://mordor.dash.fault.dev:3000`, + node: "debug meowsbits", pass: "mypass", host: "ws://mordor.dash.fault.dev:3000", + }, + { + url: `"debug @meowsbits":mypass@ws://mordor.dash.fault.dev:3000`, + node: "debug @meowsbits", pass: "mypass", host: "ws://mordor.dash.fault.dev:3000", + }, + { + url: `"debug: @meowsbits":mypass@ws://mordor.dash.fault.dev:3000`, + node: "debug: @meowsbits", pass: "mypass", host: "ws://mordor.dash.fault.dev:3000", + }, + { + url: `name:@ws://mordor.dash.fault.dev:3000`, + node: "name", pass: "", host: "ws://mordor.dash.fault.dev:3000", + }, + { + url: `name@ws://mordor.dash.fault.dev:3000`, + node: "name", pass: "", host: "ws://mordor.dash.fault.dev:3000", + }, + { + url: `:mypass@ws://mordor.dash.fault.dev:3000`, + node: "", pass: "mypass", host: "ws://mordor.dash.fault.dev:3000", + }, + { + url: `:@ws://mordor.dash.fault.dev:3000`, + node: "", pass: "", host: "ws://mordor.dash.fault.dev:3000", + }, + } + + for i, c := range cases { + parts, err := parseEthstatsURL(c.url) + if err != nil { + t.Fatal(err) + } + node, pass, host := parts[0], parts[1], parts[2] + + // unquote because the value provided will be used as a CLI flag value, so unescaped quotes will be removed + nodeUnquote, err := strconv.Unquote(node) + if err == nil { + node = nodeUnquote + } + + if node != c.node { + t.Errorf("case=%d mismatch node value, got: %v ,want: %v", i, node, c.node) + } + if pass != c.pass { + t.Errorf("case=%d mismatch pass value, got: %v ,want: %v", i, pass, c.pass) + } + if host != c.host { + t.Errorf("case=%d mismatch host value, got: %v ,want: %v", i, host, c.host) + } + } + +} From 05dab7f6bde376d1608ee7800b38cee7ce600d4d Mon Sep 17 00:00:00 2001 From: aaronbuchwald Date: Wed, 26 May 2021 02:39:41 -0400 Subject: [PATCH 055/208] internal/ethapi: remove unused vm.Config parameter of DoCall (#22942) --- graphql/graphql.go | 5 ++--- internal/ethapi/api.go | 6 +++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/graphql/graphql.go b/graphql/graphql.go index 71d80d8abd69..94287b0d6bf4 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -30,7 +30,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/rpc" @@ -870,7 +869,7 @@ func (b *Block) Call(ctx 
context.Context, args struct { return nil, err } } - result, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, vm.Config{}, 5*time.Second, b.backend.RPCGasCap()) + result, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap()) if err != nil { return nil, err } @@ -940,7 +939,7 @@ func (p *Pending) Call(ctx context.Context, args struct { Data ethapi.TransactionArgs }) (*CallResult, error) { pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - result, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, 5*time.Second, p.backend.RPCGasCap()) + result, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, 5*time.Second, p.backend.RPCGasCap()) if err != nil { return nil, err } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index b06df8ff9f2c..bf7c8e533d9e 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -806,7 +806,7 @@ func (diff *StateOverride) Apply(state *state.StateDB) error { return nil } -func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, vmCfg vm.Config, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { +func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) @@ -895,7 +895,7 @@ func (e *revertError) ErrorData() interface{} { // Note, this function doesn't make and changes in the state/blockchain and is // useful to execute and retrieve values. 
func (s *PublicBlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) { - result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, vm.Config{}, 5*time.Second, s.b.RPCGasCap()) + result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, 5*time.Second, s.b.RPCGasCap()) if err != nil { return nil, err } @@ -969,7 +969,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr executable := func(gas uint64) (bool, *core.ExecutionResult, error) { args.Gas = (*hexutil.Uint64)(&gas) - result, err := DoCall(ctx, b, args, blockNrOrHash, nil, vm.Config{}, 0, gasCap) + result, err := DoCall(ctx, b, args, blockNrOrHash, nil, 0, gasCap) if err != nil { if errors.Is(err, core.ErrIntrinsicGas) { return true, nil, nil // Special case, raise gas limit From c73652da0bb0ca4a4ecf3b88b0efed085be9adc4 Mon Sep 17 00:00:00 2001 From: gary rong Date: Wed, 26 May 2021 15:58:09 +0800 Subject: [PATCH 056/208] core/state/snapshot: fix flaky tests (#22944) * core/state/snapshot: fix flaky tests * core/state/snapshot: fix tests --- core/state/snapshot/generate_test.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index 3a669085f748..a92517b3155f 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -71,7 +71,7 @@ func TestGeneration(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -136,7 +136,7 @@ func TestGenerateExistentState(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -309,7 +309,7 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -361,7 +361,7 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -406,7 +406,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) { // Snapshot generation succeeded t.Errorf("Snapshot generated against corrupt account trie") - case <-time.After(250 * time.Millisecond): + case <-time.After(time.Second): // Not generated fast enough, hopefully blocked inside on missing trie node fail } // Signal abortion to the generator and wait for it to tear down @@ -466,7 +466,7 @@ func TestGenerateMissingStorageTrie(t *testing.T) { // Snapshot generation succeeded t.Errorf("Snapshot generated against corrupt storage trie") - case <-time.After(250 * time.Millisecond): + case <-time.After(time.Second): // Not generated fast enough, hopefully blocked inside on missing trie node fail } // Signal abortion to the generator and wait for it to tear down @@ -525,7 +525,7 @@ func TestGenerateCorruptStorageTrie(t *testing.T) { // Snapshot generation succeeded t.Errorf("Snapshot generated against corrupt storage 
trie") - case <-time.After(250 * time.Millisecond): + case <-time.After(time.Second): // Not generated fast enough, hopefully blocked inside on missing trie node fail } // Signal abortion to the generator and wait for it to tear down @@ -588,7 +588,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -646,7 +646,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -699,7 +699,7 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -743,7 +743,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -775,7 +775,7 @@ func TestGenerateFromEmptySnap(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(1 * time.Second): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) @@ -822,7 +822,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) { case <-snap.genPending: // Snapshot generation succeeded - case <-time.After(250 * time.Millisecond): + case <-time.After(3 * time.Second): t.Errorf("Snapshot generation failed") } checkSnapRoot(t, snap, root) From 5869789d7500dba00e8f78382d5af82ef73a9b0e Mon Sep 17 00:00:00 2001 From: Mike Burr Date: Wed, 26 May 2021 14:33:00 -0600 Subject: [PATCH 057/208] ethstats: fix typo in comment (#22952) Trivial but helpful to understanding. --- ethstats/ethstats.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index ef83e5a4eb6c..42d88f6db61f 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -353,7 +353,7 @@ func (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, txEventCh chan core // it, if they themselves are requests it initiates a reply, and lastly it drops // unknown packets. 
func (s *Service) readLoop(conn *connWrapper) { - // If the read loop exists, close the connection + // If the read loop exits, close the connection defer conn.Close() for { From 2e7714f8648b375f69f4146e702d5f19b36e55ba Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Thu, 27 May 2021 10:13:35 +0200 Subject: [PATCH 058/208] cmd/utils: avoid large alloc in --dev mode (#22949) * cmd/utils: avoid 1Gb alloc in --dev mode * cmd/geth: avoid 512Mb alloc in genesis query tests --- cmd/geth/genesis_test.go | 2 +- cmd/utils/flags.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index cbc1b3837468..0563ef3c4271 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -84,7 +84,7 @@ func TestCustomGenesis(t *testing.T) { runGeth(t, "--datadir", datadir, "init", json).WaitExit() // Query the custom genesis block - geth := runGeth(t, "--networkid", "1337", "--syncmode=full", + geth := runGeth(t, "--networkid", "1337", "--syncmode=full", "--cache", "16", "--datadir", datadir, "--maxpeers", "0", "--port", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--exec", tt.query, "console") diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 1ae07108e4ae..c41286e4ec87 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1234,6 +1234,9 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalIsSet(KeyStoreDirFlag.Name) { cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name) } + if ctx.GlobalIsSet(DeveloperFlag.Name) { + cfg.UseLightweightKDF = true + } if ctx.GlobalIsSet(LightKDFFlag.Name) { cfg.UseLightweightKDF = ctx.GlobalBool(LightKDFFlag.Name) } @@ -1647,6 +1650,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 1337 } + cfg.SyncMode = downloader.FullSync // Create new developer account or reuse existing one var ( developer accounts.Account From 7194c847b6e7f545f2aad57d8eae0a046e08d7a4 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 27 May 2021 10:19:13 +0200 Subject: [PATCH 059/208] p2p/rlpx: reduce allocation and syscalls (#22899) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change significantly improves the performance of RLPx message reads and writes. In the previous implementation, reading and writing of message frames performed multiple reads and writes on the underlying network connection, and allocated a new []byte buffer for every read. In the new implementation, reads and writes re-use buffers, and perform much fewer system calls on the underlying connection. This doubles the theoretically achievable throughput on a single connection, as shown by the benchmark result: name old speed new speed delta Throughput-8 70.3MB/s ± 0% 155.4MB/s ± 0% +121.11% (p=0.000 n=9+8) The change also removes support for the legacy, pre-EIP-8 handshake encoding. As of May 2021, no actively maintained client sends this format. 
--- p2p/rlpx/buffer.go | 127 +++++++++++++ p2p/rlpx/buffer_test.go | 51 ++++++ p2p/rlpx/rlpx.go | 385 ++++++++++++++++++++-------------------- p2p/rlpx/rlpx_test.go | 123 +++++++++---- p2p/transport.go | 5 + rlp/raw.go | 8 + rlp/raw_test.go | 6 + 7 files changed, 478 insertions(+), 227 deletions(-) create mode 100644 p2p/rlpx/buffer.go create mode 100644 p2p/rlpx/buffer_test.go diff --git a/p2p/rlpx/buffer.go b/p2p/rlpx/buffer.go new file mode 100644 index 000000000000..bb38e10577ef --- /dev/null +++ b/p2p/rlpx/buffer.go @@ -0,0 +1,127 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlpx + +import ( + "io" +) + +// readBuffer implements buffering for network reads. This type is similar to bufio.Reader, +// with two crucial differences: the buffer slice is exposed, and the buffer keeps all +// read data available until reset. +// +// How to use this type: +// +// Keep a readBuffer b alongside the underlying network connection. When reading a packet +// from the connection, first call b.reset(). This empties b.data. Now perform reads +// through b.read() until the end of the packet is reached. The complete packet data is +// now available in b.data. +type readBuffer struct { + data []byte + end int +} + +// reset removes all processed data which was read since the last call to reset. +// After reset, len(b.data) is zero. +func (b *readBuffer) reset() { + unprocessed := b.end - len(b.data) + copy(b.data[:unprocessed], b.data[len(b.data):b.end]) + b.end = unprocessed + b.data = b.data[:0] +} + +// read reads at least n bytes from r, returning the bytes. +// The returned slice is valid until the next call to reset. +func (b *readBuffer) read(r io.Reader, n int) ([]byte, error) { + offset := len(b.data) + have := b.end - len(b.data) + + // If n bytes are available in the buffer, there is no need to read from r at all. + if have >= n { + b.data = b.data[:offset+n] + return b.data[offset : offset+n], nil + } + + // Make buffer space available. + need := n - have + b.grow(need) + + // Read. + rn, err := io.ReadAtLeast(r, b.data[b.end:cap(b.data)], need) + if err != nil { + return nil, err + } + b.end += rn + b.data = b.data[:offset+n] + return b.data[offset : offset+n], nil +} + +// grow ensures the buffer has at least n bytes of unused space. +func (b *readBuffer) grow(n int) { + if cap(b.data)-b.end >= n { + return + } + need := n - (cap(b.data) - b.end) + offset := len(b.data) + b.data = append(b.data[:cap(b.data)], make([]byte, need)...) + b.data = b.data[:offset] +} + +// writeBuffer implements buffering for network writes. This is essentially +// a convenience wrapper around a byte slice. 
+type writeBuffer struct { + data []byte +} + +func (b *writeBuffer) reset() { + b.data = b.data[:0] +} + +func (b *writeBuffer) appendZero(n int) []byte { + offset := len(b.data) + b.data = append(b.data, make([]byte, n)...) + return b.data[offset : offset+n] +} + +func (b *writeBuffer) Write(data []byte) (int, error) { + b.data = append(b.data, data...) + return len(data), nil +} + +const maxUint24 = int(^uint32(0) >> 8) + +func readUint24(b []byte) uint32 { + return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 +} + +func putUint24(v uint32, b []byte) { + b[0] = byte(v >> 16) + b[1] = byte(v >> 8) + b[2] = byte(v) +} + +// growslice ensures b has the wanted length by either expanding it to its capacity +// or allocating a new slice if b has insufficient capacity. +func growslice(b []byte, wantLength int) []byte { + if len(b) >= wantLength { + return b + } + if cap(b) >= wantLength { + return b[:cap(b)] + } + return make([]byte, wantLength) +} diff --git a/p2p/rlpx/buffer_test.go b/p2p/rlpx/buffer_test.go new file mode 100644 index 000000000000..9fee4172bd09 --- /dev/null +++ b/p2p/rlpx/buffer_test.go @@ -0,0 +1,51 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlpx + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" +) + +func TestReadBufferReset(t *testing.T) { + reader := bytes.NewReader(hexutil.MustDecode("0x010202030303040505")) + var b readBuffer + + s1, _ := b.read(reader, 1) + s2, _ := b.read(reader, 2) + s3, _ := b.read(reader, 3) + + assert.Equal(t, []byte{1}, s1) + assert.Equal(t, []byte{2, 2}, s2) + assert.Equal(t, []byte{3, 3, 3}, s3) + + b.reset() + + s4, _ := b.read(reader, 1) + s5, _ := b.read(reader, 2) + + assert.Equal(t, []byte{4}, s4) + assert.Equal(t, []byte{5, 5}, s5) + + s6, err := b.read(reader, 2) + + assert.EqualError(t, err, "EOF") + assert.Nil(t, s6) +} diff --git a/p2p/rlpx/rlpx.go b/p2p/rlpx/rlpx.go index 2021bf08be11..326c7c494138 100644 --- a/p2p/rlpx/rlpx.go +++ b/p2p/rlpx/rlpx.go @@ -48,19 +48,45 @@ import ( // This type is not generally safe for concurrent use, but reading and writing of messages // may happen concurrently after the handshake. type Conn struct { - dialDest *ecdsa.PublicKey - conn net.Conn - handshake *handshakeState - snappy bool + dialDest *ecdsa.PublicKey + conn net.Conn + session *sessionState + + // These are the buffers for snappy compression. + // Compression is enabled if they are non-nil. + snappyReadBuffer []byte + snappyWriteBuffer []byte } -type handshakeState struct { +// sessionState contains the session keys. 
+type sessionState struct { enc cipher.Stream dec cipher.Stream - macCipher cipher.Block - egressMAC hash.Hash - ingressMAC hash.Hash + egressMAC hashMAC + ingressMAC hashMAC + rbuf readBuffer + wbuf writeBuffer +} + +// hashMAC holds the state of the RLPx v4 MAC contraption. +type hashMAC struct { + cipher cipher.Block + hash hash.Hash + aesBuffer [16]byte + hashBuffer [32]byte + seedBuffer [32]byte +} + +func newHashMAC(cipher cipher.Block, h hash.Hash) hashMAC { + m := hashMAC{cipher: cipher, hash: h} + if cipher.BlockSize() != len(m.aesBuffer) { + panic(fmt.Errorf("invalid MAC cipher block size %d", cipher.BlockSize())) + } + if h.Size() != len(m.hashBuffer) { + panic(fmt.Errorf("invalid MAC digest size %d", h.Size())) + } + return m } // NewConn wraps the given network connection. If dialDest is non-nil, the connection @@ -76,7 +102,13 @@ func NewConn(conn net.Conn, dialDest *ecdsa.PublicKey) *Conn { // after the devp2p Hello message exchange when the negotiated version indicates that // compression is available on both ends of the connection. func (c *Conn) SetSnappy(snappy bool) { - c.snappy = snappy + if snappy { + c.snappyReadBuffer = []byte{} + c.snappyWriteBuffer = []byte{} + } else { + c.snappyReadBuffer = nil + c.snappyWriteBuffer = nil + } } // SetReadDeadline sets the deadline for all future read operations. @@ -95,12 +127,13 @@ func (c *Conn) SetDeadline(time time.Time) error { } // Read reads a message from the connection. +// The returned data buffer is valid until the next call to Read. func (c *Conn) Read() (code uint64, data []byte, wireSize int, err error) { - if c.handshake == nil { + if c.session == nil { panic("can't ReadMsg before handshake") } - frame, err := c.handshake.readFrame(c.conn) + frame, err := c.session.readFrame(c.conn) if err != nil { return 0, nil, 0, err } @@ -111,7 +144,7 @@ func (c *Conn) Read() (code uint64, data []byte, wireSize int, err error) { wireSize = len(data) // If snappy is enabled, verify and decompress message. - if c.snappy { + if c.snappyReadBuffer != nil { var actualSize int actualSize, err = snappy.DecodedLen(data) if err != nil { @@ -120,51 +153,55 @@ func (c *Conn) Read() (code uint64, data []byte, wireSize int, err error) { if actualSize > maxUint24 { return code, nil, 0, errPlainMessageTooLarge } - data, err = snappy.Decode(nil, data) + c.snappyReadBuffer = growslice(c.snappyReadBuffer, actualSize) + data, err = snappy.Decode(c.snappyReadBuffer, data) } return code, data, wireSize, err } -func (h *handshakeState) readFrame(conn io.Reader) ([]byte, error) { - // read the header - headbuf := make([]byte, 32) - if _, err := io.ReadFull(conn, headbuf); err != nil { +func (h *sessionState) readFrame(conn io.Reader) ([]byte, error) { + h.rbuf.reset() + + // Read the frame header. + header, err := h.rbuf.read(conn, 32) + if err != nil { return nil, err } - // verify header mac - shouldMAC := updateMAC(h.ingressMAC, h.macCipher, headbuf[:16]) - if !hmac.Equal(shouldMAC, headbuf[16:]) { + // Verify header MAC. + wantHeaderMAC := h.ingressMAC.computeHeader(header[:16]) + if !hmac.Equal(wantHeaderMAC, header[16:]) { return nil, errors.New("bad header MAC") } - h.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted - fsize := readInt24(headbuf) - // ignore protocol type for now - // read the frame content - var rsize = fsize // frame size rounded up to 16 byte boundary + // Decrypt the frame header to get the frame size. 
+ h.dec.XORKeyStream(header[:16], header[:16]) + fsize := readUint24(header[:16]) + // Frame size rounded up to 16 byte boundary for padding. + rsize := fsize if padding := fsize % 16; padding > 0 { rsize += 16 - padding } - framebuf := make([]byte, rsize) - if _, err := io.ReadFull(conn, framebuf); err != nil { + + // Read the frame content. + frame, err := h.rbuf.read(conn, int(rsize)) + if err != nil { return nil, err } - // read and validate frame MAC. we can re-use headbuf for that. - h.ingressMAC.Write(framebuf) - fmacseed := h.ingressMAC.Sum(nil) - if _, err := io.ReadFull(conn, headbuf[:16]); err != nil { + // Validate frame MAC. + frameMAC, err := h.rbuf.read(conn, 16) + if err != nil { return nil, err } - shouldMAC = updateMAC(h.ingressMAC, h.macCipher, fmacseed) - if !hmac.Equal(shouldMAC, headbuf[:16]) { + wantFrameMAC := h.ingressMAC.computeFrame(frame) + if !hmac.Equal(wantFrameMAC, frameMAC) { return nil, errors.New("bad frame MAC") } - // decrypt frame content - h.dec.XORKeyStream(framebuf, framebuf) - return framebuf[:fsize], nil + // Decrypt the frame data. + h.dec.XORKeyStream(frame, frame) + return frame[:fsize], nil } // Write writes a message to the connection. @@ -172,83 +209,90 @@ func (h *handshakeState) readFrame(conn io.Reader) ([]byte, error) { // Write returns the written size of the message data. This may be less than or equal to // len(data) depending on whether snappy compression is enabled. func (c *Conn) Write(code uint64, data []byte) (uint32, error) { - if c.handshake == nil { + if c.session == nil { panic("can't WriteMsg before handshake") } if len(data) > maxUint24 { return 0, errPlainMessageTooLarge } - if c.snappy { - data = snappy.Encode(nil, data) + if c.snappyWriteBuffer != nil { + // Ensure the buffer has sufficient size. + // Package snappy will allocate its own buffer if the provided + // one is smaller than MaxEncodedLen. + c.snappyWriteBuffer = growslice(c.snappyWriteBuffer, snappy.MaxEncodedLen(len(data))) + data = snappy.Encode(c.snappyWriteBuffer, data) } wireSize := uint32(len(data)) - err := c.handshake.writeFrame(c.conn, code, data) + err := c.session.writeFrame(c.conn, code, data) return wireSize, err } -func (h *handshakeState) writeFrame(conn io.Writer, code uint64, data []byte) error { - ptype, _ := rlp.EncodeToBytes(code) +func (h *sessionState) writeFrame(conn io.Writer, code uint64, data []byte) error { + h.wbuf.reset() - // write header - headbuf := make([]byte, 32) - fsize := len(ptype) + len(data) + // Write header. + fsize := rlp.IntSize(code) + len(data) if fsize > maxUint24 { return errPlainMessageTooLarge } - putInt24(uint32(fsize), headbuf) - copy(headbuf[3:], zeroHeader) - h.enc.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now encrypted + header := h.wbuf.appendZero(16) + putUint24(uint32(fsize), header) + copy(header[3:], zeroHeader) + h.enc.XORKeyStream(header, header) - // write header MAC - copy(headbuf[16:], updateMAC(h.egressMAC, h.macCipher, headbuf[:16])) - if _, err := conn.Write(headbuf); err != nil { - return err - } + // Write header MAC. + h.wbuf.Write(h.egressMAC.computeHeader(header)) - // write encrypted frame, updating the egress MAC hash with - // the data written to conn. - tee := cipher.StreamWriter{S: h.enc, W: io.MultiWriter(conn, h.egressMAC)} - if _, err := tee.Write(ptype); err != nil { - return err - } - if _, err := tee.Write(data); err != nil { - return err - } + // Encode and encrypt the frame data. 
+ offset := len(h.wbuf.data) + h.wbuf.data = rlp.AppendUint64(h.wbuf.data, code) + h.wbuf.Write(data) if padding := fsize % 16; padding > 0 { - if _, err := tee.Write(zero16[:16-padding]); err != nil { - return err - } + h.wbuf.appendZero(16 - padding) } + framedata := h.wbuf.data[offset:] + h.enc.XORKeyStream(framedata, framedata) - // write frame MAC. egress MAC hash is up to date because - // frame content was written to it as well. - fmacseed := h.egressMAC.Sum(nil) - mac := updateMAC(h.egressMAC, h.macCipher, fmacseed) - _, err := conn.Write(mac) + // Write frame MAC. + h.wbuf.Write(h.egressMAC.computeFrame(framedata)) + + _, err := conn.Write(h.wbuf.data) return err } -func readInt24(b []byte) uint32 { - return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 +// computeHeader computes the MAC of a frame header. +func (m *hashMAC) computeHeader(header []byte) []byte { + sum1 := m.hash.Sum(m.hashBuffer[:0]) + return m.compute(sum1, header) } -func putInt24(v uint32, b []byte) { - b[0] = byte(v >> 16) - b[1] = byte(v >> 8) - b[2] = byte(v) +// computeFrame computes the MAC of framedata. +func (m *hashMAC) computeFrame(framedata []byte) []byte { + m.hash.Write(framedata) + seed := m.hash.Sum(m.seedBuffer[:0]) + return m.compute(seed, seed[:16]) } -// updateMAC reseeds the given hash with encrypted seed. -// it returns the first 16 bytes of the hash sum after seeding. -func updateMAC(mac hash.Hash, block cipher.Block, seed []byte) []byte { - aesbuf := make([]byte, aes.BlockSize) - block.Encrypt(aesbuf, mac.Sum(nil)) - for i := range aesbuf { - aesbuf[i] ^= seed[i] +// compute computes the MAC of a 16-byte 'seed'. +// +// To do this, it encrypts the current value of the hash state, then XORs the ciphertext +// with seed. The obtained value is written back into the hash state and hash output is +// taken again. The first 16 bytes of the resulting sum are the MAC value. +// +// This MAC construction is a horrible, legacy thing. +func (m *hashMAC) compute(sum1, seed []byte) []byte { + if len(seed) != len(m.aesBuffer) { + panic("invalid MAC seed") + } + + m.cipher.Encrypt(m.aesBuffer[:], sum1) + for i := range m.aesBuffer { + m.aesBuffer[i] ^= seed[i] } - mac.Write(aesbuf) - return mac.Sum(nil)[:16] + m.hash.Write(m.aesBuffer[:]) + sum2 := m.hash.Sum(m.hashBuffer[:0]) + return sum2[:16] } // Handshake performs the handshake. This must be called before any data is written @@ -257,23 +301,26 @@ func (c *Conn) Handshake(prv *ecdsa.PrivateKey) (*ecdsa.PublicKey, error) { var ( sec Secrets err error + h handshakeState ) if c.dialDest != nil { - sec, err = initiatorEncHandshake(c.conn, prv, c.dialDest) + sec, err = h.runInitiator(c.conn, prv, c.dialDest) } else { - sec, err = receiverEncHandshake(c.conn, prv) + sec, err = h.runRecipient(c.conn, prv) } if err != nil { return nil, err } c.InitWithSecrets(sec) + c.session.rbuf = h.rbuf + c.session.wbuf = h.wbuf return sec.remote, err } // InitWithSecrets injects connection secrets as if a handshake had // been performed. This cannot be called after the handshake. func (c *Conn) InitWithSecrets(sec Secrets) { - if c.handshake != nil { + if c.session != nil { panic("can't handshake twice") } macc, err := aes.NewCipher(sec.MAC) @@ -287,12 +334,11 @@ func (c *Conn) InitWithSecrets(sec Secrets) { // we use an all-zeroes IV for AES because the key used // for encryption is ephemeral. 
iv := make([]byte, encc.BlockSize()) - c.handshake = &handshakeState{ + c.session = &sessionState{ enc: cipher.NewCTR(encc, iv), dec: cipher.NewCTR(encc, iv), - macCipher: macc, - egressMAC: sec.EgressMAC, - ingressMAC: sec.IngressMAC, + egressMAC: newHashMAC(macc, sec.EgressMAC), + ingressMAC: newHashMAC(macc, sec.IngressMAC), } } @@ -303,28 +349,18 @@ func (c *Conn) Close() error { // Constants for the handshake. const ( - maxUint24 = int(^uint32(0) >> 8) - sskLen = 16 // ecies.MaxSharedKeyLength(pubKey) / 2 sigLen = crypto.SignatureLength // elliptic S256 pubLen = 64 // 512 bit pubkey in uncompressed representation without format byte shaLen = 32 // hash length (for nonce etc) - authMsgLen = sigLen + shaLen + pubLen + shaLen + 1 - authRespLen = pubLen + shaLen + 1 - eciesOverhead = 65 /* pubkey */ + 16 /* IV */ + 32 /* MAC */ - - encAuthMsgLen = authMsgLen + eciesOverhead // size of encrypted pre-EIP-8 initiator handshake - encAuthRespLen = authRespLen + eciesOverhead // size of encrypted pre-EIP-8 handshake reply ) var ( // this is used in place of actual frame header data. // TODO: replace this when Msg contains the protocol type code. zeroHeader = []byte{0xC2, 0x80, 0x80} - // sixteen zero bytes - zero16 = make([]byte, 16) // errPlainMessageTooLarge is returned if a decompressed message length exceeds // the allowed 24 bits (i.e. length >= 16MB). @@ -338,19 +374,20 @@ type Secrets struct { remote *ecdsa.PublicKey } -// encHandshake contains the state of the encryption handshake. -type encHandshake struct { +// handshakeState contains the state of the encryption handshake. +type handshakeState struct { initiator bool remote *ecies.PublicKey // remote-pubk initNonce, respNonce []byte // nonce randomPrivKey *ecies.PrivateKey // ecdhe-random remoteRandomPub *ecies.PublicKey // ecdhe-random-pubk + + rbuf readBuffer + wbuf writeBuffer } // RLPx v4 handshake auth (defined in EIP-8). type authMsgV4 struct { - gotPlain bool // whether read packet had plain format. - Signature [sigLen]byte InitiatorPubkey [pubLen]byte Nonce [shaLen]byte @@ -370,17 +407,16 @@ type authRespV4 struct { Rest []rlp.RawValue `rlp:"tail"` } -// receiverEncHandshake negotiates a session token on conn. +// runRecipient negotiates a session token on conn. // it should be called on the listening side of the connection. // // prv is the local client's private key. -func receiverEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey) (s Secrets, err error) { +func (h *handshakeState) runRecipient(conn io.ReadWriter, prv *ecdsa.PrivateKey) (s Secrets, err error) { authMsg := new(authMsgV4) - authPacket, err := readHandshakeMsg(authMsg, encAuthMsgLen, prv, conn) + authPacket, err := h.readMsg(authMsg, prv, conn) if err != nil { return s, err } - h := new(encHandshake) if err := h.handleAuthMsg(authMsg, prv); err != nil { return s, err } @@ -389,22 +425,18 @@ func receiverEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey) (s Secrets, if err != nil { return s, err } - var authRespPacket []byte - if authMsg.gotPlain { - authRespPacket, err = authRespMsg.sealPlain(h) - } else { - authRespPacket, err = sealEIP8(authRespMsg, h) - } + authRespPacket, err := h.sealEIP8(authRespMsg) if err != nil { return s, err } if _, err = conn.Write(authRespPacket); err != nil { return s, err } + return h.secrets(authPacket, authRespPacket) } -func (h *encHandshake) handleAuthMsg(msg *authMsgV4, prv *ecdsa.PrivateKey) error { +func (h *handshakeState) handleAuthMsg(msg *authMsgV4, prv *ecdsa.PrivateKey) error { // Import the remote identity. 
rpub, err := importPublicKey(msg.InitiatorPubkey[:]) if err != nil { @@ -438,7 +470,7 @@ func (h *encHandshake) handleAuthMsg(msg *authMsgV4, prv *ecdsa.PrivateKey) erro // secrets is called after the handshake is completed. // It extracts the connection secrets from the handshake values. -func (h *encHandshake) secrets(auth, authResp []byte) (Secrets, error) { +func (h *handshakeState) secrets(auth, authResp []byte) (Secrets, error) { ecdheSecret, err := h.randomPrivKey.GenerateShared(h.remoteRandomPub, sskLen, sskLen) if err != nil { return Secrets{}, err @@ -471,21 +503,23 @@ func (h *encHandshake) secrets(auth, authResp []byte) (Secrets, error) { // staticSharedSecret returns the static shared secret, the result // of key agreement between the local and remote static node key. -func (h *encHandshake) staticSharedSecret(prv *ecdsa.PrivateKey) ([]byte, error) { +func (h *handshakeState) staticSharedSecret(prv *ecdsa.PrivateKey) ([]byte, error) { return ecies.ImportECDSA(prv).GenerateShared(h.remote, sskLen, sskLen) } -// initiatorEncHandshake negotiates a session token on conn. +// runInitiator negotiates a session token on conn. // it should be called on the dialing side of the connection. // // prv is the local client's private key. -func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remote *ecdsa.PublicKey) (s Secrets, err error) { - h := &encHandshake{initiator: true, remote: ecies.ImportECDSAPublic(remote)} +func (h *handshakeState) runInitiator(conn io.ReadWriter, prv *ecdsa.PrivateKey, remote *ecdsa.PublicKey) (s Secrets, err error) { + h.initiator = true + h.remote = ecies.ImportECDSAPublic(remote) + authMsg, err := h.makeAuthMsg(prv) if err != nil { return s, err } - authPacket, err := sealEIP8(authMsg, h) + authPacket, err := h.sealEIP8(authMsg) if err != nil { return s, err } @@ -495,18 +529,19 @@ func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remote *ec } authRespMsg := new(authRespV4) - authRespPacket, err := readHandshakeMsg(authRespMsg, encAuthRespLen, prv, conn) + authRespPacket, err := h.readMsg(authRespMsg, prv, conn) if err != nil { return s, err } if err := h.handleAuthResp(authRespMsg); err != nil { return s, err } + return h.secrets(authPacket, authRespPacket) } // makeAuthMsg creates the initiator handshake message. -func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey) (*authMsgV4, error) { +func (h *handshakeState) makeAuthMsg(prv *ecdsa.PrivateKey) (*authMsgV4, error) { // Generate random initiator nonce. h.initNonce = make([]byte, shaLen) _, err := rand.Read(h.initNonce) @@ -538,13 +573,13 @@ func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey) (*authMsgV4, error) { return msg, nil } -func (h *encHandshake) handleAuthResp(msg *authRespV4) (err error) { +func (h *handshakeState) handleAuthResp(msg *authRespV4) (err error) { h.respNonce = msg.Nonce[:] h.remoteRandomPub, err = importPublicKey(msg.RandomPubkey[:]) return err } -func (h *encHandshake) makeAuthResp() (msg *authRespV4, err error) { +func (h *handshakeState) makeAuthResp() (msg *authRespV4, err error) { // Generate random nonce. 
h.respNonce = make([]byte, shaLen) if _, err = rand.Read(h.respNonce); err != nil { @@ -558,81 +593,53 @@ func (h *encHandshake) makeAuthResp() (msg *authRespV4, err error) { return msg, nil } -func (msg *authMsgV4) decodePlain(input []byte) { - n := copy(msg.Signature[:], input) - n += shaLen // skip sha3(initiator-ephemeral-pubk) - n += copy(msg.InitiatorPubkey[:], input[n:]) - copy(msg.Nonce[:], input[n:]) - msg.Version = 4 - msg.gotPlain = true -} +// readMsg reads an encrypted handshake message, decoding it into msg. +func (h *handshakeState) readMsg(msg interface{}, prv *ecdsa.PrivateKey, r io.Reader) ([]byte, error) { + h.rbuf.reset() + h.rbuf.grow(512) -func (msg *authRespV4) sealPlain(hs *encHandshake) ([]byte, error) { - buf := make([]byte, authRespLen) - n := copy(buf, msg.RandomPubkey[:]) - copy(buf[n:], msg.Nonce[:]) - return ecies.Encrypt(rand.Reader, hs.remote, buf, nil, nil) -} + // Read the size prefix. + prefix, err := h.rbuf.read(r, 2) + if err != nil { + return nil, err + } + size := binary.BigEndian.Uint16(prefix) -func (msg *authRespV4) decodePlain(input []byte) { - n := copy(msg.RandomPubkey[:], input) - copy(msg.Nonce[:], input[n:]) - msg.Version = 4 + // Read the handshake packet. + packet, err := h.rbuf.read(r, int(size)) + if err != nil { + return nil, err + } + dec, err := ecies.ImportECDSA(prv).Decrypt(packet, nil, prefix) + if err != nil { + return nil, err + } + // Can't use rlp.DecodeBytes here because it rejects + // trailing data (forward-compatibility). + s := rlp.NewStream(bytes.NewReader(dec), 0) + err = s.Decode(msg) + return h.rbuf.data[:len(prefix)+len(packet)], err } -var padSpace = make([]byte, 300) +// sealEIP8 encrypts a handshake message. +func (h *handshakeState) sealEIP8(msg interface{}) ([]byte, error) { + h.wbuf.reset() -func sealEIP8(msg interface{}, h *encHandshake) ([]byte, error) { - buf := new(bytes.Buffer) - if err := rlp.Encode(buf, msg); err != nil { + // Write the message plaintext. + if err := rlp.Encode(&h.wbuf, msg); err != nil { return nil, err } - // pad with random amount of data. the amount needs to be at least 100 bytes to make + // Pad with random amount of data. the amount needs to be at least 100 bytes to make // the message distinguishable from pre-EIP-8 handshakes. - pad := padSpace[:mrand.Intn(len(padSpace)-100)+100] - buf.Write(pad) + h.wbuf.appendZero(mrand.Intn(100) + 100) + prefix := make([]byte, 2) - binary.BigEndian.PutUint16(prefix, uint16(buf.Len()+eciesOverhead)) + binary.BigEndian.PutUint16(prefix, uint16(len(h.wbuf.data)+eciesOverhead)) - enc, err := ecies.Encrypt(rand.Reader, h.remote, buf.Bytes(), nil, prefix) + enc, err := ecies.Encrypt(rand.Reader, h.remote, h.wbuf.data, nil, prefix) return append(prefix, enc...), err } -type plainDecoder interface { - decodePlain([]byte) -} - -func readHandshakeMsg(msg plainDecoder, plainSize int, prv *ecdsa.PrivateKey, r io.Reader) ([]byte, error) { - buf := make([]byte, plainSize) - if _, err := io.ReadFull(r, buf); err != nil { - return buf, err - } - // Attempt decoding pre-EIP-8 "plain" format. - key := ecies.ImportECDSA(prv) - if dec, err := key.Decrypt(buf, nil, nil); err == nil { - msg.decodePlain(dec) - return buf, nil - } - // Could be EIP-8 format, try that. - prefix := buf[:2] - size := binary.BigEndian.Uint16(prefix) - if size < uint16(plainSize) { - return buf, fmt.Errorf("size underflow, need at least %d bytes", plainSize) - } - buf = append(buf, make([]byte, size-uint16(plainSize)+2)...) 
- if _, err := io.ReadFull(r, buf[plainSize:]); err != nil { - return buf, err - } - dec, err := key.Decrypt(buf[2:], nil, prefix) - if err != nil { - return buf, err - } - // Can't use rlp.DecodeBytes here because it rejects - // trailing data (forward-compatibility). - s := rlp.NewStream(bytes.NewReader(dec), 0) - return buf, s.Decode(msg) -} - // importPublicKey unmarshals 512 bit public keys. func importPublicKey(pubKey []byte) (*ecies.PublicKey, error) { var pubKey65 []byte diff --git a/p2p/rlpx/rlpx_test.go b/p2p/rlpx/rlpx_test.go index 127a01816454..28759f2b4948 100644 --- a/p2p/rlpx/rlpx_test.go +++ b/p2p/rlpx/rlpx_test.go @@ -22,6 +22,7 @@ import ( "encoding/hex" "fmt" "io" + "math/rand" "net" "reflect" "strings" @@ -30,6 +31,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/ecies" + "github.com/ethereum/go-ethereum/p2p/simulations/pipes" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) @@ -124,7 +126,7 @@ func TestFrameReadWrite(t *testing.T) { IngressMAC: hash, EgressMAC: hash, }) - h := conn.handshake + h := conn.session golden := unhex(` 00828ddae471818bb0bfa6b551d1cb42 @@ -166,27 +168,11 @@ func (h fakeHash) Sum(b []byte) []byte { return append(b, h...) } type handshakeAuthTest struct { input string - isPlain bool wantVersion uint wantRest []rlp.RawValue } var eip8HandshakeAuthTests = []handshakeAuthTest{ - // (Auth₁) RLPx v4 plain encoding - { - input: ` - 048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29a3d3dc6a3d89eaf - 913150cfc777ce0ce4af2758bf4810235f6e6ceccfee1acc6b22c005e9e3a49d6448610a58e98744 - ba3ac0399e82692d67c1f58849050b3024e21a52c9d3b01d871ff5f210817912773e610443a9ef14 - 2e91cdba0bd77b5fdf0769b05671fc35f83d83e4d3b0b000c6b2a1b1bba89e0fc51bf4e460df3105 - c444f14be226458940d6061c296350937ffd5e3acaceeaaefd3c6f74be8e23e0f45163cc7ebd7622 - 0f0128410fd05250273156d548a414444ae2f7dea4dfca2d43c057adb701a715bf59f6fb66b2d1d2 - 0f2c703f851cbf5ac47396d9ca65b6260bd141ac4d53e2de585a73d1750780db4c9ee4cd4d225173 - a4592ee77e2bd94d0be3691f3b406f9bba9b591fc63facc016bfa8 - `, - isPlain: true, - wantVersion: 4, - }, // (Auth₂) EIP-8 encoding { input: ` @@ -233,18 +219,6 @@ type handshakeAckTest struct { } var eip8HandshakeRespTests = []handshakeAckTest{ - // (Ack₁) RLPx v4 plain encoding - { - input: ` - 049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d99cadddaa387662 - b0ff2c08e9006d5a11a278b1b3331e5aaabf0a32f01281b6f4ede0e09a2d5f585b26513cb794d963 - 5a57563921c04a9090b4f14ee42be1a5461049af4ea7a7f49bf4c97a352d39c8d02ee4acc416388c - 1c66cec761d2bc1c72da6ba143477f049c9d2dde846c252c111b904f630ac98e51609b3b1f58168d - dca6505b7196532e5f85b259a20c45e1979491683fee108e9660edbf38f3add489ae73e3dda2c71b - d1497113d5c755e942d1 - `, - wantVersion: 4, - }, // (Ack₂) EIP-8 encoding { input: ` @@ -287,10 +261,13 @@ var eip8HandshakeRespTests = []handshakeAckTest{ }, } +var ( + keyA, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + keyB, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") +) + func TestHandshakeForwardCompatibility(t *testing.T) { var ( - keyA, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - keyB, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") pubA = crypto.FromECDSAPub(&keyA.PublicKey)[1:] pubB = crypto.FromECDSAPub(&keyB.PublicKey)[1:] ephA, _ = 
crypto.HexToECDSA("869d6ecf5211f1cc60418a13b9d870b22959d0c16f02bec714c960dd2298a32d") @@ -304,7 +281,7 @@ func TestHandshakeForwardCompatibility(t *testing.T) { _ = authSignature ) makeAuth := func(test handshakeAuthTest) *authMsgV4 { - msg := &authMsgV4{Version: test.wantVersion, Rest: test.wantRest, gotPlain: test.isPlain} + msg := &authMsgV4{Version: test.wantVersion, Rest: test.wantRest} copy(msg.Signature[:], authSignature) copy(msg.InitiatorPubkey[:], pubA) copy(msg.Nonce[:], nonceA) @@ -319,9 +296,10 @@ func TestHandshakeForwardCompatibility(t *testing.T) { // check auth msg parsing for _, test := range eip8HandshakeAuthTests { + var h handshakeState r := bytes.NewReader(unhex(test.input)) msg := new(authMsgV4) - ciphertext, err := readHandshakeMsg(msg, encAuthMsgLen, keyB, r) + ciphertext, err := h.readMsg(msg, keyB, r) if err != nil { t.Errorf("error for input %x:\n %v", unhex(test.input), err) continue @@ -337,10 +315,11 @@ func TestHandshakeForwardCompatibility(t *testing.T) { // check auth resp parsing for _, test := range eip8HandshakeRespTests { + var h handshakeState input := unhex(test.input) r := bytes.NewReader(input) msg := new(authRespV4) - ciphertext, err := readHandshakeMsg(msg, encAuthRespLen, keyA, r) + ciphertext, err := h.readMsg(msg, keyA, r) if err != nil { t.Errorf("error for input %x:\n %v", input, err) continue @@ -356,14 +335,14 @@ func TestHandshakeForwardCompatibility(t *testing.T) { // check derivation for (Auth₂, Ack₂) on recipient side var ( - hs = &encHandshake{ + hs = &handshakeState{ initiator: false, respNonce: nonceB, randomPrivKey: ecies.ImportECDSA(ephB), } - authCiphertext = unhex(eip8HandshakeAuthTests[1].input) - authRespCiphertext = unhex(eip8HandshakeRespTests[1].input) - authMsg = makeAuth(eip8HandshakeAuthTests[1]) + authCiphertext = unhex(eip8HandshakeAuthTests[0].input) + authRespCiphertext = unhex(eip8HandshakeRespTests[0].input) + authMsg = makeAuth(eip8HandshakeAuthTests[0]) wantAES = unhex("80e8632c05fed6fc2a13b0f8d31a3cf645366239170ea067065aba8e28bac487") wantMAC = unhex("2ea74ec5dae199227dff1af715362700e989d889d7a493cb0639691efb8e5f98") wantFooIngressHash = unhex("0c7ec6340062cc46f5e9f1e3cf86f8c8c403c5a0964f5df0ebd34a75ddc86db5") @@ -388,6 +367,74 @@ func TestHandshakeForwardCompatibility(t *testing.T) { } } +func BenchmarkHandshakeRead(b *testing.B) { + var input = unhex(eip8HandshakeAuthTests[0].input) + + for i := 0; i < b.N; i++ { + var ( + h handshakeState + r = bytes.NewReader(input) + msg = new(authMsgV4) + ) + if _, err := h.readMsg(msg, keyB, r); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkThroughput(b *testing.B) { + pipe1, pipe2, err := pipes.TCPPipe() + if err != nil { + b.Fatal(err) + } + + var ( + conn1, conn2 = NewConn(pipe1, nil), NewConn(pipe2, &keyA.PublicKey) + handshakeDone = make(chan error, 1) + msgdata = make([]byte, 1024) + rand = rand.New(rand.NewSource(1337)) + ) + rand.Read(msgdata) + + // Server side. + go func() { + defer conn1.Close() + // Perform handshake. + _, err := conn1.Handshake(keyA) + handshakeDone <- err + if err != nil { + return + } + conn1.SetSnappy(true) + // Keep sending messages until connection closed. + for { + if _, err := conn1.Write(0, msgdata); err != nil { + return + } + } + }() + + // Set up client side. + defer conn2.Close() + if _, err := conn2.Handshake(keyB); err != nil { + b.Fatal("client handshake error:", err) + } + conn2.SetSnappy(true) + if err := <-handshakeDone; err != nil { + b.Fatal("server hanshake error:", err) + } + + // Read N messages. 
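+	// Each iteration reads one message, so throughput is reported in payload
+	// bytes per second.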
+ b.SetBytes(int64(len(msgdata))) + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _, _, err := conn2.Read() + if err != nil { + b.Fatal("read error:", err) + } + } +} + func unhex(str string) []byte { r := strings.NewReplacer("\t", "", " ", "", "\n", "") b, err := hex.DecodeString(r.Replace(str)) diff --git a/p2p/transport.go b/p2p/transport.go index 3f1cd7d64f20..d59425986628 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/bitutil" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/rlpx" @@ -62,6 +63,10 @@ func (t *rlpxTransport) ReadMsg() (Msg, error) { t.conn.SetReadDeadline(time.Now().Add(frameReadTimeout)) code, data, wireSize, err := t.conn.Read() if err == nil { + // Protocol messages are dispatched to subprotocol handlers asynchronously, + // but package rlpx may reuse the returned 'data' buffer on the next call + // to Read. Copy the message data to avoid this being an issue. + data = common.CopyBytes(data) msg = Msg{ ReceivedAt: time.Now(), Code: code, diff --git a/rlp/raw.go b/rlp/raw.go index 3071e99cab1b..f355efc144df 100644 --- a/rlp/raw.go +++ b/rlp/raw.go @@ -34,6 +34,14 @@ func ListSize(contentSize uint64) uint64 { return uint64(headsize(contentSize)) + contentSize } +// IntSize returns the encoded size of the integer x. +func IntSize(x uint64) int { + if x < 0x80 { + return 1 + } + return 1 + intsize(x) +} + // Split returns the content of first RLP value and any // bytes after the value as subslices of b. func Split(b []byte) (k Kind, content, rest []byte, err error) { diff --git a/rlp/raw_test.go b/rlp/raw_test.go index c976c4f73429..185e269d075a 100644 --- a/rlp/raw_test.go +++ b/rlp/raw_test.go @@ -263,6 +263,12 @@ func TestAppendUint64(t *testing.T) { if !bytes.Equal(x, unhex(test.output)) { t.Errorf("AppendUint64(%v, %d): got %x, want %s", test.slice, test.input, x, test.output) } + + // Check that IntSize returns the appended size. 
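+		// AppendUint64 returns the input slice with the encoding appended, so the
+		// encoded size is the difference in length.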
+ length := len(x) - len(test.slice) + if s := IntSize(test.input); s != length { + t.Errorf("IntSize(%d): got %d, want %d", test.input, s, length) + } } } From d836ad141ef4842be494cd5156fc6bd25a13e463 Mon Sep 17 00:00:00 2001 From: rene <41963722+renaynay@users.noreply.github.com> Date: Thu, 27 May 2021 11:57:49 +0200 Subject: [PATCH 060/208] cmd/devp2p/internal/ethtest: add block hash announcement test (#22535) --- cmd/devp2p/internal/ethtest/helpers.go | 103 +++++++++++++++++++++++++ cmd/devp2p/internal/ethtest/suite.go | 20 +++++ 2 files changed, 123 insertions(+) diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index d99376124d2f..a9a213f3371d 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -633,3 +633,106 @@ func (s *Suite) maliciousStatus(conn *Conn) error { return fmt.Errorf("expected disconnect, got: %s", pretty.Sdump(msg)) } } + +func (s *Suite) hashAnnounce(isEth66 bool) error { + // create connections + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return fmt.Errorf("failed to create connections: %v", err) + } + defer sendConn.Close() + defer recvConn.Close() + if err := sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err := recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create NewBlockHashes announcement + nextBlock := s.fullChain.blocks[s.chain.Len()] + newBlockHash := &NewBlockHashes{ + {Hash: nextBlock.Hash(), Number: nextBlock.Number().Uint64()}, + } + + if err := sendConn.Write(newBlockHash); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + if isEth66 { + // expect GetBlockHeaders request, and respond + id, msg := sendConn.Read66() + switch msg := msg.(type) { + case GetBlockHeaders: + blockHeaderReq := msg + if blockHeaderReq.Amount != 1 { + return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount) + } + if blockHeaderReq.Origin.Hash != nextBlock.Hash() { + return fmt.Errorf("unexpected block header requested: %v", pretty.Sdump(blockHeaderReq)) + } + resp := ð.BlockHeadersPacket66{ + RequestId: id, + BlockHeadersPacket: eth.BlockHeadersPacket{ + nextBlock.Header(), + }, + } + if err := sendConn.Write66(resp, BlockHeaders{}.Code()); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + default: + return fmt.Errorf("unexpected %s", pretty.Sdump(msg)) + } + } else { + // expect GetBlockHeaders request, and respond + switch msg := sendConn.Read().(type) { + case *GetBlockHeaders: + blockHeaderReq := *msg + if blockHeaderReq.Amount != 1 { + return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount) + } + if blockHeaderReq.Origin.Hash != nextBlock.Hash() { + return fmt.Errorf("unexpected block header requested: %v", pretty.Sdump(blockHeaderReq)) + } + if err := sendConn.Write(&BlockHeaders{nextBlock.Header()}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + default: + return fmt.Errorf("unexpected %s", pretty.Sdump(msg)) + } + } + // wait for block announcement + msg := recvConn.readAndServe(s.chain, timeout) + switch msg := msg.(type) { + case *NewBlockHashes: + hashes := *msg + if len(hashes) != 1 { + return fmt.Errorf("unexpected new block hash announcement: wanted 1 announcement, got %d", len(hashes)) + } + if nextBlock.Hash() != hashes[0].Hash { + return fmt.Errorf("unexpected block 
hash announcement, wanted %v, got %v", nextBlock.Hash(), + hashes[0].Hash) + } + case *NewBlock: + // node should only propagate NewBlock without having requested the body if the body is empty + nextBlockBody := nextBlock.Body() + if len(nextBlockBody.Transactions) != 0 || len(nextBlockBody.Uncles) != 0 { + return fmt.Errorf("unexpected non-empty new block propagated: %s", pretty.Sdump(msg)) + } + if msg.Block.Hash() != nextBlock.Hash() { + return fmt.Errorf("mismatched hash of propagated new block: wanted %v, got %v", + nextBlock.Hash(), msg.Block.Hash()) + } + // check to make sure header matches header that was sent to the node + if !reflect.DeepEqual(nextBlock.Header(), msg.Block.Header()) { + return fmt.Errorf("incorrect header received: wanted %v, got %v", nextBlock.Header(), msg.Block.Header()) + } + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + // confirm node imported block + if err := s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { + return fmt.Errorf("error waiting for node to import new block: %v", err) + } + // update the chain + s.chain.blocks = append(s.chain.blocks, nextBlock) + return nil +} diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index ad832dddd2de..491bcda7e7c2 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -70,6 +70,8 @@ func (s *Suite) AllEthTests() []utesting.Test { {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce}, {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce}, + {Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66}, // malicious handshakes + status {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, @@ -93,6 +95,7 @@ func (s *Suite) EthTests() []utesting.Test { {Name: "TestBroadcast", Fn: s.TestBroadcast}, {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce}, + {Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce}, {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, {Name: "TestTransaction", Fn: s.TestTransaction}, @@ -112,6 +115,7 @@ func (s *Suite) Eth66Tests() []utesting.Test { {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66}, {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, {Name: "TestTransaction66", Fn: s.TestTransaction66}, @@ -580,6 +584,22 @@ func (s *Suite) TestOldAnnounce66(t *utesting.T) { } } +// TestBlockHashAnnounce sends a new block hash announcement and expects +// the node to perform a `GetBlockHeaders` request. +func (s *Suite) TestBlockHashAnnounce(t *utesting.T) { + if err := s.hashAnnounce(eth65); err != nil { + t.Fatalf("block hash announcement failed: %v", err) + } +} + +// TestBlockHashAnnounce66 sends a new block hash announcement and expects +// the node to perform a `GetBlockHeaders` request. 
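+// It is the eth/66 variant of TestBlockHashAnnounce.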
+func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) { + if err := s.hashAnnounce(eth66); err != nil { + t.Fatalf("block hash announcement failed: %v", err) + } +} + // TestMaliciousHandshake tries to send malicious data during the handshake. func (s *Suite) TestMaliciousHandshake(t *utesting.T) { if err := s.maliciousHandshakes(t, eth65); err != nil { From 0703ef62d388eafa177540ff722c3a0871c4979d Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Thu, 27 May 2021 13:30:25 +0200 Subject: [PATCH 061/208] crypto/secp256k1: fix undefined behavior in BitCurve.Add (#22621) This commit changes the behavior of BitCurve.Add to be more inline with btcd. It fixes two different bugs: 1) When adding a point at infinity to another point, the other point should be returned. While this is undefined behavior, it is better to be more inline with the go standard library. Thus (0,0) + (a, b) = (a,b) 2) Adding the same point to itself produced the point at infinity. This is incorrect, now doubleJacobian is used to correctly calculate it. Thus (a,b) + (a,b) == 2* (a,b) and not (0,0) anymore. The change also adds a differential fuzzer for Add, testing it against btcd. Co-authored-by: Felix Lange --- crypto/secp256k1/curve.go | 53 +++++------------------- crypto/secp256k1/panic_cb.go | 2 + crypto/secp256k1/scalar_mult_cgo.go | 56 ++++++++++++++++++++++++++ crypto/secp256k1/scalar_mult_nocgo.go | 13 ++++++ crypto/secp256k1/secp256.go | 2 + oss-fuzz.sh | 1 + tests/fuzzers/secp256k1/secp_fuzzer.go | 50 +++++++++++++++++++++++ tests/fuzzers/secp256k1/secp_test.go | 8 ++++ 8 files changed, 143 insertions(+), 42 deletions(-) create mode 100644 crypto/secp256k1/scalar_mult_cgo.go create mode 100644 crypto/secp256k1/scalar_mult_nocgo.go create mode 100644 tests/fuzzers/secp256k1/secp_fuzzer.go create mode 100644 tests/fuzzers/secp256k1/secp_test.go diff --git a/crypto/secp256k1/curve.go b/crypto/secp256k1/curve.go index 8f83cccad9d1..fa1b199a3484 100644 --- a/crypto/secp256k1/curve.go +++ b/crypto/secp256k1/curve.go @@ -35,15 +35,8 @@ package secp256k1 import ( "crypto/elliptic" "math/big" - "unsafe" ) -/* -#include "libsecp256k1/include/secp256k1.h" -extern int secp256k1_ext_scalar_mul(const secp256k1_context* ctx, const unsigned char *point, const unsigned char *scalar); -*/ -import "C" - const ( // number of bits in a big.Word wordBits = 32 << (uint64(^big.Word(0)) >> 63) @@ -133,7 +126,18 @@ func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big. // Add returns the sum of (x1,y1) and (x2,y2) func (BitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + // If one point is at infinity, return the other point. + // Adding the point at infinity to any point will preserve the other point. + if x1.Sign() == 0 && y1.Sign() == 0 { + return x2, y2 + } + if x2.Sign() == 0 && y2.Sign() == 0 { + return x1, y1 + } z := new(big.Int).SetInt64(1) + if x1.Cmp(x2) == 0 && y1.Cmp(y2) == 0 { + return BitCurve.affineFromJacobian(BitCurve.doubleJacobian(x1, y1, z)) + } return BitCurve.affineFromJacobian(BitCurve.addJacobian(x1, y1, z, x2, y2, z)) } @@ -242,41 +246,6 @@ func (BitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, return x3, y3, z3 } -func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { - // Ensure scalar is exactly 32 bytes. We pad always, even if - // scalar is 32 bytes long, to avoid a timing side channel. 
- if len(scalar) > 32 { - panic("can't handle scalars > 256 bits") - } - // NOTE: potential timing issue - padded := make([]byte, 32) - copy(padded[32-len(scalar):], scalar) - scalar = padded - - // Do the multiplication in C, updating point. - point := make([]byte, 64) - readBits(Bx, point[:32]) - readBits(By, point[32:]) - - pointPtr := (*C.uchar)(unsafe.Pointer(&point[0])) - scalarPtr := (*C.uchar)(unsafe.Pointer(&scalar[0])) - res := C.secp256k1_ext_scalar_mul(context, pointPtr, scalarPtr) - - // Unpack the result and clear temporaries. - x := new(big.Int).SetBytes(point[:32]) - y := new(big.Int).SetBytes(point[32:]) - for i := range point { - point[i] = 0 - } - for i := range padded { - scalar[i] = 0 - } - if res != 1 { - return nil, nil - } - return x, y -} - // ScalarBaseMult returns k*G, where G is the base point of the group and k is // an integer in big-endian form. func (BitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { diff --git a/crypto/secp256k1/panic_cb.go b/crypto/secp256k1/panic_cb.go index 6d59a1d247ea..262846fd8999 100644 --- a/crypto/secp256k1/panic_cb.go +++ b/crypto/secp256k1/panic_cb.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be found in // the LICENSE file. +// +build !gofuzz cgo + package secp256k1 import "C" diff --git a/crypto/secp256k1/scalar_mult_cgo.go b/crypto/secp256k1/scalar_mult_cgo.go new file mode 100644 index 000000000000..34998ad1a4ea --- /dev/null +++ b/crypto/secp256k1/scalar_mult_cgo.go @@ -0,0 +1,56 @@ +// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. + +// +build !gofuzz cgo + +package secp256k1 + +import ( + "math/big" + "unsafe" +) + +/* + +#include "libsecp256k1/include/secp256k1.h" + +extern int secp256k1_ext_scalar_mul(const secp256k1_context* ctx, const unsigned char *point, const unsigned char *scalar); + +*/ +import "C" + +func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { + // Ensure scalar is exactly 32 bytes. We pad always, even if + // scalar is 32 bytes long, to avoid a timing side channel. + if len(scalar) > 32 { + panic("can't handle scalars > 256 bits") + } + // NOTE: potential timing issue + padded := make([]byte, 32) + copy(padded[32-len(scalar):], scalar) + scalar = padded + + // Do the multiplication in C, updating point. + point := make([]byte, 64) + readBits(Bx, point[:32]) + readBits(By, point[32:]) + + pointPtr := (*C.uchar)(unsafe.Pointer(&point[0])) + scalarPtr := (*C.uchar)(unsafe.Pointer(&scalar[0])) + res := C.secp256k1_ext_scalar_mul(context, pointPtr, scalarPtr) + + // Unpack the result and clear temporaries. + x := new(big.Int).SetBytes(point[:32]) + y := new(big.Int).SetBytes(point[32:]) + for i := range point { + point[i] = 0 + } + for i := range padded { + scalar[i] = 0 + } + if res != 1 { + return nil, nil + } + return x, y +} diff --git a/crypto/secp256k1/scalar_mult_nocgo.go b/crypto/secp256k1/scalar_mult_nocgo.go new file mode 100644 index 000000000000..55756b5be849 --- /dev/null +++ b/crypto/secp256k1/scalar_mult_nocgo.go @@ -0,0 +1,13 @@ +// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found in +// the LICENSE file. 
+ +// +build gofuzz !cgo + +package secp256k1 + +import "math/big" + +func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { + panic("ScalarMult is not available when secp256k1 is built without cgo") +} diff --git a/crypto/secp256k1/secp256.go b/crypto/secp256k1/secp256.go index 9a7c06d7ce72..9e942ac6fefc 100644 --- a/crypto/secp256k1/secp256.go +++ b/crypto/secp256k1/secp256.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be found in // the LICENSE file. +// +build !gofuzz cgo + // Package secp256k1 wraps the bitcoin secp256k1 C library. package secp256k1 diff --git a/oss-fuzz.sh b/oss-fuzz.sh index a9bac0325765..081a8e1d5aab 100644 --- a/oss-fuzz.sh +++ b/oss-fuzz.sh @@ -102,6 +102,7 @@ compile_fuzzer tests/fuzzers/stacktrie Fuzz fuzzStackTrie compile_fuzzer tests/fuzzers/difficulty Fuzz fuzzDifficulty compile_fuzzer tests/fuzzers/abi Fuzz fuzzAbi compile_fuzzer tests/fuzzers/les Fuzz fuzzLes +compile_fuzzer tests/fuzzers/secp265k1 Fuzz fuzzSecp256k1 compile_fuzzer tests/fuzzers/vflux FuzzClientPool fuzzClientPool compile_fuzzer tests/fuzzers/bls12381 FuzzG1Add fuzz_g1_add diff --git a/tests/fuzzers/secp256k1/secp_fuzzer.go b/tests/fuzzers/secp256k1/secp_fuzzer.go new file mode 100644 index 000000000000..53845b643345 --- /dev/null +++ b/tests/fuzzers/secp256k1/secp_fuzzer.go @@ -0,0 +1,50 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +// build +gofuzz + +package secp256k1 + +import ( + "fmt" + + "github.com/btcsuite/btcd/btcec" + "github.com/ethereum/go-ethereum/crypto/secp256k1" + fuzz "github.com/google/gofuzz" +) + +func Fuzz(input []byte) int { + var ( + fuzzer = fuzz.NewFromGoFuzz(input) + curveA = secp256k1.S256() + curveB = btcec.S256() + dataP1 []byte + dataP2 []byte + ) + // first point + fuzzer.Fuzz(&dataP1) + x1, y1 := curveB.ScalarBaseMult(dataP1) + // second point + fuzzer.Fuzz(&dataP2) + x2, y2 := curveB.ScalarBaseMult(dataP2) + resAX, resAY := curveA.Add(x1, y1, x2, y2) + resBX, resBY := curveB.Add(x1, y1, x2, y2) + if resAX.Cmp(resBX) != 0 || resAY.Cmp(resBY) != 0 { + fmt.Printf("%s %s %s %s\n", x1, y1, x2, y2) + panic(fmt.Sprintf("Addition failed: geth: %s %s btcd: %s %s", resAX, resAY, resBX, resBY)) + } + return 0 +} diff --git a/tests/fuzzers/secp256k1/secp_test.go b/tests/fuzzers/secp256k1/secp_test.go new file mode 100644 index 000000000000..76bae87086f5 --- /dev/null +++ b/tests/fuzzers/secp256k1/secp_test.go @@ -0,0 +1,8 @@ +package secp256k1 + +import "testing" + +func TestFuzzer(t *testing.T) { + test := "00000000N0000000/R00000000000000000U0000S0000000mkhP000000000000000U" + Fuzz([]byte(test)) +} From 427175153c0e33a7f640f86bf2ce4c97f03ede72 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 27 May 2021 18:43:55 +0200 Subject: [PATCH 062/208] p2p/msgrate: return capacity as integer, clamp to max uint32 (#22943) * p2p/msgrate: return capacity as integer * eth/protocols/snap: remove conversions * p2p/msgrate: add overflow test * p2p/msgrate: make the capacity overflow test actually overflow * p2p/msgrate: clamp capacity to max int32 * p2p/msgrate: fix min/max confusion --- eth/downloader/peer.go | 48 +++++++++++++++++++------------------ eth/protocols/snap/sync.go | 30 +++++++++++------------ p2p/msgrate/msgrate.go | 14 ++++++++--- p2p/msgrate/msgrate_test.go | 28 ++++++++++++++++++++++ 4 files changed, 79 insertions(+), 41 deletions(-) create mode 100644 p2p/msgrate/msgrate_test.go diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index b9c771694112..066a366315a7 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -21,7 +21,6 @@ package downloader import ( "errors" - "math" "math/big" "sort" "sync" @@ -232,7 +231,7 @@ func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) // HeaderCapacity retrieves the peers header download allowance based on its // previously discovered throughput. func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { - cap := int(math.Ceil(p.rates.Capacity(eth.BlockHeadersMsg, targetRTT))) + cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT) if cap > MaxHeaderFetch { cap = MaxHeaderFetch } @@ -242,7 +241,7 @@ func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { // BlockCapacity retrieves the peers block download allowance based on its // previously discovered throughput. func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int { - cap := int(math.Ceil(p.rates.Capacity(eth.BlockBodiesMsg, targetRTT))) + cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT) if cap > MaxBlockFetch { cap = MaxBlockFetch } @@ -252,7 +251,7 @@ func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int { // ReceiptCapacity retrieves the peers receipt download allowance based on its // previously discovered throughput. 
func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { - cap := int(math.Ceil(p.rates.Capacity(eth.ReceiptsMsg, targetRTT))) + cap := p.rates.Capacity(eth.ReceiptsMsg, targetRTT) if cap > MaxReceiptFetch { cap = MaxReceiptFetch } @@ -262,7 +261,7 @@ func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { // NodeDataCapacity retrieves the peers state download allowance based on its // previously discovered throughput. func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int { - cap := int(math.Ceil(p.rates.Capacity(eth.NodeDataMsg, targetRTT))) + cap := p.rates.Capacity(eth.NodeDataMsg, targetRTT) if cap > MaxStateFetch { cap = MaxStateFetch } @@ -411,7 +410,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { idle := func(p *peerConnection) bool { return atomic.LoadInt32(&p.headerIdle) == 0 } - throughput := func(p *peerConnection) float64 { + throughput := func(p *peerConnection) int { return p.rates.Capacity(eth.BlockHeadersMsg, time.Second) } return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput) @@ -423,7 +422,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { idle := func(p *peerConnection) bool { return atomic.LoadInt32(&p.blockIdle) == 0 } - throughput := func(p *peerConnection) float64 { + throughput := func(p *peerConnection) int { return p.rates.Capacity(eth.BlockBodiesMsg, time.Second) } return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput) @@ -435,7 +434,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { idle := func(p *peerConnection) bool { return atomic.LoadInt32(&p.receiptIdle) == 0 } - throughput := func(p *peerConnection) float64 { + throughput := func(p *peerConnection) int { return p.rates.Capacity(eth.ReceiptsMsg, time.Second) } return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput) @@ -447,7 +446,7 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { idle := func(p *peerConnection) bool { return atomic.LoadInt32(&p.stateIdle) == 0 } - throughput := func(p *peerConnection) float64 { + throughput := func(p *peerConnection) int { return p.rates.Capacity(eth.NodeDataMsg, time.Second) } return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput) @@ -455,45 +454,48 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { // idlePeers retrieves a flat list of all currently idle peers satisfying the // protocol version constraints, using the provided function to check idleness. -// The resulting set of peers are sorted by their measure throughput. -func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) { +// The resulting set of peers are sorted by their capacity. 
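+// The capacity function is evaluated once for each idle peer while the
+// peer set read lock is held.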
+func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, capacity func(*peerConnection) int) ([]*peerConnection, int) { ps.lock.RLock() defer ps.lock.RUnlock() - idle, total := make([]*peerConnection, 0, len(ps.peers)), 0 - tps := make([]float64, 0, len(ps.peers)) + var ( + total = 0 + idle = make([]*peerConnection, 0, len(ps.peers)) + tps = make([]int, 0, len(ps.peers)) + ) for _, p := range ps.peers { if p.version >= minProtocol && p.version <= maxProtocol { if idleCheck(p) { idle = append(idle, p) - tps = append(tps, throughput(p)) + tps = append(tps, capacity(p)) } total++ } } + // And sort them - sortPeers := &peerThroughputSort{idle, tps} + sortPeers := &peerCapacitySort{idle, tps} sort.Sort(sortPeers) return sortPeers.p, total } -// peerThroughputSort implements the Sort interface, and allows for -// sorting a set of peers by their throughput -// The sorted data is with the _highest_ throughput first -type peerThroughputSort struct { +// peerCapacitySort implements sort.Interface. +// It sorts peer connections by capacity (descending). +type peerCapacitySort struct { p []*peerConnection - tp []float64 + tp []int } -func (ps *peerThroughputSort) Len() int { +func (ps *peerCapacitySort) Len() int { return len(ps.p) } -func (ps *peerThroughputSort) Less(i, j int) bool { +func (ps *peerCapacitySort) Less(i, j int) bool { return ps.tp[i] > ps.tp[j] } -func (ps *peerThroughputSort) Swap(i, j int) { +func (ps *peerCapacitySort) Swap(i, j int) { ps.p[i], ps.p[j] = ps.p[j], ps.p[i] ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i] } diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index c57fcd71f643..646df03887f1 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -861,7 +861,7 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac // Sort the peers by download capacity to use faster ones if many available idlers := &capacitySort{ ids: make([]string, 0, len(s.accountIdlers)), - caps: make([]float64, 0, len(s.accountIdlers)), + caps: make([]int, 0, len(s.accountIdlers)), } targetTTL := s.rates.TargetTimeout() for id := range s.accountIdlers { @@ -958,7 +958,7 @@ func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan * // Sort the peers by download capacity to use faster ones if many available idlers := &capacitySort{ ids: make([]string, 0, len(s.bytecodeIdlers)), - caps: make([]float64, 0, len(s.bytecodeIdlers)), + caps: make([]int, 0, len(s.bytecodeIdlers)), } targetTTL := s.rates.TargetTimeout() for id := range s.bytecodeIdlers { @@ -1012,11 +1012,11 @@ func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan * if cap > maxCodeRequestCount { cap = maxCodeRequestCount } - hashes := make([]common.Hash, 0, int(cap)) + hashes := make([]common.Hash, 0, cap) for hash := range task.codeTasks { delete(task.codeTasks, hash) hashes = append(hashes, hash) - if len(hashes) >= int(cap) { + if len(hashes) >= cap { break } } @@ -1061,7 +1061,7 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st // Sort the peers by download capacity to use faster ones if many available idlers := &capacitySort{ ids: make([]string, 0, len(s.storageIdlers)), - caps: make([]float64, 0, len(s.storageIdlers)), + caps: make([]int, 0, len(s.storageIdlers)), } targetTTL := s.rates.TargetTimeout() for id := range s.storageIdlers { @@ -1120,7 +1120,7 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st if cap < 
minRequestSize { // Don't bother with peers below a bare minimum performance cap = minRequestSize } - storageSets := int(cap / 1024) + storageSets := cap / 1024 var ( accounts = make([]common.Hash, 0, storageSets) @@ -1217,7 +1217,7 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai // Sort the peers by download capacity to use faster ones if many available idlers := &capacitySort{ ids: make([]string, 0, len(s.trienodeHealIdlers)), - caps: make([]float64, 0, len(s.trienodeHealIdlers)), + caps: make([]int, 0, len(s.trienodeHealIdlers)), } targetTTL := s.rates.TargetTimeout() for id := range s.trienodeHealIdlers { @@ -1284,9 +1284,9 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai cap = maxTrieRequestCount } var ( - hashes = make([]common.Hash, 0, int(cap)) - paths = make([]trie.SyncPath, 0, int(cap)) - pathsets = make([]TrieNodePathSet, 0, int(cap)) + hashes = make([]common.Hash, 0, cap) + paths = make([]trie.SyncPath, 0, cap) + pathsets = make([]TrieNodePathSet, 0, cap) ) for hash, pathset := range s.healer.trieTasks { delete(s.healer.trieTasks, hash) @@ -1295,7 +1295,7 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai paths = append(paths, pathset) pathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash - if len(hashes) >= int(cap) { + if len(hashes) >= cap { break } } @@ -1341,7 +1341,7 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai // Sort the peers by download capacity to use faster ones if many available idlers := &capacitySort{ ids: make([]string, 0, len(s.bytecodeHealIdlers)), - caps: make([]float64, 0, len(s.bytecodeHealIdlers)), + caps: make([]int, 0, len(s.bytecodeHealIdlers)), } targetTTL := s.rates.TargetTimeout() for id := range s.bytecodeHealIdlers { @@ -1407,12 +1407,12 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai if cap > maxCodeRequestCount { cap = maxCodeRequestCount } - hashes := make([]common.Hash, 0, int(cap)) + hashes := make([]common.Hash, 0, cap) for hash := range s.healer.codeTasks { delete(s.healer.codeTasks, hash) hashes = append(hashes, hash) - if len(hashes) >= int(cap) { + if len(hashes) >= cap { break } } @@ -2852,7 +2852,7 @@ func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) { // of highest capacity being at the front. type capacitySort struct { ids []string - caps []float64 + caps []int } func (s *capacitySort) Len() int { diff --git a/p2p/msgrate/msgrate.go b/p2p/msgrate/msgrate.go index 7cd172c5662e..5bfa27b43378 100644 --- a/p2p/msgrate/msgrate.go +++ b/p2p/msgrate/msgrate.go @@ -19,6 +19,7 @@ package msgrate import ( "errors" + "math" "sort" "sync" "time" @@ -162,7 +163,7 @@ func NewTracker(caps map[uint64]float64, rtt time.Duration) *Tracker { // the load proportionally to the requested items, so fetching a bit more might // still take the same RTT. By forcefully overshooting by a small amount, we can // avoid locking into a lower-that-real capacity. 
-func (t *Tracker) Capacity(kind uint64, targetRTT time.Duration) float64 { +func (t *Tracker) Capacity(kind uint64, targetRTT time.Duration) int { t.lock.RLock() defer t.lock.RUnlock() @@ -171,7 +172,14 @@ func (t *Tracker) Capacity(kind uint64, targetRTT time.Duration) float64 { // Return an overestimation to force the peer out of a stuck minima, adding // +1 in case the item count is too low for the overestimator to dent - return 1 + capacityOverestimation*throughput + return roundCapacity(1 + capacityOverestimation*throughput) +} + +// roundCapacity gives the integer value of a capacity. +// The result fits int32, and is guaranteed to be positive. +func roundCapacity(cap float64) int { + const maxInt32 = float64(1<<31 - 1) + return int(math.Min(maxInt32, math.Max(1, math.Ceil(cap)))) } // Update modifies the peer's capacity values for a specific data type with a new @@ -435,7 +443,7 @@ func (t *Trackers) detune() { // Capacity is a helper function to access a specific tracker without having to // track it explicitly outside. -func (t *Trackers) Capacity(id string, kind uint64, targetRTT time.Duration) float64 { +func (t *Trackers) Capacity(id string, kind uint64, targetRTT time.Duration) int { t.lock.RLock() defer t.lock.RUnlock() diff --git a/p2p/msgrate/msgrate_test.go b/p2p/msgrate/msgrate_test.go new file mode 100644 index 000000000000..a5c8dd0518ca --- /dev/null +++ b/p2p/msgrate/msgrate_test.go @@ -0,0 +1,28 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package msgrate + +import "testing" + +func TestCapacityOverflow(t *testing.T) { + tracker := NewTracker(nil, 1) + tracker.Update(1, 1, 100000) + cap := tracker.Capacity(1, 10000000) + if int32(cap) < 0 { + t.Fatalf("Negative: %v", int32(cap)) + } +} From 04cb5e2be30e1aa6c0cca657e10fec7239a6334f Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Thu, 27 May 2021 18:45:13 +0200 Subject: [PATCH 063/208] cmd/puppeth: remove outdated mist support (#22940) --- cmd/puppeth/module_dashboard.go | 64 +--------- cmd/puppeth/module_wallet.go | 201 -------------------------------- cmd/puppeth/wizard_dashboard.go | 8 +- cmd/puppeth/wizard_netstats.go | 8 -- cmd/puppeth/wizard_network.go | 9 +- cmd/puppeth/wizard_wallet.go | 113 ------------------ 6 files changed, 5 insertions(+), 398 deletions(-) delete mode 100644 cmd/puppeth/module_wallet.go delete mode 100644 cmd/puppeth/wizard_wallet.go diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go index a76ee19a068f..b238af0316d2 100644 --- a/cmd/puppeth/module_dashboard.go +++ b/cmd/puppeth/module_dashboard.go @@ -80,12 +80,10 @@ var dashboardContent = `